From 2ecba4f2a036fed41f85d69c8beea42470bcf07a Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Wed, 1 Mar 2023 22:59:54 +0100 Subject: [PATCH 01/95] first commit for the addition of the TabDDPM plugin --- .../core/models/tabular_ddpm/.lib/__init__.py | 12 + .../core/models/tabular_ddpm/.lib/data.py | 718 +++++++++++++ .../core/models/tabular_ddpm/.lib/deep.py | 168 +++ .../core/models/tabular_ddpm/.lib/env.py | 39 + .../core/models/tabular_ddpm/.lib/metrics.py | 158 +++ .../core/models/tabular_ddpm/.lib/util.py | 433 ++++++++ .../core/models/tabular_ddpm/.pipeline.py | 80 ++ .../core/models/tabular_ddpm/.sample.py | 159 +++ .../core/models/tabular_ddpm/.train.py | 156 +++ .../plugins/core/models/tabular_ddpm/.tune.py | 127 +++ .../core/models/tabular_ddpm/.utils_train.py | 88 ++ .../core/models/tabular_ddpm/README.md | 3 + .../core/models/tabular_ddpm/__init__.py | 2 + .../gaussian_multinomial_diffsuion.py | 992 ++++++++++++++++++ .../core/models/tabular_ddpm/modules.py | 486 +++++++++ .../core/models/tabular_ddpm/requirements.txt | 15 + .../plugins/core/models/tabular_ddpm/utils.py | 174 +++ src/synthcity/plugins/generic/plugin_ddpm.py | 217 ++++ third-party/tab-ddpm | 1 + 19 files changed, 4028 insertions(+) create mode 100644 src/synthcity/plugins/core/models/tabular_ddpm/.lib/__init__.py create mode 100644 src/synthcity/plugins/core/models/tabular_ddpm/.lib/data.py create mode 100644 src/synthcity/plugins/core/models/tabular_ddpm/.lib/deep.py create mode 100644 src/synthcity/plugins/core/models/tabular_ddpm/.lib/env.py create mode 100644 src/synthcity/plugins/core/models/tabular_ddpm/.lib/metrics.py create mode 100644 src/synthcity/plugins/core/models/tabular_ddpm/.lib/util.py create mode 100644 src/synthcity/plugins/core/models/tabular_ddpm/.pipeline.py create mode 100644 src/synthcity/plugins/core/models/tabular_ddpm/.sample.py create mode 100644 src/synthcity/plugins/core/models/tabular_ddpm/.train.py create mode 100644 src/synthcity/plugins/core/models/tabular_ddpm/.tune.py create mode 100644 src/synthcity/plugins/core/models/tabular_ddpm/.utils_train.py create mode 100644 src/synthcity/plugins/core/models/tabular_ddpm/README.md create mode 100644 src/synthcity/plugins/core/models/tabular_ddpm/__init__.py create mode 100644 src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py create mode 100644 src/synthcity/plugins/core/models/tabular_ddpm/modules.py create mode 100644 src/synthcity/plugins/core/models/tabular_ddpm/requirements.txt create mode 100644 src/synthcity/plugins/core/models/tabular_ddpm/utils.py create mode 100644 src/synthcity/plugins/generic/plugin_ddpm.py create mode 160000 third-party/tab-ddpm diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/.lib/__init__.py b/src/synthcity/plugins/core/models/tabular_ddpm/.lib/__init__.py new file mode 100644 index 00000000..54d6f6bb --- /dev/null +++ b/src/synthcity/plugins/core/models/tabular_ddpm/.lib/__init__.py @@ -0,0 +1,12 @@ +import torch +from icecream import install + +torch.set_num_threads(1) +install() + +from . 
import env # noqa +from .data import * # noqa +from .deep import * # noqa +from .env import * # noqa +from .metrics import * # noqa +from .util import * # noqa diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/.lib/data.py b/src/synthcity/plugins/core/models/tabular_ddpm/.lib/data.py new file mode 100644 index 00000000..912ce259 --- /dev/null +++ b/src/synthcity/plugins/core/models/tabular_ddpm/.lib/data.py @@ -0,0 +1,718 @@ +import hashlib +from collections import Counter +from copy import deepcopy +from dataclasses import astuple, dataclass, replace +from importlib.resources import path +from pathlib import Path +from typing import Any, Literal, Optional, Union, cast, Tuple, Dict, List + +import numpy as np +import pandas as pd +from sklearn.model_selection import train_test_split +from sklearn.pipeline import make_pipeline +import sklearn.preprocessing +import torch +import os +from category_encoders import LeaveOneOutEncoder +from sklearn.impute import SimpleImputer +from sklearn.preprocessing import StandardScaler +from scipy.spatial.distance import cdist + +from . import env, util +from .metrics import calculate_metrics as calculate_metrics_ +from .util import TaskType, load_json + +ArrayDict = Dict[str, np.ndarray] +TensorDict = Dict[str, torch.Tensor] + + +CAT_MISSING_VALUE = '__nan__' +CAT_RARE_VALUE = '__rare__' +Normalization = Literal['standard', 'quantile', 'minmax'] +NumNanPolicy = Literal['drop-rows', 'mean'] +CatNanPolicy = Literal['most_frequent'] +CatEncoding = Literal['one-hot', 'counter'] +YPolicy = Literal['default'] + + +class StandardScaler1d(StandardScaler): + def partial_fit(self, X, *args, **kwargs): + assert X.ndim == 1 + return super().partial_fit(X[:, None], *args, **kwargs) + + def transform(self, X, *args, **kwargs): + assert X.ndim == 1 + return super().transform(X[:, None], *args, **kwargs).squeeze(1) + + def inverse_transform(self, X, *args, **kwargs): + assert X.ndim == 1 + return super().inverse_transform(X[:, None], *args, **kwargs).squeeze(1) + + +def get_category_sizes(X: Union[torch.Tensor, np.ndarray]) -> List[int]: + XT = X.T.cpu().tolist() if isinstance(X, torch.Tensor) else X.T.tolist() + return [len(set(x)) for x in XT] + + +@dataclass(frozen=False) +class Dataset: + X_num: Optional[ArrayDict] + X_cat: Optional[ArrayDict] + y: ArrayDict + y_info: Dict[str, Any] + task_type: TaskType + n_classes: Optional[int] + + @classmethod + def from_dir(cls, dir_: Union[Path, str]) -> 'Dataset': + dir_ = Path(dir_) + splits = [k for k in ['train', 'val', 'test'] if dir_.joinpath(f'y_{k}.npy').exists()] + + def load(item) -> ArrayDict: + return { + x: cast(np.ndarray, np.load(dir_ / f'{item}_{x}.npy', allow_pickle=True)) # type: ignore[code] + for x in splits + } + + if Path(dir_ / 'info.json').exists(): + info = util.load_json(dir_ / 'info.json') + else: + info = None + return Dataset( + load('X_num') if dir_.joinpath('X_num_train.npy').exists() else None, + load('X_cat') if dir_.joinpath('X_cat_train.npy').exists() else None, + load('y'), + {}, + TaskType(info['task_type']), + info.get('n_classes'), + ) + + @property + def is_binclass(self) -> bool: + return self.task_type == TaskType.BINCLASS + + @property + def is_multiclass(self) -> bool: + return self.task_type == TaskType.MULTICLASS + + @property + def is_regression(self) -> bool: + return self.task_type == TaskType.REGRESSION + + @property + def n_num_features(self) -> int: + return 0 if self.X_num is None else self.X_num['train'].shape[1] + + @property + def n_cat_features(self) -> int: + 
return 0 if self.X_cat is None else self.X_cat['train'].shape[1] + + @property + def n_features(self) -> int: + return self.n_num_features + self.n_cat_features + + def size(self, part: Optional[str]) -> int: + return sum(map(len, self.y.values())) if part is None else len(self.y[part]) + + @property + def nn_output_dim(self) -> int: + if self.is_multiclass: + assert self.n_classes is not None + return self.n_classes + else: + return 1 + + def get_category_sizes(self, part: str) -> List[int]: + return [] if self.X_cat is None else get_category_sizes(self.X_cat[part]) + + def calculate_metrics( + self, + predictions: Dict[str, np.ndarray], + prediction_type: Optional[str], + ) -> Dict[str, Any]: + metrics = { + x: calculate_metrics_( + self.y[x], predictions[x], self.task_type, prediction_type, self.y_info + ) + for x in predictions + } + if self.task_type == TaskType.REGRESSION: + score_key = 'rmse' + score_sign = -1 + else: + score_key = 'accuracy' + score_sign = 1 + for part_metrics in metrics.values(): + part_metrics['score'] = score_sign * part_metrics[score_key] + return metrics + +def change_val(dataset: Dataset, val_size: float = 0.2): + # should be done before transformations + + y = np.concatenate([dataset.y['train'], dataset.y['val']], axis=0) + + ixs = np.arange(y.shape[0]) + if dataset.is_regression: + train_ixs, val_ixs = train_test_split(ixs, test_size=val_size, random_state=777) + else: + train_ixs, val_ixs = train_test_split(ixs, test_size=val_size, random_state=777, stratify=y) + + dataset.y['train'] = y[train_ixs] + dataset.y['val'] = y[val_ixs] + + if dataset.X_num is not None: + X_num = np.concatenate([dataset.X_num['train'], dataset.X_num['val']], axis=0) + dataset.X_num['train'] = X_num[train_ixs] + dataset.X_num['val'] = X_num[val_ixs] + + if dataset.X_cat is not None: + X_cat = np.concatenate([dataset.X_cat['train'], dataset.X_cat['val']], axis=0) + dataset.X_cat['train'] = X_cat[train_ixs] + dataset.X_cat['val'] = X_cat[val_ixs] + + return dataset + +def num_process_nans(dataset: Dataset, policy: Optional[NumNanPolicy]) -> Dataset: + assert dataset.X_num is not None + nan_masks = {k: np.isnan(v) for k, v in dataset.X_num.items()} + if not any(x.any() for x in nan_masks.values()): # type: ignore[code] + assert policy is None + return dataset + + assert policy is not None + if policy == 'drop-rows': + valid_masks = {k: ~v.any(1) for k, v in nan_masks.items()} + assert valid_masks[ + 'test' + ].all(), 'Cannot drop test rows, since this will affect the final metrics.' 
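+        # Drop every row that has a NaN in any numerical column, applying the
+        # same row mask to X_num, X_cat and y so that the splits stay aligned.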
+        new_data = {}
+        for data_name in ['X_num', 'X_cat', 'y']:
+            data_dict = getattr(dataset, data_name)
+            if data_dict is not None:
+                new_data[data_name] = {
+                    k: v[valid_masks[k]] for k, v in data_dict.items()
+                }
+        dataset = replace(dataset, **new_data)
+    elif policy == 'mean':
+        new_values = np.nanmean(dataset.X_num['train'], axis=0)
+        X_num = deepcopy(dataset.X_num)
+        for k, v in X_num.items():
+            num_nan_indices = np.where(nan_masks[k])
+            v[num_nan_indices] = np.take(new_values, num_nan_indices[1])
+        dataset = replace(dataset, X_num=X_num)
+    else:
+        util.raise_unknown('policy', policy)
+    return dataset
+
+
+# Inspired by: https://github.com/yandex-research/rtdl/blob/a4c93a32b334ef55d2a0559a4407c8306ffeeaee/lib/data.py#L20
+def normalize(
+    X: ArrayDict, normalization: Normalization, seed: Optional[int], return_normalizer : bool = False
+) -> Union[ArrayDict, Tuple[ArrayDict, Any]]:
+    X_train = X['train']
+    if normalization == 'standard':
+        normalizer = sklearn.preprocessing.StandardScaler()
+    elif normalization == 'minmax':
+        normalizer = sklearn.preprocessing.MinMaxScaler()
+    elif normalization == 'quantile':
+        normalizer = sklearn.preprocessing.QuantileTransformer(
+            output_distribution='normal',
+            n_quantiles=max(min(X['train'].shape[0] // 30, 1000), 10),
+            subsample=int(1e9),
+            random_state=seed,
+        )
+        # noise = 1e-3
+        # if noise > 0:
+        #     assert seed is not None
+        #     stds = np.std(X_train, axis=0, keepdims=True)
+        #     noise_std = noise / np.maximum(stds, noise)  # type: ignore[code]
+        #     X_train = X_train + noise_std * np.random.default_rng(seed).standard_normal(
+        #         X_train.shape
+        #     )
+    else:
+        util.raise_unknown('normalization', normalization)
+    normalizer.fit(X_train)
+    if return_normalizer:
+        return {k: normalizer.transform(v) for k, v in X.items()}, normalizer
+    return {k: normalizer.transform(v) for k, v in X.items()}
+
+
+def cat_process_nans(X: ArrayDict, policy: Optional[CatNanPolicy]) -> ArrayDict:
+    assert X is not None
+    nan_masks = {k: v == CAT_MISSING_VALUE for k, v in X.items()}
+    if any(x.any() for x in nan_masks.values()):  # type: ignore[code]
+        if policy is None:
+            X_new = X
+        elif policy == 'most_frequent':
+            imputer = SimpleImputer(missing_values=CAT_MISSING_VALUE, strategy=policy)  # type: ignore[code]
+            imputer.fit(X['train'])
+            X_new = {k: cast(np.ndarray, imputer.transform(v)) for k, v in X.items()}
+        else:
+            util.raise_unknown('categorical NaN policy', policy)
+    else:
+        assert policy is None
+        X_new = X
+    return X_new
+
+
+def cat_drop_rare(X: ArrayDict, min_frequency: float) -> ArrayDict:
+    assert 0.0 < min_frequency < 1.0
+    min_count = round(len(X['train']) * min_frequency)
+    X_new = {x: [] for x in X}
+    for column_idx in range(X['train'].shape[1]):
+        counter = Counter(X['train'][:, column_idx].tolist())
+        popular_categories = {k for k, v in counter.items() if v >= min_count}
+        for part in X_new:
+            X_new[part].append(
+                [
+                    (x if x in popular_categories else CAT_RARE_VALUE)
+                    for x in X[part][:, column_idx].tolist()
+                ]
+            )
+    return {k: np.array(v).T for k, v in X_new.items()}
+
+
+def cat_encode(
+    X: ArrayDict,
+    encoding: Optional[CatEncoding],
+    y_train: Optional[np.ndarray],
+    seed: Optional[int],
+    return_encoder : bool = False
+) -> Tuple[ArrayDict, bool, Optional[Any]]:  # (X, is_converted_to_numerical, encoder)
+    if encoding != 'counter':
+        y_train = None
+
+    # Step 1. Map strings to 0-based ranges
+
+    if encoding is None:
+        unknown_value = np.iinfo('int64').max - 3
+        oe = sklearn.preprocessing.OrdinalEncoder(
+            handle_unknown='use_encoded_value',  # type: ignore[code]
+            unknown_value=unknown_value,  # type: ignore[code]
+            dtype='int64',  # type: ignore[code]
+        ).fit(X['train'])
+        encoder = make_pipeline(oe)
+        encoder.fit(X['train'])
+        X = {k: encoder.transform(v) for k, v in X.items()}
+        max_values = X['train'].max(axis=0)
+        # Remap categories unseen during training to a fresh index per column.
+        for part in X.keys():
+            if part == 'train':
+                continue
+            for column_idx in range(X[part].shape[1]):
+                X[part][X[part][:, column_idx] == unknown_value, column_idx] = (
+                    max_values[column_idx] + 1
+                )
+        if return_encoder:
+            return (X, False, encoder)
+        return (X, False)
+
+    # Step 2. Encode.
+
+    elif encoding == 'one-hot':
+        ohe = sklearn.preprocessing.OneHotEncoder(
+            handle_unknown='ignore', sparse=False, dtype=np.float32  # type: ignore[code]
+        )
+        encoder = make_pipeline(ohe)
+
+        # encoder.steps.append(('ohe', ohe))
+        encoder.fit(X['train'])
+        X = {k: encoder.transform(v) for k, v in X.items()}
+    elif encoding == 'counter':
+        assert y_train is not None
+        assert seed is not None
+        loe = LeaveOneOutEncoder(sigma=0.1, random_state=seed, return_df=False)
+        encoder = make_pipeline(loe)
+        encoder.fit(X['train'], y_train)
+        X = {k: encoder.transform(v).astype('float32') for k, v in X.items()}  # type: ignore[code]
+        if isinstance(X['train'], pd.DataFrame):
+            X = {k: v.values for k, v in X.items()}  # type: ignore[code]
+    else:
+        util.raise_unknown('encoding', encoding)
+
+    if return_encoder:
+        return X, True, encoder  # type: ignore[code]
+    return (X, True)
+
+
+def build_target(
+    y: ArrayDict, policy: Optional[YPolicy], task_type: TaskType
+) -> Tuple[ArrayDict, Dict[str, Any]]:
+    info: Dict[str, Any] = {'policy': policy}
+    if policy is None:
+        pass
+    elif policy == 'default':
+        if task_type == TaskType.REGRESSION:
+            mean, std = float(y['train'].mean()), float(y['train'].std())
+            y = {k: (v - mean) / std for k, v in y.items()}
+            info['mean'] = mean
+            info['std'] = std
+    else:
+        util.raise_unknown('policy', policy)
+    return y, info
+
+
+@dataclass(frozen=True)
+class Transformations:
+    seed: int = 0
+    normalization: Optional[Normalization] = None
+    num_nan_policy: Optional[NumNanPolicy] = None
+    cat_nan_policy: Optional[CatNanPolicy] = None
+    cat_min_frequency: Optional[float] = None
+    cat_encoding: Optional[CatEncoding] = None
+    y_policy: Optional[YPolicy] = 'default'
+
+
+def transform_dataset(
+    dataset: Dataset,
+    transformations: Transformations,
+    cache_dir: Optional[Path],
+    return_transforms: bool = False
+) -> Dataset:
+    # WARNING: the order of transformations matters. Moreover, the current
+    # implementation is not ideal in that sense.
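+    # For reference, the order applied below is: numerical NaN policy ->
+    # normalization -> categorical NaN policy -> rare-category collapsing ->
+    # categorical encoding (ordinal / one-hot / leave-one-out; the latter two
+    # convert categoricals to numericals) -> target (y) policy.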
+ if cache_dir is not None: + transformations_md5 = hashlib.md5( + str(transformations).encode('utf-8') + ).hexdigest() + transformations_str = '__'.join(map(str, astuple(transformations))) + cache_path = ( + cache_dir / f'cache__{transformations_str}__{transformations_md5}.pickle' + ) + if cache_path.exists(): + cache_transformations, value = util.load_pickle(cache_path) + if transformations == cache_transformations: + print( + f"Using cached features: {cache_dir.name + '/' + cache_path.name}" + ) + return value + else: + raise RuntimeError(f'Hash collision for {cache_path}') + else: + cache_path = None + + if dataset.X_num is not None: + dataset = num_process_nans(dataset, transformations.num_nan_policy) + + num_transform = None + cat_transform = None + X_num = dataset.X_num + + if X_num is not None and transformations.normalization is not None: + X_num, num_transform = normalize( + X_num, + transformations.normalization, + transformations.seed, + return_normalizer=True + ) + num_transform = num_transform + + if dataset.X_cat is None: + assert transformations.cat_nan_policy is None + assert transformations.cat_min_frequency is None + # assert transformations.cat_encoding is None + X_cat = None + else: + X_cat = cat_process_nans(dataset.X_cat, transformations.cat_nan_policy) + if transformations.cat_min_frequency is not None: + X_cat = cat_drop_rare(X_cat, transformations.cat_min_frequency) + X_cat, is_num, cat_transform = cat_encode( + X_cat, + transformations.cat_encoding, + dataset.y['train'], + transformations.seed, + return_encoder=True + ) + if is_num: + X_num = ( + X_cat + if X_num is None + else {x: np.hstack([X_num[x], X_cat[x]]) for x in X_num} + ) + X_cat = None + + y, y_info = build_target(dataset.y, transformations.y_policy, dataset.task_type) + + dataset = replace(dataset, X_num=X_num, X_cat=X_cat, y=y, y_info=y_info) + dataset.num_transform = num_transform + dataset.cat_transform = cat_transform + + if cache_path is not None: + util.dump_pickle((transformations, dataset), cache_path) + # if return_transforms: + # return dataset, num_transform, cat_transform + return dataset + + +def build_dataset( + path: Union[str, Path], + transformations: Transformations, + cache: bool +) -> Dataset: + path = Path(path) + dataset = Dataset.from_dir(path) + return transform_dataset(dataset, transformations, path if cache else None) + + +def prepare_tensors( + dataset: Dataset, device: Union[str, torch.device] +) -> Tuple[Optional[TensorDict], Optional[TensorDict], TensorDict]: + X_num, X_cat, Y = ( + None if x is None else {k: torch.as_tensor(v) for k, v in x.items()} + for x in [dataset.X_num, dataset.X_cat, dataset.y] + ) + if device.type != 'cpu': + X_num, X_cat, Y = ( + None if x is None else {k: v.to(device) for k, v in x.items()} + for x in [X_num, X_cat, Y] + ) + assert X_num is not None + assert Y is not None + if not dataset.is_multiclass: + Y = {k: v.float() for k, v in Y.items()} + return X_num, X_cat, Y + +############### +## DataLoader## +############### + +class TabDataset(torch.utils.data.Dataset): + def __init__( + self, dataset : Dataset, split : Literal['train', 'val', 'test'] + ): + super().__init__() + + self.X_num = torch.from_numpy(dataset.X_num[split]) if dataset.X_num is not None else None + self.X_cat = torch.from_numpy(dataset.X_cat[split]) if dataset.X_cat is not None else None + self.y = torch.from_numpy(dataset.y[split]) + + assert self.y is not None + assert self.X_num is not None or self.X_cat is not None + + def __len__(self): + return len(self.y) + + def 
__getitem__(self, idx):
+        out_dict = {
+            'y': self.y[idx].long() if self.y is not None else None,
+        }
+
+        # Start from an empty *torch* tensor (not np.empty) so that the
+        # torch.cat below also works when there are no numerical features.
+        x = torch.empty(0)
+        if self.X_num is not None:
+            x = self.X_num[idx]
+        if self.X_cat is not None:
+            x = torch.cat([x, self.X_cat[idx]], dim=0)
+        return x.float(), out_dict
+
+def prepare_dataloader(
+    dataset : Dataset,
+    split : str,
+    batch_size: int,
+):
+
+    torch_dataset = TabDataset(dataset, split)
+    loader = torch.utils.data.DataLoader(
+        torch_dataset,
+        batch_size=batch_size,
+        shuffle=(split == 'train'),
+        num_workers=1,
+    )
+    while True:
+        yield from loader
+
+def prepare_torch_dataloader(
+    dataset : Dataset,
+    split : str,
+    shuffle : bool,
+    batch_size: int,
+) -> torch.utils.data.DataLoader:
+
+    torch_dataset = TabDataset(dataset, split)
+    loader = torch.utils.data.DataLoader(torch_dataset, batch_size=batch_size, shuffle=shuffle, num_workers=1)
+
+    return loader
+
+def dataset_from_csv(paths : Dict[str, str], cat_features, target, T):
+    assert 'train' in paths
+    y = {}
+    X_num = {}
+    X_cat = {} if len(cat_features) else None
+    for split in paths.keys():
+        df = pd.read_csv(paths[split])
+        y[split] = df[target].to_numpy().astype(float)
+        if X_cat is not None:
+            X_cat[split] = df[cat_features].to_numpy().astype(str)
+        X_num[split] = df.drop(cat_features + [target], axis=1).to_numpy().astype(float)
+
+    # NOTE: task_type is left as None here; callers that rely on task-specific
+    # behaviour (metrics, target standardization) must set it themselves.
+    dataset = Dataset(X_num, X_cat, y, {}, None, len(np.unique(y['train'])))
+    return transform_dataset(dataset, T, None)
+
+class FastTensorDataLoader:
+    """
+    A DataLoader-like object for a set of tensors that can be much faster than
+    TensorDataset + DataLoader because dataloader grabs individual indices of
+    the dataset and calls cat (slow).
+    Source: https://discuss.pytorch.org/t/dataloader-much-slower-than-manual-batching/27014/6
+    """
+    def __init__(self, *tensors, batch_size=32, shuffle=False):
+        """
+        Initialize a FastTensorDataLoader.
+        :param *tensors: tensors to store. Must have the same length @ dim 0.
+        :param batch_size: batch size to load.
+        :param shuffle: if True, shuffle the data *in-place* whenever an
+            iterator is created out of this object.
+        :returns: A FastTensorDataLoader. 
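+
+        Example (tensor names are illustrative):
+            loader = FastTensorDataLoader(X, y, batch_size=256, shuffle=True)
+            for x_batch, y_batch in loader:
+                ...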
+ """ + assert all(t.shape[0] == tensors[0].shape[0] for t in tensors) + self.tensors = tensors + + self.dataset_len = self.tensors[0].shape[0] + self.batch_size = batch_size + self.shuffle = shuffle + + # Calculate # batches + n_batches, remainder = divmod(self.dataset_len, self.batch_size) + if remainder > 0: + n_batches += 1 + self.n_batches = n_batches + def __iter__(self): + if self.shuffle: + r = torch.randperm(self.dataset_len) + self.tensors = [t[r] for t in self.tensors] + self.i = 0 + return self + + def __next__(self): + if self.i >= self.dataset_len: + raise StopIteration + batch = tuple(t[self.i:self.i+self.batch_size] for t in self.tensors) + self.i += self.batch_size + return batch + + def __len__(self): + return self.n_batches + +def prepare_fast_dataloader( + D : Dataset, + split : str, + batch_size: int +): + if D.X_cat is not None: + if D.X_num is not None: + X = torch.from_numpy(np.concatenate([D.X_num[split], D.X_cat[split]], axis=1)).float() + else: + X = torch.from_numpy(D.X_cat[split]).float() + else: + X = torch.from_numpy(D.X_num[split]).float() + y = torch.from_numpy(D.y[split]) + dataloader = FastTensorDataLoader(X, y, batch_size=batch_size, shuffle=(split=='train')) + while True: + yield from dataloader + +def prepare_fast_torch_dataloader( + D : Dataset, + split : str, + batch_size: int +): + if D.X_cat is not None: + X = torch.from_numpy(np.concatenate([D.X_num[split], D.X_cat[split]], axis=1)).float() + else: + X = torch.from_numpy(D.X_num[split]).float() + y = torch.from_numpy(D.y[split]) + dataloader = FastTensorDataLoader(X, y, batch_size=batch_size, shuffle=(split=='train')) + return dataloader + +def round_columns(X_real, X_synth, columns): + for col in columns: + uniq = np.unique(X_real[:,col]) + dist = cdist(X_synth[:, col][:, np.newaxis].astype(float), uniq[:, np.newaxis].astype(float)) + X_synth[:, col] = uniq[dist.argmin(axis=1)] + return X_synth + +def concat_features(D : Dataset): + if D.X_num is None: + assert D.X_cat is not None + X = {k: pd.DataFrame(v, columns=range(D.n_features)) for k, v in D.X_cat.items()} + elif D.X_cat is None: + assert D.X_num is not None + X = {k: pd.DataFrame(v, columns=range(D.n_features)) for k, v in D.X_num.items()} + else: + X = { + part: pd.concat( + [ + pd.DataFrame(D.X_num[part], columns=range(D.n_num_features)), + pd.DataFrame( + D.X_cat[part], + columns=range(D.n_num_features, D.n_features), + ), + ], + axis=1, + ) + for part in D.y.keys() + } + + return X + +def concat_to_pd(X_num, X_cat, y): + if X_num is None: + return pd.concat([ + pd.DataFrame(X_cat, columns=list(range(X_cat.shape[1]))), + pd.DataFrame(y, columns=['y']) + ], axis=1) + if X_cat is not None: + return pd.concat([ + pd.DataFrame(X_num, columns=list(range(X_num.shape[1]))), + pd.DataFrame(X_cat, columns=list(range(X_num.shape[1], X_num.shape[1] + X_cat.shape[1]))), + pd.DataFrame(y, columns=['y']) + ], axis=1) + return pd.concat([ + pd.DataFrame(X_num, columns=list(range(X_num.shape[1]))), + pd.DataFrame(y, columns=['y']) + ], axis=1) + +def read_pure_data(path, split='train'): + y = np.load(os.path.join(path, f'y_{split}.npy'), allow_pickle=True) + X_num = None + X_cat = None + if os.path.exists(os.path.join(path, f'X_num_{split}.npy')): + X_num = np.load(os.path.join(path, f'X_num_{split}.npy'), allow_pickle=True) + if os.path.exists(os.path.join(path, f'X_cat_{split}.npy')): + X_cat = np.load(os.path.join(path, f'X_cat_{split}.npy'), allow_pickle=True) + + return X_num, X_cat, y + +def read_changed_val(path, val_size=0.2): + path = 
Path(path) + X_num_train, X_cat_train, y_train = read_pure_data(path, 'train') + X_num_val, X_cat_val, y_val = read_pure_data(path, 'val') + is_regression = load_json(path / 'info.json')['task_type'] == 'regression' + + y = np.concatenate([y_train, y_val], axis=0) + + ixs = np.arange(y.shape[0]) + if is_regression: + train_ixs, val_ixs = train_test_split(ixs, test_size=val_size, random_state=777) + else: + train_ixs, val_ixs = train_test_split(ixs, test_size=val_size, random_state=777, stratify=y) + y_train = y[train_ixs] + y_val = y[val_ixs] + + if X_num_train is not None: + X_num = np.concatenate([X_num_train, X_num_val], axis=0) + X_num_train = X_num[train_ixs] + X_num_val = X_num[val_ixs] + + if X_cat_train is not None: + X_cat = np.concatenate([X_cat_train, X_cat_val], axis=0) + X_cat_train = X_cat[train_ixs] + X_cat_val = X_cat[val_ixs] + + return X_num_train, X_cat_train, y_train, X_num_val, X_cat_val, y_val + +############# + +def load_dataset_info(dataset_dir_name: str) -> Dict[str, Any]: + path = Path("data/" + dataset_dir_name) + info = util.load_json(path / 'info.json') + info['size'] = info['train_size'] + info['val_size'] + info['test_size'] + info['n_features'] = info['n_num_features'] + info['n_cat_features'] + info['path'] = path + return info diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/.lib/deep.py b/src/synthcity/plugins/core/models/tabular_ddpm/.lib/deep.py new file mode 100644 index 00000000..aeed3e2a --- /dev/null +++ b/src/synthcity/plugins/core/models/tabular_ddpm/.lib/deep.py @@ -0,0 +1,168 @@ +import statistics +from dataclasses import dataclass +from typing import Any, Callable, Literal, cast + +import rtdl +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +import zero +from torch import Tensor + +from .util import TaskType + + +def cos_sin(x: Tensor) -> Tensor: + return torch.cat([torch.cos(x), torch.sin(x)], -1) + + +@dataclass +class PeriodicOptions: + n: int # the output size is 2 * n + sigma: float + trainable: bool + initialization: Literal['log-linear', 'normal'] + + +class Periodic(nn.Module): + def __init__(self, n_features: int, options: PeriodicOptions) -> None: + super().__init__() + if options.initialization == 'log-linear': + coefficients = options.sigma ** (torch.arange(options.n) / options.n) + coefficients = coefficients[None].repeat(n_features, 1) + else: + assert options.initialization == 'normal' + coefficients = torch.normal(0.0, options.sigma, (n_features, options.n)) + if options.trainable: + self.coefficients = nn.Parameter(coefficients) # type: ignore[code] + else: + self.register_buffer('coefficients', coefficients) + + def forward(self, x: Tensor) -> Tensor: + assert x.ndim == 2 + return cos_sin(2 * torch.pi * self.coefficients[None] * x[..., None]) + + +def get_n_parameters(m: nn.Module): + return sum(x.numel() for x in m.parameters() if x.requires_grad) + + +def get_loss_fn(task_type: TaskType) -> Callable[..., Tensor]: + return ( + F.binary_cross_entropy_with_logits + if task_type == TaskType.BINCLASS + else F.cross_entropy + if task_type == TaskType.MULTICLASS + else F.mse_loss + ) + + +def default_zero_weight_decay_condition(module_name, module, parameter_name, parameter): + del module_name, parameter + return parameter_name.endswith('bias') or isinstance( + module, + ( + nn.BatchNorm1d, + nn.LayerNorm, + nn.InstanceNorm1d, + rtdl.CLSToken, + rtdl.NumericalFeatureTokenizer, + rtdl.CategoricalFeatureTokenizer, + Periodic, + ), + ) + + +def 
split_parameters_by_weight_decay( + model: nn.Module, zero_weight_decay_condition=default_zero_weight_decay_condition +) -> list[dict[str, Any]]: + parameters_info = {} + for module_name, module in model.named_modules(): + for parameter_name, parameter in module.named_parameters(): + full_parameter_name = ( + f'{module_name}.{parameter_name}' if module_name else parameter_name + ) + parameters_info.setdefault(full_parameter_name, ([], parameter))[0].append( + zero_weight_decay_condition( + module_name, module, parameter_name, parameter + ) + ) + params_with_wd = {'params': []} + params_without_wd = {'params': [], 'weight_decay': 0.0} + for full_parameter_name, (results, parameter) in parameters_info.items(): + (params_without_wd if any(results) else params_with_wd)['params'].append( + parameter + ) + return [params_with_wd, params_without_wd] + + +def make_optimizer( + config: dict[str, Any], + parameter_groups, +) -> optim.Optimizer: + if config['optimizer'] == 'FT-Transformer-default': + return optim.AdamW(parameter_groups, lr=1e-4, weight_decay=1e-5) + return getattr(optim, config['optimizer'])( + parameter_groups, + **{x: config[x] for x in ['lr', 'weight_decay', 'momentum'] if x in config}, + ) + + +def get_lr(optimizer: optim.Optimizer) -> float: + return next(iter(optimizer.param_groups))['lr'] + + +def is_oom_exception(err: RuntimeError) -> bool: + return any( + x in str(err) + for x in [ + 'CUDA out of memory', + 'CUBLAS_STATUS_ALLOC_FAILED', + 'CUDA error: out of memory', + ] + ) + + +def train_with_auto_virtual_batch( + optimizer, + loss_fn, + step, + batch, + chunk_size: int, +) -> tuple[Tensor, int]: + batch_size = len(batch) + random_state = zero.random.get_state() + loss = None + while chunk_size != 0: + try: + zero.random.set_state(random_state) + optimizer.zero_grad() + if batch_size <= chunk_size: + loss = loss_fn(*step(batch)) + loss.backward() + else: + loss = None + for chunk in zero.iter_batches(batch, chunk_size): + chunk_loss = loss_fn(*step(chunk)) + chunk_loss = chunk_loss * (len(chunk) / batch_size) + chunk_loss.backward() + if loss is None: + loss = chunk_loss.detach() + else: + loss += chunk_loss.detach() + except RuntimeError as err: + if not is_oom_exception(err): + raise + chunk_size //= 2 + else: + break + if not chunk_size: + raise RuntimeError('Not enough memory even for batch_size=1') + optimizer.step() + return cast(Tensor, loss), chunk_size + + +def process_epoch_losses(losses: list[Tensor]) -> tuple[list[float], float]: + losses_ = torch.stack(losses).tolist() + return losses_, statistics.mean(losses_) diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/.lib/env.py b/src/synthcity/plugins/core/models/tabular_ddpm/.lib/env.py new file mode 100644 index 00000000..64be89d7 --- /dev/null +++ b/src/synthcity/plugins/core/models/tabular_ddpm/.lib/env.py @@ -0,0 +1,39 @@ +""" +Have not used in TabDDPM project. 
+"""
+
+import datetime
+import os
+import shutil
+import typing as ty
+from pathlib import Path
+
+PROJ = Path('tab-ddpm/').absolute().resolve()
+EXP = PROJ / 'exp'
+DATA = PROJ / 'data'
+
+
+def get_path(path: ty.Union[str, Path]) -> Path:
+    if isinstance(path, str):
+        path = Path(path)
+    if not path.is_absolute():
+        path = PROJ / path
+    return path.resolve()
+
+
+def get_relative_path(path: ty.Union[str, Path]) -> Path:
+    return get_path(path).relative_to(PROJ)
+
+
+def duplicate_path(
+    src: ty.Union[str, Path], alternative_project_dir: ty.Union[str, Path]
+) -> None:
+    src = get_path(src)
+    alternative_project_dir = get_path(alternative_project_dir)
+    dst = alternative_project_dir / src.relative_to(PROJ)
+    dst.parent.mkdir(parents=True, exist_ok=True)
+    if dst.exists():
+        dst = dst.with_name(
+            dst.name + '_' + datetime.datetime.now().strftime('%Y%m%dT%H%M%S')
+        )
+    (shutil.copytree if src.is_dir() else shutil.copyfile)(src, dst)
diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/.lib/metrics.py b/src/synthcity/plugins/core/models/tabular_ddpm/.lib/metrics.py
new file mode 100644
index 00000000..bdcac817
--- /dev/null
+++ b/src/synthcity/plugins/core/models/tabular_ddpm/.lib/metrics.py
@@ -0,0 +1,158 @@
+import enum
+from typing import Any, Optional, Tuple, Dict, Union, cast
+from functools import partial
+
+import numpy as np
+import scipy.special
+import sklearn.metrics as skm
+
+from . import util
+from .util import TaskType
+
+
+class PredictionType(enum.Enum):
+    LOGITS = 'logits'
+    PROBS = 'probs'
+
+class MetricsReport:
+    def __init__(self, report: dict, task_type: TaskType):
+        self._res = {k: {} for k in report.keys()}
+        if task_type in (TaskType.BINCLASS, TaskType.MULTICLASS):
+            self._metrics_names = ["acc", "f1"]
+            if task_type == TaskType.BINCLASS:
+                # roc_auc is tracked only for binary classification.
+                self._metrics_names.append("roc_auc")
+            for k in report.keys():
+                self._res[k]["acc"] = report[k]["accuracy"]
+                self._res[k]["f1"] = report[k]["macro avg"]["f1-score"]
+                if task_type == TaskType.BINCLASS:
+                    self._res[k]["roc_auc"] = report[k]["roc_auc"]
+
+        elif task_type == TaskType.REGRESSION:
+            self._metrics_names = ["r2", "rmse"]
+            for k in report.keys():
+                self._res[k]["r2"] = report[k]["r2"]
+                self._res[k]["rmse"] = report[k]["rmse"]
+        else:
+            raise ValueError("Unknown TaskType!")
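+    # `_res` maps split name ('train'/'val'/'test') -> {metric name -> value}.
+    # The accessors below read from it; get_val_score/get_test_score prefer r2
+    # (regression) and fall back to macro-F1 (classification).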
+ + def get_splits_names(self) -> list[str]: + return self._res.keys() + + def get_metrics_names(self) -> list[str]: + return self._metrics_names + + def get_metric(self, split: str, metric: str) -> float: + return self._res[split][metric] + + def get_val_score(self) -> float: + return self._res["val"]["r2"] if "r2" in self._res["val"] else self._res["val"]["f1"] + + def get_test_score(self) -> float: + return self._res["test"]["r2"] if "r2" in self._res["test"] else self._res["test"]["f1"] + + def print_metrics(self) -> None: + res = { + "val": {k: np.around(self._res["val"][k], 4) for k in self._res["val"]}, + "test": {k: np.around(self._res["test"][k], 4) for k in self._res["test"]} + } + + print("*"*100) + print("[val]") + print(res["val"]) + print("[test]") + print(res["test"]) + + return res + +class SeedsMetricsReport: + def __init__(self): + self._reports = [] + + def add_report(self, report: MetricsReport) -> None: + self._reports.append(report) + + def get_mean_std(self) -> dict: + res = {k: {} for k in ["train", "val", "test"]} + for split in self._reports[0].get_splits_names(): + for metric in self._reports[0].get_metrics_names(): + res[split][metric] = [x.get_metric(split, metric) for x in self._reports] + + agg_res = {k: {} for k in ["train", "val", "test"]} + for split in self._reports[0].get_splits_names(): + for metric in self._reports[0].get_metrics_names(): + for k, f in [("count", len), ("mean", np.mean), ("std", np.std)]: + agg_res[split][f"{metric}-{k}"] = f(res[split][metric]) + self._res = res + self._agg_res = agg_res + + return agg_res + + def print_result(self) -> dict: + res = {split: {k: float(np.around(self._agg_res[split][k], 4)) for k in self._agg_res[split]} for split in ["val", "test"]} + print("="*100) + print("EVAL RESULTS:") + print("[val]") + print(res["val"]) + print("[test]") + print(res["test"]) + print("="*100) + return res + +def calculate_rmse( + y_true: np.ndarray, y_pred: np.ndarray, std: Optional[float] +) -> float: + rmse = skm.mean_squared_error(y_true, y_pred) ** 0.5 + if std is not None: + rmse *= std + return rmse + + +def _get_labels_and_probs( + y_pred: np.ndarray, task_type: TaskType, prediction_type: Optional[PredictionType] +) -> Tuple[np.ndarray, Optional[np.ndarray]]: + assert task_type in (TaskType.BINCLASS, TaskType.MULTICLASS) + + if prediction_type is None: + return y_pred, None + + if prediction_type == PredictionType.LOGITS: + probs = ( + scipy.special.expit(y_pred) + if task_type == TaskType.BINCLASS + else scipy.special.softmax(y_pred, axis=1) + ) + elif prediction_type == PredictionType.PROBS: + probs = y_pred + else: + util.raise_unknown('prediction_type', prediction_type) + + assert probs is not None + labels = np.round(probs) if task_type == TaskType.BINCLASS else probs.argmax(axis=1) + return labels.astype('int64'), probs + + +def calculate_metrics( + y_true: np.ndarray, + y_pred: np.ndarray, + task_type: Union[str, TaskType], + prediction_type: Optional[Union[str, PredictionType]], + y_info: Dict[str, Any], +) -> Dict[str, Any]: + # Example: calculate_metrics(y_true, y_pred, 'binclass', 'logits', {}) + task_type = TaskType(task_type) + if prediction_type is not None: + prediction_type = PredictionType(prediction_type) + + if task_type == TaskType.REGRESSION: + assert prediction_type is None + assert 'std' in y_info + rmse = calculate_rmse(y_true, y_pred, y_info['std']) + r2 = skm.r2_score(y_true, y_pred) + result = {'rmse': rmse, 'r2': r2} + else: + labels, probs = _get_labels_and_probs(y_pred, task_type, 
prediction_type) + result = cast( + Dict[str, Any], skm.classification_report(y_true, labels, output_dict=True) + ) + if task_type == TaskType.BINCLASS: + result['roc_auc'] = skm.roc_auc_score(y_true, probs) + return result diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/.lib/util.py b/src/synthcity/plugins/core/models/tabular_ddpm/.lib/util.py new file mode 100644 index 00000000..75e05c9c --- /dev/null +++ b/src/synthcity/plugins/core/models/tabular_ddpm/.lib/util.py @@ -0,0 +1,433 @@ +import argparse +import atexit +import enum +import json +import os +import pickle +import shutil +import sys +import time +import uuid +from copy import deepcopy +from dataclasses import asdict, fields, is_dataclass +from pathlib import Path +from pprint import pprint +from typing import Any, Callable, List, Dict, Type, Optional, Tuple, TypeVar, Union, cast, get_args, get_origin + +import __main__ +import numpy as np +import tomli +import tomli_w +import torch +import zero + +from . import env + +RawConfig = Dict[str, Any] +Report = Dict[str, Any] +T = TypeVar('T') + + +class Part(enum.Enum): + TRAIN = 'train' + VAL = 'val' + TEST = 'test' + + def __str__(self) -> str: + return self.value + + +class TaskType(enum.Enum): + BINCLASS = 'binclass' + MULTICLASS = 'multiclass' + REGRESSION = 'regression' + + def __str__(self) -> str: + return self.value + + +class Timer(zero.Timer): + @classmethod + def launch(cls) -> 'Timer': + timer = cls() + timer.run() + return timer + + +def update_training_log(training_log, data, metrics): + def _update(log_part, data_part): + for k, v in data_part.items(): + if isinstance(v, dict): + _update(log_part.setdefault(k, {}), v) + elif isinstance(v, list): + log_part.setdefault(k, []).extend(v) + else: + log_part.setdefault(k, []).append(v) + + _update(training_log, data) + transposed_metrics = {} + for part, part_metrics in metrics.items(): + for metric_name, value in part_metrics.items(): + transposed_metrics.setdefault(metric_name, {})[part] = value + _update(training_log, transposed_metrics) + + +def raise_unknown(unknown_what: str, unknown_value: Any): + raise ValueError(f'Unknown {unknown_what}: {unknown_value}') + + +def _replace(data, condition, value): + def do(x): + if isinstance(x, dict): + return {k: do(v) for k, v in x.items()} + elif isinstance(x, list): + return [do(y) for y in x] + else: + return value if condition(x) else x + + return do(data) + + +_CONFIG_NONE = '__none__' + + +def unpack_config(config: RawConfig) -> RawConfig: + config = cast(RawConfig, _replace(config, lambda x: x == _CONFIG_NONE, None)) + return config + + +def pack_config(config: RawConfig) -> RawConfig: + config = cast(RawConfig, _replace(config, lambda x: x is None, _CONFIG_NONE)) + return config + + +def load_config(path: Union[Path, str]) -> Any: + with open(path, 'rb') as f: + return unpack_config(tomli.load(f)) + + +def dump_config(config: Any, path: Union[Path, str]) -> None: + with open(path, 'wb') as f: + tomli_w.dump(pack_config(config), f) + # check that there are no bugs in all these "pack/unpack" things + assert config == load_config(path) + + +def load_json(path: Union[Path, str], **kwargs) -> Any: + return json.loads(Path(path).read_text(), **kwargs) + + +def dump_json(x: Any, path: Union[Path, str], **kwargs) -> None: + kwargs.setdefault('indent', 4) + Path(path).write_text(json.dumps(x, **kwargs) + '\n') + + +def load_pickle(path: Union[Path, str], **kwargs) -> Any: + return pickle.loads(Path(path).read_bytes(), **kwargs) + + +def dump_pickle(x: Any, path: 
Union[Path, str], **kwargs) -> None: + Path(path).write_bytes(pickle.dumps(x, **kwargs)) + + +def load(path: Union[Path, str], **kwargs) -> Any: + return globals()[f'load_{Path(path).suffix[1:]}'](Path(path), **kwargs) + + +def dump(x: Any, path: Union[Path, str], **kwargs) -> Any: + return globals()[f'dump_{Path(path).suffix[1:]}'](x, Path(path), **kwargs) + + +def _get_output_item_path( + path: Union[str, Path], filename: str, must_exist: bool +) -> Path: + path = env.get_path(path) + if path.suffix == '.toml': + path = path.with_suffix('') + if path.is_dir(): + path = path / filename + else: + assert path.name == filename + assert path.parent.exists() + if must_exist: + assert path.exists() + return path + + +def load_report(path: Path) -> Report: + return load_json(_get_output_item_path(path, 'report.json', True)) + + +def dump_report(report: dict, path: Path) -> None: + dump_json(report, _get_output_item_path(path, 'report.json', False)) + + +def load_predictions(path: Path) -> Dict[str, np.ndarray]: + with np.load(_get_output_item_path(path, 'predictions.npz', True)) as predictions: + return {x: predictions[x] for x in predictions} + + +def dump_predictions(predictions: Dict[str, np.ndarray], path: Path) -> None: + np.savez(_get_output_item_path(path, 'predictions.npz', False), **predictions) + + +def dump_metrics(metrics: Dict[str, Any], path: Path) -> None: + dump_json(metrics, _get_output_item_path(path, 'metrics.json', False)) + + +def load_checkpoint(path: Path, *args, **kwargs) -> Dict[str, np.ndarray]: + return torch.load( + _get_output_item_path(path, 'checkpoint.pt', True), *args, **kwargs + ) + + +def get_device() -> torch.device: + if torch.cuda.is_available(): + assert os.environ.get('CUDA_VISIBLE_DEVICES') is not None + return torch.device('cuda:0') + else: + return torch.device('cpu') + + +def _print_sep(c, size=100): + print(c * size) + + +def start( + config_cls: Type[T] = RawConfig, + argv: Optional[List[str]] = None, + patch_raw_config: Optional[Callable[[RawConfig], None]] = None, +) -> Tuple[T, Path, Report]: # config # output dir # report + parser = argparse.ArgumentParser() + parser.add_argument('config', metavar='FILE') + parser.add_argument('--force', action='store_true') + parser.add_argument('--continue', action='store_true', dest='continue_') + if argv is None: + program = __main__.__file__ + args = parser.parse_args() + else: + program = argv[0] + try: + args = parser.parse_args(argv[1:]) + except Exception: + print( + 'Failed to parse `argv`.' + ' Remember that the first item of `argv` must be the path (relative to' + ' the project root) to the script/notebook.' 
+            )
+            raise
+
+    snapshot_dir = os.environ.get('SNAPSHOT_PATH')
+    if snapshot_dir and Path(snapshot_dir).joinpath('CHECKPOINTS_RESTORED').exists():
+        assert args.continue_
+
+    config_path = env.get_path(args.config)
+    output_dir = config_path.with_suffix('')
+    _print_sep('=')
+    print(f'[output] {output_dir}')
+    _print_sep('=')
+
+    assert config_path.exists()
+    raw_config = load_config(config_path)
+    if patch_raw_config is not None:
+        patch_raw_config(raw_config)
+    if is_dataclass(config_cls):
+        config = from_dict(config_cls, raw_config)
+        full_raw_config = asdict(config)
+    else:
+        assert config_cls is dict
+        full_raw_config = config = raw_config
+
+    if output_dir.exists():
+        if args.force:
+            print('Removing the existing output and creating a new one...')
+            shutil.rmtree(output_dir)
+            output_dir.mkdir()
+        elif not args.continue_:
+            backup_output(output_dir)
+            print('The output directory already exists. Done!\n')
+            sys.exit()
+        elif output_dir.joinpath('DONE').exists():
+            backup_output(output_dir)
+            print('The "DONE" file already exists. Done!')
+            sys.exit()
+        else:
+            print('Continuing with the existing output...')
+    else:
+        print('Creating the output...')
+        output_dir.mkdir()
+
+    report = {
+        'program': str(env.get_relative_path(program)),
+        'environment': {},
+        'config': full_raw_config,
+    }
+    if torch.cuda.is_available():  # type: ignore[code]
+        report['environment'].update(
+            {
+                'CUDA_VISIBLE_DEVICES': os.environ.get('CUDA_VISIBLE_DEVICES'),
+                'gpus': zero.hardware.get_gpus_info(),
+                'torch.version.cuda': torch.version.cuda,
+                'torch.backends.cudnn.version()': torch.backends.cudnn.version(),  # type: ignore[code]
+                'torch.cuda.nccl.version()': torch.cuda.nccl.version(),  # type: ignore[code]
+            }
+        )
+    dump_report(report, output_dir)
+    dump_json(raw_config, output_dir / 'raw_config.json')
+    _print_sep('-')
+    pprint(full_raw_config, width=100)
+    _print_sep('-')
+    return cast(config_cls, config), output_dir, report
+
+
+_LAST_SNAPSHOT_TIME = None
+
+
+def backup_output(output_dir: Path) -> None:
+    backup_dir = os.environ.get('TMP_OUTPUT_PATH')
+    snapshot_dir = os.environ.get('SNAPSHOT_PATH')
+    if backup_dir is None:
+        assert snapshot_dir is None
+        return
+    assert snapshot_dir is not None
+
+    try:
+        relative_output_dir = output_dir.relative_to(env.PROJ)
+    except ValueError:
+        return
+
+    for dir_ in [Path(backup_dir), Path(snapshot_dir)]:
+        new_output_dir = dir_ / relative_output_dir
+        prev_backup_output_dir = new_output_dir.with_name(new_output_dir.name + '_prev')
+        new_output_dir.parent.mkdir(exist_ok=True, parents=True)
+        if new_output_dir.exists():
+            new_output_dir.rename(prev_backup_output_dir)
+        shutil.copytree(output_dir, new_output_dir)
+        # the case for evaluate.py which automatically creates configs
+        if output_dir.with_suffix('.toml').exists():
+            shutil.copyfile(
+                output_dir.with_suffix('.toml'), new_output_dir.with_suffix('.toml')
+            )
+        if prev_backup_output_dir.exists():
+            shutil.rmtree(prev_backup_output_dir)
+
+    global _LAST_SNAPSHOT_TIME
+    if _LAST_SNAPSHOT_TIME is None or time.time() - _LAST_SNAPSHOT_TIME > 10 * 60:
+        import nirvana_dl.snapshot  # type: ignore[code]
+
+        nirvana_dl.snapshot.dump_snapshot()
+        _LAST_SNAPSHOT_TIME = time.time()
+        print('The snapshot was saved!')
+
+
+def _get_scores(metrics: Dict[str, Dict[str, Any]]) -> Optional[Dict[str, float]]:
+    return (
+        {k: v['score'] for k, v in metrics.items()}
+        if 'score' in next(iter(metrics.values()))
+        else None
+    )
+
+
+def format_scores(metrics: Dict[str, 
Dict[str, Any]]) -> str: + return ' '.join( + f"[{x}] {metrics[x]['score']:.3f}" + for x in ['test', 'val', 'train'] + if x in metrics + ) + + +def finish(output_dir: Path, report: dict) -> None: + print() + _print_sep('=') + + metrics = report.get('metrics') + if metrics is not None: + scores = _get_scores(metrics) + if scores is not None: + dump_json(scores, output_dir / 'scores.json') + print(format_scores(metrics)) + _print_sep('-') + + dump_report(report, output_dir) + json_output_path = os.environ.get('JSON_OUTPUT_FILE') + if json_output_path: + try: + key = str(output_dir.relative_to(env.PROJ)) + except ValueError: + pass + else: + json_output_path = Path(json_output_path) + try: + json_data = json.loads(json_output_path.read_text()) + except (FileNotFoundError, json.decoder.JSONDecodeError): + json_data = {} + json_data[key] = load_json(output_dir / 'report.json') + json_output_path.write_text(json.dumps(json_data, indent=4)) + shutil.copyfile( + json_output_path, + os.path.join(os.environ['SNAPSHOT_PATH'], 'json_output.json'), + ) + + output_dir.joinpath('DONE').touch() + backup_output(output_dir) + print(f'Done! | {report.get("time")} | {output_dir}') + _print_sep('=') + print() + + +def from_dict(datacls: Type[T], data: dict) -> T: + assert is_dataclass(datacls) + data = deepcopy(data) + for field in fields(datacls): + if field.name not in data: + continue + if is_dataclass(field.type): + data[field.name] = from_dict(field.type, data[field.name]) + elif ( + get_origin(field.type) is Union + and len(get_args(field.type)) == 2 + and get_args(field.type)[1] is type(None) + and is_dataclass(get_args(field.type)[0]) + ): + if data[field.name] is not None: + data[field.name] = from_dict(get_args(field.type)[0], data[field.name]) + return datacls(**data) + + +def replace_factor_with_value( + config: RawConfig, + key: str, + reference_value: int, + bounds: Tuple[float, float], +) -> None: + factor_key = key + '_factor' + if factor_key not in config: + assert key in config + else: + assert key not in config + factor = config.pop(factor_key) + assert bounds[0] <= factor <= bounds[1] + config[key] = int(factor * reference_value) + + +def get_temporary_copy(path: Union[str, Path]) -> Path: + path = env.get_path(path) + assert not path.is_dir() and not path.is_symlink() + tmp_path = path.with_name( + path.stem + '___' + str(uuid.uuid4()).replace('-', '') + path.suffix + ) + shutil.copyfile(path, tmp_path) + atexit.register(lambda: tmp_path.unlink()) + return tmp_path + + +def get_python(): + python = Path('python3.9') + return str(python) if python.exists() else 'python' + +def get_catboost_config(real_data_path, is_cv=False): + ds_name = Path(real_data_path).name + C = load_json(f'tuned_models/catboost/{ds_name}_cv.json') + return C \ No newline at end of file diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/.pipeline.py b/src/synthcity/plugins/core/models/tabular_ddpm/.pipeline.py new file mode 100644 index 00000000..f6855f6b --- /dev/null +++ b/src/synthcity/plugins/core/models/tabular_ddpm/.pipeline.py @@ -0,0 +1,80 @@ +import tomli +import shutil +import os +import argparse +from train import train +from sample import sample +import pandas as pd +import matplotlib.pyplot as plt +import zero +import lib +import torch + +def load_config(path) : + with open(path, 'rb') as f: + return tomli.load(f) + +def save_file(parent_dir, config_path): + try: + dst = os.path.join(parent_dir) + os.makedirs(os.path.dirname(dst), exist_ok=True) + shutil.copyfile(os.path.abspath(config_path), 
dst) + except shutil.SameFileError: + pass + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('--config', metavar='FILE') + parser.add_argument('--train', action='store_true', default=False) + parser.add_argument('--sample', action='store_true', default=False) + parser.add_argument('--eval', action='store_true', default=False) + parser.add_argument('--change_val', action='store_true', default=False) + + args = parser.parse_args() + raw_config = lib.load_config(args.config) + if 'device' in raw_config: + device = torch.device(raw_config['device']) + else: + device = torch.device('cuda:1') + + timer = zero.Timer() + timer.run() + save_file(os.path.join(raw_config['parent_dir'], 'config.toml'), args.config) + + if args.train: + train( + **raw_config['train']['main'], + **raw_config['diffusion_params'], + parent_dir=raw_config['parent_dir'], + real_data_path=raw_config['real_data_path'], + model_type=raw_config['model_type'], + model_params=raw_config['model_params'], + T_dict=raw_config['train']['T'], + num_numerical_features=raw_config['num_numerical_features'], + device=device, + change_val=args.change_val + ) + if args.sample: + sample( + num_samples=raw_config['sample']['num_samples'], + batch_size=raw_config['sample']['batch_size'], + disbalance=raw_config['sample'].get('disbalance', None), + **raw_config['diffusion_params'], + parent_dir=raw_config['parent_dir'], + real_data_path=raw_config['real_data_path'], + model_path=os.path.join(raw_config['parent_dir'], 'model.pt'), + model_type=raw_config['model_type'], + model_params=raw_config['model_params'], + T_dict=raw_config['train']['T'], + num_numerical_features=raw_config['num_numerical_features'], + device=device, + seed=raw_config['sample'].get('seed', 0), + change_val=args.change_val + ) + + save_file(os.path.join(raw_config['parent_dir'], 'info.json'), os.path.join(raw_config['real_data_path'], 'info.json')) + + print(f'Elapsed time: {str(timer)}') + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/.sample.py b/src/synthcity/plugins/core/models/tabular_ddpm/.sample.py new file mode 100644 index 00000000..abc68162 --- /dev/null +++ b/src/synthcity/plugins/core/models/tabular_ddpm/.sample.py @@ -0,0 +1,159 @@ +import torch +import numpy as np +import zero +import os +from .gaussian_multinomial_diffsuion import GaussianMultinomialDiffusion +from .utils import FoundNANsError +from utils_train import get_model, make_dataset +from .lib import round_columns +import lib + +def to_good_ohe(ohe, X): + indices = np.cumsum([0] + ohe._n_features_outs) + Xres = [] + for i in range(1, len(indices)): + x_ = np.max(X[:, indices[i - 1]:indices[i]], axis=1) + t = X[:, indices[i - 1]:indices[i]] - x_.reshape(-1, 1) + Xres.append(np.where(t >= 0, 1, 0)) + return np.hstack(Xres) + +def sample( + parent_dir, + real_data_path = 'data/higgs-small', + batch_size = 2000, + num_samples = 0, + model_type = 'mlp', + model_params = None, + model_path = None, + num_timesteps = 1000, + gaussian_loss_type = 'mse', + scheduler = 'cosine', + T_dict = None, + num_numerical_features = 0, + disbalance = None, + device = torch.device('cuda:1'), + seed = 0, + change_val = False +): + zero.improve_reproducibility(seed) + + T = lib.Transformations(**T_dict) + D = make_dataset( + real_data_path, + T, + num_classes=model_params['num_classes'], + is_y_cond=model_params['is_y_cond'], + change_val=change_val + ) + + K = np.array(D.get_category_sizes('train')) + if len(K) == 0 or 
T_dict['cat_encoding'] == 'one-hot': + K = np.array([0]) + + num_numerical_features_ = D.X_num['train'].shape[1] if D.X_num is not None else 0 + d_in = np.sum(K) + num_numerical_features_ + model_params['d_in'] = int(d_in) + model = get_model( + model_type, + model_params, + num_numerical_features_, + category_sizes=D.get_category_sizes('train') + ) + + model.load_state_dict( + torch.load(model_path, map_location="cpu") + ) + + diffusion = GaussianMultinomialDiffusion( + K, + num_numerical_features=num_numerical_features_, + denoise_fn=model, num_timesteps=num_timesteps, + gaussian_loss_type=gaussian_loss_type, scheduler=scheduler, device=device + ) + + diffusion.to(device) + diffusion.eval() + + _, empirical_class_dist = torch.unique(torch.from_numpy(D.y['train']), return_counts=True) + # empirical_class_dist = empirical_class_dist.float() + torch.tensor([-5000., 10000.]).float() + if disbalance == 'fix': + empirical_class_dist[0], empirical_class_dist[1] = empirical_class_dist[1], empirical_class_dist[0] + x_gen, y_gen = diffusion.sample_all(num_samples, batch_size, empirical_class_dist.float(), ddim=False) + + elif disbalance == 'fill': + ix_major = empirical_class_dist.argmax().item() + val_major = empirical_class_dist[ix_major].item() + x_gen, y_gen = [], [] + for i in range(empirical_class_dist.shape[0]): + if i == ix_major: + continue + distrib = torch.zeros_like(empirical_class_dist) + distrib[i] = 1 + num_samples = val_major - empirical_class_dist[i].item() + x_temp, y_temp = diffusion.sample_all(num_samples, batch_size, distrib.float(), ddim=False) + x_gen.append(x_temp) + y_gen.append(y_temp) + + x_gen = torch.cat(x_gen, dim=0) + y_gen = torch.cat(y_gen, dim=0) + + else: + x_gen, y_gen = diffusion.sample_all(num_samples, batch_size, empirical_class_dist.float(), ddim=False) + + + # try: + # except FoundNANsError as ex: + # print("Found NaNs during sampling!") + # loader = lib.prepare_fast_dataloader(D, 'train', 8) + # x_gen = next(loader)[0] + # y_gen = torch.multinomial( + # empirical_class_dist.float(), + # num_samples=8, + # replacement=True + # ) + X_gen, y_gen = x_gen.numpy(), y_gen.numpy() + + ### + # X_num_unnorm = X_gen[:, :num_numerical_features] + # lo = np.percentile(X_num_unnorm, 2.5, axis=0) + # hi = np.percentile(X_num_unnorm, 97.5, axis=0) + # idx = (lo < X_num_unnorm) & (hi > X_num_unnorm) + # X_gen = X_gen[np.all(idx, axis=1)] + # y_gen = y_gen[np.all(idx, axis=1)] + ### + + num_numerical_features = num_numerical_features + int(D.is_regression and not model_params["is_y_cond"]) + + X_num_ = X_gen + if num_numerical_features < X_gen.shape[1]: + np.save(os.path.join(parent_dir, 'X_cat_unnorm'), X_gen[:, num_numerical_features:]) + # _, _, cat_encoder = lib.cat_encode({'train': X_cat_real}, T_dict['cat_encoding'], y_real, T_dict['seed'], True) + if T_dict['cat_encoding'] == 'one-hot': + X_gen[:, num_numerical_features:] = to_good_ohe(D.cat_transform.steps[0][1], X_num_[:, num_numerical_features:]) + X_cat = D.cat_transform.inverse_transform(X_gen[:, num_numerical_features:]) + + if num_numerical_features_ != 0: + # _, normalize = lib.normalize({'train' : X_num_real}, T_dict['normalization'], T_dict['seed'], True) + np.save(os.path.join(parent_dir, 'X_num_unnorm'), X_gen[:, :num_numerical_features]) + X_num_ = D.num_transform.inverse_transform(X_gen[:, :num_numerical_features]) + X_num = X_num_[:, :num_numerical_features] + + X_num_real = np.load(os.path.join(real_data_path, "X_num_train.npy"), allow_pickle=True) + disc_cols = [] + for col in 
range(X_num_real.shape[1]): + uniq_vals = np.unique(X_num_real[:, col]) + if len(uniq_vals) <= 32 and ((uniq_vals - np.round(uniq_vals)) == 0).all(): + disc_cols.append(col) + print("Discrete cols:", disc_cols) + if model_params['num_classes'] == 0: + y_gen = X_num[:, 0] + X_num = X_num[:, 1:] + if len(disc_cols): + X_num = round_columns(X_num_real, X_num, disc_cols) + + if num_numerical_features != 0: + print("Num shape: ", X_num.shape) + np.save(os.path.join(parent_dir, 'X_num_train'), X_num) + if num_numerical_features < X_gen.shape[1]: + np.save(os.path.join(parent_dir, 'X_cat_train'), X_cat) + np.save(os.path.join(parent_dir, 'y_train'), y_gen) \ No newline at end of file diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/.train.py b/src/synthcity/plugins/core/models/tabular_ddpm/.train.py new file mode 100644 index 00000000..85cac744 --- /dev/null +++ b/src/synthcity/plugins/core/models/tabular_ddpm/.train.py @@ -0,0 +1,156 @@ +from copy import deepcopy +import torch +import os +import numpy as np +import zero +from .gaussian_multinomial_diffsuion import GaussianMultinomialDiffusion +from utils_train import get_model, make_dataset, update_ema +from . import lib +import pandas as pd + +class Trainer: + def __init__(self, diffusion, train_iter, lr, weight_decay, steps, device=torch.device('cuda:1')): + self.diffusion = diffusion + self.ema_model = deepcopy(self.diffusion._denoise_fn) + for param in self.ema_model.parameters(): + param.detach_() + + self.train_iter = train_iter + self.steps = steps + self.init_lr = lr + self.optimizer = torch.optim.AdamW(self.diffusion.parameters(), lr=lr, weight_decay=weight_decay) + self.device = device + self.loss_history = pd.DataFrame(columns=['step', 'mloss', 'gloss', 'loss']) + self.log_every = 100 + self.print_every = 500 + self.ema_every = 1000 + + def _anneal_lr(self, step): + frac_done = step / self.steps + lr = self.init_lr * (1 - frac_done) + for param_group in self.optimizer.param_groups: + param_group["lr"] = lr + + def _run_step(self, x, out_dict): + x = x.to(self.device) + for k in out_dict: + out_dict[k] = out_dict[k].long().to(self.device) + self.optimizer.zero_grad() + loss_multi, loss_gauss = self.diffusion.mixed_loss(x, out_dict) + loss = loss_multi + loss_gauss + loss.backward() + self.optimizer.step() + + return loss_multi, loss_gauss + + def run_loop(self): + step = 0 + curr_loss_multi = 0.0 + curr_loss_gauss = 0.0 + + curr_count = 0 + while step < self.steps: + x, out_dict = next(self.train_iter) + out_dict = {'y': out_dict} + batch_loss_multi, batch_loss_gauss = self._run_step(x, out_dict) + + self._anneal_lr(step) + + curr_count += len(x) + curr_loss_multi += batch_loss_multi.item() * len(x) + curr_loss_gauss += batch_loss_gauss.item() * len(x) + + if (step + 1) % self.log_every == 0: + mloss = np.around(curr_loss_multi / curr_count, 4) + gloss = np.around(curr_loss_gauss / curr_count, 4) + if (step + 1) % self.print_every == 0: + print(f'Step {(step + 1)}/{self.steps} MLoss: {mloss} GLoss: {gloss} Sum: {mloss + gloss}') + self.loss_history.loc[len(self.loss_history)] =[step + 1, mloss, gloss, mloss + gloss] + curr_count = 0 + curr_loss_gauss = 0.0 + curr_loss_multi = 0.0 + + update_ema(self.ema_model.parameters(), self.diffusion._denoise_fn.parameters()) + + step += 1 + +def train( + parent_dir, + real_data_path = 'data/higgs-small', + steps = 1000, + lr = 0.002, + weight_decay = 1e-4, + batch_size = 1024, + model_type = 'mlp', + model_params = None, + num_timesteps = 1000, + gaussian_loss_type = 'mse', + scheduler 
= 'cosine', + T_dict = None, + num_numerical_features = 0, + device = torch.device('cuda:1'), + seed = 0, + change_val = False +): + real_data_path = os.path.normpath(real_data_path) + parent_dir = os.path.normpath(parent_dir) + + zero.improve_reproducibility(seed) + + T = lib.Transformations(**T_dict) + + dataset = make_dataset( + real_data_path, + T, + num_classes=model_params['num_classes'], + is_y_cond=model_params['is_y_cond'], + change_val=change_val + ) + + K = np.array(dataset.get_category_sizes('train')) + if len(K) == 0 or T_dict['cat_encoding'] == 'one-hot': + K = np.array([0]) + print(K) + + num_numerical_features = dataset.X_num['train'].shape[1] if dataset.X_num is not None else 0 + d_in = np.sum(K) + num_numerical_features + model_params['d_in'] = d_in + print(d_in) + + print(model_params) + model = get_model( + model_type, + model_params, + num_numerical_features, + category_sizes=dataset.get_category_sizes('train') + ) + model.to(device) + + # train_loader = lib.prepare_beton_loader(dataset, split='train', batch_size=batch_size) + train_loader = lib.prepare_fast_dataloader(dataset, split='train', batch_size=batch_size) + + diffusion = GaussianMultinomialDiffusion( + num_classes=K, + num_numerical_features=num_numerical_features, + denoise_fn=model, + gaussian_loss_type=gaussian_loss_type, + num_timesteps=num_timesteps, + scheduler=scheduler, + device=device + ) + diffusion.to(device) + diffusion.train() + + trainer = Trainer( + diffusion, + train_loader, + lr=lr, + weight_decay=weight_decay, + steps=steps, + device=device + ) + trainer.run_loop() + + trainer.loss_history.to_csv(os.path.join(parent_dir, 'loss.csv'), index=False) + torch.save(diffusion._denoise_fn.state_dict(), os.path.join(parent_dir, 'model.pt')) + torch.save(trainer.ema_model.state_dict(), os.path.join(parent_dir, 'model_ema.pt')) diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/.tune.py b/src/synthcity/plugins/core/models/tabular_ddpm/.tune.py new file mode 100644 index 00000000..5a95dc23 --- /dev/null +++ b/src/synthcity/plugins/core/models/tabular_ddpm/.tune.py @@ -0,0 +1,127 @@ +import subprocess +import lib +import os +import optuna +from copy import deepcopy +import shutil +import argparse +from pathlib import Path + +parser = argparse.ArgumentParser() +parser.add_argument('ds_name', type=str) +parser.add_argument('train_size', type=int) +parser.add_argument('eval_type', type=str) +parser.add_argument('eval_model', type=str) +parser.add_argument('prefix', type=str) +parser.add_argument('--eval_seeds', action='store_true', default=False) + +args = parser.parse_args() +train_size = args.train_size +ds_name = args.ds_name +eval_type = args.eval_type +assert eval_type in ('merged', 'synthetic') +prefix = str(args.prefix) + +pipeline = f'scripts/pipeline.py' +base_config_path = f'exp/{ds_name}/config.toml' +parent_path = Path(f'exp/{ds_name}/') +exps_path = Path(f'exp/{ds_name}/many-exps/') # temporary dir. 
may be replaced with tempdir
+eval_seeds = f'scripts/eval_seeds.py'
+
+os.makedirs(exps_path, exist_ok=True)
+
+def _suggest_mlp_layers(trial):
+    def suggest_dim(name):
+        t = trial.suggest_int(name, d_min, d_max)
+        return 2 ** t
+    min_n_layers, max_n_layers, d_min, d_max = 1, 4, 7, 10
+    n_layers = 2 * trial.suggest_int('n_layers', min_n_layers, max_n_layers)
+    d_first = [suggest_dim('d_first')] if n_layers else []
+    d_middle = (
+        [suggest_dim('d_middle')] * (n_layers - 2)
+        if n_layers > 2
+        else []
+    )
+    d_last = [suggest_dim('d_last')] if n_layers > 1 else []
+    d_layers = d_first + d_middle + d_last
+    return d_layers
+
+def objective(trial):
+
+    lr = trial.suggest_loguniform('lr', 0.00001, 0.003)
+    d_layers = _suggest_mlp_layers(trial)
+    weight_decay = 0.0
+    batch_size = trial.suggest_categorical('batch_size', [256, 4096])
+    steps = trial.suggest_categorical('steps', [5000, 20000, 30000])
+    # steps = trial.suggest_categorical('steps', [500]) # for debug
+    gaussian_loss_type = 'mse'
+    # scheduler = trial.suggest_categorical('scheduler', ['cosine', 'linear'])
+    num_timesteps = trial.suggest_categorical('num_timesteps', [100, 1000])
+    num_samples = int(train_size * (2 ** trial.suggest_int('num_samples', -2, 1)))
+
+    base_config = lib.load_config(base_config_path)
+
+    base_config['train']['main']['lr'] = lr
+    base_config['train']['main']['steps'] = steps
+    base_config['train']['main']['batch_size'] = batch_size
+    base_config['train']['main']['weight_decay'] = weight_decay
+    base_config['model_params']['rtdl_params']['d_layers'] = d_layers
+    base_config['eval']['type']['eval_type'] = eval_type
+    base_config['sample']['num_samples'] = num_samples
+    base_config['diffusion_params']['gaussian_loss_type'] = gaussian_loss_type
+    base_config['diffusion_params']['num_timesteps'] = num_timesteps
+    # base_config['diffusion_params']['scheduler'] = scheduler
+
+    base_config['parent_dir'] = str(exps_path / f"{trial.number}")
+    base_config['eval']['type']['eval_model'] = args.eval_model
+    if args.eval_model == "mlp":
+        base_config['eval']['T']['normalization'] = "quantile"
+        base_config['eval']['T']['cat_encoding'] = "one-hot"
+
+    trial.set_user_attr("config", base_config)
+
+    lib.dump_config(base_config, exps_path / 'config.toml')
+
+    subprocess.run(['python3.9', f'{pipeline}', '--config', f'{exps_path / "config.toml"}', '--train', '--change_val'], check=True)
+
+    n_datasets = 5
+    score = 0.0
+
+    for sample_seed in range(n_datasets):
+        base_config['sample']['seed'] = sample_seed
+        lib.dump_config(base_config, exps_path / 'config.toml')
+
+        subprocess.run(['python3.9', f'{pipeline}', '--config', f'{exps_path / "config.toml"}', '--sample', '--eval', '--change_val'], check=True)
+
+        report_path = str(Path(base_config['parent_dir']) / f'results_{args.eval_model}.json')
+        report = lib.load_json(report_path)
+
+        if 'r2' in report['metrics']['val']:
+            score += report['metrics']['val']['r2']
+        else:
+            score += report['metrics']['val']['macro avg']['f1-score']
+
+    shutil.rmtree(exps_path / f"{trial.number}")
+
+    return score / n_datasets
+
+study = optuna.create_study(
+    direction='maximize',
+    sampler=optuna.samplers.TPESampler(seed=0),
+)
+
+study.optimize(objective, n_trials=50, show_progress_bar=True)
+
+best_config_path = parent_path / f'{prefix}_best/config.toml'
+best_config = study.best_trial.user_attrs['config']
+best_config["parent_dir"] = str(parent_path / f'{prefix}_best/')
+
+os.makedirs(parent_path / f'{prefix}_best', exist_ok=True)
+lib.dump_config(best_config, best_config_path)
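+# Save Optuna's parameter-importance scores next to the best config so the
+# search can be inspected and reproduced later.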
+lib.dump_json(optuna.importance.get_param_importances(study), parent_path / f'{prefix}_best/importance.json')
+
+subprocess.run(['python3.9', f'{pipeline}', '--config', f'{best_config_path}', '--train', '--sample'], check=True)
+
+if args.eval_seeds:
+    best_exp = str(parent_path / f'{prefix}_best/config.toml')
+    subprocess.run(['python3.9', f'{eval_seeds}', '--config', f'{best_exp}', '10', "ddpm", eval_type, args.eval_model, '5'], check=True)
\ No newline at end of file
diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/.utils_train.py b/src/synthcity/plugins/core/models/tabular_ddpm/.utils_train.py
new file mode 100644
index 00000000..3062b15d
--- /dev/null
+++ b/src/synthcity/plugins/core/models/tabular_ddpm/.utils_train.py
@@ -0,0 +1,88 @@
+import numpy as np
+import os
+import lib
+from .modules import MLPDiffusion, ResNetDiffusion
+
+def get_model(
+    model_name,
+    model_params,
+    n_num_features,
+    category_sizes
+):
+    if model_name == 'mlp':
+        model = MLPDiffusion(**model_params)
+    elif model_name == 'resnet':
+        model = ResNetDiffusion(**model_params)
+    else:
+        raise ValueError(f'Unknown model: {model_name}')
+    return model
+
+def update_ema(target_params, source_params, rate=0.999):
+    """
+    Update target parameters to be closer to those of source parameters using
+    an exponential moving average.
+    :param target_params: the target parameter sequence.
+    :param source_params: the source parameter sequence.
+    :param rate: the EMA rate (closer to 1 means slower).
+    """
+    for targ, src in zip(target_params, source_params):
+        targ.detach().mul_(rate).add_(src.detach(), alpha=1 - rate)
+
+def concat_y_to_X(X, y):
+    if X is None:
+        return y.reshape(-1, 1)
+    return np.concatenate([y.reshape(-1, 1), X], axis=1)
+
+def make_dataset(
+    data_path: str,
+    T: lib.Transformations,
+    num_classes: int,
+    is_y_cond: bool,
+    change_val: bool
+):
+    # classification
+    if num_classes > 0:
+        X_cat = {} if os.path.exists(os.path.join(data_path, 'X_cat_train.npy')) or not is_y_cond else None
+        X_num = {} if os.path.exists(os.path.join(data_path, 'X_num_train.npy')) else None
+        y = {}
+
+        for split in ['train', 'val', 'test']:
+            X_num_t, X_cat_t, y_t = lib.read_pure_data(data_path, split)
+            if X_num is not None:
+                X_num[split] = X_num_t
+            if not is_y_cond:
+                X_cat_t = concat_y_to_X(X_cat_t, y_t)
+            if X_cat is not None:
+                X_cat[split] = X_cat_t
+            y[split] = y_t
+    else:
+        # regression
+        X_cat = {} if os.path.exists(os.path.join(data_path, 'X_cat_train.npy')) else None
+        X_num = {} if os.path.exists(os.path.join(data_path, 'X_num_train.npy')) or not is_y_cond else None
+        y = {}
+
+        for split in ['train', 'val', 'test']:
+            X_num_t, X_cat_t, y_t = lib.read_pure_data(data_path, split)
+            if not is_y_cond:
+                X_num_t = concat_y_to_X(X_num_t, y_t)
+            if X_num is not None:
+                X_num[split] = X_num_t
+            if X_cat is not None:
+                X_cat[split] = X_cat_t
+            y[split] = y_t
+
+    info = lib.load_json(os.path.join(data_path, 'info.json'))
+
+    D = lib.Dataset(
+        X_num,
+        X_cat,
+        y,
+        y_info={},
+        task_type=lib.TaskType(info['task_type']),
+        n_classes=info.get('n_classes')
+    )
+
+    if change_val:
+        D = lib.change_val(D)
+
+    return lib.transform_dataset(D, T, None)
\ No newline at end of file
diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/README.md b/src/synthcity/plugins/core/models/tabular_ddpm/README.md
new file mode 100644
index 00000000..3d418685
--- /dev/null
+++ b/src/synthcity/plugins/core/models/tabular_ddpm/README.md
@@ -0,0 +1,3 @@
+# TabDDPM: Modelling Tabular Data with Diffusion Models
+
+Adapted from 
https://github.com/rotot0/tab-ddpm. diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py new file mode 100644 index 00000000..80d346c2 --- /dev/null +++ b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py @@ -0,0 +1,2 @@ +from .gaussian_multinomial_diffsuion import GaussianMultinomialDiffusion # noqa +from .modules import MLPDiffusion, ResNetDiffusion # noqa \ No newline at end of file diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py new file mode 100644 index 00000000..0d0f2ce4 --- /dev/null +++ b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py @@ -0,0 +1,992 @@ +""" +Based on https://github.com/openai/guided-diffusion/blob/main/guided_diffusion +and https://github.com/ehoogeboom/multinomial_diffusion +""" + +import torch.nn.functional as F +import torch +import math + +import numpy as np +from .utils import * + +""" +Based in part on: https://github.com/lucidrains/denoising-diffusion-pytorch/blob/5989f4c77eafcdc6be0fb4739f0f277a6dd7f7d8/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py#L281 +""" +eps = 1e-8 + +def get_named_beta_schedule(schedule_name, num_diffusion_timesteps): + """ + Get a pre-defined beta schedule for the given name. + The beta schedule library consists of beta schedules which remain similar + in the limit of num_diffusion_timesteps. + Beta schedules may be added, but should not be removed or changed once + they are committed to maintain backwards compatibility. + """ + if schedule_name == "linear": + # Linear schedule from Ho et al, extended to work for any number of + # diffusion steps. + scale = 1000 / num_diffusion_timesteps + beta_start = scale * 0.0001 + beta_end = scale * 0.02 + return np.linspace( + beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64 + ) + elif schedule_name == "cosine": + return betas_for_alpha_bar( + num_diffusion_timesteps, + lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2, + ) + else: + raise NotImplementedError(f"unknown beta schedule: {schedule_name}") + + +def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, + which defines the cumulative product of (1-beta) over time from t = [0,1]. + :param num_diffusion_timesteps: the number of betas to produce. + :param alpha_bar: a lambda that takes an argument t from 0 to 1 and + produces the cumulative product of (1-beta) up to that + part of the diffusion process. + :param max_beta: the maximum beta to use; use values lower than 1 to + prevent singularities. 
+ """ + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) + return np.array(betas) + +class GaussianMultinomialDiffusion(torch.nn.Module): + def __init__( + self, + num_classes: np.array, + num_numerical_features: int, + denoise_fn, + num_timesteps=1000, + gaussian_loss_type='mse', + gaussian_parametrization='eps', + multinomial_loss_type='vb_stochastic', + parametrization='x0', + scheduler='cosine', + device=torch.device('cpu') + ): + + super(GaussianMultinomialDiffusion, self).__init__() + assert multinomial_loss_type in ('vb_stochastic', 'vb_all') + assert parametrization in ('x0', 'direct') + + if multinomial_loss_type == 'vb_all': + print('Computing the loss using the bound on _all_ timesteps.' + ' This is expensive both in terms of memory and computation.') + + self.num_numerical_features = num_numerical_features + self.num_classes = num_classes # it as a vector [K1, K2, ..., Km] + self.num_classes_expanded = torch.from_numpy( + np.concatenate([num_classes[i].repeat(num_classes[i]) for i in range(len(num_classes))]) + ).to(device) + + self.slices_for_classes = [np.arange(self.num_classes[0])] + offsets = np.cumsum(self.num_classes) + for i in range(1, len(offsets)): + self.slices_for_classes.append(np.arange(offsets[i - 1], offsets[i])) + self.offsets = torch.from_numpy(np.append([0], offsets)).to(device) + + self._denoise_fn = denoise_fn + self.gaussian_loss_type = gaussian_loss_type + self.gaussian_parametrization = gaussian_parametrization + self.multinomial_loss_type = multinomial_loss_type + self.num_timesteps = num_timesteps + self.parametrization = parametrization + self.scheduler = scheduler + + alphas = 1. - get_named_beta_schedule(scheduler, num_timesteps) + alphas = torch.tensor(alphas.astype('float64')) + betas = 1. - alphas + + log_alpha = np.log(alphas) + log_cumprod_alpha = np.cumsum(log_alpha) + + log_1_min_alpha = log_1_min_a(log_alpha) + log_1_min_cumprod_alpha = log_1_min_a(log_cumprod_alpha) + + alphas_cumprod = np.cumprod(alphas, axis=0) + alphas_cumprod_prev = torch.tensor(np.append(1.0, alphas_cumprod[:-1])) + alphas_cumprod_next = torch.tensor(np.append(alphas_cumprod[1:], 0.0)) + sqrt_alphas_cumprod = np.sqrt(alphas_cumprod) + sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - alphas_cumprod) + sqrt_recip_alphas_cumprod = np.sqrt(1.0 / alphas_cumprod) + sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / alphas_cumprod - 1) + + # Gaussian diffusion + + self.posterior_variance = ( + betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod) + ) + self.posterior_log_variance_clipped = torch.from_numpy( + np.log(np.append(self.posterior_variance[1], self.posterior_variance[1:])) + ).float().to(device) + self.posterior_mean_coef1 = ( + betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod) + ).float().to(device) + self.posterior_mean_coef2 = ( + (1.0 - alphas_cumprod_prev) + * np.sqrt(alphas.numpy()) + / (1.0 - alphas_cumprod) + ).float().to(device) + + assert log_add_exp(log_alpha, log_1_min_alpha).abs().sum().item() < 1.e-5 + assert log_add_exp(log_cumprod_alpha, log_1_min_cumprod_alpha).abs().sum().item() < 1e-5 + assert (np.cumsum(log_alpha) - log_cumprod_alpha).abs().sum().item() < 1.e-5 + + # Convert to float32 and register buffers. 
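+        # Buffers travel with the module across devices and are saved in its
+        # state_dict, but are not returned by parameters(), so the optimizer
+        # never touches the (fixed) noise schedule.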
+ self.register_buffer('alphas', alphas.float().to(device)) + self.register_buffer('log_alpha', log_alpha.float().to(device)) + self.register_buffer('log_1_min_alpha', log_1_min_alpha.float().to(device)) + self.register_buffer('log_1_min_cumprod_alpha', log_1_min_cumprod_alpha.float().to(device)) + self.register_buffer('log_cumprod_alpha', log_cumprod_alpha.float().to(device)) + self.register_buffer('alphas_cumprod', alphas_cumprod.float().to(device)) + self.register_buffer('alphas_cumprod_prev', alphas_cumprod_prev.float().to(device)) + self.register_buffer('alphas_cumprod_next', alphas_cumprod_next.float().to(device)) + self.register_buffer('sqrt_alphas_cumprod', sqrt_alphas_cumprod.float().to(device)) + self.register_buffer('sqrt_one_minus_alphas_cumprod', sqrt_one_minus_alphas_cumprod.float().to(device)) + self.register_buffer('sqrt_recip_alphas_cumprod', sqrt_recip_alphas_cumprod.float().to(device)) + self.register_buffer('sqrt_recipm1_alphas_cumprod', sqrt_recipm1_alphas_cumprod.float().to(device)) + + self.register_buffer('Lt_history', torch.zeros(num_timesteps)) + self.register_buffer('Lt_count', torch.zeros(num_timesteps)) + + # Gaussian part + def gaussian_q_mean_variance(self, x_start, t): + mean = ( + extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + ) + variance = extract(1.0 - self.alphas_cumprod, t, x_start.shape) + log_variance = extract( + self.log_1_min_cumprod_alpha, t, x_start.shape + ) + return mean, variance, log_variance + + def gaussian_q_sample(self, x_start, t, noise=None): + if noise is None: + noise = torch.randn_like(x_start) + assert noise.shape == x_start.shape + return ( + extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + + extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) + * noise + ) + + def gaussian_q_posterior_mean_variance(self, x_start, x_t, t): + assert x_start.shape == x_t.shape + posterior_mean = ( + extract(self.posterior_mean_coef1, t, x_t.shape) * x_start + + extract(self.posterior_mean_coef2, t, x_t.shape) * x_t + ) + posterior_variance = extract(self.posterior_variance, t, x_t.shape) + posterior_log_variance_clipped = extract( + self.posterior_log_variance_clipped, t, x_t.shape + ) + assert ( + posterior_mean.shape[0] + == posterior_variance.shape[0] + == posterior_log_variance_clipped.shape[0] + == x_start.shape[0] + ) + return posterior_mean, posterior_variance, posterior_log_variance_clipped + + def gaussian_p_mean_variance( + self, model_output, x, t, clip_denoised=False, denoised_fn=None, model_kwargs=None + ): + if model_kwargs is None: + model_kwargs = {} + + B, C = x.shape[:2] + assert t.shape == (B,) + + model_variance = torch.cat([self.posterior_variance[1].unsqueeze(0).to(x.device), (1. 
- self.alphas)[1:]], dim=0) + # model_variance = self.posterior_variance.to(x.device) + model_log_variance = torch.log(model_variance) + + model_variance = extract(model_variance, t, x.shape) + model_log_variance = extract(model_log_variance, t, x.shape) + + + if self.gaussian_parametrization == 'eps': + pred_xstart = self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output) + elif self.gaussian_parametrization == 'x0': + pred_xstart = model_output + else: + raise NotImplementedError + + model_mean, _, _ = self.gaussian_q_posterior_mean_variance( + x_start=pred_xstart, x_t=x, t=t + ) + + assert ( + model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape + ), f'{model_mean.shape}, {model_log_variance.shape}, {pred_xstart.shape}, {x.shape}' + + return { + "mean": model_mean, + "variance": model_variance, + "log_variance": model_log_variance, + "pred_xstart": pred_xstart, + } + + def _vb_terms_bpd( + self, model_output, x_start, x_t, t, clip_denoised=False, model_kwargs=None + ): + true_mean, _, true_log_variance_clipped = self.gaussian_q_posterior_mean_variance( + x_start=x_start, x_t=x_t, t=t + ) + out = self.gaussian_p_mean_variance( + model_output, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs + ) + kl = normal_kl( + true_mean, true_log_variance_clipped, out["mean"], out["log_variance"] + ) + kl = mean_flat(kl) / np.log(2.0) + + decoder_nll = -discretized_gaussian_log_likelihood( + x_start, means=out["mean"], log_scales=0.5 * out["log_variance"] + ) + assert decoder_nll.shape == x_start.shape + decoder_nll = mean_flat(decoder_nll) / np.log(2.0) + + # At the first timestep return the decoder NLL, + # otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t)) + output = torch.where((t == 0), decoder_nll, kl) + return {"output": output, "pred_xstart": out["pred_xstart"], "out_mean": out["mean"], "true_mean": true_mean} + + def _prior_gaussian(self, x_start): + """ + Get the prior KL term for the variational lower-bound, measured in + bits-per-dim. + + This term can't be optimized, as it only depends on the encoder. + + :param x_start: the [N x C x ...] tensor of inputs. + :return: a batch of [N] KL values (in bits), one per batch element. 
+ """ + batch_size = x_start.shape[0] + t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) + qt_mean, _, qt_log_variance = self.gaussian_q_mean_variance(x_start, t) + kl_prior = normal_kl( + mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0 + ) + return mean_flat(kl_prior) / np.log(2.0) + + def _gaussian_loss(self, model_out, x_start, x_t, t, noise, model_kwargs=None): + if model_kwargs is None: + model_kwargs = {} + + terms = {} + if self.gaussian_loss_type == 'mse': + terms["loss"] = mean_flat((noise - model_out) ** 2) + elif self.gaussian_loss_type == 'kl': + terms["loss"] = self._vb_terms_bpd( + model_output=model_out, + x_start=x_start, + x_t=x_t, + t=t, + clip_denoised=False, + model_kwargs=model_kwargs, + )["output"] + + + return terms['loss'] + + def _predict_xstart_from_eps(self, x_t, t, eps): + assert x_t.shape == eps.shape + return ( + extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t + - extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps + ) + + def _predict_eps_from_xstart(self, x_t, t, pred_xstart): + return ( + extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t + - pred_xstart + ) / extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) + + def gaussian_p_sample( + self, + model_out, + x, + t, + clip_denoised=False, + denoised_fn=None, + model_kwargs=None, + ): + out = self.gaussian_p_mean_variance( + model_out, + x, + t, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + model_kwargs=model_kwargs, + ) + noise = torch.randn_like(x) + nonzero_mask = ( + (t != 0).float().view(-1, *([1] * (len(x.shape) - 1))) + ) # no noise when t == 0 + + sample = out["mean"] + nonzero_mask * torch.exp(0.5 * out["log_variance"]) * noise + return {"sample": sample, "pred_xstart": out["pred_xstart"]} + + # Multinomial part + + def multinomial_kl(self, log_prob1, log_prob2): + kl = (log_prob1.exp() * (log_prob1 - log_prob2)).sum(dim=1) + return kl + + def q_pred_one_timestep(self, log_x_t, t): + log_alpha_t = extract(self.log_alpha, t, log_x_t.shape) + log_1_min_alpha_t = extract(self.log_1_min_alpha, t, log_x_t.shape) + + # alpha_t * E[xt] + (1 - alpha_t) 1 / K + log_probs = log_add_exp( + log_x_t + log_alpha_t, + log_1_min_alpha_t - torch.log(self.num_classes_expanded) + ) + + return log_probs + + def q_pred(self, log_x_start, t): + log_cumprod_alpha_t = extract(self.log_cumprod_alpha, t, log_x_start.shape) + log_1_min_cumprod_alpha = extract(self.log_1_min_cumprod_alpha, t, log_x_start.shape) + + log_probs = log_add_exp( + log_x_start + log_cumprod_alpha_t, + log_1_min_cumprod_alpha - torch.log(self.num_classes_expanded) + ) + + return log_probs + + def predict_start(self, model_out, log_x_t, t, out_dict): + + # model_out = self._denoise_fn(x_t, t.to(x_t.device), **out_dict) + + assert model_out.size(0) == log_x_t.size(0) + assert model_out.size(1) == self.num_classes.sum(), f'{model_out.size()}' + + log_pred = torch.empty_like(model_out) + for ix in self.slices_for_classes: + log_pred[:, ix] = F.log_softmax(model_out[:, ix], dim=1) + return log_pred + + def q_posterior(self, log_x_start, log_x_t, t): + # q(xt-1 | xt, x0) = q(xt | xt-1, x0) * q(xt-1 | x0) / q(xt | x0) + # where q(xt | xt-1, x0) = q(xt | xt-1). 
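+        # In log space: the unnormalized posterior is log q(xt-1 | x0) plus the
+        # symmetric one-step term evaluated at x_t, renormalized per categorical
+        # feature via a sliced logsumexp (see `unnormed_logprobs` below).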
+ + # EV_log_qxt_x0 = self.q_pred(log_x_start, t) + + # print('sum exp', EV_log_qxt_x0.exp().sum(1).mean()) + # assert False + + # log_qxt_x0 = (log_x_t.exp() * EV_log_qxt_x0).sum(dim=1) + t_minus_1 = t - 1 + # Remove negative values, will not be used anyway for final decoder + t_minus_1 = torch.where(t_minus_1 < 0, torch.zeros_like(t_minus_1), t_minus_1) + log_EV_qxtmin_x0 = self.q_pred(log_x_start, t_minus_1) + + num_axes = (1,) * (len(log_x_start.size()) - 1) + t_broadcast = t.to(log_x_start.device).view(-1, *num_axes) * torch.ones_like(log_x_start) + log_EV_qxtmin_x0 = torch.where(t_broadcast == 0, log_x_start, log_EV_qxtmin_x0.to(torch.float32)) + + # unnormed_logprobs = log_EV_qxtmin_x0 + + # log q_pred_one_timestep(x_t, t) + # Note: _NOT_ x_tmin1, which is how the formula is typically used!!! + # Not very easy to see why this is true. But it is :) + unnormed_logprobs = log_EV_qxtmin_x0 + self.q_pred_one_timestep(log_x_t, t) + + log_EV_xtmin_given_xt_given_xstart = \ + unnormed_logprobs \ + - sliced_logsumexp(unnormed_logprobs, self.offsets) + + return log_EV_xtmin_given_xt_given_xstart + + def p_pred(self, model_out, log_x, t, out_dict): + if self.parametrization == 'x0': + log_x_recon = self.predict_start(model_out, log_x, t=t, out_dict=out_dict) + log_model_pred = self.q_posterior( + log_x_start=log_x_recon, log_x_t=log_x, t=t) + elif self.parametrization == 'direct': + log_model_pred = self.predict_start(model_out, log_x, t=t, out_dict=out_dict) + else: + raise ValueError + return log_model_pred + + @torch.no_grad() + def p_sample(self, model_out, log_x, t, out_dict): + model_log_prob = self.p_pred(model_out, log_x=log_x, t=t, out_dict=out_dict) + out = self.log_sample_categorical(model_log_prob) + return out + + @torch.no_grad() + def p_sample_loop(self, shape, out_dict): + device = self.log_alpha.device + + b = shape[0] + # start with random normal image. 
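+        # ... then denoise it step by step, applying the learned reverse
+        # transition from t = num_timesteps - 1 down to t = 1.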
+ img = torch.randn(shape, device=device) + + for i in reversed(range(1, self.num_timesteps)): + img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), out_dict) + return img + + @torch.no_grad() + def _sample(self, image_size, out_dict, batch_size = 16): + return self.p_sample_loop((batch_size, 3, image_size, image_size), out_dict) + + @torch.no_grad() + def interpolate(self, x1, x2, t = None, lam = 0.5): + b, *_, device = *x1.shape, x1.device + t = default(t, self.num_timesteps - 1) + + assert x1.shape == x2.shape + + t_batched = torch.stack([torch.tensor(t, device=device)] * b) + xt1, xt2 = map(lambda x: self.q_sample(x, t=t_batched), (x1, x2)) + + img = (1 - lam) * xt1 + lam * xt2 + for i in reversed(range(0, t)): + img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long)) + + return img + + def log_sample_categorical(self, logits): + full_sample = [] + for i in range(len(self.num_classes)): + one_class_logits = logits[:, self.slices_for_classes[i]] + uniform = torch.rand_like(one_class_logits) + gumbel_noise = -torch.log(-torch.log(uniform + 1e-30) + 1e-30) + sample = (gumbel_noise + one_class_logits).argmax(dim=1) + full_sample.append(sample.unsqueeze(1)) + full_sample = torch.cat(full_sample, dim=1) + log_sample = index_to_log_onehot(full_sample, self.num_classes) + return log_sample + + def q_sample(self, log_x_start, t): + log_EV_qxt_x0 = self.q_pred(log_x_start, t) + + log_sample = self.log_sample_categorical(log_EV_qxt_x0) + + return log_sample + + def nll(self, log_x_start, out_dict): + b = log_x_start.size(0) + device = log_x_start.device + loss = 0 + for t in range(0, self.num_timesteps): + t_array = (torch.ones(b, device=device) * t).long() + + kl = self.compute_Lt( + log_x_start=log_x_start, + log_x_t=self.q_sample(log_x_start=log_x_start, t=t_array), + t=t_array, + out_dict=out_dict) + + loss += kl + + loss += self.kl_prior(log_x_start) + + return loss + + def kl_prior(self, log_x_start): + b = log_x_start.size(0) + device = log_x_start.device + ones = torch.ones(b, device=device).long() + + log_qxT_prob = self.q_pred(log_x_start, t=(self.num_timesteps - 1) * ones) + log_half_prob = -torch.log(self.num_classes_expanded * torch.ones_like(log_qxT_prob)) + + kl_prior = self.multinomial_kl(log_qxT_prob, log_half_prob) + return sum_except_batch(kl_prior) + + def compute_Lt(self, model_out, log_x_start, log_x_t, t, out_dict, detach_mean=False): + log_true_prob = self.q_posterior( + log_x_start=log_x_start, log_x_t=log_x_t, t=t) + log_model_prob = self.p_pred(model_out, log_x=log_x_t, t=t, out_dict=out_dict) + + if detach_mean: + log_model_prob = log_model_prob.detach() + + kl = self.multinomial_kl(log_true_prob, log_model_prob) + kl = sum_except_batch(kl) + + decoder_nll = -log_categorical(log_x_start, log_model_prob) + decoder_nll = sum_except_batch(decoder_nll) + + mask = (t == torch.zeros_like(t)).float() + loss = mask * decoder_nll + (1. - mask) * kl + + return loss + + def sample_time(self, b, device, method='uniform'): + if method == 'importance': + if not (self.Lt_count > 10).all(): + return self.sample_time(b, device, method='uniform') + + Lt_sqrt = torch.sqrt(self.Lt_history + 1e-10) + 0.0001 + Lt_sqrt[0] = Lt_sqrt[1] # Overwrite decoder term with L1. 
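+            # Draw timesteps with probability proportional to the square root
+            # of their recent loss history (importance sampling, as in Improved
+            # DDPM, Nichol & Dhariwal 2021), so harder steps are sampled more often.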
+ pt_all = (Lt_sqrt / Lt_sqrt.sum()).to(device) + + t = torch.multinomial(pt_all, num_samples=b, replacement=True).to(device) + + pt = pt_all.gather(dim=0, index=t) + + return t, pt + + elif method == 'uniform': + t = torch.randint(0, self.num_timesteps, (b,), device=device).long() + + pt = torch.ones_like(t).float() / self.num_timesteps + return t, pt + else: + raise ValueError + + def _multinomial_loss(self, model_out, log_x_start, log_x_t, t, pt, out_dict): + + if self.multinomial_loss_type == 'vb_stochastic': + kl = self.compute_Lt( + model_out, log_x_start, log_x_t, t, out_dict + ) + kl_prior = self.kl_prior(log_x_start) + # Upweigh loss term of the kl + vb_loss = kl / pt + kl_prior + + return vb_loss + + elif self.multinomial_loss_type == 'vb_all': + # Expensive, dont do it ;). + # DEPRECATED + return -self.nll(log_x_start) + else: + raise ValueError() + + def log_prob(self, x, out_dict): + b, device = x.size(0), x.device + if self.training: + return self._multinomial_loss(x, out_dict) + + else: + log_x_start = index_to_log_onehot(x, self.num_classes) + + t, pt = self.sample_time(b, device, 'importance') + + kl = self.compute_Lt( + log_x_start, self.q_sample(log_x_start=log_x_start, t=t), t, out_dict) + + kl_prior = self.kl_prior(log_x_start) + + # Upweigh loss term of the kl + loss = kl / pt + kl_prior + + return -loss + + def mixed_loss(self, x, out_dict): + b = x.shape[0] + device = x.device + t, pt = self.sample_time(b, device, 'uniform') + + x_num = x[:, :self.num_numerical_features] + x_cat = x[:, self.num_numerical_features:] + + x_num_t = x_num + log_x_cat_t = x_cat + if x_num.shape[1] > 0: + noise = torch.randn_like(x_num) + x_num_t = self.gaussian_q_sample(x_num, t, noise=noise) + if x_cat.shape[1] > 0: + log_x_cat = index_to_log_onehot(x_cat.long(), self.num_classes) + log_x_cat_t = self.q_sample(log_x_start=log_x_cat, t=t) + + x_in = torch.cat([x_num_t, log_x_cat_t], dim=1) + + model_out = self._denoise_fn( + x_in, + t, + **out_dict + ) + + model_out_num = model_out[:, :self.num_numerical_features] + model_out_cat = model_out[:, self.num_numerical_features:] + + loss_multi = torch.zeros((1,)).float() + loss_gauss = torch.zeros((1,)).float() + if x_cat.shape[1] > 0: + loss_multi = self._multinomial_loss(model_out_cat, log_x_cat, log_x_cat_t, t, pt, out_dict) / len(self.num_classes) + + if x_num.shape[1] > 0: + loss_gauss = self._gaussian_loss(model_out_num, x_num, x_num_t, t, noise) + + # loss_multi = torch.where(out_dict['y'] == 1, loss_multi, 2 * loss_multi) + # loss_gauss = torch.where(out_dict['y'] == 1, loss_gauss, 2 * loss_gauss) + + return loss_multi.mean(), loss_gauss.mean() + + @torch.no_grad() + def mixed_elbo(self, x0, out_dict): + b = x0.size(0) + device = x0.device + + x_num = x0[:, :self.num_numerical_features] + x_cat = x0[:, self.num_numerical_features:] + has_cat = x_cat.shape[1] > 0 + if has_cat: + log_x_cat = index_to_log_onehot(x_cat.long(), self.num_classes).to(device) + + gaussian_loss = [] + xstart_mse = [] + mse = [] + mu_mse = [] + out_mean = [] + true_mean = [] + multinomial_loss = [] + for t in range(self.num_timesteps): + t_array = (torch.ones(b, device=device) * t).long() + noise = torch.randn_like(x_num) + + x_num_t = self.gaussian_q_sample(x_start=x_num, t=t_array, noise=noise) + if has_cat: + log_x_cat_t = self.q_sample(log_x_start=log_x_cat, t=t_array) + else: + log_x_cat_t = x_cat + + model_out = self._denoise_fn( + torch.cat([x_num_t, log_x_cat_t], dim=1), + t_array, + **out_dict + ) + + model_out_num = model_out[:, 
:self.num_numerical_features] + model_out_cat = model_out[:, self.num_numerical_features:] + + kl = torch.tensor([0.0]) + if has_cat: + kl = self.compute_Lt( + model_out=model_out_cat, + log_x_start=log_x_cat, + log_x_t=log_x_cat_t, + t=t_array, + out_dict=out_dict + ) + + out = self._vb_terms_bpd( + model_out_num, + x_start=x_num, + x_t=x_num_t, + t=t_array, + clip_denoised=False + ) + + multinomial_loss.append(kl) + gaussian_loss.append(out["output"]) + xstart_mse.append(mean_flat((out["pred_xstart"] - x_num) ** 2)) + # mu_mse.append(mean_flat(out["mean_mse"])) + out_mean.append(mean_flat(out["out_mean"])) + true_mean.append(mean_flat(out["true_mean"])) + + eps = self._predict_eps_from_xstart(x_num_t, t_array, out["pred_xstart"]) + mse.append(mean_flat((eps - noise) ** 2)) + + gaussian_loss = torch.stack(gaussian_loss, dim=1) + multinomial_loss = torch.stack(multinomial_loss, dim=1) + xstart_mse = torch.stack(xstart_mse, dim=1) + mse = torch.stack(mse, dim=1) + # mu_mse = torch.stack(mu_mse, dim=1) + out_mean = torch.stack(out_mean, dim=1) + true_mean = torch.stack(true_mean, dim=1) + + + prior_gauss = self._prior_gaussian(x_num) + + prior_multin = torch.tensor([0.0]) + if has_cat: + prior_multin = self.kl_prior(log_x_cat) + + total_gauss = gaussian_loss.sum(dim=1) + prior_gauss + total_multin = multinomial_loss.sum(dim=1) + prior_multin + return { + "total_gaussian": total_gauss, + "total_multinomial": total_multin, + "losses_gaussian": gaussian_loss, + "losses_multinimial": multinomial_loss, + "xstart_mse": xstart_mse, + "mse": mse, + # "mu_mse": mu_mse + "out_mean": out_mean, + "true_mean": true_mean + } + + @torch.no_grad() + def gaussian_ddim_step( + self, + model_out_num, + x, + t, + clip_denoised=False, + denoised_fn=None, + eta=0.0 + ): + out = self.gaussian_p_mean_variance( + model_out_num, + x, + t, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + model_kwargs=None, + ) + + eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"]) + + alpha_bar = extract(self.alphas_cumprod, t, x.shape) + alpha_bar_prev = extract(self.alphas_cumprod_prev, t, x.shape) + sigma = ( + eta + * torch.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar)) + * torch.sqrt(1 - alpha_bar / alpha_bar_prev) + ) + + noise = torch.randn_like(x) + mean_pred = ( + out["pred_xstart"] * torch.sqrt(alpha_bar_prev) + + torch.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps + ) + nonzero_mask = ( + (t != 0).float().view(-1, *([1] * (len(x.shape) - 1))) + ) # no noise when t == 0 + sample = mean_pred + nonzero_mask * sigma * noise + + return sample + + @torch.no_grad() + def gaussian_ddim_sample( + self, + noise, + T, + out_dict, + eta=0.0 + ): + x = noise + b = x.shape[0] + device = x.device + for t in reversed(range(T)): + print(f'Sample timestep {t:4d}', end='\r') + t_array = (torch.ones(b, device=device) * t).long() + out_num = self._denoise_fn(x, t_array, **out_dict) + x = self.gaussian_ddim_step( + out_num, + x, + t_array + ) + print() + return x + + + @torch.no_grad() + def gaussian_ddim_reverse_step( + self, + model_out_num, + x, + t, + clip_denoised=False, + eta=0.0 + ): + assert eta == 0.0, "Eta must be zero." 
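+        # With eta = 0 the DDIM update is deterministic, so applying it in the
+        # forward (t -> t+1) direction inverts sampling and encodes x back
+        # toward the noise space.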
+ out = self.gaussian_p_mean_variance( + model_out_num, + x, + t, + clip_denoised=clip_denoised, + denoised_fn=None, + model_kwargs=None, + ) + + eps = ( + extract(self.sqrt_recip_alphas_cumprod, t, x.shape) * x + - out["pred_xstart"] + ) / extract(self.sqrt_recipm1_alphas_cumprod, t, x.shape) + alpha_bar_next = extract(self.alphas_cumprod_next, t, x.shape) + + mean_pred = ( + out["pred_xstart"] * torch.sqrt(alpha_bar_next) + + torch.sqrt(1 - alpha_bar_next) * eps + ) + + return mean_pred + + @torch.no_grad() + def gaussian_ddim_reverse_sample( + self, + x, + T, + out_dict, + ): + b = x.shape[0] + device = x.device + for t in range(T): + print(f'Reverse timestep {t:4d}', end='\r') + t_array = (torch.ones(b, device=device) * t).long() + out_num = self._denoise_fn(x, t_array, **out_dict) + x = self.gaussian_ddim_reverse_step( + out_num, + x, + t_array, + eta=0.0 + ) + print() + + return x + + + @torch.no_grad() + def multinomial_ddim_step( + self, + model_out_cat, + log_x_t, + t, + out_dict, + eta=0.0 + ): + # not ddim, essentially + log_x0 = self.predict_start(model_out_cat, log_x_t=log_x_t, t=t, out_dict=out_dict) + + alpha_bar = extract(self.alphas_cumprod, t, log_x_t.shape) + alpha_bar_prev = extract(self.alphas_cumprod_prev, t, log_x_t.shape) + sigma = ( + eta + * torch.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar)) + * torch.sqrt(1 - alpha_bar / alpha_bar_prev) + ) + + coef1 = sigma + coef2 = alpha_bar_prev - sigma * alpha_bar + coef3 = 1 - coef1 - coef2 + + + log_ps = torch.stack([ + torch.log(coef1) + log_x_t, + torch.log(coef2) + log_x0, + torch.log(coef3) - torch.log(self.num_classes_expanded) + ], dim=2) + + log_prob = torch.logsumexp(log_ps, dim=2) + + out = self.log_sample_categorical(log_prob) + + return out + + @torch.no_grad() + def sample_ddim(self, num_samples, y_dist): + b = num_samples + device = self.log_alpha.device + z_norm = torch.randn((b, self.num_numerical_features), device=device) + + has_cat = self.num_classes[0] != 0 + log_z = torch.zeros((b, 0), device=device).float() + if has_cat: + uniform_logits = torch.zeros((b, len(self.num_classes_expanded)), device=device) + log_z = self.log_sample_categorical(uniform_logits) + + y = torch.multinomial( + y_dist, + num_samples=b, + replacement=True + ) + out_dict = {'y': y.long().to(device)} + for i in reversed(range(0, self.num_timesteps)): + print(f'Sample timestep {i:4d}', end='\r') + t = torch.full((b,), i, device=device, dtype=torch.long) + model_out = self._denoise_fn( + torch.cat([z_norm, log_z], dim=1).float(), + t, + **out_dict + ) + model_out_num = model_out[:, :self.num_numerical_features] + model_out_cat = model_out[:, self.num_numerical_features:] + z_norm = self.gaussian_ddim_step(model_out_num, z_norm, t, clip_denoised=False) + if has_cat: + log_z = self.multinomial_ddim_step(model_out_cat, log_z, t, out_dict) + + print() + z_ohe = torch.exp(log_z).round() + z_cat = log_z + if has_cat: + z_cat = ohe_to_categories(z_ohe, self.num_classes) + sample = torch.cat([z_norm, z_cat], dim=1).cpu() + return sample, out_dict + + + @torch.no_grad() + def sample(self, num_samples, y_dist): + b = num_samples + device = self.log_alpha.device + z_norm = torch.randn((b, self.num_numerical_features), device=device) + + has_cat = self.num_classes[0] != 0 + log_z = torch.zeros((b, 0), device=device).float() + if has_cat: + uniform_logits = torch.zeros((b, len(self.num_classes_expanded)), device=device) + log_z = self.log_sample_categorical(uniform_logits) + + y = torch.multinomial( + y_dist, + num_samples=b, + replacement=True + 
) + out_dict = {'y': y.long().to(device)} + for i in reversed(range(0, self.num_timesteps)): + print(f'Sample timestep {i:4d}', end='\r') + t = torch.full((b,), i, device=device, dtype=torch.long) + model_out = self._denoise_fn( + torch.cat([z_norm, log_z], dim=1).float(), + t, + **out_dict + ) + model_out_num = model_out[:, :self.num_numerical_features] + model_out_cat = model_out[:, self.num_numerical_features:] + z_norm = self.gaussian_p_sample(model_out_num, z_norm, t, clip_denoised=False)['sample'] + if has_cat: + log_z = self.p_sample(model_out_cat, log_z, t, out_dict) + + print() + z_ohe = torch.exp(log_z).round() + z_cat = log_z + if has_cat: + z_cat = ohe_to_categories(z_ohe, self.num_classes) + sample = torch.cat([z_norm, z_cat], dim=1).cpu() + return sample, out_dict + + def sample_all(self, num_samples, batch_size, y_dist, ddim=False): + if ddim: + print('Sample using DDIM.') + sample_fn = self.sample_ddim + else: + sample_fn = self.sample + + b = batch_size + + all_y = [] + all_samples = [] + num_generated = 0 + while num_generated < num_samples: + sample, out_dict = sample_fn(b, y_dist) + mask_nan = torch.any(sample.isnan(), dim=1) + sample = sample[~mask_nan] + out_dict['y'] = out_dict['y'][~mask_nan] + + all_samples.append(sample) + all_y.append(out_dict['y'].cpu()) + if sample.shape[0] != b: + raise FoundNANsError + num_generated += sample.shape[0] + + x_gen = torch.cat(all_samples, dim=0)[:num_samples] + y_gen = torch.cat(all_y, dim=0)[:num_samples] + + return x_gen, y_gen \ No newline at end of file diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/modules.py b/src/synthcity/plugins/core/models/tabular_ddpm/modules.py new file mode 100644 index 00000000..472ba5b5 --- /dev/null +++ b/src/synthcity/plugins/core/models/tabular_ddpm/modules.py @@ -0,0 +1,486 @@ +""" +Code was adapted from https://github.com/Yura52/rtdl +""" + +import math +from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union, cast + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.optim +from torch import Tensor + +ModuleType = Union[str, Callable[..., nn.Module]] + +class SiLU(nn.Module): + def forward(self, x): + return x * torch.sigmoid(x) + +def timestep_embedding(timesteps, dim, max_period=10000): + """ + Create sinusoidal timestep embeddings. + + :param timesteps: a 1-D Tensor of N indices, one per batch element. + These may be fractional. + :param dim: the dimension of the output. + :param max_period: controls the minimum frequency of the embeddings. + :return: an [N x dim] Tensor of positional embeddings. + """ + half = dim // 2 + freqs = torch.exp( + -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half + ).to(device=timesteps.device) + args = timesteps[:, None].float() * freqs[None] + embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) + if dim % 2: + embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) + return embedding + +def _is_glu_activation(activation: ModuleType): + return ( + isinstance(activation, str) + and activation.endswith('GLU') + or activation in [ReGLU, GEGLU] + ) + + +def _all_or_none(values): + assert all(x is None for x in values) or all(x is not None for x in values) + +def reglu(x: Tensor) -> Tensor: + """The ReGLU activation function from [1]. 
+ References: + [1] Noam Shazeer, "GLU Variants Improve Transformer", 2020 + """ + assert x.shape[-1] % 2 == 0 + a, b = x.chunk(2, dim=-1) + return a * F.relu(b) + + +def geglu(x: Tensor) -> Tensor: + """The GEGLU activation function from [1]. + References: + [1] Noam Shazeer, "GLU Variants Improve Transformer", 2020 + """ + assert x.shape[-1] % 2 == 0 + a, b = x.chunk(2, dim=-1) + return a * F.gelu(b) + +class ReGLU(nn.Module): + """The ReGLU activation function from [shazeer2020glu]. + + Examples: + .. testcode:: + + module = ReGLU() + x = torch.randn(3, 4) + assert module(x).shape == (3, 2) + + References: + * [shazeer2020glu] Noam Shazeer, "GLU Variants Improve Transformer", 2020 + """ + + def forward(self, x: Tensor) -> Tensor: + return reglu(x) + + +class GEGLU(nn.Module): + """The GEGLU activation function from [shazeer2020glu]. + + Examples: + .. testcode:: + + module = GEGLU() + x = torch.randn(3, 4) + assert module(x).shape == (3, 2) + + References: + * [shazeer2020glu] Noam Shazeer, "GLU Variants Improve Transformer", 2020 + """ + + def forward(self, x: Tensor) -> Tensor: + return geglu(x) + +def _make_nn_module(module_type: ModuleType, *args) -> nn.Module: + return ( + ( + ReGLU() + if module_type == 'ReGLU' + else GEGLU() + if module_type == 'GEGLU' + else getattr(nn, module_type)(*args) + ) + if isinstance(module_type, str) + else module_type(*args) + ) + + +class MLP(nn.Module): + """The MLP model used in [gorishniy2021revisiting]. + + The following scheme describes the architecture: + + .. code-block:: text + + MLP: (in) -> Block -> ... -> Block -> Linear -> (out) + Block: (in) -> Linear -> Activation -> Dropout -> (out) + + Examples: + .. testcode:: + + x = torch.randn(4, 2) + module = MLP.make_baseline(x.shape[1], [3, 5], 0.1, 1) + assert module(x).shape == (len(x), 1) + + References: + * [gorishniy2021revisiting] Yury Gorishniy, Ivan Rubachev, Valentin Khrulkov, Artem Babenko, "Revisiting Deep Learning Models for Tabular Data", 2021 + """ + + class Block(nn.Module): + """The main building block of `MLP`.""" + + def __init__( + self, + *, + d_in: int, + d_out: int, + bias: bool, + activation: ModuleType, + dropout: float, + ) -> None: + super().__init__() + self.linear = nn.Linear(d_in, d_out, bias) + self.activation = _make_nn_module(activation) + self.dropout = nn.Dropout(dropout) + + def forward(self, x: Tensor) -> Tensor: + return self.dropout(self.activation(self.linear(x))) + + def __init__( + self, + *, + d_in: int, + d_layers: List[int], + dropouts: Union[float, List[float]], + activation: Union[str, Callable[[], nn.Module]], + d_out: int, + ) -> None: + """ + Note: + `make_baseline` is the recommended constructor. + """ + super().__init__() + if isinstance(dropouts, float): + dropouts = [dropouts] * len(d_layers) + assert len(d_layers) == len(dropouts) + assert activation not in ['ReGLU', 'GEGLU'] + + self.blocks = nn.ModuleList( + [ + MLP.Block( + d_in=d_layers[i - 1] if i else d_in, + d_out=d, + bias=True, + activation=activation, + dropout=dropout, + ) + for i, (d, dropout) in enumerate(zip(d_layers, dropouts)) + ] + ) + self.head = nn.Linear(d_layers[-1] if d_layers else d_in, d_out) + + @classmethod + def make_baseline( + cls: Type['MLP'], + d_in: int, + d_layers: List[int], + dropout: float, + d_out: int, + ) -> 'MLP': + """Create a "baseline" `MLP`. + + This variation of MLP was used in [gorishniy2021revisiting]. 
Features: + + * :code:`Activation` = :code:`ReLU` + * all linear layers except for the first one and the last one are of the same dimension + * the dropout rate is the same for all dropout layers + + Args: + d_in: the input size + d_layers: the dimensions of the linear layers. If there are more than two + layers, then all of them except for the first and the last ones must + have the same dimension. Valid examples: :code:`[]`, :code:`[8]`, + :code:`[8, 16]`, :code:`[2, 2, 2, 2]`, :code:`[1, 2, 2, 4]`. Invalid + example: :code:`[1, 2, 3, 4]`. + dropout: the dropout rate for all hidden layers + d_out: the output size + Returns: + MLP + + References: + * [gorishniy2021revisiting] Yury Gorishniy, Ivan Rubachev, Valentin Khrulkov, Artem Babenko, "Revisiting Deep Learning Models for Tabular Data", 2021 + """ + assert isinstance(dropout, float) + if len(d_layers) > 2: + assert len(set(d_layers[1:-1])) == 1, ( + 'if d_layers contains more than two elements, then' + ' all elements except for the first and the last ones must be equal.' + ) + return MLP( + d_in=d_in, + d_layers=d_layers, # type: ignore + dropouts=dropout, + activation='ReLU', + d_out=d_out, + ) + + def forward(self, x: Tensor) -> Tensor: + x = x.float() + for block in self.blocks: + x = block(x) + x = self.head(x) + return x + + +class ResNet(nn.Module): + """The ResNet model used in [gorishniy2021revisiting]. + The following scheme describes the architecture: + .. code-block:: text + ResNet: (in) -> Linear -> Block -> ... -> Block -> Head -> (out) + |-> Norm -> Linear -> Activation -> Dropout -> Linear -> Dropout ->| + | | + Block: (in) ------------------------------------------------------------> Add -> (out) + Head: (in) -> Norm -> Activation -> Linear -> (out) + Examples: + .. testcode:: + x = torch.randn(4, 2) + module = ResNet.make_baseline( + d_in=x.shape[1], + n_blocks=2, + d_main=3, + d_hidden=4, + dropout_first=0.25, + dropout_second=0.0, + d_out=1 + ) + assert module(x).shape == (len(x), 1) + References: + * [gorishniy2021revisiting] Yury Gorishniy, Ivan Rubachev, Valentin Khrulkov, Artem Babenko, "Revisiting Deep Learning Models for Tabular Data", 2021 + """ + + class Block(nn.Module): + """The main building block of `ResNet`.""" + + def __init__( + self, + *, + d_main: int, + d_hidden: int, + bias_first: bool, + bias_second: bool, + dropout_first: float, + dropout_second: float, + normalization: ModuleType, + activation: ModuleType, + skip_connection: bool, + ) -> None: + super().__init__() + self.normalization = _make_nn_module(normalization, d_main) + self.linear_first = nn.Linear(d_main, d_hidden, bias_first) + self.activation = _make_nn_module(activation) + self.dropout_first = nn.Dropout(dropout_first) + self.linear_second = nn.Linear(d_hidden, d_main, bias_second) + self.dropout_second = nn.Dropout(dropout_second) + self.skip_connection = skip_connection + + def forward(self, x: Tensor) -> Tensor: + x_input = x + x = self.normalization(x) + x = self.linear_first(x) + x = self.activation(x) + x = self.dropout_first(x) + x = self.linear_second(x) + x = self.dropout_second(x) + if self.skip_connection: + x = x_input + x + return x + + class Head(nn.Module): + """The final module of `ResNet`.""" + + def __init__( + self, + *, + d_in: int, + d_out: int, + bias: bool, + normalization: ModuleType, + activation: ModuleType, + ) -> None: + super().__init__() + self.normalization = _make_nn_module(normalization, d_in) + self.activation = _make_nn_module(activation) + self.linear = nn.Linear(d_in, d_out, bias) + + def 
forward(self, x: Tensor) -> Tensor: + if self.normalization is not None: + x = self.normalization(x) + x = self.activation(x) + x = self.linear(x) + return x + + def __init__( + self, + *, + d_in: int, + n_blocks: int, + d_main: int, + d_hidden: int, + dropout_first: float, + dropout_second: float, + normalization: ModuleType, + activation: ModuleType, + d_out: int, + ) -> None: + """ + Note: + `make_baseline` is the recommended constructor. + """ + super().__init__() + + self.first_layer = nn.Linear(d_in, d_main) + if d_main is None: + d_main = d_in + self.blocks = nn.Sequential( + *[ + ResNet.Block( + d_main=d_main, + d_hidden=d_hidden, + bias_first=True, + bias_second=True, + dropout_first=dropout_first, + dropout_second=dropout_second, + normalization=normalization, + activation=activation, + skip_connection=True, + ) + for _ in range(n_blocks) + ] + ) + self.head = ResNet.Head( + d_in=d_main, + d_out=d_out, + bias=True, + normalization=normalization, + activation=activation, + ) + + @classmethod + def make_baseline( + cls: Type['ResNet'], + *, + d_in: int, + n_blocks: int, + d_main: int, + d_hidden: int, + dropout_first: float, + dropout_second: float, + d_out: int, + ) -> 'ResNet': + """Create a "baseline" `ResNet`. + This variation of ResNet was used in [gorishniy2021revisiting]. Features: + * :code:`Activation` = :code:`ReLU` + * :code:`Norm` = :code:`BatchNorm1d` + Args: + d_in: the input size + n_blocks: the number of Blocks + d_main: the input size (or, equivalently, the output size) of each Block + d_hidden: the output size of the first linear layer in each Block + dropout_first: the dropout rate of the first dropout layer in each Block. + dropout_second: the dropout rate of the second dropout layer in each Block. + References: + * [gorishniy2021revisiting] Yury Gorishniy, Ivan Rubachev, Valentin Khrulkov, Artem Babenko, "Revisiting Deep Learning Models for Tabular Data", 2021 + """ + return cls( + d_in=d_in, + n_blocks=n_blocks, + d_main=d_main, + d_hidden=d_hidden, + dropout_first=dropout_first, + dropout_second=dropout_second, + normalization='BatchNorm1d', + activation='ReLU', + d_out=d_out, + ) + + def forward(self, x: Tensor) -> Tensor: + x = x.float() + x = self.first_layer(x) + x = self.blocks(x) + x = self.head(x) + return x +#### For diffusion + +class MLPDiffusion(nn.Module): + def __init__(self, d_in, num_classes, is_y_cond, rtdl_params, dim_t = 128): + super().__init__() + self.dim_t = dim_t + self.num_classes = num_classes + self.is_y_cond = is_y_cond + + # d0 = rtdl_params['d_layers'][0] + + rtdl_params['d_in'] = dim_t + rtdl_params['d_out'] = d_in + + self.mlp = MLP.make_baseline(**rtdl_params) + + if self.num_classes > 0 and is_y_cond: + self.label_emb = nn.Embedding(self.num_classes, dim_t) + elif self.num_classes == 0 and is_y_cond: + self.label_emb = nn.Linear(1, dim_t) + + self.proj = nn.Linear(d_in, dim_t) + self.time_embed = nn.Sequential( + nn.Linear(dim_t, dim_t), + nn.SiLU(), + nn.Linear(dim_t, dim_t) + ) + + def forward(self, x, timesteps, y=None): + emb = self.time_embed(timestep_embedding(timesteps, self.dim_t)) + if self.is_y_cond and y is not None: + if self.num_classes > 0: + y = y.squeeze() + else: + y = y.resize(y.size(0), 1).float() + emb += F.silu(self.label_emb(y)) + x = self.proj(x) + emb + return self.mlp(x) + +class ResNetDiffusion(nn.Module): + def __init__(self, d_in, num_classes, rtdl_params, dim_t = 256): + super().__init__() + self.dim_t = dim_t + self.num_classes = num_classes + + rtdl_params['d_in'] = d_in + rtdl_params['d_out'] = 
d_in + rtdl_params['emb_d'] = dim_t + self.resnet = ResNet.make_baseline(**rtdl_params) + + if self.num_classes > 0: + self.label_emb = nn.Embedding(self.num_classes, dim_t) + + self.time_embed = nn.Sequential( + nn.Linear(dim_t, dim_t), + nn.SiLU(), + nn.Linear(dim_t, dim_t) + ) + + def forward(self, x, timesteps, y=None): + emb = self.time_embed(timestep_embedding(timesteps, self.dim_t)) + if y is not None and self.num_classes > 0: + emb += self.label_emb(y.squeeze()) + return self.resnet(x, emb) diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/requirements.txt b/src/synthcity/plugins/core/models/tabular_ddpm/requirements.txt new file mode 100644 index 00000000..acc088c4 --- /dev/null +++ b/src/synthcity/plugins/core/models/tabular_ddpm/requirements.txt @@ -0,0 +1,15 @@ +category-encoders==2.3.0 +dython==0.5.1 +icecream==2.1.2 +libzero==0.0.8 +numpy==1.21.4 +optuna==2.10.1 +pandas==1.3.4 +pyarrow==6.0.0 +rtdl==0.0.9 +scikit-learn==1.0.2 +scipy==1.7.2 +skorch==0.11.0 +tomli-w==0.4.0 +tomli==1.2.2 +tqdm==4.62.3 diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/utils.py b/src/synthcity/plugins/core/models/tabular_ddpm/utils.py new file mode 100644 index 00000000..6376bfbf --- /dev/null +++ b/src/synthcity/plugins/core/models/tabular_ddpm/utils.py @@ -0,0 +1,174 @@ +import torch +import numpy as np +import torch.nn.functional as F +from torch.profiler import record_function +from inspect import isfunction + +def normal_kl(mean1, logvar1, mean2, logvar2): + """ + Compute the KL divergence between two gaussians. + + Shapes are automatically broadcasted, so batches can be compared to + scalars, among other use cases. + """ + tensor = None + for obj in (mean1, logvar1, mean2, logvar2): + if isinstance(obj, torch.Tensor): + tensor = obj + break + assert tensor is not None, "at least one argument must be a Tensor" + + # Force variances to be Tensors. Broadcasting helps convert scalars to + # Tensors, but it does not work for torch.exp(). + logvar1, logvar2 = [ + x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor) + for x in (logvar1, logvar2) + ] + + return 0.5 * ( + -1.0 + + logvar2 + - logvar1 + + torch.exp(logvar1 - logvar2) + + ((mean1 - mean2) ** 2) * torch.exp(-logvar2) + ) + +def approx_standard_normal_cdf(x): + """ + A fast approximation of the cumulative distribution function of the + standard normal. + """ + return 0.5 * (1.0 + torch.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * torch.pow(x, 3)))) + + +def discretized_gaussian_log_likelihood(x, *, means, log_scales): + """ + Compute the log-likelihood of a Gaussian distribution discretizing to a + given image. + + :param x: the target images. It is assumed that this was uint8 values, + rescaled to the range [-1, 1]. + :param means: the Gaussian mean Tensor. + :param log_scales: the Gaussian log stddev Tensor. + :return: a tensor like x of log probabilities (in nats). 
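+
+    Note: each value is treated as a bin of half-width 1/255 on the [-1, 1]
+    scale, so the mass is approximately CDF(x + 1/255) - CDF(x - 1/255),
+    with the extreme bins (x < -0.999, x > 0.999) taking the full tail mass.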
+ """ + assert x.shape == means.shape == log_scales.shape + centered_x = x - means + inv_stdv = torch.exp(-log_scales) + plus_in = inv_stdv * (centered_x + 1.0 / 255.0) + cdf_plus = approx_standard_normal_cdf(plus_in) + min_in = inv_stdv * (centered_x - 1.0 / 255.0) + cdf_min = approx_standard_normal_cdf(min_in) + log_cdf_plus = torch.log(cdf_plus.clamp(min=1e-12)) + log_one_minus_cdf_min = torch.log((1.0 - cdf_min).clamp(min=1e-12)) + cdf_delta = cdf_plus - cdf_min + log_probs = torch.where( + x < -0.999, + log_cdf_plus, + torch.where(x > 0.999, log_one_minus_cdf_min, torch.log(cdf_delta.clamp(min=1e-12))), + ) + assert log_probs.shape == x.shape + return log_probs + +def sum_except_batch(x, num_dims=1): + ''' + Sums all dimensions except the first. + + Args: + x: Tensor, shape (batch_size, ...) + num_dims: int, number of batch dims (default=1) + + Returns: + x_sum: Tensor, shape (batch_size,) + ''' + return x.reshape(*x.shape[:num_dims], -1).sum(-1) + +def mean_flat(tensor): + """ + Take the mean over all non-batch dimensions. + """ + return tensor.mean(dim=list(range(1, len(tensor.shape)))) + +def ohe_to_categories(ohe, K): + K = torch.from_numpy(K) + indices = torch.cat([torch.zeros((1,)), K.cumsum(dim=0)], dim=0).int().tolist() + res = [] + for i in range(len(indices) - 1): + res.append(ohe[:, indices[i]:indices[i+1]].argmax(dim=1)) + return torch.stack(res, dim=1) + +def log_1_min_a(a): + return torch.log(1 - a.exp() + 1e-40) + + +def log_add_exp(a, b): + maximum = torch.max(a, b) + return maximum + torch.log(torch.exp(a - maximum) + torch.exp(b - maximum)) + +def exists(x): + return x is not None + +def extract(a, t, x_shape): + b, *_ = t.shape + t = t.to(a.device) + out = a.gather(-1, t) + while len(out.shape) < len(x_shape): + out = out[..., None] + return out.expand(x_shape) + +def default(val, d): + if exists(val): + return val + return d() if isfunction(d) else d + +def log_categorical(log_x_start, log_prob): + return (log_x_start.exp() * log_prob).sum(dim=1) + +def index_to_log_onehot(x, num_classes): + onehots = [] + for i in range(len(num_classes)): + onehots.append(F.one_hot(x[:, i], num_classes[i])) + + x_onehot = torch.cat(onehots, dim=1) + log_onehot = torch.log(x_onehot.float().clamp(min=1e-30)) + return log_onehot + +def log_sum_exp_by_classes(x, slices): + device = x.device + res = torch.zeros_like(x) + for ixs in slices: + res[:, ixs] = torch.logsumexp(x[:, ixs], dim=1, keepdim=True) + + assert x.size() == res.size() + + return res + +@torch.jit.script +def log_sub_exp(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor: + m = torch.maximum(a, b) + return torch.log(torch.exp(a - m) - torch.exp(b - m)) + m + +@torch.jit.script +def sliced_logsumexp(x, slices): + lse = torch.logcumsumexp( + torch.nn.functional.pad(x, [1, 0, 0, 0], value=-float('inf')), + dim=-1) + + slice_starts = slices[:-1] + slice_ends = slices[1:] + + slice_lse = log_sub_exp(lse[:, slice_ends], lse[:, slice_starts]) + slice_lse_repeated = torch.repeat_interleave( + slice_lse, + slice_ends - slice_starts, + dim=-1 + ) + return slice_lse_repeated + +def log_onehot_to_index(log_x): + return log_x.argmax(1) + +class FoundNANsError(BaseException): + """Found NANs during sampling""" + def __init__(self, message='Found NANs during sampling.'): + super(FoundNANsError, self).__init__(message) \ No newline at end of file diff --git a/src/synthcity/plugins/generic/plugin_ddpm.py b/src/synthcity/plugins/generic/plugin_ddpm.py new file mode 100644 index 00000000..999f7312 --- /dev/null +++ 
b/src/synthcity/plugins/generic/plugin_ddpm.py @@ -0,0 +1,217 @@ +""" +Reference: Kotelnikov, Akim et al. “TabDDPM: Modelling Tabular Data with Diffusion Models.” ArXiv abs/2209.15421 (2022): n. pag. +""" + +# stdlib +from pathlib import Path +from copy import deepcopy +from typing import Any, List, Optional, Union + +# third party +import numpy as np +import pandas as pd + +# Necessary packages +from pydantic import validate_arguments +import torch +from torch.utils.data import sampler + +# synthcity absolute +from synthcity.metrics.weighted_metrics import WeightedMetrics +from synthcity.plugins.core.dataloader import DataLoader +from synthcity.plugins.core.distribution import ( + CategoricalDistribution, + Distribution, + FloatDistribution, + IntegerDistribution, +) +from synthcity.plugins.core.models.tabular_ddpm import GaussianMultinomialDiffusion, MLPDiffusion, ResNetDiffusion +from synthcity.plugins.core.plugin import Plugin +from synthcity.plugins.core.schema import Schema +from synthcity.utils.constants import DEVICE + + +class DDPMPlugin(Plugin): + """ + .. inheritance-diagram:: synthcity.plugins.generic.plugin_tab_ddpm.TabDDPMPlugin + :parts: 1 + + + Tabular denoising diffusion probabilistic model. + + Args: + ... + + Example: + >>> from sklearn.datasets import load_iris + >>> from synthcity.plugins import Plugins + >>> X, y = load_iris(as_frame = True, return_X_y = True) + >>> X["target"] = y + >>> plugin = Plugins().get("ddpm", n_iter = 100) + >>> plugin.fit(X) + >>> plugin.generate(50) + + """ + + @validate_arguments(config=dict(arbitrary_types_allowed=True)) + def __init__( + self, + n_iter = 1000, + lr = 0.002, + weight_decay = 1e-4, + batch_size = 1024, + model_type = 'mlp', + model_params = None, + num_timesteps = 1000, + gaussian_loss_type = 'mse', + scheduler = 'cosine', + change_val = False, + device: Any = DEVICE, + # early stopping + n_iter_min: int = 100, + n_iter_print: int = 50, + patience: int = 5, + patience_metric: Optional[WeightedMetrics] = None, + # core plugin arguments + random_state: int = 0, + workspace: Path = Path("workspace"), + compress_dataset: bool = False, + sampling_patience: int = 500, + **kwargs: Any + ) -> None: + super().__init__( + device=device, + random_state=random_state, + sampling_patience=sampling_patience, + workspace=workspace, + compress_dataset=compress_dataset, + **kwargs + ) + + if patience_metric is None: + patience_metric = WeightedMetrics( + metrics=[("detection", "detection_mlp")], + weights=[1], + workspace=workspace, + ) + + self.__dict__.update(locals()) + del self.self, self.kwargs + + @staticmethod + def name() -> str: + return "ddpm" + + @staticmethod + def type() -> str: + return "generic" + + @staticmethod + def hyperparameter_space(**kwargs: Any) -> List[Distribution]: + raise NotImplementedError + + def _fit(self, X: DataLoader, *args: Any, **kwargs: Any) -> "DDPMPlugin": + + if self.model_type == 'mlp': + self.model = MLPDiffusion(**self.model_params) + elif self.model_type == 'resnet': + self.model = ResNetDiffusion(**self.model_params) + else: + raise "Unknown model!" 
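+
+        # The denoiser returns one value per input column: the first
+        # num_numerical_features outputs are the predicted Gaussian noise and
+        # the rest are per-category logits; mixed_loss() splits the output at
+        # that boundary and sums the multinomial and Gaussian terms.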
+ + self.diffusion = GaussianMultinomialDiffusion( + num_classes=num_classes, + num_numerical_features=num_numerical_features, + denoise_fn=self.model, + gaussian_loss_type=self.gaussian_loss_type, + num_timesteps=self.num_timesteps, + scheduler=self.scheduler, + device=self.device + ).to(self.device).train() + + trainer = Trainer( + self.model, + X, + lr=self.lr, + weight_decay=self.weight_decay, + steps=self.n_iter, + device=self.device + ) + + trainer.run_loop() + return self + + def _generate(self, count: int, syn_schema: Schema, **kwargs: Any) -> DataLoader: + self.diffusion.eval() + return self._safe_generate(self.model.sample_all, count, syn_schema) + + + +class Trainer: + def __init__(self, diffusion, train_iter, lr, weight_decay, steps, device=DEVICE): + self.diffusion = diffusion + self.ema_model = deepcopy(self.diffusion._denoise_fn) + for param in self.ema_model.parameters(): + param.detach_() + + self.train_iter = train_iter + self.steps = steps + self.init_lr = lr + self.optimizer = torch.optim.AdamW(self.diffusion.parameters(), lr=lr, weight_decay=weight_decay) + self.device = device + self.loss_history = pd.DataFrame(columns=['step', 'mloss', 'gloss', 'loss']) + self.log_every = 100 + self.print_every = 500 + self.ema_every = 1000 + + def _anneal_lr(self, step): + frac_done = step / self.steps + lr = self.init_lr * (1 - frac_done) + for param_group in self.optimizer.param_groups: + param_group["lr"] = lr + + def _run_step(self, x, out_dict): + x = x.to(self.device) + for k in out_dict: + out_dict[k] = out_dict[k].long().to(self.device) + self.optimizer.zero_grad() + loss_multi, loss_gauss = self.diffusion.mixed_loss(x, out_dict) + loss = loss_multi + loss_gauss + loss.backward() + self.optimizer.step() + + return loss_multi, loss_gauss + + def run_loop(self): + step = 0 + curr_loss_multi = 0.0 + curr_loss_gauss = 0.0 + + curr_count = 0 + while step < self.steps: + x, out_dict = next(self.train_iter) + out_dict = {'y': out_dict} + batch_loss_multi, batch_loss_gauss = self._run_step(x, out_dict) + + self._anneal_lr(step) + + curr_count += len(x) + curr_loss_multi += batch_loss_multi.item() * len(x) + curr_loss_gauss += batch_loss_gauss.item() * len(x) + + if (step + 1) % self.log_every == 0: + mloss = np.around(curr_loss_multi / curr_count, 4) + gloss = np.around(curr_loss_gauss / curr_count, 4) + if (step + 1) % self.print_every == 0: + print(f'Step {(step + 1)}/{self.steps} MLoss: {mloss} GLoss: {gloss} Sum: {mloss + gloss}') + self.loss_history.loc[len(self.loss_history)] =[step + 1, mloss, gloss, mloss + gloss] + curr_count = 0 + curr_loss_gauss = 0.0 + curr_loss_multi = 0.0 + + update_ema(self.ema_model.parameters(), self.diffusion._denoise_fn.parameters()) + + step += 1 + + +plugin = DDPMPlugin diff --git a/third-party/tab-ddpm b/third-party/tab-ddpm new file mode 160000 index 00000000..41f2415a --- /dev/null +++ b/third-party/tab-ddpm @@ -0,0 +1 @@ +Subproject commit 41f2415a378f1e8e8f4f5c3b8736521c0d47cf22 From fed898b86dd68a47bbf105877de69217add0d0a0 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Fri, 3 Mar 2023 13:55:30 +0100 Subject: [PATCH 02/95] Add DDPM test script and update DDPM plugin --- .../core/models/tabular_ddpm/__init__.py | 36 ++++- .../gaussian_multinomial_diffsuion.py | 31 ++--- .../plugins/core/models/tabular_ddpm/utils.py | 2 +- src/synthcity/plugins/generic/plugin_ddpm.py | 127 +++++++++--------- tests/plugins/generic/test_ddpm.py | 125 +++++++++++++++++ 5 files changed, 234 insertions(+), 87 deletions(-) create mode 100644 
tests/plugins/generic/test_ddpm.py diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py index 80d346c2..6dfe0bb3 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py @@ -1,2 +1,36 @@ from .gaussian_multinomial_diffsuion import GaussianMultinomialDiffusion # noqa -from .modules import MLPDiffusion, ResNetDiffusion # noqa \ No newline at end of file +from .modules import MLPDiffusion, ResNetDiffusion # noqa + +# stdlib +from copy import deepcopy +from typing import Any, Optional, Union + +# third party +import numpy as np +import pandas as pd +import torch +from pydantic import validate_arguments +from sklearn.preprocessing import OneHotEncoder +from torch import nn + +# synthcity absolute +from synthcity.utils.constants import DEVICE +from synthcity.utils.samplers import BaseSampler, ConditionalDatasetSampler + +# synthcity relative +from ..tabular_encoder import TabularEncoder + + +# class TabDDPM(nn.Module): +# def __init__( +# self, +# X: pd.DataFrame, + +# def generate(self, n_samples: int) -> pd.DataFrame: +# self.eval() +# with torch.no_grad(): +# samples = self.diffusion.sample(n_samples) +# return samples + +# def forward(self, count: int) -> pd.DataFrame: +# pass \ No newline at end of file diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py index 0d0f2ce4..7e93c070 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py @@ -6,6 +6,7 @@ import torch.nn.functional as F import torch import math +import pandas as pd import numpy as np from .utils import * @@ -13,7 +14,6 @@ """ Based in part on: https://github.com/lucidrains/denoising-diffusion-pytorch/blob/5989f4c77eafcdc6be0fb4739f0f277a6dd7f7d8/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py#L281 """ -eps = 1e-8 def get_named_beta_schedule(schedule_name, num_diffusion_timesteps): """ @@ -59,12 +59,13 @@ def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999): betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) return np.array(betas) + class GaussianMultinomialDiffusion(torch.nn.Module): def __init__( self, - num_classes: np.array, - num_numerical_features: int, denoise_fn, + num_numerical_features, + num_classes=None, num_timesteps=1000, gaussian_loss_type='mse', gaussian_parametrization='eps', @@ -83,7 +84,7 @@ def __init__( ' This is expensive both in terms of memory and computation.') self.num_numerical_features = num_numerical_features - self.num_classes = num_classes # it as a vector [K1, K2, ..., Km] + self.num_classes = num_classes or [0] self.num_classes_expanded = torch.from_numpy( np.concatenate([num_classes[i].repeat(num_classes[i]) for i in range(len(num_classes))]) ).to(device) @@ -213,7 +214,6 @@ def gaussian_p_mean_variance( model_variance = extract(model_variance, t, x.shape) model_log_variance = extract(model_log_variance, t, x.shape) - if self.gaussian_parametrization == 'eps': pred_xstart = self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output) elif self.gaussian_parametrization == 'x0': @@ -299,7 +299,7 @@ def _gaussian_loss(self, model_out, x_start, x_t, t, noise, model_kwargs=None): return terms['loss'] - def _predict_xstart_from_eps(self, x_t, t, eps): + def 
_predict_xstart_from_eps(self, x_t, t, eps=1e-8): assert x_t.shape == eps.shape return ( extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t @@ -702,7 +702,6 @@ def mixed_elbo(self, x0, out_dict): out_mean = torch.stack(out_mean, dim=1) true_mean = torch.stack(true_mean, dim=1) - prior_gauss = self._prior_gaussian(x_num) prior_multin = torch.tensor([0.0]) @@ -920,7 +919,6 @@ def sample_ddim(self, num_samples, y_dist): z_cat = ohe_to_categories(z_ohe, self.num_classes) sample = torch.cat([z_norm, z_cat], dim=1).cpu() return sample, out_dict - @torch.no_grad() def sample(self, num_samples, y_dist): @@ -962,31 +960,28 @@ def sample(self, num_samples, y_dist): sample = torch.cat([z_norm, z_cat], dim=1).cpu() return sample, out_dict - def sample_all(self, num_samples, batch_size, y_dist, ddim=False): + def sample_all(self, num_samples, y_dist, max_batch_size=2000, ddim=False): if ddim: print('Sample using DDIM.') sample_fn = self.sample_ddim else: sample_fn = self.sample - - b = batch_size + bs = np.diff(list(range(0, num_samples, max_batch_size)) + [num_samples]) all_y = [] all_samples = [] - num_generated = 0 - while num_generated < num_samples: + + for b in bs: sample, out_dict = sample_fn(b, y_dist) mask_nan = torch.any(sample.isnan(), dim=1) sample = sample[~mask_nan] + if sample.shape[0] != b: + raise FoundNANsError out_dict['y'] = out_dict['y'][~mask_nan] - all_samples.append(sample) all_y.append(out_dict['y'].cpu()) - if sample.shape[0] != b: - raise FoundNANsError - num_generated += sample.shape[0] x_gen = torch.cat(all_samples, dim=0)[:num_samples] y_gen = torch.cat(all_y, dim=0)[:num_samples] - return x_gen, y_gen \ No newline at end of file + return x_gen, y_gen diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/utils.py b/src/synthcity/plugins/core/models/tabular_ddpm/utils.py index 6376bfbf..95abd42a 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/utils.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/utils.py @@ -171,4 +171,4 @@ def log_onehot_to_index(log_x): class FoundNANsError(BaseException): """Found NANs during sampling""" def __init__(self, message='Found NANs during sampling.'): - super(FoundNANsError, self).__init__(message) \ No newline at end of file + super(FoundNANsError, self).__init__(message) diff --git a/src/synthcity/plugins/generic/plugin_ddpm.py b/src/synthcity/plugins/generic/plugin_ddpm.py index 999f7312..5565eb92 100644 --- a/src/synthcity/plugins/generic/plugin_ddpm.py +++ b/src/synthcity/plugins/generic/plugin_ddpm.py @@ -26,12 +26,13 @@ IntegerDistribution, ) from synthcity.plugins.core.models.tabular_ddpm import GaussianMultinomialDiffusion, MLPDiffusion, ResNetDiffusion +from synthcity.plugins.core.models.tabular_encoder import TabularEncoder from synthcity.plugins.core.plugin import Plugin from synthcity.plugins.core.schema import Schema from synthcity.utils.constants import DEVICE -class DDPMPlugin(Plugin): +class TabDDPMPlugin(Plugin): """ .. 
inheritance-diagram:: synthcity.plugins.generic.plugin_tab_ddpm.TabDDPMPlugin :parts: 1 @@ -67,6 +68,8 @@ def __init__( scheduler = 'cosine', change_val = False, device: Any = DEVICE, + log_interval: int = 100, + print_interval: int = 500, # early stopping n_iter_min: int = 100, n_iter_print: int = 50, @@ -87,7 +90,7 @@ def __init__( compress_dataset=compress_dataset, **kwargs ) - + if patience_metric is None: patience_metric = WeightedMetrics( metrics=[("detection", "detection_mlp")], @@ -110,88 +113,72 @@ def type() -> str: def hyperparameter_space(**kwargs: Any) -> List[Distribution]: raise NotImplementedError - def _fit(self, X: DataLoader, *args: Any, **kwargs: Any) -> "DDPMPlugin": + def _anneal_lr(self, step): + frac_done = step / self.steps + lr = self.lr * (1 - frac_done) + for param_group in self.optimizer.param_groups: + param_group["lr"] = lr + + def _one_step(self, x, out_dict): + x = x.to(self.device) + for k in out_dict: + out_dict[k] = out_dict[k].long().to(self.device) + self.optimizer.zero_grad() + loss_multi, loss_gauss = self.diffusion.mixed_loss(x, out_dict) + loss = loss_multi + loss_gauss + loss.backward() + self.optimizer.step() + return loss_multi, loss_gauss + def _update_ema(self, target_params, source_params, rate=0.999): + """ + Update target parameters to be closer to those of source parameters using + an exponential moving average. + :param target_params: the target parameter sequence. + :param source_params: the source parameter sequence. + :param rate: the EMA rate (closer to 1 means slower). + """ + for targ, src in zip(target_params, source_params): + targ.detach().mul_(rate).add_(src.detach(), alpha=1 - rate) + + def _fit(self, X: DataLoader, *args: Any, **kwargs: Any) -> "TabDDPMPlugin": + # TODO: add parameters of TabularEncoder + self.encoder = TabularEncoder().fit(X) + if self.model_type == 'mlp': self.model = MLPDiffusion(**self.model_params) elif self.model_type == 'resnet': self.model = ResNetDiffusion(**self.model_params) else: raise "Unknown model!" 
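+        # NOTE: num_classes is not passed to the diffusion model below, so it
+        # falls back to its default and treats every encoded column as
+        # numerical; only the Gaussian branch of the mixed loss is active here.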
- + self.diffusion = GaussianMultinomialDiffusion( - num_classes=num_classes, - num_numerical_features=num_numerical_features, denoise_fn=self.model, + num_numerical_features=self.encoder.n_features(), gaussian_loss_type=self.gaussian_loss_type, num_timesteps=self.num_timesteps, scheduler=self.scheduler, device=self.device - ).to(self.device).train() + ).to(self.device) - trainer = Trainer( - self.model, - X, - lr=self.lr, - weight_decay=self.weight_decay, - steps=self.n_iter, - device=self.device - ) - - trainer.run_loop() - return self - - def _generate(self, count: int, syn_schema: Schema, **kwargs: Any) -> DataLoader: - self.diffusion.eval() - return self._safe_generate(self.model.sample_all, count, syn_schema) - - - -class Trainer: - def __init__(self, diffusion, train_iter, lr, weight_decay, steps, device=DEVICE): - self.diffusion = diffusion - self.ema_model = deepcopy(self.diffusion._denoise_fn) + self.ema_model = deepcopy(self.model) for param in self.ema_model.parameters(): param.detach_() - self.train_iter = train_iter - self.steps = steps - self.init_lr = lr - self.optimizer = torch.optim.AdamW(self.diffusion.parameters(), lr=lr, weight_decay=weight_decay) - self.device = device + self.optimizer = torch.optim.AdamW( + self.diffusion.parameters(), lr=self.lr, weight_decay=self.weight_decay) + + # TODO: check data type of X self.loss_history = pd.DataFrame(columns=['step', 'mloss', 'gloss', 'loss']) - self.log_every = 100 - self.print_every = 500 - self.ema_every = 1000 - - def _anneal_lr(self, step): - frac_done = step / self.steps - lr = self.init_lr * (1 - frac_done) - for param_group in self.optimizer.param_groups: - param_group["lr"] = lr - - def _run_step(self, x, out_dict): - x = x.to(self.device) - for k in out_dict: - out_dict[k] = out_dict[k].long().to(self.device) - self.optimizer.zero_grad() - loss_multi, loss_gauss = self.diffusion.mixed_loss(x, out_dict) - loss = loss_multi + loss_gauss - loss.backward() - self.optimizer.step() - - return loss_multi, loss_gauss - - def run_loop(self): - step = 0 + curr_loss_multi = 0.0 curr_loss_gauss = 0.0 curr_count = 0 - while step < self.steps: - x, out_dict = next(self.train_iter) + for step in range(self.n_iter): + x, out_dict = next(X) out_dict = {'y': out_dict} - batch_loss_multi, batch_loss_gauss = self._run_step(x, out_dict) + batch_loss_multi, batch_loss_gauss = self._one_step(x, out_dict) self._anneal_lr(step) @@ -199,19 +186,25 @@ def run_loop(self): curr_loss_multi += batch_loss_multi.item() * len(x) curr_loss_gauss += batch_loss_gauss.item() * len(x) - if (step + 1) % self.log_every == 0: + if (step + 1) % self.log_interval == 0: mloss = np.around(curr_loss_multi / curr_count, 4) gloss = np.around(curr_loss_gauss / curr_count, 4) - if (step + 1) % self.print_every == 0: + if (step + 1) % self.print_interval == 0: print(f'Step {(step + 1)}/{self.steps} MLoss: {mloss} GLoss: {gloss} Sum: {mloss + gloss}') - self.loss_history.loc[len(self.loss_history)] =[step + 1, mloss, gloss, mloss + gloss] + self.loss_history.loc[len(self.loss_history)] = [ + step + 1, mloss, gloss, mloss + gloss] curr_count = 0 curr_loss_gauss = 0.0 curr_loss_multi = 0.0 - update_ema(self.ema_model.parameters(), self.diffusion._denoise_fn.parameters()) + self._update_ema(self.ema_model.parameters(), self.model.parameters()) + + return self - step += 1 + def _generate(self, count: int, syn_schema: Schema, **kwargs: Any) -> DataLoader: + self.diffusion.eval() + # TODO: check self.model.sample_all + return self._safe_generate(self.diffusion.sample_all, 
count, syn_schema)
-plugin = DDPMPlugin
+plugin = TabDDPMPlugin
diff --git a/tests/plugins/generic/test_ddpm.py b/tests/plugins/generic/test_ddpm.py
new file mode 100644
index 00000000..ec112f48
--- /dev/null
+++ b/tests/plugins/generic/test_ddpm.py
@@ -0,0 +1,125 @@
+# third party
+import numpy as np
+import pandas as pd
+import pytest
+from generic_helpers import generate_fixtures
+from sklearn.datasets import load_iris
+
+# synthcity absolute
+from synthcity.metrics.eval import PerformanceEvaluatorXGB
+from synthcity.plugins import Plugin
+from synthcity.plugins.core.constraints import Constraints
+from synthcity.plugins.core.dataloader import GenericDataLoader
+from synthcity.plugins.generic.plugin_ddpm import plugin
+
+plugin_name = "ddpm"
+plugin_args = {"n_iter": 100}
+
+
+@pytest.mark.parametrize(
+    "test_plugin", generate_fixtures(plugin_name, plugin, plugin_args)
+)
+def test_plugin_sanity(test_plugin: Plugin) -> None:
+    assert test_plugin is not None
+
+
+@pytest.mark.parametrize(
+    "test_plugin", generate_fixtures(plugin_name, plugin, plugin_args)
+)
+def test_plugin_name(test_plugin: Plugin) -> None:
+    assert test_plugin.name() == plugin_name
+
+
+@pytest.mark.parametrize(
+    "test_plugin", generate_fixtures(plugin_name, plugin, plugin_args)
+)
+def test_plugin_type(test_plugin: Plugin) -> None:
+    assert test_plugin.type() == "generic"
+
+
+@pytest.mark.parametrize(
+    "test_plugin", generate_fixtures(plugin_name, plugin, plugin_args)
+)
+def test_plugin_hyperparams(test_plugin: Plugin) -> None:
+    assert len(test_plugin.hyperparameter_space()) == 9
+
+
+@pytest.mark.parametrize(
+    "test_plugin", generate_fixtures(plugin_name, plugin, plugin_args)
+)
+def test_plugin_fit(test_plugin: Plugin) -> None:
+    X = pd.DataFrame(load_iris()["data"])
+    test_plugin.fit(GenericDataLoader(X))
+
+
+def test_plugin_generate() -> None:
+    test_plugin = plugin(n_layers_hidden=2, n_units_hidden=100, n_iter=50)
+    X = pd.DataFrame(load_iris()["data"])
+    test_plugin.fit(GenericDataLoader(X))
+
+    X_gen = test_plugin.generate()
+    assert len(X_gen) == len(X)
+    assert test_plugin.schema_includes(X_gen)
+
+    X_gen = test_plugin.generate(50)
+    assert len(X_gen) == 50
+    assert test_plugin.schema_includes(X_gen)
+
+
+def test_plugin_generate_constraints() -> None:
+    test_plugin = plugin(n_layers_hidden=2, n_units_hidden=100, n_iter=50)
+    X = pd.DataFrame(load_iris()["data"])
+    test_plugin.fit(GenericDataLoader(X))
+
+    constraints = Constraints(
+        rules=[
+            ("0", "le", 6),
+            ("0", "ge", 4.3),
+            ("1", "le", 4.4),
+            ("1", "ge", 3),
+            ("2", "le", 5.5),
+            ("2", "ge", 1.0),
+            ("3", "le", 2),
+            ("3", "ge", 0.1),
+        ]
+    )
+
+    X_gen = test_plugin.generate(constraints=constraints).dataframe()
+    assert len(X_gen) == len(X)
+    assert test_plugin.schema_includes(X_gen)
+    assert constraints.filter(X_gen).sum() == len(X_gen)
+
+    X_gen = test_plugin.generate(count=50, constraints=constraints).dataframe()
+    assert len(X_gen) == 50
+    assert test_plugin.schema_includes(X_gen)
+    assert constraints.filter(X_gen).sum() == len(X_gen)
+    assert list(X_gen.columns) == list(X.columns)
+
+
+def test_sample_hyperparams() -> None:
+    for i in range(100):
+        args = plugin.sample_hyperparameters()
+        assert plugin(**args) is not None
+
+
+@pytest.mark.slow
+@pytest.mark.parametrize("compress_dataset", [True, False])
+def test_eval_performance_ddpm(compress_dataset: bool) -> None:
+    results = []
+
+    Xraw, y = load_iris(return_X_y=True, as_frame=True)
+    Xraw["target"] = y
+    X = GenericDataLoader(Xraw)
+
+    for retry in range(2):
+        test_plugin = 
plugin(n_iter=5000, compress_dataset=compress_dataset)
+        evaluator = PerformanceEvaluatorXGB()
+
+        test_plugin.fit(X)
+        X_syn = test_plugin.generate()
+
+        results.append(evaluator.evaluate(X, X_syn)["syn_id"])
+
+    print(plugin.name(), results)
+    assert np.mean(results) > 0.8

From 34979cf240acbaa75a4d72998f18a5ff8b2a9100 Mon Sep 17 00:00:00 2001
From: TZCai <13818704679@163.com>
Date: Sun, 5 Mar 2023 21:15:17 +0100
Subject: [PATCH 03/95] add TabDDPM class and refactor

---
 .../core/models/tabular_ddpm/__init__.py      | 143 ++++++++++++++---
 .../gaussian_multinomial_diffsuion.py         |  57 +++++--
 .../core/models/tabular_ddpm/modules.py       |  21 ++-
 .../plugins/core/models/tabular_ddpm/utils.py |  38 +++++
 src/synthcity/plugins/generic/plugin_ddpm.py  | 149 ++++++------------
 5 files changed, 263 insertions(+), 145 deletions(-)

diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py
index 6dfe0bb3..46977c79 100644
--- a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py
+++ b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py
@@ -1,6 +1,3 @@
-from .gaussian_multinomial_diffsuion import GaussianMultinomialDiffusion  # noqa
-from .modules import MLPDiffusion, ResNetDiffusion  # noqa
-
 # stdlib
 from copy import deepcopy
 from typing import Any, Optional, Union
@@ -9,28 +6,136 @@
 import numpy as np
 import pandas as pd
 import torch
-from pydantic import validate_arguments
-from sklearn.preprocessing import OneHotEncoder
 from torch import nn
+from pydantic import validate_arguments
 
 # synthcity absolute
 from synthcity.utils.constants import DEVICE
-from synthcity.utils.samplers import BaseSampler, ConditionalDatasetSampler
+from synthcity.metrics.weighted_metrics import WeightedMetrics
 
-# synthcity relative
-from ..tabular_encoder import TabularEncoder
+from .gaussian_multinomial_diffsuion import GaussianMultinomialDiffusion  # noqa
+from .modules import MLPDiffusion, ResNetDiffusion  # noqa
+from .utils import TensorDataLoader
+
+
+class TabDDPM(nn.Module):
+
+    @validate_arguments(config=dict(arbitrary_types_allowed=True))
+    def __init__(
+        self,
+        n_iter = 10000,
+        lr = 0.002,
+        weight_decay = 1e-4,
+        batch_size = 1024,
+        num_timesteps = 1000,
+        gaussian_loss_type = 'mse',
+        scheduler = 'cosine',
+        device: Any = DEVICE,
+        log_interval: int = 100,
+        print_interval: int = 500,
+        # model params
+        model_type = 'mlp',
+        rtdl_params: Optional[dict] = None,  # {'d_layers', 'dropout'}
+        dim_label_emb: int = 128,
+        # early stopping
+        n_iter_min: int = 100,
+        n_iter_print: int = 50,
+        patience: int = 5,
+    ) -> None:
+        super().__init__()
+        self.__dict__.update(locals())
+        del self.self  # only 'self' must be dropped; this __init__ takes no **kwargs
+
+    def _anneal_lr(self, step):
+        frac_done = step / self.n_iter  # n_iter is the total number of training steps
+        lr = self.lr * (1 - frac_done)
+        for param_group in self.optimizer.param_groups:
+            param_group["lr"] = lr
+
+    def _update_ema(self, target_params, source_params, rate=0.999):
+        """
+        Update target parameters to be closer to those of source parameters using
+        an exponential moving average.
+        :param target_params: the target parameter sequence.
+        :param source_params: the source parameter sequence.
+        :param rate: the EMA rate (closer to 1 means slower).
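+
+        In-place arithmetic per parameter pair (default rate=0.999):
+            targ <- rate * targ + (1 - rate) * src
+        i.e. each call moves the EMA weights 0.1% of the way toward the
+        current source weights.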
+ """ + for targ, src in zip(target_params, source_params): + targ.detach().mul_(rate).add_(src.detach(), alpha=1 - rate) -# class TabDDPM(nn.Module): -# def __init__( -# self, -# X: pd.DataFrame, + def fit(self, X: pd.DataFrame, cond=None, **kwargs: Any): + if cond is not None: + n_classes = len(np.unique(cond)) + else: + n_classes = 0 + + model_params = dict( + num_classes=n_classes, + is_y_cond=cond is not None, + rtdl_params=self.rtdl_params, + dim_t = self.dim_label_emb + ) -# def generate(self, n_samples: int) -> pd.DataFrame: -# self.eval() -# with torch.no_grad(): -# samples = self.diffusion.sample(n_samples) -# return samples + tensors = [X] if cond is None else [X, cond] + tensors = [torch.tensor(t.values, dtype=torch.float32, device=self.device) for t in tensors] + self.dataloader = TensorDataLoader(tensors, batch_size=self.batch_size) + + self.diffusion = GaussianMultinomialDiffusion( + model_type=self.model_type, + model_params=model_params, + num_numerical_features=self.encoder.n_features(), + gaussian_loss_type=self.gaussian_loss_type, + num_timesteps=self.num_timesteps, + scheduler=self.scheduler, + device=self.device + ).to(self.device) + + self.ema_model = deepcopy(self.diffusion.denoise_fn) + for param in self.ema_model.parameters(): + param.detach_() + + self.optimizer = torch.optim.AdamW( + self.diffusion.parameters(), lr=self.lr, weight_decay=self.weight_decay) -# def forward(self, count: int) -> pd.DataFrame: -# pass \ No newline at end of file + self.loss_history = pd.DataFrame(columns=['step', 'mloss', 'gloss', 'loss']) + + for step, (x, y) in enumerate(self.dataloader): + curr_loss_multi = 0.0 + curr_loss_gauss = 0.0 + curr_count = 0 + self.diffusion.train() + + self.optimizer.zero_grad() + loss_multi, loss_gauss = self.diffusion.mixed_loss(x, dict(y=y)) + loss = loss_multi + loss_gauss + loss.backward() + self.optimizer.step() + + self._anneal_lr(step) + + curr_count += len(x) + curr_loss_multi += loss_multi.item() * len(x) + curr_loss_gauss += loss_gauss.item() * len(x) + + if (step + 1) % self.log_interval == 0: + mloss = np.around(curr_loss_multi / curr_count, 4) + gloss = np.around(curr_loss_gauss / curr_count, 4) + if (step + 1) % self.print_interval == 0: + print(f'Step {(step + 1)}/{self.n_iter} MLoss: {mloss} GLoss: {gloss} Sum: {mloss + gloss}') + self.loss_history.loc[len(self.loss_history)] = [ + step + 1, mloss, gloss, mloss + gloss] + curr_count = 0 + curr_loss_gauss = 0.0 + curr_loss_multi = 0.0 + + self._update_ema(self.ema_model.parameters(), self.model.parameters()) + + if step == self.n_iter - 1: + break + + return self + + def generate(self, count: int, cond=None): + self.diffusion.eval() + sample, out_dict = self.diffusion.sample_all(count) + return sample, out_dict['y'] diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py index 7e93c070..70dd0a9f 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py @@ -10,6 +10,7 @@ import numpy as np from .utils import * +from .modules import MLPDiffusion, ResNetDiffusion """ Based in part on: https://github.com/lucidrains/denoising-diffusion-pytorch/blob/5989f4c77eafcdc6be0fb4739f0f277a6dd7f7d8/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py#L281 @@ -63,9 +64,10 @@ def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999): class 
GaussianMultinomialDiffusion(torch.nn.Module): def __init__( self, - denoise_fn, num_numerical_features, - num_classes=None, + num_classes, + model_type='mlp', + model_params=None, num_timesteps=1000, gaussian_loss_type='mse', gaussian_parametrization='eps', @@ -84,18 +86,41 @@ def __init__( ' This is expensive both in terms of memory and computation.') self.num_numerical_features = num_numerical_features - self.num_classes = num_classes or [0] + self.num_classes = num_classes self.num_classes_expanded = torch.from_numpy( np.concatenate([num_classes[i].repeat(num_classes[i]) for i in range(len(num_classes))]) ).to(device) - + self.dim_input = num_numerical_features + sum(self.num_classes) + self.slices_for_classes = [np.arange(self.num_classes[0])] offsets = np.cumsum(self.num_classes) for i in range(1, len(offsets)): self.slices_for_classes.append(np.arange(offsets[i - 1], offsets[i])) self.offsets = torch.from_numpy(np.append([0], offsets)).to(device) - self._denoise_fn = denoise_fn + if model_params is None: + model_params = dict( + d_in = self.dim_input, + num_classes = 0, + is_y_cond = False, + rtdl_params = None + ) + else: + model_params['d_in'] = self.dim_input + + if model_params['rtdl_params'] is None: + model_params['rtdl_params'] = dict( + d_layers = [256, 256, 256], + dropout = 0.0 + ) + + if model_type == 'mlp': + self.denoise_fn = MLPDiffusion(**model_params) + elif model_type == 'resnet': + self.denoise_fn = ResNetDiffusion(**model_params) + else: + raise "Unknown diffusion model type!" + self.gaussian_loss_type = gaussian_loss_type self.gaussian_parametrization = gaussian_parametrization self.multinomial_loss_type = multinomial_loss_type @@ -607,7 +632,7 @@ def mixed_loss(self, x, out_dict): x_in = torch.cat([x_num_t, log_x_cat_t], dim=1) - model_out = self._denoise_fn( + model_out = self.denoise_fn( x_in, t, **out_dict @@ -619,7 +644,8 @@ def mixed_loss(self, x, out_dict): loss_multi = torch.zeros((1,)).float() loss_gauss = torch.zeros((1,)).float() if x_cat.shape[1] > 0: - loss_multi = self._multinomial_loss(model_out_cat, log_x_cat, log_x_cat_t, t, pt, out_dict) / len(self.num_classes) + loss_multi = self._multinomial_loss(model_out_cat, log_x_cat, log_x_cat_t, t, pt, + out_dict) / len(self.num_classes) if x_num.shape[1] > 0: loss_gauss = self._gaussian_loss(model_out_num, x_num, x_num_t, t, noise) @@ -657,7 +683,7 @@ def mixed_elbo(self, x0, out_dict): else: log_x_cat_t = x_cat - model_out = self._denoise_fn( + model_out = self.denoise_fn( torch.cat([x_num_t, log_x_cat_t], dim=1), t_array, **out_dict @@ -777,7 +803,7 @@ def gaussian_ddim_sample( for t in reversed(range(T)): print(f'Sample timestep {t:4d}', end='\r') t_array = (torch.ones(b, device=device) * t).long() - out_num = self._denoise_fn(x, t_array, **out_dict) + out_num = self.denoise_fn(x, t_array, **out_dict) x = self.gaussian_ddim_step( out_num, x, @@ -831,7 +857,7 @@ def gaussian_ddim_reverse_sample( for t in range(T): print(f'Reverse timestep {t:4d}', end='\r') t_array = (torch.ones(b, device=device) * t).long() - out_num = self._denoise_fn(x, t_array, **out_dict) + out_num = self.denoise_fn(x, t_array, **out_dict) x = self.gaussian_ddim_reverse_step( out_num, x, @@ -881,7 +907,7 @@ def multinomial_ddim_step( return out @torch.no_grad() - def sample_ddim(self, num_samples, y_dist): + def sample_ddim(self, num_samples, y_dist=None): b = num_samples device = self.log_alpha.device z_norm = torch.randn((b, self.num_numerical_features), device=device) @@ -901,7 +927,7 @@ def sample_ddim(self, num_samples, 
y_dist): for i in reversed(range(0, self.num_timesteps)): print(f'Sample timestep {i:4d}', end='\r') t = torch.full((b,), i, device=device, dtype=torch.long) - model_out = self._denoise_fn( + model_out = self.denoise_fn( torch.cat([z_norm, log_z], dim=1).float(), t, **out_dict @@ -921,7 +947,8 @@ def sample_ddim(self, num_samples, y_dist): return sample, out_dict @torch.no_grad() - def sample(self, num_samples, y_dist): + def sample(self, num_samples, y_dist=None): + # TODO: handle y_dist=None b = num_samples device = self.log_alpha.device z_norm = torch.randn((b, self.num_numerical_features), device=device) @@ -941,7 +968,7 @@ def sample(self, num_samples, y_dist): for i in reversed(range(0, self.num_timesteps)): print(f'Sample timestep {i:4d}', end='\r') t = torch.full((b,), i, device=device, dtype=torch.long) - model_out = self._denoise_fn( + model_out = self.denoise_fn( torch.cat([z_norm, log_z], dim=1).float(), t, **out_dict @@ -960,7 +987,7 @@ def sample(self, num_samples, y_dist): sample = torch.cat([z_norm, z_cat], dim=1).cpu() return sample, out_dict - def sample_all(self, num_samples, y_dist, max_batch_size=2000, ddim=False): + def sample_all(self, num_samples, y_dist=None, max_batch_size=2000, ddim=False): if ddim: print('Sample using DDIM.') sample_fn = self.sample_ddim diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/modules.py b/src/synthcity/plugins/core/models/tabular_ddpm/modules.py index 472ba5b5..a6164119 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/modules.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/modules.py @@ -420,6 +420,7 @@ def forward(self, x: Tensor) -> Tensor: x = self.blocks(x) x = self.head(x) return x + #### For diffusion class MLPDiffusion(nn.Module): @@ -460,7 +461,7 @@ def forward(self, x, timesteps, y=None): return self.mlp(x) class ResNetDiffusion(nn.Module): - def __init__(self, d_in, num_classes, rtdl_params, dim_t = 256): + def __init__(self, d_in, num_classes, is_y_cond, rtdl_params, dim_t = 256): super().__init__() self.dim_t = dim_t self.num_classes = num_classes @@ -469,10 +470,13 @@ def __init__(self, d_in, num_classes, rtdl_params, dim_t = 256): rtdl_params['d_out'] = d_in rtdl_params['emb_d'] = dim_t self.resnet = ResNet.make_baseline(**rtdl_params) - - if self.num_classes > 0: + + if self.num_classes > 0 and is_y_cond: self.label_emb = nn.Embedding(self.num_classes, dim_t) + elif self.num_classes == 0 and is_y_cond: + self.label_emb = nn.Linear(1, dim_t) + self.proj = nn.Linear(d_in, dim_t) self.time_embed = nn.Sequential( nn.Linear(dim_t, dim_t), nn.SiLU(), @@ -481,6 +485,11 @@ def __init__(self, d_in, num_classes, rtdl_params, dim_t = 256): def forward(self, x, timesteps, y=None): emb = self.time_embed(timestep_embedding(timesteps, self.dim_t)) - if y is not None and self.num_classes > 0: - emb += self.label_emb(y.squeeze()) - return self.resnet(x, emb) + if self.is_y_cond and y is not None: + if self.num_classes > 0: + y = y.squeeze() + else: + y = y.resize(y.size(0), 1).float() + emb += F.silu(self.label_emb(y)) + x = self.proj(x) + emb + return self.resnet(x) diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/utils.py b/src/synthcity/plugins/core/models/tabular_ddpm/utils.py index 95abd42a..a0021a68 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/utils.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/utils.py @@ -168,7 +168,45 @@ def sliced_logsumexp(x, slices): def log_onehot_to_index(log_x): return log_x.argmax(1) + class FoundNANsError(BaseException): """Found NANs 
during sampling"""
     def __init__(self, message='Found NANs during sampling.'):
         super(FoundNANsError, self).__init__(message)
+
+
+class TensorDataLoader:
+    """
+    A DataLoader-like object for a set of tensors that can be much faster than
+    TensorDataset + DataLoader because dataloader grabs individual indices of
+    the dataset and calls cat (slow).
+    Source: https://discuss.pytorch.org/t/dataloader-much-slower-than-manual-batching/27014/6
+    """
+    def __init__(self, *tensors, batch_size=32, shuffle=False):
+        """
+        Initialize a FastTensorDataLoader.
+        :param *tensors: tensors to store. Must have the same length @ dim 0.
+        :param batch_size: batch size to load.
+        :param shuffle: if True, shuffle the data *in-place* whenever an
+            iterator is created out of this object.
+        :returns: A FastTensorDataLoader.
+        """
+        assert all(t.shape[0] == tensors[0].shape[0] for t in tensors)
+        self.tensors = tensors
+        self.dataset_len = self.tensors[0].shape[0]
+        self.batch_size = batch_size
+        self.shuffle = shuffle
+
+    def __iter__(self):
+        i = 0
+        idx = np.arange(self.dataset_len)
+        if self.shuffle:
+            np.random.shuffle(idx)
+        while True:
+            j = i + self.batch_size
+            s = slice(i, j)
+            if j > self.dataset_len:
+                s = list(range(i, self.dataset_len)) + list(range(0, j - self.dataset_len))
+                if self.shuffle:
+                    np.random.shuffle(idx)
+            yield tuple(t[idx[s]] for t in self.tensors)
+            i = j % self.dataset_len  # advance the cursor, wrapping past the end
diff --git a/src/synthcity/plugins/generic/plugin_ddpm.py b/src/synthcity/plugins/generic/plugin_ddpm.py
index 5565eb92..6ed9fd89 100644
--- a/src/synthcity/plugins/generic/plugin_ddpm.py
+++ b/src/synthcity/plugins/generic/plugin_ddpm.py
@@ -25,7 +25,7 @@
     FloatDistribution,
     IntegerDistribution,
 )
-from synthcity.plugins.core.models.tabular_ddpm import GaussianMultinomialDiffusion, MLPDiffusion, ResNetDiffusion
+from synthcity.plugins.core.models.tabular_ddpm import TabDDPM
 from synthcity.plugins.core.models.tabular_encoder import TabularEncoder
 from synthcity.plugins.core.plugin import Plugin
 from synthcity.plugins.core.schema import Schema
@@ -57,24 +57,27 @@ class TabDDPMPlugin(Plugin):
     @validate_arguments(config=dict(arbitrary_types_allowed=True))
     def __init__(
         self,
+        *,
+        is_classification: bool = False,
         n_iter = 1000,
         lr = 0.002,
         weight_decay = 1e-4,
         batch_size = 1024,
         model_type = 'mlp',
-        model_params = None,
         num_timesteps = 1000,
         gaussian_loss_type = 'mse',
         scheduler = 'cosine',
-        change_val = False,
         device: Any = DEVICE,
         log_interval: int = 100,
         print_interval: int = 500,
+        # model params
+        rtdl_params: Optional[dict] = None,  # {'d_layers', 'dropout'}
+        dim_label_emb: int = 128,
         # early stopping
         n_iter_min: int = 100,
         n_iter_print: int = 50,
         patience: int = 5,
-        patience_metric: Optional[WeightedMetrics] = None,
+        # patience_metric: Optional[WeightedMetrics] = None,
         # core plugin arguments
         random_state: int = 0,
         workspace: Path = Path("workspace"),
@@ -90,16 +93,27 @@ def __init__(
             compress_dataset=compress_dataset,
             **kwargs
         )
-
-        if patience_metric is None:
-            patience_metric = WeightedMetrics(
-                metrics=[("detection", "detection_mlp")],
-                weights=[1],
-                workspace=workspace,
-            )
-
-        self.__dict__.update(locals())
-        del self.self, self.kwargs
+
+        self.is_classification = is_classification
+
+        self.model = TabDDPM(
+            n_iter=n_iter,
+            lr=lr,
+            weight_decay=weight_decay,
+            batch_size=batch_size,
+            num_timesteps=num_timesteps,
+            gaussian_loss_type=gaussian_loss_type,
+            scheduler=scheduler,
+            device=device,
+            log_interval=log_interval,
+            print_interval=print_interval,
+            model_type=model_type,
+            rtdl_params=rtdl_params,
+
dim_label_emb=dim_label_emb, + n_iter_min=n_iter_min, + n_iter_print=n_iter_print, + patience=patience, + ) @staticmethod def name() -> str: @@ -113,98 +127,23 @@ def type() -> str: def hyperparameter_space(**kwargs: Any) -> List[Distribution]: raise NotImplementedError - def _anneal_lr(self, step): - frac_done = step / self.steps - lr = self.lr * (1 - frac_done) - for param_group in self.optimizer.param_groups: - param_group["lr"] = lr - - def _one_step(self, x, out_dict): - x = x.to(self.device) - for k in out_dict: - out_dict[k] = out_dict[k].long().to(self.device) - self.optimizer.zero_grad() - loss_multi, loss_gauss = self.diffusion.mixed_loss(x, out_dict) - loss = loss_multi + loss_gauss - loss.backward() - self.optimizer.step() - return loss_multi, loss_gauss - - def _update_ema(self, target_params, source_params, rate=0.999): - """ - Update target parameters to be closer to those of source parameters using - an exponential moving average. - :param target_params: the target parameter sequence. - :param source_params: the source parameter sequence. - :param rate: the EMA rate (closer to 1 means slower). - """ - for targ, src in zip(target_params, source_params): - targ.detach().mul_(rate).add_(src.detach(), alpha=1 - rate) - - def _fit(self, X: DataLoader, *args: Any, **kwargs: Any) -> "TabDDPMPlugin": - # TODO: add parameters of TabularEncoder - self.encoder = TabularEncoder().fit(X) - - if self.model_type == 'mlp': - self.model = MLPDiffusion(**self.model_params) - elif self.model_type == 'resnet': - self.model = ResNetDiffusion(**self.model_params) - else: - raise "Unknown model!" - - self.diffusion = GaussianMultinomialDiffusion( - denoise_fn=self.model, - num_numerical_features=self.encoder.n_features(), - gaussian_loss_type=self.gaussian_loss_type, - num_timesteps=self.num_timesteps, - scheduler=self.scheduler, - device=self.device - ).to(self.device) + def _fit(self, data: DataLoader, cond: pd.Series = None, **kwargs) -> "TabDDPMPlugin": + if self.is_classification: + assert cond is None + _, cond = data.unpack() + + if cond is not None: + cond = pd.Series(cond, index=data.index) + data = data.dataframe() + + # self.encoder = TabularEncoder().fit(X) - self.ema_model = deepcopy(self.model) - for param in self.ema_model.parameters(): - param.detach_() - - self.optimizer = torch.optim.AdamW( - self.diffusion.parameters(), lr=self.lr, weight_decay=self.weight_decay) - - # TODO: check data type of X - self.loss_history = pd.DataFrame(columns=['step', 'mloss', 'gloss', 'loss']) + self.model.fit(data, cond, **kwargs) - curr_loss_multi = 0.0 - curr_loss_gauss = 0.0 - - curr_count = 0 - for step in range(self.n_iter): - x, out_dict = next(X) - out_dict = {'y': out_dict} - batch_loss_multi, batch_loss_gauss = self._one_step(x, out_dict) - - self._anneal_lr(step) - - curr_count += len(x) - curr_loss_multi += batch_loss_multi.item() * len(x) - curr_loss_gauss += batch_loss_gauss.item() * len(x) - - if (step + 1) % self.log_interval == 0: - mloss = np.around(curr_loss_multi / curr_count, 4) - gloss = np.around(curr_loss_gauss / curr_count, 4) - if (step + 1) % self.print_interval == 0: - print(f'Step {(step + 1)}/{self.steps} MLoss: {mloss} GLoss: {gloss} Sum: {mloss + gloss}') - self.loss_history.loc[len(self.loss_history)] = [ - step + 1, mloss, gloss, mloss + gloss] - curr_count = 0 - curr_loss_gauss = 0.0 - curr_loss_multi = 0.0 - - self._update_ema(self.ema_model.parameters(), self.model.parameters()) - - return self - - def _generate(self, count: int, syn_schema: Schema, **kwargs: 
Any) -> DataLoader: - self.diffusion.eval() - # TODO: check self.model.sample_all - return self._safe_generate(self.diffusion.sample_all, count, syn_schema) - + def _generate(self, count: int, syn_schema: Schema, cond=None, **kwargs: Any) -> DataLoader: + def callback(count, cond): + sample, cond = self.model.generate(count, cond=cond) + return sample + return self._safe_generate(callback, count, syn_schema, cond=cond, **kwargs) plugin = TabDDPMPlugin From 0abdc010a15ee3abe4330ffcaf2c310baf7046d2 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Tue, 7 Mar 2023 17:35:29 +0100 Subject: [PATCH 04/95] handle discrete cols and label generation --- .../core/models/tabular_ddpm/__init__.py | 24 ++- .../gaussian_multinomial_diffsuion.py | 167 ++++++++---------- .../core/models/tabular_ddpm/modules.py | 1 + .../plugins/core/models/tabular_encoder.py | 15 +- src/synthcity/plugins/generic/plugin_ddpm.py | 18 +- src/synthcity/utils/dataframe.py | 20 ++- 6 files changed, 126 insertions(+), 119 deletions(-) diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py index 46977c79..d4fa28e6 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py @@ -11,6 +11,7 @@ # synthcity absolute from synthcity.utils.constants import DEVICE +from synthcity.utils.dataframe import discrete_columns from synthcity.metrics.weighted_metrics import WeightedMetrics from .gaussian_multinomial_diffsuion import GaussianMultinomialDiffusion # noqa @@ -65,12 +66,21 @@ def _update_ema(self, target_params, source_params, rate=0.999): def fit(self, X: pd.DataFrame, cond=None, **kwargs: Any): if cond is not None: - n_classes = len(np.unique(cond)) + n_labels = cond.nunique() else: - n_classes = 0 + n_labels = 0 + cat_cols = discrete_columns(X, return_counts=True) + ini_cols = X.columns + cat_cols, cat_counts = zip(*cat_cols) + # reorder the columns so that the categorical ones go to the end + X = X[np.hstack([X.columns[~X.keys().isin(cat_cols)], cat_cols])] + cur_cols = X.columns + # find the permutation from the reordered columns to the original ones + self._col_perm = np.argsort(cur_cols)[np.argsort(np.argsort(ini_cols))] + model_params = dict( - num_classes=n_classes, + num_classes=n_labels, is_y_cond=cond is not None, rtdl_params=self.rtdl_params, dim_t = self.dim_label_emb @@ -83,7 +93,8 @@ def fit(self, X: pd.DataFrame, cond=None, **kwargs: Any): self.diffusion = GaussianMultinomialDiffusion( model_type=self.model_type, model_params=model_params, - num_numerical_features=self.encoder.n_features(), + num_categorical_features=cat_counts, + num_numerical_features=X.shape[1]-len(cat_cols), gaussian_loss_type=self.gaussian_loss_type, num_timesteps=self.num_timesteps, scheduler=self.scheduler, @@ -137,5 +148,6 @@ def fit(self, X: pd.DataFrame, cond=None, **kwargs: Any): def generate(self, count: int, cond=None): self.diffusion.eval() - sample, out_dict = self.diffusion.sample_all(count) - return sample, out_dict['y'] + sample = self.diffusion.sample_all(count, cond).detach().cpu().numpy() + sample = sample[:, self._col_perm] + return sample diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py index 70dd0a9f..dfcbd00a 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py +++ 
b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py @@ -65,7 +65,7 @@ class GaussianMultinomialDiffusion(torch.nn.Module): def __init__( self, num_numerical_features, - num_classes, + num_categorical_features, model_type='mlp', model_params=None, num_timesteps=1000, @@ -85,12 +85,12 @@ def __init__( print('Computing the loss using the bound on _all_ timesteps.' ' This is expensive both in terms of memory and computation.') - self.num_numerical_features = num_numerical_features - self.num_classes = num_classes + self.num_numerics = num_numerical_features + self.num_classes = num_categorical_features self.num_classes_expanded = torch.from_numpy( - np.concatenate([num_classes[i].repeat(num_classes[i]) for i in range(len(num_classes))]) + np.concatenate([num_categorical_features[i].repeat(num_categorical_features[i]) for i in range(len(num_categorical_features))]) ).to(device) - self.dim_input = num_numerical_features + sum(self.num_classes) + self.dim_input = self.num_numerics + sum(self.num_classes) self.slices_for_classes = [np.arange(self.num_classes[0])] offsets = np.cumsum(self.num_classes) @@ -391,7 +391,7 @@ def q_pred(self, log_x_start, t): return log_probs - def predict_start(self, model_out, log_x_t, t, out_dict): + def predict_start(self, model_out, log_x_t): # model_out = self._denoise_fn(x_t, t.to(x_t.device), **out_dict) @@ -434,25 +434,25 @@ def q_posterior(self, log_x_start, log_x_t, t): return log_EV_xtmin_given_xt_given_xstart - def p_pred(self, model_out, log_x, t, out_dict): + def p_pred(self, model_out, log_x, t): if self.parametrization == 'x0': - log_x_recon = self.predict_start(model_out, log_x, t=t, out_dict=out_dict) + log_x_recon = self.predict_start(model_out, log_x) log_model_pred = self.q_posterior( log_x_start=log_x_recon, log_x_t=log_x, t=t) elif self.parametrization == 'direct': - log_model_pred = self.predict_start(model_out, log_x, t=t, out_dict=out_dict) + log_model_pred = self.predict_start(model_out, log_x) else: raise ValueError return log_model_pred @torch.no_grad() - def p_sample(self, model_out, log_x, t, out_dict): - model_log_prob = self.p_pred(model_out, log_x=log_x, t=t, out_dict=out_dict) + def p_sample(self, model_out, log_x, t): + model_log_prob = self.p_pred(model_out, log_x=log_x, t=t) out = self.log_sample_categorical(model_log_prob) return out @torch.no_grad() - def p_sample_loop(self, shape, out_dict): + def p_sample_loop(self, shape): device = self.log_alpha.device b = shape[0] @@ -460,12 +460,12 @@ def p_sample_loop(self, shape, out_dict): img = torch.randn(shape, device=device) for i in reversed(range(1, self.num_timesteps)): - img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), out_dict) + img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long)) return img @torch.no_grad() - def _sample(self, image_size, out_dict, batch_size = 16): - return self.p_sample_loop((batch_size, 3, image_size, image_size), out_dict) + def _sample(self, image_size, batch_size = 16): + return self.p_sample_loop((batch_size, 3, image_size, image_size)) @torch.no_grad() def interpolate(self, x1, x2, t = None, lam = 0.5): @@ -502,7 +502,7 @@ def q_sample(self, log_x_start, t): return log_sample - def nll(self, log_x_start, out_dict): + def nll(self, log_x_start): b = log_x_start.size(0) device = log_x_start.device loss = 0 @@ -512,8 +512,7 @@ def nll(self, log_x_start, out_dict): kl = self.compute_Lt( log_x_start=log_x_start, log_x_t=self.q_sample(log_x_start=log_x_start, t=t_array), - 
t=t_array, - out_dict=out_dict) + t=t_array) loss += kl @@ -532,10 +531,10 @@ def kl_prior(self, log_x_start): kl_prior = self.multinomial_kl(log_qxT_prob, log_half_prob) return sum_except_batch(kl_prior) - def compute_Lt(self, model_out, log_x_start, log_x_t, t, out_dict, detach_mean=False): + def compute_Lt(self, model_out, log_x_start, log_x_t, t, detach_mean=False): log_true_prob = self.q_posterior( log_x_start=log_x_start, log_x_t=log_x_t, t=t) - log_model_prob = self.p_pred(model_out, log_x=log_x_t, t=t, out_dict=out_dict) + log_model_prob = self.p_pred(model_out, log_x=log_x_t, t=t) if detach_mean: log_model_prob = log_model_prob.detach() @@ -574,11 +573,11 @@ def sample_time(self, b, device, method='uniform'): else: raise ValueError - def _multinomial_loss(self, model_out, log_x_start, log_x_t, t, pt, out_dict): + def _multinomial_loss(self, model_out, log_x_start, log_x_t, t, pt): if self.multinomial_loss_type == 'vb_stochastic': kl = self.compute_Lt( - model_out, log_x_start, log_x_t, t, out_dict + model_out, log_x_start, log_x_t, t ) kl_prior = self.kl_prior(log_x_start) # Upweigh loss term of the kl @@ -593,10 +592,13 @@ def _multinomial_loss(self, model_out, log_x_start, log_x_t, t, pt, out_dict): else: raise ValueError() - def log_prob(self, x, out_dict): + #! Not used + def log_prob(self, x): b, device = x.size(0), x.device + if self.training: - return self._multinomial_loss(x, out_dict) + #! not enough arguments + return self._multinomial_loss(x) else: log_x_start = index_to_log_onehot(x, self.num_classes) @@ -604,7 +606,7 @@ def log_prob(self, x, out_dict): t, pt = self.sample_time(b, device, 'importance') kl = self.compute_Lt( - log_x_start, self.q_sample(log_x_start=log_x_start, t=t), t, out_dict) + log_x_start, self.q_sample(log_x_start=log_x_start, t=t), t) kl_prior = self.kl_prior(log_x_start) @@ -613,13 +615,13 @@ def log_prob(self, x, out_dict): return -loss - def mixed_loss(self, x, out_dict): + def mixed_loss(self, x, cond=None): b = x.shape[0] device = x.device t, pt = self.sample_time(b, device, 'uniform') - x_num = x[:, :self.num_numerical_features] - x_cat = x[:, self.num_numerical_features:] + x_num = x[:, :self.num_numerics] + x_cat = x[:, self.num_numerics:] x_num_t = x_num log_x_cat_t = x_cat @@ -634,18 +636,17 @@ def mixed_loss(self, x, out_dict): model_out = self.denoise_fn( x_in, - t, - **out_dict + t, y=cond ) - model_out_num = model_out[:, :self.num_numerical_features] - model_out_cat = model_out[:, self.num_numerical_features:] + model_out_num = model_out[:, :self.num_numerics] + model_out_cat = model_out[:, self.num_numerics:] loss_multi = torch.zeros((1,)).float() loss_gauss = torch.zeros((1,)).float() if x_cat.shape[1] > 0: - loss_multi = self._multinomial_loss(model_out_cat, log_x_cat, log_x_cat_t, t, pt, - out_dict) / len(self.num_classes) + loss_multi = self._multinomial_loss(model_out_cat, log_x_cat, log_x_cat_t, + t, pt) / len(self.num_classes) if x_num.shape[1] > 0: loss_gauss = self._gaussian_loss(model_out_num, x_num, x_num_t, t, noise) @@ -656,12 +657,12 @@ def mixed_loss(self, x, out_dict): return loss_multi.mean(), loss_gauss.mean() @torch.no_grad() - def mixed_elbo(self, x0, out_dict): + def mixed_elbo(self, x0, cond=None): b = x0.size(0) device = x0.device - x_num = x0[:, :self.num_numerical_features] - x_cat = x0[:, self.num_numerical_features:] + x_num = x0[:, :self.num_numerics] + x_cat = x0[:, self.num_numerics:] has_cat = x_cat.shape[1] > 0 if has_cat: log_x_cat = index_to_log_onehot(x_cat.long(), self.num_classes).to(device) @@ 
-685,12 +686,11 @@ def mixed_elbo(self, x0, out_dict): model_out = self.denoise_fn( torch.cat([x_num_t, log_x_cat_t], dim=1), - t_array, - **out_dict + t_array, y=cond ) - model_out_num = model_out[:, :self.num_numerical_features] - model_out_cat = model_out[:, self.num_numerical_features:] + model_out_num = model_out[:, :self.num_numerics] + model_out_cat = model_out[:, self.num_numerics:] kl = torch.tensor([0.0]) if has_cat: @@ -699,7 +699,6 @@ def mixed_elbo(self, x0, out_dict): log_x_start=log_x_cat, log_x_t=log_x_cat_t, t=t_array, - out_dict=out_dict ) out = self._vb_terms_bpd( @@ -794,7 +793,7 @@ def gaussian_ddim_sample( self, noise, T, - out_dict, + cond=None, eta=0.0 ): x = noise @@ -803,7 +802,7 @@ def gaussian_ddim_sample( for t in reversed(range(T)): print(f'Sample timestep {t:4d}', end='\r') t_array = (torch.ones(b, device=device) * t).long() - out_num = self.denoise_fn(x, t_array, **out_dict) + out_num = self.denoise_fn(x, t_array, y=cond) x = self.gaussian_ddim_step( out_num, x, @@ -850,14 +849,14 @@ def gaussian_ddim_reverse_sample( self, x, T, - out_dict, + cond=None ): b = x.shape[0] device = x.device for t in range(T): print(f'Reverse timestep {t:4d}', end='\r') t_array = (torch.ones(b, device=device) * t).long() - out_num = self.denoise_fn(x, t_array, **out_dict) + out_num = self.denoise_fn(x, t_array, y=cond) x = self.gaussian_ddim_reverse_step( out_num, x, @@ -875,11 +874,10 @@ def multinomial_ddim_step( model_out_cat, log_x_t, t, - out_dict, eta=0.0 ): # not ddim, essentially - log_x0 = self.predict_start(model_out_cat, log_x_t=log_x_t, t=t, out_dict=out_dict) + log_x0 = self.predict_start(model_out_cat, log_x_t=log_x_t) alpha_bar = extract(self.alphas_cumprod, t, log_x_t.shape) alpha_bar_prev = extract(self.alphas_cumprod_prev, t, log_x_t.shape) @@ -907,10 +905,10 @@ def multinomial_ddim_step( return out @torch.no_grad() - def sample_ddim(self, num_samples, y_dist=None): + def sample_ddim(self, num_samples, cond=None): b = num_samples device = self.log_alpha.device - z_norm = torch.randn((b, self.num_numerical_features), device=device) + z_norm = torch.randn((b, self.num_numerics), device=device) has_cat = self.num_classes[0] != 0 log_z = torch.zeros((b, 0), device=device).float() @@ -918,25 +916,24 @@ def sample_ddim(self, num_samples, y_dist=None): uniform_logits = torch.zeros((b, len(self.num_classes_expanded)), device=device) log_z = self.log_sample_categorical(uniform_logits) - y = torch.multinomial( - y_dist, - num_samples=b, - replacement=True - ) - out_dict = {'y': y.long().to(device)} + # y = torch.multinomial( + # cond, + # num_samples=b, + # replacement=True + # ) + # out_dict = {'y': y.long().to(device)} for i in reversed(range(0, self.num_timesteps)): print(f'Sample timestep {i:4d}', end='\r') t = torch.full((b,), i, device=device, dtype=torch.long) model_out = self.denoise_fn( torch.cat([z_norm, log_z], dim=1).float(), - t, - **out_dict + t, y=cond ) - model_out_num = model_out[:, :self.num_numerical_features] - model_out_cat = model_out[:, self.num_numerical_features:] + model_out_num = model_out[:, :self.num_numerics] + model_out_cat = model_out[:, self.num_numerics:] z_norm = self.gaussian_ddim_step(model_out_num, z_norm, t, clip_denoised=False) if has_cat: - log_z = self.multinomial_ddim_step(model_out_cat, log_z, t, out_dict) + log_z = self.multinomial_ddim_step(model_out_cat, log_z, t) print() z_ohe = torch.exp(log_z).round() @@ -944,14 +941,13 @@ def sample_ddim(self, num_samples, y_dist=None): if has_cat: z_cat = ohe_to_categories(z_ohe, 
self.num_classes) sample = torch.cat([z_norm, z_cat], dim=1).cpu() - return sample, out_dict + return sample @torch.no_grad() - def sample(self, num_samples, y_dist=None): - # TODO: handle y_dist=None + def sample(self, num_samples, cond=None): b = num_samples device = self.log_alpha.device - z_norm = torch.randn((b, self.num_numerical_features), device=device) + z_norm = torch.randn((b, self.num_numerics), device=device) has_cat = self.num_classes[0] != 0 log_z = torch.zeros((b, 0), device=device).float() @@ -959,25 +955,24 @@ def sample(self, num_samples, y_dist=None): uniform_logits = torch.zeros((b, len(self.num_classes_expanded)), device=device) log_z = self.log_sample_categorical(uniform_logits) - y = torch.multinomial( - y_dist, - num_samples=b, - replacement=True - ) - out_dict = {'y': y.long().to(device)} + # y = torch.multinomial( + # cond, + # num_samples=b, + # replacement=True + # ) + # out_dict = {'y': y.long().to(device)} for i in reversed(range(0, self.num_timesteps)): print(f'Sample timestep {i:4d}', end='\r') t = torch.full((b,), i, device=device, dtype=torch.long) model_out = self.denoise_fn( torch.cat([z_norm, log_z], dim=1).float(), - t, - **out_dict + t, y=cond ) - model_out_num = model_out[:, :self.num_numerical_features] - model_out_cat = model_out[:, self.num_numerical_features:] + model_out_num = model_out[:, :self.num_numerics] + model_out_cat = model_out[:, self.num_numerics:] z_norm = self.gaussian_p_sample(model_out_num, z_norm, t, clip_denoised=False)['sample'] if has_cat: - log_z = self.p_sample(model_out_cat, log_z, t, out_dict) + log_z = self.p_sample(model_out_cat, log_z, t=t) print() z_ohe = torch.exp(log_z).round() @@ -985,30 +980,22 @@ def sample(self, num_samples, y_dist=None): if has_cat: z_cat = ohe_to_categories(z_ohe, self.num_classes) sample = torch.cat([z_norm, z_cat], dim=1).cpu() - return sample, out_dict + return sample - def sample_all(self, num_samples, y_dist=None, max_batch_size=2000, ddim=False): + def sample_all(self, num_samples, cond=None, max_batch_size=2000, ddim=False): if ddim: print('Sample using DDIM.') sample_fn = self.sample_ddim else: sample_fn = self.sample - bs = np.diff(list(range(0, num_samples, max_batch_size)) + [num_samples]) - all_y = [] + bs = np.diff([*range(0, num_samples, max_batch_size), num_samples]) all_samples = [] for b in bs: - sample, out_dict = sample_fn(b, y_dist) - mask_nan = torch.any(sample.isnan(), dim=1) - sample = sample[~mask_nan] - if sample.shape[0] != b: + sample = sample_fn(b, cond) + if torch.any(sample.isnan()).item(): raise FoundNANsError - out_dict['y'] = out_dict['y'][~mask_nan] all_samples.append(sample) - all_y.append(out_dict['y'].cpu()) - - x_gen = torch.cat(all_samples, dim=0)[:num_samples] - y_gen = torch.cat(all_y, dim=0)[:num_samples] - return x_gen, y_gen + return torch.cat(all_samples, dim=0) diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/modules.py b/src/synthcity/plugins/core/models/tabular_ddpm/modules.py index a6164119..44c63884 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/modules.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/modules.py @@ -460,6 +460,7 @@ def forward(self, x, timesteps, y=None): x = self.proj(x) + emb return self.mlp(x) + class ResNetDiffusion(nn.Module): def __init__(self, d_in, num_classes, is_y_cond, rtdl_params, dim_t = 256): super().__init__() diff --git a/src/synthcity/plugins/core/models/tabular_encoder.py b/src/synthcity/plugins/core/models/tabular_encoder.py index 33929dc7..638b5e6c 100644 --- 
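# [sketch] The batching in the new `sample_all` above is compact and easy to
# misread; this standalone snippet (illustrative numbers, not part of the
# patch) shows what the np.diff one-liner computes. Note also that, as
# written, `sample_all` forwards the same `cond` object to every chunk, so
# per-sample conditions longer than `max_batch_size` would appear to need
# explicit slicing.
import numpy as np

num_samples, max_batch_size = 4500, 2000
bs = np.diff([*range(0, num_samples, max_batch_size), num_samples])
assert bs.sum() == num_samples
print(bs.tolist())  # [2000, 2000, 500]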
a/src/synthcity/plugins/core/models/tabular_encoder.py
+++ b/src/synthcity/plugins/core/models/tabular_encoder.py
@@ -14,6 +14,7 @@
 # synthcity absolute
 import synthcity.logger as log
 from synthcity.utils.serialization import dataframe_hash
+from synthcity.utils.dataframe import discrete_columns as find_cat_cols
 
 # synthcity relative
 from .data_encoder import ContinuousDataEncoder
@@ -103,18 +104,14 @@ def _fit_continuous(self, data: pd.Series) -> FeatureInfo:
         )
 
     def fit(
-        self, raw_data: pd.Series, discrete_columns: Optional[List] = None
+        self, raw_data: pd.DataFrame, discrete_columns: Optional[List] = None
     ) -> "BinEncoder":
         """Fit the ``BinEncoder``.
 
         Fits a ``ContinuousDataEncoder`` for continuous columns
         """
         if discrete_columns is None:
-            discrete_columns = []
-
-            for col in raw_data.columns:
-                if len(raw_data[col].unique()) < self.categorical_limit:
-                    discrete_columns.append(col)
+            discrete_columns = find_cat_cols(raw_data, self.categorical_limit)
 
         self.output_dimensions = 0
 
@@ -247,11 +244,7 @@ def fit(
         This step also counts the #columns in matrix data and span information.
         """
         if discrete_columns is None:
-            discrete_columns = []
-
-            for col in raw_data.columns:
-                if len(raw_data[col].unique()) < self.categorical_limit:
-                    discrete_columns.append(col)
+            discrete_columns = find_cat_cols(raw_data, self.categorical_limit)
 
         self.output_dimensions = 0
 
diff --git a/src/synthcity/plugins/generic/plugin_ddpm.py b/src/synthcity/plugins/generic/plugin_ddpm.py
index 6ed9fd89..11371c11 100644
--- a/src/synthcity/plugins/generic/plugin_ddpm.py
+++ b/src/synthcity/plugins/generic/plugin_ddpm.py
@@ -131,19 +131,25 @@ def _fit(self, data: DataLoader, cond: pd.Series = None, **kwargs) -> "TabDDPMPlugin":
         if self.is_classification:
             assert cond is None
             _, cond = data.unpack()
-
+            self._labels, self._cond_dist = np.unique(cond, return_counts=True)
+            self._cond_dist = self._cond_dist / self._cond_dist.sum()
+
         if cond is not None:
             cond = pd.Series(cond, index=data.index)
 
-        data = data.dataframe()
+        # NOTE: should we include the target column in `data`?
+        data = data.dataframe()
+        # self.encoder = TabularEncoder().fit(X)
 
         self.model.fit(data, cond, **kwargs)
 
     def _generate(self, count: int, syn_schema: Schema, cond=None, **kwargs: Any) -> DataLoader:
-        def callback(count, cond):
-            sample, cond = self.model.generate(count, cond=cond)
-            return sample
-        return self._safe_generate(callback, count, syn_schema, cond=cond, **kwargs)
+        if self.is_classification and cond is None:
+            # randomly generate labels following the distribution of the training data
+            cond = np.random.choice(self._labels, size=count, p=self._cond_dist)
+        def callback(count, cond=cond):
+            return self.model.generate(count, cond=cond)
+        return self._safe_generate(callback, count, syn_schema, **kwargs)
 
 
 plugin = TabDDPMPlugin
diff --git a/src/synthcity/utils/dataframe.py b/src/synthcity/utils/dataframe.py
index 35d7226b..069b6eab 100644
--- a/src/synthcity/utils/dataframe.py
+++ b/src/synthcity/utils/dataframe.py
@@ -4,10 +4,18 @@
 
 def constant_columns(dataframe: pd.DataFrame) -> list:
     """
-    Drops constant value columns of pandas dataframe.
+    Find constant value columns in a pandas dataframe.
     """
-    result = []
-    for column in dataframe.columns:
-        if len(dataframe[column].unique()) == 1:
-            result.append(column)
-    return result
+    return discrete_columns(dataframe, 2)
+
+
+def discrete_columns(dataframe: pd.DataFrame,
+                     max_classes: int = 10,
+                     return_counts=False) -> list:
+    """
+    Find columns containing discrete values in a pandas dataframe.
+ """ + return [(col, cnt) if return_counts else col + for col, vals in dataframe.items() + for cnt in [vals.nunique()] + if cnt < max_classes] From 405a052f16e9cc5bed6774ab95aa33f248331ce1 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Tue, 7 Mar 2023 18:01:38 +0100 Subject: [PATCH 05/95] add hparam space and update tests of DDPM --- src/synthcity/plugins/generic/plugin_ddpm.py | 33 +++++++++++++++- tests/plugins/generic/test_ddpm.py | 40 ++++++++++++-------- 2 files changed, 56 insertions(+), 17 deletions(-) diff --git a/src/synthcity/plugins/generic/plugin_ddpm.py b/src/synthcity/plugins/generic/plugin_ddpm.py index 11371c11..f2f07f80 100644 --- a/src/synthcity/plugins/generic/plugin_ddpm.py +++ b/src/synthcity/plugins/generic/plugin_ddpm.py @@ -71,7 +71,9 @@ def __init__( log_interval: int = 100, print_interval: int = 500, # model params - rtdl_params: Optional[dict] = None, # {'d_layers', 'dropout'} + num_layers: int = 3, + dim_hidden: int = 256, + dropout: float = 0.0, dim_label_emb: int = 128, # early stopping n_iter_min: int = 100, @@ -96,6 +98,10 @@ def __init__( self.is_classification = is_classification + rtdl_params = dict( + d_layers = [self.dim_hidden] * self.num_layers, + dropout = self.dropout + ) self.model = TabDDPM( n_iter=n_iter, lr=lr, @@ -125,7 +131,30 @@ def type() -> str: @staticmethod def hyperparameter_space(**kwargs: Any) -> List[Distribution]: - raise NotImplementedError + """ + Hyperparameter Search space (from the paper) + ---------------------------------------------- + Learning rate LogUniform[0.00001, 0.003] + Batch size Cat{256, 4096} + Diffusion timesteps Cat{100, 1000} + Training iterations Cat{5000, 10000, 20000} + Number of MLP layers Int{2, 4, 6, 8} + MLP width of layers Int{128, 256, 512, 1024} + Proportion of samples Float{0.25, 0.5, 1, 2, 4, 8} + ---------------------------------------------- + Dropout 0.0 + Scheduler cosine (Nichol, 2021) + Gaussian diffusion loss MSE + """ + return [ + # TODO: change to loguniform distribution + CategoricalDistribution(name="lr", choices=[1e-5, 1e-4, 1e-3, 2e-3, 3e-3]), + CategoricalDistribution(name="batch_size", choices=[256, 4096]), + CategoricalDistribution(name="num_timesteps", choices=[100, 1000]), + CategoricalDistribution(name="n_iter", choices=[5000, 10000, 20000]), + CategoricalDistribution(name="num_layers", choices=[2, 4, 6, 8]), + CategoricalDistribution(name="dim_hidden", choices=[128, 256, 512, 1024]), + ] def _fit(self, data: DataLoader, cond: pd.Series = None, **kwargs) -> "TabDDPMPlugin": if self.is_classification: diff --git a/tests/plugins/generic/test_ddpm.py b/tests/plugins/generic/test_ddpm.py index ec112f48..a398c000 100644 --- a/tests/plugins/generic/test_ddpm.py +++ b/tests/plugins/generic/test_ddpm.py @@ -13,7 +13,14 @@ from synthcity.plugins.generic.plugin_ddpm import plugin plugin_name = "ddpm" -plugin_args = {"n_iter": 100} +plugin_args = dict( + n_iter=100, + is_classification=True, + # rtdl_params=dict( + # d_layers=[256, 256], + # dropout=0.0 + # ) +) @pytest.mark.parametrize( @@ -37,13 +44,6 @@ def test_plugin_type(test_plugin: Plugin) -> None: assert test_plugin.type() == "generic" -@pytest.mark.parametrize( - "test_plugin", generate_fixtures(plugin_name, plugin, plugin_args) -) -def test_plugin_hyperparams(test_plugin: Plugin) -> None: - assert len(test_plugin.hyperparameter_space()) == 9 - - @pytest.mark.parametrize( "test_plugin", generate_fixtures(plugin_name, plugin, plugin_args) ) @@ -52,8 +52,10 @@ def test_plugin_fit(test_plugin: Plugin) -> None: 
test_plugin.fit(GenericDataLoader(X)) -def test_plugin_generate() -> None: - test_plugin = plugin(n_layers_hidden=2, n_units_hidden=100, n_iter=50) +@pytest.mark.parametrize( + "test_plugin", generate_fixtures(plugin_name, plugin, plugin_args) +) +def test_plugin_generate(test_plugin: Plugin) -> None: X = pd.DataFrame(load_iris()["data"]) test_plugin.fit(GenericDataLoader(X)) @@ -66,8 +68,10 @@ def test_plugin_generate() -> None: assert test_plugin.schema_includes(X_gen) -def test_plugin_generate_constraints() -> None: - test_plugin = plugin(n_layers_hidden=2, n_units_hidden=100, n_iter=50) +@pytest.mark.parametrize( + "test_plugin", generate_fixtures(plugin_name, plugin, plugin_args) +) +def test_plugin_generate_constraints(test_plugin: Plugin) -> None: X = pd.DataFrame(load_iris()["data"]) test_plugin.fit(GenericDataLoader(X)) @@ -96,23 +100,29 @@ def test_plugin_generate_constraints() -> None: assert list(X_gen.columns) == list(X.columns) +@pytest.mark.parametrize( + "test_plugin", generate_fixtures(plugin_name, plugin, plugin_args) +) +def test_plugin_hyperparams(test_plugin: Plugin) -> None: + assert len(test_plugin.hyperparameter_space()) == 6 + + def test_sample_hyperparams() -> None: for i in range(100): args = plugin.sample_hyperparameters() - assert plugin(**args) is not None @pytest.mark.slow @pytest.mark.parametrize("compress_dataset", [True, False]) -def test_eval_performance_nflow(compress_dataset: bool) -> None: +def test_eval_performance_ddpm(compress_dataset: bool) -> None: results = [] Xraw, y = load_iris(return_X_y=True, as_frame=True) Xraw["target"] = y X = GenericDataLoader(Xraw) - for retry in range(2): + for _ in range(2): test_plugin = plugin(n_iter=5000, compress_dataset=compress_dataset) evaluator = PerformanceEvaluatorXGB() From 0e36041c056a6642810cf3c3203d7d5e832dfd7f Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Tue, 7 Mar 2023 19:34:32 +0100 Subject: [PATCH 06/95] debug and test DDPM --- .../core/models/tabular_ddpm/.lib/__init__.py | 12 - .../core/models/tabular_ddpm/.lib/data.py | 718 ------------------ .../core/models/tabular_ddpm/.lib/deep.py | 168 ---- .../core/models/tabular_ddpm/.lib/env.py | 39 - .../core/models/tabular_ddpm/.lib/metrics.py | 158 ---- .../core/models/tabular_ddpm/.lib/util.py | 433 ----------- .../core/models/tabular_ddpm/.pipeline.py | 80 -- .../core/models/tabular_ddpm/.sample.py | 159 ---- .../core/models/tabular_ddpm/.train.py | 156 ---- .../plugins/core/models/tabular_ddpm/.tune.py | 127 ---- .../core/models/tabular_ddpm/.utils_train.py | 88 --- .../core/models/tabular_ddpm/README.md | 3 - .../core/models/tabular_ddpm/__init__.py | 20 +- .../gaussian_multinomial_diffsuion.py | 34 +- .../core/models/tabular_ddpm/requirements.txt | 15 - src/synthcity/plugins/generic/plugin_ddpm.py | 14 +- src/temp.py | 15 + tests/plugins/generic/test_ddpm.py | 1 + 18 files changed, 57 insertions(+), 2183 deletions(-) delete mode 100644 src/synthcity/plugins/core/models/tabular_ddpm/.lib/__init__.py delete mode 100644 src/synthcity/plugins/core/models/tabular_ddpm/.lib/data.py delete mode 100644 src/synthcity/plugins/core/models/tabular_ddpm/.lib/deep.py delete mode 100644 src/synthcity/plugins/core/models/tabular_ddpm/.lib/env.py delete mode 100644 src/synthcity/plugins/core/models/tabular_ddpm/.lib/metrics.py delete mode 100644 src/synthcity/plugins/core/models/tabular_ddpm/.lib/util.py delete mode 100644 src/synthcity/plugins/core/models/tabular_ddpm/.pipeline.py delete mode 100644 
src/synthcity/plugins/core/models/tabular_ddpm/.sample.py delete mode 100644 src/synthcity/plugins/core/models/tabular_ddpm/.train.py delete mode 100644 src/synthcity/plugins/core/models/tabular_ddpm/.tune.py delete mode 100644 src/synthcity/plugins/core/models/tabular_ddpm/.utils_train.py delete mode 100644 src/synthcity/plugins/core/models/tabular_ddpm/README.md delete mode 100644 src/synthcity/plugins/core/models/tabular_ddpm/requirements.txt create mode 100644 src/temp.py diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/.lib/__init__.py b/src/synthcity/plugins/core/models/tabular_ddpm/.lib/__init__.py deleted file mode 100644 index 54d6f6bb..00000000 --- a/src/synthcity/plugins/core/models/tabular_ddpm/.lib/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -import torch -from icecream import install - -torch.set_num_threads(1) -install() - -from . import env # noqa -from .data import * # noqa -from .deep import * # noqa -from .env import * # noqa -from .metrics import * # noqa -from .util import * # noqa diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/.lib/data.py b/src/synthcity/plugins/core/models/tabular_ddpm/.lib/data.py deleted file mode 100644 index 912ce259..00000000 --- a/src/synthcity/plugins/core/models/tabular_ddpm/.lib/data.py +++ /dev/null @@ -1,718 +0,0 @@ -import hashlib -from collections import Counter -from copy import deepcopy -from dataclasses import astuple, dataclass, replace -from importlib.resources import path -from pathlib import Path -from typing import Any, Literal, Optional, Union, cast, Tuple, Dict, List - -import numpy as np -import pandas as pd -from sklearn.model_selection import train_test_split -from sklearn.pipeline import make_pipeline -import sklearn.preprocessing -import torch -import os -from category_encoders import LeaveOneOutEncoder -from sklearn.impute import SimpleImputer -from sklearn.preprocessing import StandardScaler -from scipy.spatial.distance import cdist - -from . 
import env, util -from .metrics import calculate_metrics as calculate_metrics_ -from .util import TaskType, load_json - -ArrayDict = Dict[str, np.ndarray] -TensorDict = Dict[str, torch.Tensor] - - -CAT_MISSING_VALUE = '__nan__' -CAT_RARE_VALUE = '__rare__' -Normalization = Literal['standard', 'quantile', 'minmax'] -NumNanPolicy = Literal['drop-rows', 'mean'] -CatNanPolicy = Literal['most_frequent'] -CatEncoding = Literal['one-hot', 'counter'] -YPolicy = Literal['default'] - - -class StandardScaler1d(StandardScaler): - def partial_fit(self, X, *args, **kwargs): - assert X.ndim == 1 - return super().partial_fit(X[:, None], *args, **kwargs) - - def transform(self, X, *args, **kwargs): - assert X.ndim == 1 - return super().transform(X[:, None], *args, **kwargs).squeeze(1) - - def inverse_transform(self, X, *args, **kwargs): - assert X.ndim == 1 - return super().inverse_transform(X[:, None], *args, **kwargs).squeeze(1) - - -def get_category_sizes(X: Union[torch.Tensor, np.ndarray]) -> List[int]: - XT = X.T.cpu().tolist() if isinstance(X, torch.Tensor) else X.T.tolist() - return [len(set(x)) for x in XT] - - -@dataclass(frozen=False) -class Dataset: - X_num: Optional[ArrayDict] - X_cat: Optional[ArrayDict] - y: ArrayDict - y_info: Dict[str, Any] - task_type: TaskType - n_classes: Optional[int] - - @classmethod - def from_dir(cls, dir_: Union[Path, str]) -> 'Dataset': - dir_ = Path(dir_) - splits = [k for k in ['train', 'val', 'test'] if dir_.joinpath(f'y_{k}.npy').exists()] - - def load(item) -> ArrayDict: - return { - x: cast(np.ndarray, np.load(dir_ / f'{item}_{x}.npy', allow_pickle=True)) # type: ignore[code] - for x in splits - } - - if Path(dir_ / 'info.json').exists(): - info = util.load_json(dir_ / 'info.json') - else: - info = None - return Dataset( - load('X_num') if dir_.joinpath('X_num_train.npy').exists() else None, - load('X_cat') if dir_.joinpath('X_cat_train.npy').exists() else None, - load('y'), - {}, - TaskType(info['task_type']), - info.get('n_classes'), - ) - - @property - def is_binclass(self) -> bool: - return self.task_type == TaskType.BINCLASS - - @property - def is_multiclass(self) -> bool: - return self.task_type == TaskType.MULTICLASS - - @property - def is_regression(self) -> bool: - return self.task_type == TaskType.REGRESSION - - @property - def n_num_features(self) -> int: - return 0 if self.X_num is None else self.X_num['train'].shape[1] - - @property - def n_cat_features(self) -> int: - return 0 if self.X_cat is None else self.X_cat['train'].shape[1] - - @property - def n_features(self) -> int: - return self.n_num_features + self.n_cat_features - - def size(self, part: Optional[str]) -> int: - return sum(map(len, self.y.values())) if part is None else len(self.y[part]) - - @property - def nn_output_dim(self) -> int: - if self.is_multiclass: - assert self.n_classes is not None - return self.n_classes - else: - return 1 - - def get_category_sizes(self, part: str) -> List[int]: - return [] if self.X_cat is None else get_category_sizes(self.X_cat[part]) - - def calculate_metrics( - self, - predictions: Dict[str, np.ndarray], - prediction_type: Optional[str], - ) -> Dict[str, Any]: - metrics = { - x: calculate_metrics_( - self.y[x], predictions[x], self.task_type, prediction_type, self.y_info - ) - for x in predictions - } - if self.task_type == TaskType.REGRESSION: - score_key = 'rmse' - score_sign = -1 - else: - score_key = 'accuracy' - score_sign = 1 - for part_metrics in metrics.values(): - part_metrics['score'] = score_sign * part_metrics[score_key] - return 
metrics - -def change_val(dataset: Dataset, val_size: float = 0.2): - # should be done before transformations - - y = np.concatenate([dataset.y['train'], dataset.y['val']], axis=0) - - ixs = np.arange(y.shape[0]) - if dataset.is_regression: - train_ixs, val_ixs = train_test_split(ixs, test_size=val_size, random_state=777) - else: - train_ixs, val_ixs = train_test_split(ixs, test_size=val_size, random_state=777, stratify=y) - - dataset.y['train'] = y[train_ixs] - dataset.y['val'] = y[val_ixs] - - if dataset.X_num is not None: - X_num = np.concatenate([dataset.X_num['train'], dataset.X_num['val']], axis=0) - dataset.X_num['train'] = X_num[train_ixs] - dataset.X_num['val'] = X_num[val_ixs] - - if dataset.X_cat is not None: - X_cat = np.concatenate([dataset.X_cat['train'], dataset.X_cat['val']], axis=0) - dataset.X_cat['train'] = X_cat[train_ixs] - dataset.X_cat['val'] = X_cat[val_ixs] - - return dataset - -def num_process_nans(dataset: Dataset, policy: Optional[NumNanPolicy]) -> Dataset: - assert dataset.X_num is not None - nan_masks = {k: np.isnan(v) for k, v in dataset.X_num.items()} - if not any(x.any() for x in nan_masks.values()): # type: ignore[code] - assert policy is None - return dataset - - assert policy is not None - if policy == 'drop-rows': - valid_masks = {k: ~v.any(1) for k, v in nan_masks.items()} - assert valid_masks[ - 'test' - ].all(), 'Cannot drop test rows, since this will affect the final metrics.' - new_data = {} - for data_name in ['X_num', 'X_cat', 'y']: - data_dict = getattr(dataset, data_name) - if data_dict is not None: - new_data[data_name] = { - k: v[valid_masks[k]] for k, v in data_dict.items() - } - dataset = replace(dataset, **new_data) - elif policy == 'mean': - new_values = np.nanmean(dataset.X_num['train'], axis=0) - X_num = deepcopy(dataset.X_num) - for k, v in X_num.items(): - num_nan_indices = np.where(nan_masks[k]) - v[num_nan_indices] = np.take(new_values, num_nan_indices[1]) - dataset = replace(dataset, X_num=X_num) - else: - assert util.raise_unknown('policy', policy) - return dataset - - -# Inspired by: https://github.com/yandex-research/rtdl/blob/a4c93a32b334ef55d2a0559a4407c8306ffeeaee/lib/data.py#L20 -def normalize( - X: ArrayDict, normalization: Normalization, seed: Optional[int], return_normalizer : bool = False -) -> ArrayDict: - X_train = X['train'] - if normalization == 'standard': - normalizer = sklearn.preprocessing.StandardScaler() - elif normalization == 'minmax': - normalizer = sklearn.preprocessing.MinMaxScaler() - elif normalization == 'quantile': - normalizer = sklearn.preprocessing.QuantileTransformer( - output_distribution='normal', - n_quantiles=max(min(X['train'].shape[0] // 30, 1000), 10), - subsample=1e9, - random_state=seed, - ) - # noise = 1e-3 - # if noise > 0: - # assert seed is not None - # stds = np.std(X_train, axis=0, keepdims=True) - # noise_std = noise / np.maximum(stds, noise) # type: ignore[code] - # X_train = X_train + noise_std * np.random.default_rng(seed).standard_normal( - # X_train.shape - # ) - else: - util.raise_unknown('normalization', normalization) - normalizer.fit(X_train) - if return_normalizer: - return {k: normalizer.transform(v) for k, v in X.items()}, normalizer - return {k: normalizer.transform(v) for k, v in X.items()} - - -def cat_process_nans(X: ArrayDict, policy: Optional[CatNanPolicy]) -> ArrayDict: - assert X is not None - nan_masks = {k: v == CAT_MISSING_VALUE for k, v in X.items()} - if any(x.any() for x in nan_masks.values()): # type: ignore[code] - if policy is None: - X_new = X - elif 
policy == 'most_frequent': - imputer = SimpleImputer(missing_values=CAT_MISSING_VALUE, strategy=policy) # type: ignore[code] - imputer.fit(X['train']) - X_new = {k: cast(np.ndarray, imputer.transform(v)) for k, v in X.items()} - else: - util.raise_unknown('categorical NaN policy', policy) - else: - assert policy is None - X_new = X - return X_new - - -def cat_drop_rare(X: ArrayDict, min_frequency: float) -> ArrayDict: - assert 0.0 < min_frequency < 1.0 - min_count = round(len(X['train']) * min_frequency) - X_new = {x: [] for x in X} - for column_idx in range(X['train'].shape[1]): - counter = Counter(X['train'][:, column_idx].tolist()) - popular_categories = {k for k, v in counter.items() if v >= min_count} - for part in X_new: - X_new[part].append( - [ - (x if x in popular_categories else CAT_RARE_VALUE) - for x in X[part][:, column_idx].tolist() - ] - ) - return {k: np.array(v).T for k, v in X_new.items()} - - -def cat_encode( - X: ArrayDict, - encoding: Optional[CatEncoding], - y_train: Optional[np.ndarray], - seed: Optional[int], - return_encoder : bool = False -) -> Tuple[ArrayDict, bool, Optional[Any]]: # (X, is_converted_to_numerical) - if encoding != 'counter': - y_train = None - - # Step 1. Map strings to 0-based ranges - - if encoding is None: - unknown_value = np.iinfo('int64').max - 3 - oe = sklearn.preprocessing.OrdinalEncoder( - handle_unknown='use_encoded_value', # type: ignore[code] - unknown_value=unknown_value, # type: ignore[code] - dtype='int64', # type: ignore[code] - ).fit(X['train']) - encoder = make_pipeline(oe) - encoder.fit(X['train']) - X = {k: encoder.transform(v) for k, v in X.items()} - max_values = X['train'].max(axis=0) - for part in X.keys(): - if part == 'train': continue - for column_idx in range(X[part].shape[1]): - X[part][X[part][:, column_idx] == unknown_value, column_idx] = ( - max_values[column_idx] + 1 - ) - if return_encoder: - return (X, False, encoder) - return (X, False) - - # Step 2. Encode. 
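# [sketch] The vendored cat_encode above (removed by this patch) relies on a
# sentinel trick in "Step 1": categories unseen at fit time are encoded to a
# huge placeholder value, then remapped per column to one past the largest
# code seen in training. A self-contained illustration with toy arrays (not
# the deleted code verbatim):
import numpy as np
import sklearn.preprocessing

unknown_value = np.iinfo('int64').max - 3
oe = sklearn.preprocessing.OrdinalEncoder(
    handle_unknown='use_encoded_value', unknown_value=unknown_value, dtype='int64'
)
X_train = np.array([['a'], ['b'], ['a']])
X_test = np.array([['b'], ['c']])  # 'c' was never seen during fit
oe.fit(X_train)
enc = oe.transform(X_test)
max_values = oe.transform(X_train).max(axis=0)
for j in range(enc.shape[1]):
    enc[enc[:, j] == unknown_value, j] = max_values[j] + 1
print(enc.tolist())  # [[1], [2]] -- 'c' becomes max + 1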
- - elif encoding == 'one-hot': - ohe = sklearn.preprocessing.OneHotEncoder( - handle_unknown='ignore', sparse=False, dtype=np.float32 # type: ignore[code] - ) - encoder = make_pipeline(ohe) - - # encoder.steps.append(('ohe', ohe)) - encoder.fit(X['train']) - X = {k: encoder.transform(v) for k, v in X.items()} - elif encoding == 'counter': - assert y_train is not None - assert seed is not None - loe = LeaveOneOutEncoder(sigma=0.1, random_state=seed, return_df=False) - encoder.steps.append(('loe', loe)) - encoder.fit(X['train'], y_train) - X = {k: encoder.transform(v).astype('float32') for k, v in X.items()} # type: ignore[code] - if not isinstance(X['train'], pd.DataFrame): - X = {k: v.values for k, v in X.items()} # type: ignore[code] - else: - util.raise_unknown('encoding', encoding) - - if return_encoder: - return X, True, encoder # type: ignore[code] - return (X, True) - - -def build_target( - y: ArrayDict, policy: Optional[YPolicy], task_type: TaskType -) -> Tuple[ArrayDict, Dict[str, Any]]: - info: Dict[str, Any] = {'policy': policy} - if policy is None: - pass - elif policy == 'default': - if task_type == TaskType.REGRESSION: - mean, std = float(y['train'].mean()), float(y['train'].std()) - y = {k: (v - mean) / std for k, v in y.items()} - info['mean'] = mean - info['std'] = std - else: - util.raise_unknown('policy', policy) - return y, info - - -@dataclass(frozen=True) -class Transformations: - seed: int = 0 - normalization: Optional[Normalization] = None - num_nan_policy: Optional[NumNanPolicy] = None - cat_nan_policy: Optional[CatNanPolicy] = None - cat_min_frequency: Optional[float] = None - cat_encoding: Optional[CatEncoding] = None - y_policy: Optional[YPolicy] = 'default' - - -def transform_dataset( - dataset: Dataset, - transformations: Transformations, - cache_dir: Optional[Path], - return_transforms: bool = False -) -> Dataset: - # WARNING: the order of transformations matters. Moreover, the current - # implementation is not ideal in that sense. 
- if cache_dir is not None: - transformations_md5 = hashlib.md5( - str(transformations).encode('utf-8') - ).hexdigest() - transformations_str = '__'.join(map(str, astuple(transformations))) - cache_path = ( - cache_dir / f'cache__{transformations_str}__{transformations_md5}.pickle' - ) - if cache_path.exists(): - cache_transformations, value = util.load_pickle(cache_path) - if transformations == cache_transformations: - print( - f"Using cached features: {cache_dir.name + '/' + cache_path.name}" - ) - return value - else: - raise RuntimeError(f'Hash collision for {cache_path}') - else: - cache_path = None - - if dataset.X_num is not None: - dataset = num_process_nans(dataset, transformations.num_nan_policy) - - num_transform = None - cat_transform = None - X_num = dataset.X_num - - if X_num is not None and transformations.normalization is not None: - X_num, num_transform = normalize( - X_num, - transformations.normalization, - transformations.seed, - return_normalizer=True - ) - num_transform = num_transform - - if dataset.X_cat is None: - assert transformations.cat_nan_policy is None - assert transformations.cat_min_frequency is None - # assert transformations.cat_encoding is None - X_cat = None - else: - X_cat = cat_process_nans(dataset.X_cat, transformations.cat_nan_policy) - if transformations.cat_min_frequency is not None: - X_cat = cat_drop_rare(X_cat, transformations.cat_min_frequency) - X_cat, is_num, cat_transform = cat_encode( - X_cat, - transformations.cat_encoding, - dataset.y['train'], - transformations.seed, - return_encoder=True - ) - if is_num: - X_num = ( - X_cat - if X_num is None - else {x: np.hstack([X_num[x], X_cat[x]]) for x in X_num} - ) - X_cat = None - - y, y_info = build_target(dataset.y, transformations.y_policy, dataset.task_type) - - dataset = replace(dataset, X_num=X_num, X_cat=X_cat, y=y, y_info=y_info) - dataset.num_transform = num_transform - dataset.cat_transform = cat_transform - - if cache_path is not None: - util.dump_pickle((transformations, dataset), cache_path) - # if return_transforms: - # return dataset, num_transform, cat_transform - return dataset - - -def build_dataset( - path: Union[str, Path], - transformations: Transformations, - cache: bool -) -> Dataset: - path = Path(path) - dataset = Dataset.from_dir(path) - return transform_dataset(dataset, transformations, path if cache else None) - - -def prepare_tensors( - dataset: Dataset, device: Union[str, torch.device] -) -> Tuple[Optional[TensorDict], Optional[TensorDict], TensorDict]: - X_num, X_cat, Y = ( - None if x is None else {k: torch.as_tensor(v) for k, v in x.items()} - for x in [dataset.X_num, dataset.X_cat, dataset.y] - ) - if device.type != 'cpu': - X_num, X_cat, Y = ( - None if x is None else {k: v.to(device) for k, v in x.items()} - for x in [X_num, X_cat, Y] - ) - assert X_num is not None - assert Y is not None - if not dataset.is_multiclass: - Y = {k: v.float() for k, v in Y.items()} - return X_num, X_cat, Y - -############### -## DataLoader## -############### - -class TabDataset(torch.utils.data.Dataset): - def __init__( - self, dataset : Dataset, split : Literal['train', 'val', 'test'] - ): - super().__init__() - - self.X_num = torch.from_numpy(dataset.X_num[split]) if dataset.X_num is not None else None - self.X_cat = torch.from_numpy(dataset.X_cat[split]) if dataset.X_cat is not None else None - self.y = torch.from_numpy(dataset.y[split]) - - assert self.y is not None - assert self.X_num is not None or self.X_cat is not None - - def __len__(self): - return len(self.y) - - def 
__getitem__(self, idx): - out_dict = { - 'y': self.y[idx].long() if self.y is not None else None, - } - - x = np.empty((0,)) - if self.X_num is not None: - x = self.X_num[idx] - if self.X_cat is not None: - x = torch.cat([x, self.X_cat[idx]], dim=0) - return x.float(), out_dict - -def prepare_dataloader( - dataset : Dataset, - split : str, - batch_size: int, -): - - torch_dataset = TabDataset(dataset, split) - loader = torch.utils.data.DataLoader( - torch_dataset, - batch_size=batch_size, - shuffle=(split == 'train'), - num_workers=1, - ) - while True: - yield from loader - -def prepare_torch_dataloader( - dataset : Dataset, - split : str, - shuffle : bool, - batch_size: int, -) -> torch.utils.data.DataLoader: - - torch_dataset = TabDataset(dataset, split) - loader = torch.utils.data.DataLoader(torch_dataset, batch_size=batch_size, shuffle=shuffle, num_workers=1) - - return loader - -def dataset_from_csv(paths : Dict[str, str], cat_features, target, T): - assert 'train' in paths - y = {} - X_num = {} - X_cat = {} if len(cat_features) else None - for split in paths.keys(): - df = pd.read_csv(paths[split]) - y[split] = df[target].to_numpy().astype(float) - if X_cat is not None: - X_cat[split] = df[cat_features].to_numpy().astype(str) - X_num[split] = df.drop(cat_features + [target], axis=1).to_numpy().astype(float) - - dataset = Dataset(X_num, X_cat, y, {}, None, len(np.unique(y['train']))) - return transform_dataset(dataset, T, None) - -class FastTensorDataLoader: - """ - A DataLoader-like object for a set of tensors that can be much faster than - TensorDataset + DataLoader because dataloader grabs individual indices of - the dataset and calls cat (slow). - Source: https://discuss.pytorch.org/t/dataloader-much-slower-than-manual-batching/27014/6 - """ - def __init__(self, *tensors, batch_size=32, shuffle=False): - """ - Initialize a FastTensorDataLoader. - :param *tensors: tensors to store. Must have the same length @ dim 0. - :param batch_size: batch size to load. - :param shuffle: if True, shuffle the data *in-place* whenever an - iterator is created out of this object. - :returns: A FastTensorDataLoader. 
- """ - assert all(t.shape[0] == tensors[0].shape[0] for t in tensors) - self.tensors = tensors - - self.dataset_len = self.tensors[0].shape[0] - self.batch_size = batch_size - self.shuffle = shuffle - - # Calculate # batches - n_batches, remainder = divmod(self.dataset_len, self.batch_size) - if remainder > 0: - n_batches += 1 - self.n_batches = n_batches - def __iter__(self): - if self.shuffle: - r = torch.randperm(self.dataset_len) - self.tensors = [t[r] for t in self.tensors] - self.i = 0 - return self - - def __next__(self): - if self.i >= self.dataset_len: - raise StopIteration - batch = tuple(t[self.i:self.i+self.batch_size] for t in self.tensors) - self.i += self.batch_size - return batch - - def __len__(self): - return self.n_batches - -def prepare_fast_dataloader( - D : Dataset, - split : str, - batch_size: int -): - if D.X_cat is not None: - if D.X_num is not None: - X = torch.from_numpy(np.concatenate([D.X_num[split], D.X_cat[split]], axis=1)).float() - else: - X = torch.from_numpy(D.X_cat[split]).float() - else: - X = torch.from_numpy(D.X_num[split]).float() - y = torch.from_numpy(D.y[split]) - dataloader = FastTensorDataLoader(X, y, batch_size=batch_size, shuffle=(split=='train')) - while True: - yield from dataloader - -def prepare_fast_torch_dataloader( - D : Dataset, - split : str, - batch_size: int -): - if D.X_cat is not None: - X = torch.from_numpy(np.concatenate([D.X_num[split], D.X_cat[split]], axis=1)).float() - else: - X = torch.from_numpy(D.X_num[split]).float() - y = torch.from_numpy(D.y[split]) - dataloader = FastTensorDataLoader(X, y, batch_size=batch_size, shuffle=(split=='train')) - return dataloader - -def round_columns(X_real, X_synth, columns): - for col in columns: - uniq = np.unique(X_real[:,col]) - dist = cdist(X_synth[:, col][:, np.newaxis].astype(float), uniq[:, np.newaxis].astype(float)) - X_synth[:, col] = uniq[dist.argmin(axis=1)] - return X_synth - -def concat_features(D : Dataset): - if D.X_num is None: - assert D.X_cat is not None - X = {k: pd.DataFrame(v, columns=range(D.n_features)) for k, v in D.X_cat.items()} - elif D.X_cat is None: - assert D.X_num is not None - X = {k: pd.DataFrame(v, columns=range(D.n_features)) for k, v in D.X_num.items()} - else: - X = { - part: pd.concat( - [ - pd.DataFrame(D.X_num[part], columns=range(D.n_num_features)), - pd.DataFrame( - D.X_cat[part], - columns=range(D.n_num_features, D.n_features), - ), - ], - axis=1, - ) - for part in D.y.keys() - } - - return X - -def concat_to_pd(X_num, X_cat, y): - if X_num is None: - return pd.concat([ - pd.DataFrame(X_cat, columns=list(range(X_cat.shape[1]))), - pd.DataFrame(y, columns=['y']) - ], axis=1) - if X_cat is not None: - return pd.concat([ - pd.DataFrame(X_num, columns=list(range(X_num.shape[1]))), - pd.DataFrame(X_cat, columns=list(range(X_num.shape[1], X_num.shape[1] + X_cat.shape[1]))), - pd.DataFrame(y, columns=['y']) - ], axis=1) - return pd.concat([ - pd.DataFrame(X_num, columns=list(range(X_num.shape[1]))), - pd.DataFrame(y, columns=['y']) - ], axis=1) - -def read_pure_data(path, split='train'): - y = np.load(os.path.join(path, f'y_{split}.npy'), allow_pickle=True) - X_num = None - X_cat = None - if os.path.exists(os.path.join(path, f'X_num_{split}.npy')): - X_num = np.load(os.path.join(path, f'X_num_{split}.npy'), allow_pickle=True) - if os.path.exists(os.path.join(path, f'X_cat_{split}.npy')): - X_cat = np.load(os.path.join(path, f'X_cat_{split}.npy'), allow_pickle=True) - - return X_num, X_cat, y - -def read_changed_val(path, val_size=0.2): - path = 
Path(path) - X_num_train, X_cat_train, y_train = read_pure_data(path, 'train') - X_num_val, X_cat_val, y_val = read_pure_data(path, 'val') - is_regression = load_json(path / 'info.json')['task_type'] == 'regression' - - y = np.concatenate([y_train, y_val], axis=0) - - ixs = np.arange(y.shape[0]) - if is_regression: - train_ixs, val_ixs = train_test_split(ixs, test_size=val_size, random_state=777) - else: - train_ixs, val_ixs = train_test_split(ixs, test_size=val_size, random_state=777, stratify=y) - y_train = y[train_ixs] - y_val = y[val_ixs] - - if X_num_train is not None: - X_num = np.concatenate([X_num_train, X_num_val], axis=0) - X_num_train = X_num[train_ixs] - X_num_val = X_num[val_ixs] - - if X_cat_train is not None: - X_cat = np.concatenate([X_cat_train, X_cat_val], axis=0) - X_cat_train = X_cat[train_ixs] - X_cat_val = X_cat[val_ixs] - - return X_num_train, X_cat_train, y_train, X_num_val, X_cat_val, y_val - -############# - -def load_dataset_info(dataset_dir_name: str) -> Dict[str, Any]: - path = Path("data/" + dataset_dir_name) - info = util.load_json(path / 'info.json') - info['size'] = info['train_size'] + info['val_size'] + info['test_size'] - info['n_features'] = info['n_num_features'] + info['n_cat_features'] - info['path'] = path - return info diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/.lib/deep.py b/src/synthcity/plugins/core/models/tabular_ddpm/.lib/deep.py deleted file mode 100644 index aeed3e2a..00000000 --- a/src/synthcity/plugins/core/models/tabular_ddpm/.lib/deep.py +++ /dev/null @@ -1,168 +0,0 @@ -import statistics -from dataclasses import dataclass -from typing import Any, Callable, Literal, cast - -import rtdl -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.optim as optim -import zero -from torch import Tensor - -from .util import TaskType - - -def cos_sin(x: Tensor) -> Tensor: - return torch.cat([torch.cos(x), torch.sin(x)], -1) - - -@dataclass -class PeriodicOptions: - n: int # the output size is 2 * n - sigma: float - trainable: bool - initialization: Literal['log-linear', 'normal'] - - -class Periodic(nn.Module): - def __init__(self, n_features: int, options: PeriodicOptions) -> None: - super().__init__() - if options.initialization == 'log-linear': - coefficients = options.sigma ** (torch.arange(options.n) / options.n) - coefficients = coefficients[None].repeat(n_features, 1) - else: - assert options.initialization == 'normal' - coefficients = torch.normal(0.0, options.sigma, (n_features, options.n)) - if options.trainable: - self.coefficients = nn.Parameter(coefficients) # type: ignore[code] - else: - self.register_buffer('coefficients', coefficients) - - def forward(self, x: Tensor) -> Tensor: - assert x.ndim == 2 - return cos_sin(2 * torch.pi * self.coefficients[None] * x[..., None]) - - -def get_n_parameters(m: nn.Module): - return sum(x.numel() for x in m.parameters() if x.requires_grad) - - -def get_loss_fn(task_type: TaskType) -> Callable[..., Tensor]: - return ( - F.binary_cross_entropy_with_logits - if task_type == TaskType.BINCLASS - else F.cross_entropy - if task_type == TaskType.MULTICLASS - else F.mse_loss - ) - - -def default_zero_weight_decay_condition(module_name, module, parameter_name, parameter): - del module_name, parameter - return parameter_name.endswith('bias') or isinstance( - module, - ( - nn.BatchNorm1d, - nn.LayerNorm, - nn.InstanceNorm1d, - rtdl.CLSToken, - rtdl.NumericalFeatureTokenizer, - rtdl.CategoricalFeatureTokenizer, - Periodic, - ), - ) - - -def 
split_parameters_by_weight_decay( - model: nn.Module, zero_weight_decay_condition=default_zero_weight_decay_condition -) -> list[dict[str, Any]]: - parameters_info = {} - for module_name, module in model.named_modules(): - for parameter_name, parameter in module.named_parameters(): - full_parameter_name = ( - f'{module_name}.{parameter_name}' if module_name else parameter_name - ) - parameters_info.setdefault(full_parameter_name, ([], parameter))[0].append( - zero_weight_decay_condition( - module_name, module, parameter_name, parameter - ) - ) - params_with_wd = {'params': []} - params_without_wd = {'params': [], 'weight_decay': 0.0} - for full_parameter_name, (results, parameter) in parameters_info.items(): - (params_without_wd if any(results) else params_with_wd)['params'].append( - parameter - ) - return [params_with_wd, params_without_wd] - - -def make_optimizer( - config: dict[str, Any], - parameter_groups, -) -> optim.Optimizer: - if config['optimizer'] == 'FT-Transformer-default': - return optim.AdamW(parameter_groups, lr=1e-4, weight_decay=1e-5) - return getattr(optim, config['optimizer'])( - parameter_groups, - **{x: config[x] for x in ['lr', 'weight_decay', 'momentum'] if x in config}, - ) - - -def get_lr(optimizer: optim.Optimizer) -> float: - return next(iter(optimizer.param_groups))['lr'] - - -def is_oom_exception(err: RuntimeError) -> bool: - return any( - x in str(err) - for x in [ - 'CUDA out of memory', - 'CUBLAS_STATUS_ALLOC_FAILED', - 'CUDA error: out of memory', - ] - ) - - -def train_with_auto_virtual_batch( - optimizer, - loss_fn, - step, - batch, - chunk_size: int, -) -> tuple[Tensor, int]: - batch_size = len(batch) - random_state = zero.random.get_state() - loss = None - while chunk_size != 0: - try: - zero.random.set_state(random_state) - optimizer.zero_grad() - if batch_size <= chunk_size: - loss = loss_fn(*step(batch)) - loss.backward() - else: - loss = None - for chunk in zero.iter_batches(batch, chunk_size): - chunk_loss = loss_fn(*step(chunk)) - chunk_loss = chunk_loss * (len(chunk) / batch_size) - chunk_loss.backward() - if loss is None: - loss = chunk_loss.detach() - else: - loss += chunk_loss.detach() - except RuntimeError as err: - if not is_oom_exception(err): - raise - chunk_size //= 2 - else: - break - if not chunk_size: - raise RuntimeError('Not enough memory even for batch_size=1') - optimizer.step() - return cast(Tensor, loss), chunk_size - - -def process_epoch_losses(losses: list[Tensor]) -> tuple[list[float], float]: - losses_ = torch.stack(losses).tolist() - return losses_, statistics.mean(losses_) diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/.lib/env.py b/src/synthcity/plugins/core/models/tabular_ddpm/.lib/env.py deleted file mode 100644 index 64be89d7..00000000 --- a/src/synthcity/plugins/core/models/tabular_ddpm/.lib/env.py +++ /dev/null @@ -1,39 +0,0 @@ -""" -Have not used in TabDDPM project. 
-""" - -import datetime -import os -import shutil -import typing as ty -from pathlib import Path - -PROJ = Path('tab-ddpm/').absolute().resolve() -EXP = PROJ / 'exp' -DATA = PROJ / 'data' - - -def get_path(path: ty.Union[str, Path]) -> Path: - if isinstance(path, str): - path = Path(path) - if not path.is_absolute(): - path = PROJ / path - return path.resolve() - - -def get_relative_path(path: ty.Union[str, Path]) -> Path: - return get_path(path).relative_to(PROJ) - - -def duplicate_path( - src: ty.Union[str, Path], alternative_project_dir: ty.Union[str, Path] -) -> None: - src = get_path(src) - alternative_project_dir = get_path(alternative_project_dir) - dst = alternative_project_dir / src.relative_to(PROJ) - dst.parent.mkdir(parents=True, exist_ok=True) - if dst.exists(): - dst = dst.with_name( - dst.name + '_' + datetime.datetime.now().strftime('%Y%m%dT%H%M%S') - ) - (shutil.copytree if src.is_dir() else shutil.copyfile)(src, dst) diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/.lib/metrics.py b/src/synthcity/plugins/core/models/tabular_ddpm/.lib/metrics.py deleted file mode 100644 index bdcac817..00000000 --- a/src/synthcity/plugins/core/models/tabular_ddpm/.lib/metrics.py +++ /dev/null @@ -1,158 +0,0 @@ -import enum -from typing import Any, Optional, Tuple, Dict, Union, cast -from functools import partial - -import numpy as np -import scipy.special -import sklearn.metrics as skm - -from . import util -from .util import TaskType - - -class PredictionType(enum.Enum): - LOGITS = 'logits' - PROBS = 'probs' - -class MetricsReport: - def __init__(self, report: dict, task_type: TaskType): - self._res = {k: {} for k in report.keys()} - if task_type in (TaskType.BINCLASS, TaskType.MULTICLASS): - self._metrics_names = ["acc", "f1"] - for k in report.keys(): - self._res[k]["acc"] = report[k]["accuracy"] - self._res[k]["f1"] = report[k]["macro avg"]["f1-score"] - if task_type == TaskType.BINCLASS: - self._res[k]["roc_auc"] = report[k]["roc_auc"] - self._metrics_names.append("roc_auc") - - elif task_type == TaskType.REGRESSION: - self._metrics_names = ["r2", "rmse"] - for k in report.keys(): - self._res[k]["r2"] = report[k]["r2"] - self._res[k]["rmse"] = report[k]["rmse"] - else: - raise "Unknown TaskType!" 
- - def get_splits_names(self) -> list[str]: - return self._res.keys() - - def get_metrics_names(self) -> list[str]: - return self._metrics_names - - def get_metric(self, split: str, metric: str) -> float: - return self._res[split][metric] - - def get_val_score(self) -> float: - return self._res["val"]["r2"] if "r2" in self._res["val"] else self._res["val"]["f1"] - - def get_test_score(self) -> float: - return self._res["test"]["r2"] if "r2" in self._res["test"] else self._res["test"]["f1"] - - def print_metrics(self) -> None: - res = { - "val": {k: np.around(self._res["val"][k], 4) for k in self._res["val"]}, - "test": {k: np.around(self._res["test"][k], 4) for k in self._res["test"]} - } - - print("*"*100) - print("[val]") - print(res["val"]) - print("[test]") - print(res["test"]) - - return res - -class SeedsMetricsReport: - def __init__(self): - self._reports = [] - - def add_report(self, report: MetricsReport) -> None: - self._reports.append(report) - - def get_mean_std(self) -> dict: - res = {k: {} for k in ["train", "val", "test"]} - for split in self._reports[0].get_splits_names(): - for metric in self._reports[0].get_metrics_names(): - res[split][metric] = [x.get_metric(split, metric) for x in self._reports] - - agg_res = {k: {} for k in ["train", "val", "test"]} - for split in self._reports[0].get_splits_names(): - for metric in self._reports[0].get_metrics_names(): - for k, f in [("count", len), ("mean", np.mean), ("std", np.std)]: - agg_res[split][f"{metric}-{k}"] = f(res[split][metric]) - self._res = res - self._agg_res = agg_res - - return agg_res - - def print_result(self) -> dict: - res = {split: {k: float(np.around(self._agg_res[split][k], 4)) for k in self._agg_res[split]} for split in ["val", "test"]} - print("="*100) - print("EVAL RESULTS:") - print("[val]") - print(res["val"]) - print("[test]") - print(res["test"]) - print("="*100) - return res - -def calculate_rmse( - y_true: np.ndarray, y_pred: np.ndarray, std: Optional[float] -) -> float: - rmse = skm.mean_squared_error(y_true, y_pred) ** 0.5 - if std is not None: - rmse *= std - return rmse - - -def _get_labels_and_probs( - y_pred: np.ndarray, task_type: TaskType, prediction_type: Optional[PredictionType] -) -> Tuple[np.ndarray, Optional[np.ndarray]]: - assert task_type in (TaskType.BINCLASS, TaskType.MULTICLASS) - - if prediction_type is None: - return y_pred, None - - if prediction_type == PredictionType.LOGITS: - probs = ( - scipy.special.expit(y_pred) - if task_type == TaskType.BINCLASS - else scipy.special.softmax(y_pred, axis=1) - ) - elif prediction_type == PredictionType.PROBS: - probs = y_pred - else: - util.raise_unknown('prediction_type', prediction_type) - - assert probs is not None - labels = np.round(probs) if task_type == TaskType.BINCLASS else probs.argmax(axis=1) - return labels.astype('int64'), probs - - -def calculate_metrics( - y_true: np.ndarray, - y_pred: np.ndarray, - task_type: Union[str, TaskType], - prediction_type: Optional[Union[str, PredictionType]], - y_info: Dict[str, Any], -) -> Dict[str, Any]: - # Example: calculate_metrics(y_true, y_pred, 'binclass', 'logits', {}) - task_type = TaskType(task_type) - if prediction_type is not None: - prediction_type = PredictionType(prediction_type) - - if task_type == TaskType.REGRESSION: - assert prediction_type is None - assert 'std' in y_info - rmse = calculate_rmse(y_true, y_pred, y_info['std']) - r2 = skm.r2_score(y_true, y_pred) - result = {'rmse': rmse, 'r2': r2} - else: - labels, probs = _get_labels_and_probs(y_pred, task_type, 
prediction_type) - result = cast( - Dict[str, Any], skm.classification_report(y_true, labels, output_dict=True) - ) - if task_type == TaskType.BINCLASS: - result['roc_auc'] = skm.roc_auc_score(y_true, probs) - return result diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/.lib/util.py b/src/synthcity/plugins/core/models/tabular_ddpm/.lib/util.py deleted file mode 100644 index 75e05c9c..00000000 --- a/src/synthcity/plugins/core/models/tabular_ddpm/.lib/util.py +++ /dev/null @@ -1,433 +0,0 @@ -import argparse -import atexit -import enum -import json -import os -import pickle -import shutil -import sys -import time -import uuid -from copy import deepcopy -from dataclasses import asdict, fields, is_dataclass -from pathlib import Path -from pprint import pprint -from typing import Any, Callable, List, Dict, Type, Optional, Tuple, TypeVar, Union, cast, get_args, get_origin - -import __main__ -import numpy as np -import tomli -import tomli_w -import torch -import zero - -from . import env - -RawConfig = Dict[str, Any] -Report = Dict[str, Any] -T = TypeVar('T') - - -class Part(enum.Enum): - TRAIN = 'train' - VAL = 'val' - TEST = 'test' - - def __str__(self) -> str: - return self.value - - -class TaskType(enum.Enum): - BINCLASS = 'binclass' - MULTICLASS = 'multiclass' - REGRESSION = 'regression' - - def __str__(self) -> str: - return self.value - - -class Timer(zero.Timer): - @classmethod - def launch(cls) -> 'Timer': - timer = cls() - timer.run() - return timer - - -def update_training_log(training_log, data, metrics): - def _update(log_part, data_part): - for k, v in data_part.items(): - if isinstance(v, dict): - _update(log_part.setdefault(k, {}), v) - elif isinstance(v, list): - log_part.setdefault(k, []).extend(v) - else: - log_part.setdefault(k, []).append(v) - - _update(training_log, data) - transposed_metrics = {} - for part, part_metrics in metrics.items(): - for metric_name, value in part_metrics.items(): - transposed_metrics.setdefault(metric_name, {})[part] = value - _update(training_log, transposed_metrics) - - -def raise_unknown(unknown_what: str, unknown_value: Any): - raise ValueError(f'Unknown {unknown_what}: {unknown_value}') - - -def _replace(data, condition, value): - def do(x): - if isinstance(x, dict): - return {k: do(v) for k, v in x.items()} - elif isinstance(x, list): - return [do(y) for y in x] - else: - return value if condition(x) else x - - return do(data) - - -_CONFIG_NONE = '__none__' - - -def unpack_config(config: RawConfig) -> RawConfig: - config = cast(RawConfig, _replace(config, lambda x: x == _CONFIG_NONE, None)) - return config - - -def pack_config(config: RawConfig) -> RawConfig: - config = cast(RawConfig, _replace(config, lambda x: x is None, _CONFIG_NONE)) - return config - - -def load_config(path: Union[Path, str]) -> Any: - with open(path, 'rb') as f: - return unpack_config(tomli.load(f)) - - -def dump_config(config: Any, path: Union[Path, str]) -> None: - with open(path, 'wb') as f: - tomli_w.dump(pack_config(config), f) - # check that there are no bugs in all these "pack/unpack" things - assert config == load_config(path) - - -def load_json(path: Union[Path, str], **kwargs) -> Any: - return json.loads(Path(path).read_text(), **kwargs) - - -def dump_json(x: Any, path: Union[Path, str], **kwargs) -> None: - kwargs.setdefault('indent', 4) - Path(path).write_text(json.dumps(x, **kwargs) + '\n') - - -def load_pickle(path: Union[Path, str], **kwargs) -> Any: - return pickle.loads(Path(path).read_bytes(), **kwargs) - - -def dump_pickle(x: Any, path: 
Union[Path, str], **kwargs) -> None: - Path(path).write_bytes(pickle.dumps(x, **kwargs)) - - -def load(path: Union[Path, str], **kwargs) -> Any: - return globals()[f'load_{Path(path).suffix[1:]}'](Path(path), **kwargs) - - -def dump(x: Any, path: Union[Path, str], **kwargs) -> Any: - return globals()[f'dump_{Path(path).suffix[1:]}'](x, Path(path), **kwargs) - - -def _get_output_item_path( - path: Union[str, Path], filename: str, must_exist: bool -) -> Path: - path = env.get_path(path) - if path.suffix == '.toml': - path = path.with_suffix('') - if path.is_dir(): - path = path / filename - else: - assert path.name == filename - assert path.parent.exists() - if must_exist: - assert path.exists() - return path - - -def load_report(path: Path) -> Report: - return load_json(_get_output_item_path(path, 'report.json', True)) - - -def dump_report(report: dict, path: Path) -> None: - dump_json(report, _get_output_item_path(path, 'report.json', False)) - - -def load_predictions(path: Path) -> Dict[str, np.ndarray]: - with np.load(_get_output_item_path(path, 'predictions.npz', True)) as predictions: - return {x: predictions[x] for x in predictions} - - -def dump_predictions(predictions: Dict[str, np.ndarray], path: Path) -> None: - np.savez(_get_output_item_path(path, 'predictions.npz', False), **predictions) - - -def dump_metrics(metrics: Dict[str, Any], path: Path) -> None: - dump_json(metrics, _get_output_item_path(path, 'metrics.json', False)) - - -def load_checkpoint(path: Path, *args, **kwargs) -> Dict[str, np.ndarray]: - return torch.load( - _get_output_item_path(path, 'checkpoint.pt', True), *args, **kwargs - ) - - -def get_device() -> torch.device: - if torch.cuda.is_available(): - assert os.environ.get('CUDA_VISIBLE_DEVICES') is not None - return torch.device('cuda:0') - else: - return torch.device('cpu') - - -def _print_sep(c, size=100): - print(c * size) - - -def start( - config_cls: Type[T] = RawConfig, - argv: Optional[List[str]] = None, - patch_raw_config: Optional[Callable[[RawConfig], None]] = None, -) -> Tuple[T, Path, Report]: # config # output dir # report - parser = argparse.ArgumentParser() - parser.add_argument('config', metavar='FILE') - parser.add_argument('--force', action='store_true') - parser.add_argument('--continue', action='store_true', dest='continue_') - if argv is None: - program = __main__.__file__ - args = parser.parse_args() - else: - program = argv[0] - try: - args = parser.parse_args(argv[1:]) - except Exception: - print( - 'Failed to parse `argv`.' - ' Remember that the first item of `argv` must be the path (relative to' - ' the project root) to the script/notebook.' 
- ) - raise - args = parser.parse_args(argv) - - snapshot_dir = os.environ.get('SNAPSHOT_PATH') - if snapshot_dir and Path(snapshot_dir).joinpath('CHECKPOINTS_RESTORED').exists(): - assert args.continue_ - - config_path = env.get_path(args.config) - output_dir = config_path.with_suffix('') - _print_sep('=') - print(f'[output] {output_dir}') - _print_sep('=') - - assert config_path.exists() - raw_config = load_config(config_path) - if patch_raw_config is not None: - patch_raw_config(raw_config) - if is_dataclass(config_cls): - config = from_dict(config_cls, raw_config) - full_raw_config = asdict(config) - else: - assert config_cls is dict - full_raw_config = config = raw_config - full_raw_config = asdict(config) - - if output_dir.exists(): - if args.force: - print('Removing the existing output and creating a new one...') - shutil.rmtree(output_dir) - output_dir.mkdir() - elif not args.continue_: - backup_output(output_dir) - print('The output directory already exists. Done!\n') - sys.exit() - elif output_dir.joinpath('DONE').exists(): - backup_output(output_dir) - print('The "DONE" file already exists. Done!') - sys.exit() - else: - print('Continuing with the existing output...') - else: - print('Creating the output...') - output_dir.mkdir() - - report = { - 'program': str(env.get_relative_path(program)), - 'environment': {}, - 'config': full_raw_config, - } - if torch.cuda.is_available(): # type: ignore[code] - report['environment'].update( - { - 'CUDA_VISIBLE_DEVICES': os.environ.get('CUDA_VISIBLE_DEVICES'), - 'gpus': zero.hardware.get_gpus_info(), - 'torch.version.cuda': torch.version.cuda, - 'torch.backends.cudnn.version()': torch.backends.cudnn.version(), # type: ignore[code] - 'torch.cuda.nccl.version()': torch.cuda.nccl.version(), # type: ignore[code] - } - ) - dump_report(report, output_dir) - dump_json(raw_config, output_dir / 'raw_config.json') - _print_sep('-') - pprint(full_raw_config, width=100) - _print_sep('-') - return cast(config_cls, config), output_dir, report - - -_LAST_SNAPSHOT_TIME = None - - -def backup_output(output_dir: Path) -> None: - backup_dir = os.environ.get('TMP_OUTPUT_PATH') - snapshot_dir = os.environ.get('SNAPSHOT_PATH') - if backup_dir is None: - assert snapshot_dir is None - return - assert snapshot_dir is not None - - try: - relative_output_dir = output_dir.relative_to(env.PROJ) - except ValueError: - return - - for dir_ in [backup_dir, snapshot_dir]: - new_output_dir = dir_ / relative_output_dir - prev_backup_output_dir = new_output_dir.with_name(new_output_dir.name + '_prev') - new_output_dir.parent.mkdir(exist_ok=True, parents=True) - if new_output_dir.exists(): - new_output_dir.rename(prev_backup_output_dir) - shutil.copytree(output_dir, new_output_dir) - # the case for evaluate.py which automatically creates configs - if output_dir.with_suffix('.toml').exists(): - shutil.copyfile( - output_dir.with_suffix('.toml'), new_output_dir.with_suffix('.toml') - ) - if prev_backup_output_dir.exists(): - shutil.rmtree(prev_backup_output_dir) - - global _LAST_SNAPSHOT_TIME - if _LAST_SNAPSHOT_TIME is None or time.time() - _LAST_SNAPSHOT_TIME > 10 * 60: - import nirvana_dl.snapshot # type: ignore[code] - - nirvana_dl.snapshot.dump_snapshot() - _LAST_SNAPSHOT_TIME = time.time() - print('The snapshot was saved!') - - -def _get_scores(metrics: Dict[str, Dict[str, Any]]) -> Optional[Dict[str, float]]: - return ( - {k: v['score'] for k, v in metrics.items()} - if 'score' in next(iter(metrics.values())) - else None - ) - - -def format_scores(metrics: Dict[str, 
Dict[str, Any]]) -> str: - return ' '.join( - f"[{x}] {metrics[x]['score']:.3f}" - for x in ['test', 'val', 'train'] - if x in metrics - ) - - -def finish(output_dir: Path, report: dict) -> None: - print() - _print_sep('=') - - metrics = report.get('metrics') - if metrics is not None: - scores = _get_scores(metrics) - if scores is not None: - dump_json(scores, output_dir / 'scores.json') - print(format_scores(metrics)) - _print_sep('-') - - dump_report(report, output_dir) - json_output_path = os.environ.get('JSON_OUTPUT_FILE') - if json_output_path: - try: - key = str(output_dir.relative_to(env.PROJ)) - except ValueError: - pass - else: - json_output_path = Path(json_output_path) - try: - json_data = json.loads(json_output_path.read_text()) - except (FileNotFoundError, json.decoder.JSONDecodeError): - json_data = {} - json_data[key] = load_json(output_dir / 'report.json') - json_output_path.write_text(json.dumps(json_data, indent=4)) - shutil.copyfile( - json_output_path, - os.path.join(os.environ['SNAPSHOT_PATH'], 'json_output.json'), - ) - - output_dir.joinpath('DONE').touch() - backup_output(output_dir) - print(f'Done! | {report.get("time")} | {output_dir}') - _print_sep('=') - print() - - -def from_dict(datacls: Type[T], data: dict) -> T: - assert is_dataclass(datacls) - data = deepcopy(data) - for field in fields(datacls): - if field.name not in data: - continue - if is_dataclass(field.type): - data[field.name] = from_dict(field.type, data[field.name]) - elif ( - get_origin(field.type) is Union - and len(get_args(field.type)) == 2 - and get_args(field.type)[1] is type(None) - and is_dataclass(get_args(field.type)[0]) - ): - if data[field.name] is not None: - data[field.name] = from_dict(get_args(field.type)[0], data[field.name]) - return datacls(**data) - - -def replace_factor_with_value( - config: RawConfig, - key: str, - reference_value: int, - bounds: Tuple[float, float], -) -> None: - factor_key = key + '_factor' - if factor_key not in config: - assert key in config - else: - assert key not in config - factor = config.pop(factor_key) - assert bounds[0] <= factor <= bounds[1] - config[key] = int(factor * reference_value) - - -def get_temporary_copy(path: Union[str, Path]) -> Path: - path = env.get_path(path) - assert not path.is_dir() and not path.is_symlink() - tmp_path = path.with_name( - path.stem + '___' + str(uuid.uuid4()).replace('-', '') + path.suffix - ) - shutil.copyfile(path, tmp_path) - atexit.register(lambda: tmp_path.unlink()) - return tmp_path - - -def get_python(): - python = Path('python3.9') - return str(python) if python.exists() else 'python' - -def get_catboost_config(real_data_path, is_cv=False): - ds_name = Path(real_data_path).name - C = load_json(f'tuned_models/catboost/{ds_name}_cv.json') - return C \ No newline at end of file diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/.pipeline.py b/src/synthcity/plugins/core/models/tabular_ddpm/.pipeline.py deleted file mode 100644 index f6855f6b..00000000 --- a/src/synthcity/plugins/core/models/tabular_ddpm/.pipeline.py +++ /dev/null @@ -1,80 +0,0 @@ -import tomli -import shutil -import os -import argparse -from train import train -from sample import sample -import pandas as pd -import matplotlib.pyplot as plt -import zero -import lib -import torch - -def load_config(path) : - with open(path, 'rb') as f: - return tomli.load(f) - -def save_file(parent_dir, config_path): - try: - dst = os.path.join(parent_dir) - os.makedirs(os.path.dirname(dst), exist_ok=True) - 
shutil.copyfile(os.path.abspath(config_path), dst) - except shutil.SameFileError: - pass - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument('--config', metavar='FILE') - parser.add_argument('--train', action='store_true', default=False) - parser.add_argument('--sample', action='store_true', default=False) - parser.add_argument('--eval', action='store_true', default=False) - parser.add_argument('--change_val', action='store_true', default=False) - - args = parser.parse_args() - raw_config = lib.load_config(args.config) - if 'device' in raw_config: - device = torch.device(raw_config['device']) - else: - device = torch.device('cuda:1') - - timer = zero.Timer() - timer.run() - save_file(os.path.join(raw_config['parent_dir'], 'config.toml'), args.config) - - if args.train: - train( - **raw_config['train']['main'], - **raw_config['diffusion_params'], - parent_dir=raw_config['parent_dir'], - real_data_path=raw_config['real_data_path'], - model_type=raw_config['model_type'], - model_params=raw_config['model_params'], - T_dict=raw_config['train']['T'], - num_numerical_features=raw_config['num_numerical_features'], - device=device, - change_val=args.change_val - ) - if args.sample: - sample( - num_samples=raw_config['sample']['num_samples'], - batch_size=raw_config['sample']['batch_size'], - disbalance=raw_config['sample'].get('disbalance', None), - **raw_config['diffusion_params'], - parent_dir=raw_config['parent_dir'], - real_data_path=raw_config['real_data_path'], - model_path=os.path.join(raw_config['parent_dir'], 'model.pt'), - model_type=raw_config['model_type'], - model_params=raw_config['model_params'], - T_dict=raw_config['train']['T'], - num_numerical_features=raw_config['num_numerical_features'], - device=device, - seed=raw_config['sample'].get('seed', 0), - change_val=args.change_val - ) - - save_file(os.path.join(raw_config['parent_dir'], 'info.json'), os.path.join(raw_config['real_data_path'], 'info.json')) - - print(f'Elapsed time: {str(timer)}') - -if __name__ == '__main__': - main() \ No newline at end of file diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/.sample.py b/src/synthcity/plugins/core/models/tabular_ddpm/.sample.py deleted file mode 100644 index abc68162..00000000 --- a/src/synthcity/plugins/core/models/tabular_ddpm/.sample.py +++ /dev/null @@ -1,159 +0,0 @@ -import torch -import numpy as np -import zero -import os -from .gaussian_multinomial_diffsuion import GaussianMultinomialDiffusion -from .utils import FoundNANsError -from utils_train import get_model, make_dataset -from .lib import round_columns -import lib - -def to_good_ohe(ohe, X): - indices = np.cumsum([0] + ohe._n_features_outs) - Xres = [] - for i in range(1, len(indices)): - x_ = np.max(X[:, indices[i - 1]:indices[i]], axis=1) - t = X[:, indices[i - 1]:indices[i]] - x_.reshape(-1, 1) - Xres.append(np.where(t >= 0, 1, 0)) - return np.hstack(Xres) - -def sample( - parent_dir, - real_data_path = 'data/higgs-small', - batch_size = 2000, - num_samples = 0, - model_type = 'mlp', - model_params = None, - model_path = None, - num_timesteps = 1000, - gaussian_loss_type = 'mse', - scheduler = 'cosine', - T_dict = None, - num_numerical_features = 0, - disbalance = None, - device = torch.device('cuda:1'), - seed = 0, - change_val = False -): - zero.improve_reproducibility(seed) - - T = lib.Transformations(**T_dict) - D = make_dataset( - real_data_path, - T, - num_classes=model_params['num_classes'], - is_y_cond=model_params['is_y_cond'], - change_val=change_val - ) - - K = 
np.array(D.get_category_sizes('train')) - if len(K) == 0 or T_dict['cat_encoding'] == 'one-hot': - K = np.array([0]) - - num_numerical_features_ = D.X_num['train'].shape[1] if D.X_num is not None else 0 - d_in = np.sum(K) + num_numerical_features_ - model_params['d_in'] = int(d_in) - model = get_model( - model_type, - model_params, - num_numerical_features_, - category_sizes=D.get_category_sizes('train') - ) - - model.load_state_dict( - torch.load(model_path, map_location="cpu") - ) - - diffusion = GaussianMultinomialDiffusion( - K, - num_numerical_features=num_numerical_features_, - denoise_fn=model, num_timesteps=num_timesteps, - gaussian_loss_type=gaussian_loss_type, scheduler=scheduler, device=device - ) - - diffusion.to(device) - diffusion.eval() - - _, empirical_class_dist = torch.unique(torch.from_numpy(D.y['train']), return_counts=True) - # empirical_class_dist = empirical_class_dist.float() + torch.tensor([-5000., 10000.]).float() - if disbalance == 'fix': - empirical_class_dist[0], empirical_class_dist[1] = empirical_class_dist[1], empirical_class_dist[0] - x_gen, y_gen = diffusion.sample_all(num_samples, batch_size, empirical_class_dist.float(), ddim=False) - - elif disbalance == 'fill': - ix_major = empirical_class_dist.argmax().item() - val_major = empirical_class_dist[ix_major].item() - x_gen, y_gen = [], [] - for i in range(empirical_class_dist.shape[0]): - if i == ix_major: - continue - distrib = torch.zeros_like(empirical_class_dist) - distrib[i] = 1 - num_samples = val_major - empirical_class_dist[i].item() - x_temp, y_temp = diffusion.sample_all(num_samples, batch_size, distrib.float(), ddim=False) - x_gen.append(x_temp) - y_gen.append(y_temp) - - x_gen = torch.cat(x_gen, dim=0) - y_gen = torch.cat(y_gen, dim=0) - - else: - x_gen, y_gen = diffusion.sample_all(num_samples, batch_size, empirical_class_dist.float(), ddim=False) - - - # try: - # except FoundNANsError as ex: - # print("Found NaNs during sampling!") - # loader = lib.prepare_fast_dataloader(D, 'train', 8) - # x_gen = next(loader)[0] - # y_gen = torch.multinomial( - # empirical_class_dist.float(), - # num_samples=8, - # replacement=True - # ) - X_gen, y_gen = x_gen.numpy(), y_gen.numpy() - - ### - # X_num_unnorm = X_gen[:, :num_numerical_features] - # lo = np.percentile(X_num_unnorm, 2.5, axis=0) - # hi = np.percentile(X_num_unnorm, 97.5, axis=0) - # idx = (lo < X_num_unnorm) & (hi > X_num_unnorm) - # X_gen = X_gen[np.all(idx, axis=1)] - # y_gen = y_gen[np.all(idx, axis=1)] - ### - - num_numerical_features = num_numerical_features + int(D.is_regression and not model_params["is_y_cond"]) - - X_num_ = X_gen - if num_numerical_features < X_gen.shape[1]: - np.save(os.path.join(parent_dir, 'X_cat_unnorm'), X_gen[:, num_numerical_features:]) - # _, _, cat_encoder = lib.cat_encode({'train': X_cat_real}, T_dict['cat_encoding'], y_real, T_dict['seed'], True) - if T_dict['cat_encoding'] == 'one-hot': - X_gen[:, num_numerical_features:] = to_good_ohe(D.cat_transform.steps[0][1], X_num_[:, num_numerical_features:]) - X_cat = D.cat_transform.inverse_transform(X_gen[:, num_numerical_features:]) - - if num_numerical_features_ != 0: - # _, normalize = lib.normalize({'train' : X_num_real}, T_dict['normalization'], T_dict['seed'], True) - np.save(os.path.join(parent_dir, 'X_num_unnorm'), X_gen[:, :num_numerical_features]) - X_num_ = D.num_transform.inverse_transform(X_gen[:, :num_numerical_features]) - X_num = X_num_[:, :num_numerical_features] - - X_num_real = np.load(os.path.join(real_data_path, "X_num_train.npy"), 
allow_pickle=True) - disc_cols = [] - for col in range(X_num_real.shape[1]): - uniq_vals = np.unique(X_num_real[:, col]) - if len(uniq_vals) <= 32 and ((uniq_vals - np.round(uniq_vals)) == 0).all(): - disc_cols.append(col) - print("Discrete cols:", disc_cols) - if model_params['num_classes'] == 0: - y_gen = X_num[:, 0] - X_num = X_num[:, 1:] - if len(disc_cols): - X_num = round_columns(X_num_real, X_num, disc_cols) - - if num_numerical_features != 0: - print("Num shape: ", X_num.shape) - np.save(os.path.join(parent_dir, 'X_num_train'), X_num) - if num_numerical_features < X_gen.shape[1]: - np.save(os.path.join(parent_dir, 'X_cat_train'), X_cat) - np.save(os.path.join(parent_dir, 'y_train'), y_gen) \ No newline at end of file diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/.train.py b/src/synthcity/plugins/core/models/tabular_ddpm/.train.py deleted file mode 100644 index 85cac744..00000000 --- a/src/synthcity/plugins/core/models/tabular_ddpm/.train.py +++ /dev/null @@ -1,156 +0,0 @@ -from copy import deepcopy -import torch -import os -import numpy as np -import zero -from .gaussian_multinomial_diffsuion import GaussianMultinomialDiffusion -from utils_train import get_model, make_dataset, update_ema -from . import lib -import pandas as pd - -class Trainer: - def __init__(self, diffusion, train_iter, lr, weight_decay, steps, device=torch.device('cuda:1')): - self.diffusion = diffusion - self.ema_model = deepcopy(self.diffusion._denoise_fn) - for param in self.ema_model.parameters(): - param.detach_() - - self.train_iter = train_iter - self.steps = steps - self.init_lr = lr - self.optimizer = torch.optim.AdamW(self.diffusion.parameters(), lr=lr, weight_decay=weight_decay) - self.device = device - self.loss_history = pd.DataFrame(columns=['step', 'mloss', 'gloss', 'loss']) - self.log_every = 100 - self.print_every = 500 - self.ema_every = 1000 - - def _anneal_lr(self, step): - frac_done = step / self.steps - lr = self.init_lr * (1 - frac_done) - for param_group in self.optimizer.param_groups: - param_group["lr"] = lr - - def _run_step(self, x, out_dict): - x = x.to(self.device) - for k in out_dict: - out_dict[k] = out_dict[k].long().to(self.device) - self.optimizer.zero_grad() - loss_multi, loss_gauss = self.diffusion.mixed_loss(x, out_dict) - loss = loss_multi + loss_gauss - loss.backward() - self.optimizer.step() - - return loss_multi, loss_gauss - - def run_loop(self): - step = 0 - curr_loss_multi = 0.0 - curr_loss_gauss = 0.0 - - curr_count = 0 - while step < self.steps: - x, out_dict = next(self.train_iter) - out_dict = {'y': out_dict} - batch_loss_multi, batch_loss_gauss = self._run_step(x, out_dict) - - self._anneal_lr(step) - - curr_count += len(x) - curr_loss_multi += batch_loss_multi.item() * len(x) - curr_loss_gauss += batch_loss_gauss.item() * len(x) - - if (step + 1) % self.log_every == 0: - mloss = np.around(curr_loss_multi / curr_count, 4) - gloss = np.around(curr_loss_gauss / curr_count, 4) - if (step + 1) % self.print_every == 0: - print(f'Step {(step + 1)}/{self.steps} MLoss: {mloss} GLoss: {gloss} Sum: {mloss + gloss}') - self.loss_history.loc[len(self.loss_history)] =[step + 1, mloss, gloss, mloss + gloss] - curr_count = 0 - curr_loss_gauss = 0.0 - curr_loss_multi = 0.0 - - update_ema(self.ema_model.parameters(), self.diffusion._denoise_fn.parameters()) - - step += 1 - -def train( - parent_dir, - real_data_path = 'data/higgs-small', - steps = 1000, - lr = 0.002, - weight_decay = 1e-4, - batch_size = 1024, - model_type = 'mlp', - model_params = None, - 
num_timesteps = 1000, - gaussian_loss_type = 'mse', - scheduler = 'cosine', - T_dict = None, - num_numerical_features = 0, - device = torch.device('cuda:1'), - seed = 0, - change_val = False -): - real_data_path = os.path.normpath(real_data_path) - parent_dir = os.path.normpath(parent_dir) - - zero.improve_reproducibility(seed) - - T = lib.Transformations(**T_dict) - - dataset = make_dataset( - real_data_path, - T, - num_classes=model_params['num_classes'], - is_y_cond=model_params['is_y_cond'], - change_val=change_val - ) - - K = np.array(dataset.get_category_sizes('train')) - if len(K) == 0 or T_dict['cat_encoding'] == 'one-hot': - K = np.array([0]) - print(K) - - num_numerical_features = dataset.X_num['train'].shape[1] if dataset.X_num is not None else 0 - d_in = np.sum(K) + num_numerical_features - model_params['d_in'] = d_in - print(d_in) - - print(model_params) - model = get_model( - model_type, - model_params, - num_numerical_features, - category_sizes=dataset.get_category_sizes('train') - ) - model.to(device) - - # train_loader = lib.prepare_beton_loader(dataset, split='train', batch_size=batch_size) - train_loader = lib.prepare_fast_dataloader(dataset, split='train', batch_size=batch_size) - - diffusion = GaussianMultinomialDiffusion( - num_classes=K, - num_numerical_features=num_numerical_features, - denoise_fn=model, - gaussian_loss_type=gaussian_loss_type, - num_timesteps=num_timesteps, - scheduler=scheduler, - device=device - ) - diffusion.to(device) - diffusion.train() - - trainer = Trainer( - diffusion, - train_loader, - lr=lr, - weight_decay=weight_decay, - steps=steps, - device=device - ) - trainer.run_loop() - - trainer.loss_history.to_csv(os.path.join(parent_dir, 'loss.csv'), index=False) - torch.save(diffusion._denoise_fn.state_dict(), os.path.join(parent_dir, 'model.pt')) - torch.save(trainer.ema_model.state_dict(), os.path.join(parent_dir, 'model_ema.pt')) diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/.tune.py b/src/synthcity/plugins/core/models/tabular_ddpm/.tune.py deleted file mode 100644 index 5a95dc23..00000000 --- a/src/synthcity/plugins/core/models/tabular_ddpm/.tune.py +++ /dev/null @@ -1,127 +0,0 @@ -import subprocess -import lib -import os -import optuna -from copy import deepcopy -import shutil -import argparse -from pathlib import Path - -parser = argparse.ArgumentParser() -parser.add_argument('ds_name', type=str) -parser.add_argument('train_size', type=int) -parser.add_argument('eval_type', type=str) -parser.add_argument('eval_model', type=str) -parser.add_argument('prefix', type=str) -parser.add_argument('--eval_seeds', action='store_true', default=False) - -args = parser.parse_args() -train_size = args.train_size -ds_name = args.ds_name -eval_type = args.eval_type -assert eval_type in ('merged', 'synthetic') -prefix = str(args.prefix) - -pipeline = f'scripts/pipeline.py' -base_config_path = f'exp/{ds_name}/config.toml' -parent_path = Path(f'exp/{ds_name}/') -exps_path = Path(f'exp/{ds_name}/many-exps/') # temporary dir. 
maybe will be replaced with tempdiвdr -eval_seeds = f'scripts/eval_seeds.py' - -os.makedirs(exps_path, exist_ok=True) - -def _suggest_mlp_layers(trial): - def suggest_dim(name): - t = trial.suggest_int(name, d_min, d_max) - return 2 ** t - min_n_layers, max_n_layers, d_min, d_max = 1, 4, 7, 10 - n_layers = 2 * trial.suggest_int('n_layers', min_n_layers, max_n_layers) - d_first = [suggest_dim('d_first')] if n_layers else [] - d_middle = ( - [suggest_dim('d_middle')] * (n_layers - 2) - if n_layers > 2 - else [] - ) - d_last = [suggest_dim('d_last')] if n_layers > 1 else [] - d_layers = d_first + d_middle + d_last - return d_layers - -def objective(trial): - - lr = trial.suggest_loguniform('lr', 0.00001, 0.003) - d_layers = _suggest_mlp_layers(trial) - weight_decay = 0.0 - batch_size = trial.suggest_categorical('batch_size', [256, 4096]) - steps = trial.suggest_categorical('steps', [5000, 20000, 30000]) - # steps = trial.suggest_categorical('steps', [500]) # for debug - gaussian_loss_type = 'mse' - # scheduler = trial.suggest_categorical('scheduler', ['cosine', 'linear']) - num_timesteps = trial.suggest_categorical('num_timesteps', [100, 1000]) - num_samples = int(train_size * (2 ** trial.suggest_int('num_samples', -2, 1))) - - base_config = lib.load_config(base_config_path) - - base_config['train']['main']['lr'] = lr - base_config['train']['main']['steps'] = steps - base_config['train']['main']['batch_size'] = batch_size - base_config['train']['main']['weight_decay'] = weight_decay - base_config['model_params']['rtdl_params']['d_layers'] = d_layers - base_config['eval']['type']['eval_type'] = eval_type - base_config['sample']['num_samples'] = num_samples - base_config['diffusion_params']['gaussian_loss_type'] = gaussian_loss_type - base_config['diffusion_params']['num_timesteps'] = num_timesteps - # base_config['diffusion_params']['scheduler'] = scheduler - - base_config['parent_dir'] = str(exps_path / f"{trial.number}") - base_config['eval']['type']['eval_model'] = args.eval_model - if args.eval_model == "mlp": - base_config['eval']['T']['normalization'] = "quantile" - base_config['eval']['T']['cat_encoding'] = "one-hot" - - trial.set_user_attr("config", base_config) - - lib.dump_config(base_config, exps_path / 'config.toml') - - subprocess.run(['python3.9', f'{pipeline}', '--config', f'{exps_path / "config.toml"}', '--train', '--change_val'], check=True) - - n_datasets = 5 - score = 0.0 - - for sample_seed in range(n_datasets): - base_config['sample']['seed'] = sample_seed - lib.dump_config(base_config, exps_path / 'config.toml') - - subprocess.run(['python3.9', f'{pipeline}', '--config', f'{exps_path / "config.toml"}', '--sample', '--eval', '--change_val'], check=True) - - report_path = str(Path(base_config['parent_dir']) / f'results_{args.eval_model}.json') - report = lib.load_json(report_path) - - if 'r2' in report['metrics']['val']: - score += report['metrics']['val']['r2'] - else: - score += report['metrics']['val']['macro avg']['f1-score'] - - shutil.rmtree(exps_path / f"{trial.number}") - - return score / n_datasets - -study = optuna.create_study( - direction='maximize', - sampler=optuna.samplers.TPESampler(seed=0), -) - -study.optimize(objective, n_trials=50, show_progress_bar=True) - -best_config_path = parent_path / f'{prefix}_best/config.toml' -best_config = study.best_trial.user_attrs['config'] -best_config["parent_dir"] = str(parent_path / f'{prefix}_best/') - -os.makedirs(parent_path / f'{prefix}_best', exist_ok=True) -lib.dump_config(best_config, best_config_path) 
-lib.dump_json(optuna.importance.get_param_importances(study), parent_path / f'{prefix}_best/importance.json') - -subprocess.run(['python3.9', f'{pipeline}', '--config', f'{best_config_path}', '--train', '--sample'], check=True) - -if args.eval_seeds: - best_exp = str(parent_path / f'{prefix}_best/config.toml') - subprocess.run(['python3.9', f'{eval_seeds}', '--config', f'{best_exp}', '10', "ddpm", eval_type, args.eval_model, '5'], check=True) \ No newline at end of file diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/.utils_train.py b/src/synthcity/plugins/core/models/tabular_ddpm/.utils_train.py deleted file mode 100644 index 3062b15d..00000000 --- a/src/synthcity/plugins/core/models/tabular_ddpm/.utils_train.py +++ /dev/null @@ -1,88 +0,0 @@ -import numpy as np -import os -import lib -from .modules import MLPDiffusion, ResNetDiffusion - -def get_model( - model_name, - model_params, - n_num_features, - category_sizes -): - if model_name == 'mlp': - model = MLPDiffusion(**model_params) - elif model_name == 'resnet': - model = ResNetDiffusion(**model_params) - else: - raise "Unknown model!" - return model - -def update_ema(target_params, source_params, rate=0.999): - """ - Update target parameters to be closer to those of source parameters using - an exponential moving average. - :param target_params: the target parameter sequence. - :param source_params: the source parameter sequence. - :param rate: the EMA rate (closer to 1 means slower). - """ - for targ, src in zip(target_params, source_params): - targ.detach().mul_(rate).add_(src.detach(), alpha=1 - rate) - -def concat_y_to_X(X, y): - if X is None: - return y.reshape(-1, 1) - return np.concatenate([y.reshape(-1, 1), X], axis=1) - -def make_dataset( - data_path: str, - T: lib.Transformations, - num_classes: int, - is_y_cond: bool, - change_val: bool -): - # classification - if num_classes > 0: - X_cat = {} if os.path.exists(os.path.join(data_path, 'X_cat_train.npy')) or not is_y_cond else None - X_num = {} if os.path.exists(os.path.join(data_path, 'X_num_train.npy')) else None - y = {} - - for split in ['train', 'val', 'test']: - X_num_t, X_cat_t, y_t = lib.read_pure_data(data_path, split) - if X_num is not None: - X_num[split] = X_num_t - if not is_y_cond: - X_cat_t = concat_y_to_X(X_cat_t, y_t) - if X_cat is not None: - X_cat[split] = X_cat_t - y[split] = y_t - else: - # regression - X_cat = {} if os.path.exists(os.path.join(data_path, 'X_cat_train.npy')) else None - X_num = {} if os.path.exists(os.path.join(data_path, 'X_num_train.npy')) or not is_y_cond else None - y = {} - - for split in ['train', 'val', 'test']: - X_num_t, X_cat_t, y_t = lib.read_pure_data(data_path, split) - if not is_y_cond: - X_num_t = concat_y_to_X(X_num_t, y_t) - if X_num is not None: - X_num[split] = X_num_t - if X_cat is not None: - X_cat[split] = X_cat_t - y[split] = y_t - - info = lib.load_json(os.path.join(data_path, 'info.json')) - - D = lib.Dataset( - X_num, - X_cat, - y, - y_info={}, - task_type=lib.TaskType(info['task_type']), - n_classes=info.get('n_classes') - ) - - if change_val: - D = lib.change_val(D) - - return lib.transform_dataset(D, T, None) \ No newline at end of file diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/README.md b/src/synthcity/plugins/core/models/tabular_ddpm/README.md deleted file mode 100644 index 3d418685..00000000 --- a/src/synthcity/plugins/core/models/tabular_ddpm/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# TabDDPM: Modelling Tabular Data with Diffusion Models - -Adapted from 
https://github.com/rotot0/tab-ddpm. diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py index d4fa28e6..95d31db6 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py @@ -32,6 +32,7 @@ def __init__( gaussian_loss_type = 'mse', scheduler = 'cosine', device: Any = DEVICE, + verbose: int = 0, log_interval: int = 100, print_interval: int = 500, # model params @@ -45,7 +46,7 @@ def __init__( ) -> None: super().__init__() self.__dict__.update(locals()) - del self.self, self.kwargs + del self.self def _anneal_lr(self, step): frac_done = step / self.steps @@ -69,7 +70,7 @@ def fit(self, X: pd.DataFrame, cond=None, **kwargs: Any): n_labels = cond.nunique() else: n_labels = 0 - + cat_cols = discrete_columns(X, return_counts=True) ini_cols = X.columns cat_cols, cat_counts = zip(*cat_cols) @@ -86,9 +87,9 @@ def fit(self, X: pd.DataFrame, cond=None, **kwargs: Any): dim_t = self.dim_label_emb ) - tensors = [X] if cond is None else [X, cond] - tensors = [torch.tensor(t.values, dtype=torch.float32, device=self.device) for t in tensors] - self.dataloader = TensorDataLoader(tensors, batch_size=self.batch_size) + tensors = [torch.tensor(t.values, dtype=torch.float32, device=self.device) + for t in ([X] if cond is None else [X, cond])] + self.dataloader = TensorDataLoader(*tensors, batch_size=self.batch_size) self.diffusion = GaussianMultinomialDiffusion( model_type=self.model_type, @@ -98,7 +99,8 @@ def fit(self, X: pd.DataFrame, cond=None, **kwargs: Any): gaussian_loss_type=self.gaussian_loss_type, num_timesteps=self.num_timesteps, scheduler=self.scheduler, - device=self.device + device=self.device, + verbose=self.verbose, ).to(self.device) self.ema_model = deepcopy(self.diffusion.denoise_fn) @@ -110,6 +112,10 @@ def fit(self, X: pd.DataFrame, cond=None, **kwargs: Any): self.loss_history = pd.DataFrame(columns=['step', 'mloss', 'gloss', 'loss']) + if self.verbose: + print("Starting training") + print(self) + for step, (x, y) in enumerate(self.dataloader): curr_loss_multi = 0.0 curr_loss_gauss = 0.0 @@ -131,7 +137,7 @@ def fit(self, X: pd.DataFrame, cond=None, **kwargs: Any): if (step + 1) % self.log_interval == 0: mloss = np.around(curr_loss_multi / curr_count, 4) gloss = np.around(curr_loss_gauss / curr_count, 4) - if (step + 1) % self.print_interval == 0: + if self.verbose and (step + 1) % self.print_interval == 0: print(f'Step {(step + 1)}/{self.n_iter} MLoss: {mloss} GLoss: {gloss} Sum: {mloss + gloss}') self.loss_history.loc[len(self.loss_history)] = [ step + 1, mloss, gloss, mloss + gloss] diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py index dfcbd00a..580de6fe 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py @@ -74,22 +74,28 @@ def __init__( multinomial_loss_type='vb_stochastic', parametrization='x0', scheduler='cosine', - device=torch.device('cpu') + device=torch.device('cpu'), + verbose=0 ): super(GaussianMultinomialDiffusion, self).__init__() assert multinomial_loss_type in ('vb_stochastic', 'vb_all') assert parametrization in ('x0', 'direct') + + if verbose: + self.print = print + else: + self.print = lambda *args, **kwargs: None if multinomial_loss_type == 'vb_all': - 
print('Computing the loss using the bound on _all_ timesteps.' + self.print('Computing the loss using the bound on _all_ timesteps.' ' This is expensive both in terms of memory and computation.') self.num_numerics = num_numerical_features self.num_classes = num_categorical_features self.num_classes_expanded = torch.from_numpy( - np.concatenate([num_categorical_features[i].repeat(num_categorical_features[i]) for i in range(len(num_categorical_features))]) - ).to(device) + np.concatenate([np.repeat(k, k) for k in num_categorical_features], + dtype=np.float32)).to(device) self.dim_input = self.num_numerics + sum(self.num_classes) self.slices_for_classes = [np.arange(self.num_classes[0])] @@ -409,7 +415,7 @@ def q_posterior(self, log_x_start, log_x_t, t): # EV_log_qxt_x0 = self.q_pred(log_x_start, t) - # print('sum exp', EV_log_qxt_x0.exp().sum(1).mean()) + # self.print('sum exp', EV_log_qxt_x0.exp().sum(1).mean()) # assert False # log_qxt_x0 = (log_x_t.exp() * EV_log_qxt_x0).sum(dim=1) @@ -800,7 +806,7 @@ def gaussian_ddim_sample( b = x.shape[0] device = x.device for t in reversed(range(T)): - print(f'Sample timestep {t:4d}', end='\r') + self.print(f'Sample timestep {t:4d}', end='\r') t_array = (torch.ones(b, device=device) * t).long() out_num = self.denoise_fn(x, t_array, y=cond) x = self.gaussian_ddim_step( @@ -808,7 +814,7 @@ def gaussian_ddim_sample( x, t_array ) - print() + self.print() return x @@ -854,7 +860,7 @@ def gaussian_ddim_reverse_sample( b = x.shape[0] device = x.device for t in range(T): - print(f'Reverse timestep {t:4d}', end='\r') + self.print(f'Reverse timestep {t:4d}', end='\r') t_array = (torch.ones(b, device=device) * t).long() out_num = self.denoise_fn(x, t_array, y=cond) x = self.gaussian_ddim_reverse_step( @@ -863,7 +869,7 @@ def gaussian_ddim_reverse_sample( t_array, eta=0.0 ) - print() + self.print() return x @@ -923,7 +929,7 @@ def sample_ddim(self, num_samples, cond=None): # ) # out_dict = {'y': y.long().to(device)} for i in reversed(range(0, self.num_timesteps)): - print(f'Sample timestep {i:4d}', end='\r') + self.print(f'Sample timestep {i:4d}', end='\r') t = torch.full((b,), i, device=device, dtype=torch.long) model_out = self.denoise_fn( torch.cat([z_norm, log_z], dim=1).float(), @@ -935,7 +941,7 @@ def sample_ddim(self, num_samples, cond=None): if has_cat: log_z = self.multinomial_ddim_step(model_out_cat, log_z, t) - print() + self.print() z_ohe = torch.exp(log_z).round() z_cat = log_z if has_cat: @@ -962,7 +968,7 @@ def sample(self, num_samples, cond=None): # ) # out_dict = {'y': y.long().to(device)} for i in reversed(range(0, self.num_timesteps)): - print(f'Sample timestep {i:4d}', end='\r') + self.print(f'Sample timestep {i:4d}', end='\r') t = torch.full((b,), i, device=device, dtype=torch.long) model_out = self.denoise_fn( torch.cat([z_norm, log_z], dim=1).float(), @@ -974,7 +980,7 @@ def sample(self, num_samples, cond=None): if has_cat: log_z = self.p_sample(model_out_cat, log_z, t=t) - print() + self.print() z_ohe = torch.exp(log_z).round() z_cat = log_z if has_cat: @@ -984,7 +990,7 @@ def sample(self, num_samples, cond=None): def sample_all(self, num_samples, cond=None, max_batch_size=2000, ddim=False): if ddim: - print('Sample using DDIM.') + self.print('Sample using DDIM.') sample_fn = self.sample_ddim else: sample_fn = self.sample diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/requirements.txt b/src/synthcity/plugins/core/models/tabular_ddpm/requirements.txt deleted file mode 100644 index acc088c4..00000000 --- 
a/src/synthcity/plugins/core/models/tabular_ddpm/requirements.txt +++ /dev/null @@ -1,15 +0,0 @@ -category-encoders==2.3.0 -dython==0.5.1 -icecream==2.1.2 -libzero==0.0.8 -numpy==1.21.4 -optuna==2.10.1 -pandas==1.3.4 -pyarrow==6.0.0 -rtdl==0.0.9 -scikit-learn==1.0.2 -scipy==1.7.2 -skorch==0.11.0 -tomli-w==0.4.0 -tomli==1.2.2 -tqdm==4.62.3 diff --git a/src/synthcity/plugins/generic/plugin_ddpm.py b/src/synthcity/plugins/generic/plugin_ddpm.py index f2f07f80..e2fde2d1 100644 --- a/src/synthcity/plugins/generic/plugin_ddpm.py +++ b/src/synthcity/plugins/generic/plugin_ddpm.py @@ -68,6 +68,7 @@ def __init__( gaussian_loss_type = 'mse', scheduler = 'cosine', device: Any = DEVICE, + verbose: int = 0, log_interval: int = 100, print_interval: int = 500, # model params @@ -99,8 +100,8 @@ def __init__( self.is_classification = is_classification rtdl_params = dict( - d_layers = [self.dim_hidden] * self.num_layers, - dropout = self.dropout + d_layers = [dim_hidden] * num_layers, + dropout = dropout ) self.model = TabDDPM( n_iter=n_iter, @@ -111,6 +112,7 @@ def __init__( gaussian_loss_type=gaussian_loss_type, scheduler=scheduler, device=device, + verbose=verbose, log_interval=log_interval, print_interval=print_interval, model_type=model_type, @@ -161,14 +163,14 @@ def _fit(self, data: DataLoader, cond: pd.Series = None, **kwargs) -> "TabDDPMPl assert cond is None _, cond = data.unpack() self._labels, self._cond_dist = np.unique(cond, return_counts=True) - self._cond_dist /= self._cond_dist.sum() - - if cond is not None: - cond = pd.Series(cond, index=data.index) + self._cond_dist = self._cond_dist / self._cond_dist.sum() # NOTE: should we include the target column in `data`? data = data.dataframe() + if cond is not None: + cond = pd.Series(cond, index=data.index) + # self.encoder = TabularEncoder().fit(X) self.model.fit(data, cond, **kwargs) diff --git a/src/temp.py b/src/temp.py new file mode 100644 index 00000000..73e3390c --- /dev/null +++ b/src/temp.py @@ -0,0 +1,15 @@ +import numpy as np +import pandas as pd + +from my_utils.debug import loadDebugger +from synthcity.plugins import Plugins +from sklearn.datasets import load_iris +from synthcity.plugins.core.dataloader import GenericDataLoader + +# loadDebugger() +X, y = load_iris(as_frame = True, return_X_y = True) +X = GenericDataLoader(X.assign(target = y), target_column="target") +plugin = Plugins().get("ddpm", n_iter=3, is_classification=True, verbose=1) +plugin.fit(X) +X_syn = plugin.generate(50) +print(X_syn) diff --git a/tests/plugins/generic/test_ddpm.py b/tests/plugins/generic/test_ddpm.py index a398c000..1d734753 100644 --- a/tests/plugins/generic/test_ddpm.py +++ b/tests/plugins/generic/test_ddpm.py @@ -16,6 +16,7 @@ plugin_args = dict( n_iter=100, is_classification=True, + verbose=1, # rtdl_params=dict( # d_layers=[256, 256], # dropout=0.0 From fc9cee0403fc76f779b704585191d6f147efb2f3 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Tue, 7 Mar 2023 19:43:08 +0100 Subject: [PATCH 07/95] update TensorDataLoader and training loop --- .../core/models/tabular_ddpm/__init__.py | 71 ++++++++++--------- .../plugins/core/models/tabular_ddpm/utils.py | 16 ++--- 2 files changed, 43 insertions(+), 44 deletions(-) diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py index 95d31db6..ea288ec8 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py @@ -24,7 +24,7 @@ class 
TabDDPM(nn.Module): @validate_arguments(config=dict(arbitrary_types_allowed=True)) def __init__( self, - n_iter = 10000, + n_iter = 100, lr = 0.002, weight_decay = 1e-4, batch_size = 1024, @@ -48,8 +48,8 @@ def __init__( self.__dict__.update(locals()) del self.self - def _anneal_lr(self, step): - frac_done = step / self.steps + def _anneal_lr(self, epoch): + frac_done = epoch / self.n_iter lr = self.lr * (1 - frac_done) for param_group in self.optimizer.param_groups: param_group["lr"] = lr @@ -116,40 +116,41 @@ def fit(self, X: pd.DataFrame, cond=None, **kwargs: Any): print("Starting training") print(self) - for step, (x, y) in enumerate(self.dataloader): - curr_loss_multi = 0.0 - curr_loss_gauss = 0.0 - curr_count = 0 + steps = 0 + curr_loss_multi = 0.0 + curr_loss_gauss = 0.0 + curr_count = 0 + + for epoch in range(self.n_iter): self.diffusion.train() - self.optimizer.zero_grad() - loss_multi, loss_gauss = self.diffusion.mixed_loss(x, dict(y=y)) - loss = loss_multi + loss_gauss - loss.backward() - self.optimizer.step() - - self._anneal_lr(step) - - curr_count += len(x) - curr_loss_multi += loss_multi.item() * len(x) - curr_loss_gauss += loss_gauss.item() * len(x) - - if (step + 1) % self.log_interval == 0: - mloss = np.around(curr_loss_multi / curr_count, 4) - gloss = np.around(curr_loss_gauss / curr_count, 4) - if self.verbose and (step + 1) % self.print_interval == 0: - print(f'Step {(step + 1)}/{self.n_iter} MLoss: {mloss} GLoss: {gloss} Sum: {mloss + gloss}') - self.loss_history.loc[len(self.loss_history)] = [ - step + 1, mloss, gloss, mloss + gloss] - curr_count = 0 - curr_loss_gauss = 0.0 - curr_loss_multi = 0.0 - - self._update_ema(self.ema_model.parameters(), self.model.parameters()) - - if step == self.n_iter - 1: - break - + for x, y in self.dataloader: + self.optimizer.zero_grad() + loss_multi, loss_gauss = self.diffusion.mixed_loss(x, y) + loss = loss_multi + loss_gauss + loss.backward() + self.optimizer.step() + + self._anneal_lr(epoch + 1) + + curr_count += len(x) + curr_loss_multi += loss_multi.item() * len(x) + curr_loss_gauss += loss_gauss.item() * len(x) + + steps += 1 + if steps % self.log_interval == 0: + mloss = np.around(curr_loss_multi / curr_count, 4) + gloss = np.around(curr_loss_gauss / curr_count, 4) + if self.verbose and steps % self.print_interval == 0: + print(f'Step {steps}: MLoss: {mloss} GLoss: {gloss} Sum: {mloss + gloss}') + self.loss_history.loc[len(self.loss_history)] = \ + [steps, mloss, gloss, mloss + gloss] + curr_count = 0 + curr_loss_gauss = 0.0 + curr_loss_multi = 0.0 + + self._update_ema(self.ema_model.parameters(), self.model.parameters()) + return self def generate(self, count: int, cond=None): diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/utils.py b/src/synthcity/plugins/core/models/tabular_ddpm/utils.py index a0021a68..ff92f275 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/utils.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/utils.py @@ -198,15 +198,13 @@ def __init__(self, *tensors, batch_size=32, shuffle=False): self.shuffle = shuffle def __iter__(self): - i = 0 idx = np.arange(self.dataset_len) if self.shuffle: np.random.shuffle(idx) - while True: - j = i + self.batch_size - s = slice(i, j) - if j > self.dataset_len: - s = list(range(i, self.dataset_len)) + list(range(0, j - self.dataset_len)) - if self.shuffle: - np.random.shuffle(idx) - yield tuple(t[idx[s]] for t in self.tensors) + for i in range(0, self.dataset_len, self.batch_size): + s = idx[i:i+self.batch_size] + yield tuple(t[s] for t in 
self.tensors) + + def __len__(self): + return len(range(0, self.dataset_len, self.batch_size)) + \ No newline at end of file From d8b57addee3d08eff70f1eb98256664547fb19ee Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Tue, 7 Mar 2023 20:20:01 +0100 Subject: [PATCH 08/95] clear bugs --- .../plugins/core/models/tabular_ddpm/__init__.py | 12 +++++++++--- .../gaussian_multinomial_diffsuion.py | 7 ++++--- src/synthcity/plugins/generic/plugin_ddpm.py | 2 +- src/temp.py | 15 --------------- tests/plugins/generic/test_ddpm.py | 5 +++++ 5 files changed, 19 insertions(+), 22 deletions(-) delete mode 100644 src/temp.py diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py index ea288ec8..daa8c8eb 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py @@ -87,8 +87,9 @@ def fit(self, X: pd.DataFrame, cond=None, **kwargs: Any): dim_t = self.dim_label_emb ) - tensors = [torch.tensor(t.values, dtype=torch.float32, device=self.device) - for t in ([X] if cond is None else [X, cond])] + tensors = [torch.tensor(X.values, dtype=torch.float32, device=self.device)] + if cond is not None: + tensors.append(torch.tensor(cond.values, dtype=torch.long, device=self.device)) self.dataloader = TensorDataLoader(*tensors, batch_size=self.batch_size) self.diffusion = GaussianMultinomialDiffusion( @@ -149,12 +150,17 @@ def fit(self, X: pd.DataFrame, cond=None, **kwargs: Any): curr_loss_gauss = 0.0 curr_loss_multi = 0.0 - self._update_ema(self.ema_model.parameters(), self.model.parameters()) + self._update_ema(self.ema_model.parameters(), self.diffusion.parameters()) return self def generate(self, count: int, cond=None): self.diffusion.eval() + if cond is not None: + cond = torch.tensor(cond, dtype=torch.long, device=self.device) sample = self.diffusion.sample_all(count, cond).detach().cpu().numpy() sample = sample[:, self._col_perm] + if self.verbose: + print("Generated sample") + print(sample) return sample diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py index 580de6fe..ba7b1e55 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py @@ -92,7 +92,7 @@ def __init__( ' This is expensive both in terms of memory and computation.') self.num_numerics = num_numerical_features - self.num_classes = num_categorical_features + self.num_classes = np.asarray(num_categorical_features) self.num_classes_expanded = torch.from_numpy( np.concatenate([np.repeat(k, k) for k in num_categorical_features], dtype=np.float32)).to(device) @@ -102,7 +102,7 @@ def __init__( offsets = np.cumsum(self.num_classes) for i in range(1, len(offsets)): self.slices_for_classes.append(np.arange(offsets[i - 1], offsets[i])) - self.offsets = torch.from_numpy(np.append([0], offsets)).to(device) + self.offsets = torch.from_numpy(np.append([0], offsets)).to(device).long() if model_params is None: model_params = dict( @@ -426,7 +426,8 @@ def q_posterior(self, log_x_start, log_x_t, t): num_axes = (1,) * (len(log_x_start.size()) - 1) t_broadcast = t.to(log_x_start.device).view(-1, *num_axes) * torch.ones_like(log_x_start) - log_EV_qxtmin_x0 = torch.where(t_broadcast == 0, log_x_start, log_EV_qxtmin_x0.to(torch.float32)) + log_EV_qxtmin_x0 = 
torch.where(t_broadcast == 0, log_x_start, + log_EV_qxtmin_x0.to(torch.float32)) # unnormed_logprobs = log_EV_qxtmin_x0 + # log q_pred_one_timestep(x_t, t) diff --git a/src/synthcity/plugins/generic/plugin_ddpm.py b/src/synthcity/plugins/generic/plugin_ddpm.py index e2fde2d1..3a3da116 100644 --- a/src/synthcity/plugins/generic/plugin_ddpm.py +++ b/src/synthcity/plugins/generic/plugin_ddpm.py @@ -174,7 +174,7 @@ def _fit(self, data: DataLoader, cond: pd.Series = None, **kwargs) -> "TabDDPMPl # self.encoder = TabularEncoder().fit(X) self.model.fit(data, cond, **kwargs) - + def _generate(self, count: int, syn_schema: Schema, cond=None, **kwargs: Any) -> DataLoader: if self.is_classification and cond is None: # randomly generate labels following the distribution of the training data diff --git a/src/temp.py b/src/temp.py deleted file mode 100644 index 73e3390c..00000000 --- a/src/temp.py +++ /dev/null @@ -1,15 +0,0 @@ -import numpy as np -import pandas as pd - -from my_utils.debug import loadDebugger -from synthcity.plugins import Plugins -from sklearn.datasets import load_iris -from synthcity.plugins.core.dataloader import GenericDataLoader - -# loadDebugger() -X, y = load_iris(as_frame = True, return_X_y = True) -X = GenericDataLoader(X.assign(target = y), target_column="target") -plugin = Plugins().get("ddpm", n_iter=3, is_classification=True, verbose=1) -plugin.fit(X) -X_syn = plugin.generate(50) -print(X_syn) diff --git a/tests/plugins/generic/test_ddpm.py b/tests/plugins/generic/test_ddpm.py index 1d734753..e11d8af9 100644 --- a/tests/plugins/generic/test_ddpm.py +++ b/tests/plugins/generic/test_ddpm.py @@ -16,7 +16,12 @@ plugin_args = dict( n_iter=100, is_classification=True, + n_iter=1000, + batch_size=200, + num_timesteps=500, verbose=1, + log_interval=10, + print_interval=50 # rtdl_params=dict( # d_layers=[256, 256], # dropout=0.0 From 92dcc328456e9c3230cd565497d14dc72c7e6fb8 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Tue, 7 Mar 2023 20:35:58 +0100 Subject: [PATCH 09/95] debug for regression tasks --- .../plugins/core/models/tabular_ddpm/__init__.py | 12 ++++++------ tests/plugins/generic/test_ddpm.py | 9 ++++----- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py index daa8c8eb..495302fa 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py @@ -87,9 +87,9 @@ def fit(self, X: pd.DataFrame, cond=None, **kwargs: Any): dim_t = self.dim_label_emb ) - tensors = [torch.tensor(X.values, dtype=torch.float32, device=self.device)] - if cond is not None: - tensors.append(torch.tensor(cond.values, dtype=torch.long, device=self.device)) + tensors = [torch.tensor(X.values, dtype=torch.float32, device=self.device), + np.repeat(None, len(X)) if cond is None else + torch.tensor(cond.values, dtype=torch.long, device=self.device)] self.dataloader = TensorDataLoader(*tensors, batch_size=self.batch_size) self.diffusion = GaussianMultinomialDiffusion( @@ -113,9 +113,9 @@ def fit(self, X: pd.DataFrame, cond=None, **kwargs: Any): self.loss_history = pd.DataFrame(columns=['step', 'mloss', 'gloss', 'loss']) - if self.verbose: - print("Starting training") - print(self) + # if self.verbose: + # print("Starting training") + # print(self) steps = 0 curr_loss_multi = 0.0 diff --git a/tests/plugins/generic/test_ddpm.py b/tests/plugins/generic/test_ddpm.py index e11d8af9..7f56077a 
100644 --- a/tests/plugins/generic/test_ddpm.py +++ b/tests/plugins/generic/test_ddpm.py @@ -14,14 +14,13 @@ plugin_name = "ddpm" plugin_args = dict( - n_iter=100, - is_classification=True, - n_iter=1000, + n_iter=1000, + # is_classification=True, batch_size=200, num_timesteps=500, verbose=1, log_interval=10, - print_interval=50 + print_interval=100 # rtdl_params=dict( # d_layers=[256, 256], # dropout=0.0 @@ -129,7 +128,7 @@ def test_eval_performance_ddpm(compress_dataset: bool) -> None: X = GenericDataLoader(Xraw) for _ in range(2): - test_plugin = plugin(n_iter=5000, compress_dataset=compress_dataset) + test_plugin = plugin(**plugin_args, compress_dataset=compress_dataset) evaluator = PerformanceEvaluatorXGB() test_plugin.fit(X) From 0b9d0e3d6dc05fb47c3fb01439b55ec65db18767 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Tue, 7 Mar 2023 20:49:00 +0100 Subject: [PATCH 10/95] debug for regression tasks; ALL TESTS PASSED --- .../core/models/tabular_ddpm/__init__.py | 31 +++++++++++-------- tests/plugins/generic/test_ddpm.py | 9 +++--- 2 files changed, 22 insertions(+), 18 deletions(-) diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py index daa8c8eb..6c9947b3 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py @@ -72,13 +72,18 @@ def fit(self, X: pd.DataFrame, cond=None, **kwargs: Any): n_labels = 0 cat_cols = discrete_columns(X, return_counts=True) - ini_cols = X.columns - cat_cols, cat_counts = zip(*cat_cols) - # reorder the columns so that the categorical ones go to the end - X = X[np.hstack([X.columns[~X.keys().isin(cat_cols)], cat_cols])] - cur_cols = X.columns - # find the permutation from the reordered columns to the original ones - self._col_perm = np.argsort(cur_cols)[np.argsort(np.argsort(ini_cols))] + + if cat_cols: + ini_cols = X.columns + cat_cols, cat_counts = zip(*cat_cols) + # reorder the columns so that the categorical ones go to the end + X = X[np.hstack([X.columns[~X.keys().isin(cat_cols)], cat_cols])] + cur_cols = X.columns + # find the permutation from the reordered columns to the original ones + self._col_perm = np.argsort(cur_cols)[np.argsort(np.argsort(ini_cols))] + else: + cat_counts = [0] + self._col_perm = np.arange(X.shape[1]) model_params = dict( num_classes=n_labels, @@ -87,9 +92,9 @@ def fit(self, X: pd.DataFrame, cond=None, **kwargs: Any): dim_t = self.dim_label_emb ) - tensors = [torch.tensor(X.values, dtype=torch.float32, device=self.device)] - if cond is not None: - tensors.append(torch.tensor(cond.values, dtype=torch.long, device=self.device)) + tensors = [torch.tensor(X.values, dtype=torch.float32, device=self.device), + np.repeat(None, len(X)) if cond is None else + torch.tensor(cond.values, dtype=torch.long, device=self.device)] self.dataloader = TensorDataLoader(*tensors, batch_size=self.batch_size) self.diffusion = GaussianMultinomialDiffusion( @@ -113,9 +118,9 @@ def fit(self, X: pd.DataFrame, cond=None, **kwargs: Any): self.loss_history = pd.DataFrame(columns=['step', 'mloss', 'gloss', 'loss']) - if self.verbose: - print("Starting training") - print(self) + # if self.verbose: + # print("Starting training") + # print(self) steps = 0 curr_loss_multi = 0.0 diff --git a/tests/plugins/generic/test_ddpm.py b/tests/plugins/generic/test_ddpm.py index e11d8af9..7f56077a 100644 --- a/tests/plugins/generic/test_ddpm.py +++ b/tests/plugins/generic/test_ddpm.py @@ -14,14 +14,13 @@ 
plugin_name = "ddpm" plugin_args = dict( - n_iter=100, - is_classification=True, - n_iter=1000, + n_iter=1000, + # is_classification=True, batch_size=200, num_timesteps=500, verbose=1, log_interval=10, - print_interval=50 + print_interval=100 # rtdl_params=dict( # d_layers=[256, 256], # dropout=0.0 @@ -129,7 +128,7 @@ def test_eval_performance_ddpm(compress_dataset: bool) -> None: X = GenericDataLoader(Xraw) for _ in range(2): - test_plugin = plugin(n_iter=5000, compress_dataset=compress_dataset) + test_plugin = plugin(**plugin_args, compress_dataset=compress_dataset) evaluator = PerformanceEvaluatorXGB() test_plugin.fit(X) From bb9822954f960652e277db117d734bf3b97cec22 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Tue, 7 Mar 2023 21:09:28 +0100 Subject: [PATCH 11/95] remove the official repo of TabDDPM --- third-party/tab-ddpm | 1 - 1 file changed, 1 deletion(-) delete mode 160000 third-party/tab-ddpm diff --git a/third-party/tab-ddpm b/third-party/tab-ddpm deleted file mode 160000 index 41f2415a..00000000 --- a/third-party/tab-ddpm +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 41f2415a378f1e8e8f4f5c3b8736521c0d47cf22 From b4486a48caf102fda67495816b6b314dd03ebe62 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Wed, 8 Mar 2023 10:57:56 +0100 Subject: [PATCH 12/95] passed all pre-commit checks --- docs/tutorials | 2 +- .../core/models/tabular_ddpm/__init__.py | 90 +-- .../gaussian_multinomial_diffsuion.py | 527 +++++++++--------- .../core/models/tabular_ddpm/modules.py | 80 +-- .../plugins/core/models/tabular_ddpm/utils.py | 62 ++- .../plugins/core/models/tabular_encoder.py | 2 +- src/synthcity/plugins/generic/plugin_ddpm.py | 68 +-- src/synthcity/utils/dataframe.py | 16 +- 8 files changed, 460 insertions(+), 387 deletions(-) diff --git a/docs/tutorials b/docs/tutorials index c6fce2d2..27afa3de 120000 --- a/docs/tutorials +++ b/docs/tutorials @@ -1 +1 @@ -../tutorials/ \ No newline at end of file +../tutorials/ diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py index 6c9947b3..98ac9619 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py @@ -1,3 +1,6 @@ +# mypy: allow-untyped-defs, allow-untyped-calls +# flake8: noqa: F401 + # stdlib from copy import deepcopy from typing import Any, Optional, Union @@ -6,37 +9,36 @@ import numpy as np import pandas as pd import torch -from torch import nn from pydantic import validate_arguments +from torch import nn # synthcity absolute +from synthcity.metrics.weighted_metrics import WeightedMetrics from synthcity.utils.constants import DEVICE from synthcity.utils.dataframe import discrete_columns -from synthcity.metrics.weighted_metrics import WeightedMetrics -from .gaussian_multinomial_diffsuion import GaussianMultinomialDiffusion # noqa -from .modules import MLPDiffusion, ResNetDiffusion # noqa +# synthcity relative +from .gaussian_multinomial_diffsuion import GaussianMultinomialDiffusion from .utils import TensorDataLoader class TabDDPM(nn.Module): - @validate_arguments(config=dict(arbitrary_types_allowed=True)) def __init__( self, - n_iter = 100, - lr = 0.002, - weight_decay = 1e-4, - batch_size = 1024, - num_timesteps = 1000, - gaussian_loss_type = 'mse', - scheduler = 'cosine', + n_iter: int = 1000, + lr: float = 0.002, + weight_decay: float = 1e-4, + batch_size: int = 1024, + num_timesteps: int = 1000, + gaussian_loss_type: str = "mse", + scheduler: 
str = "cosine", device: Any = DEVICE, verbose: int = 0, - log_interval: int = 100, - print_interval: int = 500, + log_interval: int = 10, + print_interval: int = 100, # model params - model_type = 'mlp', + model_type: str = "mlp", rtdl_params: Optional[dict] = None, # {'d_layers', 'dropout'} dim_label_emb: int = 128, # early stopping @@ -47,7 +49,7 @@ def __init__( super().__init__() self.__dict__.update(locals()) del self.self - + def _anneal_lr(self, epoch): frac_done = epoch / self.n_iter lr = self.lr * (1 - frac_done) @@ -65,14 +67,14 @@ def _update_ema(self, target_params, source_params, rate=0.999): for targ, src in zip(target_params, source_params): targ.detach().mul_(rate).add_(src.detach(), alpha=1 - rate) - def fit(self, X: pd.DataFrame, cond=None, **kwargs: Any): + def fit(self, X: pd.DataFrame, cond: Any = None, **kwargs: Any) -> "TabDDPM": if cond is not None: n_labels = cond.nunique() else: n_labels = 0 cat_cols = discrete_columns(X, return_counts=True) - + if cat_cols: ini_cols = X.columns cat_cols, cat_counts = zip(*cat_cols) @@ -89,47 +91,51 @@ def fit(self, X: pd.DataFrame, cond=None, **kwargs: Any): num_classes=n_labels, is_y_cond=cond is not None, rtdl_params=self.rtdl_params, - dim_t = self.dim_label_emb + dim_t=self.dim_label_emb, ) - - tensors = [torch.tensor(X.values, dtype=torch.float32, device=self.device), - np.repeat(None, len(X)) if cond is None else - torch.tensor(cond.values, dtype=torch.long, device=self.device)] + + tensors = [ + torch.tensor(X.values, dtype=torch.float32, device=self.device), + np.repeat(None, len(X)) + if cond is None + else torch.tensor(cond.values, dtype=torch.long, device=self.device), + ] self.dataloader = TensorDataLoader(*tensors, batch_size=self.batch_size) self.diffusion = GaussianMultinomialDiffusion( model_type=self.model_type, model_params=model_params, num_categorical_features=cat_counts, - num_numerical_features=X.shape[1]-len(cat_cols), + num_numerical_features=X.shape[1] - len(cat_cols), gaussian_loss_type=self.gaussian_loss_type, num_timesteps=self.num_timesteps, scheduler=self.scheduler, device=self.device, verbose=self.verbose, ).to(self.device) - + self.ema_model = deepcopy(self.diffusion.denoise_fn) for param in self.ema_model.parameters(): param.detach_() self.optimizer = torch.optim.AdamW( - self.diffusion.parameters(), lr=self.lr, weight_decay=self.weight_decay) - - self.loss_history = pd.DataFrame(columns=['step', 'mloss', 'gloss', 'loss']) - + self.diffusion.parameters(), lr=self.lr, weight_decay=self.weight_decay + ) + + self.loss_history = pd.DataFrame(columns=["step", "mloss", "gloss", "loss"]) + # if self.verbose: # print("Starting training") # print(self) - + steps = 0 curr_loss_multi = 0.0 curr_loss_gauss = 0.0 curr_count = 0 - + for epoch in range(self.n_iter): self.diffusion.train() - + for x, y in self.dataloader: self.optimizer.zero_grad() loss_multi, loss_gauss = self.diffusion.mixed_loss(x, y) @@ -148,18 +154,26 @@ def fit(self, X: pd.DataFrame, cond=None, **kwargs: Any): mloss = np.around(curr_loss_multi / curr_count, 4) gloss = np.around(curr_loss_gauss / curr_count, 4) if self.verbose and steps % self.print_interval == 0: - print(f'Step {steps}: MLoss: {mloss} GLoss: {gloss} Sum: {mloss + gloss}') - self.loss_history.loc[len(self.loss_history)] = \ - [steps, mloss, gloss, mloss + gloss] + print( + f"Step {steps}: MLoss: {mloss} GLoss: {gloss} Sum: {mloss + gloss}" + ) + self.loss_history.loc[len(self.loss_history)] = [ + steps, + mloss, + gloss, + mloss + gloss, + ] curr_count = 0 curr_loss_gauss = 
0.0 curr_loss_multi = 0.0 - self._update_ema(self.ema_model.parameters(), self.diffusion.parameters()) - + self._update_ema( + self.ema_model.parameters(), self.diffusion.parameters() + ) + return self - def generate(self, count: int, cond=None): + def generate(self, count: int, cond: Any = None) -> np.ndarray: self.diffusion.eval() if cond is not None: cond = torch.tensor(cond, dtype=torch.long, device=self.device) diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py index ba7b1e55..7a2b358d 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py @@ -1,20 +1,24 @@ """ -Based on https://github.com/openai/guided-diffusion/blob/main/guided_diffusion -and https://github.com/ehoogeboom/multinomial_diffusion +Based on +- https://github.com/openai/guided-diffusion/blob/main/guided_diffusion +- https://github.com/ehoogeboom/multinomial_diffusion +- https://github.com/lucidrains/denoising-diffusion-pytorch/blob/5989f4c77eafcdc6be0fb4739f0f277a6dd7f7d8/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py#L281 """ +# mypy: disable-error-code=no-untyped-def +# flake8: noqa: F405 -import torch.nn.functional as F -import torch +# stdlib import math -import pandas as pd +# third party import numpy as np -from .utils import * +import torch +import torch.nn.functional as F + +# synthcity relative from .modules import MLPDiffusion, ResNetDiffusion +from .utils import * # noqa: F403 -""" -Based in part on: https://github.com/lucidrains/denoising-diffusion-pytorch/blob/5989f4c77eafcdc6be0fb4739f0f277a6dd7f7d8/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py#L281 -""" def get_named_beta_schedule(schedule_name, num_diffusion_timesteps): """ @@ -63,41 +67,45 @@ def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999): class GaussianMultinomialDiffusion(torch.nn.Module): def __init__( - self, - num_numerical_features, - num_categorical_features, - model_type='mlp', - model_params=None, - num_timesteps=1000, - gaussian_loss_type='mse', - gaussian_parametrization='eps', - multinomial_loss_type='vb_stochastic', - parametrization='x0', - scheduler='cosine', - device=torch.device('cpu'), - verbose=0 - ): + self, + num_numerical_features, + num_categorical_features, + model_type="mlp", + model_params=None, + num_timesteps=1000, + gaussian_loss_type="mse", + gaussian_parametrization="eps", + multinomial_loss_type="vb_stochastic", + parametrization="x0", + scheduler="cosine", + device=torch.device("cpu"), + verbose=0, + ): super(GaussianMultinomialDiffusion, self).__init__() - assert multinomial_loss_type in ('vb_stochastic', 'vb_all') - assert parametrization in ('x0', 'direct') - + assert multinomial_loss_type in ("vb_stochastic", "vb_all") + assert parametrization in ("x0", "direct") + if verbose: self.print = print else: self.print = lambda *args, **kwargs: None - if multinomial_loss_type == 'vb_all': - self.print('Computing the loss using the bound on _all_ timesteps.' - ' This is expensive both in terms of memory and computation.') + if multinomial_loss_type == "vb_all": + self.print( + "Computing the loss using the bound on _all_ timesteps." + " This is expensive both in terms of memory and computation." 
+ ) self.num_numerics = num_numerical_features self.num_classes = np.asarray(num_categorical_features) self.num_classes_expanded = torch.from_numpy( - np.concatenate([np.repeat(k, k) for k in num_categorical_features], - dtype=np.float32)).to(device) + np.concatenate( + [np.repeat(k, k) for k in num_categorical_features], dtype=np.float32 + ) + ).to(device) self.dim_input = self.num_numerics + sum(self.num_classes) - + self.slices_for_classes = [np.arange(self.num_classes[0])] offsets = np.cumsum(self.num_classes) for i in range(1, len(offsets)): @@ -106,27 +114,21 @@ def __init__( if model_params is None: model_params = dict( - d_in = self.dim_input, - num_classes = 0, - is_y_cond = False, - rtdl_params = None + d_in=self.dim_input, num_classes=0, is_y_cond=False, rtdl_params=None ) else: - model_params['d_in'] = self.dim_input - - if model_params['rtdl_params'] is None: - model_params['rtdl_params'] = dict( - d_layers = [256, 256, 256], - dropout = 0.0 - ) - - if model_type == 'mlp': + model_params["d_in"] = self.dim_input + + if model_params["rtdl_params"] is None: + model_params["rtdl_params"] = dict(d_layers=[256, 256, 256], dropout=0.0) + + if model_type == "mlp": self.denoise_fn = MLPDiffusion(**model_params) - elif model_type == 'resnet': + elif model_type == "resnet": self.denoise_fn = ResNetDiffusion(**model_params) else: raise "Unknown diffusion model type!" - + self.gaussian_loss_type = gaussian_loss_type self.gaussian_parametrization = gaussian_parametrization self.multinomial_loss_type = multinomial_loss_type @@ -134,9 +136,9 @@ def __init__( self.parametrization = parametrization self.scheduler = scheduler - alphas = 1. - get_named_beta_schedule(scheduler, num_timesteps) - alphas = torch.tensor(alphas.astype('float64')) - betas = 1. - alphas + alphas = 1.0 - get_named_beta_schedule(scheduler, num_timesteps) + alphas = torch.tensor(alphas.astype("float64")) + betas = 1.0 - alphas log_alpha = np.log(alphas) log_cumprod_alpha = np.cumsum(log_alpha) @@ -157,60 +159,86 @@ def __init__( self.posterior_variance = ( betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod) ) - self.posterior_log_variance_clipped = torch.from_numpy( - np.log(np.append(self.posterior_variance[1], self.posterior_variance[1:])) - ).float().to(device) + self.posterior_log_variance_clipped = ( + torch.from_numpy( + np.log( + np.append(self.posterior_variance[1], self.posterior_variance[1:]) + ) + ) + .float() + .to(device) + ) self.posterior_mean_coef1 = ( - betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod) - ).float().to(device) + (betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)) + .float() + .to(device) + ) self.posterior_mean_coef2 = ( - (1.0 - alphas_cumprod_prev) - * np.sqrt(alphas.numpy()) - / (1.0 - alphas_cumprod) - ).float().to(device) + ( + (1.0 - alphas_cumprod_prev) + * np.sqrt(alphas.numpy()) + / (1.0 - alphas_cumprod) + ) + .float() + .to(device) + ) - assert log_add_exp(log_alpha, log_1_min_alpha).abs().sum().item() < 1.e-5 - assert log_add_exp(log_cumprod_alpha, log_1_min_cumprod_alpha).abs().sum().item() < 1e-5 - assert (np.cumsum(log_alpha) - log_cumprod_alpha).abs().sum().item() < 1.e-5 + assert log_add_exp(log_alpha, log_1_min_alpha).abs().sum().item() < 1.0e-5 + assert ( + log_add_exp(log_cumprod_alpha, log_1_min_cumprod_alpha).abs().sum().item() + < 1e-5 + ) + assert (np.cumsum(log_alpha) - log_cumprod_alpha).abs().sum().item() < 1.0e-5 # Convert to float32 and register buffers. 
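        # Why buffers rather than plain attributes: registered buffers move
        # with the module on .to(device) and are saved in state_dict, but are
        # never returned by parameters(), so the optimizer cannot touch the
        # fixed noise schedule. A minimal sketch of that behaviour (toy
        # module, not this class):
        #
        #     m = torch.nn.Module()
        #     m.register_buffer("alphas", torch.ones(3))
        #     sorted(m.state_dict())    # ['alphas']  - serialized with the model
        #     list(m.parameters())      # []          - invisible to optimizers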
- self.register_buffer('alphas', alphas.float().to(device)) - self.register_buffer('log_alpha', log_alpha.float().to(device)) - self.register_buffer('log_1_min_alpha', log_1_min_alpha.float().to(device)) - self.register_buffer('log_1_min_cumprod_alpha', log_1_min_cumprod_alpha.float().to(device)) - self.register_buffer('log_cumprod_alpha', log_cumprod_alpha.float().to(device)) - self.register_buffer('alphas_cumprod', alphas_cumprod.float().to(device)) - self.register_buffer('alphas_cumprod_prev', alphas_cumprod_prev.float().to(device)) - self.register_buffer('alphas_cumprod_next', alphas_cumprod_next.float().to(device)) - self.register_buffer('sqrt_alphas_cumprod', sqrt_alphas_cumprod.float().to(device)) - self.register_buffer('sqrt_one_minus_alphas_cumprod', sqrt_one_minus_alphas_cumprod.float().to(device)) - self.register_buffer('sqrt_recip_alphas_cumprod', sqrt_recip_alphas_cumprod.float().to(device)) - self.register_buffer('sqrt_recipm1_alphas_cumprod', sqrt_recipm1_alphas_cumprod.float().to(device)) - - self.register_buffer('Lt_history', torch.zeros(num_timesteps)) - self.register_buffer('Lt_count', torch.zeros(num_timesteps)) - + self.register_buffer("alphas", alphas.float().to(device)) + self.register_buffer("log_alpha", log_alpha.float().to(device)) + self.register_buffer("log_1_min_alpha", log_1_min_alpha.float().to(device)) + self.register_buffer( + "log_1_min_cumprod_alpha", log_1_min_cumprod_alpha.float().to(device) + ) + self.register_buffer("log_cumprod_alpha", log_cumprod_alpha.float().to(device)) + self.register_buffer("alphas_cumprod", alphas_cumprod.float().to(device)) + self.register_buffer( + "alphas_cumprod_prev", alphas_cumprod_prev.float().to(device) + ) + self.register_buffer( + "alphas_cumprod_next", alphas_cumprod_next.float().to(device) + ) + self.register_buffer( + "sqrt_alphas_cumprod", sqrt_alphas_cumprod.float().to(device) + ) + self.register_buffer( + "sqrt_one_minus_alphas_cumprod", + sqrt_one_minus_alphas_cumprod.float().to(device), + ) + self.register_buffer( + "sqrt_recip_alphas_cumprod", sqrt_recip_alphas_cumprod.float().to(device) + ) + self.register_buffer( + "sqrt_recipm1_alphas_cumprod", + sqrt_recipm1_alphas_cumprod.float().to(device), + ) + + self.register_buffer("Lt_history", torch.zeros(num_timesteps)) + self.register_buffer("Lt_count", torch.zeros(num_timesteps)) + # Gaussian part def gaussian_q_mean_variance(self, x_start, t): - mean = ( - extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start - ) + mean = extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract(1.0 - self.alphas_cumprod, t, x_start.shape) - log_variance = extract( - self.log_1_min_cumprod_alpha, t, x_start.shape - ) + log_variance = extract(self.log_1_min_cumprod_alpha, t, x_start.shape) return mean, variance, log_variance - + def gaussian_q_sample(self, x_start, t, noise=None): if noise is None: noise = torch.randn_like(x_start) assert noise.shape == x_start.shape return ( extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start - + extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) - * noise + + extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise ) - + def gaussian_q_posterior_mean_variance(self, x_start, x_t, t): assert x_start.shape == x_t.shape posterior_mean = ( @@ -230,7 +258,13 @@ def gaussian_q_posterior_mean_variance(self, x_start, x_t, t): return posterior_mean, posterior_variance, posterior_log_variance_clipped def gaussian_p_mean_variance( - self, model_output, x, t, clip_denoised=False, 
denoised_fn=None, model_kwargs=None + self, + model_output, + x, + t, + clip_denoised=False, + denoised_fn=None, + model_kwargs=None, ): if model_kwargs is None: model_kwargs = {} @@ -238,27 +272,33 @@ def gaussian_p_mean_variance( B, C = x.shape[:2] assert t.shape == (B,) - model_variance = torch.cat([self.posterior_variance[1].unsqueeze(0).to(x.device), (1. - self.alphas)[1:]], dim=0) + model_variance = torch.cat( + [ + self.posterior_variance[1].unsqueeze(0).to(x.device), + (1.0 - self.alphas)[1:], + ], + dim=0, + ) # model_variance = self.posterior_variance.to(x.device) model_log_variance = torch.log(model_variance) model_variance = extract(model_variance, t, x.shape) model_log_variance = extract(model_log_variance, t, x.shape) - if self.gaussian_parametrization == 'eps': + if self.gaussian_parametrization == "eps": pred_xstart = self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output) - elif self.gaussian_parametrization == 'x0': + elif self.gaussian_parametrization == "x0": pred_xstart = model_output else: raise NotImplementedError - + model_mean, _, _ = self.gaussian_q_posterior_mean_variance( x_start=pred_xstart, x_t=x, t=t ) assert ( model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape - ), f'{model_mean.shape}, {model_log_variance.shape}, {pred_xstart.shape}, {x.shape}' + ), f"{model_mean.shape}, {model_log_variance.shape}, {pred_xstart.shape}, {x.shape}" return { "mean": model_mean, @@ -266,13 +306,15 @@ def gaussian_p_mean_variance( "log_variance": model_log_variance, "pred_xstart": pred_xstart, } - + def _vb_terms_bpd( self, model_output, x_start, x_t, t, clip_denoised=False, model_kwargs=None ): - true_mean, _, true_log_variance_clipped = self.gaussian_q_posterior_mean_variance( - x_start=x_start, x_t=x_t, t=t - ) + ( + true_mean, + _, + true_log_variance_clipped, + ) = self.gaussian_q_posterior_mean_variance(x_start=x_start, x_t=x_t, t=t) out = self.gaussian_p_mean_variance( model_output, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs ) @@ -290,8 +332,13 @@ def _vb_terms_bpd( # At the first timestep return the decoder NLL, # otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t)) output = torch.where((t == 0), decoder_nll, kl) - return {"output": output, "pred_xstart": out["pred_xstart"], "out_mean": out["mean"], "true_mean": true_mean} - + return { + "output": output, + "pred_xstart": out["pred_xstart"], + "out_mean": out["mean"], + "true_mean": true_mean, + } + def _prior_gaussian(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in @@ -309,15 +356,15 @@ def _prior_gaussian(self, x_start): mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0 ) return mean_flat(kl_prior) / np.log(2.0) - + def _gaussian_loss(self, model_out, x_start, x_t, t, noise, model_kwargs=None): if model_kwargs is None: model_kwargs = {} terms = {} - if self.gaussian_loss_type == 'mse': + if self.gaussian_loss_type == "mse": terms["loss"] = mean_flat((noise - model_out) ** 2) - elif self.gaussian_loss_type == 'kl': + elif self.gaussian_loss_type == "kl": terms["loss"] = self._vb_terms_bpd( model_output=model_out, x_start=x_start, @@ -327,20 +374,18 @@ def _gaussian_loss(self, model_out, x_start, x_t, t, noise, model_kwargs=None): model_kwargs=model_kwargs, )["output"] + return terms["loss"] - return terms['loss'] - def _predict_xstart_from_eps(self, x_t, t, eps=1e-8): assert x_t.shape == eps.shape return ( extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract(self.sqrt_recipm1_alphas_cumprod, t, 
x_t.shape) * eps ) - + def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return ( - extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - - pred_xstart + extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart ) / extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def gaussian_p_sample( @@ -365,7 +410,9 @@ def gaussian_p_sample( (t != 0).float().view(-1, *([1] * (len(x.shape) - 1))) ) # no noise when t == 0 - sample = out["mean"] + nonzero_mask * torch.exp(0.5 * out["log_variance"]) * noise + sample = ( + out["mean"] + nonzero_mask * torch.exp(0.5 * out["log_variance"]) * noise + ) return {"sample": sample, "pred_xstart": out["pred_xstart"]} # Multinomial part @@ -381,18 +428,20 @@ def q_pred_one_timestep(self, log_x_t, t): # alpha_t * E[xt] + (1 - alpha_t) 1 / K log_probs = log_add_exp( log_x_t + log_alpha_t, - log_1_min_alpha_t - torch.log(self.num_classes_expanded) + log_1_min_alpha_t - torch.log(self.num_classes_expanded), ) return log_probs def q_pred(self, log_x_start, t): log_cumprod_alpha_t = extract(self.log_cumprod_alpha, t, log_x_start.shape) - log_1_min_cumprod_alpha = extract(self.log_1_min_cumprod_alpha, t, log_x_start.shape) + log_1_min_cumprod_alpha = extract( + self.log_1_min_cumprod_alpha, t, log_x_start.shape + ) log_probs = log_add_exp( log_x_start + log_cumprod_alpha_t, - log_1_min_cumprod_alpha - torch.log(self.num_classes_expanded) + log_1_min_cumprod_alpha - torch.log(self.num_classes_expanded), ) return log_probs @@ -402,7 +451,7 @@ def predict_start(self, model_out, log_x_t): # model_out = self._denoise_fn(x_t, t.to(x_t.device), **out_dict) assert model_out.size(0) == log_x_t.size(0) - assert model_out.size(1) == self.num_classes.sum(), f'{model_out.size()}' + assert model_out.size(1) == self.num_classes.sum(), f"{model_out.size()}" log_pred = torch.empty_like(model_out) for ix in self.slices_for_classes: @@ -425,9 +474,12 @@ def q_posterior(self, log_x_start, log_x_t, t): log_EV_qxtmin_x0 = self.q_pred(log_x_start, t_minus_1) num_axes = (1,) * (len(log_x_start.size()) - 1) - t_broadcast = t.to(log_x_start.device).view(-1, *num_axes) * torch.ones_like(log_x_start) - log_EV_qxtmin_x0 = torch.where(t_broadcast == 0, log_x_start, - log_EV_qxtmin_x0.to(torch.float32)) + t_broadcast = t.to(log_x_start.device).view(-1, *num_axes) * torch.ones_like( + log_x_start + ) + log_EV_qxtmin_x0 = torch.where( + t_broadcast == 0, log_x_start, log_EV_qxtmin_x0.to(torch.float32) + ) # unnormed_logprobs = log_EV_qxtmin_x0 + # log q_pred_one_timestep(x_t, t) @@ -435,18 +487,19 @@ def q_posterior(self, log_x_start, log_x_t, t): # Not very easy to see why this is true. 
But it is :) unnormed_logprobs = log_EV_qxtmin_x0 + self.q_pred_one_timestep(log_x_t, t) - log_EV_xtmin_given_xt_given_xstart = \ - unnormed_logprobs \ - - sliced_logsumexp(unnormed_logprobs, self.offsets) + log_EV_xtmin_given_xt_given_xstart = unnormed_logprobs - sliced_logsumexp( + unnormed_logprobs, self.offsets + ) return log_EV_xtmin_given_xt_given_xstart def p_pred(self, model_out, log_x, t): - if self.parametrization == 'x0': + if self.parametrization == "x0": log_x_recon = self.predict_start(model_out, log_x) log_model_pred = self.q_posterior( - log_x_start=log_x_recon, log_x_t=log_x, t=t) - elif self.parametrization == 'direct': + log_x_start=log_x_recon, log_x_t=log_x, t=t + ) + elif self.parametrization == "direct": log_model_pred = self.predict_start(model_out, log_x) else: raise ValueError @@ -467,28 +520,32 @@ def p_sample_loop(self, shape): img = torch.randn(shape, device=device) for i in reversed(range(1, self.num_timesteps)): - img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long)) + img = self.p_sample( + img, torch.full((b,), i, device=device, dtype=torch.long) + ) return img @torch.no_grad() - def _sample(self, image_size, batch_size = 16): + def _sample(self, image_size, batch_size=16): return self.p_sample_loop((batch_size, 3, image_size, image_size)) - @torch.no_grad() - def interpolate(self, x1, x2, t = None, lam = 0.5): - b, *_, device = *x1.shape, x1.device - t = default(t, self.num_timesteps - 1) + # @torch.no_grad() + # def interpolate(self, x1, x2, t=None, lam=0.5): + # b, *_, device = *x1.shape, x1.device + # t = default(t, self.num_timesteps - 1) - assert x1.shape == x2.shape + # assert x1.shape == x2.shape - t_batched = torch.stack([torch.tensor(t, device=device)] * b) - xt1, xt2 = map(lambda x: self.q_sample(x, t=t_batched), (x1, x2)) + # t_batched = torch.stack([torch.tensor(t, device=device)] * b) + # xt1, xt2 = map(lambda x: self.q_sample(x, t=t_batched), (x1, x2)) - img = (1 - lam) * xt1 + lam * xt2 - for i in reversed(range(0, t)): - img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long)) + # img = (1 - lam) * xt1 + lam * xt2 + # for i in reversed(range(0, t)): + # img = self.p_sample( + # img, torch.full((b,), i, device=device, dtype=torch.long) + # ) - return img + # return img def log_sample_categorical(self, logits): full_sample = [] @@ -519,7 +576,8 @@ def nll(self, log_x_start): kl = self.compute_Lt( log_x_start=log_x_start, log_x_t=self.q_sample(log_x_start=log_x_start, t=t_array), - t=t_array) + t=t_array, + ) loss += kl @@ -533,14 +591,15 @@ def kl_prior(self, log_x_start): ones = torch.ones(b, device=device).long() log_qxT_prob = self.q_pred(log_x_start, t=(self.num_timesteps - 1) * ones) - log_half_prob = -torch.log(self.num_classes_expanded * torch.ones_like(log_qxT_prob)) + log_half_prob = -torch.log( + self.num_classes_expanded * torch.ones_like(log_qxT_prob) + ) kl_prior = self.multinomial_kl(log_qxT_prob, log_half_prob) return sum_except_batch(kl_prior) def compute_Lt(self, model_out, log_x_start, log_x_t, t, detach_mean=False): - log_true_prob = self.q_posterior( - log_x_start=log_x_start, log_x_t=log_x_t, t=t) + log_true_prob = self.q_posterior(log_x_start=log_x_start, log_x_t=log_x_t, t=t) log_model_prob = self.p_pred(model_out, log_x=log_x_t, t=t) if detach_mean: @@ -553,14 +612,14 @@ def compute_Lt(self, model_out, log_x_start, log_x_t, t, detach_mean=False): decoder_nll = sum_except_batch(decoder_nll) mask = (t == torch.zeros_like(t)).float() - loss = mask * decoder_nll + (1. 
- mask) * kl + loss = mask * decoder_nll + (1.0 - mask) * kl return loss - def sample_time(self, b, device, method='uniform'): - if method == 'importance': + def sample_time(self, b, device, method="uniform"): + if method == "importance": if not (self.Lt_count > 10).all(): - return self.sample_time(b, device, method='uniform') + return self.sample_time(b, device, method="uniform") Lt_sqrt = torch.sqrt(self.Lt_history + 1e-10) + 0.0001 Lt_sqrt[0] = Lt_sqrt[1] # Overwrite decoder term with L1. @@ -572,7 +631,7 @@ def sample_time(self, b, device, method='uniform'): return t, pt - elif method == 'uniform': + elif method == "uniform": t = torch.randint(0, self.num_timesteps, (b,), device=device).long() pt = torch.ones_like(t).float() / self.num_timesteps @@ -582,17 +641,15 @@ def sample_time(self, b, device, method='uniform'): def _multinomial_loss(self, model_out, log_x_start, log_x_t, t, pt): - if self.multinomial_loss_type == 'vb_stochastic': - kl = self.compute_Lt( - model_out, log_x_start, log_x_t, t - ) + if self.multinomial_loss_type == "vb_stochastic": + kl = self.compute_Lt(model_out, log_x_start, log_x_t, t) kl_prior = self.kl_prior(log_x_start) # Upweigh loss term of the kl vb_loss = kl / pt + kl_prior return vb_loss - elif self.multinomial_loss_type == 'vb_all': + elif self.multinomial_loss_type == "vb_all": # Expensive, dont do it ;). # DEPRECATED return -self.nll(log_x_start) @@ -602,7 +659,7 @@ def _multinomial_loss(self, model_out, log_x_start, log_x_t, t, pt): #! Not used def log_prob(self, x): b, device = x.size(0), x.device - + if self.training: #! not enough arguments return self._multinomial_loss(x) @@ -610,10 +667,11 @@ def log_prob(self, x): else: log_x_start = index_to_log_onehot(x, self.num_classes) - t, pt = self.sample_time(b, device, 'importance') + t, pt = self.sample_time(b, device, "importance") kl = self.compute_Lt( - log_x_start, self.q_sample(log_x_start=log_x_start, t=t), t) + log_x_start, self.q_sample(log_x_start=log_x_start, t=t), t + ) kl_prior = self.kl_prior(log_x_start) @@ -621,15 +679,15 @@ def log_prob(self, x): loss = kl / pt + kl_prior return -loss - + def mixed_loss(self, x, cond=None): b = x.shape[0] device = x.device - t, pt = self.sample_time(b, device, 'uniform') + t, pt = self.sample_time(b, device, "uniform") + + x_num = x[:, : self.num_numerics] + x_cat = x[:, self.num_numerics :] - x_num = x[:, :self.num_numerics] - x_cat = x[:, self.num_numerics:] - x_num_t = x_num log_x_cat_t = x_cat if x_num.shape[1] > 0: @@ -638,23 +696,21 @@ def mixed_loss(self, x, cond=None): if x_cat.shape[1] > 0: log_x_cat = index_to_log_onehot(x_cat.long(), self.num_classes) log_x_cat_t = self.q_sample(log_x_start=log_x_cat, t=t) - + x_in = torch.cat([x_num_t, log_x_cat_t], dim=1) - model_out = self.denoise_fn( - x_in, - t, y=cond - ) + model_out = self.denoise_fn(x_in, t, y=cond) - model_out_num = model_out[:, :self.num_numerics] - model_out_cat = model_out[:, self.num_numerics:] + model_out_num = model_out[:, : self.num_numerics] + model_out_cat = model_out[:, self.num_numerics :] loss_multi = torch.zeros((1,)).float() loss_gauss = torch.zeros((1,)).float() if x_cat.shape[1] > 0: - loss_multi = self._multinomial_loss(model_out_cat, log_x_cat, log_x_cat_t, - t, pt) / len(self.num_classes) - + loss_multi = self._multinomial_loss( + model_out_cat, log_x_cat, log_x_cat_t, t, pt + ) / len(self.num_classes) + if x_num.shape[1] > 0: loss_gauss = self._gaussian_loss(model_out_num, x_num, x_num_t, t, noise) @@ -662,14 +718,14 @@ def mixed_loss(self, x, cond=None): # 
loss_gauss = torch.where(out_dict['y'] == 1, loss_gauss, 2 * loss_gauss) return loss_multi.mean(), loss_gauss.mean() - + @torch.no_grad() def mixed_elbo(self, x0, cond=None): b = x0.size(0) device = x0.device - x_num = x0[:, :self.num_numerics] - x_cat = x0[:, self.num_numerics:] + x_num = x0[:, : self.num_numerics] + x_cat = x0[:, self.num_numerics :] has_cat = x_cat.shape[1] > 0 if has_cat: log_x_cat = index_to_log_onehot(x_cat.long(), self.num_classes).to(device) @@ -692,12 +748,11 @@ def mixed_elbo(self, x0, cond=None): log_x_cat_t = x_cat model_out = self.denoise_fn( - torch.cat([x_num_t, log_x_cat_t], dim=1), - t_array, y=cond + torch.cat([x_num_t, log_x_cat_t], dim=1), t_array, y=cond ) - - model_out_num = model_out[:, :self.num_numerics] - model_out_cat = model_out[:, self.num_numerics:] + + model_out_num = model_out[:, : self.num_numerics] + model_out_cat = model_out[:, self.num_numerics :] kl = torch.tensor([0.0]) if has_cat: @@ -713,7 +768,7 @@ def mixed_elbo(self, x0, cond=None): x_start=x_num, x_t=x_num_t, t=t_array, - clip_denoised=False + clip_denoised=False, ) multinomial_loss.append(kl) @@ -751,18 +806,12 @@ def mixed_elbo(self, x0, cond=None): "mse": mse, # "mu_mse": mu_mse "out_mean": out_mean, - "true_mean": true_mean + "true_mean": true_mean, } @torch.no_grad() def gaussian_ddim_step( - self, - model_out_num, - x, - t, - clip_denoised=False, - denoised_fn=None, - eta=0.0 + self, model_out_num, x, t, clip_denoised=False, denoised_fn=None, eta=0.0 ): out = self.gaussian_p_mean_variance( model_out_num, @@ -786,7 +835,7 @@ def gaussian_ddim_step( noise = torch.randn_like(x) mean_pred = ( out["pred_xstart"] * torch.sqrt(alpha_bar_prev) - + torch.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps + + torch.sqrt(1 - alpha_bar_prev - sigma**2) * eps ) nonzero_mask = ( (t != 0).float().view(-1, *([1] * (len(x.shape) - 1))) @@ -794,39 +843,23 @@ def gaussian_ddim_step( sample = mean_pred + nonzero_mask * sigma * noise return sample - + @torch.no_grad() - def gaussian_ddim_sample( - self, - noise, - T, - cond=None, - eta=0.0 - ): + def gaussian_ddim_sample(self, noise, T, cond=None, eta=0.0): x = noise b = x.shape[0] device = x.device for t in reversed(range(T)): - self.print(f'Sample timestep {t:4d}', end='\r') + self.print(f"Sample timestep {t:4d}", end="\r") t_array = (torch.ones(b, device=device) * t).long() out_num = self.denoise_fn(x, t_array, y=cond) - x = self.gaussian_ddim_step( - out_num, - x, - t_array - ) + x = self.gaussian_ddim_step(out_num, x, t_array) self.print() return x - @torch.no_grad() def gaussian_ddim_reverse_step( - self, - model_out_num, - x, - t, - clip_denoised=False, - eta=0.0 + self, model_out_num, x, t, clip_denoised=False, eta=0.0 ): assert eta == 0.0, "Eta must be zero." 
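        # Context for the eta == 0.0 requirement: DDIM with eta = 0 is fully
        # deterministic,
        #     x_{t-1} = sqrt(abar_{t-1}) * x0_hat + sqrt(1 - abar_{t-1}) * eps_hat,
        # so the same equation can be run in reverse (recovering x_{t+1} from
        # x_t) by reusing the predicted eps with alpha_bar_next, which is what
        # the code below does. A standalone sketch of the forward update
        # (hypothetical helper, assuming precomputed tensors):
        #
        #     def ddim_step_eta0(x0_hat, eps_hat, alpha_bar_prev):
        #         # no fresh noise is injected when eta == 0
        #         return (alpha_bar_prev.sqrt() * x0_hat
        #                 + (1.0 - alpha_bar_prev).sqrt() * eps_hat)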
out = self.gaussian_p_mean_variance( @@ -839,8 +872,7 @@ def gaussian_ddim_reverse_step( ) eps = ( - extract(self.sqrt_recip_alphas_cumprod, t, x.shape) * x - - out["pred_xstart"] + extract(self.sqrt_recip_alphas_cumprod, t, x.shape) * x - out["pred_xstart"] ) / extract(self.sqrt_recipm1_alphas_cumprod, t, x.shape) alpha_bar_next = extract(self.alphas_cumprod_next, t, x.shape) @@ -852,37 +884,20 @@ def gaussian_ddim_reverse_step( return mean_pred @torch.no_grad() - def gaussian_ddim_reverse_sample( - self, - x, - T, - cond=None - ): + def gaussian_ddim_reverse_sample(self, x, T, cond=None): b = x.shape[0] device = x.device for t in range(T): - self.print(f'Reverse timestep {t:4d}', end='\r') + self.print(f"Reverse timestep {t:4d}", end="\r") t_array = (torch.ones(b, device=device) * t).long() out_num = self.denoise_fn(x, t_array, y=cond) - x = self.gaussian_ddim_reverse_step( - out_num, - x, - t_array, - eta=0.0 - ) + x = self.gaussian_ddim_reverse_step(out_num, x, t_array, eta=0.0) self.print() return x - @torch.no_grad() - def multinomial_ddim_step( - self, - model_out_cat, - log_x_t, - t, - eta=0.0 - ): + def multinomial_ddim_step(self, model_out_cat, log_x_t, t, eta=0.0): # not ddim, essentially log_x0 = self.predict_start(model_out_cat, log_x_t=log_x_t) @@ -897,13 +912,15 @@ def multinomial_ddim_step( coef1 = sigma coef2 = alpha_bar_prev - sigma * alpha_bar coef3 = 1 - coef1 - coef2 - - log_ps = torch.stack([ - torch.log(coef1) + log_x_t, - torch.log(coef2) + log_x0, - torch.log(coef3) - torch.log(self.num_classes_expanded) - ], dim=2) + log_ps = torch.stack( + [ + torch.log(coef1) + log_x_t, + torch.log(coef2) + log_x0, + torch.log(coef3) - torch.log(self.num_classes_expanded), + ], + dim=2, + ) log_prob = torch.logsumexp(log_ps, dim=2) @@ -920,7 +937,9 @@ def sample_ddim(self, num_samples, cond=None): has_cat = self.num_classes[0] != 0 log_z = torch.zeros((b, 0), device=device).float() if has_cat: - uniform_logits = torch.zeros((b, len(self.num_classes_expanded)), device=device) + uniform_logits = torch.zeros( + (b, len(self.num_classes_expanded)), device=device + ) log_z = self.log_sample_categorical(uniform_logits) # y = torch.multinomial( @@ -930,15 +949,16 @@ def sample_ddim(self, num_samples, cond=None): # ) # out_dict = {'y': y.long().to(device)} for i in reversed(range(0, self.num_timesteps)): - self.print(f'Sample timestep {i:4d}', end='\r') + self.print(f"Sample timestep {i:4d}", end="\r") t = torch.full((b,), i, device=device, dtype=torch.long) model_out = self.denoise_fn( - torch.cat([z_norm, log_z], dim=1).float(), - t, y=cond + torch.cat([z_norm, log_z], dim=1).float(), t, y=cond + ) + model_out_num = model_out[:, : self.num_numerics] + model_out_cat = model_out[:, self.num_numerics :] + z_norm = self.gaussian_ddim_step( + model_out_num, z_norm, t, clip_denoised=False ) - model_out_num = model_out[:, :self.num_numerics] - model_out_cat = model_out[:, self.num_numerics:] - z_norm = self.gaussian_ddim_step(model_out_num, z_norm, t, clip_denoised=False) if has_cat: log_z = self.multinomial_ddim_step(model_out_cat, log_z, t) @@ -959,7 +979,9 @@ def sample(self, num_samples, cond=None): has_cat = self.num_classes[0] != 0 log_z = torch.zeros((b, 0), device=device).float() if has_cat: - uniform_logits = torch.zeros((b, len(self.num_classes_expanded)), device=device) + uniform_logits = torch.zeros( + (b, len(self.num_classes_expanded)), device=device + ) log_z = self.log_sample_categorical(uniform_logits) # y = torch.multinomial( @@ -969,15 +991,16 @@ def sample(self, 
num_samples, cond=None): # ) # out_dict = {'y': y.long().to(device)} for i in reversed(range(0, self.num_timesteps)): - self.print(f'Sample timestep {i:4d}', end='\r') + self.print(f"Sample timestep {i:4d}", end="\r") t = torch.full((b,), i, device=device, dtype=torch.long) model_out = self.denoise_fn( - torch.cat([z_norm, log_z], dim=1).float(), - t, y=cond + torch.cat([z_norm, log_z], dim=1).float(), t, y=cond ) - model_out_num = model_out[:, :self.num_numerics] - model_out_cat = model_out[:, self.num_numerics:] - z_norm = self.gaussian_p_sample(model_out_num, z_norm, t, clip_denoised=False)['sample'] + model_out_num = model_out[:, : self.num_numerics] + model_out_cat = model_out[:, self.num_numerics :] + z_norm = self.gaussian_p_sample( + model_out_num, z_norm, t, clip_denoised=False + )["sample"] if has_cat: log_z = self.p_sample(model_out_cat, log_z, t=t) @@ -988,17 +1011,17 @@ def sample(self, num_samples, cond=None): z_cat = ohe_to_categories(z_ohe, self.num_classes) sample = torch.cat([z_norm, z_cat], dim=1).cpu() return sample - + def sample_all(self, num_samples, cond=None, max_batch_size=2000, ddim=False): if ddim: - self.print('Sample using DDIM.') + self.print("Sample using DDIM.") sample_fn = self.sample_ddim else: sample_fn = self.sample bs = np.diff([*range(0, num_samples, max_batch_size), num_samples]) all_samples = [] - + for b in bs: sample = sample_fn(b, cond) if torch.any(sample.isnan()).item(): diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/modules.py b/src/synthcity/plugins/core/models/tabular_ddpm/modules.py index 44c63884..48310320 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/modules.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/modules.py @@ -1,10 +1,14 @@ """ Code was adapted from https://github.com/Yura52/rtdl """ +# mypy: disable-error-code=no-untyped-def +# flake8: noqa: F401 +# stdlib import math from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union, cast +# third party import torch import torch.nn as nn import torch.nn.functional as F @@ -13,10 +17,12 @@ ModuleType = Union[str, Callable[..., nn.Module]] + class SiLU(nn.Module): def forward(self, x): return x * torch.sigmoid(x) + def timestep_embedding(timesteps, dim, max_period=10000): """ Create sinusoidal timestep embeddings. @@ -29,7 +35,9 @@ def timestep_embedding(timesteps, dim, max_period=10000): """ half = dim // 2 freqs = torch.exp( - -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half + -math.log(max_period) + * torch.arange(start=0, end=half, dtype=torch.float32) + / half ).to(device=timesteps.device) args = timesteps[:, None].float() * freqs[None] embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) @@ -37,10 +45,11 @@ def timestep_embedding(timesteps, dim, max_period=10000): embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) return embedding + def _is_glu_activation(activation: ModuleType): return ( isinstance(activation, str) - and activation.endswith('GLU') + and activation.endswith("GLU") or activation in [ReGLU, GEGLU] ) @@ -48,6 +57,7 @@ def _is_glu_activation(activation: ModuleType): def _all_or_none(values): assert all(x is None for x in values) or all(x is not None for x in values) + def reglu(x: Tensor) -> Tensor: """The ReGLU activation function from [1]. References: @@ -67,6 +77,7 @@ def geglu(x: Tensor) -> Tensor: a, b = x.chunk(2, dim=-1) return a * F.gelu(b) + class ReGLU(nn.Module): """The ReGLU activation function from [shazeer2020glu]. 
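# A quick shape sketch for the GLU variants above (illustrative only): both
# reglu and geglu split the last dimension in half and use one half to gate
# the other, so the output is half as wide as the input:
#
#     x = torch.randn(4, 8)
#     reglu(x).shape    # torch.Size([4, 4]);  a * relu(b), with a, b = x.chunk(2, -1)
#     geglu(x).shape    # torch.Size([4, 4]);  a * gelu(b)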
@@ -102,13 +113,14 @@ class GEGLU(nn.Module): def forward(self, x: Tensor) -> Tensor: return geglu(x) + def _make_nn_module(module_type: ModuleType, *args) -> nn.Module: return ( ( ReGLU() - if module_type == 'ReGLU' + if module_type == "ReGLU" else GEGLU() - if module_type == 'GEGLU' + if module_type == "GEGLU" else getattr(nn, module_type)(*args) ) if isinstance(module_type, str) @@ -174,7 +186,7 @@ def __init__( if isinstance(dropouts, float): dropouts = [dropouts] * len(d_layers) assert len(d_layers) == len(dropouts) - assert activation not in ['ReGLU', 'GEGLU'] + assert activation not in ["ReGLU", "GEGLU"] self.blocks = nn.ModuleList( [ @@ -192,12 +204,12 @@ def __init__( @classmethod def make_baseline( - cls: Type['MLP'], + cls: Type["MLP"], d_in: int, d_layers: List[int], dropout: float, d_out: int, - ) -> 'MLP': + ) -> "MLP": """Create a "baseline" `MLP`. This variation of MLP was used in [gorishniy2021revisiting]. Features: @@ -224,14 +236,14 @@ def make_baseline( assert isinstance(dropout, float) if len(d_layers) > 2: assert len(set(d_layers[1:-1])) == 1, ( - 'if d_layers contains more than two elements, then' - ' all elements except for the first and the last ones must be equal.' + "if d_layers contains more than two elements, then" + " all elements except for the first and the last ones must be equal." ) return MLP( d_in=d_in, - d_layers=d_layers, # type: ignore + d_layers=d_layers, dropouts=dropout, - activation='ReLU', + activation="ReLU", d_out=d_out, ) @@ -335,7 +347,7 @@ def __init__( *, d_in: int, n_blocks: int, - d_main: int, + d_main: Optional[int], d_hidden: int, dropout_first: float, dropout_second: float, @@ -378,7 +390,7 @@ def __init__( @classmethod def make_baseline( - cls: Type['ResNet'], + cls: Type["ResNet"], *, d_in: int, n_blocks: int, @@ -387,7 +399,7 @@ def make_baseline( dropout_first: float, dropout_second: float, d_out: int, - ) -> 'ResNet': + ) -> "ResNet": """Create a "baseline" `ResNet`. This variation of ResNet was used in [gorishniy2021revisiting]. 
Features: * :code:`Activation` = :code:`ReLU` @@ -409,8 +421,8 @@ def make_baseline( d_hidden=d_hidden, dropout_first=dropout_first, dropout_second=dropout_second, - normalization='BatchNorm1d', - activation='ReLU', + normalization="BatchNorm1d", + activation="ReLU", d_out=d_out, ) @@ -421,10 +433,12 @@ def forward(self, x: Tensor) -> Tensor: x = self.head(x) return x -#### For diffusion + +# **For diffusion** + class MLPDiffusion(nn.Module): - def __init__(self, d_in, num_classes, is_y_cond, rtdl_params, dim_t = 128): + def __init__(self, d_in, num_classes, is_y_cond, rtdl_params, dim_t=128): super().__init__() self.dim_t = dim_t self.num_classes = num_classes @@ -432,8 +446,8 @@ def __init__(self, d_in, num_classes, is_y_cond, rtdl_params, dim_t = 128): # d0 = rtdl_params['d_layers'][0] - rtdl_params['d_in'] = dim_t - rtdl_params['d_out'] = d_in + rtdl_params["d_in"] = dim_t + rtdl_params["d_out"] = d_in self.mlp = MLP.make_baseline(**rtdl_params) @@ -441,14 +455,12 @@ def __init__(self, d_in, num_classes, is_y_cond, rtdl_params, dim_t = 128): self.label_emb = nn.Embedding(self.num_classes, dim_t) elif self.num_classes == 0 and is_y_cond: self.label_emb = nn.Linear(1, dim_t) - + self.proj = nn.Linear(d_in, dim_t) self.time_embed = nn.Sequential( - nn.Linear(dim_t, dim_t), - nn.SiLU(), - nn.Linear(dim_t, dim_t) + nn.Linear(dim_t, dim_t), nn.SiLU(), nn.Linear(dim_t, dim_t) ) - + def forward(self, x, timesteps, y=None): emb = self.time_embed(timestep_embedding(timesteps, self.dim_t)) if self.is_y_cond and y is not None: @@ -462,28 +474,26 @@ def forward(self, x, timesteps, y=None): class ResNetDiffusion(nn.Module): - def __init__(self, d_in, num_classes, is_y_cond, rtdl_params, dim_t = 256): + def __init__(self, d_in, num_classes, is_y_cond, rtdl_params, dim_t=256): super().__init__() self.dim_t = dim_t self.num_classes = num_classes - rtdl_params['d_in'] = d_in - rtdl_params['d_out'] = d_in - rtdl_params['emb_d'] = dim_t + rtdl_params["d_in"] = d_in + rtdl_params["d_out"] = d_in + rtdl_params["emb_d"] = dim_t self.resnet = ResNet.make_baseline(**rtdl_params) - + if self.num_classes > 0 and is_y_cond: self.label_emb = nn.Embedding(self.num_classes, dim_t) elif self.num_classes == 0 and is_y_cond: self.label_emb = nn.Linear(1, dim_t) - + self.proj = nn.Linear(d_in, dim_t) self.time_embed = nn.Sequential( - nn.Linear(dim_t, dim_t), - nn.SiLU(), - nn.Linear(dim_t, dim_t) + nn.Linear(dim_t, dim_t), nn.SiLU(), nn.Linear(dim_t, dim_t) ) - + def forward(self, x, timesteps, y=None): emb = self.time_embed(timestep_embedding(timesteps, self.dim_t)) if self.is_y_cond and y is not None: diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/utils.py b/src/synthcity/plugins/core/models/tabular_ddpm/utils.py index ff92f275..61ec9eac 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/utils.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/utils.py @@ -1,8 +1,15 @@ -import torch +# mypy: disable-error-code=no-untyped-def + +# stdlib +from inspect import isfunction + +# third party import numpy as np +import torch import torch.nn.functional as F -from torch.profiler import record_function -from inspect import isfunction + +# from torch.profiler import record_function + def normal_kl(mean1, logvar1, mean2, logvar2): """ @@ -33,12 +40,15 @@ def normal_kl(mean1, logvar1, mean2, logvar2): + ((mean1 - mean2) ** 2) * torch.exp(-logvar2) ) + def approx_standard_normal_cdf(x): """ A fast approximation of the cumulative distribution function of the standard normal. 
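    Specifically, the tanh-based form used below (the same approximation that
    appears in the GELU literature):

        Phi(x) ~= 0.5 * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x**3)))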
""" - return 0.5 * (1.0 + torch.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * torch.pow(x, 3)))) + return 0.5 * ( + 1.0 + torch.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * torch.pow(x, 3))) + ) def discretized_gaussian_log_likelihood(x, *, means, log_scales): @@ -65,13 +75,16 @@ def discretized_gaussian_log_likelihood(x, *, means, log_scales): log_probs = torch.where( x < -0.999, log_cdf_plus, - torch.where(x > 0.999, log_one_minus_cdf_min, torch.log(cdf_delta.clamp(min=1e-12))), + torch.where( + x > 0.999, log_one_minus_cdf_min, torch.log(cdf_delta.clamp(min=1e-12)) + ), ) assert log_probs.shape == x.shape return log_probs + def sum_except_batch(x, num_dims=1): - ''' + """ Sums all dimensions except the first. Args: @@ -80,23 +93,26 @@ def sum_except_batch(x, num_dims=1): Returns: x_sum: Tensor, shape (batch_size,) - ''' + """ return x.reshape(*x.shape[:num_dims], -1).sum(-1) + def mean_flat(tensor): """ Take the mean over all non-batch dimensions. """ return tensor.mean(dim=list(range(1, len(tensor.shape)))) + def ohe_to_categories(ohe, K): K = torch.from_numpy(K) indices = torch.cat([torch.zeros((1,)), K.cumsum(dim=0)], dim=0).int().tolist() res = [] for i in range(len(indices) - 1): - res.append(ohe[:, indices[i]:indices[i+1]].argmax(dim=1)) + res.append(ohe[:, indices[i] : indices[i + 1]].argmax(dim=1)) return torch.stack(res, dim=1) + def log_1_min_a(a): return torch.log(1 - a.exp() + 1e-40) @@ -105,9 +121,11 @@ def log_add_exp(a, b): maximum = torch.max(a, b) return maximum + torch.log(torch.exp(a - maximum) + torch.exp(b - maximum)) + def exists(x): return x is not None + def extract(a, t, x_shape): b, *_ = t.shape t = t.to(a.device) @@ -116,62 +134,64 @@ def extract(a, t, x_shape): out = out[..., None] return out.expand(x_shape) + def default(val, d): if exists(val): return val return d() if isfunction(d) else d + def log_categorical(log_x_start, log_prob): return (log_x_start.exp() * log_prob).sum(dim=1) + def index_to_log_onehot(x, num_classes): onehots = [] for i in range(len(num_classes)): onehots.append(F.one_hot(x[:, i], num_classes[i])) - x_onehot = torch.cat(onehots, dim=1) log_onehot = torch.log(x_onehot.float().clamp(min=1e-30)) return log_onehot + def log_sum_exp_by_classes(x, slices): - device = x.device res = torch.zeros_like(x) for ixs in slices: res[:, ixs] = torch.logsumexp(x[:, ixs], dim=1, keepdim=True) - assert x.size() == res.size() - return res + @torch.jit.script def log_sub_exp(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor: m = torch.maximum(a, b) return torch.log(torch.exp(a - m) - torch.exp(b - m)) + m + @torch.jit.script def sliced_logsumexp(x, slices): lse = torch.logcumsumexp( - torch.nn.functional.pad(x, [1, 0, 0, 0], value=-float('inf')), - dim=-1) + torch.nn.functional.pad(x, [1, 0, 0, 0], value=-float("inf")), dim=-1 + ) slice_starts = slices[:-1] slice_ends = slices[1:] slice_lse = log_sub_exp(lse[:, slice_ends], lse[:, slice_starts]) slice_lse_repeated = torch.repeat_interleave( - slice_lse, - slice_ends - slice_starts, - dim=-1 + slice_lse, slice_ends - slice_starts, dim=-1 ) return slice_lse_repeated + def log_onehot_to_index(log_x): return log_x.argmax(1) class FoundNANsError(BaseException): """Found NANs during sampling""" - def __init__(self, message='Found NANs during sampling.'): + + def __init__(self, message="Found NANs during sampling."): super(FoundNANsError, self).__init__(message) @@ -182,6 +202,7 @@ class TensorDataLoader: the dataset and calls cat (slow). 
Source: https://discuss.pytorch.org/t/dataloader-much-slower-than-manual-batching/27014/6 """ + def __init__(self, *tensors, batch_size=32, shuffle=False): """ Initialize a FastTensorDataLoader. @@ -196,15 +217,14 @@ def __init__(self, *tensors, batch_size=32, shuffle=False): self.dataset_len = self.tensors[0].shape[0] self.batch_size = batch_size self.shuffle = shuffle - + def __iter__(self): idx = np.arange(self.dataset_len) if self.shuffle: np.random.shuffle(idx) for i in range(0, self.dataset_len, self.batch_size): - s = idx[i:i+self.batch_size] + s = idx[i : i + self.batch_size] yield tuple(t[s] for t in self.tensors) def __len__(self): return len(range(0, self.dataset_len, self.batch_size)) - \ No newline at end of file diff --git a/src/synthcity/plugins/core/models/tabular_encoder.py b/src/synthcity/plugins/core/models/tabular_encoder.py index 638b5e6c..95fb8581 100644 --- a/src/synthcity/plugins/core/models/tabular_encoder.py +++ b/src/synthcity/plugins/core/models/tabular_encoder.py @@ -13,8 +13,8 @@ # synthcity absolute import synthcity.logger as log -from synthcity.utils.serialization import dataframe_hash from synthcity.utils.dataframe import discrete_columns as find_cat_cols +from synthcity.utils.serialization import dataframe_hash # synthcity relative from .data_encoder import ContinuousDataEncoder diff --git a/src/synthcity/plugins/generic/plugin_ddpm.py b/src/synthcity/plugins/generic/plugin_ddpm.py index 3a3da116..36336419 100644 --- a/src/synthcity/plugins/generic/plugin_ddpm.py +++ b/src/synthcity/plugins/generic/plugin_ddpm.py @@ -1,10 +1,11 @@ """ Reference: Kotelnikov, Akim et al. “TabDDPM: Modelling Tabular Data with Diffusion Models.” ArXiv abs/2209.15421 (2022): n. pag. """ +# mypy: disable-error-code=override +# flake8: noqa: F401 # stdlib from pathlib import Path -from copy import deepcopy from typing import Any, List, Optional, Union # third party @@ -13,8 +14,6 @@ # Necessary packages from pydantic import validate_arguments -import torch -from torch.utils.data import sampler # synthcity absolute from synthcity.metrics.weighted_metrics import WeightedMetrics @@ -26,7 +25,6 @@ IntegerDistribution, ) from synthcity.plugins.core.models.tabular_ddpm import TabDDPM -from synthcity.plugins.core.models.tabular_encoder import TabularEncoder from synthcity.plugins.core.plugin import Plugin from synthcity.plugins.core.schema import Schema from synthcity.utils.constants import DEVICE @@ -59,14 +57,14 @@ def __init__( self, *, is_classification: bool = False, - n_iter = 1000, - lr = 0.002, - weight_decay = 1e-4, - batch_size = 1024, - model_type = 'mlp', - num_timesteps = 1000, - gaussian_loss_type = 'mse', - scheduler = 'cosine', + n_iter: int = 1000, + lr: float = 0.002, + weight_decay: float = 1e-4, + batch_size: int = 1024, + model_type: str = "mlp", + num_timesteps: int = 1000, + gaussian_loss_type: str = "mse", + scheduler: str = "cosine", device: Any = DEVICE, verbose: int = 0, log_interval: int = 100, @@ -96,13 +94,10 @@ def __init__( compress_dataset=compress_dataset, **kwargs ) - + self.is_classification = is_classification - rtdl_params = dict( - d_layers = [dim_hidden] * num_layers, - dropout = dropout - ) + rtdl_params = dict(d_layers=[dim_hidden] * num_layers, dropout=dropout) self.model = TabDDPM( n_iter=n_iter, lr=lr, @@ -111,16 +106,16 @@ def __init__( num_timesteps=num_timesteps, gaussian_loss_type=gaussian_loss_type, scheduler=scheduler, - device=device, + device=device, verbose=verbose, - log_interval=log_interval, + log_interval=log_interval, 
print_interval=print_interval, model_type=model_type, - rtdl_params=rtdl_params, + rtdl_params=rtdl_params, dim_label_emb=dim_label_emb, - n_iter_min=n_iter_min, - n_iter_print=n_iter_print, - patience=patience, + n_iter_min=n_iter_min, + n_iter_print=n_iter_print, + patience=patience, ) @staticmethod @@ -158,29 +153,38 @@ def hyperparameter_space(**kwargs: Any) -> List[Distribution]: CategoricalDistribution(name="dim_hidden", choices=[128, 256, 512, 1024]), ] - def _fit(self, data: DataLoader, cond: pd.Series = None, **kwargs) -> "TabDDPMPlugin": + def _fit( + self, data: DataLoader, cond: Any = None, **kwargs: Any + ) -> "TabDDPMPlugin": if self.is_classification: assert cond is None _, cond = data.unpack() self._labels, self._cond_dist = np.unique(cond, return_counts=True) self._cond_dist = self._cond_dist / self._cond_dist.sum() - - # NOTE: should we include the target column in `data`? - data = data.dataframe() + + # NOTE: should we include the target column in `df`? + df = data.dataframe() if cond is not None: - cond = pd.Series(cond, index=data.index) + cond = pd.Series(cond, index=df.index) # self.encoder = TabularEncoder().fit(X) - - self.model.fit(data, cond, **kwargs) - def _generate(self, count: int, syn_schema: Schema, cond=None, **kwargs: Any) -> DataLoader: + self.model.fit(df, cond, **kwargs) + + return self + + def _generate( + self, count: int, syn_schema: Schema, cond: Any = None, **kwargs: Any + ) -> DataLoader: if self.is_classification and cond is None: # randomly generate labels following the distribution of the training data cond = np.random.choice(self._labels, size=count, p=self._cond_dist) - def callback(count, cond=cond): + + def callback(count, cond=cond): # type: ignore return self.model.generate(count, cond=cond) + return self._safe_generate(callback, count, syn_schema, **kwargs) + plugin = TabDDPMPlugin diff --git a/src/synthcity/utils/dataframe.py b/src/synthcity/utils/dataframe.py index 069b6eab..c12b29da 100644 --- a/src/synthcity/utils/dataframe.py +++ b/src/synthcity/utils/dataframe.py @@ -9,13 +9,15 @@ def constant_columns(dataframe: pd.DataFrame) -> list: return discrete_columns(dataframe, 2) -def discrete_columns(dataframe: pd.DataFrame, - max_classes: int = 10, - return_counts=False) -> list: +def discrete_columns( + dataframe: pd.DataFrame, max_classes: int = 10, return_counts: bool = False +) -> list: """ Find columns containing discrete values in a pandas dataframe. 
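    Example (hypothetical frame; "age" has too many distinct values to count
    as discrete under the default max_classes=10):

        >>> df = pd.DataFrame({"sex": ["m", "f"] * 6, "age": range(12)})
        >>> discrete_columns(df)
        ['sex']
        >>> discrete_columns(df, return_counts=True)
        [('sex', 2)]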
""" - return [(col, cnt) if return_counts else col - for col, vals in dataframe.items() - for cnt in [vals.nunique()] - if cnt < max_classes] + return [ + (col, cnt) if return_counts else col + for col, vals in dataframe.items() + for cnt in [vals.nunique()] + if cnt < max_classes + ] From 2a9aa2af051e3e527ec4a857a8951015a2144457 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Wed, 8 Mar 2023 13:24:45 +0100 Subject: [PATCH 13/95] convert assert to conditional AssertionErrors --- .../gaussian_multinomial_diffsuion.py | 54 ++++++++++++------- .../core/models/tabular_ddpm/modules.py | 27 ++++++---- .../plugins/core/models/tabular_ddpm/utils.py | 15 ++++-- src/synthcity/plugins/generic/plugin_ddpm.py | 5 +- 4 files changed, 66 insertions(+), 35 deletions(-) diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py index 7a2b358d..16498772 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py @@ -83,8 +83,10 @@ def __init__( ): super(GaussianMultinomialDiffusion, self).__init__() - assert multinomial_loss_type in ("vb_stochastic", "vb_all") - assert parametrization in ("x0", "direct") + if not (multinomial_loss_type in ("vb_stochastic", "vb_all")): + raise AssertionError + if not (parametrization in ("x0", "direct")): + raise AssertionError if verbose: self.print = print @@ -183,12 +185,15 @@ def __init__( .to(device) ) - assert log_add_exp(log_alpha, log_1_min_alpha).abs().sum().item() < 1.0e-5 - assert ( + if not (log_add_exp(log_alpha, log_1_min_alpha).abs().sum().item() < 1.0e-5): + raise AssertionError + if not ( log_add_exp(log_cumprod_alpha, log_1_min_cumprod_alpha).abs().sum().item() < 1e-5 - ) - assert (np.cumsum(log_alpha) - log_cumprod_alpha).abs().sum().item() < 1.0e-5 + ): + raise AssertionError + if not ((np.cumsum(log_alpha) - log_cumprod_alpha).abs().sum().item() < 1.0e-5): + raise AssertionError # Convert to float32 and register buffers. 
self.register_buffer("alphas", alphas.float().to(device)) @@ -233,14 +238,16 @@ def gaussian_q_mean_variance(self, x_start, t): def gaussian_q_sample(self, x_start, t, noise=None): if noise is None: noise = torch.randn_like(x_start) - assert noise.shape == x_start.shape + if not (noise.shape == x_start.shape): + raise AssertionError return ( extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise ) def gaussian_q_posterior_mean_variance(self, x_start, x_t, t): - assert x_start.shape == x_t.shape + if not (x_start.shape == x_t.shape): + raise AssertionError posterior_mean = ( extract(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract(self.posterior_mean_coef2, t, x_t.shape) * x_t @@ -249,12 +256,13 @@ def gaussian_q_posterior_mean_variance(self, x_start, x_t, t): posterior_log_variance_clipped = extract( self.posterior_log_variance_clipped, t, x_t.shape ) - assert ( + if not ( posterior_mean.shape[0] == posterior_variance.shape[0] == posterior_log_variance_clipped.shape[0] == x_start.shape[0] - ) + ): + raise AssertionError return posterior_mean, posterior_variance, posterior_log_variance_clipped def gaussian_p_mean_variance( @@ -270,7 +278,8 @@ def gaussian_p_mean_variance( model_kwargs = {} B, C = x.shape[:2] - assert t.shape == (B,) + if not (t.shape == (B,)): + raise AssertionError model_variance = torch.cat( [ @@ -296,9 +305,12 @@ def gaussian_p_mean_variance( x_start=pred_xstart, x_t=x, t=t ) - assert ( + if not ( model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape - ), f"{model_mean.shape}, {model_log_variance.shape}, {pred_xstart.shape}, {x.shape}" + ): + raise AssertionError( + f"{model_mean.shape}, {model_log_variance.shape}, {pred_xstart.shape}, {x.shape}" + ) return { "mean": model_mean, @@ -326,7 +338,8 @@ def _vb_terms_bpd( decoder_nll = -discretized_gaussian_log_likelihood( x_start, means=out["mean"], log_scales=0.5 * out["log_variance"] ) - assert decoder_nll.shape == x_start.shape + if not (decoder_nll.shape == x_start.shape): + raise AssertionError decoder_nll = mean_flat(decoder_nll) / np.log(2.0) # At the first timestep return the decoder NLL, @@ -377,7 +390,8 @@ def _gaussian_loss(self, model_out, x_start, x_t, t, noise, model_kwargs=None): return terms["loss"] def _predict_xstart_from_eps(self, x_t, t, eps=1e-8): - assert x_t.shape == eps.shape + if not (x_t.shape == eps.shape): + raise AssertionError return ( extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps @@ -450,8 +464,10 @@ def predict_start(self, model_out, log_x_t): # model_out = self._denoise_fn(x_t, t.to(x_t.device), **out_dict) - assert model_out.size(0) == log_x_t.size(0) - assert model_out.size(1) == self.num_classes.sum(), f"{model_out.size()}" + if not (model_out.size(0) == log_x_t.size(0)): + raise AssertionError + if not (model_out.size(1) == self.num_classes.sum()): + raise AssertionError(f"{model_out.size()}") log_pred = torch.empty_like(model_out) for ix in self.slices_for_classes: @@ -465,7 +481,6 @@ def q_posterior(self, log_x_start, log_x_t, t): # EV_log_qxt_x0 = self.q_pred(log_x_start, t) # self.print('sum exp', EV_log_qxt_x0.exp().sum(1).mean()) - # assert False # log_qxt_x0 = (log_x_t.exp() * EV_log_qxt_x0).sum(dim=1) t_minus_1 = t - 1 @@ -861,7 +876,8 @@ def gaussian_ddim_sample(self, noise, T, cond=None, eta=0.0): def gaussian_ddim_reverse_step( self, model_out_num, x, t, clip_denoised=False, eta=0.0 ): - 
assert eta == 0.0, "Eta must be zero." + if not (eta == 0.0): + raise AssertionError("Eta must be zero.") out = self.gaussian_p_mean_variance( model_out_num, x, diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/modules.py b/src/synthcity/plugins/core/models/tabular_ddpm/modules.py index 48310320..00cef021 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/modules.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/modules.py @@ -55,7 +55,8 @@ def _is_glu_activation(activation: ModuleType): def _all_or_none(values): - assert all(x is None for x in values) or all(x is not None for x in values) + if not (all(x is None for x in values) or all(x is not None for x in values)): + raise AssertionError def reglu(x: Tensor) -> Tensor: @@ -63,7 +64,8 @@ def reglu(x: Tensor) -> Tensor: References: [1] Noam Shazeer, "GLU Variants Improve Transformer", 2020 """ - assert x.shape[-1] % 2 == 0 + if not (x.shape[-1] % 2 == 0): + raise AssertionError a, b = x.chunk(2, dim=-1) return a * F.relu(b) @@ -73,7 +75,8 @@ def geglu(x: Tensor) -> Tensor: References: [1] Noam Shazeer, "GLU Variants Improve Transformer", 2020 """ - assert x.shape[-1] % 2 == 0 + if not (x.shape[-1] % 2 == 0): + raise AssertionError a, b = x.chunk(2, dim=-1) return a * F.gelu(b) @@ -185,8 +188,10 @@ def __init__( super().__init__() if isinstance(dropouts, float): dropouts = [dropouts] * len(d_layers) - assert len(d_layers) == len(dropouts) - assert activation not in ["ReGLU", "GEGLU"] + if not (len(d_layers) == len(dropouts)): + raise AssertionError + if activation in ["ReGLU", "GEGLU"]: + raise AssertionError self.blocks = nn.ModuleList( [ @@ -233,12 +238,14 @@ def make_baseline( References: * [gorishniy2021revisiting] Yury Gorishniy, Ivan Rubachev, Valentin Khrulkov, Artem Babenko, "Revisiting Deep Learning Models for Tabular Data", 2021 """ - assert isinstance(dropout, float) + if not (isinstance(dropout, float)): + raise AssertionError if len(d_layers) > 2: - assert len(set(d_layers[1:-1])) == 1, ( - "if d_layers contains more than two elements, then" - " all elements except for the first and the last ones must be equal." - ) + if not len(set(d_layers[1:-1])) == 1: + raise AssertionError( + "if d_layers contains more than two elements, then" + " all elements except for the first and the last ones must be equal." + ) return MLP( d_in=d_in, d_layers=d_layers, diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/utils.py b/src/synthcity/plugins/core/models/tabular_ddpm/utils.py index 61ec9eac..c2491e6e 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/utils.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/utils.py @@ -23,7 +23,8 @@ def normal_kl(mean1, logvar1, mean2, logvar2): if isinstance(obj, torch.Tensor): tensor = obj break - assert tensor is not None, "at least one argument must be a Tensor" + if tensor is None: + raise AssertionError("at least one argument must be a Tensor") # Force variances to be Tensors. Broadcasting helps convert scalars to # Tensors, but it does not work for torch.exp(). @@ -62,7 +63,8 @@ def discretized_gaussian_log_likelihood(x, *, means, log_scales): :param log_scales: the Gaussian log stddev Tensor. :return: a tensor like x of log probabilities (in nats). 
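    Note: the +/- 1.0/255.0 half-bin offsets in the body assume the inputs
    are 8-bit values rescaled to [-1, 1], so each discrete level is
    integrated over a bin of width 2/255 (with open bins at the extremes,
    handled by the x < -0.999 and x > 0.999 branches).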
""" - assert x.shape == means.shape == log_scales.shape + if not (x.shape == means.shape == log_scales.shape): + raise AssertionError centered_x = x - means inv_stdv = torch.exp(-log_scales) plus_in = inv_stdv * (centered_x + 1.0 / 255.0) @@ -79,7 +81,8 @@ def discretized_gaussian_log_likelihood(x, *, means, log_scales): x > 0.999, log_one_minus_cdf_min, torch.log(cdf_delta.clamp(min=1e-12)) ), ) - assert log_probs.shape == x.shape + if not (log_probs.shape == x.shape): + raise AssertionError return log_probs @@ -158,7 +161,8 @@ def log_sum_exp_by_classes(x, slices): res = torch.zeros_like(x) for ixs in slices: res[:, ixs] = torch.logsumexp(x[:, ixs], dim=1, keepdim=True) - assert x.size() == res.size() + if not (x.size() == res.size()): + raise AssertionError return res @@ -212,7 +216,8 @@ def __init__(self, *tensors, batch_size=32, shuffle=False): iterator is created out of this object. :returns: A FastTensorDataLoader. """ - assert all(t.shape[0] == tensors[0].shape[0] for t in tensors) + if not all(t.shape[0] == tensors[0].shape[0] for t in tensors): + raise AssertionError self.tensors = tensors self.dataset_len = self.tensors[0].shape[0] self.batch_size = batch_size diff --git a/src/synthcity/plugins/generic/plugin_ddpm.py b/src/synthcity/plugins/generic/plugin_ddpm.py index 36336419..b28c6ef3 100644 --- a/src/synthcity/plugins/generic/plugin_ddpm.py +++ b/src/synthcity/plugins/generic/plugin_ddpm.py @@ -157,7 +157,10 @@ def _fit( self, data: DataLoader, cond: Any = None, **kwargs: Any ) -> "TabDDPMPlugin": if self.is_classification: - assert cond is None + if cond is not None: + raise ValueError( + "cond is already given by the labels for classification" + ) _, cond = data.unpack() self._labels, self._cond_dist = np.unique(cond, return_counts=True) self._cond_dist = self._cond_dist / self._cond_dist.sum() From 246cd5ba0f3bbbbca2ac0c442472b41a10e715a1 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Fri, 10 Mar 2023 15:39:38 +0100 Subject: [PATCH 14/95] added an auto annotation tool --- src/auto-anno.py | 322 +++++++++++++++++++++++++++++++++++++++++++++++ src/tmp.py | 12 ++ 2 files changed, 334 insertions(+) create mode 100644 src/auto-anno.py create mode 100644 src/tmp.py diff --git a/src/auto-anno.py b/src/auto-anno.py new file mode 100644 index 00000000..a56491ca --- /dev/null +++ b/src/auto-anno.py @@ -0,0 +1,322 @@ +import os +import re +import sys +import ast +import runpy +import shutil +import inspect +import argparse +import cloudpickle +from typing import * +from numbers import * +from itertools import product, islice + + +TYPE_MAP = { # maps of type annotations + Integral: int, + Real: float, + Complex: complex, + object: Any +} + +# MOD_MAP = { # maps module names to their common aliases +# 'numpy': 'np', +# 'pandas': 'pd' +# } + + +def get_type(x): + """ + Examples: + >>> get_type(None) + >>> get_type([]) + list + >>> get_type([1, 2, 3]) + list[int] + >>> get_type([1, 'a']) + list + >>> get_type(dict(a=0.9, b=0.1)) + dict[str, float] + >>> get_type(dict(a=0.9, b='a')) + dict[str, typing.Any] + >>> get_type({1, 2.0, None}) + set[typing.Optional[float]] + >>> get_type(str) + type + >>> get_type(True) + bool + >>> get_type((1, 2.0)) + tuple[int, float] + >>> get_type(tuple(range(9))) + tuple[int, ...] 
+ >>> get_type(iter(range(9))) + typing.Iterator[int] + >>> get_type((i if i % 2 else None for i in range(9))) + typing.Iterator[typing.Optional[int]] + """ + def dispatch(T, *xs, maxlen=5): + xs = [list(map(get_type, l)) for l in xs] + if min(map(len, xs)) == 0: # empty collection + return T + ts = tuple(map(get_common_suptype, xs)) + if len(ts) == 1: + t = ts[0] + elif len(ts) > maxlen: + t = get_common_suptype(ts) + else: + t = ts + if t is object: + return T + elif len(ts) > maxlen: + return T[t, ...] + else: + return T[t] + if x is None: + return None + if inspect.isfunction(x) or inspect.ismethod(x): + return Callable + for t in (list, set, frozenset): + if isinstance(x, t): + return dispatch(t, x) + if isinstance(x, tuple): + return dispatch(tuple, *[[a] for a in x], maxlen=4) + if isinstance(x, dict): + return dispatch(dict, x.keys(), x.values()) + if hasattr(x, '__next__'): + return dispatch(Iterator, islice(x, 10)) + if isinstance(x, bool): + return bool + if isinstance(x, Integral): + return Integral + if isinstance(x, Real): + return Real + if isinstance(x, Complex): + return Complex + return type(x) + + +def get_suptypes(t): + def suptypes_of_subscripted_type(t): + T = t.__origin__ + args = t.__args__ + sts = [T[ts] for ts in product(*map(get_suptypes, args)) + if not all(t in (object, ...) for t in ts)] + return sts + T.mro() + if inspect.isclass(t) and issubclass(t, type): + sts = list(t.__mro__) + elif hasattr(t, '__origin__'): + sts = suptypes_of_subscripted_type(t) + elif isinstance(t, type): + sts = list(t.mro()) + elif t == Ellipsis: + sts = [t] + else: # None, Callable, Iterator, etc. + sts = [t, object] + return sts + + +def get_common_suptype(ts, type_map=None): + """Find the most specific common supertype of a collection of types.""" + ts = set(ts) + assert ts, "empty collection of types" + + optional = any(t is None for t in ts) + ts.discard(None) + + if not ts: + return None + + sts = [get_suptypes(t) for t in ts] + for t in min(sts, key=len): + if all(t in ts for ts in sts): + break + else: + return Any + + if type_map: + t = type_map.get(t, t) + if optional: + t = Optional[t] + return t + + +def test(): + def get_anno(xs): + return get_common_suptype(map(get_type, xs)) + recs = [ + [None, 1, 1.2], + [{1: 2}, {1: 2.2}, {1: 2.1, 3: 4}], + [(x for x in range(10)), iter(range(10))], + ] + for xs in recs: + print(get_anno(xs)) + + +def get_full_name(x, global_vars=()): + if x in (None, Ellipsis): + return repr(x) + mod = x.__module__ + try: + name = getattr(x, '__qualname__', x.__name__) + except AttributeError: + print("WARNING: failed to get name of", x, "in", mod) + name = repr(x) + if mod != 'builtins' and x not in global_vars: + name = mod + '.' 
+ name + return name + + +def profiler(frame, event, arg): + if event in ('call', 'return'): + filename = os.path.abspath(frame.f_code.co_filename) + funcname = frame.f_code.co_name + if filename.endswith('.py') and funcname[0] != '<' and CWD in filename: + recs = TYPE_RECS.setdefault(filename, {}) + if 'globals' not in recs: + recs['globals'] = set(frame.f_globals) + if event == 'call': + arg_types = {var: get_type(val) for var, val in frame.f_locals.items()} + lineno = frame.f_lineno + else: + arg_types = {'return': get_type(arg)} + lineno = max(ln for ln, fn in recs if fn == funcname and + ln <= frame.f_lineno and 'return' not in recs[ln, fn]) + rec = recs.setdefault((lineno, funcname), {}) + for k, v in arg_types.items(): + rec.setdefault(k, []).append(v) + return profiler + + +#*** run the script N times to collect type records *** + +parser = argparse.ArgumentParser() +parser.add_argument('script', help='the script to run') +parser.add_argument('-n', type=int, default=1, + help='number of times to run the script') +parser.add_argument('-v', '--verbose', action='store_true') +parser.add_argument('-i', action='store_true', + help='prompt before overwriting each script') +parser.add_argument('--log', default='type_records.pkl', + help='output file for type records') +parser.add_argument('--cwd', default=None, help='working directory') +parser.add_argument('--backup', action='store_true', + help='backup the scripts before annotating them') + +ARGS = parser.parse_args() +DIR = os.path.dirname(os.path.abspath(ARGS.script)) +CWD = ARGS.cwd or DIR + +try: + TYPE_RECS = cloudpickle.load(open(ARGS.log, 'rb')) +except: + TYPE_RECS = {} # {filename: {(lineno, funcname): {argname: [type]}}}} + +sys.path.extend([DIR, CWD]) +sys.setprofile(profiler) + +for _ in range(ARGS.n): + runpy.run_path(sys.argv[1], run_name='__main__') + +sys.setprofile(None) + +with open(ARGS.log, 'wb') as f: + cloudpickle.dump(TYPE_RECS, f) + + +#*** determine the type annotations from the type records *** + +def get_type_annotations(type_records=TYPE_RECS): + def recurse(x): + if isinstance(x, dict): + return {k: recurse(v) for k, v in x.items()} + elif isinstance(x, list): + return get_common_suptype(x, type_map=TYPE_MAP) + else: + return x + return recurse(type_records) + +annotations = get_type_annotations() + +# if ARGS.verbose: +# for path, recs in annotations.items(): +# print(path) +# for (lineno, funcname), arg_types in recs.items(): +# print(f' {funcname} (Ln{lineno}):') +# print(' ' + ', '.join(f'{k}: {get_full_name(v)}' for k, v in arg_types.items())) + + +#*** write the type annotations to the script *** + +def find_defs_in_ast(tree): + def recurse(node): # should be in order + if isinstance(node, ast.FunctionDef): + yield node + for child in ast.iter_child_nodes(node): + yield from recurse(child) + return list(recurse(tree)) + +def annotate_def(def_node, annotations) -> bool: + key = (def_node.lineno, def_node.name) + if key not in annotations: + return False # no type records for this function + annos = annotations[key] + l = def_node.args + all_args = l.posonlyargs + l.args + l.kwonlyargs + changed = False + for a in all_args: + if a.annotation is None and a.arg != 'self': + anno = get_full_name(annos[a.arg], annotations['globals']) + a.annotation = ast.Name(anno) + changed = True + if def_node.returns is None: + anno = get_full_name(annos['return'], annotations['globals']) + def_node.returns = ast.Name(anno) + def_node.returns.lineno = max(a.lineno for a in all_args) + changed = True + return changed + +# 
def get_aliases(ast): +# # TODO: handle import aliases +# ims = [i for i in ast.body if isinstance(i, ast.ImportFrom)] +# aliases = {} +# for im in ims: + +def annotate_script(filepath, verbose=ARGS.verbose): + s = open(filepath, encoding='utf8').read() + lines = s.splitlines() + defs = [d for d in find_defs_in_ast(ast.parse(s)) + if annotate_def(d, annotations[filepath])] + if not defs: + return None + if verbose: + print('Adding annotations to', filepath, '\n') + starts, ends, sigs = [], [], [] + for node in defs: + ln0, ln1 = node.lineno, node.body[0].lineno + starts.append(ln0 - 1) + ends.append(ln1 - 1) + node.body = [] # only keep signature + line = re.match('\s*', lines[ln0-1])[0] + ast.unparse(node) # keep indentation + sigs.append(line) + if verbose: + print('Old:', *lines[ln0-1:ln1], sep='\n') + print('>' * 50) + print('New:', sigs[-1], sep='\n') + print('-' * 50) + new_lines = [] + for s, e, sig in zip([None] + ends, starts + [None], sigs + [None]): + new_lines.extend(lines[s:e]) + if sig is not None: + new_lines.append(sig) + return '\n'.join(new_lines) + + +for path in annotations: + s = annotate_script(path) + if s is None: + continue + if ARGS.backup: + shutil.copy(path, path + '.bak') + if not ARGS.i or input(f"Overwrite {path}?").lower() == 'y': + with open(path, 'w', encoding='utf8') as f: + f.write(s) diff --git a/src/tmp.py b/src/tmp.py new file mode 100644 index 00000000..74bf87ad --- /dev/null +++ b/src/tmp.py @@ -0,0 +1,12 @@ +from synthcity.plugins import Plugins +from sklearn.datasets import load_iris +from synthcity.plugins.core.dataloader import GenericDataLoader + +# loadDebugger() +X, y = load_iris(as_frame = True, return_X_y = True) +X = GenericDataLoader(X.assign(target = y), target_column="target") +plugin = Plugins().get("ddpm", n_iter=3, is_classification=True, + num_timesteps=100, verbose=1) +plugin.fit(X) +X_syn = plugin.model.generate(50) +print(X_syn) From f458bb45770268bb653129c4ff9ba38091fd8214 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Fri, 10 Mar 2023 20:42:46 +0100 Subject: [PATCH 15/95] update auto-anno and generate annotations --- src/auto-anno.py | 263 +++++++++---- .../core/models/tabular_ddpm/__init__.py | 12 +- .../gaussian_multinomial_diffsuion.py | 347 ++++++++---------- .../core/models/tabular_ddpm/modules.py | 46 +-- .../plugins/core/models/tabular_ddpm/utils.py | 72 ++-- src/tmp.py | 19 +- 6 files changed, 414 insertions(+), 345 deletions(-) diff --git a/src/auto-anno.py b/src/auto-anno.py index a56491ca..96225e56 100644 --- a/src/auto-anno.py +++ b/src/auto-anno.py @@ -1,22 +1,30 @@ +# flake8: noqa +# mypy: ignore-errors + +# stdlib +import argparse +import ast +import importlib +import inspect +import io import os import re -import sys -import ast import runpy import shutil -import inspect -import argparse -import cloudpickle -from typing import * +import sys +from collections.abc import Callable, Iterator +from itertools import islice, product from numbers import * -from itertools import product, islice +from typing import Any, Optional, Union +# third party +import cloudpickle TYPE_MAP = { # maps of type annotations Integral: int, Real: float, Complex: complex, - object: Any + object: Any, } # MOD_MAP = { # maps module names to their common aliases @@ -54,9 +62,10 @@ def get_type(x): >>> get_type((i if i % 2 else None for i in range(9))) typing.Iterator[typing.Optional[int]] """ + def dispatch(T, *xs, maxlen=5): xs = [list(map(get_type, l)) for l in xs] - if min(map(len, xs)) == 0: # empty collection + if not 
xs or min(map(len, xs)) == 0: # empty collection return T ts = tuple(map(get_common_suptype, xs)) if len(ts) == 1: @@ -71,6 +80,7 @@ def dispatch(T, *xs, maxlen=5): return T[t, ...] else: return T[t] + if x is None: return None if inspect.isfunction(x) or inspect.ismethod(x): @@ -82,7 +92,9 @@ def dispatch(T, *xs, maxlen=5): return dispatch(tuple, *[[a] for a in x], maxlen=4) if isinstance(x, dict): return dispatch(dict, x.keys(), x.values()) - if hasattr(x, '__next__'): + if isinstance(x, io.IOBase): + return type(x) + if isinstance(x, Iterator): #! may be too general return dispatch(Iterator, islice(x, 10)) if isinstance(x, bool): return bool @@ -99,12 +111,16 @@ def get_suptypes(t): def suptypes_of_subscripted_type(t): T = t.__origin__ args = t.__args__ - sts = [T[ts] for ts in product(*map(get_suptypes, args)) - if not all(t in (object, ...) for t in ts)] - return sts + T.mro() + sts = [ + T[ts] + for ts in product(*map(get_suptypes, args)) + if not all(t in (object, ...) for t in ts) + ] + return sts + get_suptypes(T) + if inspect.isclass(t) and issubclass(t, type): sts = list(t.__mro__) - elif hasattr(t, '__origin__'): + elif hasattr(t, "__origin__"): sts = suptypes_of_subscripted_type(t) elif isinstance(t, type): sts = list(t.mro()) @@ -119,10 +135,10 @@ def get_common_suptype(ts, type_map=None): """Find the most specific common supertype of a collection of types.""" ts = set(ts) assert ts, "empty collection of types" - + optional = any(t is None for t in ts) ts.discard(None) - + if not ts: return None @@ -132,7 +148,7 @@ def get_common_suptype(ts, type_map=None): break else: return Any - + if type_map: t = type_map.get(t, t) if optional: @@ -143,6 +159,7 @@ def get_common_suptype(ts, type_map=None): def test(): def get_anno(xs): return get_common_suptype(map(get_type, xs)) + recs = [ [None, 1, 1.2], [{1: 2}, {1: 2.2}, {1: 2.1, 3: 4}], @@ -152,62 +169,119 @@ def get_anno(xs): print(get_anno(xs)) -def get_full_name(x, global_vars=()): - if x in (None, Ellipsis): - return repr(x) - mod = x.__module__ - try: - name = getattr(x, '__qualname__', x.__name__) - except AttributeError: - print("WARNING: failed to get name of", x, "in", mod) - name = repr(x) - if mod != 'builtins' and x not in global_vars: - name = mod + '.' + name - return name +def get_full_name(x, global_vars={}): + """ + Examples: + >>> import numpy as np + >>> G = lambda: {id(v): k for k, v in globals().items() if k[0] != '_'} + >>> get_full_name(np.ndarray, G()) + 'np.ndarray' + >>> import scipy as sp + >>> get_full_name(sp.sparse.csr_matrix, G()) + 'sp.sparse.csr_matrix' + >>> import scipy.sparse as sps + >>> get_full_name(sparse.csr_matrix, G()) + 'sps.csr_matrix' + """ + + def get_name(x): + if x.__module__ == "typing": + return x._name + return getattr(x, "__qualname__", x.__name__) + + if x is Ellipsis: + return "..." 
+ if x is None: + return "None" + if id(x) in global_vars: + return global_vars[id(x)] + if x.__module__ == "builtins": + return x.__name__ + # handle the subscripted types + if hasattr(x, "__origin__"): + T, args = x.__origin__, x.__args__ + if T is Union and len(args) == 2 and args[1] is type(None): + T, args = Optional, args[:1] + T = get_full_name(T, global_vars) + args = ", ".join(get_full_name(a, global_vars) for a in args) + return f"{T}[{args}]" + # find the module alias + names = (f"{x.__module__}.{get_name(x)}").split(".")[::-1] + mods = [importlib.import_module(names[-1])] + print(names) + for name in names[-2::-1]: + print(name, mods[-1]) + mods.append(getattr(mods[-1], name)) + mods = mods[::-1] + # find the first module that is imported + for i, (name, mod) in enumerate(zip(names, mods)): + if id(mod) in global_vars: + names = names[:i] + [global_vars[id(mod)]] + mods = mods[: i + 1] + break + # skip useless intermediate modules + for k in range(1, len(names)): + if k >= len(names) - 1: + break + for i, (name, mod) in enumerate(zip(names, mods)): + if i + 1 + k >= len(names): + break + if hasattr(mods[-k], name): + names = names[: i + 1] + names[-k:] + mods = mods[: i + 1] + mods[-k:] + break + return ".".join(names[::-1]) def profiler(frame, event, arg): - if event in ('call', 'return'): + if event in ("call", "return"): filename = os.path.abspath(frame.f_code.co_filename) funcname = frame.f_code.co_name - if filename.endswith('.py') and funcname[0] != '<' and CWD in filename: + if filename.endswith(".py") and funcname[0] != "<" and CWD in filename: recs = TYPE_RECS.setdefault(filename, {}) - if 'globals' not in recs: - recs['globals'] = set(frame.f_globals) - if event == 'call': + if "globals" not in recs: + recs["globals", None] = { + id(v): k for k, v in frame.f_globals.items() if k[0] != "_" + } + if event == "call": + # print(filename, funcname, frame.f_lineno, frame.f_locals) arg_types = {var: get_type(val) for var, val in frame.f_locals.items()} lineno = frame.f_lineno else: - arg_types = {'return': get_type(arg)} - lineno = max(ln for ln, fn in recs if fn == funcname and - ln <= frame.f_lineno and 'return' not in recs[ln, fn]) + arg_types = {"return": get_type(arg)} + #! 
assumes no nested function has the same name as the outer function + lineno = max( + ln for ln, fn in recs if fn == funcname and ln <= frame.f_lineno + ) rec = recs.setdefault((lineno, funcname), {}) for k, v in arg_types.items(): rec.setdefault(k, []).append(v) return profiler -#*** run the script N times to collect type records *** +# *** run the script N times to collect type records *** parser = argparse.ArgumentParser() -parser.add_argument('script', help='the script to run') -parser.add_argument('-n', type=int, default=1, - help='number of times to run the script') -parser.add_argument('-v', '--verbose', action='store_true') -parser.add_argument('-i', action='store_true', - help='prompt before overwriting each script') -parser.add_argument('--log', default='type_records.pkl', - help='output file for type records') -parser.add_argument('--cwd', default=None, help='working directory') -parser.add_argument('--backup', action='store_true', - help='backup the scripts before annotating them') +parser.add_argument("script", help="the script to run") +parser.add_argument("-n", type=int, default=1, help="number of times to run the script") +parser.add_argument("-v", "--verbose", action="store_true") +parser.add_argument( + "-i", action="store_true", help="prompt before overwriting each script" +) +parser.add_argument( + "--log", default="type_records.pkl", help="output file for type records" +) +parser.add_argument("--cwd", default=None, help="working directory") +parser.add_argument( + "--backup", action="store_true", help="backup the scripts before annotating them" +) ARGS = parser.parse_args() DIR = os.path.dirname(os.path.abspath(ARGS.script)) CWD = ARGS.cwd or DIR try: - TYPE_RECS = cloudpickle.load(open(ARGS.log, 'rb')) + TYPE_RECS = cloudpickle.load(open(ARGS.log, "rb")) except: TYPE_RECS = {} # {filename: {(lineno, funcname): {argname: [type]}}}} @@ -215,15 +289,16 @@ def profiler(frame, event, arg): sys.setprofile(profiler) for _ in range(ARGS.n): - runpy.run_path(sys.argv[1], run_name='__main__') + runpy.run_path(sys.argv[1], run_name="__main__") sys.setprofile(None) -with open(ARGS.log, 'wb') as f: +with open(ARGS.log, "wb") as f: cloudpickle.dump(TYPE_RECS, f) -#*** determine the type annotations from the type records *** +# *** determine the type annotations from the type records *** + def get_type_annotations(type_records=TYPE_RECS): def recurse(x): @@ -233,8 +308,10 @@ def recurse(x): return get_common_suptype(x, type_map=TYPE_MAP) else: return x + return recurse(type_records) + annotations = get_type_annotations() # if ARGS.verbose: @@ -245,7 +322,8 @@ def recurse(x): # print(' ' + ', '.join(f'{k}: {get_full_name(v)}' for k, v in arg_types.items())) -#*** write the type annotations to the script *** +# *** write the type annotations to the script *** + def find_defs_in_ast(tree): def recurse(node): # should be in order @@ -253,62 +331,91 @@ def recurse(node): # should be in order yield node for child in ast.iter_child_nodes(node): yield from recurse(child) + return list(recurse(tree)) -def annotate_def(def_node, annotations) -> bool: + +def annotate_def(def_node: ast.FunctionDef, annotations) -> bool: key = (def_node.lineno, def_node.name) if key not in annotations: return False # no type records for this function annos = annotations[key] - l = def_node.args - all_args = l.posonlyargs + l.args + l.kwonlyargs + A = def_node.args + all_args = A.posonlyargs + A.args + A.kwonlyargs + defaults = dict(zip(A.args + A.kwonlyargs, A.defaults + A.kw_defaults)) + 
all_args.extend(filter(None, [A.vararg, A.kwarg])) changed = False + global_vars = annotations["globals", None] for a in all_args: - if a.annotation is None and a.arg != 'self': - anno = get_full_name(annos[a.arg], annotations['globals']) + if a.annotation is None and a.arg != "self": + t = annos[a.arg] + if a == A.vararg: + if t is tuple: + t = Any + else: + assert t.__origin__ is tuple + if ( + len(t.__args__) == 1 + or len(t.__args__) == 2 + and t.__args__[1] is Ellipsis + ): + t = t.__args__[0] + else: + t = get_common_suptype(t.__args__) + elif a == A.kwarg: + assert t.__origin__ is dict + t = t.__args__[1] + if t is None: + t = Any + if a.arg in defaults: + t = Union[t, get_type(defaults[a.arg])] + anno = get_full_name(t, global_vars) a.annotation = ast.Name(anno) changed = True if def_node.returns is None: - anno = get_full_name(annos['return'], annotations['globals']) + if "return" not in annos: + print("No return type for", key, annos) + exit() + anno = get_full_name(annos["return"], global_vars) def_node.returns = ast.Name(anno) def_node.returns.lineno = max(a.lineno for a in all_args) changed = True return changed -# def get_aliases(ast): -# # TODO: handle import aliases -# ims = [i for i in ast.body if isinstance(i, ast.ImportFrom)] -# aliases = {} -# for im in ims: def annotate_script(filepath, verbose=ARGS.verbose): - s = open(filepath, encoding='utf8').read() + s = open(filepath, encoding="utf8").read() lines = s.splitlines() - defs = [d for d in find_defs_in_ast(ast.parse(s)) - if annotate_def(d, annotations[filepath])] + defs = [ + d + for d in find_defs_in_ast(ast.parse(s)) + if annotate_def(d, annotations[filepath]) + ] if not defs: return None if verbose: - print('Adding annotations to', filepath, '\n') + print("Adding annotations to", filepath, "\n") starts, ends, sigs = [], [], [] for node in defs: ln0, ln1 = node.lineno, node.body[0].lineno starts.append(ln0 - 1) ends.append(ln1 - 1) node.body = [] # only keep signature - line = re.match('\s*', lines[ln0-1])[0] + ast.unparse(node) # keep indentation + line = re.match(r"\s*", lines[ln0 - 1])[0] + ast.unparse( + node + ) # keep indentation sigs.append(line) if verbose: - print('Old:', *lines[ln0-1:ln1], sep='\n') - print('>' * 50) - print('New:', sigs[-1], sep='\n') - print('-' * 50) + print("Old:", *lines[ln0 - 1 : ln1 - 1], sep="\n") + print(">" * 50) + print("New:", sigs[-1], sep="\n") + print("-" * 50) new_lines = [] for s, e, sig in zip([None] + ends, starts + [None], sigs + [None]): new_lines.extend(lines[s:e]) if sig is not None: new_lines.append(sig) - return '\n'.join(new_lines) + return "\n".join(new_lines) for path in annotations: @@ -316,7 +423,7 @@ def annotate_script(filepath, verbose=ARGS.verbose): if s is None: continue if ARGS.backup: - shutil.copy(path, path + '.bak') - if not ARGS.i or input(f"Overwrite {path}?").lower() == 'y': - with open(path, 'w', encoding='utf8') as f: + shutil.copy(path, path + ".bak") + if not ARGS.i or input(f"Overwrite {path}?").lower() == "y": + with open(path, "w", encoding="utf8") as f: f.write(s) diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py index 98ac9619..1b6df0cd 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py @@ -1,9 +1,8 @@ -# mypy: allow-untyped-defs, allow-untyped-calls # flake8: noqa: F401 # stdlib from copy import deepcopy -from typing import Any, Optional, Union +from typing import Any, 
Iterator, Optional, Union # third party import numpy as np @@ -50,13 +49,18 @@ def __init__( self.__dict__.update(locals()) del self.self - def _anneal_lr(self, epoch): + def _anneal_lr(self, epoch: int) -> None: frac_done = epoch / self.n_iter lr = self.lr * (1 - frac_done) for param_group in self.optimizer.param_groups: param_group["lr"] = lr - def _update_ema(self, target_params, source_params, rate=0.999): + def _update_ema( + self, + target_params: Iterator[nn.Parameter], + source_params: Iterator[nn.Parameter], + rate: float = 0.999, + ) -> None: """ Update target parameters to be closer to those of source parameters using an exponential moving average. diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py index 16498772..145f81da 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py @@ -4,23 +4,26 @@ - https://github.com/ehoogeboom/multinomial_diffusion - https://github.com/lucidrains/denoising-diffusion-pytorch/blob/5989f4c77eafcdc6be0fb4739f0f277a6dd7f7d8/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py#L281 """ -# mypy: disable-error-code=no-untyped-def # flake8: noqa: F405 # stdlib import math +from typing import Any, Callable, Optional # third party import numpy as np import torch import torch.nn.functional as F +from torch import Tensor, nn # synthcity relative from .modules import MLPDiffusion, ResNetDiffusion -from .utils import * # noqa: F403 +from .utils import * # noqa: F401, F403 -def get_named_beta_schedule(schedule_name, num_diffusion_timesteps): +def get_named_beta_schedule( + schedule_name: str, num_diffusion_timesteps: int +) -> np.ndarray: """ Get a pre-defined beta schedule for the given name. The beta schedule library consists of beta schedules which remain similar @@ -46,7 +49,9 @@ def get_named_beta_schedule(schedule_name, num_diffusion_timesteps): raise NotImplementedError(f"unknown beta schedule: {schedule_name}") -def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999): +def betas_for_alpha_bar( + num_diffusion_timesteps: int, alpha_bar: Callable, max_beta: float = 0.999 +) -> np.ndarray: """ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. 
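
For orientation while reading this diff: `betas_for_alpha_bar` follows the standard improved-DDPM construction, deriving each beta from the ratio of consecutive values of the target cumulative-alpha curve. A minimal self-contained sketch, assuming the usual cosine `alpha_bar` from Nichol & Dhariwal (2021) that the "cosine" branch of `get_named_beta_schedule` plugs in (the lambda below is an assumption, not quoted from this patch):

import math

import numpy as np


def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
    # beta_t = 1 - alpha_bar(t/T) / alpha_bar((t-1)/T), clipped to max_beta,
    # so that the cumulative product of (1 - beta_s) tracks the alpha_bar curve.
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return np.array(betas)


# assumed cosine schedule: alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2
betas = betas_for_alpha_bar(
    1000, lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
)

The clipping at max_beta = 0.999 keeps the final few steps from degenerating to beta = 1.
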
@@ -68,20 +73,19 @@ def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999): class GaussianMultinomialDiffusion(torch.nn.Module): def __init__( self, - num_numerical_features, - num_categorical_features, - model_type="mlp", - model_params=None, - num_timesteps=1000, - gaussian_loss_type="mse", - gaussian_parametrization="eps", - multinomial_loss_type="vb_stochastic", - parametrization="x0", - scheduler="cosine", - device=torch.device("cpu"), - verbose=0, - ): - + num_numerical_features: int, + num_categorical_features: tuple, + model_type: str = "mlp", + model_params: Optional[dict] = None, + num_timesteps: int = 1000, + gaussian_loss_type: str = "mse", + gaussian_parametrization: str = "eps", + multinomial_loss_type: str = "vb_stochastic", + parametrization: str = "x0", + scheduler: str = "cosine", + device: torch.device = torch.device("cpu"), + verbose: int = 0, + ) -> None: super(GaussianMultinomialDiffusion, self).__init__() if not (multinomial_loss_type in ("vb_stochastic", "vb_all")): raise AssertionError @@ -129,7 +133,7 @@ def __init__( elif model_type == "resnet": self.denoise_fn = ResNetDiffusion(**model_params) else: - raise "Unknown diffusion model type!" + raise NotImplementedError(f"unknown model type: {model_type}") self.gaussian_loss_type = gaussian_loss_type self.gaussian_parametrization = gaussian_parametrization @@ -229,13 +233,17 @@ def __init__( self.register_buffer("Lt_count", torch.zeros(num_timesteps)) # Gaussian part - def gaussian_q_mean_variance(self, x_start, t): + def gaussian_q_mean_variance( + self, x_start: Tensor, t: Tensor + ) -> tuple[Tensor, Tensor, Tensor]: mean = extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract(self.log_1_min_cumprod_alpha, t, x_start.shape) return mean, variance, log_variance - def gaussian_q_sample(self, x_start, t, noise=None): + def gaussian_q_sample( + self, x_start: Tensor, t: Tensor, noise: Optional[Tensor] = None + ) -> Tensor: if noise is None: noise = torch.randn_like(x_start) if not (noise.shape == x_start.shape): @@ -245,7 +253,9 @@ def gaussian_q_sample(self, x_start, t, noise=None): + extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise ) - def gaussian_q_posterior_mean_variance(self, x_start, x_t, t): + def gaussian_q_posterior_mean_variance( + self, x_start: Tensor, x_t: Tensor, t: Tensor + ) -> tuple[Tensor, Tensor, Tensor]: if not (x_start.shape == x_t.shape): raise AssertionError posterior_mean = ( @@ -267,13 +277,13 @@ def gaussian_q_posterior_mean_variance(self, x_start, x_t, t): def gaussian_p_mean_variance( self, - model_output, - x, - t, - clip_denoised=False, - denoised_fn=None, - model_kwargs=None, - ): + model_output: Tensor, + x: Tensor, + t: Tensor, + clip_denoised: bool = False, + denoised_fn: Optional[nn.Module] = None, + model_kwargs: Any = None, + ) -> dict: if model_kwargs is None: model_kwargs = {} @@ -320,8 +330,14 @@ def gaussian_p_mean_variance( } def _vb_terms_bpd( - self, model_output, x_start, x_t, t, clip_denoised=False, model_kwargs=None - ): + self, + model_output: Tensor, + x_start: Tensor, + x_t: Tensor, + t: Tensor, + clip_denoised: bool = False, + model_kwargs: Optional[dict] = None, + ) -> dict: ( true_mean, _, @@ -352,7 +368,7 @@ def _vb_terms_bpd( "true_mean": true_mean, } - def _prior_gaussian(self, x_start): + def _prior_gaussian(self, x_start: Tensor) -> Tensor: """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. 
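
As a reading aid for the Gaussian half of the hunks above: `gaussian_q_sample` draws from the closed-form marginal q(x_t | x_0) = N(sqrt(alpha_bar_t) * x_0, (1 - alpha_bar_t) * I), which is what its two `extract(...)` calls compute. A standalone sketch with `extract` replaced by plain indexing (the function and argument names below are illustrative, not the plugin's API):

import torch


def q_sample(x_start, t, alphas_cumprod, noise=None):
    # x_t = sqrt(abar_t) * x_0 + sqrt(1 - abar_t) * eps, with eps ~ N(0, I);
    # t is a LongTensor of per-sample timesteps, shape (batch,).
    if noise is None:
        noise = torch.randn_like(x_start)
    shape = (-1,) + (1,) * (x_start.ndim - 1)  # broadcast coefficients per sample
    abar_t = alphas_cumprod[t].view(shape)
    return abar_t.sqrt() * x_start + (1.0 - abar_t).sqrt() * noise


# e.g. alphas_cumprod = torch.cumprod(1.0 - torch.as_tensor(betas), dim=0)

This is why the class precomputes `sqrt_alphas_cumprod` and `sqrt_one_minus_alphas_cumprod` as buffers: sampling any x_t costs one gather and one broadcast multiply instead of a t-step loop.
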
@@ -370,7 +386,15 @@ def _prior_gaussian(self, x_start): ) return mean_flat(kl_prior) / np.log(2.0) - def _gaussian_loss(self, model_out, x_start, x_t, t, noise, model_kwargs=None): + def _gaussian_loss( + self, + model_out: Tensor, + x_start: Tensor, + x_t: Tensor, + t: Tensor, + noise: Tensor, + model_kwargs: Any = None, + ) -> Tensor: if model_kwargs is None: model_kwargs = {} @@ -389,7 +413,9 @@ def _gaussian_loss(self, model_out, x_start, x_t, t, noise, model_kwargs=None): return terms["loss"] - def _predict_xstart_from_eps(self, x_t, t, eps=1e-8): + def _predict_xstart_from_eps( + self, x_t: Tensor, t: Tensor, eps: Tensor = 1e-08 + ) -> Tensor: if not (x_t.shape == eps.shape): raise AssertionError return ( @@ -397,20 +423,22 @@ def _predict_xstart_from_eps(self, x_t, t, eps=1e-8): - extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps ) - def _predict_eps_from_xstart(self, x_t, t, pred_xstart): + def _predict_eps_from_xstart( + self, x_t: Tensor, t: Tensor, pred_xstart: Tensor + ) -> Tensor: return ( extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart ) / extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def gaussian_p_sample( self, - model_out, - x, - t, - clip_denoised=False, - denoised_fn=None, - model_kwargs=None, - ): + model_out: Tensor, + x: Tensor, + t: Tensor, + clip_denoised: bool = False, + denoised_fn: Any = None, + model_kwargs: Any = None, + ) -> dict: out = self.gaussian_p_mean_variance( model_out, x, @@ -431,11 +459,11 @@ def gaussian_p_sample( # Multinomial part - def multinomial_kl(self, log_prob1, log_prob2): + def multinomial_kl(self, log_prob1: Tensor, log_prob2: Tensor) -> Tensor: kl = (log_prob1.exp() * (log_prob1 - log_prob2)).sum(dim=1) return kl - def q_pred_one_timestep(self, log_x_t, t): + def q_pred_one_timestep(self, log_x_t: Tensor, t: Tensor) -> Tensor: log_alpha_t = extract(self.log_alpha, t, log_x_t.shape) log_1_min_alpha_t = extract(self.log_1_min_alpha, t, log_x_t.shape) @@ -447,7 +475,7 @@ def q_pred_one_timestep(self, log_x_t, t): return log_probs - def q_pred(self, log_x_start, t): + def q_pred(self, log_x_start: Tensor, t: Tensor) -> Tensor: log_cumprod_alpha_t = extract(self.log_cumprod_alpha, t, log_x_start.shape) log_1_min_cumprod_alpha = extract( self.log_1_min_cumprod_alpha, t, log_x_start.shape @@ -460,10 +488,7 @@ def q_pred(self, log_x_start, t): return log_probs - def predict_start(self, model_out, log_x_t): - - # model_out = self._denoise_fn(x_t, t.to(x_t.device), **out_dict) - + def predict_start(self, model_out: Tensor, log_x_t: Tensor) -> Tensor: if not (model_out.size(0) == log_x_t.size(0)): raise AssertionError if not (model_out.size(1) == self.num_classes.sum()): @@ -474,15 +499,7 @@ def predict_start(self, model_out, log_x_t): log_pred[:, ix] = F.log_softmax(model_out[:, ix], dim=1) return log_pred - def q_posterior(self, log_x_start, log_x_t, t): - # q(xt-1 | xt, x0) = q(xt | xt-1, x0) * q(xt-1 | x0) / q(xt | x0) - # where q(xt | xt-1, x0) = q(xt | xt-1). 
- - # EV_log_qxt_x0 = self.q_pred(log_x_start, t) - - # self.print('sum exp', EV_log_qxt_x0.exp().sum(1).mean()) - - # log_qxt_x0 = (log_x_t.exp() * EV_log_qxt_x0).sum(dim=1) + def q_posterior(self, log_x_start: Tensor, log_x_t: Tensor, t: Tensor) -> Tensor: t_minus_1 = t - 1 # Remove negative values, will not be used anyway for final decoder t_minus_1 = torch.where(t_minus_1 < 0, torch.zeros_like(t_minus_1), t_minus_1) @@ -508,7 +525,7 @@ def q_posterior(self, log_x_start, log_x_t, t): return log_EV_xtmin_given_xt_given_xstart - def p_pred(self, model_out, log_x, t): + def p_pred(self, model_out: Tensor, log_x: Tensor, t: Tensor) -> Tensor: if self.parametrization == "x0": log_x_recon = self.predict_start(model_out, log_x) log_model_pred = self.q_posterior( @@ -521,48 +538,12 @@ def p_pred(self, model_out, log_x, t): return log_model_pred @torch.no_grad() - def p_sample(self, model_out, log_x, t): + def p_sample(self, model_out: Tensor, log_x: Tensor, t: Tensor) -> Tensor: model_log_prob = self.p_pred(model_out, log_x=log_x, t=t) out = self.log_sample_categorical(model_log_prob) return out - @torch.no_grad() - def p_sample_loop(self, shape): - device = self.log_alpha.device - - b = shape[0] - # start with random normal image. - img = torch.randn(shape, device=device) - - for i in reversed(range(1, self.num_timesteps)): - img = self.p_sample( - img, torch.full((b,), i, device=device, dtype=torch.long) - ) - return img - - @torch.no_grad() - def _sample(self, image_size, batch_size=16): - return self.p_sample_loop((batch_size, 3, image_size, image_size)) - - # @torch.no_grad() - # def interpolate(self, x1, x2, t=None, lam=0.5): - # b, *_, device = *x1.shape, x1.device - # t = default(t, self.num_timesteps - 1) - - # assert x1.shape == x2.shape - - # t_batched = torch.stack([torch.tensor(t, device=device)] * b) - # xt1, xt2 = map(lambda x: self.q_sample(x, t=t_batched), (x1, x2)) - - # img = (1 - lam) * xt1 + lam * xt2 - # for i in reversed(range(0, t)): - # img = self.p_sample( - # img, torch.full((b,), i, device=device, dtype=torch.long) - # ) - - # return img - - def log_sample_categorical(self, logits): + def log_sample_categorical(self, logits: Tensor) -> Tensor: full_sample = [] for i in range(len(self.num_classes)): one_class_logits = logits[:, self.slices_for_classes[i]] @@ -574,33 +555,14 @@ def log_sample_categorical(self, logits): log_sample = index_to_log_onehot(full_sample, self.num_classes) return log_sample - def q_sample(self, log_x_start, t): + def q_sample(self, log_x_start: Tensor, t: Tensor) -> Tensor: log_EV_qxt_x0 = self.q_pred(log_x_start, t) log_sample = self.log_sample_categorical(log_EV_qxt_x0) return log_sample - def nll(self, log_x_start): - b = log_x_start.size(0) - device = log_x_start.device - loss = 0 - for t in range(0, self.num_timesteps): - t_array = (torch.ones(b, device=device) * t).long() - - kl = self.compute_Lt( - log_x_start=log_x_start, - log_x_t=self.q_sample(log_x_start=log_x_start, t=t_array), - t=t_array, - ) - - loss += kl - - loss += self.kl_prior(log_x_start) - - return loss - - def kl_prior(self, log_x_start): + def kl_prior(self, log_x_start: Tensor) -> Tensor: b = log_x_start.size(0) device = log_x_start.device ones = torch.ones(b, device=device).long() @@ -613,7 +575,14 @@ def kl_prior(self, log_x_start): kl_prior = self.multinomial_kl(log_qxT_prob, log_half_prob) return sum_except_batch(kl_prior) - def compute_Lt(self, model_out, log_x_start, log_x_t, t, detach_mean=False): + def compute_Lt( + self, + model_out: Tensor, + log_x_start: 
Tensor, + log_x_t: Tensor, + t: Tensor, + detach_mean: bool = False, + ) -> Tensor: log_true_prob = self.q_posterior(log_x_start=log_x_start, log_x_t=log_x_t, t=t) log_model_prob = self.p_pred(model_out, log_x=log_x_t, t=t) @@ -631,7 +600,9 @@ def compute_Lt(self, model_out, log_x_start, log_x_t, t, detach_mean=False): return loss - def sample_time(self, b, device, method="uniform"): + def sample_time( + self, b: int, device: torch.device, method: str = "uniform" + ) -> tuple: if method == "importance": if not (self.Lt_count > 10).all(): return self.sample_time(b, device, method="uniform") @@ -654,8 +625,14 @@ def sample_time(self, b, device, method="uniform"): else: raise ValueError - def _multinomial_loss(self, model_out, log_x_start, log_x_t, t, pt): - + def _multinomial_loss( + self, + model_out: Tensor, + log_x_start: Tensor, + log_x_t: Tensor, + t: Tensor, + pt: Tensor, + ) -> Tensor: if self.multinomial_loss_type == "vb_stochastic": kl = self.compute_Lt(model_out, log_x_start, log_x_t, t) kl_prior = self.kl_prior(log_x_start) @@ -671,31 +648,7 @@ def _multinomial_loss(self, model_out, log_x_start, log_x_t, t, pt): else: raise ValueError() - #! Not used - def log_prob(self, x): - b, device = x.size(0), x.device - - if self.training: - #! not enough arguments - return self._multinomial_loss(x) - - else: - log_x_start = index_to_log_onehot(x, self.num_classes) - - t, pt = self.sample_time(b, device, "importance") - - kl = self.compute_Lt( - log_x_start, self.q_sample(log_x_start=log_x_start, t=t), t - ) - - kl_prior = self.kl_prior(log_x_start) - - # Upweigh loss term of the kl - loss = kl / pt + kl_prior - - return -loss - - def mixed_loss(self, x, cond=None): + def mixed_loss(self, x: Tensor, cond: Optional[Tensor] = None) -> tuple: b = x.shape[0] device = x.device t, pt = self.sample_time(b, device, "uniform") @@ -735,7 +688,7 @@ def mixed_loss(self, x, cond=None): return loss_multi.mean(), loss_gauss.mean() @torch.no_grad() - def mixed_elbo(self, x0, cond=None): + def mixed_elbo(self, x0: Tensor, cond: Optional[Tensor] = None) -> dict: b = x0.size(0) device = x0.device @@ -748,7 +701,7 @@ def mixed_elbo(self, x0, cond=None): gaussian_loss = [] xstart_mse = [] mse = [] - mu_mse = [] + # mu_mse = [] out_mean = [] true_mean = [] multinomial_loss = [] @@ -810,8 +763,8 @@ def mixed_elbo(self, x0, cond=None): if has_cat: prior_multin = self.kl_prior(log_x_cat) - total_gauss = gaussian_loss.sum(dim=1) + prior_gauss - total_multin = multinomial_loss.sum(dim=1) + prior_multin + total_gauss = torch.sum(gaussian_loss, dim=1) + prior_gauss + total_multin = torch.sum(multinomial_loss, dim=1) + prior_multin return { "total_gaussian": total_gauss, "total_multinomial": total_multin, @@ -826,8 +779,14 @@ def mixed_elbo(self, x0, cond=None): @torch.no_grad() def gaussian_ddim_step( - self, model_out_num, x, t, clip_denoised=False, denoised_fn=None, eta=0.0 - ): + self, + model_out_num: Tensor, + x: Tensor, + t: Tensor, + clip_denoised: bool = False, + denoised_fn: Any = None, + eta: float = 0.0, + ) -> Tensor: out = self.gaussian_p_mean_variance( model_out_num, x, @@ -859,23 +818,29 @@ def gaussian_ddim_step( return sample - @torch.no_grad() - def gaussian_ddim_sample(self, noise, T, cond=None, eta=0.0): - x = noise - b = x.shape[0] - device = x.device - for t in reversed(range(T)): - self.print(f"Sample timestep {t:4d}", end="\r") - t_array = (torch.ones(b, device=device) * t).long() - out_num = self.denoise_fn(x, t_array, y=cond) - x = self.gaussian_ddim_step(out_num, x, t_array) - self.print() - 
return x + # @torch.no_grad() + # def gaussian_ddim_sample(self, noise, T, cond=None, eta=0.0): + # x = noise + # b = x.shape[0] + # device = x.device + # for t in reversed(range(T)): + # self.print(f"Sample timestep {t:4d}", end="\r") + # t_array = (torch.ones(b, device=device) * t).long() + # out_num = self.denoise_fn(x, t_array, y=cond) + # x = self.gaussian_ddim_step(out_num, x, t_array) + # self.print() + # return x @torch.no_grad() def gaussian_ddim_reverse_step( - self, model_out_num, x, t, clip_denoised=False, eta=0.0 - ): + self, + model_out_num: Tensor, + x: Tensor, + t: Tensor, + clip_denoised: bool = False, + denoised_fn: Any = None, + eta: float = 0.0, + ) -> Tensor: if not (eta == 0.0): raise AssertionError("Eta must be zero.") out = self.gaussian_p_mean_variance( @@ -883,7 +848,7 @@ def gaussian_ddim_reverse_step( x, t, clip_denoised=clip_denoised, - denoised_fn=None, + denoised_fn=denoised_fn, model_kwargs=None, ) @@ -899,22 +864,22 @@ def gaussian_ddim_reverse_step( return mean_pred - @torch.no_grad() - def gaussian_ddim_reverse_sample(self, x, T, cond=None): - b = x.shape[0] - device = x.device - for t in range(T): - self.print(f"Reverse timestep {t:4d}", end="\r") - t_array = (torch.ones(b, device=device) * t).long() - out_num = self.denoise_fn(x, t_array, y=cond) - x = self.gaussian_ddim_reverse_step(out_num, x, t_array, eta=0.0) - self.print() - - return x + # @torch.no_grad() + # def gaussian_ddim_reverse_sample(self, x, T, cond=None): + # b = x.shape[0] + # device = x.device + # for t in range(T): + # self.print(f"Reverse timestep {t:4d}", end="\r") + # t_array = (torch.ones(b, device=device) * t).long() + # out_num = self.denoise_fn(x, t_array, y=cond) + # x = self.gaussian_ddim_reverse_step(out_num, x, t_array, eta=0.0) + # self.print() + # return x @torch.no_grad() - def multinomial_ddim_step(self, model_out_cat, log_x_t, t, eta=0.0): - # not ddim, essentially + def multinomial_ddim_step( + self, model_out_cat: Tensor, log_x_t: Tensor, t: Tensor, eta: float = 0.0 + ) -> Tensor: log_x0 = self.predict_start(model_out_cat, log_x_t=log_x_t) alpha_bar = extract(self.alphas_cumprod, t, log_x_t.shape) @@ -945,7 +910,7 @@ def multinomial_ddim_step(self, model_out_cat, log_x_t, t, eta=0.0): return out @torch.no_grad() - def sample_ddim(self, num_samples, cond=None): + def sample_ddim(self, num_samples: int, cond: Any = None) -> Tensor: b = num_samples device = self.log_alpha.device z_norm = torch.randn((b, self.num_numerics), device=device) @@ -958,12 +923,6 @@ def sample_ddim(self, num_samples, cond=None): ) log_z = self.log_sample_categorical(uniform_logits) - # y = torch.multinomial( - # cond, - # num_samples=b, - # replacement=True - # ) - # out_dict = {'y': y.long().to(device)} for i in reversed(range(0, self.num_timesteps)): self.print(f"Sample timestep {i:4d}", end="\r") t = torch.full((b,), i, device=device, dtype=torch.long) @@ -987,7 +946,7 @@ def sample_ddim(self, num_samples, cond=None): return sample @torch.no_grad() - def sample(self, num_samples, cond=None): + def sample(self, num_samples: int, cond: Any = None) -> Tensor: b = num_samples device = self.log_alpha.device z_norm = torch.randn((b, self.num_numerics), device=device) @@ -1028,7 +987,13 @@ def sample(self, num_samples, cond=None): sample = torch.cat([z_norm, z_cat], dim=1).cpu() return sample - def sample_all(self, num_samples, cond=None, max_batch_size=2000, ddim=False): + def sample_all( + self, + num_samples: int, + cond: Any = None, + max_batch_size: int = 2000, + ddim: bool = False, + ) -> 
Tensor: if ddim: self.print("Sample using DDIM.") sample_fn = self.sample_ddim diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/modules.py b/src/synthcity/plugins/core/models/tabular_ddpm/modules.py index 00cef021..289f37ec 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/modules.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/modules.py @@ -1,7 +1,6 @@ """ Code was adapted from https://github.com/Yura52/rtdl """ -# mypy: disable-error-code=no-untyped-def # flake8: noqa: F401 # stdlib @@ -19,11 +18,11 @@ class SiLU(nn.Module): - def forward(self, x): + def forward(self, x: Tensor) -> Tensor: return x * torch.sigmoid(x) -def timestep_embedding(timesteps, dim, max_period=10000): +def timestep_embedding(timesteps: Tensor, dim: int, max_period: int = 10000) -> Tensor: """ Create sinusoidal timestep embeddings. @@ -46,19 +45,6 @@ def timestep_embedding(timesteps, dim, max_period=10000): return embedding -def _is_glu_activation(activation: ModuleType): - return ( - isinstance(activation, str) - and activation.endswith("GLU") - or activation in [ReGLU, GEGLU] - ) - - -def _all_or_none(values): - if not (all(x is None for x in values) or all(x is not None for x in values)): - raise AssertionError - - def reglu(x: Tensor) -> Tensor: """The ReGLU activation function from [1]. References: @@ -117,7 +103,7 @@ def forward(self, x: Tensor) -> Tensor: return geglu(x) -def _make_nn_module(module_type: ModuleType, *args) -> nn.Module: +def _make_nn_module(module_type: ModuleType, *args: Any) -> nn.Module: return ( ( ReGLU() @@ -445,7 +431,14 @@ def forward(self, x: Tensor) -> Tensor: class MLPDiffusion(nn.Module): - def __init__(self, d_in, num_classes, is_y_cond, rtdl_params, dim_t=128): + def __init__( + self, + d_in: int, + num_classes: int, + is_y_cond: bool, + rtdl_params: dict, + dim_t: int = 128, + ) -> None: super().__init__() self.dim_t = dim_t self.num_classes = num_classes @@ -468,7 +461,9 @@ def __init__(self, d_in, num_classes, is_y_cond, rtdl_params, dim_t=128): nn.Linear(dim_t, dim_t), nn.SiLU(), nn.Linear(dim_t, dim_t) ) - def forward(self, x, timesteps, y=None): + def forward( + self, x: Tensor, timesteps: Tensor, y: Optional[Tensor] = None + ) -> Tensor: emb = self.time_embed(timestep_embedding(timesteps, self.dim_t)) if self.is_y_cond and y is not None: if self.num_classes > 0: @@ -481,7 +476,14 @@ def forward(self, x, timesteps, y=None): class ResNetDiffusion(nn.Module): - def __init__(self, d_in, num_classes, is_y_cond, rtdl_params, dim_t=256): + def __init__( + self, + d_in: int, + num_classes: int, + is_y_cond: bool, + rtdl_params: dict, + dim_t: int = 256, + ) -> None: super().__init__() self.dim_t = dim_t self.num_classes = num_classes @@ -501,7 +503,9 @@ def __init__(self, d_in, num_classes, is_y_cond, rtdl_params, dim_t=256): nn.Linear(dim_t, dim_t), nn.SiLU(), nn.Linear(dim_t, dim_t) ) - def forward(self, x, timesteps, y=None): + def forward( + self, x: Tensor, timesteps: Tensor, y: Optional[Tensor] = None + ) -> Tensor: emb = self.time_embed(timestep_embedding(timesteps, self.dim_t)) if self.is_y_cond and y is not None: if self.num_classes > 0: diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/utils.py b/src/synthcity/plugins/core/models/tabular_ddpm/utils.py index c2491e6e..b495c8a0 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/utils.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/utils.py @@ -1,17 +1,16 @@ -# mypy: disable-error-code=no-untyped-def +# flake8: noqa: F401 # stdlib -from inspect import isfunction 
+from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple # third party import numpy as np import torch import torch.nn.functional as F +from torch import Tensor -# from torch.profiler import record_function - -def normal_kl(mean1, logvar1, mean2, logvar2): +def normal_kl(mean1: Tensor, logvar1: Tensor, mean2: Tensor, logvar2: Tensor) -> Tensor: """ Compute the KL divergence between two gaussians. @@ -20,7 +19,7 @@ def normal_kl(mean1, logvar1, mean2, logvar2): """ tensor = None for obj in (mean1, logvar1, mean2, logvar2): - if isinstance(obj, torch.Tensor): + if isinstance(obj, Tensor): tensor = obj break if tensor is None: @@ -29,7 +28,7 @@ def normal_kl(mean1, logvar1, mean2, logvar2): # Force variances to be Tensors. Broadcasting helps convert scalars to # Tensors, but it does not work for torch.exp(). logvar1, logvar2 = [ - x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor) + x if isinstance(x, Tensor) else torch.tensor(x).to(tensor) for x in (logvar1, logvar2) ] @@ -42,7 +41,7 @@ def normal_kl(mean1, logvar1, mean2, logvar2): ) -def approx_standard_normal_cdf(x): +def approx_standard_normal_cdf(x: Tensor) -> Tensor: """ A fast approximation of the cumulative distribution function of the standard normal. @@ -52,7 +51,9 @@ def approx_standard_normal_cdf(x): ) -def discretized_gaussian_log_likelihood(x, *, means, log_scales): +def discretized_gaussian_log_likelihood( + x: Tensor, *, means: Tensor, log_scales: Tensor +) -> Tensor: """ Compute the log-likelihood of a Gaussian distribution discretizing to a given image. @@ -86,7 +87,7 @@ def discretized_gaussian_log_likelihood(x, *, means, log_scales): return log_probs -def sum_except_batch(x, num_dims=1): +def sum_except_batch(x: Tensor, num_dims: int = 1) -> Tensor: """ Sums all dimensions except the first. @@ -100,14 +101,14 @@ def sum_except_batch(x, num_dims=1): return x.reshape(*x.shape[:num_dims], -1).sum(-1) -def mean_flat(tensor): +def mean_flat(tensor: Tensor) -> Tensor: """ Take the mean over all non-batch dimensions. 
""" return tensor.mean(dim=list(range(1, len(tensor.shape)))) -def ohe_to_categories(ohe, K): +def ohe_to_categories(ohe: Tensor, K: np.ndarray) -> Tensor: K = torch.from_numpy(K) indices = torch.cat([torch.zeros((1,)), K.cumsum(dim=0)], dim=0).int().tolist() res = [] @@ -116,20 +117,16 @@ def ohe_to_categories(ohe, K): return torch.stack(res, dim=1) -def log_1_min_a(a): +def log_1_min_a(a: Tensor) -> Tensor: return torch.log(1 - a.exp() + 1e-40) -def log_add_exp(a, b): +def log_add_exp(a: Tensor, b: Tensor) -> Tensor: maximum = torch.max(a, b) return maximum + torch.log(torch.exp(a - maximum) + torch.exp(b - maximum)) -def exists(x): - return x is not None - - -def extract(a, t, x_shape): +def extract(a: Tensor, t: Tensor, x_shape: tuple) -> Tensor: b, *_ = t.shape t = t.to(a.device) out = a.gather(-1, t) @@ -138,17 +135,11 @@ def extract(a, t, x_shape): return out.expand(x_shape) -def default(val, d): - if exists(val): - return val - return d() if isfunction(d) else d - - -def log_categorical(log_x_start, log_prob): +def log_categorical(log_x_start: Tensor, log_prob: Tensor) -> Tensor: return (log_x_start.exp() * log_prob).sum(dim=1) -def index_to_log_onehot(x, num_classes): +def index_to_log_onehot(x: Tensor, num_classes: np.ndarray) -> Tensor: onehots = [] for i in range(len(num_classes)): onehots.append(F.one_hot(x[:, i], num_classes[i])) @@ -157,23 +148,14 @@ def index_to_log_onehot(x, num_classes): return log_onehot -def log_sum_exp_by_classes(x, slices): - res = torch.zeros_like(x) - for ixs in slices: - res[:, ixs] = torch.logsumexp(x[:, ixs], dim=1, keepdim=True) - if not (x.size() == res.size()): - raise AssertionError - return res - - @torch.jit.script -def log_sub_exp(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor: +def log_sub_exp(a: Tensor, b: Tensor) -> Tensor: m = torch.maximum(a, b) return torch.log(torch.exp(a - m) - torch.exp(b - m)) + m @torch.jit.script -def sliced_logsumexp(x, slices): +def sliced_logsumexp(x: Tensor, slices: Tensor) -> Tensor: lse = torch.logcumsumexp( torch.nn.functional.pad(x, [1, 0, 0, 0], value=-float("inf")), dim=-1 ) @@ -188,14 +170,10 @@ def sliced_logsumexp(x, slices): return slice_lse_repeated -def log_onehot_to_index(log_x): - return log_x.argmax(1) - - class FoundNANsError(BaseException): """Found NANs during sampling""" - def __init__(self, message="Found NANs during sampling."): + def __init__(self, message: str = "Found NANs during sampling.") -> None: super(FoundNANsError, self).__init__(message) @@ -207,7 +185,9 @@ class TensorDataLoader: Source: https://discuss.pytorch.org/t/dataloader-much-slower-than-manual-batching/27014/6 """ - def __init__(self, *tensors, batch_size=32, shuffle=False): + def __init__( + self, *tensors: Tensor, batch_size: int = 32, shuffle: bool = False + ) -> None: """ Initialize a FastTensorDataLoader. :param *tensors: tensors to store. Must have the same length @ dim 0. 
@@ -223,7 +203,7 @@ def __init__(self, *tensors, batch_size=32, shuffle=False): self.batch_size = batch_size self.shuffle = shuffle - def __iter__(self): + def __iter__(self) -> Iterator[tuple]: idx = np.arange(self.dataset_len) if self.shuffle: np.random.shuffle(idx) @@ -231,5 +211,5 @@ def __iter__(self): s = idx[i : i + self.batch_size] yield tuple(t[s] for t in self.tensors) - def __len__(self): + def __len__(self) -> int: return len(range(0, self.dataset_len, self.batch_size)) diff --git a/src/tmp.py b/src/tmp.py index 74bf87ad..6a58832b 100644 --- a/src/tmp.py +++ b/src/tmp.py @@ -1,12 +1,21 @@ -from synthcity.plugins import Plugins +# third party from sklearn.datasets import load_iris + +# synthcity absolute +from synthcity.plugins import Plugins from synthcity.plugins.core.dataloader import GenericDataLoader # loadDebugger() -X, y = load_iris(as_frame = True, return_X_y = True) -X = GenericDataLoader(X.assign(target = y), target_column="target") -plugin = Plugins().get("ddpm", n_iter=3, is_classification=True, - num_timesteps=100, verbose=1) +X, y = load_iris(as_frame=True, return_X_y=True) +X = GenericDataLoader(X.assign(target=y), target_column="target") +plugin = Plugins().get( + "ddpm", + n_iter=3, + is_classification=True, + gaussian_loss_type="mse", + num_timesteps=100, + verbose=1, +) plugin.fit(X) X_syn = plugin.model.generate(50) print(X_syn) From 137c1767b508ce6976df26b493e8b5bb70208b38 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Fri, 10 Mar 2023 20:50:32 +0100 Subject: [PATCH 16/95] remove auto-anno and flake8 noqa --- src/auto-anno.py | 429 ------------------ .../core/models/tabular_ddpm/__init__.py | 5 +- .../gaussian_multinomial_diffsuion.py | 2 +- .../core/models/tabular_ddpm/modules.py | 14 +- .../plugins/core/models/tabular_ddpm/utils.py | 4 +- src/tmp.py | 21 - 6 files changed, 9 insertions(+), 466 deletions(-) delete mode 100644 src/auto-anno.py delete mode 100644 src/tmp.py diff --git a/src/auto-anno.py b/src/auto-anno.py deleted file mode 100644 index 96225e56..00000000 --- a/src/auto-anno.py +++ /dev/null @@ -1,429 +0,0 @@ -# flake8: noqa -# mypy: ignore-errors - -# stdlib -import argparse -import ast -import importlib -import inspect -import io -import os -import re -import runpy -import shutil -import sys -from collections.abc import Callable, Iterator -from itertools import islice, product -from numbers import * -from typing import Any, Optional, Union - -# third party -import cloudpickle - -TYPE_MAP = { # maps of type annotations - Integral: int, - Real: float, - Complex: complex, - object: Any, -} - -# MOD_MAP = { # maps module names to their common aliases -# 'numpy': 'np', -# 'pandas': 'pd' -# } - - -def get_type(x): - """ - Examples: - >>> get_type(None) - >>> get_type([]) - list - >>> get_type([1, 2, 3]) - list[int] - >>> get_type([1, 'a']) - list - >>> get_type(dict(a=0.9, b=0.1)) - dict[str, float] - >>> get_type(dict(a=0.9, b='a')) - dict[str, typing.Any] - >>> get_type({1, 2.0, None}) - set[typing.Optional[float]] - >>> get_type(str) - type - >>> get_type(True) - bool - >>> get_type((1, 2.0)) - tuple[int, float] - >>> get_type(tuple(range(9))) - tuple[int, ...] 
- >>> get_type(iter(range(9))) - typing.Iterator[int] - >>> get_type((i if i % 2 else None for i in range(9))) - typing.Iterator[typing.Optional[int]] - """ - - def dispatch(T, *xs, maxlen=5): - xs = [list(map(get_type, l)) for l in xs] - if not xs or min(map(len, xs)) == 0: # empty collection - return T - ts = tuple(map(get_common_suptype, xs)) - if len(ts) == 1: - t = ts[0] - elif len(ts) > maxlen: - t = get_common_suptype(ts) - else: - t = ts - if t is object: - return T - elif len(ts) > maxlen: - return T[t, ...] - else: - return T[t] - - if x is None: - return None - if inspect.isfunction(x) or inspect.ismethod(x): - return Callable - for t in (list, set, frozenset): - if isinstance(x, t): - return dispatch(t, x) - if isinstance(x, tuple): - return dispatch(tuple, *[[a] for a in x], maxlen=4) - if isinstance(x, dict): - return dispatch(dict, x.keys(), x.values()) - if isinstance(x, io.IOBase): - return type(x) - if isinstance(x, Iterator): #! may be too general - return dispatch(Iterator, islice(x, 10)) - if isinstance(x, bool): - return bool - if isinstance(x, Integral): - return Integral - if isinstance(x, Real): - return Real - if isinstance(x, Complex): - return Complex - return type(x) - - -def get_suptypes(t): - def suptypes_of_subscripted_type(t): - T = t.__origin__ - args = t.__args__ - sts = [ - T[ts] - for ts in product(*map(get_suptypes, args)) - if not all(t in (object, ...) for t in ts) - ] - return sts + get_suptypes(T) - - if inspect.isclass(t) and issubclass(t, type): - sts = list(t.__mro__) - elif hasattr(t, "__origin__"): - sts = suptypes_of_subscripted_type(t) - elif isinstance(t, type): - sts = list(t.mro()) - elif t == Ellipsis: - sts = [t] - else: # None, Callable, Iterator, etc. - sts = [t, object] - return sts - - -def get_common_suptype(ts, type_map=None): - """Find the most specific common supertype of a collection of types.""" - ts = set(ts) - assert ts, "empty collection of types" - - optional = any(t is None for t in ts) - ts.discard(None) - - if not ts: - return None - - sts = [get_suptypes(t) for t in ts] - for t in min(sts, key=len): - if all(t in ts for ts in sts): - break - else: - return Any - - if type_map: - t = type_map.get(t, t) - if optional: - t = Optional[t] - return t - - -def test(): - def get_anno(xs): - return get_common_suptype(map(get_type, xs)) - - recs = [ - [None, 1, 1.2], - [{1: 2}, {1: 2.2}, {1: 2.1, 3: 4}], - [(x for x in range(10)), iter(range(10))], - ] - for xs in recs: - print(get_anno(xs)) - - -def get_full_name(x, global_vars={}): - """ - Examples: - >>> import numpy as np - >>> G = lambda: {id(v): k for k, v in globals().items() if k[0] != '_'} - >>> get_full_name(np.ndarray, G()) - 'np.ndarray' - >>> import scipy as sp - >>> get_full_name(sp.sparse.csr_matrix, G()) - 'sp.sparse.csr_matrix' - >>> import scipy.sparse as sps - >>> get_full_name(sparse.csr_matrix, G()) - 'sps.csr_matrix' - """ - - def get_name(x): - if x.__module__ == "typing": - return x._name - return getattr(x, "__qualname__", x.__name__) - - if x is Ellipsis: - return "..." 
- if x is None: - return "None" - if id(x) in global_vars: - return global_vars[id(x)] - if x.__module__ == "builtins": - return x.__name__ - # handle the subscripted types - if hasattr(x, "__origin__"): - T, args = x.__origin__, x.__args__ - if T is Union and len(args) == 2 and args[1] is type(None): - T, args = Optional, args[:1] - T = get_full_name(T, global_vars) - args = ", ".join(get_full_name(a, global_vars) for a in args) - return f"{T}[{args}]" - # find the module alias - names = (f"{x.__module__}.{get_name(x)}").split(".")[::-1] - mods = [importlib.import_module(names[-1])] - print(names) - for name in names[-2::-1]: - print(name, mods[-1]) - mods.append(getattr(mods[-1], name)) - mods = mods[::-1] - # find the first module that is imported - for i, (name, mod) in enumerate(zip(names, mods)): - if id(mod) in global_vars: - names = names[:i] + [global_vars[id(mod)]] - mods = mods[: i + 1] - break - # skip useless intermediate modules - for k in range(1, len(names)): - if k >= len(names) - 1: - break - for i, (name, mod) in enumerate(zip(names, mods)): - if i + 1 + k >= len(names): - break - if hasattr(mods[-k], name): - names = names[: i + 1] + names[-k:] - mods = mods[: i + 1] + mods[-k:] - break - return ".".join(names[::-1]) - - -def profiler(frame, event, arg): - if event in ("call", "return"): - filename = os.path.abspath(frame.f_code.co_filename) - funcname = frame.f_code.co_name - if filename.endswith(".py") and funcname[0] != "<" and CWD in filename: - recs = TYPE_RECS.setdefault(filename, {}) - if "globals" not in recs: - recs["globals", None] = { - id(v): k for k, v in frame.f_globals.items() if k[0] != "_" - } - if event == "call": - # print(filename, funcname, frame.f_lineno, frame.f_locals) - arg_types = {var: get_type(val) for var, val in frame.f_locals.items()} - lineno = frame.f_lineno - else: - arg_types = {"return": get_type(arg)} - #! 
assumes no nested function has the same name as the outer function - lineno = max( - ln for ln, fn in recs if fn == funcname and ln <= frame.f_lineno - ) - rec = recs.setdefault((lineno, funcname), {}) - for k, v in arg_types.items(): - rec.setdefault(k, []).append(v) - return profiler - - -# *** run the script N times to collect type records *** - -parser = argparse.ArgumentParser() -parser.add_argument("script", help="the script to run") -parser.add_argument("-n", type=int, default=1, help="number of times to run the script") -parser.add_argument("-v", "--verbose", action="store_true") -parser.add_argument( - "-i", action="store_true", help="prompt before overwriting each script" -) -parser.add_argument( - "--log", default="type_records.pkl", help="output file for type records" -) -parser.add_argument("--cwd", default=None, help="working directory") -parser.add_argument( - "--backup", action="store_true", help="backup the scripts before annotating them" -) - -ARGS = parser.parse_args() -DIR = os.path.dirname(os.path.abspath(ARGS.script)) -CWD = ARGS.cwd or DIR - -try: - TYPE_RECS = cloudpickle.load(open(ARGS.log, "rb")) -except: - TYPE_RECS = {} # {filename: {(lineno, funcname): {argname: [type]}}}} - -sys.path.extend([DIR, CWD]) -sys.setprofile(profiler) - -for _ in range(ARGS.n): - runpy.run_path(sys.argv[1], run_name="__main__") - -sys.setprofile(None) - -with open(ARGS.log, "wb") as f: - cloudpickle.dump(TYPE_RECS, f) - - -# *** determine the type annotations from the type records *** - - -def get_type_annotations(type_records=TYPE_RECS): - def recurse(x): - if isinstance(x, dict): - return {k: recurse(v) for k, v in x.items()} - elif isinstance(x, list): - return get_common_suptype(x, type_map=TYPE_MAP) - else: - return x - - return recurse(type_records) - - -annotations = get_type_annotations() - -# if ARGS.verbose: -# for path, recs in annotations.items(): -# print(path) -# for (lineno, funcname), arg_types in recs.items(): -# print(f' {funcname} (Ln{lineno}):') -# print(' ' + ', '.join(f'{k}: {get_full_name(v)}' for k, v in arg_types.items())) - - -# *** write the type annotations to the script *** - - -def find_defs_in_ast(tree): - def recurse(node): # should be in order - if isinstance(node, ast.FunctionDef): - yield node - for child in ast.iter_child_nodes(node): - yield from recurse(child) - - return list(recurse(tree)) - - -def annotate_def(def_node: ast.FunctionDef, annotations) -> bool: - key = (def_node.lineno, def_node.name) - if key not in annotations: - return False # no type records for this function - annos = annotations[key] - A = def_node.args - all_args = A.posonlyargs + A.args + A.kwonlyargs - defaults = dict(zip(A.args + A.kwonlyargs, A.defaults + A.kw_defaults)) - all_args.extend(filter(None, [A.vararg, A.kwarg])) - changed = False - global_vars = annotations["globals", None] - for a in all_args: - if a.annotation is None and a.arg != "self": - t = annos[a.arg] - if a == A.vararg: - if t is tuple: - t = Any - else: - assert t.__origin__ is tuple - if ( - len(t.__args__) == 1 - or len(t.__args__) == 2 - and t.__args__[1] is Ellipsis - ): - t = t.__args__[0] - else: - t = get_common_suptype(t.__args__) - elif a == A.kwarg: - assert t.__origin__ is dict - t = t.__args__[1] - if t is None: - t = Any - if a.arg in defaults: - t = Union[t, get_type(defaults[a.arg])] - anno = get_full_name(t, global_vars) - a.annotation = ast.Name(anno) - changed = True - if def_node.returns is None: - if "return" not in annos: - print("No return type for", key, annos) - exit() - anno 
= get_full_name(annos["return"], global_vars) - def_node.returns = ast.Name(anno) - def_node.returns.lineno = max(a.lineno for a in all_args) - changed = True - return changed - - -def annotate_script(filepath, verbose=ARGS.verbose): - s = open(filepath, encoding="utf8").read() - lines = s.splitlines() - defs = [ - d - for d in find_defs_in_ast(ast.parse(s)) - if annotate_def(d, annotations[filepath]) - ] - if not defs: - return None - if verbose: - print("Adding annotations to", filepath, "\n") - starts, ends, sigs = [], [], [] - for node in defs: - ln0, ln1 = node.lineno, node.body[0].lineno - starts.append(ln0 - 1) - ends.append(ln1 - 1) - node.body = [] # only keep signature - line = re.match(r"\s*", lines[ln0 - 1])[0] + ast.unparse( - node - ) # keep indentation - sigs.append(line) - if verbose: - print("Old:", *lines[ln0 - 1 : ln1 - 1], sep="\n") - print(">" * 50) - print("New:", sigs[-1], sep="\n") - print("-" * 50) - new_lines = [] - for s, e, sig in zip([None] + ends, starts + [None], sigs + [None]): - new_lines.extend(lines[s:e]) - if sig is not None: - new_lines.append(sig) - return "\n".join(new_lines) - - -for path in annotations: - s = annotate_script(path) - if s is None: - continue - if ARGS.backup: - shutil.copy(path, path + ".bak") - if not ARGS.i or input(f"Overwrite {path}?").lower() == "y": - with open(path, "w", encoding="utf8") as f: - f.write(s) diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py index 1b6df0cd..91ed41e8 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py @@ -1,8 +1,6 @@ -# flake8: noqa: F401 - # stdlib from copy import deepcopy -from typing import Any, Iterator, Optional, Union +from typing import Any, Iterator, Optional # third party import numpy as np @@ -12,7 +10,6 @@ from torch import nn # synthcity absolute -from synthcity.metrics.weighted_metrics import WeightedMetrics from synthcity.utils.constants import DEVICE from synthcity.utils.dataframe import discrete_columns diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py index 145f81da..fa280dc0 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py @@ -18,7 +18,7 @@ # synthcity relative from .modules import MLPDiffusion, ResNetDiffusion -from .utils import * # noqa: F401, F403 +from .utils import * def get_named_beta_schedule( diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/modules.py b/src/synthcity/plugins/core/models/tabular_ddpm/modules.py index 289f37ec..5ceec2d0 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/modules.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/modules.py @@ -1,11 +1,9 @@ """ Code was adapted from https://github.com/Yura52/rtdl """ -# flake8: noqa: F401 - # stdlib import math -from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union, cast +from typing import Any, Callable, Optional, Union # third party import torch @@ -162,8 +160,8 @@ def __init__( self, *, d_in: int, - d_layers: List[int], - dropouts: Union[float, List[float]], + d_layers: list[int], + dropouts: Union[float, list[float]], activation: Union[str, Callable[[], nn.Module]], d_out: int, ) -> None: @@ -195,9 +193,9 @@ def __init__( @classmethod def 
make_baseline( - cls: Type["MLP"], + cls: type["MLP"], d_in: int, - d_layers: List[int], + d_layers: list[int], dropout: float, d_out: int, ) -> "MLP": @@ -383,7 +381,7 @@ def __init__( @classmethod def make_baseline( - cls: Type["ResNet"], + cls: type["ResNet"], *, d_in: int, n_blocks: int, diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/utils.py b/src/synthcity/plugins/core/models/tabular_ddpm/utils.py index b495c8a0..4aa59b95 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/utils.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/utils.py @@ -1,7 +1,5 @@ -# flake8: noqa: F401 - # stdlib -from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple +from typing import Iterator # third party import numpy as np diff --git a/src/tmp.py b/src/tmp.py deleted file mode 100644 index 6a58832b..00000000 --- a/src/tmp.py +++ /dev/null @@ -1,21 +0,0 @@ -# third party -from sklearn.datasets import load_iris - -# synthcity absolute -from synthcity.plugins import Plugins -from synthcity.plugins.core.dataloader import GenericDataLoader - -# loadDebugger() -X, y = load_iris(as_frame=True, return_X_y=True) -X = GenericDataLoader(X.assign(target=y), target_column="target") -plugin = Plugins().get( - "ddpm", - n_iter=3, - is_classification=True, - gaussian_loss_type="mse", - num_timesteps=100, - verbose=1, -) -plugin.fit(X) -X_syn = plugin.model.generate(50) -print(X_syn) From 6c4af11b25f5c58277f352546d4e48aebc7331fe Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Fri, 10 Mar 2023 23:37:19 +0100 Subject: [PATCH 17/95] add python<3.9 compatible annotations --- src/synthcity/plugins/core/models/tabular_ddpm/utils.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/utils.py b/src/synthcity/plugins/core/models/tabular_ddpm/utils.py index 4aa59b95..4d4c92bd 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/utils.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/utils.py @@ -1,3 +1,6 @@ +# future +from __future__ import annotations + # stdlib from typing import Iterator From 191cdcc77892aba4099789aa42e734c44199f6bf Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Fri, 10 Mar 2023 23:39:53 +0100 Subject: [PATCH 18/95] remove star import --- .../gaussian_multinomial_diffsuion.py | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py index fa280dc0..88925ae2 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py @@ -4,8 +4,6 @@ - https://github.com/ehoogeboom/multinomial_diffusion - https://github.com/lucidrains/denoising-diffusion-pytorch/blob/5989f4c77eafcdc6be0fb4739f0f277a6dd7f7d8/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py#L281 """ -# flake8: noqa: F405 - # stdlib import math from typing import Any, Callable, Optional @@ -18,7 +16,20 @@ # synthcity relative from .modules import MLPDiffusion, ResNetDiffusion -from .utils import * +from .utils import ( + FoundNANsError, + discretized_gaussian_log_likelihood, + extract, + index_to_log_onehot, + log_1_min_a, + log_add_exp, + log_categorical, + mean_flat, + normal_kl, + ohe_to_categories, + sliced_logsumexp, + sum_except_batch, +) def get_named_beta_schedule( From 
9349a66d3518a12b37cad643af76886b9e27fcf8 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Sun, 12 Mar 2023 16:10:09 +0100 Subject: [PATCH 19/95] replace builtin type annos to typing annos --- .../core/models/tabular_ddpm/__init__.py | 16 +++++++-------- .../gaussian_multinomial_diffsuion.py | 6 +++--- .../core/models/tabular_ddpm/modules.py | 8 ++++---- src/test.py | 20 +++++++++++++++++++ 4 files changed, 35 insertions(+), 15 deletions(-) create mode 100644 src/test.py diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py index 91ed41e8..0618becd 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py @@ -1,6 +1,7 @@ # stdlib +from collections.abc import Iterator from copy import deepcopy -from typing import Any, Iterator, Optional +from typing import Any, Dict, Optional # third party import numpy as np @@ -24,18 +25,18 @@ def __init__( self, n_iter: int = 1000, lr: float = 0.002, - weight_decay: float = 1e-4, + weight_decay: float = 0.0001, batch_size: int = 1024, num_timesteps: int = 1000, gaussian_loss_type: str = "mse", scheduler: str = "cosine", - device: Any = DEVICE, + device: torch.device = DEVICE, verbose: int = 0, log_interval: int = 10, print_interval: int = 100, # model params model_type: str = "mlp", - rtdl_params: Optional[dict] = None, # {'d_layers', 'dropout'} + rtdl_params: Optional[Dict[str, Any]] = None, dim_label_emb: int = 128, # early stopping n_iter_min: int = 100, @@ -68,7 +69,9 @@ def _update_ema( for targ, src in zip(target_params, source_params): targ.detach().mul_(rate).add_(src.detach(), alpha=1 - rate) - def fit(self, X: pd.DataFrame, cond: Any = None, **kwargs: Any) -> "TabDDPM": + def fit( + self, X: pd.DataFrame, cond: Optional[pd.Series] = None, **kwargs: Any + ) -> "TabDDPM": if cond is not None: n_labels = cond.nunique() else: @@ -180,7 +183,4 @@ def generate(self, count: int, cond: Any = None) -> np.ndarray: cond = torch.tensor(cond, dtype=torch.long, device=self.device) sample = self.diffusion.sample_all(count, cond).detach().cpu().numpy() sample = sample[:, self._col_perm] - if self.verbose: - print("Generated sample") - print(sample) return sample diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py index 88925ae2..ad1a776f 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py @@ -6,7 +6,7 @@ """ # stdlib import math -from typing import Any, Callable, Optional +from typing import Any, Callable, Optional, Tuple # third party import numpy as np @@ -246,7 +246,7 @@ def __init__( # Gaussian part def gaussian_q_mean_variance( self, x_start: Tensor, t: Tensor - ) -> tuple[Tensor, Tensor, Tensor]: + ) -> Tuple[Tensor, Tensor, Tensor]: mean = extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract(self.log_1_min_cumprod_alpha, t, x_start.shape) @@ -266,7 +266,7 @@ def gaussian_q_sample( def gaussian_q_posterior_mean_variance( self, x_start: Tensor, x_t: Tensor, t: Tensor - ) -> tuple[Tensor, Tensor, Tensor]: + ) -> Tuple[Tensor, Tensor, Tensor]: if not (x_start.shape == x_t.shape): raise AssertionError posterior_mean = ( diff --git 
a/src/synthcity/plugins/core/models/tabular_ddpm/modules.py b/src/synthcity/plugins/core/models/tabular_ddpm/modules.py index 5ceec2d0..cc8a56ad 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/modules.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/modules.py @@ -3,7 +3,7 @@ """ # stdlib import math -from typing import Any, Callable, Optional, Union +from typing import Any, Callable, List, Optional, Union # third party import torch @@ -160,8 +160,8 @@ def __init__( self, *, d_in: int, - d_layers: list[int], - dropouts: Union[float, list[float]], + d_layers: List[int], + dropouts: Union[float, List[float]], activation: Union[str, Callable[[], nn.Module]], d_out: int, ) -> None: @@ -195,7 +195,7 @@ def __init__( def make_baseline( cls: type["MLP"], d_in: int, - d_layers: list[int], + d_layers: List[int], dropout: float, d_out: int, ) -> "MLP": diff --git a/src/test.py b/src/test.py new file mode 100644 index 00000000..eb68198a --- /dev/null +++ b/src/test.py @@ -0,0 +1,20 @@ +# third party +from sklearn.datasets import load_iris + +# synthcity absolute +from synthcity.plugins import Plugins +from synthcity.plugins.core.dataloader import GenericDataLoader + +# loadDebugger() +X, y = load_iris(as_frame=True, return_X_y=True) +X = GenericDataLoader(X.assign(target=y), target_column="target") +plugin = Plugins().get( + "ddpm", + n_iter=3, + is_classification=True, + gaussian_loss_type="mse", + num_timesteps=100, + verbose=1, +) +plugin.fit(X) +X_syn = plugin.model.generate(50) From 02579e97e21f325a2deb0c6c3aa0e699f432147d Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Sun, 12 Mar 2023 16:54:48 +0100 Subject: [PATCH 20/95] resolve py38 compatibility issue --- src/synthcity/plugins/core/models/tabular_ddpm/modules.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/modules.py b/src/synthcity/plugins/core/models/tabular_ddpm/modules.py index cc8a56ad..8caad49f 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/modules.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/modules.py @@ -193,7 +193,7 @@ def __init__( @classmethod def make_baseline( - cls: type["MLP"], + cls, d_in: int, d_layers: List[int], dropout: float, @@ -381,7 +381,7 @@ def __init__( @classmethod def make_baseline( - cls: type["ResNet"], + cls, *, d_in: int, n_blocks: int, From f930bc0f196c491903121556b755df5ba28dec73 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Sun, 12 Mar 2023 22:07:17 +0100 Subject: [PATCH 21/95] tests/plugins/generic/test_ddpm.py --- src/test.py | 20 -------------------- 1 file changed, 20 deletions(-) delete mode 100644 src/test.py diff --git a/src/test.py b/src/test.py deleted file mode 100644 index eb68198a..00000000 --- a/src/test.py +++ /dev/null @@ -1,20 +0,0 @@ -# third party -from sklearn.datasets import load_iris - -# synthcity absolute -from synthcity.plugins import Plugins -from synthcity.plugins.core.dataloader import GenericDataLoader - -# loadDebugger() -X, y = load_iris(as_frame=True, return_X_y=True) -X = GenericDataLoader(X.assign(target=y), target_column="target") -plugin = Plugins().get( - "ddpm", - n_iter=3, - is_classification=True, - gaussian_loss_type="mse", - num_timesteps=100, - verbose=1, -) -plugin.fit(X) -X_syn = plugin.model.generate(50) From 3cf73d7d7d610e1a97a2cde0067830208059ce85 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Mon, 13 Mar 2023 10:37:09 +0100 Subject: [PATCH 22/95] change TabDDPM method signatures --- 
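Note on the new signatures: `cond` may now be passed to `fit` either as the one
allowed positional argument or as the `cond` keyword, never both, and for
classification plugins the target labels themselves act as the condition, so
passing `cond` explicitly raises a ValueError. A sketch of the resulting call
patterns (the loader and series names below are placeholders, not part of the
patch):

    # hypothetical objects, for illustration only
    loader = GenericDataLoader(df, target_column="target")

    clf_plugin = Plugins().get("ddpm", is_classification=True)
    clf_plugin.fit(loader)            # cond is unpacked from the target column

    gen_plugin = Plugins().get("ddpm")
    gen_plugin.fit(loader, cond_series)       # positional cond
    gen_plugin.fit(loader, cond=cond_series)  # keyword cond (equivalent)
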
src/synthcity/plugins/generic/plugin_ddpm.py | 36 ++++++++++---------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/src/synthcity/plugins/generic/plugin_ddpm.py b/src/synthcity/plugins/generic/plugin_ddpm.py index b28c6ef3..6556ec05 100644 --- a/src/synthcity/plugins/generic/plugin_ddpm.py +++ b/src/synthcity/plugins/generic/plugin_ddpm.py @@ -1,12 +1,10 @@ """ Reference: Kotelnikov, Akim et al. “TabDDPM: Modelling Tabular Data with Diffusion Models.” ArXiv abs/2209.15421 (2022): n. pag. """ -# mypy: disable-error-code=override -# flake8: noqa: F401 # stdlib from pathlib import Path -from typing import Any, List, Optional, Union +from typing import Any, List # third party import numpy as np @@ -16,14 +14,8 @@ from pydantic import validate_arguments # synthcity absolute -from synthcity.metrics.weighted_metrics import WeightedMetrics from synthcity.plugins.core.dataloader import DataLoader -from synthcity.plugins.core.distribution import ( - CategoricalDistribution, - Distribution, - FloatDistribution, - IntegerDistribution, -) +from synthcity.plugins.core.distribution import CategoricalDistribution, Distribution from synthcity.plugins.core.models.tabular_ddpm import TabDDPM from synthcity.plugins.core.plugin import Plugin from synthcity.plugins.core.schema import Schema @@ -153,20 +145,28 @@ def hyperparameter_space(**kwargs: Any) -> List[Distribution]: CategoricalDistribution(name="dim_hidden", choices=[128, 256, 512, 1024]), ] - def _fit( - self, data: DataLoader, cond: Any = None, **kwargs: Any - ) -> "TabDDPMPlugin": + def _fit(self, X: DataLoader, *args: Any, **kwargs: Any) -> "TabDDPMPlugin": + cond = None + if args: + if len(args) > 1: + raise ValueError("Only one positional argument is allowed") + if "cond" in kwargs: + raise ValueError("cond is already given by the positional argument") + cond = args[0] + elif "cond" in kwargs: + cond = kwargs.pop("cond") + if self.is_classification: if cond is not None: raise ValueError( "cond is already given by the labels for classification" ) - _, cond = data.unpack() + _, cond = X.unpack() self._labels, self._cond_dist = np.unique(cond, return_counts=True) self._cond_dist = self._cond_dist / self._cond_dist.sum() # NOTE: should we include the target column in `df`? 
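+        # As the code stands, yes: `X.dataframe()` returns every column,
+        # including the target that was unpacked into `cond` above.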
- df = data.dataframe() + df = X.dataframe() if cond is not None: cond = pd.Series(cond, index=df.index) @@ -177,9 +177,9 @@ def _fit( return self - def _generate( - self, count: int, syn_schema: Schema, cond: Any = None, **kwargs: Any - ) -> DataLoader: + def _generate(self, count: int, syn_schema: Schema, **kwargs: Any) -> DataLoader: + cond = kwargs.pop("cond", None) + if self.is_classification and cond is None: # randomly generate labels following the distribution of the training data cond = np.random.choice(self._labels, size=count, p=self._cond_dist) From 5d37c4ba5ba7e1d4a56e783aa7a1b8aaa24f1bcd Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Mon, 13 Mar 2023 13:09:30 +0100 Subject: [PATCH 23/95] remove Iterator subscription --- src/synthcity/plugins/core/models/tabular_ddpm/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py index 0618becd..35910001 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py @@ -55,8 +55,8 @@ def _anneal_lr(self, epoch: int) -> None: def _update_ema( self, - target_params: Iterator[nn.Parameter], - source_params: Iterator[nn.Parameter], + target_params: Iterator, + source_params: Iterator, rate: float = 0.999, ) -> None: """ From 681ba607bc7c4052989e7791be8f9dacfe329d12 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Wed, 15 Mar 2023 17:09:25 +0100 Subject: [PATCH 24/95] update AssertionErrors, add EarlyStop callback, removed additional MLP, update logging --- .gitignore | 1 + src/synthcity/plugins/core/models/mlp.py | 37 +- .../core/models/tabular_ddpm/__init__.py | 57 +- .../gaussian_multinomial_diffsuion.py | 249 +++----- .../core/models/tabular_ddpm/modules.py | 546 +++--------------- .../plugins/core/models/tabular_ddpm/utils.py | 35 +- src/synthcity/plugins/generic/plugin_ddpm.py | 109 ++-- src/synthcity/utils/callbacks.py | 91 +++ src/synthcity/utils/dataframe.py | 4 +- tests/plugins/generic/test_ddpm.py | 9 +- 10 files changed, 416 insertions(+), 722 deletions(-) create mode 100644 src/synthcity/utils/callbacks.py diff --git a/.gitignore b/.gitignore index 41f36b84..b2bc0daa 100644 --- a/.gitignore +++ b/.gitignore @@ -67,3 +67,4 @@ lightning_logs generated MNIST cifar-10* +src/test.py diff --git a/src/synthcity/plugins/core/models/mlp.py b/src/synthcity/plugins/core/models/mlp.py index eb599874..5ab63464 100644 --- a/src/synthcity/plugins/core/models/mlp.py +++ b/src/synthcity/plugins/core/models/mlp.py @@ -1,11 +1,11 @@ # stdlib -from typing import Any, Callable, List, Optional, Tuple +from typing import Any, Callable, List, Optional, Tuple, Union # third party import numpy as np import torch from pydantic import validate_arguments -from torch import nn +from torch import Tensor, nn from torch.utils.data import DataLoader, TensorDataset # synthcity absolute @@ -31,8 +31,27 @@ def forward(self, logits: torch.Tensor) -> torch.Tensor: ) -def get_nonlin(name: str) -> nn.Module: - if name == "none": +class GLU(nn.Module): + """Gated Linear Unit (GLU).""" + + def __init__(self, activation: Union[str, nn.Module] = "sigmoid") -> None: + super().__init__() + if type(activation) == str: + self.non_lin = get_nonlin(activation) + else: + self.non_lin = activation + + def forward(self, x: Tensor) -> Tensor: + if x.shape[-1] % 2: + raise ValueError("The last dimension of the input tensor must be 
even.") + a, b = x.chunk(2, dim=-1) + return a * self.non_lin(b) + + +def get_nonlin(name: Union[str, nn.Module]) -> nn.Module: + if isinstance(name, nn.Module): + return name + elif name == "none": return nn.Identity() elif name == "elu": return nn.ELU() @@ -48,6 +67,16 @@ def get_nonlin(name: str) -> nn.Module: return nn.Sigmoid() elif name == "softmax": return GumbelSoftmax() + elif name == "gelu": + return nn.GELU() + elif name == "glu": + return GLU() + elif name == "reglu": + return GLU("relu") + elif name == "geglu": + return GLU("gelu") + elif name in ("silu", "swish"): + return nn.SiLU() else: raise ValueError(f"Unknown nonlinearity {name}") diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py index 35910001..c762f389 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py @@ -1,7 +1,7 @@ # stdlib from collections.abc import Iterator from copy import deepcopy -from typing import Any, Dict, Optional +from typing import Any, Optional, Sequence # third party import numpy as np @@ -11,6 +11,9 @@ from torch import nn # synthcity absolute +from synthcity.logger import info +from synthcity.metrics.weighted_metrics import WeightedMetrics +from synthcity.utils.callbacks import Callback from synthcity.utils.constants import DEVICE from synthcity.utils.dataframe import discrete_columns @@ -28,20 +31,21 @@ def __init__( weight_decay: float = 0.0001, batch_size: int = 1024, num_timesteps: int = 1000, + is_classification: bool = False, gaussian_loss_type: str = "mse", scheduler: str = "cosine", + callbacks: Sequence[Callback] = (), device: torch.device = DEVICE, - verbose: int = 0, log_interval: int = 10, print_interval: int = 100, # model params model_type: str = "mlp", - rtdl_params: Optional[Dict[str, Any]] = None, - dim_label_emb: int = 128, + mlp_params: Optional[dict] = None, + dim_embed: int = 128, # early stopping n_iter_min: int = 100, - n_iter_print: int = 50, patience: int = 5, + patience_metric: Optional[WeightedMetrics] = None, ) -> None: super().__init__() self.__dict__.update(locals()) @@ -72,10 +76,12 @@ def _update_ema( def fit( self, X: pd.DataFrame, cond: Optional[pd.Series] = None, **kwargs: Any ) -> "TabDDPM": - if cond is not None: - n_labels = cond.nunique() + if self.is_classification and cond is not None: + if np.ndim(cond) != 1: + raise ValueError("cond must be a 1D array") + self.n_classes = cond.nunique() else: - n_labels = 0 + self.n_classes = 0 cat_cols = discrete_columns(X, return_counts=True) @@ -92,10 +98,10 @@ def fit( self._col_perm = np.arange(X.shape[1]) model_params = dict( - num_classes=n_labels, - is_y_cond=cond is not None, - rtdl_params=self.rtdl_params, - dim_t=self.dim_label_emb, + num_classes=self.n_classes, + use_label=cond is not None, + mlp_params=self.mlp_params, + dim_emb=self.dim_embed, ) tensors = [ @@ -104,6 +110,7 @@ def fit( if cond is None else torch.tensor(cond.values, dtype=torch.long, device=self.device), ] + self.dataloader = TensorDataLoader(*tensors, batch_size=self.batch_size) self.diffusion = GaussianMultinomialDiffusion( @@ -115,7 +122,6 @@ def fit( num_timesteps=self.num_timesteps, scheduler=self.scheduler, device=self.device, - verbose=self.verbose, ).to(self.device) self.ema_model = deepcopy(self.diffusion.denoise_fn) @@ -126,11 +132,10 @@ def fit( self.diffusion.parameters(), lr=self.lr, weight_decay=self.weight_decay ) - self.loss_history = 
pd.DataFrame(columns=["step", "mloss", "gloss", "loss"]) + for cbk in self.callbacks: + cbk.on_fit_begin(self) - # if self.verbose: - # print("Starting training") - # print(self) + self.loss_history = pd.DataFrame(columns=["step", "mloss", "gloss", "loss"]) steps = 0 curr_loss_multi = 0.0 @@ -138,8 +143,11 @@ def fit( curr_count = 0 for epoch in range(self.n_iter): + self.epoch = epoch + 1 self.diffusion.train() + [cbk.on_epoch_begin(self, epoch) for cbk in self.callbacks] + for x, y in self.dataloader: self.optimizer.zero_grad() loss_multi, loss_gauss = self.diffusion.mixed_loss(x, y) @@ -157,8 +165,8 @@ def fit( if steps % self.log_interval == 0: mloss = np.around(curr_loss_multi / curr_count, 4) gloss = np.around(curr_loss_gauss / curr_count, 4) - if self.verbose and steps % self.print_interval == 0: - print( + if steps % self.print_interval == 0: + info( f"Step {steps}: MLoss: {mloss} GLoss: {gloss} Sum: {mloss + gloss}" ) self.loss_history.loc[len(self.loss_history)] = [ @@ -175,6 +183,17 @@ def fit( self.ema_model.parameters(), self.diffusion.parameters() ) + self.eval() + + try: + [cbk.on_epoch_end(self, epoch) for cbk in self.callbacks] + except StopIteration: + info(f"Early stopped at epoch {epoch}") + break + + for cbk in self.callbacks: + cbk.on_fit_end(self) + return self def generate(self, count: int, cond: Any = None) -> np.ndarray: diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py index ad1a776f..25bc57f1 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py @@ -6,18 +6,20 @@ """ # stdlib import math -from typing import Any, Callable, Optional, Tuple +from typing import Any, Optional, Tuple # third party import numpy as np import torch import torch.nn.functional as F -from torch import Tensor, nn +from torch import Tensor + +# synthcity absolute +from synthcity.logger import debug, info, warning # synthcity relative from .modules import MLPDiffusion, ResNetDiffusion from .utils import ( - FoundNANsError, discretized_gaussian_log_likelihood, extract, index_to_log_onehot, @@ -32,16 +34,7 @@ ) -def get_named_beta_schedule( - schedule_name: str, num_diffusion_timesteps: int -) -> np.ndarray: - """ - Get a pre-defined beta schedule for the given name. - The beta schedule library consists of beta schedules which remain similar - in the limit of num_diffusion_timesteps. - Beta schedules may be added, but should not be removed or changed once - they are committed to maintain backwards compatibility. - """ +def get_beta_schedule(schedule_name: str, num_diffusion_timesteps: int) -> np.ndarray: if schedule_name == "linear": # Linear schedule from Ho et al, extended to work for any number of # diffusion steps. @@ -52,35 +45,25 @@ def get_named_beta_schedule( beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64 ) elif schedule_name == "cosine": - return betas_for_alpha_bar( - num_diffusion_timesteps, - lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2, - ) + # Create a beta schedule that discretizes the given alpha_t_bar function, + # which defines the cumulative product of (1-beta) over time from t = [0,1]. 
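+        # Worked example: with num_diffusion_timesteps = 4, alpha_bar is
+        # evaluated at t = 0, 0.25, 0.5, 0.75 and 1, and each beta is
+        #     betas[i] = 1 - alpha_bar((i + 1) / 4) / alpha_bar(i / 4)
+        # clipped at max_beta to avoid a singularity as t approaches 1.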
+ def alpha_bar(t: float) -> float: + return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 + + # a lambda that takes an argument t between 0 and 1 and produces the cumulative + # product of (1-beta) up to that part of the diffusion process. + max_beta = 0.999 + # the maximum beta to use; use values lower than 1 to prevent singularities. + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) + return np.array(betas) else: raise NotImplementedError(f"unknown beta schedule: {schedule_name}") -def betas_for_alpha_bar( - num_diffusion_timesteps: int, alpha_bar: Callable, max_beta: float = 0.999 -) -> np.ndarray: - """ - Create a beta schedule that discretizes the given alpha_t_bar function, - which defines the cumulative product of (1-beta) over time from t = [0,1]. - :param num_diffusion_timesteps: the number of betas to produce. - :param alpha_bar: a lambda that takes an argument t from 0 to 1 and - produces the cumulative product of (1-beta) up to that - part of the diffusion process. - :param max_beta: the maximum beta to use; use values lower than 1 to - prevent singularities. - """ - betas = [] - for i in range(num_diffusion_timesteps): - t1 = i / num_diffusion_timesteps - t2 = (i + 1) / num_diffusion_timesteps - betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) - return np.array(betas) - - class GaussianMultinomialDiffusion(torch.nn.Module): def __init__( self, @@ -95,21 +78,19 @@ def __init__( parametrization: str = "x0", scheduler: str = "cosine", device: torch.device = torch.device("cpu"), - verbose: int = 0, ) -> None: super(GaussianMultinomialDiffusion, self).__init__() - if not (multinomial_loss_type in ("vb_stochastic", "vb_all")): - raise AssertionError - if not (parametrization in ("x0", "direct")): - raise AssertionError - - if verbose: - self.print = print - else: - self.print = lambda *args, **kwargs: None + if multinomial_loss_type not in ("vb_stochastic", "vb_all"): + raise ValueError( + "multinomial_loss_type must be 'vb_stochastic' or 'vb_all'" + ) + if gaussian_loss_type not in ("mse", "kl"): + raise ValueError("gaussian_loss_type must be 'mse' or 'kl'") + if parametrization not in ("x0", "direct"): + raise ValueError("parametrization must be 'x0' or 'direct'") if multinomial_loss_type == "vb_all": - self.print( + warning( "Computing the loss using the bound on _all_ timesteps." " This is expensive both in terms of memory and computation." 
) @@ -131,13 +112,15 @@ def __init__( if model_params is None: model_params = dict( - d_in=self.dim_input, num_classes=0, is_y_cond=False, rtdl_params=None + dim_in=self.dim_input, num_classes=0, use_label=False, mlp_params=None ) else: - model_params["d_in"] = self.dim_input + model_params["dim_in"] = self.dim_input - if model_params["rtdl_params"] is None: - model_params["rtdl_params"] = dict(d_layers=[256, 256, 256], dropout=0.0) + if model_params["mlp_params"] is None: + model_params["mlp_params"] = dict( + n_units_hidden=256, n_layers_hidden=3, dropout=0.0 + ) if model_type == "mlp": self.denoise_fn = MLPDiffusion(**model_params) @@ -153,7 +136,7 @@ def __init__( self.parametrization = parametrization self.scheduler = scheduler - alphas = 1.0 - get_named_beta_schedule(scheduler, num_timesteps) + alphas = 1.0 - get_beta_schedule(scheduler, num_timesteps) alphas = torch.tensor(alphas.astype("float64")) betas = 1.0 - alphas @@ -200,15 +183,18 @@ def __init__( .to(device) ) - if not (log_add_exp(log_alpha, log_1_min_alpha).abs().sum().item() < 1.0e-5): - raise AssertionError - if not ( - log_add_exp(log_cumprod_alpha, log_1_min_cumprod_alpha).abs().sum().item() - < 1e-5 + if ( + max( + log_add_exp(log_alpha, log_1_min_alpha).abs().sum().item(), + log_add_exp(log_cumprod_alpha, log_1_min_cumprod_alpha) + .abs() + .sum() + .item(), + (np.cumsum(log_alpha) - log_cumprod_alpha).abs().sum().item(), + ) + > 1e-5 ): - raise AssertionError - if not ((np.cumsum(log_alpha) - log_cumprod_alpha).abs().sum().item() < 1.0e-5): - raise AssertionError + raise ValueError("Numerical error in log-sum-exp") # Convert to float32 and register buffers. self.register_buffer("alphas", alphas.float().to(device)) @@ -257,8 +243,8 @@ def gaussian_q_sample( ) -> Tensor: if noise is None: noise = torch.randn_like(x_start) - if not (noise.shape == x_start.shape): - raise AssertionError + if noise.shape != x_start.shape: + raise ValueError("noise.shape != x_start.shape") return ( extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise @@ -267,8 +253,8 @@ def gaussian_q_sample( def gaussian_q_posterior_mean_variance( self, x_start: Tensor, x_t: Tensor, t: Tensor ) -> Tuple[Tensor, Tensor, Tensor]: - if not (x_start.shape == x_t.shape): - raise AssertionError + if x_start.shape != x_t.shape: + raise ValueError("x_start.shape != x_t.shape") posterior_mean = ( extract(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract(self.posterior_mean_coef2, t, x_t.shape) * x_t @@ -283,7 +269,7 @@ def gaussian_q_posterior_mean_variance( == posterior_log_variance_clipped.shape[0] == x_start.shape[0] ): - raise AssertionError + raise ValueError("tensor lengths mismatch") return posterior_mean, posterior_variance, posterior_log_variance_clipped def gaussian_p_mean_variance( @@ -291,16 +277,14 @@ def gaussian_p_mean_variance( model_output: Tensor, x: Tensor, t: Tensor, - clip_denoised: bool = False, - denoised_fn: Optional[nn.Module] = None, - model_kwargs: Any = None, + model_kwargs: Optional[dict] = None, ) -> dict: if model_kwargs is None: model_kwargs = {} B, C = x.shape[:2] - if not (t.shape == (B,)): - raise AssertionError + if t.shape != (B,): + raise ValueError("length of t is not equal to batch size") model_variance = torch.cat( [ @@ -329,8 +313,8 @@ def gaussian_p_mean_variance( if not ( model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape ): - raise AssertionError( - f"{model_mean.shape}, {model_log_variance.shape}, 
{pred_xstart.shape}, {x.shape}" + raise ValueError( + "not all of model_mean, model_log_variance, pred_xstart, x have the same shape" ) return { @@ -346,7 +330,6 @@ def _vb_terms_bpd( x_start: Tensor, x_t: Tensor, t: Tensor, - clip_denoised: bool = False, model_kwargs: Optional[dict] = None, ) -> dict: ( @@ -355,7 +338,7 @@ def _vb_terms_bpd( true_log_variance_clipped, ) = self.gaussian_q_posterior_mean_variance(x_start=x_start, x_t=x_t, t=t) out = self.gaussian_p_mean_variance( - model_output, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs + model_output, x_t, t, model_kwargs=model_kwargs ) kl = normal_kl( true_mean, true_log_variance_clipped, out["mean"], out["log_variance"] @@ -365,8 +348,8 @@ def _vb_terms_bpd( decoder_nll = -discretized_gaussian_log_likelihood( x_start, means=out["mean"], log_scales=0.5 * out["log_variance"] ) - if not (decoder_nll.shape == x_start.shape): - raise AssertionError + if decoder_nll.shape != x_start.shape: + raise ValueError("decoder_nll.shape != x_start.shape") decoder_nll = mean_flat(decoder_nll) / np.log(2.0) # At the first timestep return the decoder NLL, @@ -404,7 +387,7 @@ def _gaussian_loss( x_t: Tensor, t: Tensor, noise: Tensor, - model_kwargs: Any = None, + model_kwargs: Optional[dict] = None, ) -> Tensor: if model_kwargs is None: model_kwargs = {} @@ -418,7 +401,6 @@ def _gaussian_loss( x_start=x_start, x_t=x_t, t=t, - clip_denoised=False, model_kwargs=model_kwargs, )["output"] @@ -427,8 +409,8 @@ def _gaussian_loss( def _predict_xstart_from_eps( self, x_t: Tensor, t: Tensor, eps: Tensor = 1e-08 ) -> Tensor: - if not (x_t.shape == eps.shape): - raise AssertionError + if x_t.shape != eps.shape: + raise ValueError("x_t.shape != eps.shape") return ( extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps @@ -446,16 +428,12 @@ def gaussian_p_sample( model_out: Tensor, x: Tensor, t: Tensor, - clip_denoised: bool = False, - denoised_fn: Any = None, - model_kwargs: Any = None, + model_kwargs: Optional[dict] = None, ) -> dict: out = self.gaussian_p_mean_variance( model_out, x, t, - clip_denoised=clip_denoised, - denoised_fn=denoised_fn, model_kwargs=model_kwargs, ) noise = torch.randn_like(x) @@ -500,10 +478,14 @@ def q_pred(self, log_x_start: Tensor, t: Tensor) -> Tensor: return log_probs def predict_start(self, model_out: Tensor, log_x_t: Tensor) -> Tensor: - if not (model_out.size(0) == log_x_t.size(0)): - raise AssertionError - if not (model_out.size(1) == self.num_classes.sum()): - raise AssertionError(f"{model_out.size()}") + if model_out.size(0) != log_x_t.size(0): + raise ValueError( + f"length of model_out {model_out.size(0)} != length of log_x_t {log_x_t.size(0)}" + ) + if model_out.size(1) != self.num_classes.sum(): + raise ValueError( + f"length of model_out {model_out.size(1)} != total num_classes {self.num_classes.sum()}" + ) log_pred = torch.empty_like(model_out) for ix in self.slices_for_classes: @@ -524,8 +506,6 @@ def q_posterior(self, log_x_start: Tensor, log_x_t: Tensor, t: Tensor) -> Tensor t_broadcast == 0, log_x_start, log_EV_qxtmin_x0.to(torch.float32) ) - # unnormed_logprobs = log_EV_qxtmin_x0 + - # log q_pred_one_timestep(x_t, t) # Note: _NOT_ x_tmin1, which is how the formula is typically used!!! # Not very easy to see why this is true. 
But it is :) unnormed_logprobs = log_EV_qxtmin_x0 + self.q_pred_one_timestep(log_x_t, t) @@ -693,9 +673,6 @@ def mixed_loss(self, x: Tensor, cond: Optional[Tensor] = None) -> tuple: if x_num.shape[1] > 0: loss_gauss = self._gaussian_loss(model_out_num, x_num, x_num_t, t, noise) - # loss_multi = torch.where(out_dict['y'] == 1, loss_multi, 2 * loss_multi) - # loss_gauss = torch.where(out_dict['y'] == 1, loss_gauss, 2 * loss_gauss) - return loss_multi.mean(), loss_gauss.mean() @torch.no_grad() @@ -712,7 +689,7 @@ def mixed_elbo(self, x0: Tensor, cond: Optional[Tensor] = None) -> dict: gaussian_loss = [] xstart_mse = [] mse = [] - # mu_mse = [] + mu_mse = [] out_mean = [] true_mean = [] multinomial_loss = [] @@ -747,13 +724,12 @@ def mixed_elbo(self, x0: Tensor, cond: Optional[Tensor] = None) -> dict: x_start=x_num, x_t=x_num_t, t=t_array, - clip_denoised=False, ) multinomial_loss.append(kl) gaussian_loss.append(out["output"]) xstart_mse.append(mean_flat((out["pred_xstart"] - x_num) ** 2)) - # mu_mse.append(mean_flat(out["mean_mse"])) + mu_mse.append(mean_flat(out["mean_mse"])) out_mean.append(mean_flat(out["out_mean"])) true_mean.append(mean_flat(out["true_mean"])) @@ -764,7 +740,7 @@ def mixed_elbo(self, x0: Tensor, cond: Optional[Tensor] = None) -> dict: multinomial_loss = torch.stack(multinomial_loss, dim=1) xstart_mse = torch.stack(xstart_mse, dim=1) mse = torch.stack(mse, dim=1) - # mu_mse = torch.stack(mu_mse, dim=1) + mu_mse = torch.stack(mu_mse, dim=1) out_mean = torch.stack(out_mean, dim=1) true_mean = torch.stack(true_mean, dim=1) @@ -783,7 +759,7 @@ def mixed_elbo(self, x0: Tensor, cond: Optional[Tensor] = None) -> dict: "losses_multinimial": multinomial_loss, "xstart_mse": xstart_mse, "mse": mse, - # "mu_mse": mu_mse + "mu_mse": mu_mse, "out_mean": out_mean, "true_mean": true_mean, } @@ -794,16 +770,12 @@ def gaussian_ddim_step( model_out_num: Tensor, x: Tensor, t: Tensor, - clip_denoised: bool = False, - denoised_fn: Any = None, eta: float = 0.0, ) -> Tensor: out = self.gaussian_p_mean_variance( model_out_num, x, t, - clip_denoised=clip_denoised, - denoised_fn=denoised_fn, model_kwargs=None, ) @@ -811,7 +783,7 @@ def gaussian_ddim_step( alpha_bar = extract(self.alphas_cumprod, t, x.shape) alpha_bar_prev = extract(self.alphas_cumprod_prev, t, x.shape) - sigma = ( + sigma = eta or ( eta * torch.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar)) * torch.sqrt(1 - alpha_bar / alpha_bar_prev) @@ -829,39 +801,14 @@ def gaussian_ddim_step( return sample - # @torch.no_grad() - # def gaussian_ddim_sample(self, noise, T, cond=None, eta=0.0): - # x = noise - # b = x.shape[0] - # device = x.device - # for t in reversed(range(T)): - # self.print(f"Sample timestep {t:4d}", end="\r") - # t_array = (torch.ones(b, device=device) * t).long() - # out_num = self.denoise_fn(x, t_array, y=cond) - # x = self.gaussian_ddim_step(out_num, x, t_array) - # self.print() - # return x - @torch.no_grad() def gaussian_ddim_reverse_step( self, model_out_num: Tensor, x: Tensor, t: Tensor, - clip_denoised: bool = False, - denoised_fn: Any = None, - eta: float = 0.0, ) -> Tensor: - if not (eta == 0.0): - raise AssertionError("Eta must be zero.") - out = self.gaussian_p_mean_variance( - model_out_num, - x, - t, - clip_denoised=clip_denoised, - denoised_fn=denoised_fn, - model_kwargs=None, - ) + out = self.gaussian_p_mean_variance(model_out_num, x, t) eps = ( extract(self.sqrt_recip_alphas_cumprod, t, x.shape) * x - out["pred_xstart"] @@ -875,18 +822,6 @@ def gaussian_ddim_reverse_step( return mean_pred - # @torch.no_grad() 
- # def gaussian_ddim_reverse_sample(self, x, T, cond=None): - # b = x.shape[0] - # device = x.device - # for t in range(T): - # self.print(f"Reverse timestep {t:4d}", end="\r") - # t_array = (torch.ones(b, device=device) * t).long() - # out_num = self.denoise_fn(x, t_array, y=cond) - # x = self.gaussian_ddim_reverse_step(out_num, x, t_array, eta=0.0) - # self.print() - # return x - @torch.no_grad() def multinomial_ddim_step( self, model_out_cat: Tensor, log_x_t: Tensor, t: Tensor, eta: float = 0.0 @@ -895,7 +830,7 @@ def multinomial_ddim_step( alpha_bar = extract(self.alphas_cumprod, t, log_x_t.shape) alpha_bar_prev = extract(self.alphas_cumprod_prev, t, log_x_t.shape) - sigma = ( + sigma = eta or ( eta * torch.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar)) * torch.sqrt(1 - alpha_bar / alpha_bar_prev) @@ -935,20 +870,17 @@ def sample_ddim(self, num_samples: int, cond: Any = None) -> Tensor: log_z = self.log_sample_categorical(uniform_logits) for i in reversed(range(0, self.num_timesteps)): - self.print(f"Sample timestep {i:4d}", end="\r") + debug(f"Sample timestep {i:4d}", end="\r") t = torch.full((b,), i, device=device, dtype=torch.long) model_out = self.denoise_fn( torch.cat([z_norm, log_z], dim=1).float(), t, y=cond ) model_out_num = model_out[:, : self.num_numerics] model_out_cat = model_out[:, self.num_numerics :] - z_norm = self.gaussian_ddim_step( - model_out_num, z_norm, t, clip_denoised=False - ) + z_norm = self.gaussian_ddim_step(model_out_num, z_norm, t) if has_cat: log_z = self.multinomial_ddim_step(model_out_cat, log_z, t) - self.print() z_ohe = torch.exp(log_z).round() z_cat = log_z if has_cat: @@ -970,27 +902,18 @@ def sample(self, num_samples: int, cond: Any = None) -> Tensor: ) log_z = self.log_sample_categorical(uniform_logits) - # y = torch.multinomial( - # cond, - # num_samples=b, - # replacement=True - # ) - # out_dict = {'y': y.long().to(device)} for i in reversed(range(0, self.num_timesteps)): - self.print(f"Sample timestep {i:4d}", end="\r") + debug(f"Sample timestep {i:4d}", end="\r") t = torch.full((b,), i, device=device, dtype=torch.long) model_out = self.denoise_fn( torch.cat([z_norm, log_z], dim=1).float(), t, y=cond ) model_out_num = model_out[:, : self.num_numerics] model_out_cat = model_out[:, self.num_numerics :] - z_norm = self.gaussian_p_sample( - model_out_num, z_norm, t, clip_denoised=False - )["sample"] + z_norm = self.gaussian_p_sample(model_out_num, z_norm, t)["sample"] if has_cat: log_z = self.p_sample(model_out_cat, log_z, t=t) - self.print() z_ohe = torch.exp(log_z).round() z_cat = log_z if has_cat: @@ -1006,7 +929,7 @@ def sample_all( ddim: bool = False, ) -> Tensor: if ddim: - self.print("Sample using DDIM.") + info("Sample using DDIM.") sample_fn = self.sample_ddim else: sample_fn = self.sample @@ -1017,7 +940,7 @@ def sample_all( for b in bs: sample = sample_fn(b, cond) if torch.any(sample.isnan()).item(): - raise FoundNANsError + raise ValueError("found NaNs in sample") all_samples.append(sample) return torch.cat(all_samples, dim=0) diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/modules.py b/src/synthcity/plugins/core/models/tabular_ddpm/modules.py index 8caad49f..297c01bf 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/modules.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/modules.py @@ -3,513 +3,115 @@ """ # stdlib import math -from typing import Any, Callable, List, Optional, Union +from typing import Optional, Union # third party import torch -import torch.nn as nn -import torch.nn.functional as F import 
torch.optim -from torch import Tensor +from torch import Tensor, nn -ModuleType = Union[str, Callable[..., nn.Module]] +# synthcity absolute +from synthcity.plugins.core.models.mlp import MLP, get_nonlin -class SiLU(nn.Module): - def forward(self, x: Tensor) -> Tensor: - return x * torch.sigmoid(x) - - -def timestep_embedding(timesteps: Tensor, dim: int, max_period: int = 10000) -> Tensor: - """ - Create sinusoidal timestep embeddings. - - :param timesteps: a 1-D Tensor of N indices, one per batch element. - These may be fractional. - :param dim: the dimension of the output. - :param max_period: controls the minimum frequency of the embeddings. - :return: an [N x dim] Tensor of positional embeddings. - """ - half = dim // 2 - freqs = torch.exp( - -math.log(max_period) - * torch.arange(start=0, end=half, dtype=torch.float32) - / half - ).to(device=timesteps.device) - args = timesteps[:, None].float() * freqs[None] - embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) - if dim % 2: - embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) - return embedding - - -def reglu(x: Tensor) -> Tensor: - """The ReGLU activation function from [1]. - References: - [1] Noam Shazeer, "GLU Variants Improve Transformer", 2020 - """ - if not (x.shape[-1] % 2 == 0): - raise AssertionError - a, b = x.chunk(2, dim=-1) - return a * F.relu(b) - - -def geglu(x: Tensor) -> Tensor: - """The GEGLU activation function from [1]. - References: - [1] Noam Shazeer, "GLU Variants Improve Transformer", 2020 - """ - if not (x.shape[-1] % 2 == 0): - raise AssertionError - a, b = x.chunk(2, dim=-1) - return a * F.gelu(b) - - -class ReGLU(nn.Module): - """The ReGLU activation function from [shazeer2020glu]. - - Examples: - .. testcode:: - - module = ReGLU() - x = torch.randn(3, 4) - assert module(x).shape == (3, 2) - - References: - * [shazeer2020glu] Noam Shazeer, "GLU Variants Improve Transformer", 2020 - """ - - def forward(self, x: Tensor) -> Tensor: - return reglu(x) - - -class GEGLU(nn.Module): - """The GEGLU activation function from [shazeer2020glu]. - - Examples: - .. testcode:: - - module = GEGLU() - x = torch.randn(3, 4) - assert module(x).shape == (3, 2) - - References: - * [shazeer2020glu] Noam Shazeer, "GLU Variants Improve Transformer", 2020 - """ - - def forward(self, x: Tensor) -> Tensor: - return geglu(x) - - -def _make_nn_module(module_type: ModuleType, *args: Any) -> nn.Module: - return ( - ( - ReGLU() - if module_type == "ReGLU" - else GEGLU() - if module_type == "GEGLU" - else getattr(nn, module_type)(*args) - ) - if isinstance(module_type, str) - else module_type(*args) - ) - - -class MLP(nn.Module): - """The MLP model used in [gorishniy2021revisiting]. - - The following scheme describes the architecture: - - .. code-block:: text - - MLP: (in) -> Block -> ... -> Block -> Linear -> (out) - Block: (in) -> Linear -> Activation -> Dropout -> (out) - - Examples: - .. 
testcode:: - - x = torch.randn(4, 2) - module = MLP.make_baseline(x.shape[1], [3, 5], 0.1, 1) - assert module(x).shape == (len(x), 1) - - References: - * [gorishniy2021revisiting] Yury Gorishniy, Ivan Rubachev, Valentin Khrulkov, Artem Babenko, "Revisiting Deep Learning Models for Tabular Data", 2021 - """ - - class Block(nn.Module): - """The main building block of `MLP`.""" - - def __init__( - self, - *, - d_in: int, - d_out: int, - bias: bool, - activation: ModuleType, - dropout: float, - ) -> None: - super().__init__() - self.linear = nn.Linear(d_in, d_out, bias) - self.activation = _make_nn_module(activation) - self.dropout = nn.Dropout(dropout) - - def forward(self, x: Tensor) -> Tensor: - return self.dropout(self.activation(self.linear(x))) - +class TimeStepEmbedding(nn.Module): def __init__( self, - *, - d_in: int, - d_layers: List[int], - dropouts: Union[float, List[float]], - activation: Union[str, Callable[[], nn.Module]], - d_out: int, + dim: int, + max_period: int = 10000, + n_layers: int = 2, + nonlin: Union[str, nn.Module] = "silu", ) -> None: """ - Note: - `make_baseline` is the recommended constructor. - """ - super().__init__() - if isinstance(dropouts, float): - dropouts = [dropouts] * len(d_layers) - if not (len(d_layers) == len(dropouts)): - raise AssertionError - if activation in ["ReGLU", "GEGLU"]: - raise AssertionError - - self.blocks = nn.ModuleList( - [ - MLP.Block( - d_in=d_layers[i - 1] if i else d_in, - d_out=d, - bias=True, - activation=activation, - dropout=dropout, - ) - for i, (d, dropout) in enumerate(zip(d_layers, dropouts)) - ] - ) - self.head = nn.Linear(d_layers[-1] if d_layers else d_in, d_out) - - @classmethod - def make_baseline( - cls, - d_in: int, - d_layers: List[int], - dropout: float, - d_out: int, - ) -> "MLP": - """Create a "baseline" `MLP`. - - This variation of MLP was used in [gorishniy2021revisiting]. Features: - - * :code:`Activation` = :code:`ReLU` - * all linear layers except for the first one and the last one are of the same dimension - * the dropout rate is the same for all dropout layers + Create sinusoidal timestep embeddings. Args: - d_in: the input size - d_layers: the dimensions of the linear layers. If there are more than two - layers, then all of them except for the first and the last ones must - have the same dimension. Valid examples: :code:`[]`, :code:`[8]`, - :code:`[8, 16]`, :code:`[2, 2, 2, 2]`, :code:`[1, 2, 2, 4]`. Invalid - example: :code:`[1, 2, 3, 4]`. - dropout: the dropout rate for all hidden layers - d_out: the output size - Returns: - MLP - - References: - * [gorishniy2021revisiting] Yury Gorishniy, Ivan Rubachev, Valentin Khrulkov, Artem Babenko, "Revisiting Deep Learning Models for Tabular Data", 2021 + - dim (int): the dimension of the output. + - max_period (int): controls the minimum frequency of the embeddings. + - n_layers (int): number of dense layers """ - if not (isinstance(dropout, float)): - raise AssertionError - if len(d_layers) > 2: - if not len(set(d_layers[1:-1])) == 1: - raise AssertionError( - "if d_layers contains more than two elements, then" - " all elements except for the first and the last ones must be equal." - ) - return MLP( - d_in=d_in, - d_layers=d_layers, - dropouts=dropout, - activation="ReLU", - d_out=d_out, - ) - - def forward(self, x: Tensor) -> Tensor: - x = x.float() - for block in self.blocks: - x = block(x) - x = self.head(x) - return x - - -class ResNet(nn.Module): - """The ResNet model used in [gorishniy2021revisiting]. 
- The following scheme describes the architecture: - .. code-block:: text - ResNet: (in) -> Linear -> Block -> ... -> Block -> Head -> (out) - |-> Norm -> Linear -> Activation -> Dropout -> Linear -> Dropout ->| - | | - Block: (in) ------------------------------------------------------------> Add -> (out) - Head: (in) -> Norm -> Activation -> Linear -> (out) - Examples: - .. testcode:: - x = torch.randn(4, 2) - module = ResNet.make_baseline( - d_in=x.shape[1], - n_blocks=2, - d_main=3, - d_hidden=4, - dropout_first=0.25, - dropout_second=0.0, - d_out=1 - ) - assert module(x).shape == (len(x), 1) - References: - * [gorishniy2021revisiting] Yury Gorishniy, Ivan Rubachev, Valentin Khrulkov, Artem Babenko, "Revisiting Deep Learning Models for Tabular Data", 2021 - """ - - class Block(nn.Module): - """The main building block of `ResNet`.""" - - def __init__( - self, - *, - d_main: int, - d_hidden: int, - bias_first: bool, - bias_second: bool, - dropout_first: float, - dropout_second: float, - normalization: ModuleType, - activation: ModuleType, - skip_connection: bool, - ) -> None: - super().__init__() - self.normalization = _make_nn_module(normalization, d_main) - self.linear_first = nn.Linear(d_main, d_hidden, bias_first) - self.activation = _make_nn_module(activation) - self.dropout_first = nn.Dropout(dropout_first) - self.linear_second = nn.Linear(d_hidden, d_main, bias_second) - self.dropout_second = nn.Dropout(dropout_second) - self.skip_connection = skip_connection - - def forward(self, x: Tensor) -> Tensor: - x_input = x - x = self.normalization(x) - x = self.linear_first(x) - x = self.activation(x) - x = self.dropout_first(x) - x = self.linear_second(x) - x = self.dropout_second(x) - if self.skip_connection: - x = x_input + x - return x + super().__init__() + self.dim = dim + self.max_period = max_period + self.n_layers = n_layers - class Head(nn.Module): - """The final module of `ResNet`.""" + if dim % 2 != 0: + raise ValueError(f"embedding dim must be even, got {dim}") - def __init__( - self, - *, - d_in: int, - d_out: int, - bias: bool, - normalization: ModuleType, - activation: ModuleType, - ) -> None: - super().__init__() - self.normalization = _make_nn_module(normalization, d_in) - self.activation = _make_nn_module(activation) - self.linear = nn.Linear(d_in, d_out, bias) + layers = [] + for _ in range(n_layers - 1): + layers.append(nn.Linear(dim, dim)) + layers.append(get_nonlin(nonlin)) - def forward(self, x: Tensor) -> Tensor: - if self.normalization is not None: - x = self.normalization(x) - x = self.activation(x) - x = self.linear(x) - return x + self.fc = nn.Sequential(*layers, nn.Linear(dim, dim)) - def __init__( - self, - *, - d_in: int, - n_blocks: int, - d_main: Optional[int], - d_hidden: int, - dropout_first: float, - dropout_second: float, - normalization: ModuleType, - activation: ModuleType, - d_out: int, - ) -> None: - """ - Note: - `make_baseline` is the recommended constructor. 
+ def forward(self, timesteps: Tensor) -> Tensor: """ - super().__init__() - - self.first_layer = nn.Linear(d_in, d_main) - if d_main is None: - d_main = d_in - self.blocks = nn.Sequential( - *[ - ResNet.Block( - d_main=d_main, - d_hidden=d_hidden, - bias_first=True, - bias_second=True, - dropout_first=dropout_first, - dropout_second=dropout_second, - normalization=normalization, - activation=activation, - skip_connection=True, - ) - for _ in range(n_blocks) - ] - ) - self.head = ResNet.Head( - d_in=d_main, - d_out=d_out, - bias=True, - normalization=normalization, - activation=activation, - ) - - @classmethod - def make_baseline( - cls, - *, - d_in: int, - n_blocks: int, - d_main: int, - d_hidden: int, - dropout_first: float, - dropout_second: float, - d_out: int, - ) -> "ResNet": - """Create a "baseline" `ResNet`. - This variation of ResNet was used in [gorishniy2021revisiting]. Features: - * :code:`Activation` = :code:`ReLU` - * :code:`Norm` = :code:`BatchNorm1d` Args: - d_in: the input size - n_blocks: the number of Blocks - d_main: the input size (or, equivalently, the output size) of each Block - d_hidden: the output size of the first linear layer in each Block - dropout_first: the dropout rate of the first dropout layer in each Block. - dropout_second: the dropout rate of the second dropout layer in each Block. - References: - * [gorishniy2021revisiting] Yury Gorishniy, Ivan Rubachev, Valentin Khrulkov, Artem Babenko, "Revisiting Deep Learning Models for Tabular Data", 2021 + - timesteps (Tensor): 1D Tensor of N indices, one per batch element. """ - return cls( - d_in=d_in, - n_blocks=n_blocks, - d_main=d_main, - d_hidden=d_hidden, - dropout_first=dropout_first, - dropout_second=dropout_second, - normalization="BatchNorm1d", - activation="ReLU", - d_out=d_out, - ) - - def forward(self, x: Tensor) -> Tensor: - x = x.float() - x = self.first_layer(x) - x = self.blocks(x) - x = self.head(x) - return x - - -# **For diffusion** + d, T = self.dim, self.max_period + mid = d // 2 + fs = torch.exp(-math.log(T) / mid * torch.arange(mid, dtype=torch.float32)) + args = timesteps[:, None].float() * fs[None] + emb = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) + return self.fc(emb) class MLPDiffusion(nn.Module): + add_residual = False + def __init__( self, - d_in: int, - num_classes: int, - is_y_cond: bool, - rtdl_params: dict, - dim_t: int = 128, + dim_in: int, + dim_emb: int = 128, + *, + mlp_params: dict = {}, + use_label: bool = False, + num_classes: int = 0, + emb_nonlin: Union[str, nn.Module] = "silu", + max_time_period: int = 10000, ) -> None: super().__init__() - self.dim_t = dim_t + self.dim_t = dim_emb self.num_classes = num_classes - self.is_y_cond = is_y_cond - - # d0 = rtdl_params['d_layers'][0] - - rtdl_params["d_in"] = dim_t - rtdl_params["d_out"] = d_in + self.has_label = use_label - self.mlp = MLP.make_baseline(**rtdl_params) + if isinstance(emb_nonlin, str): + self.emb_nonlin = get_nonlin(emb_nonlin) + else: + self.emb_nonlin = emb_nonlin - if self.num_classes > 0 and is_y_cond: - self.label_emb = nn.Embedding(self.num_classes, dim_t) - elif self.num_classes == 0 and is_y_cond: - self.label_emb = nn.Linear(1, dim_t) + self.proj = nn.Linear(dim_in, dim_emb) + self.time_emb = TimeStepEmbedding(dim_emb, max_time_period) - self.proj = nn.Linear(d_in, dim_t) - self.time_embed = nn.Sequential( - nn.Linear(dim_t, dim_t), nn.SiLU(), nn.Linear(dim_t, dim_t) + if use_label: + if self.num_classes > 0: + self.label_emb = nn.Embedding(self.num_classes, dim_emb) + elif 
self.num_classes == 0: # regression + self.label_emb = nn.Linear(1, dim_emb) + + self.model = MLP( + n_units_in=dim_emb, + n_units_out=dim_in, + task_type="/", + residual=self.add_residual, + **mlp_params, ) - def forward( - self, x: Tensor, timesteps: Tensor, y: Optional[Tensor] = None - ) -> Tensor: - emb = self.time_embed(timestep_embedding(timesteps, self.dim_t)) - if self.is_y_cond and y is not None: - if self.num_classes > 0: - y = y.squeeze() + def forward(self, x: Tensor, t: Tensor, y: Optional[Tensor] = None) -> Tensor: + emb = self.time_emb(t) + if self.has_label: + if y is None: + raise ValueError("y must be provided if use_label is True") + if self.num_classes == 0: + y = y.resize(-1, 1).float() else: - y = y.resize(y.size(0), 1).float() - emb += F.silu(self.label_emb(y)) + y = y.squeeze().long() + emb += self.emb_nonlin(self.label_emb(y)) x = self.proj(x) + emb - return self.mlp(x) + return self.model(x) -class ResNetDiffusion(nn.Module): - def __init__( - self, - d_in: int, - num_classes: int, - is_y_cond: bool, - rtdl_params: dict, - dim_t: int = 256, - ) -> None: - super().__init__() - self.dim_t = dim_t - self.num_classes = num_classes - - rtdl_params["d_in"] = d_in - rtdl_params["d_out"] = d_in - rtdl_params["emb_d"] = dim_t - self.resnet = ResNet.make_baseline(**rtdl_params) - - if self.num_classes > 0 and is_y_cond: - self.label_emb = nn.Embedding(self.num_classes, dim_t) - elif self.num_classes == 0 and is_y_cond: - self.label_emb = nn.Linear(1, dim_t) - - self.proj = nn.Linear(d_in, dim_t) - self.time_embed = nn.Sequential( - nn.Linear(dim_t, dim_t), nn.SiLU(), nn.Linear(dim_t, dim_t) - ) - - def forward( - self, x: Tensor, timesteps: Tensor, y: Optional[Tensor] = None - ) -> Tensor: - emb = self.time_embed(timestep_embedding(timesteps, self.dim_t)) - if self.is_y_cond and y is not None: - if self.num_classes > 0: - y = y.squeeze() - else: - y = y.resize(y.size(0), 1).float() - emb += F.silu(self.label_emb(y)) - x = self.proj(x) + emb - return self.resnet(x) +class ResNetDiffusion(MLPDiffusion): + add_residual = True diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/utils.py b/src/synthcity/plugins/core/models/tabular_ddpm/utils.py index 4d4c92bd..8ec0c025 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/utils.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/utils.py @@ -18,13 +18,12 @@ def normal_kl(mean1: Tensor, logvar1: Tensor, mean2: Tensor, logvar2: Tensor) -> Shapes are automatically broadcasted, so batches can be compared to scalars, among other use cases. """ - tensor = None - for obj in (mean1, logvar1, mean2, logvar2): - if isinstance(obj, Tensor): - tensor = obj - break - if tensor is None: - raise AssertionError("at least one argument must be a Tensor") + try: + tensor = next( + x for x in (mean1, logvar1, mean2, logvar2) if isinstance(x, Tensor) + ) + except StopIteration: + raise TypeError("at least one argument must be a Tensor") # Force variances to be Tensors. Broadcasting helps convert scalars to # Tensors, but it does not work for torch.exp(). @@ -66,7 +65,7 @@ def discretized_gaussian_log_likelihood( :return: a tensor like x of log probabilities (in nats). 
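Note: the `normal_kl` rewrite above only changes how the first Tensor argument is located; the closed form it returns is unchanged, and admits a quick spot check (identical Gaussians must give zero KL):

    import torch

    # 0.5 * (-1 + lv2 - lv1 + exp(lv1 - lv2) + (m1 - m2)**2 * exp(-lv2))
    m1 = lv1 = m2 = lv2 = torch.zeros(3)
    kl = 0.5 * (
        -1.0 + lv2 - lv1 + torch.exp(lv1 - lv2) + (m1 - m2) ** 2 * torch.exp(-lv2)
    )
    assert torch.equal(kl, torch.zeros(3))  # KL(N(0,1) || N(0,1)) == 0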
""" if not (x.shape == means.shape == log_scales.shape): - raise AssertionError + raise ValueError("shapes must match") centered_x = x - means inv_stdv = torch.exp(-log_scales) plus_in = inv_stdv * (centered_x + 1.0 / 255.0) @@ -83,8 +82,8 @@ def discretized_gaussian_log_likelihood( x > 0.999, log_one_minus_cdf_min, torch.log(cdf_delta.clamp(min=1e-12)) ), ) - if not (log_probs.shape == x.shape): - raise AssertionError + if log_probs.shape != x.shape: + raise ValueError("shapes must match") return log_probs @@ -123,8 +122,9 @@ def log_1_min_a(a: Tensor) -> Tensor: def log_add_exp(a: Tensor, b: Tensor) -> Tensor: - maximum = torch.max(a, b) - return maximum + torch.log(torch.exp(a - maximum) + torch.exp(b - maximum)) + """Numerically stable log(exp(a) + exp(b)).""" + m = torch.max(a, b) + return m + torch.log(torch.exp(a - m) + torch.exp(b - m)) def extract(a: Tensor, t: Tensor, x_shape: tuple) -> Tensor: @@ -171,13 +171,6 @@ def sliced_logsumexp(x: Tensor, slices: Tensor) -> Tensor: return slice_lse_repeated -class FoundNANsError(BaseException): - """Found NANs during sampling""" - - def __init__(self, message: str = "Found NANs during sampling.") -> None: - super(FoundNANsError, self).__init__(message) - - class TensorDataLoader: """ A DataLoader-like object for a set of tensors that can be much faster than @@ -198,13 +191,13 @@ def __init__( :returns: A FastTensorDataLoader. """ if not all(t.shape[0] == tensors[0].shape[0] for t in tensors): - raise AssertionError + raise ValueError("All tensors must have the same length.") self.tensors = tensors self.dataset_len = self.tensors[0].shape[0] self.batch_size = batch_size self.shuffle = shuffle - def __iter__(self) -> Iterator[tuple]: + def __iter__(self) -> Iterator: idx = np.arange(self.dataset_len) if self.shuffle: np.random.shuffle(idx) diff --git a/src/synthcity/plugins/generic/plugin_ddpm.py b/src/synthcity/plugins/generic/plugin_ddpm.py index 6556ec05..b149e336 100644 --- a/src/synthcity/plugins/generic/plugin_ddpm.py +++ b/src/synthcity/plugins/generic/plugin_ddpm.py @@ -4,7 +4,7 @@ # stdlib from pathlib import Path -from typing import Any, List +from typing import Any, List, Sequence # third party import numpy as np @@ -19,6 +19,7 @@ from synthcity.plugins.core.models.tabular_ddpm import TabDDPM from synthcity.plugins.core.plugin import Plugin from synthcity.plugins.core.schema import Schema +from synthcity.utils.callbacks import Callback from synthcity.utils.constants import DEVICE @@ -31,14 +32,55 @@ class TabDDPMPlugin(Plugin): Tabular denoising diffusion probabilistic model. Args: - ... + is_classification: bool = False + Whether the task is classification or regression. + n_iter: int = 1000 + Number of epochs for training. + lr: float = 0.002 + Learning rate. + weight_decay: float = 1e-4 + L2 weight decay. + batch_size: int = 1024 + Size of mini-batches. + model_type: str = "mlp" + Type of model to use. Either "mlp" or "resnet". + num_timesteps: int = 1000 + Number of timesteps to use in the diffusion process. + gaussian_loss_type: str = "mse" + Type of loss to use for the Gaussian diffusion process. Either "mse" or "kl". + scheduler: str = "cosine" + The scheduler of forward process variance 'beta' to use. Either "cosine" or "linear". + device: Any = DEVICE + Device to use for training. + callbacks: Sequence[Callback] = () + Callbacks to use during training. + log_interval: int = 100 + Number of iterations between logging. + print_interval: int = 500 + Number of iterations between printing. 
+ n_layers_hidden: int = 3 + Number of hidden layers in the MLP. + dim_hidden: int = 256 + Number of hidden units per hidden layer in the MLP. + dropout: float = 0.0 + Dropout rate. + dim_embed: int = 128 + Dimensionality of the embedding space. + random_state: int + Random seed to use. + workspace: Path. + Optional Path for caching intermediary results. + compress_dataset: bool. Default = False. + Drop redundant features before training the generator. + sampling_patience: int. + Max inference iterations to wait for the generated data to match the training schema. Example: >>> from sklearn.datasets import load_iris >>> from synthcity.plugins import Plugins - >>> X, y = load_iris(as_frame = True, return_X_y = True) + >>> X, y = load_iris(as_frame=True, return_X_y=True) >>> X["target"] = y - >>> plugin = Plugins().get("ddpm", n_iter = 100) + >>> plugin = Plugins().get("ddpm", n_iter=100, is_classification=True) >>> plugin.fit(X) >>> plugin.generate(50) @@ -58,19 +100,14 @@ def __init__( gaussian_loss_type: str = "mse", scheduler: str = "cosine", device: Any = DEVICE, - verbose: int = 0, + callbacks: Sequence[Callback] = (), log_interval: int = 100, print_interval: int = 500, # model params - num_layers: int = 3, + n_layers_hidden: int = 3, dim_hidden: int = 256, dropout: float = 0.0, - dim_label_emb: int = 128, - # early stopping - n_iter_min: int = 100, - n_iter_print: int = 50, - patience: int = 5, - # patience_metric: Optional[WeightedMetrics] = None, + dim_embed: int = 128, # core plugin arguments random_state: int = 0, workspace: Path = Path("workspace"), @@ -89,7 +126,10 @@ def __init__( self.is_classification = is_classification - rtdl_params = dict(d_layers=[dim_hidden] * num_layers, dropout=dropout) + mlp_params = dict( + n_layers_hidden=n_layers_hidden, n_units_hidden=dim_hidden, dropout=dropout + ) + self.model = TabDDPM( n_iter=n_iter, lr=lr, @@ -97,17 +137,15 @@ def __init__( batch_size=batch_size, num_timesteps=num_timesteps, gaussian_loss_type=gaussian_loss_type, + is_classification=is_classification, scheduler=scheduler, device=device, - verbose=verbose, + callbacks=callbacks, log_interval=log_interval, print_interval=print_interval, model_type=model_type, - rtdl_params=rtdl_params, - dim_label_emb=dim_label_emb, - n_iter_min=n_iter_min, - n_iter_print=n_iter_print, - patience=patience, + mlp_params=mlp_params, + dim_embed=dim_embed, ) @staticmethod @@ -141,20 +179,24 @@ def hyperparameter_space(**kwargs: Any) -> List[Distribution]: CategoricalDistribution(name="batch_size", choices=[256, 4096]), CategoricalDistribution(name="num_timesteps", choices=[100, 1000]), CategoricalDistribution(name="n_iter", choices=[5000, 10000, 20000]), - CategoricalDistribution(name="num_layers", choices=[2, 4, 6, 8]), + CategoricalDistribution(name="n_layers_hidden", choices=[2, 4, 6, 8]), CategoricalDistribution(name="dim_hidden", choices=[128, 256, 512, 1024]), ] def _fit(self, X: DataLoader, *args: Any, **kwargs: Any) -> "TabDDPMPlugin": - cond = None + """Fit the model to the data. + + Optionally, a condition can be given as the keyword argument `cond`. + + If the task is classification, the target labels are automatically regarded as the condition, and no additional condition should be given. + + If the task is regression, the target variable is not specially treated. There is no condition by default, but one can be given by the user, either as a column name or an array-like.
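For illustration, the two conditioning modes just described, as a sketch built on the iris example from the class docstring (`cond="target"` is an arbitrary choice of column):

    from sklearn.datasets import load_iris
    from synthcity.plugins import Plugins

    X, y = load_iris(as_frame=True, return_X_y=True)
    X["target"] = y

    # classification: the target labels double as the condition
    Plugins().get("ddpm", n_iter=100, is_classification=True).fit(X)

    # regression-style fit with an explicit condition, passed as a column name
    Plugins().get("ddpm", n_iter=100).fit(X, cond="target")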
+ """ + df = X.dataframe() + cond = kwargs.pop("cond", None) + if args: - if len(args) > 1: - raise ValueError("Only one positional argument is allowed") - if "cond" in kwargs: - raise ValueError("cond is already given by the positional argument") - cond = args[0] - elif "cond" in kwargs: - cond = kwargs.pop("cond") + raise ValueError("Only keyword arguments are allowed") if self.is_classification: if cond is not None: @@ -164,15 +206,14 @@ def _fit(self, X: DataLoader, *args: Any, **kwargs: Any) -> "TabDDPMPlugin": _, cond = X.unpack() self._labels, self._cond_dist = np.unique(cond, return_counts=True) self._cond_dist = self._cond_dist / self._cond_dist.sum() - - # NOTE: should we include the target column in `df`? - df = X.dataframe() + else: + if type(cond) is str: + cond = df[cond] if cond is not None: cond = pd.Series(cond, index=df.index) - # self.encoder = TabularEncoder().fit(X) - + # NOTE: cond may also be included in the dataframe self.model.fit(df, cond, **kwargs) return self @@ -184,7 +225,7 @@ def _generate(self, count: int, syn_schema: Schema, **kwargs: Any) -> DataLoader # randomly generate labels following the distribution of the training data cond = np.random.choice(self._labels, size=count, p=self._cond_dist) - def callback(count, cond=cond): # type: ignore + def callback(count): # type: ignore return self.model.generate(count, cond=cond) return self._safe_generate(callback, count, syn_schema, **kwargs) diff --git a/src/synthcity/utils/callbacks.py b/src/synthcity/utils/callbacks.py new file mode 100644 index 00000000..fa54074c --- /dev/null +++ b/src/synthcity/utils/callbacks.py @@ -0,0 +1,91 @@ +# stdlib +from abc import ABC, abstractmethod +from typing import Optional + +# third party +import numpy as np +import pandas as pd +from torch import Tensor, nn + +# synthcity absolute +from synthcity.metrics.weighted_metrics import WeightedMetrics + + +class Callback(ABC): + """Abstract base class of callbacks.""" + + @abstractmethod + def on_epoch_begin(self, model: nn.Module) -> None: + raise NotImplementedError + + @abstractmethod + def on_epoch_end(self, model: nn.Module) -> None: + raise NotImplementedError + + @abstractmethod + def on_fit_begin(self, model: nn.Module) -> None: + raise NotImplementedError + + @abstractmethod + def on_fit_end(self, model: nn.Module) -> None: + raise NotImplementedError + + +class EarlyStopping(Callback): + def __init__( + self, + patience: int = 5, + min_epochs: int = 100, + patience_metric: Optional[WeightedMetrics] = None, + ) -> None: + self.patience = patience + self.patience_metric = patience_metric + self.min_epochs = min_epochs + self.best_score = self._init_patience_score() + self.best_model_state = None + self.wait = 0 + self._epochs = 0 + + def on_epoch_end(self, model: nn.Module) -> None: + self._epochs += 1 + if self.patience_metric is not None: + if not hasattr(self, "X_val"): + self.X_val = model.X_val + if isinstance(self.X_val, Tensor): + self.X_val = self.X_val.detach().cpu().numpy() + self._evaluate_patience_metric(model) + if self.wait >= self.patience and self._epochs >= self.min_epochs: + raise StopIteration("Early stopping") + + def on_fit_end(self, model: nn.Module) -> None: + if self.best_model_state is not None: + model.load_state_dict(self.best_model_state) # type: ignore + + def _init_patience_score(self) -> float: + if self.patience_metric is None: + return 0 + elif self.patience_metric.direction() == "minimize": + return np.inf + else: + return -np.inf + + def _evaluate_patience_metric(self, model: nn.Module) 
-> None: + X_val = self.X_val + X_syn = model.generate(len(X_val)) + + new_score = self.patience_metric.evaluate( # type: ignore + pd.DataFrame(X_val), + pd.DataFrame(X_syn), + ) + + if self.patience_metric.direction() == "minimize": # type: ignore + is_new_best = new_score < self.best_score + else: + is_new_best = new_score > self.best_score + + if is_new_best: + self.wait = 0 + self.best_score = new_score + self.best_model_state = model.state_dict() + else: + self.wait += 1 diff --git a/src/synthcity/utils/dataframe.py b/src/synthcity/utils/dataframe.py index c12b29da..a313b91e 100644 --- a/src/synthcity/utils/dataframe.py +++ b/src/synthcity/utils/dataframe.py @@ -6,7 +6,7 @@ def constant_columns(dataframe: pd.DataFrame) -> list: """ Find constant value columns in a pandas dataframe. """ - return discrete_columns(dataframe, 2) + return discrete_columns(dataframe, 1) def discrete_columns( @@ -19,5 +19,5 @@ def discrete_columns( (col, cnt) if return_counts else col for col, vals in dataframe.items() for cnt in [vals.nunique()] - if cnt < max_classes + if cnt <= max_classes ] diff --git a/tests/plugins/generic/test_ddpm.py b/tests/plugins/generic/test_ddpm.py index 7f56077a..cddadf62 100644 --- a/tests/plugins/generic/test_ddpm.py +++ b/tests/plugins/generic/test_ddpm.py @@ -15,16 +15,11 @@ plugin_name = "ddpm" plugin_args = dict( n_iter=1000, - # is_classification=True, + is_classification=True, batch_size=200, num_timesteps=500, - verbose=1, log_interval=10, - print_interval=100 - # rtdl_params=dict( - # d_layers=[256, 256], - # dropout=0.0 - # ) + print_interval=100, ) From a9438dc93f01d6c226c3865f3218fd7f60dc55e5 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Thu, 16 Mar 2023 13:14:31 +0100 Subject: [PATCH 25/95] remove TensorDataLoader, update test_ddpm --- .../core/models/tabular_ddpm/__init__.py | 19 +++-- .../gaussian_multinomial_diffsuion.py | 74 +++++++++++-------- .../plugins/core/models/tabular_ddpm/utils.py | 50 ++----------- src/synthcity/plugins/generic/plugin_ddpm.py | 5 ++ tests/plugins/generic/test_ddpm.py | 47 +++++++----- 5 files changed, 94 insertions(+), 101 deletions(-) diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py index c762f389..d80c2a85 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py @@ -9,6 +9,7 @@ import torch from pydantic import validate_arguments from torch import nn +from torch.utils.data import DataLoader, TensorDataset # synthcity absolute from synthcity.logger import info @@ -19,7 +20,6 @@ # synthcity relative from .gaussian_multinomial_diffsuion import GaussianMultinomialDiffusion -from .utils import TensorDataLoader class TabDDPM(nn.Module): @@ -104,14 +104,18 @@ def fit( dim_emb=self.dim_embed, ) - tensors = [ + dataset = TensorDataset( torch.tensor(X.values, dtype=torch.float32, device=self.device), - np.repeat(None, len(X)) + torch.tensor([torch.nan] * len(X), dtype=torch.float32, device=self.device) if cond is None - else torch.tensor(cond.values, dtype=torch.long, device=self.device), - ] + else torch.tensor( + cond.values, + dtype=torch.long if self.is_classification else torch.float32, + device=self.device, + ), + ) - self.dataloader = TensorDataLoader(*tensors, batch_size=self.batch_size) + self.dataloader = DataLoader(dataset, batch_size=self.batch_size) self.diffusion = GaussianMultinomialDiffusion( model_type=self.model_type, @@ -150,7 +154,8 @@ def 
fit( for x, y in self.dataloader: self.optimizer.zero_grad() - loss_multi, loss_gauss = self.diffusion.mixed_loss(x, y) + args = (x,) if cond is None else (x, y) + loss_multi, loss_gauss = self.diffusion.mixed_loss(*args) loss = loss_multi + loss_gauss loss.backward() self.optimizer.step() diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py index 25bc57f1..270d8b03 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py @@ -21,7 +21,6 @@ from .modules import MLPDiffusion, ResNetDiffusion from .utils import ( discretized_gaussian_log_likelihood, - extract, index_to_log_onehot, log_1_min_a, log_add_exp, @@ -29,6 +28,7 @@ mean_flat, normal_kl, ohe_to_categories, + perm_and_expand, sliced_logsumexp, sum_except_batch, ) @@ -233,9 +233,9 @@ def __init__( def gaussian_q_mean_variance( self, x_start: Tensor, t: Tensor ) -> Tuple[Tensor, Tensor, Tensor]: - mean = extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start - variance = extract(1.0 - self.alphas_cumprod, t, x_start.shape) - log_variance = extract(self.log_1_min_cumprod_alpha, t, x_start.shape) + mean = perm_and_expand(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + variance = perm_and_expand(1.0 - self.alphas_cumprod, t, x_start.shape) + log_variance = perm_and_expand(self.log_1_min_cumprod_alpha, t, x_start.shape) return mean, variance, log_variance def gaussian_q_sample( @@ -246,8 +246,9 @@ def gaussian_q_sample( if noise.shape != x_start.shape: raise ValueError("noise.shape != x_start.shape") return ( - extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start - + extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise + perm_and_expand(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + + perm_and_expand(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) + * noise ) def gaussian_q_posterior_mean_variance( @@ -256,11 +257,11 @@ def gaussian_q_posterior_mean_variance( if x_start.shape != x_t.shape: raise ValueError("x_start.shape != x_t.shape") posterior_mean = ( - extract(self.posterior_mean_coef1, t, x_t.shape) * x_start - + extract(self.posterior_mean_coef2, t, x_t.shape) * x_t + perm_and_expand(self.posterior_mean_coef1, t, x_t.shape) * x_start + + perm_and_expand(self.posterior_mean_coef2, t, x_t.shape) * x_t ) - posterior_variance = extract(self.posterior_variance, t, x_t.shape) - posterior_log_variance_clipped = extract( + posterior_variance = perm_and_expand(self.posterior_variance, t, x_t.shape) + posterior_log_variance_clipped = perm_and_expand( self.posterior_log_variance_clipped, t, x_t.shape ) if not ( @@ -296,15 +297,15 @@ def gaussian_p_mean_variance( # model_variance = self.posterior_variance.to(x.device) model_log_variance = torch.log(model_variance) - model_variance = extract(model_variance, t, x.shape) - model_log_variance = extract(model_log_variance, t, x.shape) + model_variance = perm_and_expand(model_variance, t, x.shape) + model_log_variance = perm_and_expand(model_log_variance, t, x.shape) if self.gaussian_parametrization == "eps": pred_xstart = self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output) elif self.gaussian_parametrization == "x0": pred_xstart = model_output else: - raise NotImplementedError + raise ValueError("unknown gaussian_parametrization. 
Must be 'eps' or 'x0'") model_mean, _, _ = self.gaussian_q_posterior_mean_variance( x_start=pred_xstart, x_t=x, t=t @@ -412,16 +413,17 @@ def _predict_xstart_from_eps( if x_t.shape != eps.shape: raise ValueError("x_t.shape != eps.shape") return ( - extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - - extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps + perm_and_expand(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t + - perm_and_expand(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps ) def _predict_eps_from_xstart( self, x_t: Tensor, t: Tensor, pred_xstart: Tensor ) -> Tensor: return ( - extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart - ) / extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) + perm_and_expand(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t + - pred_xstart + ) / perm_and_expand(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def gaussian_p_sample( self, @@ -453,8 +455,8 @@ def multinomial_kl(self, log_prob1: Tensor, log_prob2: Tensor) -> Tensor: return kl def q_pred_one_timestep(self, log_x_t: Tensor, t: Tensor) -> Tensor: - log_alpha_t = extract(self.log_alpha, t, log_x_t.shape) - log_1_min_alpha_t = extract(self.log_1_min_alpha, t, log_x_t.shape) + log_alpha_t = perm_and_expand(self.log_alpha, t, log_x_t.shape) + log_1_min_alpha_t = perm_and_expand(self.log_1_min_alpha, t, log_x_t.shape) # alpha_t * E[xt] + (1 - alpha_t) 1 / K log_probs = log_add_exp( @@ -465,8 +467,10 @@ def q_pred_one_timestep(self, log_x_t: Tensor, t: Tensor) -> Tensor: return log_probs def q_pred(self, log_x_start: Tensor, t: Tensor) -> Tensor: - log_cumprod_alpha_t = extract(self.log_cumprod_alpha, t, log_x_start.shape) - log_1_min_cumprod_alpha = extract( + log_cumprod_alpha_t = perm_and_expand( + self.log_cumprod_alpha, t, log_x_start.shape + ) + log_1_min_cumprod_alpha = perm_and_expand( self.log_1_min_cumprod_alpha, t, log_x_start.shape ) @@ -525,7 +529,7 @@ def p_pred(self, model_out: Tensor, log_x: Tensor, t: Tensor) -> Tensor: elif self.parametrization == "direct": log_model_pred = self.predict_start(model_out, log_x) else: - raise ValueError + raise ValueError(f"unknown parametrization {self.parametrization}") return log_model_pred @torch.no_grad() @@ -613,8 +617,11 @@ def sample_time( pt = torch.ones_like(t).float() / self.num_timesteps return t, pt + else: - raise ValueError + raise ValueError( + "Unknown sampling method. Must be 'importance' or 'uniform'." + ) def _multinomial_loss( self, @@ -636,8 +643,11 @@ def _multinomial_loss( # Expensive, dont do it ;). # DEPRECATED return -self.nll(log_x_start) + else: - raise ValueError() + raise ValueError( + "Unknown multinomial loss type. Must be 'vb_stochastic' or 'vb_all'." 
+ ) def mixed_loss(self, x: Tensor, cond: Optional[Tensor] = None) -> tuple: b = x.shape[0] @@ -665,6 +675,7 @@ def mixed_loss(self, x: Tensor, cond: Optional[Tensor] = None) -> tuple: loss_multi = torch.zeros((1,)).float() loss_gauss = torch.zeros((1,)).float() + if x_cat.shape[1] > 0: loss_multi = self._multinomial_loss( model_out_cat, log_x_cat, log_x_cat_t, t, pt @@ -781,8 +792,8 @@ def gaussian_ddim_step( eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"]) - alpha_bar = extract(self.alphas_cumprod, t, x.shape) - alpha_bar_prev = extract(self.alphas_cumprod_prev, t, x.shape) + alpha_bar = perm_and_expand(self.alphas_cumprod, t, x.shape) + alpha_bar_prev = perm_and_expand(self.alphas_cumprod_prev, t, x.shape) sigma = eta or ( eta * torch.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar)) @@ -811,9 +822,10 @@ def gaussian_ddim_reverse_step( out = self.gaussian_p_mean_variance(model_out_num, x, t) eps = ( - extract(self.sqrt_recip_alphas_cumprod, t, x.shape) * x - out["pred_xstart"] - ) / extract(self.sqrt_recipm1_alphas_cumprod, t, x.shape) - alpha_bar_next = extract(self.alphas_cumprod_next, t, x.shape) + perm_and_expand(self.sqrt_recip_alphas_cumprod, t, x.shape) * x + - out["pred_xstart"] + ) / perm_and_expand(self.sqrt_recipm1_alphas_cumprod, t, x.shape) + alpha_bar_next = perm_and_expand(self.alphas_cumprod_next, t, x.shape) mean_pred = ( out["pred_xstart"] * torch.sqrt(alpha_bar_next) @@ -828,8 +840,8 @@ def multinomial_ddim_step( ) -> Tensor: log_x0 = self.predict_start(model_out_cat, log_x_t=log_x_t) - alpha_bar = extract(self.alphas_cumprod, t, log_x_t.shape) - alpha_bar_prev = extract(self.alphas_cumprod_prev, t, log_x_t.shape) + alpha_bar = perm_and_expand(self.alphas_cumprod, t, log_x_t.shape) + alpha_bar_prev = perm_and_expand(self.alphas_cumprod_prev, t, log_x_t.shape) sigma = eta or ( eta * torch.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar)) diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/utils.py b/src/synthcity/plugins/core/models/tabular_ddpm/utils.py index 8ec0c025..04eb9d8f 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/utils.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/utils.py @@ -1,9 +1,6 @@ # future from __future__ import annotations -# stdlib -from typing import Iterator - # third party import numpy as np import torch @@ -127,10 +124,11 @@ def log_add_exp(a: Tensor, b: Tensor) -> Tensor: return m + torch.log(torch.exp(a - m) + torch.exp(b - m)) -def extract(a: Tensor, t: Tensor, x_shape: tuple) -> Tensor: - b, *_ = t.shape - t = t.to(a.device) - out = a.gather(-1, t) +def perm_and_expand(a: Tensor, t: Tensor, x_shape: tuple) -> Tensor: + """Permutes a tensor in the order specified by `t` and expands it to `x_shape`.""" + if not (a.ndim == 1 and t.shape == (x_shape[0],)): + raise ValueError(f"dimensionality mismatch: {a.shape}, {t.shape}, {x_shape}") + out = a[t] while len(out.shape) < len(x_shape): out = out[..., None] return out.expand(x_shape) @@ -169,41 +167,3 @@ def sliced_logsumexp(x: Tensor, slices: Tensor) -> Tensor: slice_lse, slice_ends - slice_starts, dim=-1 ) return slice_lse_repeated - - -class TensorDataLoader: - """ - A DataLoader-like object for a set of tensors that can be much faster than - TensorDataset + DataLoader because dataloader grabs individual indices of - the dataset and calls cat (slow). 
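Note: the indexing/broadcast contract of `perm_and_expand` introduced above, checked on toy values (import path as of this commit; a later commit in this series moves the helper to `nn_utils`):

    import torch

    from synthcity.plugins.core.models.tabular_ddpm.utils import perm_and_expand

    a = torch.tensor([10.0, 20.0, 30.0])  # one scalar per diffusion timestep
    t = torch.tensor([2, 0])              # per-sample timestep indices
    out = perm_and_expand(a, t, (2, 4))   # gather a[t], then broadcast to x_shape
    assert out.tolist() == [[30.0, 30.0, 30.0, 30.0], [10.0, 10.0, 10.0, 10.0]]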
- Source: https://discuss.pytorch.org/t/dataloader-much-slower-than-manual-batching/27014/6 - """ - - def __init__( - self, *tensors: Tensor, batch_size: int = 32, shuffle: bool = False - ) -> None: - """ - Initialize a FastTensorDataLoader. - :param *tensors: tensors to store. Must have the same length @ dim 0. - :param batch_size: batch size to load. - :param shuffle: if True, shuffle the data *in-place* whenever an - iterator is created out of this object. - :returns: A FastTensorDataLoader. - """ - if not all(t.shape[0] == tensors[0].shape[0] for t in tensors): - raise ValueError("All tensors must have the same length.") - self.tensors = tensors - self.dataset_len = self.tensors[0].shape[0] - self.batch_size = batch_size - self.shuffle = shuffle - - def __iter__(self) -> Iterator: - idx = np.arange(self.dataset_len) - if self.shuffle: - np.random.shuffle(idx) - for i in range(0, self.dataset_len, self.batch_size): - s = idx[i : i + self.batch_size] - yield tuple(t[s] for t in self.tensors) - - def __len__(self) -> int: - return len(range(0, self.dataset_len, self.batch_size)) diff --git a/src/synthcity/plugins/generic/plugin_ddpm.py b/src/synthcity/plugins/generic/plugin_ddpm.py index b149e336..631480fc 100644 --- a/src/synthcity/plugins/generic/plugin_ddpm.py +++ b/src/synthcity/plugins/generic/plugin_ddpm.py @@ -195,6 +195,11 @@ def _fit(self, X: DataLoader, *args: Any, **kwargs: Any) -> "TabDDPMPlugin": df = X.dataframe() cond = kwargs.pop("cond", None) + # note that the TabularEncoder is not used in this plugin, because the + # Gaussian multinomial diffusion module needs to know the number of classes + # for each discrete feature before it applies torch.nn.functional.one_hot + # on these features, and it also preprocesses the continuous features differently. 
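The crux of the note above: `torch.nn.functional.one_hot` needs each feature's class count declared up front, e.g.

    import torch
    import torch.nn.functional as F

    x = torch.tensor([0, 2, 1])       # one categorical column
    oh = F.one_hot(x, num_classes=4)  # width is fixed by the declared class count,
    assert oh.shape == (3, 4)         # not by the classes seen in the batch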
+ if args: raise ValueError("Only keyword arguments are allowed") diff --git a/tests/plugins/generic/test_ddpm.py b/tests/plugins/generic/test_ddpm.py index cddadf62..2f9afeae 100644 --- a/tests/plugins/generic/test_ddpm.py +++ b/tests/plugins/generic/test_ddpm.py @@ -1,3 +1,7 @@ +# stdlib +from itertools import product +from typing import Any, Generator + # third party import numpy as np import pandas as pd @@ -13,9 +17,8 @@ from synthcity.plugins.generic.plugin_ddpm import plugin plugin_name = "ddpm" -plugin_args = dict( +plugin_params = dict( n_iter=1000, - is_classification=True, batch_size=200, num_timesteps=500, log_interval=10, @@ -23,29 +26,39 @@ ) -@pytest.mark.parametrize( - "test_plugin", generate_fixtures(plugin_name, plugin, plugin_args) -) +def extend_fixtures( + plugin_name: str = plugin_name, + plugin: Any = plugin, + plugin_params: dict = plugin_params, + **extra_params: list +) -> Generator: + if not extra_params: + yield from generate_fixtures(plugin_name, plugin, plugin_params) + return + param_set = list(product(*extra_params.values())) + for values in param_set: + params = plugin_params.copy() + params.update(zip(extra_params.keys(), values)) + yield from generate_fixtures(plugin_name, plugin, params) + + +@pytest.mark.parametrize("test_plugin", extend_fixtures()) def test_plugin_sanity(test_plugin: Plugin) -> None: assert test_plugin is not None -@pytest.mark.parametrize( - "test_plugin", generate_fixtures(plugin_name, plugin, plugin_args) -) +@pytest.mark.parametrize("test_plugin", extend_fixtures()) def test_plugin_name(test_plugin: Plugin) -> None: assert test_plugin.name() == plugin_name -@pytest.mark.parametrize( - "test_plugin", generate_fixtures(plugin_name, plugin, plugin_args) -) +@pytest.mark.parametrize("test_plugin", extend_fixtures()) def test_plugin_type(test_plugin: Plugin) -> None: assert test_plugin.type() == "generic" @pytest.mark.parametrize( - "test_plugin", generate_fixtures(plugin_name, plugin, plugin_args) + "test_plugin", extend_fixtures(is_classification=[True, False]) ) def test_plugin_fit(test_plugin: Plugin) -> None: X = pd.DataFrame(load_iris()["data"]) @@ -53,7 +66,7 @@ def test_plugin_fit(test_plugin: Plugin) -> None: @pytest.mark.parametrize( - "test_plugin", generate_fixtures(plugin_name, plugin, plugin_args) + "test_plugin", extend_fixtures(is_classification=[True, False]) ) def test_plugin_generate(test_plugin: Plugin) -> None: X = pd.DataFrame(load_iris()["data"]) @@ -69,7 +82,7 @@ def test_plugin_generate(test_plugin: Plugin) -> None: @pytest.mark.parametrize( - "test_plugin", generate_fixtures(plugin_name, plugin, plugin_args) + "test_plugin", extend_fixtures(is_classification=[True, False]) ) def test_plugin_generate_constraints(test_plugin: Plugin) -> None: X = pd.DataFrame(load_iris()["data"]) @@ -100,9 +113,7 @@ def test_plugin_generate_constraints(test_plugin: Plugin) -> None: assert list(X_gen.columns) == list(X.columns) -@pytest.mark.parametrize( - "test_plugin", generate_fixtures(plugin_name, plugin, plugin_args) -) +@pytest.mark.parametrize("test_plugin", extend_fixtures()) def test_plugin_hyperparams(test_plugin: Plugin) -> None: assert len(test_plugin.hyperparameter_space()) == 6 @@ -123,7 +134,7 @@ def test_eval_performance_ddpm(compress_dataset: bool) -> None: X = GenericDataLoader(Xraw) for _ in range(2): - test_plugin = plugin(**plugin_args, compress_dataset=compress_dataset) + test_plugin = plugin(**plugin_params, compress_dataset=compress_dataset) evaluator = PerformanceEvaluatorXGB() test_plugin.fit(X) From 
52be80f2ecb3aab0c9a3a5cd782744814127872d Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Thu, 16 Mar 2023 15:34:47 +0100 Subject: [PATCH 26/95] update EarlyStopping --- src/synthcity/utils/callbacks.py | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/src/synthcity/utils/callbacks.py b/src/synthcity/utils/callbacks.py index fa54074c..0e6b15cf 100644 --- a/src/synthcity/utils/callbacks.py +++ b/src/synthcity/utils/callbacks.py @@ -1,6 +1,5 @@ # stdlib from abc import ABC, abstractmethod -from typing import Optional # third party import numpy as np @@ -34,25 +33,29 @@ def on_fit_end(self, model: nn.Module) -> None: class EarlyStopping(Callback): def __init__( self, + patience_metric: WeightedMetrics, patience: int = 5, min_epochs: int = 100, - patience_metric: Optional[WeightedMetrics] = None, ) -> None: self.patience = patience - self.patience_metric = patience_metric self.min_epochs = min_epochs + self.patience_metric = patience_metric self.best_score = self._init_patience_score() self.best_model_state = None self.wait = 0 self._epochs = 0 + def on_fit_begin(self, model: nn.Module) -> None: + self.X_val = model.X_val + if isinstance(self.X_val, Tensor): + self.X_val = self.X_val.detach().cpu().numpy() + + def on_epoch_begin(self, model: nn.Module) -> None: + pass + def on_epoch_end(self, model: nn.Module) -> None: self._epochs += 1 if self.patience_metric is not None: - if not hasattr(self, "X_val"): - self.X_val = model.X_val - if isinstance(self.X_val, Tensor): - self.X_val = self.X_val.detach().cpu().numpy() self._evaluate_patience_metric(model) if self.wait >= self.patience and self._epochs >= self.min_epochs: raise StopIteration("Early stopping") @@ -62,9 +65,7 @@ def on_fit_end(self, model: nn.Module) -> None: model.load_state_dict(self.best_model_state) # type: ignore def _init_patience_score(self) -> float: - if self.patience_metric is None: - return 0 - elif self.patience_metric.direction() == "minimize": + if self.patience_metric.direction() == "minimize": return np.inf else: return -np.inf @@ -73,12 +74,12 @@ def _evaluate_patience_metric(self, model: nn.Module) -> None: X_val = self.X_val X_syn = model.generate(len(X_val)) - new_score = self.patience_metric.evaluate( # type: ignore + new_score = self.patience_metric.evaluate( pd.DataFrame(X_val), pd.DataFrame(X_syn), ) - if self.patience_metric.direction() == "minimize": # type: ignore + if self.patience_metric.direction() == "minimize": is_new_best = new_score < self.best_score else: is_new_best = new_score > self.best_score From 794ebd61be9fc966871a419604d0813ed3a35bee Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Mon, 27 Mar 2023 20:52:17 +0200 Subject: [PATCH 27/95] add TabDDPM tutorial, update TabDDPM plugin and encoders --- .../plugins/core/models/data_encoder.py | 150 +- .../core/models/tabular_ddpm/__init__.py | 22 +- .../gaussian_multinomial_diffsuion.py | 4 +- .../core/models/tabular_ddpm/modules.py | 10 +- .../core/models/tabular_ddpm/nn_utils.py | 169 ++ .../plugins/core/models/tabular_ddpm/utils.py | 541 +++-- .../plugins/core/models/tabular_encoder.py | 242 +-- src/synthcity/plugins/generic/plugin_ddpm.py | 18 +- src/synthcity/utils/dataframe.py | 12 + ...al8_tabular_modelling_with_diffusion.ipynb | 1936 +++++++++++++++++ 10 files changed, 2763 insertions(+), 341 deletions(-) create mode 100644 src/synthcity/plugins/core/models/tabular_ddpm/nn_utils.py create mode 100644 tutorials/tutorial8_tabular_modelling_with_diffusion.ipynb diff 
--git a/src/synthcity/plugins/core/models/data_encoder.py b/src/synthcity/plugins/core/models/data_encoder.py index 75915afb..9f432d9c 100644 --- a/src/synthcity/plugins/core/models/data_encoder.py +++ b/src/synthcity/plugins/core/models/data_encoder.py @@ -1,40 +1,83 @@ # stdlib -from typing import Any, List, Optional +from functools import wraps +from typing import Any, List, Optional, Union # third party import numpy as np import pandas as pd from pydantic import validate_arguments +from sklearn.base import BaseEstimator, TransformerMixin from sklearn.mixture import BayesianGaussianMixture +from sklearn.preprocessing import ( + MinMaxScaler, + OneHotEncoder, + QuantileTransformer, + StandardScaler, +) -class DatetimeEncoder: - """Datetime encoder, with sklearn-style API""" - - def __init__(self) -> None: - pass +class _DataEncoder(TransformerMixin, BaseEstimator): + """Base data encoder, with sklearn-style API""" @validate_arguments(config=dict(arbitrary_types_allowed=True)) - def fit(self, X: pd.Series) -> Any: + def fit(self, X: Any) -> Any: + return self._fit(X) + + def _fit(self, X: Any) -> Any: return self @validate_arguments(config=dict(arbitrary_types_allowed=True)) - def transform(self, X: pd.Series) -> pd.Series: - out = pd.to_numeric(X).astype(float) - return out + def transform(self, X: Any) -> Any: + return self._transform(X) + + def _transform(self, X: Any) -> Any: + return X @validate_arguments(config=dict(arbitrary_types_allowed=True)) - def inverse_transform(self, X: pd.Series) -> pd.Series: - out = pd.to_datetime(X) - return out + def inverse_transform(self, X: Any) -> Any: + return self._inverse_transform(X) + + def _inverse_transform(self, X: Any) -> Any: + return X @validate_arguments(config=dict(arbitrary_types_allowed=True)) - def fit_transform(self, X: pd.Series) -> pd.Series: + def fit_transform(self, X: Any) -> Any: return self.fit(X).transform(X) + @classmethod + def wraps(cls, encoder_class: TransformerMixin) -> type: + """Wraps sklearn encoder to DataEncoder.""" + + @wraps(encoder_class) + class WrappedEncoder(_DataEncoder): + def __init__(self, *args: Any, **kwargs: Any) -> None: + self.encoder = encoder_class(*args, **kwargs) + + def _fit(self, X: Any) -> _DataEncoder: + self.encoder.fit(X) + return self + + def _transform(self, X: Any) -> Any: + return self.encoder.transform(X) + + def _inverse_transform(self, X: Any) -> Any: + return self.encoder.inverse_transform(X) + + return WrappedEncoder + + +class DatetimeEncoder(_DataEncoder): + """Datetime variables encoder""" + + def _transform(self, X: pd.Series) -> pd.Series: + return pd.to_numeric(X).astype(float) + + def _inverse_transform(self, X: pd.Series) -> pd.Series: + return pd.to_datetime(X) + -class ContinuousDataEncoder: - """Continuous variables encoder""" +class BayesianGMMEncoder(_DataEncoder): + """Bayesian Gaussian Mixture encoder""" def __init__( self, @@ -52,18 +95,17 @@ def __init__( self.weights: Optional[List[float]] = None self.std_multiplier = 4 - @validate_arguments(config=dict(arbitrary_types_allowed=True)) - def fit(self, X: pd.Series) -> Any: + def _fit(self, X: pd.DataFrame) -> Any: self.min_value = X.min() self.max_value = X.max() self.model.fit(X.values.reshape(-1, 1)) self.weights = self.model.weights_ + self.n_components = len(self.model.weights_) return self - @validate_arguments(config=dict(arbitrary_types_allowed=True)) - def transform(self, X: pd.Series) -> pd.DataFrame: + def _transform(self, X: pd.DataFrame) -> pd.DataFrame: name = X.name X = X.values.reshape(-1, 1) means 
= self.model.means_.reshape(1, self.n_components) @@ -85,8 +127,7 @@ def transform(self, X: pd.Series) -> pd.DataFrame: return pd.DataFrame(out, columns=[f"{name}.value", f"{name}.component"]) - @validate_arguments(config=dict(arbitrary_types_allowed=True)) - def inverse_transform(self, X: pd.DataFrame) -> pd.Series: + def _inverse_transform(self, X: pd.DataFrame) -> pd.DataFrame: normalized = np.clip(X.values[:, 0], -1, 1) means = self.model.means_.reshape([-1]) stds = np.sqrt(self.model.covariances_).reshape([-1]) @@ -100,11 +141,62 @@ def inverse_transform(self, X: pd.DataFrame) -> pd.Series: # clip values return np.clip(reversed_data, self.min_value, self.max_value) - @validate_arguments(config=dict(arbitrary_types_allowed=True)) - def fit_transform(self, X: pd.Series) -> pd.Series: - return self.fit(X).transform(X) - def components(self) -> int: - if self.weights is None: - raise RuntimeError("Train the model first") - return len(self.weights) +OneHotEncoder = _DataEncoder.wraps(OneHotEncoder) +StandardScaler = _DataEncoder.wraps(StandardScaler) +MinMaxScaler = _DataEncoder.wraps(MinMaxScaler) + + +@_DataEncoder.wraps +class GaussianQuantileTransformer(QuantileTransformer): + """Quantile transformer with Gaussian distribution""" + + def __init__( + self, + *, + ignore_implicit_zeros: bool = False, + subsample: int = 10000, + random_state: Any = None, + copy: bool = True, + ): + super().__init__( + n_quantiles=None, + output_distribution="normal", + ignore_implicit_zeros=ignore_implicit_zeros, + subsample=subsample, + random_state=random_state, + copy=copy, + ) + + def fit(self, X: pd.DataFrame, y: Any = None) -> "GaussianQuantileTransformer": + self.n_quantiles = max(min(len(X) // 30, 1000), 10) + return super().fit(X, y) + + +REGISTRY = { + "datetime": DatetimeEncoder, + "onehot": OneHotEncoder, + "standard": StandardScaler, + "minmax": MinMaxScaler, + "quantile": GaussianQuantileTransformer, + "bayesian_gmm": BayesianGMMEncoder, +} + + +def get_encoder(encoder: Union[str, type]) -> TransformerMixin: + """Get a registered encoder. 
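A minimal round trip through the registry added above (the "minmax" key per the REGISTRY dict):

    import pandas as pd

    from synthcity.plugins.core.models.data_encoder import get_encoder

    scaler = get_encoder("minmax")()  # look up by registry key, then instantiate
    Xt = scaler.fit_transform(pd.DataFrame({"a": [0.0, 5.0, 10.0]}))
    # Xt is scaled to [0, 1]; scaler.inverse_transform(Xt) recovers the input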
+ + Supported encoders: + - Datetime + - datetime + - Categorical + - onehot + - Continuous + - standard + - minmax + - quantile + - bayesian_gmm + """ + if isinstance(encoder, type): # custom encoder + return encoder + return REGISTRY[encoder] diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py index d80c2a85..d6141f81 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py @@ -99,7 +99,7 @@ def fit( model_params = dict( num_classes=self.n_classes, - use_label=cond is not None, + conditional=cond is not None, mlp_params=self.mlp_params, dim_emb=self.dim_embed, ) @@ -139,7 +139,7 @@ def fit( for cbk in self.callbacks: cbk.on_fit_begin(self) - self.loss_history = pd.DataFrame(columns=["step", "mloss", "gloss", "loss"]) + self.loss_history = [] steps = 0 curr_loss_multi = 0.0 @@ -174,12 +174,14 @@ def fit( info( f"Step {steps}: MLoss: {mloss} GLoss: {gloss} Sum: {mloss + gloss}" ) - self.loss_history.loc[len(self.loss_history)] = [ - steps, - mloss, - gloss, - mloss + gloss, - ] + self.loss_history.append( + [ + steps, + mloss, + gloss, + mloss + gloss, + ] + ) curr_count = 0 curr_loss_gauss = 0.0 curr_loss_multi = 0.0 @@ -196,6 +198,10 @@ def fit( info(f"Early stopped at epoch {epoch}") break + self.loss_history = pd.DataFrame( + self.loss_history, columns=["step", "mloss", "gloss", "loss"] + ).set_index("step") + for cbk in self.callbacks: cbk.on_fit_end(self) diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py index 270d8b03..db55aedb 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py @@ -19,7 +19,7 @@ # synthcity relative from .modules import MLPDiffusion, ResNetDiffusion -from .utils import ( +from .nn_utils import ( discretized_gaussian_log_likelihood, index_to_log_onehot, log_1_min_a, @@ -112,7 +112,7 @@ def __init__( if model_params is None: model_params = dict( - dim_in=self.dim_input, num_classes=0, use_label=False, mlp_params=None + dim_in=self.dim_input, num_classes=0, conditional=False, mlp_params=None ) else: model_params["dim_in"] = self.dim_input diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/modules.py b/src/synthcity/plugins/core/models/tabular_ddpm/modules.py index 297c01bf..8d3a777b 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/modules.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/modules.py @@ -67,7 +67,7 @@ def __init__( dim_emb: int = 128, *, mlp_params: dict = {}, - use_label: bool = False, + conditional: bool = False, num_classes: int = 0, emb_nonlin: Union[str, nn.Module] = "silu", max_time_period: int = 10000, @@ -75,7 +75,7 @@ def __init__( super().__init__() self.dim_t = dim_emb self.num_classes = num_classes - self.has_label = use_label + self.has_label = conditional if isinstance(emb_nonlin, str): self.emb_nonlin = get_nonlin(emb_nonlin) @@ -85,7 +85,7 @@ def __init__( self.proj = nn.Linear(dim_in, dim_emb) self.time_emb = TimeStepEmbedding(dim_emb, max_time_period) - if use_label: + if conditional: if self.num_classes > 0: self.label_emb = nn.Embedding(self.num_classes, dim_emb) elif self.num_classes == 0: # regression @@ -103,9 +103,9 @@ def forward(self, x: Tensor, t: Tensor, y: Optional[Tensor] = None) -> 
Tensor: emb = self.time_emb(t) if self.has_label: if y is None: - raise ValueError("y must be provided if use_label is True") + raise ValueError("y must be provided if conditional is True") if self.num_classes == 0: - y = y.resize(-1, 1).float() + y = y.reshape(-1, 1).float() else: y = y.squeeze().long() emb += self.emb_nonlin(self.label_emb(y)) diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/nn_utils.py b/src/synthcity/plugins/core/models/tabular_ddpm/nn_utils.py new file mode 100644 index 00000000..04eb9d8f --- /dev/null +++ b/src/synthcity/plugins/core/models/tabular_ddpm/nn_utils.py @@ -0,0 +1,169 @@ +# future +from __future__ import annotations + +# third party +import numpy as np +import torch +import torch.nn.functional as F +from torch import Tensor + + +def normal_kl(mean1: Tensor, logvar1: Tensor, mean2: Tensor, logvar2: Tensor) -> Tensor: + """ + Compute the KL divergence between two gaussians. + + Shapes are automatically broadcasted, so batches can be compared to + scalars, among other use cases. + """ + try: + tensor = next( + x for x in (mean1, logvar1, mean2, logvar2) if isinstance(x, Tensor) + ) + except StopIteration: + raise TypeError("at least one argument must be a Tensor") + + # Force variances to be Tensors. Broadcasting helps convert scalars to + # Tensors, but it does not work for torch.exp(). + logvar1, logvar2 = [ + x if isinstance(x, Tensor) else torch.tensor(x).to(tensor) + for x in (logvar1, logvar2) + ] + + return 0.5 * ( + -1.0 + + logvar2 + - logvar1 + + torch.exp(logvar1 - logvar2) + + ((mean1 - mean2) ** 2) * torch.exp(-logvar2) + ) + + +def approx_standard_normal_cdf(x: Tensor) -> Tensor: + """ + A fast approximation of the cumulative distribution function of the + standard normal. + """ + return 0.5 * ( + 1.0 + torch.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * torch.pow(x, 3))) + ) + + +def discretized_gaussian_log_likelihood( + x: Tensor, *, means: Tensor, log_scales: Tensor +) -> Tensor: + """ + Compute the log-likelihood of a Gaussian distribution discretizing to a + given image. + + :param x: the target images. It is assumed that this was uint8 values, + rescaled to the range [-1, 1]. + :param means: the Gaussian mean Tensor. + :param log_scales: the Gaussian log stddev Tensor. + :return: a tensor like x of log probabilities (in nats). + """ + if not (x.shape == means.shape == log_scales.shape): + raise ValueError("shapes must match") + centered_x = x - means + inv_stdv = torch.exp(-log_scales) + plus_in = inv_stdv * (centered_x + 1.0 / 255.0) + cdf_plus = approx_standard_normal_cdf(plus_in) + min_in = inv_stdv * (centered_x - 1.0 / 255.0) + cdf_min = approx_standard_normal_cdf(min_in) + log_cdf_plus = torch.log(cdf_plus.clamp(min=1e-12)) + log_one_minus_cdf_min = torch.log((1.0 - cdf_min).clamp(min=1e-12)) + cdf_delta = cdf_plus - cdf_min + log_probs = torch.where( + x < -0.999, + log_cdf_plus, + torch.where( + x > 0.999, log_one_minus_cdf_min, torch.log(cdf_delta.clamp(min=1e-12)) + ), + ) + if log_probs.shape != x.shape: + raise ValueError("shapes must match") + return log_probs + + +def sum_except_batch(x: Tensor, num_dims: int = 1) -> Tensor: + """ + Sums all dimensions except the first. + + Args: + x: Tensor, shape (batch_size, ...) + num_dims: int, number of batch dims (default=1) + + Returns: + x_sum: Tensor, shape (batch_size,) + """ + return x.reshape(*x.shape[:num_dims], -1).sum(-1) + + +def mean_flat(tensor: Tensor) -> Tensor: + """ + Take the mean over all non-batch dimensions. 
+ """ + return tensor.mean(dim=list(range(1, len(tensor.shape)))) + + +def ohe_to_categories(ohe: Tensor, K: np.ndarray) -> Tensor: + K = torch.from_numpy(K) + indices = torch.cat([torch.zeros((1,)), K.cumsum(dim=0)], dim=0).int().tolist() + res = [] + for i in range(len(indices) - 1): + res.append(ohe[:, indices[i] : indices[i + 1]].argmax(dim=1)) + return torch.stack(res, dim=1) + + +def log_1_min_a(a: Tensor) -> Tensor: + return torch.log(1 - a.exp() + 1e-40) + + +def log_add_exp(a: Tensor, b: Tensor) -> Tensor: + """Numerically stable log(exp(a) + exp(b)).""" + m = torch.max(a, b) + return m + torch.log(torch.exp(a - m) + torch.exp(b - m)) + + +def perm_and_expand(a: Tensor, t: Tensor, x_shape: tuple) -> Tensor: + """Permutes a tensor in the order specified by `t` and expands it to `x_shape`.""" + if not (a.ndim == 1 and t.shape == (x_shape[0],)): + raise ValueError(f"dimensionality mismatch: {a.shape}, {t.shape}, {x_shape}") + out = a[t] + while len(out.shape) < len(x_shape): + out = out[..., None] + return out.expand(x_shape) + + +def log_categorical(log_x_start: Tensor, log_prob: Tensor) -> Tensor: + return (log_x_start.exp() * log_prob).sum(dim=1) + + +def index_to_log_onehot(x: Tensor, num_classes: np.ndarray) -> Tensor: + onehots = [] + for i in range(len(num_classes)): + onehots.append(F.one_hot(x[:, i], num_classes[i])) + x_onehot = torch.cat(onehots, dim=1) + log_onehot = torch.log(x_onehot.float().clamp(min=1e-30)) + return log_onehot + + +@torch.jit.script +def log_sub_exp(a: Tensor, b: Tensor) -> Tensor: + m = torch.maximum(a, b) + return torch.log(torch.exp(a - m) - torch.exp(b - m)) + m + + +@torch.jit.script +def sliced_logsumexp(x: Tensor, slices: Tensor) -> Tensor: + lse = torch.logcumsumexp( + torch.nn.functional.pad(x, [1, 0, 0, 0], value=-float("inf")), dim=-1 + ) + + slice_starts = slices[:-1] + slice_ends = slices[1:] + + slice_lse = log_sub_exp(lse[:, slice_ends], lse[:, slice_starts]) + slice_lse_repeated = torch.repeat_interleave( + slice_lse, slice_ends - slice_starts, dim=-1 + ) + return slice_lse_repeated diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/utils.py b/src/synthcity/plugins/core/models/tabular_ddpm/utils.py index 04eb9d8f..8574ffec 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/utils.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/utils.py @@ -1,169 +1,408 @@ -# future -from __future__ import annotations +# mypy: ignore-errors + +# stdlib +from collections import Counter +from copy import deepcopy +from dataclasses import dataclass, replace +from typing import Any, Dict, Literal, Optional, Tuple, Union, cast # third party import numpy as np +import pandas as pd +import sklearn.preprocessing import torch -import torch.nn.functional as F -from torch import Tensor - - -def normal_kl(mean1: Tensor, logvar1: Tensor, mean2: Tensor, logvar2: Tensor) -> Tensor: - """ - Compute the KL divergence between two gaussians. - - Shapes are automatically broadcasted, so batches can be compared to - scalars, among other use cases. 
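Why `log_add_exp` in nn_utils.py above subtracts the max first, in one numeric check:

    import math

    import torch

    a = b = torch.tensor([1000.0])
    naive = torch.log(torch.exp(a) + torch.exp(b))  # exp(1000) overflows to inf
    m = torch.max(a, b)
    stable = m + torch.log(torch.exp(a - m) + torch.exp(b - m))
    assert torch.isinf(naive).all()
    assert torch.allclose(stable, torch.tensor([1000.0 + math.log(2.0)]))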
- """ - try: - tensor = next( - x for x in (mean1, logvar1, mean2, logvar2) if isinstance(x, Tensor) +from sklearn.impute import SimpleImputer + +# synthcity absolute +from synthcity.utils.dataframe import TaskType + +ArrayDict = Dict[str, np.ndarray] +TensorDict = Dict[str, torch.Tensor] + + +CAT_MISSING_VALUE = "__nan__" +CAT_RARE_VALUE = "__rare__" +Normalization = Literal["standard", "quantile", "minmax"] +NumNanPolicy = Literal["drop-rows", "mean"] +CatNanPolicy = Literal["most_frequent"] + + +@dataclass(frozen=False) +class Dataset: + X_num: Optional[ArrayDict] + X_cat: Optional[ArrayDict] + y: ArrayDict + task_type: TaskType + n_classes: Optional[int] + + @property + def is_binclass(self) -> bool: + return self.task_type == TaskType.BINARY + + @property + def is_multiclass(self) -> bool: + return self.task_type == TaskType.MULTICLASS + + @property + def is_regression(self) -> bool: + return self.task_type == TaskType.REGRESSION + + @property + def n_num_features(self) -> int: + return 0 if self.X_num is None else self.X_num["train"].shape[1] + + @property + def n_cat_features(self) -> int: + return 0 if self.X_cat is None else self.X_cat["train"].shape[1] + + @property + def n_features(self) -> int: + return self.n_num_features + self.n_cat_features + + def size(self, part: Optional[str]) -> int: + return sum(map(len, self.y.values())) if part is None else len(self.y[part]) + + @property + def nn_output_dim(self) -> int: + if self.is_multiclass: + assert self.n_classes is not None + return self.n_classes + else: + return 1 + + +def num_process_nans(dataset: Dataset, policy: Optional[NumNanPolicy]) -> Dataset: + assert dataset.X_num is not None + nan_masks = {k: np.isnan(v) for k, v in dataset.X_num.items()} + if not any(x.any() for x in nan_masks.values()): + assert policy is None + return dataset + + assert policy is not None + if policy == "drop-rows": + valid_masks = {k: ~v.any(1) for k, v in nan_masks.items()} + assert valid_masks[ + "test" + ].all(), "Cannot drop test rows, since this will affect the final metrics." + new_data = {} + for data_name in ["X_num", "X_cat", "y"]: + data_dict = getattr(dataset, data_name) + if data_dict is not None: + new_data[data_name] = { + k: v[valid_masks[k]] for k, v in data_dict.items() + } + dataset = replace(dataset, **new_data) + elif policy == "mean": + new_values = np.nanmean(dataset.X_num["train"], axis=0) + X_num = deepcopy(dataset.X_num) + for k, v in X_num.items(): + num_nan_indices = np.where(nan_masks[k]) + v[num_nan_indices] = np.take(new_values, num_nan_indices[1]) + dataset = replace(dataset, X_num=X_num) + else: + assert raise_unknown("policy", policy) + return dataset + + +# Inspired by: https://github.com/yandex-research/rtdl/blob/a4c93a32b334ef55d2a0559a4407c8306ffeeaee/lib/data.py#L20 +def normalize( + X: ArrayDict, + normalization: Normalization, + seed: Optional[int], + return_normalizer: bool = False, +) -> ArrayDict: + X_train = X["train"] + if normalization == "standard": + normalizer = sklearn.preprocessing.StandardScaler() + elif normalization == "minmax": + normalizer = sklearn.preprocessing.MinMaxScaler() + elif normalization == "quantile": + normalizer = sklearn.preprocessing.QuantileTransformer( + output_distribution="normal", + n_quantiles=max(min(X["train"].shape[0] // 30, 1000), 10), + subsample=1e9, + random_state=seed, ) - except StopIteration: - raise TypeError("at least one argument must be a Tensor") - - # Force variances to be Tensors. 
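The "mean" branch of `num_process_nans` above boils down to the following np.nanmean/np.take pattern, shown here on toy data:

    import numpy as np

    X = np.array([[1.0, 8.0], [np.nan, 2.0]])
    col_means = np.nanmean(X, axis=0)   # per-column means, NaNs ignored
    rows, cols = np.where(np.isnan(X))
    X[rows, cols] = np.take(col_means, cols)
    assert X.tolist() == [[1.0, 8.0], [1.0, 2.0]]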
Broadcasting helps convert scalars to - # Tensors, but it does not work for torch.exp(). - logvar1, logvar2 = [ - x if isinstance(x, Tensor) else torch.tensor(x).to(tensor) - for x in (logvar1, logvar2) - ] - - return 0.5 * ( - -1.0 - + logvar2 - - logvar1 - + torch.exp(logvar1 - logvar2) - + ((mean1 - mean2) ** 2) * torch.exp(-logvar2) + else: + raise_unknown("normalization", normalization) + normalizer.fit(X_train) + if return_normalizer: + return {k: normalizer.transform(v) for k, v in X.items()}, normalizer + return {k: normalizer.transform(v) for k, v in X.items()} + + +def cat_process_nans(X: ArrayDict, policy: Optional[CatNanPolicy]) -> ArrayDict: + assert X is not None + nan_masks = {k: v == CAT_MISSING_VALUE for k, v in X.items()} + if any(x.any() for x in nan_masks.values()): + if policy is None: + X_new = X + elif policy == "most_frequent": + imputer = SimpleImputer(missing_values=CAT_MISSING_VALUE, strategy=policy) + imputer.fit(X["train"]) + X_new = {k: cast(np.ndarray, imputer.transform(v)) for k, v in X.items()} + else: + raise_unknown("categorical NaN policy", policy) + else: + assert policy is None + X_new = X + return X_new + + +def cat_drop_rare(X: ArrayDict, min_frequency: float) -> ArrayDict: + assert 0.0 < min_frequency < 1.0 + min_count = round(len(X["train"]) * min_frequency) + X_new = {x: [] for x in X} + for column_idx in range(X["train"].shape[1]): + counter = Counter(X["train"][:, column_idx].tolist()) + popular_categories = {k for k, v in counter.items() if v >= min_count} + for part in X_new: + X_new[part].append( + [ + (x if x in popular_categories else CAT_RARE_VALUE) + for x in X[part][:, column_idx].tolist() + ] + ) + return {k: np.array(v).T for k, v in X_new.items()} + + +def build_target(y: ArrayDict, task_type: TaskType) -> Tuple[ArrayDict, Dict[str, Any]]: + info: Dict[str, Any] = {} + if task_type == TaskType.REGRESSION: + mean, std = float(y["train"].mean()), float(y["train"].std()) + y = {k: (v - mean) / std for k, v in y.items()} + info["mean"] = mean + info["std"] = std + return y, info + + +@dataclass(frozen=True) +class Transformations: + seed: int = 0 + normalization: Optional[Normalization] = None + num_nan_policy: Optional[NumNanPolicy] = None + cat_nan_policy: Optional[CatNanPolicy] = None + cat_min_frequency: Optional[float] = None + + +def transform_dataset( + dataset: Dataset, + transformations: Transformations, +) -> Dataset: + # WARNING: the order of transformations matters. Moreover, the current + # implementation is not ideal in that sense. 
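+    # Concretely, the order implemented below is: numerical NaN handling
+    # first, then normalization (fitted on the "train" split only), then
+    # categorical NaN / rare-category handling, and finally target
+    # standardization for regression tasks.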
+
+    if dataset.X_num is not None:
+        dataset = num_process_nans(dataset, transformations.num_nan_policy)
+
+    num_transform = None
+    cat_transform = None
+    X_num = dataset.X_num
+
+    if X_num is not None and transformations.normalization is not None:
+        X_num, num_transform = normalize(
+            X_num,
+            transformations.normalization,
+            transformations.seed,
+            return_normalizer=True,
+        )
+
+    if dataset.X_cat is None:
+        assert transformations.cat_nan_policy is None
+        assert transformations.cat_min_frequency is None
+        # assert transformations.cat_encoding is None
+        X_cat = None
+    else:
+        X_cat = cat_process_nans(dataset.X_cat, transformations.cat_nan_policy)
+        if transformations.cat_min_frequency is not None:
+            X_cat = cat_drop_rare(X_cat, transformations.cat_min_frequency)
+
+    y, y_info = build_target(dataset.y, dataset.task_type)
+
+    dataset = replace(dataset, X_num=X_num, X_cat=X_cat, y=y)
+    dataset.num_transform = num_transform
+    dataset.cat_transform = cat_transform
+    # keep the target statistics (mean/std for regression) so generated
+    # targets can later be mapped back to the original scale
+    dataset.y_info = y_info
+
+    return dataset
+
+
+def make_dataset(
+    df: pd.DataFrame,
+    target: str,
+    cat_counts: Dict[str, int],
+    T: Transformations,
+) -> Dataset:
+    # treat the task as classification when categorical columns are declared,
+    # otherwise as regression
+    if len(cat_counts) > 0:
+        n_classes = int(df[target].nunique())
+        task_type = TaskType.BINARY if n_classes == 2 else TaskType.MULTICLASS
+    else:
+        n_classes = None
+        task_type = TaskType.REGRESSION
+
+    X_cat = df[list(cat_counts.keys())]
+    X_num = df.drop(columns=list(X_cat.columns) + [target])
+    y = df[target]
+
+    D = Dataset(
+        X_num,
+        X_cat,
+        y,
+        task_type=task_type,
+        n_classes=n_classes,
     )
 
-
-def approx_standard_normal_cdf(x: Tensor) -> Tensor:
-    """
-    A fast approximation of the cumulative distribution function of the
-    standard normal.
-    """
-    return 0.5 * (
-        1.0 + torch.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * torch.pow(x, 3)))
-    )
+    return transform_dataset(D, T)
 
 
-def discretized_gaussian_log_likelihood(
-    x: Tensor, *, means: Tensor, log_scales: Tensor
-) -> Tensor:
-    """
-    Compute the log-likelihood of a Gaussian distribution discretizing to a
-    given image.
-
-    :param x: the target images. It is assumed that this was uint8 values,
-        rescaled to the range [-1, 1].
-    :param means: the Gaussian mean Tensor.
-    :param log_scales: the Gaussian log stddev Tensor.
-    :return: a tensor like x of log probabilities (in nats).
-    """
-    if not (x.shape == means.shape == log_scales.shape):
-        raise ValueError("shapes must match")
-    centered_x = x - means
-    inv_stdv = torch.exp(-log_scales)
-    plus_in = inv_stdv * (centered_x + 1.0 / 255.0)
-    cdf_plus = approx_standard_normal_cdf(plus_in)
-    min_in = inv_stdv * (centered_x - 1.0 / 255.0)
-    cdf_min = approx_standard_normal_cdf(min_in)
-    log_cdf_plus = torch.log(cdf_plus.clamp(min=1e-12))
-    log_one_minus_cdf_min = torch.log((1.0 - cdf_min).clamp(min=1e-12))
-    cdf_delta = cdf_plus - cdf_min
-    log_probs = torch.where(
-        x < -0.999,
-        log_cdf_plus,
-        torch.where(
-            x > 0.999, log_one_minus_cdf_min, torch.log(cdf_delta.clamp(min=1e-12))
-        ),
+def prepare_tensors(
+    dataset: Dataset, device: Union[str, torch.device]
+) -> Tuple[Optional[TensorDict], Optional[TensorDict], TensorDict]:
+    X_num, X_cat, Y = (
+        None if x is None else {k: torch.as_tensor(v) for k, v in x.items()}
+        for x in [dataset.X_num, dataset.X_cat, dataset.y]
     )
-    if log_probs.shape != x.shape:
-        raise ValueError("shapes must match")
-    return log_probs
-
-
-def sum_except_batch(x: Tensor, num_dims: int = 1) -> Tensor:
-    """
-    Sums all dimensions except the first.
-
-    Args:
-        x: Tensor, shape (batch_size, ...)
-        num_dims: int, number of batch dims (default=1)
-
-    Returns:
-        x_sum: Tensor, shape (batch_size,)
-    """
-    return x.reshape(*x.shape[:num_dims], -1).sum(-1)
-
-
-def mean_flat(tensor: Tensor) -> Tensor:
-    """
-    Take the mean over all non-batch dimensions.
-    """
-    return tensor.mean(dim=list(range(1, len(tensor.shape))))
-
-
-def ohe_to_categories(ohe: Tensor, K: np.ndarray) -> Tensor:
-    K = torch.from_numpy(K)
-    indices = torch.cat([torch.zeros((1,)), K.cumsum(dim=0)], dim=0).int().tolist()
-    res = []
-    for i in range(len(indices) - 1):
-        res.append(ohe[:, indices[i] : indices[i + 1]].argmax(dim=1))
-    return torch.stack(res, dim=1)
-
-
-def log_1_min_a(a: Tensor) -> Tensor:
-    return torch.log(1 - a.exp() + 1e-40)
-
-
-def log_add_exp(a: Tensor, b: Tensor) -> Tensor:
-    """Numerically stable log(exp(a) + exp(b))."""
-    m = torch.max(a, b)
-    return m + torch.log(torch.exp(a - m) + torch.exp(b - m))
-
-
-def perm_and_expand(a: Tensor, t: Tensor, x_shape: tuple) -> Tensor:
-    """Permutes a tensor in the order specified by `t` and expands it to `x_shape`."""
-    if not (a.ndim == 1 and t.shape == (x_shape[0],)):
-        raise ValueError(f"dimensionality mismatch: {a.shape}, {t.shape}, {x_shape}")
-    out = a[t]
-    while len(out.shape) < len(x_shape):
-        out = out[..., None]
-    return out.expand(x_shape)
-
-
-def log_categorical(log_x_start: Tensor, log_prob: Tensor) -> Tensor:
-    return (log_x_start.exp() * log_prob).sum(dim=1)
-
+    if torch.device(device).type != "cpu":
+        X_num, X_cat, Y = (
+            None if x is None else {k: v.to(device) for k, v in x.items()}
+            for x in [X_num, X_cat, Y]
+        )
+    assert X_num is not None
+    assert Y is not None
+    if not dataset.is_multiclass:
+        Y = {k: v.float() for k, v in Y.items()}
+    return X_num, X_cat, Y
 
-def index_to_log_onehot(x: Tensor, num_classes: np.ndarray) -> Tensor:
-    onehots = []
-    for i in range(len(num_classes)):
-        onehots.append(F.one_hot(x[:, i], num_classes[i]))
-    x_onehot = torch.cat(onehots, dim=1)
-    log_onehot = torch.log(x_onehot.float().clamp(min=1e-30))
-    return log_onehot
 
+##############
+# DataLoader #
+##############
 
-@torch.jit.script
-def log_sub_exp(a: Tensor, b: Tensor) -> Tensor:
-    m = torch.maximum(a, b)
-    return torch.log(torch.exp(a - m) - torch.exp(b - m)) + m
 
+class TabDataset(torch.utils.data.Dataset):
+    def __init__(self, dataset: Dataset, split: Literal["train", "val", "test"]):
+        super().__init__()
 
-@torch.jit.script
-def sliced_logsumexp(x: Tensor, slices: Tensor) -> Tensor:
-    lse = torch.logcumsumexp(
-        torch.nn.functional.pad(x, [1, 0, 0, 0], value=-float("inf")), dim=-1
+        self.X_num = (
+            torch.from_numpy(dataset.X_num[split])
+            if dataset.X_num is not None
+            else None
+        )
+        self.X_cat = (
+            torch.from_numpy(dataset.X_cat[split])
+            if dataset.X_cat is not None
+            else None
+        )
+        self.y = torch.from_numpy(dataset.y[split])
+
+        assert self.y is not None
+        assert self.X_num is not None or self.X_cat is not None
+
+    def __len__(self) -> int:
+        return len(self.y)
+
+    def __getitem__(self, idx):
+        out_dict = {
+            "y": self.y[idx].long() if self.y is not None else None,
+        }
+
+        x = torch.empty(0)
+        if self.X_num is not None:
+            x = self.X_num[idx]
+        if self.X_cat is not None:
+            x = torch.cat([x, self.X_cat[idx]], dim=0)
+        return x.float(), out_dict
+
+
+# def prepare_dataloader(
+#     dataset: Dataset,
+#     split: str,
+#     batch_size: int,
+# ):
+#     torch_dataset = TabDataset(dataset, split)
+#     loader = torch.utils.data.DataLoader(
+#         torch_dataset,
+#         batch_size=batch_size,
+#         shuffle=(split == "train"),
+#         num_workers=1,
+#     )
+#     while True:
+#         yield from loader
+
+
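+# A minimal usage sketch for the pieces above (the split names are an
+# assumption; any `Dataset` whose splits are numpy arrays works the same way):
+#
+#     train_set = TabDataset(dataset, "train")
+#     loader = torch.utils.data.DataLoader(train_set, batch_size=256, shuffle=True)
+#     for x, out_dict in loader:
+#         # x: float tensor of numerical features followed by categorical codes
+#         # out_dict["y"]: integer-encoded targets for the batch
+#         ...
+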
+# def prepare_torch_dataloader( +# dataset: Dataset, +# split: str, +# shuffle: bool, +# batch_size: int, +# ) -> torch.utils.data.DataLoader: + +# torch_dataset = TabDataset(dataset, split) +# loader = torch.utils.data.DataLoader( +# torch_dataset, batch_size=batch_size, shuffle=shuffle, num_workers=1 +# ) + +# return loader + + +def concat_features(D: Dataset): + if D.X_num is None: + assert D.X_cat is not None + X = { + k: pd.DataFrame(v, columns=range(D.n_features)) for k, v in D.X_cat.items() + } + elif D.X_cat is None: + assert D.X_num is not None + X = { + k: pd.DataFrame(v, columns=range(D.n_features)) for k, v in D.X_num.items() + } + else: + X = { + part: pd.concat( + [ + pd.DataFrame(D.X_num[part], columns=range(D.n_num_features)), + pd.DataFrame( + D.X_cat[part], + columns=range(D.n_num_features, D.n_features), + ), + ], + axis=1, + ) + for part in D.y.keys() + } + + return X + + +def concat_to_pd(X_num, X_cat, y): + if X_num is None: + return pd.concat( + [ + pd.DataFrame(X_cat, columns=list(range(X_cat.shape[1]))), + pd.DataFrame(y, columns=["y"]), + ], + axis=1, + ) + if X_cat is not None: + return pd.concat( + [ + pd.DataFrame(X_num, columns=list(range(X_num.shape[1]))), + pd.DataFrame( + X_cat, + columns=list( + range(X_num.shape[1], X_num.shape[1] + X_cat.shape[1]) + ), + ), + pd.DataFrame(y, columns=["y"]), + ], + axis=1, + ) + return pd.concat( + [ + pd.DataFrame(X_num, columns=list(range(X_num.shape[1]))), + pd.DataFrame(y, columns=["y"]), + ], + axis=1, ) - slice_starts = slices[:-1] - slice_ends = slices[1:] - slice_lse = log_sub_exp(lse[:, slice_ends], lse[:, slice_starts]) - slice_lse_repeated = torch.repeat_interleave( - slice_lse, slice_ends - slice_starts, dim=-1 - ) - return slice_lse_repeated +def raise_unknown(unknown_what: str, unknown_value: Any): + raise ValueError(f"Unknown {unknown_what}: {unknown_value}") diff --git a/src/synthcity/plugins/core/models/tabular_encoder.py b/src/synthcity/plugins/core/models/tabular_encoder.py index 360fcf56..a9bb0e82 100644 --- a/src/synthcity/plugins/core/models/tabular_encoder.py +++ b/src/synthcity/plugins/core/models/tabular_encoder.py @@ -9,7 +9,6 @@ import pandas as pd from pydantic import BaseModel, validate_arguments, validator from sklearn.base import BaseEstimator, TransformerMixin -from sklearn.preprocessing import MinMaxScaler, OneHotEncoder # synthcity absolute import synthcity.logger as log @@ -17,7 +16,7 @@ from synthcity.utils.serialization import dataframe_hash # synthcity relative -from .data_encoder import ContinuousDataEncoder +from .data_encoder import get_encoder class FeatureInfo(BaseModel): @@ -50,107 +49,6 @@ def _output_dimensions_validator(cls: Any, v: int) -> int: return v -class BinEncoder(TransformerMixin, BaseEstimator): - """Binary encoder (for SurvivalGAN). - - Model continuous columns with a BayesianGMM and normalized to a scalar [0, 1] and a vector. - Discrete columns are encoded using a scikit-learn OneHotEncoder. - """ - - @validate_arguments(config=dict(arbitrary_types_allowed=True)) - def __init__( - self, - max_clusters: int = 10, - categorical_limit: int = 10, - ) -> None: - """Create a data transformer. - - Args: - max_clusters (int): - Maximum number of Gaussian distributions in Bayesian GMM. - """ - self.max_clusters = max_clusters - self.categorical_limit = categorical_limit - - @validate_arguments(config=dict(arbitrary_types_allowed=True)) - def _fit_continuous(self, data: pd.Series) -> FeatureInfo: - """Train Bayesian GMM for continuous columns. 
- - Args: - data (pd.Series): - A dataframe containing a column. - - Returns: - namedtuple: - A ``FeatureInfo`` object. - """ - name = data.name - encoder = ContinuousDataEncoder( - n_components=min(self.max_clusters, len(data)), - ) - encoder.fit(data) - num_components = encoder.components() - - transformed_features = [f"{name}.value"] + [ - f"{name}.component_{i}" for i in range(num_components) - ] - - return FeatureInfo( - name=name, - feature_type="continuous", - transform=encoder, - output_dimensions=1 + num_components, - transformed_features=transformed_features, - ) - - def fit( - self, raw_data: pd.DataFrame, discrete_columns: Optional[List] = None - ) -> "BinEncoder": - """Fit the ``BinEncoder``. - - Fits a ``ContinuousDataEncoder`` for continuous columns - """ - if discrete_columns is None: - discrete_columns = find_cat_cols(raw_data, self.categorical_limit) - - self.output_dimensions = 0 - - self._column_transform_info = {} - for name in raw_data.columns: - if name not in discrete_columns: - column_transform_info = self._fit_continuous(raw_data[name]) - self._column_transform_info[name] = column_transform_info - - return self - - def _transform_continuous( - self, column_transform_info: FeatureInfo, data: pd.Series - ) -> pd.Series: - name = data.name - encoder = column_transform_info.transform - transformed = encoder.transform(data) - - return transformed[f"{name}.component"].to_numpy().astype(int) - - @validate_arguments(config=dict(arbitrary_types_allowed=True)) - def transform(self, raw_data: pd.DataFrame) -> pd.DataFrame: - """Take raw data and output a matrix data.""" - output = raw_data.copy() - - for name in self._column_transform_info: - column_transform_info = self._column_transform_info[name] - - output[name] = self._transform_continuous( - column_transform_info, raw_data[name] - ) - - return output - - @validate_arguments(config=dict(arbitrary_types_allowed=True)) - def fit_transform(self, raw_data: pd.DataFrame) -> pd.DataFrame: - return self.fit(raw_data).transform(raw_data) - - class TabularEncoder(TransformerMixin, BaseEstimator): """Tabular encoder. @@ -164,6 +62,8 @@ def __init__( max_clusters: int = 10, categorical_limit: int = 10, whitelist: list = [], + categorical_encoder: str = "onehot", + continuous_encoder: str = "bayesian_gmm", ) -> None: """Create a data transformer. @@ -174,10 +74,12 @@ def __init__( self.max_clusters = max_clusters self.categorical_limit = categorical_limit self.whitelist = whitelist + self.categorical_encoder = categorical_encoder + self.continuous_encoder = continuous_encoder @validate_arguments(config=dict(arbitrary_types_allowed=True)) def _fit_continuous(self, data: pd.Series) -> FeatureInfo: - """Train Bayesian GMM for continuous columns. + """Fit the continuous encoder on a continuous column. Args: data (pd.DataFrame): @@ -188,20 +90,28 @@ def _fit_continuous(self, data: pd.Series) -> FeatureInfo: A ``FeatureInfo`` object. 
""" name = data.name - encoder = ContinuousDataEncoder( - n_components=min(len(data), self.max_clusters), - ) + + if self.continuous_encoder == "bayesian_gmm": + encoder = get_encoder("bayesian_gmm")( + n_components=min(self.max_clusters, len(data)), + ) + n_components = encoder.n_components + dim_out = 1 + n_components + transformed_features = [f"{name}.value"] + [ + f"{name}.component_{i}" for i in range(n_components) + ] + else: + encoder = get_encoder(self.continuous_encoder)() + dim_out = 1 + transformed_features = [name] + encoder.fit(data) - num_components = encoder.components() - transformed_features = [f"{name}.value"] + [ - f"{name}.component_{i}" for i in range(num_components) - ] return FeatureInfo( name=name, feature_type="continuous", transform=encoder, - output_dimensions=1 + num_components, + output_dimensions=dim_out, transformed_features=transformed_features, ) @@ -218,16 +128,21 @@ def _fit_discrete(self, data: pd.Series) -> FeatureInfo: A ``FeatureInfo`` object. """ name = data.name - ohe = OneHotEncoder(handle_unknown="ignore", sparse=False) - ohe.fit(data.values.reshape(-1, 1)) - num_categories = len(ohe.categories_[0]) - transformed_features = list(ohe.get_feature_names_out([data.name])) + if self.categorical_encoder == "onehot": + encoder = get_encoder("onehot")(handle_unknown="ignore", sparse=False) + else: + raise ValueError(f"Unknown categorical encoder {self.categorical_encoder}") + + encoder.fit(data.values.reshape(-1, 1)) + num_categories = len(encoder.categories_[0]) + + transformed_features = list(encoder.get_feature_names_out([data.name])) return FeatureInfo( name=name, feature_type="discrete", - transform=ohe, + transform=encoder, output_dimensions=num_categories, transformed_features=transformed_features, ) @@ -238,17 +153,15 @@ def fit( ) -> Any: """Fit the ``TabularEncoder``. - Fits a ``ContinuousDataEncoder`` for continuous columns and a - ``OneHotEncoder`` for discrete columns. - This step also counts the #columns in matrix data and span information. """ if discrete_columns is None: discrete_columns = find_cat_cols(raw_data, self.categorical_limit) + self.output_dimensions = 0 self._column_raw_dtypes = raw_data.infer_objects().dtypes - self._column_transform_info_list = [] + self._column_transform_info = [] for name in raw_data.columns: if name in self.whitelist: @@ -262,7 +175,8 @@ def fit( column_transform_info = self._fit_continuous(raw_data[name]) self.output_dimensions += column_transform_info.output_dimensions - self._column_transform_info_list.append(column_transform_info) + self._column_transform_info.append(column_transform_info) + return self def _transform_continuous( @@ -273,10 +187,15 @@ def _transform_continuous( transformed = encoder.transform(data) # Converts the transformed data to the appropriate output format. 
- output = np.zeros((len(transformed), column_transform_info.output_dimensions)) - output[:, 0] = transformed[f"{name}.value"].to_numpy() - index = transformed[f"{name}.component"].to_numpy().astype(int) - output[np.arange(index.size), index + 1] = 1 + if self.continuous_encoder == "bayesian_gmm": + output = np.zeros( + (len(transformed), column_transform_info.output_dimensions) + ) + output[:, 0] = transformed[f"{name}.value"].to_numpy() + index = transformed[f"{name}.component"].to_numpy().astype(int) + output[np.arange(index.size), index + 1] = 1 + else: + output = transformed.to_numpy().reshape(-1, 1) return pd.DataFrame( output, @@ -286,16 +205,16 @@ def _transform_continuous( def _transform_discrete( self, column_transform_info: FeatureInfo, data: pd.Series ) -> pd.DataFrame: - ohe = column_transform_info.transform + encoder = column_transform_info.transform return pd.DataFrame( - ohe.transform(data.to_frame().values), + encoder.transform(data.to_frame().values), columns=column_transform_info.transformed_features, ) @validate_arguments(config=dict(arbitrary_types_allowed=True)) def transform(self, raw_data: pd.DataFrame) -> pd.DataFrame: """Take raw data and output a matrix data.""" - if len(self._column_transform_info_list) == 0: + if len(self._column_transform_info) == 0: return pd.DataFrame(np.zeros((len(raw_data), 0))) column_data_list = [] @@ -305,7 +224,7 @@ def transform(self, raw_data: pd.DataFrame) -> pd.DataFrame: data = raw_data[name] column_data_list.append(data) - for column_transform_info in self._column_transform_info_list: + for column_transform_info in self._column_transform_info: name = column_transform_info.name data = raw_data[name] @@ -330,18 +249,23 @@ def _inverse_transform_continuous( column_data: pd.DataFrame, ) -> pd.DataFrame: encoder = column_transform_info.transform - data = pd.DataFrame(column_data.values[:, :2], columns=["value", "component"]) - data.iloc[:, 1] = np.argmax(column_data.values[:, 1:], axis=1) + if self.continuous_encoder == "bayesian_gmm": + data = pd.DataFrame( + column_data.values[:, :2], columns=["value", "component"] + ) + data.iloc[:, 1] = np.argmax(column_data.values[:, 1:], axis=1) + else: + data = column_data return encoder.inverse_transform(data) @validate_arguments(config=dict(arbitrary_types_allowed=True)) def _inverse_transform_discrete( self, column_transform_info: FeatureInfo, column_data: pd.DataFrame ) -> pd.DataFrame: - ohe = column_transform_info.transform + encoder = column_transform_info.transform column = column_transform_info.name return pd.DataFrame( - ohe.inverse_transform(column_data), + encoder.inverse_transform(column_data), columns=[column], ) @@ -351,7 +275,7 @@ def inverse_transform(self, data: pd.DataFrame) -> pd.DataFrame: Output uses the same type as input to the transform function. """ - if len(self._column_transform_info_list) == 0: + if len(self._column_transform_info) == 0: return pd.DataFrame(np.zeros((len(data), 0))) st = 0 @@ -367,7 +291,7 @@ def inverse_transform(self, data: pd.DataFrame) -> pd.DataFrame: feature_types.append(self._column_raw_dtypes) recovered_column_data_list.append(local_data) - for column_transform_info in self._column_transform_info_list: + for column_transform_info in self._column_transform_info: dim = column_transform_info.output_dimensions column_data = data.iloc[:, list(range(st, st + dim))] if column_transform_info.feature_type == "continuous": @@ -396,18 +320,18 @@ def layout(self) -> List[Tuple]: - continuous, and with length 1 + number of GMM clusters. 
         - discrete, and with length equal to the length of the one-hot encoding.
         """
-        return self._column_transform_info_list
+        return self._column_transform_info
 
     def n_features(self) -> int:
         return np.sum(
             [
                 column_transform_info.output_dimensions
-                for column_transform_info in self._column_transform_info_list
+                for column_transform_info in self._column_transform_info
             ]
         )
 
     def get_column_info(self, name: str) -> FeatureInfo:
-        for column_transform_info in self._column_transform_info_list:
+        for column_transform_info in self._column_transform_info:
             if column_transform_info.name == name:
                 return column_transform_info
 
@@ -424,7 +348,7 @@ def activation_layout(
         - discrete, and with length equal to the length of the one-hot encoding.
         """
         out = []
-        for column_transform_info in self._column_transform_info_list:
+        for column_transform_info in self._column_transform_info:
             if column_transform_info.feature_type == "continuous":
                 out.extend(
                     [
@@ -443,6 +367,35 @@ def activation_layout(
         return out
 
 
+class BinEncoder(TabularEncoder):
+    """Binary encoder (for SurvivalGAN).
+
+    Continuous columns are modelled with a Bayesian GMM and normalized to a
+    scalar in [0, 1] plus a component vector. Discrete columns are encoded
+    using a scikit-learn OneHotEncoder.
+    """
+
+    def _transform_continuous(
+        self, column_transform_info: FeatureInfo, data: pd.Series
+    ) -> np.ndarray:
+        name = data.name
+        encoder = column_transform_info.transform
+        transformed = encoder.transform(data)
+        return transformed[f"{name}.component"].to_numpy().astype(int)
+
+    @validate_arguments(config=dict(arbitrary_types_allowed=True))
+    def transform(self, raw_data: pd.DataFrame) -> pd.DataFrame:
+        """Take raw data and output a matrix data."""
+        output = raw_data.copy()
+
+        for column_transform_info in self._column_transform_info:
+            if column_transform_info.feature_type != "continuous":
+                # only continuous columns are binned; leave the rest untouched
+                continue
+            name = column_transform_info.name
+            output[name] = self._transform_continuous(
+                column_transform_info, raw_data[name]
+            )
+
+        return output
+
+
 class TimeSeriesTabularEncoder(TransformerMixin, BaseEstimator):
     """TimeSeries Tabular encoder.
 
@@ -456,10 +409,12 @@ def __init__(
         max_clusters: int = 10,
         categorical_limit: int = 10,
         whitelist: list = [],
+        encoder: str = "minmax",
     ) -> None:
         self.max_clusters = max_clusters
         self.categorical_limit = categorical_limit
         self.whitelist = whitelist
+        self.encoder = encoder
 
     def fit_temporal(
         self,
@@ -484,9 +439,8 @@ def fit_temporal(
         self.temporal_encoder.fit(temporal_df)
 
         # Temporal horizons
-        self.observation_times_encoder = MinMaxScaler().fit(
-            np.asarray(observation_times).reshape(-1, 1)
-        )
+        self.observation_times_encoder = get_encoder(self.encoder)()
+        self.observation_times_encoder.fit(np.asarray(observation_times).reshape(-1, 1))
 
         return self
 
@@ -672,6 +626,7 @@ def __init__(
         self,
         max_clusters: int = 10,
         categorical_limit: int = 10,
+        continuous_encoder: str = "bayesian_gmm",
     ) -> None:
         """Create a data transformer.
@@ -682,6 +637,7 @@ def __init__(
         self.encoder = BinEncoder(
             max_clusters=max_clusters,
             categorical_limit=categorical_limit,
+            continuous_encoder=continuous_encoder,
         )
 
     def _prepare(
diff --git a/src/synthcity/plugins/generic/plugin_ddpm.py b/src/synthcity/plugins/generic/plugin_ddpm.py
index 631480fc..855eb4ec 100644
--- a/src/synthcity/plugins/generic/plugin_ddpm.py
+++ b/src/synthcity/plugins/generic/plugin_ddpm.py
@@ -194,6 +194,7 @@ def _fit(self, X: DataLoader, *args: Any, **kwargs: Any) -> "TabDDPMPlugin":
         """
         df = X.dataframe()
         cond = kwargs.pop("cond", None)
+        self.loss_history = None
 
         # note that the TabularEncoder is not used in this plugin, because the
         # Gaussian multinomial diffusion module needs to know the number of classes
@@ -208,18 +209,23 @@ def _fit(self, X: DataLoader, *args: Any, **kwargs: Any) -> "TabDDPMPlugin":
                 raise ValueError(
                     "cond is already given by the labels for classification"
                 )
-            _, cond = X.unpack()
+            df, cond = X.unpack()
             self._labels, self._cond_dist = np.unique(cond, return_counts=True)
             self._cond_dist = self._cond_dist / self._cond_dist.sum()
-        else:
+            self.target_name = cond.name
+            self.target_iloc = list(X.columns).index(cond.name)
+
+        if cond is not None:
             if type(cond) is str:
                 cond = df[cond]
+            self.expecting_conditional = True
 
         if cond is not None:
             cond = pd.Series(cond, index=df.index)
 
         # NOTE: cond may also be included in the dataframe
         self.model.fit(df, cond, **kwargs)
+        self.loss_history = self.model.loss_history
 
         return self
 
@@ -230,8 +236,14 @@ def _generate(self, count: int, syn_schema: Schema, **kwargs: Any) -> DataLoader
 
             # randomly generate labels following the distribution of the training data
             cond = np.random.choice(self._labels, size=count, p=self._cond_dist)
 
+        if cond is not None and len(cond) < count:
+            raise ValueError("The length of cond is less than the required count")
+
         def callback(count):  # type: ignore
-            return self.model.generate(count, cond=cond)
+            data = self.model.generate(count, cond=cond)
+            if self.is_classification:
+                data = np.insert(data, self.target_iloc, cond, axis=1)
+            return data
 
         return self._safe_generate(callback, count, syn_schema, **kwargs)
 
diff --git a/src/synthcity/utils/dataframe.py b/src/synthcity/utils/dataframe.py
index a313b91e..80104e23 100644
--- a/src/synthcity/utils/dataframe.py
+++ b/src/synthcity/utils/dataframe.py
@@ -1,7 +1,19 @@
+# stdlib
+import enum
+
 # third party
 import pandas as pd
 
 
+class TaskType(enum.Enum):
+    BINARY = "binary"
+    MULTICLASS = "multiclass"
+    REGRESSION = "regression"
+
+    def __str__(self) -> str:
+        return self.value
+
+
 def constant_columns(dataframe: pd.DataFrame) -> list:
     """
     Find constant value columns in a pandas dataframe.
diff --git a/tutorials/tutorial8_tabular_modelling_with_diffusion.ipynb b/tutorials/tutorial8_tabular_modelling_with_diffusion.ipynb
new file mode 100644
index 00000000..97e38401
--- /dev/null
+++ b/tutorials/tutorial8_tabular_modelling_with_diffusion.ipynb
@@ -0,0 +1,1936 @@
+{
+ "cells": [
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "id": "97e2d93c",
+   "metadata": {},
+   "source": [
+    "# Tutorial 8: Modelling tabular data with diffusion models\n",
+    "\n",
+    "This tutorial demonstrates how to use a denoising diffusion probabilistic model (DDPM) to synthesize tabular data. The algorithm was proposed in [TabDDPM: Modelling Tabular Data with Diffusion Models](https://arxiv.org/abs/2209.15421).\n",
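+    "\n",
+    "As a quick refresher (standard DDPM notation, not specific to this implementation): the forward process corrupts a clean row $x_0$ step by step with Gaussian noise,\n",
+    "\n",
+    "$$q(x_t \\mid x_{t-1}) = \\mathcal{N}\\big(x_t;\\ \\sqrt{1-\\beta_t}\\,x_{t-1},\\ \\beta_t I\\big),$$\n",
+    "\n",
+    "and a network is trained to reverse that corruption. TabDDPM applies this Gaussian process to the numerical columns and an analogous multinomial diffusion to the categorical columns, so mixed-type rows are modelled jointly."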
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "696e0157", + "metadata": { + "scrolled": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[KeOps] Warning : \n", + " The default C++ compiler could not be found on your system.\n", + " You need to either define the CXX environment variable or a symlink to the g++ command.\n", + " For example if g++-8 is the command you can do\n", + " import os\n", + " os.environ['CXX'] = 'g++-8'\n", + " \n", + "[KeOps] Warning : Cuda libraries were not detected on the system ; using cpu only mode\n" + ] + } + ], + "source": [ + "# stdlib\n", + "import sys\n", + "import warnings\n", + "sys.path.insert(0, '../src')\n", + "\n", + "# third party\n", + "import numpy as np\n", + "from sklearn.datasets import load_iris, load_diabetes\n", + "\n", + "# synthcity absolute\n", + "import synthcity.logger as log\n", + "from synthcity.plugins import Plugins\n", + "from synthcity.plugins.core.dataloader import GenericDataLoader\n", + "\n", + "log.add(sink=sys.stderr, level=\"INFO\")\n", + "warnings.filterwarnings(\"ignore\")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "54ce9a10", + "metadata": {}, + "source": [ + "## Synthesize a classification dataset\n", + "\n", + "For classification datasets, TabDDPM automatically uses the labels as the conditional variable during training. You should not provide an additional `cond` argument to the `fit` method." + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "51076cdc", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
sepal length (cm)sepal width (cm)petal length (cm)petal width (cm)target
05.13.51.40.20
14.93.01.40.20
24.73.21.30.20
34.63.11.50.20
45.03.61.40.20
\n", + "
" + ], + "text/plain": [ + " sepal length (cm) sepal width (cm) petal length (cm) petal width (cm) \\\n", + "0 5.1 3.5 1.4 0.2 \n", + "1 4.9 3.0 1.4 0.2 \n", + "2 4.7 3.2 1.3 0.2 \n", + "3 4.6 3.1 1.5 0.2 \n", + "4 5.0 3.6 1.4 0.2 \n", + "\n", + " target \n", + "0 0 \n", + "1 0 \n", + "2 0 \n", + "3 0 \n", + "4 0 " + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Note: preprocessing data with OneHotEncoder or StandardScaler is not needed or recommended. Synthcity handles feature encoding and standardization internally.\n", + "\n", + "X, y = load_iris(return_X_y=True, as_frame=True)\n", + "X[\"target\"] = y\n", + "\n", + "loader = GenericDataLoader(X, target_column=\"target\", sensitive_columns=[])\n", + "\n", + "loader.dataframe().head()" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "52397e4a", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "0 50\n", + "1 50\n", + "2 50\n", + "Name: target, dtype: int64" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "y.value_counts()" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "cda52bea", + "metadata": {}, + "source": [ + "### Model fitting" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "3bf24be4", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[2023-03-27T15:19:24.516935+0200][30696][INFO] Step 100: MLoss: 0.0 GLoss: 0.2235 Sum: 0.2235\n", + "[2023-03-27T15:19:25.913968+0200][30696][INFO] Step 200: MLoss: 0.0 GLoss: 0.2298 Sum: 0.2298\n", + "[2023-03-27T15:19:27.191123+0200][30696][INFO] Step 300: MLoss: 0.0 GLoss: 0.2305 Sum: 0.2305\n", + "[2023-03-27T15:19:28.432055+0200][30696][INFO] Step 400: MLoss: 0.0 GLoss: 0.2273 Sum: 0.2273\n", + "[2023-03-27T15:19:29.766838+0200][30696][INFO] Step 500: MLoss: 0.0 GLoss: 0.2333 Sum: 0.2333\n", + "[2023-03-27T15:19:31.280538+0200][30696][INFO] Step 600: MLoss: 0.0 GLoss: 0.221 Sum: 0.221\n", + "[2023-03-27T15:19:33.034999+0200][30696][INFO] Step 700: MLoss: 0.0 GLoss: 0.2123 Sum: 0.2123\n", + "[2023-03-27T15:19:34.519078+0200][30696][INFO] Step 800: MLoss: 0.0 GLoss: 0.2212 Sum: 0.2212\n", + "[2023-03-27T15:19:36.020932+0200][30696][INFO] Step 900: MLoss: 0.0 GLoss: 0.2014 Sum: 0.2014\n", + "[2023-03-27T15:19:38.330664+0200][30696][INFO] Step 1000: MLoss: 0.0 GLoss: 0.2069 Sum: 0.2069\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# define the model hyper-parameters\n", + "plugin_params = dict(\n", + " is_classification = True,\n", + " n_iter = 1000, # epochs\n", + " lr = 0.002,\n", + " weight_decay = 1e-4,\n", + " batch_size = 1000,\n", + " model_type = \"mlp\", # or \"resnet\"\n", + " num_timesteps = 500, # timesteps in diffusion\n", + " n_layers_hidden = 3,\n", + " dim_hidden = 256,\n", + " dim_embed = 128,\n", + " dropout = 0.0,\n", + " # performance logging\n", + " log_interval = 10,\n", + " print_interval = 100,\n", + ")\n", + "\n", + "plugin = Plugins().get(\"ddpm\", **plugin_params)\n", + "plugin.fit(loader)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "e1a270c9", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "TabDDPM(\n", + " (diffusion): GaussianMultinomialDiffusion(\n", + " (denoise_fn): MLPDiffusion(\n", + " (emb_nonlin): SiLU()\n", + " (proj): Linear(in_features=4, 
out_features=128, bias=True)\n", + " (time_emb): TimeStepEmbedding(\n", + " (fc): Sequential(\n", + " (0): Linear(in_features=128, out_features=128, bias=True)\n", + " (1): SiLU()\n", + " (2): Linear(in_features=128, out_features=128, bias=True)\n", + " )\n", + " )\n", + " (label_emb): Embedding(3, 128)\n", + " (model): MLP(\n", + " (model): Sequential(\n", + " (0): LinearLayer(\n", + " (model): Sequential(\n", + " (0): Linear(in_features=128, out_features=256, bias=True)\n", + " (1): ReLU()\n", + " )\n", + " )\n", + " (1): LinearLayer(\n", + " (model): Sequential(\n", + " (0): Linear(in_features=256, out_features=256, bias=True)\n", + " (1): ReLU()\n", + " )\n", + " )\n", + " (2): LinearLayer(\n", + " (model): Sequential(\n", + " (0): Linear(in_features=256, out_features=256, bias=True)\n", + " (1): ReLU()\n", + " )\n", + " )\n", + " (3): Linear(in_features=256, out_features=4, bias=True)\n", + " )\n", + " (loss): MSELoss()\n", + " )\n", + " )\n", + " )\n", + " (ema_model): MLPDiffusion(\n", + " (emb_nonlin): SiLU()\n", + " (proj): Linear(in_features=4, out_features=128, bias=True)\n", + " (time_emb): TimeStepEmbedding(\n", + " (fc): Sequential(\n", + " (0): Linear(in_features=128, out_features=128, bias=True)\n", + " (1): SiLU()\n", + " (2): Linear(in_features=128, out_features=128, bias=True)\n", + " )\n", + " )\n", + " (label_emb): Embedding(3, 128)\n", + " (model): MLP(\n", + " (model): Sequential(\n", + " (0): LinearLayer(\n", + " (model): Sequential(\n", + " (0): Linear(in_features=128, out_features=256, bias=True)\n", + " (1): ReLU()\n", + " )\n", + " )\n", + " (1): LinearLayer(\n", + " (model): Sequential(\n", + " (0): Linear(in_features=256, out_features=256, bias=True)\n", + " (1): ReLU()\n", + " )\n", + " )\n", + " (2): LinearLayer(\n", + " (model): Sequential(\n", + " (0): Linear(in_features=256, out_features=256, bias=True)\n", + " (1): ReLU()\n", + " )\n", + " )\n", + " (3): Linear(in_features=256, out_features=4, bias=True)\n", + " )\n", + " (loss): MSELoss()\n", + " )\n", + " )\n", + ")" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "plugin.model" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "49b18ada", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAXQAAAEGCAYAAAB1iW6ZAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/P9b71AAAACXBIWXMAAAsTAAALEwEAmpwYAAAwsklEQVR4nO3dd3gU5drH8e+9m7IBAtKkI1EBpYOAKE0EpChgP4Ag2FCPiOdYjlhe5WA7iL2CekDFgiCIKGikqFTpRaqEZkKR0AnJkrL3+8cuOUkIyQJJNru5P9eVi52ZZ2fu2Qm/zD6zO4+oKsYYY4KfI9AFGGOMKRgW6MYYEyIs0I0xJkRYoBtjTIiwQDfGmBARFqgNV6pUSevUqROozRtjTFBasWLFflWtnNuygAV6nTp1WL58eaA2b4wxQUlEdp5umXW5GGNMiLBAN8aYEGGBbowxISJgfejGGJOXtLQ0EhIScLvdgS4lIFwuFzVr1iQ8PNzv51igG2OKpYSEBKKjo6lTpw4iEuhyipSqcuDAARISEoiJifH7eX51uYhIdxHZLCJxIjI8l+W1ReRnEVklImtFpOcZ1G6MMadwu91UrFixxIU5gIhQsWLFM353km+gi4gTeBfoATQA+olIgxzNngYmqWpzoC/w3hlVYYwxuSiJYX7S2ey7P2forYE4Vd2mqqnARKBPjjYKlPU9LgfsPuNK/LRyzae8NfVWMtJTC2sTxhgTlPwJ9BpAfJbpBN+8rEYAA0QkAZgJPJjbikRkiIgsF5HliYmJZ1Eu/J4wnw+PbcTtPnhWzzfGmIL08ccfM3To0ECXARTcxxb7AR+rak2gJzBBRE5Zt6p+oKotVbVl5cq5fnM1X66wKABS3IfPvlpjjAlB/gT6LqBWlumavnlZ3QVMAlDVxYALqFQQBebkCi8FgNsC3RhTyHbs2MEll1zC4MGDqVevHrfddhuzZ8+mbdu21K1bl6VLl57S/uqrr6ZJkyZ07tyZP//8E4DJkyfTqFEjmjZtSocOHQBYv349rVu3plmzZjRp0oQtW7acc73+fGxxGVBXRGLwBnlfoH+ONn8CnYGPReRSvIF+dn0q+YgKLwOA+8TRwli9MaYY+vd369mwu2D/zzeoXpZnezXMt11cXByTJ09m3LhxtGrVii+++IIFCxYwffp0XnzxRa6//vrMtg8++CCDBg1i0KBBjBs3jmHDhjFt2jRGjhxJbGwsNWrU4PDhwwCMGTOGhx56iNtuu43U1FQyMjLOeZ/yPUNX1XRgKBALbMT7aZb1IjJSRHr7mj0C3CMia4AvgcFaSIOVuiKiAXCfOFIYqzfGmGxiYmJo3LgxDoeDhg0b0rlzZ0SExo0bs2PHjmxtFy9eTP/+3vPdgQMHsmDBAgDatm3L4MGD+fDDDzOD+4orruDFF19k1KhR7Ny5k6ioqHOu1a8vFqnqTLwXO7POeybL4w1A23Ouxg9REd4z9JTUY0WxOWNMMeDPmXRhiYyMzHzscDgypx0OB+np6X6tY8yYMSxZsoQZM2Zw2WWXsWLFCvr378/ll1/OjBkz6NmzJ2PHjuXqq68+p1qD7l4urgjvpyNTTligG2OKlyuvvJKJEycC8Pnnn9O+fXsAtm7dyuWXX87IkSOpXLky8fHxbNu2jQsvvJBhw4bRp08f1q5de87bD7qv/rsifV0udoZujClm3n77be644w5Gjx5N5cqVGT9+PACPPfYYW7ZsQVXp3LkzTZs2ZdSoUUyYMIHw8HCqVq3Kk08+ec7bl0Lq6s5Xy5Yt9WwGuIiPX0zPuUN4oVYvel/9YiFUZowpDjZu3Mill14a6DICKrfXQERWqGrL3NoHXZdLlOs8ANxpxwNbiDHGFDNBF+guX6CnpKcEthBjjClmgi7QI13lAHBboBtjTDZBF+jh4aUIU7VAN8aYHIIu0AGiFNwZJwJdhjHGFCtBGeguhRQLdGOMySY4Ax3B7UkLdBnGmBJo8ODBfP3114EuI1dBGehR4iDFYwNcGGNMVkEZ6C6cdoZujCl0zz33HPXr16ddu3b069ePV155JdvyOXPm0Lx5cxo3bsydd97JiRPeruDhw4fToEEDmjRpwqOPPgrkfgvdghZ0X/0HiHI4ceu532rSGBMkfhgOe38v2HVWbQw9/nPaxcuWLWPKlCmsWbOGtLQ0WrRowWWXXZa53O12M3jwYObMmUO9evW4/fbbef/99xk4cCDffPMNmzZtQkQyb5eb2y10C1pwnqFLGCkW6MaYQrRw4UL69OmDy+UiOjqaXr16ZVu+efNmYmJiqFevHgCDBg1i3rx5lCtXDpfLxV133cXUqVMpVco7KE9ut9AtaEF5hu5yhONOt6/+G1Ni5HEmXdyEhYWxdOlS5syZw9dff80777zD3Llzc72FbsWKFQt0236doYtIdxHZLCJxIjI8l+Wvi8hq388fInK4QKvMweWIwI2nMDdhjCnh2rZty3fffYfb7SYpKYnvv/8+2/L69euzY8cO4uLiAJgwYQIdO3YkKSmJI0eO0LNnT15//XXWrFkD5H4L3YKW7xm6iDiBd4GuQAKwTESm+wa1AEBV/5ml/YNA8wKvNAuXMwK3FOYWjDElXatWrejduzdNmjShSpUqNG7cmHLlymUud7lcjB8/nltuuYX09HRatWrFfffdx8GDB+nTpw9utxtV5bXXXgNyv4VuQfOny6U1EKeq2wBEZCLQB9hwmvb9gGcLprzcRTkjcRfmBowxBnj00UcZMWIEycnJdOjQgcsuu4x77rknc3nnzp1ZtWpVtudUq1btlMGjAaZOnVro9foT6DWArO8NEoDLc2soIhcAMcDccy/t9FxOFykOQT0exBGU13WNMUFgyJAhbNiwAbfbzaBBg2jRokWgS8pTQV8U7Qt8rZr7R1BEZAgwBKB27dpnvRFXmAuAEyeO4Ioqf9brMcaYvHzxxReBLuGM+HN6uwuolWW6pm9ebvoCX55uRar6gaq2VNWWlStX9r/KHFxh3tGxU1IOnvU6jDEm1PgT6MuAuiISIyIReEN7es5GInIJUB5YXLAlnioq3Pu5Trf7cGFvyhhjgka+ga6q6cBQIBbYCExS1fUiMlJEemdp2heYqEUwSGlUeBkAUk4cKexNGWNM0PCrD11VZwIzc8x7Jsf0iIIrK2+uCG+gu08cLapNGmNMsReUHxGxQDfGFIUyZcoEuoQzEpSBHhURDYA79ViAKzHGmOIjKAPd5Qv0lLSkAFdijCkJVJXHHnuMRo0a0bhxY7766isA9uzZQ4cOHWjWrBmNGjVi/vz5ZGRkMHjw4My2r7/+epHVGZw354r0fv3WnWo36DKmJBi1dBSbDm4q0HVeUuESHm/9uF9tp06dyurVq1mzZg379++nVatWdOjQgS+++IJu3brx1FNPkZGRQXJyMqtXr2bXrl2sW7cOoNBulZub4DxDd/kCPc0C3RhT+BYsWEC/fv1wOp1UqVKFjh07smzZMlq1asX48eMZMWIEv//+O9HR0Vx44YVs27aNBx98kB9//JGyZcsWWZ1BeYYe5ToPAHd6cmALMc
YUCX/PpItahw4dmDdvHjNmzGDw4ME8/PDD3H777axZs4bY2FjGjBnDpEmTGDduXJHUE6Rn6N6v+6ekpwS4EmNMSdC+fXu++uorMjIySExMZN68ebRu3ZqdO3dSpUoV7rnnHu6++25WrlzJ/v378Xg83HTTTTz//POsXLmyyOoMyjP0iIhoRNUC3RhTJG644QYWL15M06ZNERFefvllqlatyieffMLo0aMJDw+nTJkyfPrpp+zatYs77rgDj8c7ZsNLL71UZHUGZaCLw4FLwZ1uN9E1xhSepCTvJ+lEhNGjRzN69OhsywcNGsSgQYNOeV5RnpVnFZRdLgBRgDvjRKDLMMaYYiNoA92lgtuTGugyjDGm2AjaQI9CSMlIC3QZxphCVAT3+iu2zmbfgzbQXeLArRboxoQql8vFgQMHSmSoqyoHDhzA5XKd0fOC8qIogEvCcHvSA12GMaaQ1KxZk4SEBBITEwNdSkC4XC5q1qx5Rs8J4kB3ctT60I0JWeHh4cTExAS6jKAStF0uUY5wUvAEugxjjCk2/Ap0EekuIptFJE5Ehp+mza0iskFE1otIoY+s6nKE41YLdGOMOSnfLhcRcQLvAl2BBGCZiExX1Q1Z2tQFngDaquohETm/sAo+yeWMIIWSd7HEGGNOx58z9NZAnKpuU9VUYCLQJ0ebe4B3VfUQgKruK9gyT+VyRuKWwt6KMcYED38CvQYQn2U6wTcvq3pAPRFZKCK/iUj33FYkIkNEZLmILD/XK9dRvkBXj3W7GGMMFNxF0TCgLnAV0A/4UETOy9lIVT9Q1Zaq2rJy5crntEFXmIsMEdLT7AZdxhgD/gX6LqBWlumavnlZJQDTVTVNVbcDf+AN+ELjCosCIMV9sDA3Y4wxQcOfQF8G1BWRGBGJAPoC03O0mYb37BwRqYS3C2ZbwZV5KldYKQDc7iOFuRljjAka+Qa6qqYDQ4FYYCMwSVXXi8hIEentaxYLHBCRDcDPwGOqeqCwigaICi8NgPvE4cLcjDHGBA2/vimqqjOBmTnmPZPlsQIP+36KRFR4GQBSTtgZujHGQBB/U9QV4Q1094ljAa7EGGOKhyAO9GgA3KlHA1yJMcYUD0Eb6FGRJwM9KcCVGGNM8RC0ge6KKAtASqp1uRhjDARzoLvKAZCSdjzAlRhjTPEQvIEeeR4A7rTkwBZijDHFRNAGepTrPADc6fbVf2OMgSAO9EgLdGOMySZoA90ZFkGEKikZ7kCXYowxxULQBjqAS8GdfiLQZRhjTLEQ/IHusUA3xhgI8kAvheDOSAt0GcYYUywEdaC7xEGKxwLdGGMg6APdSYpaoBtjDAR9oIfh9mQEugxjjCkWgj/QsUA3xhjwM9BFpLuIbBaROBEZnsvywSKSKCKrfT93F3ypp4pyhONWT1Fsyhhjir18RywSESfwLtAV72DQy0RkuqpuyNH0K1UdWgg1npbLGU4KWpSbNMaYYsufM/TWQJyqblPVVGAi0Kdwy/KPyxGJWwJdhTHGFA/+BHoNID7LdIJvXk43ichaEflaRGrltiIRGSIiy0VkeWJi4lmUm50rzALdGGNOKqiLot8BdVS1CTAL+CS3Rqr6gaq2VNWWlStXPueNRjldpIqQkZ56zusyxphg50+g7wKynnHX9M3LpKoHVPXkd/A/Ai4rmPLy5gqLAuCE+3BRbM4YY4o1fwJ9GVBXRGJEJALoC0zP2kBEqmWZ7A1sLLgST+9koKdYoBtjTP6BrqrpwFAgFm9QT1LV9SIyUkR6+5oNE5H1IrIGGAYMLqyCs4p2lQfgyLH4fFoaY0zoy/djiwCqOhOYmWPeM1kePwE8UbCl5a9mxUtgJyTsW8uFMZ2LevPGGFOsBPU3RWtW83bVxx/8I8CVGGNM4AV1oFesUI8oj5JwLCHQpRhjTMAFdaCLw0EtnMS79we6FGOMCbigDnSAWmFliE8/HugyjDEm4II+0Gu6KpPg8ODJSA90KcYYE1BBH+i1ytYmVYR9iesCXYoxxgRU8Ad6hfoAJOxdHdhCjDEmwII/0Ks2AyD+QM67+RpjTMkS9IFetWpznKrEH9kR6FKMMSaggj7Qw8NLUdUjJCT/FehSjDEmoII+0AFqOaOITzsa6DKMMSagQiPQIyuQQFqgyzDGmIAKjUAvU4PDDuHY0V35NzbGmBAVGoF+3sUAxO9eFuBKjDEmcEIj0M9vDEB84u8BrsQYYwInJAK9ZvWWAMQf3hrgSowxJnD8CnQR6S4im0UkTkSG59HuJhFREWlZcCXmr3SZqlTwKAlJu4tys8YYU6zkG+gi4gTeBXoADYB+ItIgl3bRwEPAkoIu0h81iSAh9VAgNm2MMcWCP2forYE4Vd2mqqnARKBPLu2eA0YB7gKsz2+1IsoRnxGQTRtjTLHgT6DXALKOwpzgm5dJRFoAtVR1Rl4rEpEhIrJcRJYnJiaecbF5qVWqKnsdStoJuze6MaZkOueLoiLiAF4DHsmvrap+oKotVbVl5cqVz3XT2dQsdwEeET6YeTdHj8Tn/wRjjAkx/gT6LqBWlumavnknRQONgF9EZAfQBphe1BdGO7d6iLaUYszRdXSd2oPXvr6BtLTkoizBGGMCyp9AXwbUFZEYEYkA+gLTTy5U1SOqWklV66hqHeA3oLeqLi+Uik+jTHQ1xgxawqQ2z9M+vALjj8fx44IXi7IEY4wJqHwDXVXTgaFALLARmKSq60VkpIj0LuwCz9Sl9fvwcr+5VMxQ5u+aH+hyjDGmyIT500hVZwIzc8x75jRtrzr3ss6NwxlGO1dV5p7YS3qam7BwV6BLMsaYQhcS3xTNTftaHTnmENZumBToUowxpkiEbKBf2fROwlSZFzc9/8bGGBMCQjbQo8vWoDku5h3dEuhSjDGmSIRsoAN0qNSMLQ4Pe3avCHQpxhhT6EI70Bv2A2D+758GuBJjjCl8IR3oMRd0okYGzPtraaBLMcaYQhfSgS4OBx1K12ZJxjHcKXYnRmNMaAvpQAdoH9MNt0NY9vuEQJdijDGFKuQDvXWTQbg8yvztsYEuxRhjClXIB3qkqxyXO6OZd/xP1OMJdDnGGFNoQj7QATpUac0uJ2zf+UugSzHGmEJTIgK9fePbAZi/4csAV2KMMYWnRAR6teqXcbHHwfz9qwNdijHGFJoSEegA7ctexApNIenYnkCXYowxhaLEBHqHi3qTLsJvaz4OdCnGGFMo/Ap0EekuIptFJE5Ehuey/D4R+V1EVovIAhFpUPClnptmDfsS7VHmxc8JdCnGGFMo8g10EXEC7wI9gAZAv1wC+wtVbayqzYCX8Q4aXayEhbu4MrwC81P22scXjTEhyZ8z9NZAnKpuU9VUYCLQJ2sDVT2aZbI0oAVXYsHpUL0d+53Chj+mBboUY4wpcP4Eeg0gPst0gm9eNiLygIhsxXuGPqxgyitY7ZrdicujPLVoBHv3rg50OcYYU6AK7KKoqr6rqhcBjwNP59ZGRIaIyHIRWZ6YmFhQm/ZbhQoX816zf/KXeBg4cwDbtlt/ujEmdIhq3r0jInIFMEJVu/mmnwBQ1ZdO094BH
FLVcnmtt2XLlrp8+fKzKvpcbdo8nfsWPkm6QMuwciSmp3BQ07i71jXc1PXVgNRkjDH+EJEVqtoyt2X+nKEvA+qKSIyIRAB9gWwDdYpI3SyT1wLFety3S+r3ZkLXD4mRCHakHSPK4USBsfGxZKSnBro8Y4w5K2H5NVDVdBEZCsQCTmCcqq4XkZHAclWdDgwVkS5AGnAIGFSYRReEWrWuYMLglZnTsxa8yMNbv2T+8re5qs0jAazMGGPOTr5dLoUlkF0uuUlLS6bbhNbUd5bh/UG/BbocY4zJ1bl2uZQI4eGluKl8YxZqEgkJFujGmOBjgZ7FTVc8jgCTl4wOdCnGGHPGLNCzqFq1GR0d5fjm6GZSTxwLdDnGGHNGLNBz+Nul/TjkEGYttrN0Y0xwsUDP4YoW91EjA6bt/LHA152cvL/A12mMMSdZoOfgcIbRo1x9lmkyhw5uzbZs5q/P8smMIWd1c69v5w7n8smd6D++OZ/MuIe9e1YVVMnGGANYoOfqmkYDyRBhzop3M+e5Uw7xwrYpvLJ/Me9+2/+M1nfo4FZG7/yeizOEdJRX9v9Gt9iBvDX1FtLSkgu6fGNMCWWBnotL6vaiVgb8tHtB5rzYxaM46hBaaARjj65n/Pd3+b2+N2Lv47jA6I6vMOmO1czo9D69I6ry4bFNDPzsSnbs+PWM6tu4+VsOHozLs4075VC+ozPt37+JGb88Y7cTNiZEWKDnQhwOrilXn6VZul0m7fyJmAzhv7ctpLuzPK8dWMrH399Nepo7z3WtWvsZU1P3MjC6HhdfdA0AtWu347n+s3nton7Ek86tPz/AouXv+VXbstXj6L/4Kf71Xb882z0yuSddv+7KjF+eOW2bd2c/xPCd3zB/2Zt+bbukUI+HH34dwcJl7wS6FGPOiAX6aXTzdbvMXfEemzZPZ60jjVurXUlYuIsX//YjnaQsrx5YQu8Jrfhm9mO5dp2kp7l5bsVoqmYo93Ubc8ryru2eZGrPL6iFk3/8/h5r13+VZ00JCb/x8KrXcABLcLNp8/Rc263bMJl5mkQEMHznNzz++VUcO7orW5vkpH3MTPHOe3X9+Hz/MAW7o0fi/bpPT3z8Qu6ZcDn/2jGFx9eNwZ1yqAiqM6ZgWKCfxv+6XeYzadV7RHqUXlc+CXi/VfrmgPm8VW8Q0eLkmV0/0n1Ca96ccjPbd/xCcvJ+vp71MP0+u5wtDg/DLxlIqTLn57qdKlWaMPa6iVRU4e9Ln2Pr1lm5tktO2sewWfeSAXzS+llKeZRPlr+ea9uPVrxBtEeZfuNMHjivKbFp+xn4dY9sn62P/W00yQ5hcOmL2eZUpsz917m9YH7Yv38Thw9tP2X+r7+9xnvf9Mu3Gyk36vHw119r87wWER+/mC5Te3DTpy2ZveClXLuYTriP8NF3g7lx9r2s96TQP+oCjjiEHxblelNRY4olu5dLHt6YcjMfH9tEhMI1kVV5vv/sU9qox8P8ZW8zcfOXLPIkkSFCpEc54RDqehwMrN2N6zv9B3Hk/bczPn4hA2fdixNoH1Wd4xlukjJScYoQIWHsTj/GRknn/Yb3c2WrBxg1uTcTj2/jh+6fUbVqs8z1xG39iRsWPMJ9ZRvxwA1fAjB30cs8tGUCj1S8nMHXfQTAgPEtOKbpTBu8mjs/bc1WdTPjpliioiryxaxhTP9rCc2iqtPtklto0WgAzrAIv16zXbuWMnbek1SKrMDtnUZxXvkYMtJT+XLWMN7au4AyCh90fC2z++nX317jH5vGkS5ClEfpG12Xbo0GsitxA9sObiJMnNx13fhcX7+tW2cxesHTLCSZCFXqahiNSlXnvs6vUanSJZnt/jmhHQvTD1NVHWx3Kg09TvpUa0fj2h25OKYLs357hbe3f8sep9BJyvJUt/c5v3Ijbvy4GeHi4KtBK3Pdvno8uN2HiSpVwa/XJhQlJ+3DFVUBhzPf+/yZApLXvVws0POwYdM0/rbk/wD4vOXTNGn4tzzb70/cyIylrxGftItrL+1Ps0b98w3yrDb/8T0PL3iCZJQyOCgtDjxAqnpIRxlUswu3XOM9K9+1ayk9Z93JoDJ1efjmbzLX8cTnnZiTmshP13/HeeVjMuff/0kbVnuS+L7XVA4d2cENCx7h0UpXMOjaD1i/aSp9lzxLD2cFtqcdZpPDwyUeBzvIwO0Qzs9QXm31BM0a33ba2tPT3HwWO5T3En/z1iwQpfC36LqsStrJakmjLaX4I+M4qQLvt36WlNQj3L/mDepqGP/X5v/4bOU7zExLxCOSbd3PVu/KzV3/N0xtcvJ+3vhuEJNSdlJKYWD5JqSkp7DheAKrNIVLNYJx/X8lIjKapas+4q61b/Jg+Wbc2eNDZswfwZgdM0hwetclqqgIl3qcPNL071zeYkjmdr6KHcbze3/ms8ueommjvtlqSk7axyNTerPOk8Skbp9Qrfplub4uf8T9wHuLn8eBg7JhUVSJqsztXV6ldJmqef4unAlPRjoHDmwmIyOV9IwTVKxQr0j+yBw+tJ0+03pxgUTyRu+vqFDh4kLf5klJx/bw0U8P8rcrnjjtax+qLNDPkno89Pq4KaXEedqztEB69LMOLEo7yKxbZlO6TFXi4xfTa849DCh9MY/eMi1b223b53Ljr8O40VUTlzOSL49vZU7vaZn/CZ/8/Gq+S0/k/AxleL3+dLlyOCnug8xb/h5vx03mkCjj2/6H+vWuA2DvnlW8+/Nj7Ek7QoonnX2axl6ncJVE82SXtzmecoCxi58nNv0gZRUer3M913UcScKuJdwzawiHxPt7V10djL/h28w/Pjt2/MqmP3+h9vlNqVPzSoZNuZZ1nhSmdZ9A1WrNSU9z8+AXV7FIk7jFVYsHrnmH8hUuytzP2PnP8ei2SVwfXoURt87k1gmtSFIP3/abhyuqPOA9rnv3rmLdtlg27ltF3YoN6dbu6VPOMo8n7aXz5C50iqjMS7f9nDn/wP4/eOC7W9ko6UQoNHOUYuyARac8f92Gydy75N8IUFEdHMXDfqdwc2R1nu0bm9kuIz2VibP+QZPanWjc8JbM+erxsHzNeEScNKzXO9eQTjtxnPsnXsUS/ncNpGKGMq7Tm1wY0znvX6A8qMfD4pVjaNbgVkqVqpRrm1GTevFF8nbCFSqp8G7H17gwpjMb/5jOj+s+ocH5zene4fQX5desm8iiuO/o1vTOM671nW/6Mvboei7MED696XvKlat9Rs8PZhbo5yAh4TfCwlzZujWKi9/XT6b/8pFcQRRlnS7iUg/zp8PDjz0mcn6VRqe0/8+kXnyZvJ0ohbbh5Xl1wPzMZUcO7+CH317lurZPUia6Wrbn7dm9goE/DiId+KTz+6zbPosXtk8lHagvEZSWcEo5IugZ05POVz6e7Q/f3r2rKRVVkbLlamXO2/fXOu6deRup6mH8tV/mWutJ8fGLuWn2PbR0luHdAYv491fdmZK6h2eq/e/dSk4n/7NfjosluHnlwlvp1v7//HxVs3vxq2v5OmUns66bSoUKF7Nh8zc8tngEiaKMvvQOEo8mMHLPbJ6qchV9u7+d
+byVaz7l7ytf5jwVPrrmQ2rWbAPA6Ml9+DR5G580e4wWTW/PVi/ANc7zGNruOf6In8+HcV+z2eHt7w9Tpb6GcVONTtzc5VXE4UA9nszX496yDakeXRNV5e34WBzAx13GULt2u7Pa75N/GC/1OHmrx8en/P7Hxy+k95x76RNZnRub3MWwJc9xQqCKOtjq9GZKpEf5pstYatVqe8r6Dx3cyg3T+nDA6X031sDjpFfVNlzX5vFs7yxTTxwjcf9GatRonTnvyOEddPvmOi4gjC2SThNcfNDvZyIio/Pcp9QTxwgPL11gJ2Z//bWW8b8+xdrjCbzd6ysqVqpXIOvNjwV6CHt4QntWpB+irAplJYzra1x12qA7cngH135zHUccwtgG93Flqwf83s627XMZ/Msw3ECKQ2jqCeelLm/n+p/VH+lpbjyetHz/EwJ8/sP9/GffAq4gisWkcE/0pQy7cdJp23sy0vnnFx2Z6znKZRrJ+NuXnvV/4m3b59Jn3kO0l9LszUhhi8PDeR7lnVZP07RRX9Tj4f4JV7DSc5wpXcbidEYybcmrfHz4d6qogw97fpotDJOT9nHDpM64ECbftpAFK97noS0T6B1+PjWizufjw7+T4vCGXJ0M4a4LelC+dBVW71rEoqNxbHBk0MNZgWev/4pvFzzHS3/N457oSxh24+TMbcRt/Yk75z1MpMLH3cZlC8OcUpIPsmr9l7RpcW/mOwxPRjo3f3oZRzWDJIHSCm9dOZKGl9yY+bxHPmvP/LRDzLj2Kyqf35A9u1fw9E/3koZyXbV2NL+oB7fPf5RGjlJ8MPC3bK+/ejz88/P2zMs4wnuNH2TLXyv57q8lbHRkEKFK57CKtDy/OUv2rWBh2iGOO4SX69xEj44jAHhr6i18dHQjU9q9wpZdi3l8x1S6Ocvz3I3TTtvVNHvBSzy75XNaOMvyWr/ZhIeXyly2Zt1EPJ50mjcZ4NfvxNats/hy+etMTfkTBRToFVGV53K5xlYYzjnQRaQ78CbeEYs+UtX/5Fj+MHA3kA4kAneq6s681mmBHhgzf32WH3fO5o3bfj3jC1kbNk3jiUXPcF2lZtzR4wPCwl2FVGV2nox0Bk+4nFWSSu/wyjzfd3a+AX08aS/v/nAvf2v9KBdc0P6ctn/vJ61ZRAqNPWH0rtaOHm0eodx5dTKX7927mht/GEAYcNjX/d/OUYaRPT/OdnH2pHlL3uSBTR9xY0RVYt17qEMYn/T7lUhXOfbv38SUhS9wQfm6dL1yeLaL0Z6MdMbNuIu3D66gukfY41DaO8ry5m3zTjmWmzZP585FT1JG4e32o6hf99pT6jh2dBdDp/ZmpaTyaKU2DLr2QwDmLPwP/4j7nJcuuJ56Na9k6LzHOCQwsNyl3NDqYQ4c2c7AFS9xf9lG/N134T03J69BvFi7N706vZA5/9u5w3k6fgYPV2zNHdf9N3P+5i0zmLLyPb5P3skxh1ApQ+kYVZ049342SirjWj5N7eqt6P5tH9qHV+CVAfMAGPfdnbx+cBmiSg2PcHFYNE3L16NVTHcuuqAjr824g8kndlErA+KdcH14FUb2/QlxOJg253FGxM8gQ4SeYRV57Jr3qVT50mz7kZy0j7idP7N060xmHljNFoeHMFWud9Xgrnb/ZtKSlxmftCXbtZbdu5cz5tfheBSiw6IoE16G6mVrUadyE2pXb02FChef9UnGOQW6iDiBP4CuQALeMUb7qeqGLG06AUtUNVlE7geuUtU8ryBaoJszsXfPKmJXvkf/Lm8QHlm6SLd99Eg8R44mUKvWFadt89P85xkTN5mu5RvSp/XDVK+e6/+3TI991oEfMw5R3qN8lcdF1dwsXfUR/1r1BuVxMOGW2FO6yE7asGkaDy56mmMCL9TtT9d2T2YuO3gwjvu+vZktkk49DeMPSeezNiNpUO96bv2kOSnqYdrAZYSFu9i/fxMjZ97Fr54jeEQo61EiFGbcOve0H8cF7x+gQRNas0NTmd7nW8qWrcW2HXMZOP9RLhEX/x2wKNdPT7lTDrFr93Ji6nTC4Qzj0MGt9J92PcmitIs8n+9S9/FN+9e56KKugPeMf9GK91i7azFbk+L5I/Uw233dPicvet9Rpi4PXjeBD2fezftH13FP9CW4wly8fWg1bYiiadkLGXdkHZEKXSKrcjzjBEcyUtjtOZF5AR2gmYbTvUoburV8MDP4jyftpfekLlSSML4YuJRdu5dy1+z7OCJQTiFJ4LiAZrnY//j5bRnQ49TvpvjjXAP9CmCEqnbzTT8BoKq5fkBXRJoD76hqnu/FLdBNSbY/cSPP/HAHdza5j5bNBp/x85OT9yM48v00S+K+9fxjxkDvF+Mia1ClVGU8qsw8sJbd4uH1hkNoXLcXN33TiyiEBy66iX/tmMLzta6lz9XZ3oizd+9qpi15hdgDa7n34pvo3uHZfOuM2/oTt8x/mCiFFIF0EUp7lCnX5N0VlNO27XMY8MtDHHMIPZwVeHlA3rfLOHgwjhUbJvH73mW0vbBH5qeX1ONh5KQefH1iNwDXhlXiuZu/JzyyNDt2/MqoXx9ngyeJcuqgnCOM88NKUzf6AupWbkzDC7ud9lrazF+f5fEdUxlc+mJmHN1CusDYK1/g0vp9AO8Ql7t3r2Dn3pX8eXAjrev2od7FPfze/6zONdBvBrqr6t2+6YHA5ao69DTt3wH2qurzuSwbAgwBqF279mU7d+bZK2OMKQCpJ47xwtQbmZq6N3NeeY/yWvNHM/+YLFv1X+5a8zoOoKpH+G7gkmz9zOfimzn/YuneZVR1VaRqmeq0qnc9F8Zcfcbr+W3FWF5f+z6jOr1JnTodz7qe9DQ3/5lyAxVd5bm316cF8hl69Xi489PWLJcTVMpQPur4v3cQBa3IAl1EBgBDgY6qeiKv9doZujFFK+3EcXAIDgnD4Qg7pQ/3ram38uGxjYyofg03dX01QFUGr+07fuHdBc/wYPsXzvm6TV7yCnR//jTtAmplma7pm5dzI12Ap/AjzI0xRS+/aw8P9P6M9hsm0azRmd0e2njF1LmKV+rMC2gN/lxmXQbUFZEYEYkA+gLZ7grl6zcfC/RW1X0FX6YxprA5wyJo3mRAsfsCnfFfvkdOVdPxdqPEAhuBSaq6XkRGikhvX7PRQBlgsoisFpHcbwNojDGm0Ph1NUBVZwIzc8x7JsvjLgVclzHGmDNk762MMSZEWKAbY0yIsEA3xpgQYYFujDEhwgLdGGNChAW6McaECAt0Y4wJERboxhgTIizQjTEmRFigG2NMiLBAN8aYEGGBbowxIcIC3RhjQoQFujHGhAgLdGOMCREW6MYYEyL8CnQR6S4im0UkTkSG57K8g4isFJF036DSxhhjili+gS4iTuBdoAfQAOgnIg1yNPsTGAx8UdAFGmOM8Y8/Q9C1BuJUdRuAiEwE+gAbTjZQ1R2+ZZ5CqNEYY4wf/OlyqQHEZ5lO8M07YyIyRESWi8jyxMTEs1mFMcaY0yjSi6Kq+oGqtlTVlpUrVy7KTRtjTMjzJ9B3AbWyTNf0zTPGGFOM+BPoy4C6IhI
jIhFAX2B64ZZljDHmTOUb6KqaDgwFYoGNwCRVXS8iI0WkN4CItBKRBOAWYKyIrC/Moo0xxpzKn0+5oKozgZk55j2T5fEyvF0xxhhjAsS+KWqMMSHCAt0YY0KEBboxxoQIC3RjjAkRFujGGBMiLNCNMSZEWKAbY0yIsEA3xpgQYYFujDEhwgLdGGNChAW6McaECAt0Y4wJERboxhgTIizQjTEmRFigG2NMiLBAN8aYEOFXoItIdxHZLCJxIjI8l+WRIvKVb/kSEalT4JUaY4zJU76BLiJO4F2gB9AA6CciDXI0uws4pKoXA68Dowq6UGOMMXnzZwi61kCcqm4DEJGJQB9gQ5Y2fYARvsdfA++IiKiqFmCtAPz7u/Vs2H20oFdrjDFFpkH1sjzbq2GBr9efLpcaQHyW6QTfvFzb+AaVPgJUzLkiERkiIstFZHliYuLZVWyMMSZXfg0SXVBU9QPgA4CWLVue1dl7YfxVM8aYUODPGfouoFaW6Zq+ebm2EZEwoBxwoCAKNMYY4x9/An0ZUFdEYkQkAugLTM/RZjowyPf4ZmBuYfSfG2OMOb18u1xUNV1EhgKxgBMYp6rrRWQksFxVpwP/BSaISBxwEG/oG2OMKUJ+9aGr6kxgZo55z2R57AZuKdjSjDHGnAn7pqgxxoQIC3RjjAkRFujGGBMiLNCNMSZESKA+XSgiicDOM3hKJWB/IZVTnJXE/S6J+wwlc79L4j7Due33BapaObcFAQv0MyUiy1W1ZaDrKGolcb9L4j5DydzvkrjPUHj7bV0uxhgTIizQjTEmRARToH8Q6AICpCTud0ncZyiZ+10S9xkKab+Dpg/dGGNM3oLpDN0YY0weLNCNMSZEBEWg5zdIdbASkVoi8rOIbBCR9SLykG9+BRGZJSJbfP+W980XEXnL9zqsFZEWgd2DsyciThFZJSLf+6ZjfAOMx/kGHI/wzQ+ZAchF5DwR+VpENonIRhG5ItSPtYj80/e7vU5EvhQRVygeaxEZJyL7RGRdlnlnfGxFZJCv/RYRGZTbtvJS7APdz0Gqg1U68IiqNgDaAA/49m04MEdV6wJzfNPgfQ3q+n6GAO8XfckF5iFgY5bpUcDrvoHGD+EdeBxCawDyN4EfVfUSoCne/Q/ZYy0iNYBhQEtVbYT39tt9Cc1j/THQPce8Mzq2IlIBeBa4HO9Yzs+e/CPgN1Ut1j/AFUBslukngCcCXVch7eu3QFdgM1DNN68asNn3eCzQL0v7zHbB9IN31Ks5wNXA94Dg/dZcWM5jjvc+/Ff4Hof52kmg9+Es9rkcsD1n7aF8rPnfWMMVfMfue6BbqB5roA6w7myPLdAPGJtlfrZ2/vwU+zN0/BukOuj53l42B5YAVVR1j2/RXqCK73GovBZvAP8CPL7pisBh9Q4wDtn3y68ByINADJAIjPd1NX0kIqUJ4WOtqruAV4A/gT14j90KQv9Yn3Smx/acj3kwBHrIE5EywBTgH6p6NOsy9f6pDpnPlorIdcA+VV0R6FqKWBjQAnhfVZsDx/nfW3AgJI91eaAP3j9m1YHSnNotUSIU1bENhkD3Z5DqoCUi4XjD/HNVneqb/ZeIVPMtrwbs880PhdeiLdBbRHYAE/F2u7wJnOcbYByy71eoDECeACSo6hLf9Nd4Az6Uj3UXYLuqJqpqGjAV7/EP9WN90pke23M+5sEQ6P4MUh2URETwjse6UVVfy7Io66Dbg/D2rZ+cf7vvKnkb4EiWt3RBQVWfUNWaqloH77Gcq6q3AT/jHWAcTt3noB+AXFX3AvEiUt83qzOwgRA+1ni7WtqISCnf7/rJfQ7pY53FmR7bWOAaESnve3dzjW+e/wJ9IcHPiw09gT+ArcBTga6nAPerHd63YWuB1b6fnnj7DecAW4DZQAVfe8H7iZ+twO94Pz0Q8P04h/2/Cvje9/hCYCkQB0wGIn3zXb7pON/yCwNd9znsbzNgue94TwPKh/qxBv4NbALWAROAyFA81sCXeK8TpOF9N3bX2Rxb4E7f/scBd5xpHfbVf2OMCRHB0OVijDHGDxboxhgTIizQjTEmRFigG2NMiLBAN8aYEGGBbko0EfmHiJQKdB3GFAT72KIp0XzfWG2pqvsDXYsx58rO0E2JISKlRWSGiKzx3Z/7Wbz3GPlZRH72tblGRBaLyEoRmey7zw4iskNEXhaR30VkqYhcHMh9MSY3FuimJOkO7FbVpuq9P/cbwG6gk6p2EpFKwNNAF1VtgfdbnQ9nef4RVW0MvON7rjHFigW6KUl+B7qKyCgRaa+qR3Isb4N3EJWFIrIa7/03Lsiy/Mss/15R2MUac6bC8m9iTGhQ1T98w331BJ4XkTk5mggwS1X7nW4Vp3lsTLFgZ+imxBCR6kCyqn4GjMZ7+9pjQLSvyW9A25P9474+93pZVvG3LP8uLpqqjfGfnaGbkqQxMFpEPHjvinc/3q6TH0Vkt68ffTDwpYhE+p7zNN47fQKUF5G1wAm8w4UZU6zYxxaN8YN9vNEEA+tyMcaYEGFn6MYYEyLsDN0YY0KEBboxxoQIC3RjjAkRFujGGBMiLNCNMSZE/D84fzE31QmjlAAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "# plot training curves\n", + "plugin.loss_history.plot()" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "cf5241cc", + "metadata": {}, + "source": [ + "### Data generation\n", + "\n", + "Since the model training is conditional to the labels, the data generation requires the labels as well. You can pass the labels as a `cond` argument to the `generate` method. If it is not provided, the model will randomly generate the labels following the multinomial distribution of the training labels." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "a2e81779", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
sepal length (cm)sepal width (cm)petal length (cm)petal width (cm)target
06.4421192.9347334.3269331.3725701
16.2854122.7214405.1209012.0575472
24.6963502.0427262.8569090.7889351
35.3360192.6885334.1632831.1920511
46.0818253.2216824.6457681.5052931
55.6901652.3360884.1056301.2966071
65.3989352.7577133.8099841.1613691
77.3582703.2834286.4965902.3172382
86.5953272.5985265.8056531.4513532
95.2247182.7962243.5009151.1252481
\n", + "
" + ], + "text/plain": [ + " sepal length (cm) sepal width (cm) petal length (cm) petal width (cm) \\\n", + "0 6.442119 2.934733 4.326933 1.372570 \n", + "1 6.285412 2.721440 5.120901 2.057547 \n", + "2 4.696350 2.042726 2.856909 0.788935 \n", + "3 5.336019 2.688533 4.163283 1.192051 \n", + "4 6.081825 3.221682 4.645768 1.505293 \n", + "5 5.690165 2.336088 4.105630 1.296607 \n", + "6 5.398935 2.757713 3.809984 1.161369 \n", + "7 7.358270 3.283428 6.496590 2.317238 \n", + "8 6.595327 2.598526 5.805653 1.451353 \n", + "9 5.224718 2.796224 3.500915 1.125248 \n", + "\n", + " target \n", + "0 1 \n", + "1 2 \n", + "2 1 \n", + "3 1 \n", + "4 1 \n", + "5 1 \n", + "6 1 \n", + "7 2 \n", + "8 2 \n", + "9 1 " + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "plugin.generate(10)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "f2d6c6cb", + "metadata": {}, + "source": [ + "### Conditional data generation" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "1f55ffdb", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
sepal length (cm)sepal width (cm)petal length (cm)petal width (cm)target
05.2009353.4104481.2944040.2501560
14.8921723.4047651.3739660.3176620
24.5464153.0013621.3792670.1460120
36.9123333.3724784.7320091.6384991
45.4792602.6232463.4961611.2651181
55.6916102.5684203.6208421.0259881
66.9353143.2469516.2097022.2368082
77.0824953.0612085.9071951.9507212
86.0660102.5531235.1930901.6390342
\n", + "
" + ], + "text/plain": [ + " sepal length (cm) sepal width (cm) petal length (cm) petal width (cm) \\\n", + "0 5.200935 3.410448 1.294404 0.250156 \n", + "1 4.892172 3.404765 1.373966 0.317662 \n", + "2 4.546415 3.001362 1.379267 0.146012 \n", + "3 6.912333 3.372478 4.732009 1.638499 \n", + "4 5.479260 2.623246 3.496161 1.265118 \n", + "5 5.691610 2.568420 3.620842 1.025988 \n", + "6 6.935314 3.246951 6.209702 2.236808 \n", + "7 7.082495 3.061208 5.907195 1.950721 \n", + "8 6.066010 2.553123 5.193090 1.639034 \n", + "\n", + " target \n", + "0 0 \n", + "1 0 \n", + "2 0 \n", + "3 1 \n", + "4 1 \n", + "5 1 \n", + "6 2 \n", + "7 2 \n", + "8 2 " + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "labels = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])\n", + "plugin.generate(len(labels), cond=labels)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "adf672a5", + "metadata": {}, + "source": [ + "## Synthesize a regression dataset\n", + "\n", + "For regression datasets, there is no conditional variable by default. The model learns the joint distribution of the whole dataset and generates new data points from it." + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "id": "13df0848", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
fixed acidityvolatile aciditycitric acidresidual sugarchloridesfree sulfur dioxidetotal sulfur dioxidedensitypHsulphatesalcoholquality
count4898.0000004898.0000004898.0000004898.0000004898.0000004898.0000004898.0000004898.0000004898.0000004898.0000004898.0000004898.000000
mean6.8547880.2782410.3341926.3914150.04577235.308085138.3606570.9940273.1882670.48984710.5142675.877909
std0.8438680.1007950.1210205.0720580.02184817.00713742.4980650.0029910.1510010.1141261.2306210.885639
min3.8000000.0800000.0000000.6000000.0090002.0000009.0000000.9871102.7200000.2200008.0000003.000000
25%6.3000000.2100000.2700001.7000000.03600023.000000108.0000000.9917233.0900000.4100009.5000005.000000
50%6.8000000.2600000.3200005.2000000.04300034.000000134.0000000.9937403.1800000.47000010.4000006.000000
75%7.3000000.3200000.3900009.9000000.05000046.000000167.0000000.9961003.2800000.55000011.4000006.000000
max14.2000001.1000001.66000065.8000000.346000289.000000440.0000001.0389803.8200001.08000014.2000009.000000
\n", + "
" + ], + "text/plain": [ + " fixed acidity volatile acidity citric acid residual sugar \\\n", + "count 4898.000000 4898.000000 4898.000000 4898.000000 \n", + "mean 6.854788 0.278241 0.334192 6.391415 \n", + "std 0.843868 0.100795 0.121020 5.072058 \n", + "min 3.800000 0.080000 0.000000 0.600000 \n", + "25% 6.300000 0.210000 0.270000 1.700000 \n", + "50% 6.800000 0.260000 0.320000 5.200000 \n", + "75% 7.300000 0.320000 0.390000 9.900000 \n", + "max 14.200000 1.100000 1.660000 65.800000 \n", + "\n", + " chlorides free sulfur dioxide total sulfur dioxide density \\\n", + "count 4898.000000 4898.000000 4898.000000 4898.000000 \n", + "mean 0.045772 35.308085 138.360657 0.994027 \n", + "std 0.021848 17.007137 42.498065 0.002991 \n", + "min 0.009000 2.000000 9.000000 0.987110 \n", + "25% 0.036000 23.000000 108.000000 0.991723 \n", + "50% 0.043000 34.000000 134.000000 0.993740 \n", + "75% 0.050000 46.000000 167.000000 0.996100 \n", + "max 0.346000 289.000000 440.000000 1.038980 \n", + "\n", + " pH sulphates alcohol quality \n", + "count 4898.000000 4898.000000 4898.000000 4898.000000 \n", + "mean 3.188267 0.489847 10.514267 5.877909 \n", + "std 0.151001 0.114126 1.230621 0.885639 \n", + "min 2.720000 0.220000 8.000000 3.000000 \n", + "25% 3.090000 0.410000 9.500000 5.000000 \n", + "50% 3.180000 0.470000 10.400000 6.000000 \n", + "75% 3.280000 0.550000 11.400000 6.000000 \n", + "max 3.820000 1.080000 14.200000 9.000000 " + ] + }, + "execution_count": 29, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import pandas as pd\n", + "\n", + "df = pd.read_csv(\"https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-white.csv\", sep=\";\")\n", + "\n", + "loader = GenericDataLoader(df, target_column=\"quality\", sensitive_columns=[])\n", + "loader.dataframe().describe()" + ] + }, + { + "cell_type": "code", + "execution_count": 47, + "id": "14bca1cd", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[2023-03-27T18:08:18.761007+0200][38480][INFO] Step 100: MLoss: 1.2836 GLoss: 0.9867 Sum: 2.2703\n", + "[2023-03-27T18:08:24.679745+0200][38480][INFO] Step 200: MLoss: 1.2622 GLoss: 0.9409 Sum: 2.2031\n", + "[2023-03-27T18:08:30.391531+0200][38480][INFO] Step 300: MLoss: 1.2059 GLoss: 0.7669 Sum: 1.9727999999999999\n", + "[2023-03-27T18:08:36.164268+0200][38480][INFO] Step 400: MLoss: 1.1645 GLoss: 0.6393 Sum: 1.8038\n", + "[2023-03-27T18:08:41.835318+0200][38480][INFO] Step 500: MLoss: 1.1717 GLoss: 0.6158 Sum: 1.7875\n", + "[2023-03-27T18:08:47.581383+0200][38480][INFO] Step 600: MLoss: 1.1946 GLoss: 0.5384 Sum: 1.733\n", + "[2023-03-27T18:08:53.378127+0200][38480][INFO] Step 700: MLoss: 1.1343 GLoss: 0.5135 Sum: 1.6478000000000002\n", + "[2023-03-27T18:08:59.698145+0200][38480][INFO] Step 800: MLoss: 1.1168 GLoss: 0.4788 Sum: 1.5956000000000001\n", + "[2023-03-27T18:09:05.752638+0200][38480][INFO] Step 900: MLoss: 1.1034 GLoss: 0.4734 Sum: 1.5768\n", + "[2023-03-27T18:09:12.070003+0200][38480][INFO] Step 1000: MLoss: 1.142 GLoss: 0.4692 Sum: 1.6112\n", + "[2023-03-27T18:09:18.112377+0200][38480][INFO] Step 1100: MLoss: 1.1691 GLoss: 0.4602 Sum: 1.6293\n", + "[2023-03-27T18:09:25.549484+0200][38480][INFO] Step 1200: MLoss: 1.1201 GLoss: 0.4578 Sum: 1.5779\n", + "[2023-03-27T18:09:31.574874+0200][38480][INFO] Step 1300: MLoss: 1.1436 GLoss: 0.4429 Sum: 1.5865\n", + "[2023-03-27T18:09:37.672797+0200][38480][INFO] Step 1400: MLoss: 1.1093 GLoss: 0.449 Sum: 1.5583\n", + 
"[2023-03-27T18:09:44.149652+0200][38480][INFO] Step 1500: MLoss: 1.1468 GLoss: 0.4347 Sum: 1.5815000000000001\n", + "[2023-03-27T18:09:49.923915+0200][38480][INFO] Step 1600: MLoss: 1.1545 GLoss: 0.4313 Sum: 1.5858\n", + "[2023-03-27T18:09:55.733558+0200][38480][INFO] Step 1700: MLoss: 1.102 GLoss: 0.4305 Sum: 1.5325000000000002\n", + "[2023-03-27T18:10:03.367053+0200][38480][INFO] Step 1800: MLoss: 1.0953 GLoss: 0.4267 Sum: 1.522\n", + "[2023-03-27T18:10:10.533359+0200][38480][INFO] Step 1900: MLoss: 1.1247 GLoss: 0.4223 Sum: 1.5470000000000002\n", + "[2023-03-27T18:10:17.355705+0200][38480][INFO] Step 2000: MLoss: 1.2767 GLoss: 0.4266 Sum: 1.7033\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 47, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# define the model hyper-parameters\n", + "plugin_params.update(\n", + " is_classification = False,\n", + " n_iter = 500, # epochs\n", + " lr = 5e-4,\n", + " weight_decay = 1e-4,\n", + " batch_size = 1250,\n", + " n_layers_hidden = 3,\n", + " dim_hidden = 256,\n", + " num_timesteps = 100, # timesteps in diffusion\n", + ")\n", + "plugin = Plugins().get(\"ddpm\", **plugin_params)\n", + "plugin.fit(loader)" + ] + }, + { + "cell_type": "code", + "execution_count": 48, + "id": "83064f94", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 48, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXQAAAEGCAYAAAB1iW6ZAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/P9b71AAAACXBIWXMAAAsTAAALEwEAmpwYAABiV0lEQVR4nO2dd3hU1fa/3z2TSSa9997ovfcqSFERsSsCevXqvViuX/V61Z967V67omIXK6CAoggovZcQagokpPfee+b8/jiTISEJJJDOfp8nTybn7HPOmjOTz1577bXXEYqiIJFIJJLuj6azDZBIJBJJ2yAFXSKRSHoIUtAlEomkhyAFXSKRSHoIUtAlEomkh2DWWRd2cXFRAgICOuvyEolE0i05cuRIjqIork3t6zRBDwgIICwsrLMuL5FIJN0SIURic/tkyEUikUh6CFLQJRKJpIcgBV0ikUh6CJ0WQ5dIJJILUV1dTUpKChUVFZ1tSqeg1+vx8fFBp9O1+Bgp6BKJpEuSkpKCra0tAQEBCCE625wORVEUcnNzSUlJITAwsMXHyZCLRCLpklRUVODs7HzFiTmAEAJnZ+dWj06koEskki7LlSjmdVzKe+92gn4m/wzvh79PfkV+Z5sikUgkXYpuJ+hJRUl8dvIzMssyO9sUiUQi4euvv2bp0qWdbQbQDQXd1twWgOKq4k62RCKRSLoW3VbQi6qKOtkSiUTS00lISKBPnz4sXryYXr16cccdd7BlyxbGjx9PaGgohw4datR+2rRpDBo0iOnTp5OUlATATz/9xIABAxg8eDCTJk0CICIiglGjRjFkyBAGDRpETEzMZdt70bRFIYQv8A3gDijAp4qivHdemynAr0C8cdNaRVFeuGzrmkB66BLJlcd/f4sgMq1tnbh+XnY8d23/i7aLjY3lp59+4ssvv2TkyJH88MMP7Nmzh/Xr1/PKK69w/fXXm9o++OCDLFq0iEWLFvHll1/y0EMP8csvv/DCCy+wefNmvL29KSgoAGD58uU8/PDD3HHHHVRVVVFbW3vZ76klHnoN8H+KovQDxgD/FEL0a6LdbkVRhhh/2kXMAezM7QAp6BKJpGMIDAxk4MCBaDQa+vfvz/Tp0xFCMHDgQBISEhq03b9/P7fffjsACxcuZM+ePQCMHz+exYsX89lnn5mEe+zYsbzyyiu8/vrrJCYmYmlpedm2XtRDVxQlHUg3vi4WQkQB3kDkZV/9ErDR2QBS0CWSK4mWeNLthYWFhem1RqMx/a3RaKipqWnROZYvX87BgwfZsGEDw4cP58iRI9x+++2MHj2aDRs2MGfOHD755BOmTZt2Wba2KoYuhAgAhgIHm9g9VghxXAixUQjR5N0XQtwnhAgTQoRlZ2e33lpAq9Fio7ORgi6RSLoc48aNY+XKlQB8//33TJw4EYCzZ88yevRoXnjhBVxdXUlOTiYuLo6goCAeeugh5s2bx4kTJy77+i0WdCGEDbAGeERRlPODWeGAv6Iog4EPgF+aOoeiKJ8qijJCUZQRrq5N1mdvEbbmtnJSVCKRdDk++OADvvrqKwYNGsS3337Le++p042PP/44AwcOZMCAAYwbN47BgwezevVqBgwYwJAhQzh16hR33XXXZV9fKIpy8UZC6IDfgc2KorzdgvYJwAhFUXKaazNixAjlUh9wsWD9ArxsvPhg2geXdLxEIun6REVF0bdv3842o1Np6h4IIY4oijKiqfYX9dCFuv70CyCqOTEXQngY2yGEGGU8b24rbW8xduZ2MuQikUgk59GSaovjgYXASSHEMeO2pwA/AEVRlgM3Ag8IIWqAcuBWpSWu/yVia25Laklqe51eIpFIuiUtyXLZA1ywSoyiKMuAZW1l1MWwNbeVHrpEIpGcR7dbKQoy5CKRSCRN0S0F3dbclpLqEmoNl7+ySiKRSHoK3VbQAUqqSzrZEolEIuk6dEtBr1v+L3PRJRJJZ7B48WJ+/vnnzjajEd1S0GWBLolEImmMFHSJRCK5AC+++CK9e/dmwoQJ3Hbbbbz55psN9m/dupWhQ4cycOBA7r
77biorKwF48skn6devH4MGDeKxxx4Dmi6j25a0JA+9y1G/4qJBMaAR3bJfkkgkLWXjk5Bxsm3P6TEQZr92wSaHDx9mzZo1HD9+nOrqaoYNG8bw4cNN+ysqKli8eDFbt26lV69e3HXXXXz88ccsXLiQdevWER0djRDCVDK3qTK6bUm3VMI6D31v2l5Gfz+a03mnO9kiiUTSE9m7dy/z5s1Dr9dja2vLtdde22D/6dOnCQwMpFevXgAsWrSIXbt2YW9vj16v55577mHt2rVYWVkBTZfRbUu6tYf+29nfqKytZP3Z9Tzu9HgnWyWRSNqNi3jSXQ0zMzMOHTrE1q1b+fnnn1m2bBnbtm1rsoyus7Nzm123W3roVjorNEJDZa0aq9qcsBmDYuhkqyQSSU9j/Pjx/Pbbb1RUVFBSUsLvv//eYH/v3r1JSEggNjYWgG+//ZbJkydTUlJCYWEhc+bM4Z133uH48eNA02V025Ju6aFrhAYbnQ1FVUWM9x7P3tS9HM8+zlC3oZ1tmkQi6UGMHDmS6667jkGDBuHu7s7AgQOxt7c37dfr9Xz11VfcdNNN1NTUMHLkSO6//37y8vKYN28eFRUVKIrC22+rdQ0ff/xxYmJiUBSF6dOnM3jw4Da1t0Xlc9uDyymfCzBrzSwySzPZuGAj16y7hhtCb+Cp0U81aldaXcr6s+uZ6T8TZ8u2G9pIJJL2pauUzy0pKcHGxoaysjImTZrEp59+yrBhwzrk2m1ePrer4mPjw0SfiXhYezDZZzKb4jdRXVvdoE14ZjjXrbuOVw6+wuuHX+8kSyUSSXfmvvvuY8iQIQwbNowFCxZ0mJhfCt0y5ALw7tR3TemK80Lm8Wfin+xO3c1kn8lUG6rRm+l558g7aDQa5gTO4Y/4P1jSfwl9nRv2dnGFcfx+9neWDl0q0x8lEkkjfvjhh842ocV0WwWzMbfBSqemAo3zGoez3pkfo3/ktg23cdfGu8guy+Z49nEWhC7gmTHPYG9hz3tH32t0nrVn1vLZyc9ILm7byQmJRCLpaLqtoNfHTGPGtcHXciD9AFF5UUTlRfHCgRdQUJjuNx1bc1sWhC7gQNoBU2ZMHVF5UQ1+SyQSSXelRwg6wI29biTALoD/Tfof7lbu7EjegZ+tHyEOIQD0depLrVJLQmGC6RhFUYjKVYU8Oje6E6yWSCSStqPHCLq/nT+/zf+N2YGzuaPvHQBM95uO8VGnBDsEAxBbEGs6JqUkheJqtR5MdJ4UdIlE0r3ptpOiF+KmXjdxOv80N/W6ybQtwC4AM2HG2YKzpm113nmIQ4gUdIlE0ggbGxtKSrrPcxd6jIdeHxtzG16b+Bq+dr6mbTqtDj87vwYeenReNGZCjb/nVuSSXZbdGeZKJBJJm9AjBb05gh2CG3jokXmRBDsEM8hlEAB/xP/ByuiV1BhqGhxXXVvNz2d+ZnvSdgorCzvUZolE0vkoisLjjz/OgAEDGDhwIKtWrQIgPT2dSZMmMWTIEAYMGMDu3bupra1l8eLFprbvvPNOh9nZI0MuzRHiEMKWxC1U1FRgrjUnKjeKid4T6e3UG4A3w9Q6xynFKcwPnc+e1D3c0fcOtiZv5b/7/wvAMLdhrJi9otPeg0RyJfL6odfbPCzax6kP/x717xa1Xbt2LceOHeP48ePk5OQwcuRIJk2axA8//MDVV1/N008/TW1tLWVlZRw7dozU1FROnToF0C5lcpvjihN0BYW4wjiKqorIq8hjjNcYU1qjTqOj2lDNisgVfBf1HbVKLaEOoRzPOo6lmSULQhfwfdT35JTn4GLp0tlvRyKRdBB79uzhtttuQ6vV4u7uzuTJkzl8+DAjR47k7rvvprq6muuvv54hQ4YQFBREXFwcDz74IHPnzmXmzJkdZucVJ+gAp/NOsy9tH3bmdszwnwHA8+OeB6DaUE1FbQV6rZ5fz/7KwYyDHMs6Rn/n/swLmcd3Ud+xO2U380Pnd9bbkEiuOFrqSXc0kyZNYteuXWzYsIHFixfz6KOPctddd3H8+HE2b97M8uXLWb16NV9++WWH2HNFxdD97PzwtvHm3fB32Zq0lWuDr8VCa9GgjU6j47WJr/H8uOcZ5DKIXSm7iM6LZrDrYHo79sbD2oPtyds76R1IJJLOYOLEiaxatYra2lqys7PZtWsXo0aNIjExEXd3d+69917+9re/ER4eTk5ODgaDgQULFvDSSy8RHh7eYXZeUR66mcaM5VctZ9GmRVQbqlkQuuCC7Ud5jmL58eUADHYdjBCCyT6TWX92PRU1FejN9B1htkQi6WTmz5/P/v37GTxY1YH//e9/eHh4sGLFCt544w10Oh02NjZ88803pKamsmTJEgwG9RkNr776aofZ2W3L514OcQVxROZFck3QNRdsdzjjMHdvvhuAnbfsxEnvxN7Uvdy/5X5em/gac4PmdoS5EskVSVcpn9uZXDHlcy+HIIegi4o5qF65hdYCP1s/nPROAIzxHEOoYygfHvuwUbleiUQi6UyuSEFvKeZac27ve3uDFadajZZHhj1CcnEyHx3/iLiCuE60UCKRSM4hBf0iPDr8URYPWNxg20TviYzzGsfnJz9n3q/z+D3u3HMGT+WcYmX0yg62UiLpmXRWSLgrcCnvXQr6JSCEYNm0ZXw/53tCHUP5/MTnppv/Q9QPvHbotUZleiUSSevQ6/Xk5uZekaKuKAq5ubno9a1LvLiislzaEp1WxyDXQSzpv4Sn9jzFntQ9TPSZSGJxoqlMb90KVIlE0np8fHxISUkhO/vKrLGk1+vx8fFp1TFS0C+TWQGzePfIu3wb+S0TfSaSVJQEQExBjBR0ieQy0Ol0BAYGdrYZ3QoZcrlMdFodswNnE5YZRm55LgWVBQDE5sde+ECJRCJpY6SgtwH9XfpTbahusIK0fpleiUQi6QikoLcBfZ3UxP9NCZsACHUMlYIukUg6HCnobYCfnR9WZlYczjgMwFTfqaSWpFJaXdrJlkkkkiuJiwq6EMJXCLFdCBEphIgQQjzcRBshhHhfCBErhDghhBjWPuZ2TTRCQx+nPhgUAx7WHvR37g/Q4GEaEolE0t60xEOvAf5PUZR+wBjgn0KIfue1mQ2EGn/uAz5uUyu7Af2c1VviZ+tHqEMoAF+c/ILj2ccB2JywmXm/zJP56RKJpN24qKAripKuKEq48XUxEAV4n9dsHvCNonIAcBBCeLa5tV0Yk6Db+eFt682sgFnsS9vH4o2LSS5O5ouTXxBXGGd6MLVEIpG0Na2KoQshAoChwMHzdnkDyfX+TqGx6COEuE8IESaECOtpiwXqJkb9bf3RCA1vTH6D3+f/jhCCp3Y/RVSeKuSnck51ppkSiaQH02JBF0LYAGuARxRFKbqUiymK8qmiKCMURRnh6up6KafosgQ7BPP82OeZFzLPtM3d2p0bQm/gWPYxLM0scdI7cSLnRCdaKZFIejItEnQhhA5VzL9XFGVtE01SAd96f/sYt10xCCFY0GsBjnrHBtuXDFiCmTBjduBshrsPlx66RCJpN1qS5SKAL4AoRVHebqbZeuAuY7bLG
KBQUZT0NrSz2+Jt483Ka1by+IjHGeAygOTiZPIr8imrLuts0yQSSQ+jJbVcxgMLgZNCiGPGbU8BfgCKoiwH/gDmALFAGbCkzS3txtTVdBnoMhCA+7fcT3xhPGuuXYOvne+FDpVIJJIWc1FBVxRlDyAu0kYB/tlWRvVU+jn3QyCIzI1EIzT8ePpHnhj5RGebJZFIeghypWgHYq2z5qFhD/H6xNe52v9qfon5RYZeJBJJmyEFvYP528C/MSdoDrf3vZ3i6mJ+PftrZ5skkUh6CFLQO4nBroMZ5jaMd468Q0RORGebI5FIegBS0DsJIQRvTXkLJ70T/9j6D3LKc1p9jsraSmavmc3G+I3tYKFEIuluSEHvRFwsXVg2bRlFlUUsP7681cefzjtNSkkK25K2Ndq35swaTmafbAszJRJJN0EKeicT4hjCgl4L+PnMz3wf9T3/b+//a+StV9ZW8tqh10gpTmmwvW6R0rHsYw22K4rCKwdfYUXkina1XSKRdC2koHcB7h98PxZaC1479Bq/xP7Cf/f9t8GTzg+lH+L7qO95es/TGBSDaXtErhp7zyjNIKM0w7Q9ryKPKkMVZ/LPdNybkEgknY4U9C6Ai6ULH07/kGXTlvHYiMfYkbKDX2J/Me0/nKk+OCM8K5yfz/xs2h6RE4GblRsA+9P2M//X+ayLWUdGmSruiUWJslyvRHIFIQW9izDCYwSTfSezsN9ChrsP542wN0yhl7CMMIa4DmG052jeOfIOOeU5lFWXEVcYx7zgeei1et4Ie4PYglgOZxw2eesGxSAfsiGRXEFIQe9iaISG58Y+R0VNBW8cfoPS6lIicyMZ6TGSp0c/TUVNBe+Hv09kbiQKCkPchtDfpT/FVcUAJBYnNgi/yLCLRHLl0JJaLpIOJtA+kLsH3M0nJz7BTGNGrVLLSI+RBNoHcme/O/k64muTUPd37s8oj1GczD7JcPfhROdFk1mWiU6jQyM0xOTHdPK7kUgkHYUU9C7KvYPu5UT2CdafXY+ZxozBroMB+Pugv5NQmEBMQQwTvCfgbOnMPQPv4fqQ69mcsJn96fuJzY/F3codOws76aFLJFcQUtC7KBZaC5ZNX8Z/9/8XM40ZVjorAGzMbfhg+geN2nrZeOFn6weok6d9nfriY+vDjuQdfHD0A0Z6jGSM55iOfhsSiaQDkYLehTHXmvPyhJdb3L6uFG9pdSke1h70duzNL7G/8OmJT9mdspvV165uL1MlEkkXQE6K9iB8bHxMrz2sPZgfOp+Xxr/E3QPuJiovitSSrvEQqcLKQuasnSNr2EgkbYwU9B6Elc4KN0s1L93DygNrnTXzQuZxY+iNAGxL2sbq06vZkbyj84wEYgtiSS5O5nj28U61QyLpaciQSw/D186XrPIsPKw9Gmzr5diLz058Rn5lPsPchjHFd0qn2ZhVlgVAdnl2p9kgkfREpIfew6ibGK0v6ADT/aaTX5kPQHpp6x73alAMvHTgJVOxr+raakCtGfNd5Het9rRNgl52TtCTi5PZlbKrUdtN8ZuYvGoy5TXlrbqGRHIlIgW9hxHsEIyZMMPTxrPB9lv73Mq9A+/l9j63k1mWSbWh+oLnySnP4eqfr2Z70nZOZJ9g1elVvHroVXLLc5m5ZiaP7niU76O+5/XDr/Ph0Q9Nx8Xkx3DL77dQUFHQ7LnrBL1+EbLlx5fz8PaHGwl3WGYYeRV5xObHtvQWSFrA1sSt7E/b39lmSNoYKeg9jJt738x3c7/DztyuwXYnvRMPDXuIPk59MCgGMkszee3Qa7y4/8Umz/ND1A+klabxfdT3bEncAsDJnJP8/a+/k1+Rz1+Jf/H64dex0FpwOPMwJVUlAOxI3kFkbiRHMo80a2NTIZeInAhqDDWmCpJ1JBQlABBTIBdItSVvHXmLj49/3NlmSNoYKeg9DEszS/o79292f53nnl6azpbELWyM34hBMfB22Ns8tvMxAMqqy1h1ehUWWgsOZhzkt7jfGO05Gncrd07nn+buAXfzyoRXmOA9gTcmvUGNoYa9aXsBiMqLAiAyL7JZG8730Ovq0gAcyzrWoG1CYQKAXPHahlTXVpNWkkZycXJnm3JFciD9AGklae1ybinoVxje1t4AROVGkVmWSXF1MWcLzrL+7Hq2Jm2loqaCdbHrKKoq4sXxqveeV5HHrIBZPDnqSSZ6T+TeQfdybfC1fHzVx0zymYSDhYMpcyYyN9J0/mpDNZsSNlFrqG1gQ2ZZpum81bXVROVFoaAgEIRnhZvalVWXmdpKQW87UkpSqFVqTUXeJB2HQTHwjy3/YOXple1yfinoVxge1h4IBDtTdpq2rYlZQ25Frink8Uf8H/R16svswNmM9hiNQDDFdwpX+V/FR1d9hKWZpelYrUbLJJ9J7ErZRW55LqklqWiEhsjcSH47+xuP73ycvxL/MrVXFIXssmxszW0ByK3INXUCk30nczz7uKnme1JxEgD2FvacyT/ToEZ8VyOtJK1L21efxKJE0+uUkpQLtJS0NVllWVQbqhusGWlLpKBfYei0Otys3AjPVD1hC61FgxrrW5O2cjL7JFN9pwLw+MjHeWH8C7hYujR7zqv8rqKoqoiPjn0EwCTvSeRW5PJt5LcA/BH/h6ltQWUBVYYq+jn3A9RMl4hcta77DP8ZFFcVm0r+1oVb6jJ0cityW/w+M0ozTMe3NynFKcxeO5sN8Rs65HqXS31Bb03YRVEUlh1dJieoL4O6xX3eNt7tcn4p6FcgXjZe1Cg12JrbMtZzLJW1lfjZ+hFsH8zq06tRUJjkMwmA3k69uT7k+gueb5LPJHxtfVl9Ri0tcEPoDYC6gMhWZ8vu1N0UVhYC5+LndXH+7PJsInIi6O/cn6GuQwE4mnUUgPiieASC6X7TgdaVAn75wMvcv+X+S/KaFUUxlSAuripmRcQKagw1zbaPzovGoBjYFL+p1dfqDBKLEtFr9QCNHmt4ISJyI/jkxCfdpuPqitTdbynokjbDy8YLgF6OvRjiNgSAMZ5jGOo+lCpDFc56Z/o6923x+bQaLXf2vRNQv6ijPdUwDcDTY56mxlDDhrgNlFSVmAR9gMsAAOIK40goSqCfcz98bH1w1jubBD2hMAFPa09T29bE0etKHdRlybSGtTFrmb1mNolFiaw+vZo3w940jWiaom5Cd3/afkqrS1t9vYzSDIqqilp93KWSVJREL6de2JrbtspD35q0FaDdJvQ6AkVR+CbimwZrIM7fX1Vb1W7XTy1JRSBM/4NtjRT0KxAva/XLFOoQygiPEQCM9x7PUDfVQ57oMxGNaN1X4/qQ67Ezt2OAywCsdFYEOwQzwHkAcwLn4Gfrx6uHXmXiqomm8E4fpz4IhOnvcV7jEEIwzH2YSdATixLxt/PHSe+Ep7UnB9IPtMiWoqoi02TqvrR9rXofAOvPrqdGqeGP+D9MIlb3/NamiCuMQyM0VBmq2J26u1XXSi5K5ob1N/DygXNF2KJyo5i2ehrJRc2LbVJREv/d/1/iC+NbdT1QU0ED7ALws/W7JEGv/wCVtqCwsrDDMm7iC+N5I+wN
fov7rcn9v8f9ztTVU9ttsji1JBU3KzfMtebtcn4p6FcgdcO9UMdQBrsO5udrf2aq71RGe4zGysyKOYFzWn1OK50V3875lidHPQnA21Pe5q0pbyGE4I3Jb/DEyCfwtfVlW/I2QK0146R3IrUkFW8bbwa6DARgiOsQUktSSStJI74wngD7AADmhcxjb+reFoUI6mK8AtFI0GPzY1kZvbJR5k0dGaUZhGeFmzqbkznq6tiI3AiqDdVsiNvQ6Ni4gjhGeYzCSe/E5vjNGBQDn5/8nOf2PXfBkE95TTn/2vEviquKOZxx2NR25emVZJdnE5YZ1uRx25K2sWD9An4+8zOfn/zctH1TwiYWb1pMRU0F4ZnhPLL9EU5kn2h0zcyyTPxs/fC19W2xkMYVxhFfGI+5xvyiK41PZp9k/dn1jVJQm+PFAy+yeOPiDplUrlvP0NwoY2/aXoqqitqtg0kpTmm3cAtIQb8i6ePcB63QmsItvZ16I4TA3dqdA7cfYKzX2Es6b5B9kGnyNNA+0DSs7Ofcj4X9FprE3knvhE6rw9XKFYCZATMRQg3R1I0S3gx7k7KaMiZ4TwBgQegCNELTYAK3OWILVEGf7DuZwxmHGwyhPzj6AS8ffJmHtz/cwAsrrCxkY/xG08O57+x3pyk81NuxNxE5Efx29jee3P1kg+JmBsVAQlECIQ4hzAmcw5akLcz4aQbvhb/H2pi1F4z7fx/1PafzT3OV31Vkl2eTWpJKWXWZKRbf3LFfnfoKD2sPZvjP4K/Ev0xhnu8iv+NI5hG+jviaZ/c9y9akrdzxxx38dOYn07FJRWrmkL+9P762vqSXpF9w1XBJVQlP7X6KpVuXAjA7cDZZZVkXnFP4145/8fSep1m4caEpg6k5Kmoq2JWyi6zyLJKLk6k2VLdr+KkubNdcdk9dBdB2E/SSFHxs2yfDBaSgX5H0d+7P3tv20suxV6N9dcLaHozzGsdM/5mm+Hyd+F8dcLWpTR/nPui1ev5K/AsfGx+ToHtYezDZZzJrY9ZSUVOBoigczTrKJ8c/aeSFn8k/g43OhhtCbqC8ppy7Nt7Fx8c+ptZQy+HMwwTaB7I7dTeP7XzM5G2viFjBE7ue4MNjH9LXqS9/G/g3tEJLiEMIswNnk1KSwspoNXd4X9o+yqrLePPwm0TmRlJeU06QQxD/N+L/+O+4/2JnYcc/Bv8DrdCyKaHpidJaQy0/nf6JUR6juH/w/YA6GbwlaQtlNWXY6GyaFPSSqhJO5pxkhv8M7up3F+U15fyZ8CepJakczz6OpZklHx77kMSiRN6Y/AZ9nfqy9sxa0/F1GS7+tqqg1yg1ZJQ0HUKpMdTw2M7H2Bi/EX87fx4a+hBD3YZSq9Q2G4MuqCggsyyTW3rfYrpXF+Jg+kFTuYfj2cd5P/x9rl13bbO1e2oMNZflydd19k156MVVxaY5l/YQ9KraKrLLststZRGkoF+xWOusO+W6b0x+g4+nq0vO+zv3p69TX/o59TPt12l0DHRVwy+39L6lQSx/Uf9F5Ffm88mJT1h+Yjl3bbyLZceW8fSepxt44bEFsYQ4hDDOexzXh1xPZW0lHx//mK1JWymuKub+Qffz9Oin2Z26m/eOvgfAoYxDBNoHcm3QtTww+AGc9E78a/i/eHDog/R3UTNyovKiTGGctTFrWRG5gv/s/g+gjk7MNGbcEHoD6+at44EhDzDKYxSb4jc1KUB7UveQVprGzb1vJsQhBBudDUcyj7AyeiW+tr5cHXA1Z/LPUFpdyqsHXyWzVJ0TCMsMo1apZYznGAa7DibALoCfzvzEb2fVmPCbk99EK7SM9x7PrIBZTPWbSkRuBPkVamG2utx+fzt/gh2CATiRc6KRfQCfnfyMvWl7eWbMM3x81cfcO+hePK3PrTRuirqQxlTfqYQ4hHAo/ZBp36H0QxxMP9ig/fbk7VjrrLHR2RCeFc6GuA3kVeSxMX5jo3MbFAM3rL+BD45+0Ghffcqqy0ylKBrZl38u5FJjqOGhbQ+Z5mbqjyaSi5OJyIlgyaYllzTRXVRVxJRVU9ietN20La0kDQUFb1sZcpH0EDRCYxoFLB26lJXXrGw0KhjrORZrnXWjdMlh7sO4PuR6vjr1FR8d+4i5QXN5c/Kb5JTnmHLdFUUhJj+GEMcQLLQWvDj+RT6crhYPe+XgKwCM8hzFzb1vZkHoAr4+9TWx+bFE5EQw3W86r0x8hal+ag7+ov6LmOY3zZQzD2onk1KSYopd13l0QfZBjd7rrMBZpJSkmCZUI3IjKK4qRlEUfoz+ERdLF6b5TUOr0TLYbTC/xv7KyZyT3DfoPno59qKgsoAvTn7BD9E/8PTepzEoBg6kH0Cv1TPYbTBCCJYMWMLJnJN8eOxDBrkMYpLPJFZes5K3Jr8FwHiv8SgoJiFNKEzAzdINK50VA1wG4Grp2mDhV322JG5hlMcoFvRaYNrmYaNW8UwrPefhnsg+wdKtS9kQt8EkmCEOIYz2HM3RrKNU1VYRkx/DA1se4PGdj5s63+KqYrYnb2ei90QGuw5mQ9wGssuz0Wl0rIxe2agjjMqNIr4wnm1J25q0F9SQydx1c3l4+8ON9pXXlJNcnIyT3onK2koOZxxme/J2/kz4E8BUR6hubmFzwmbCMsMadEot5XD6YXIrchss4GvvHHSQgi7pZJrKplk8YDF/3PAHDnqHRvseG/EYjnpH+jr15fmxzzPTfyahjqF8deorVkSs4NVDr1JUVUSoQ6jpGC8bL8Z5jSO3IpdQx1BTqOe+QfcB6qRcjVLDSPeRTdpoZ25HgF0AvR17s7DfQkBd4frwsIfRa/U4WjjiqHdsdNx0v+lYmVnxz63/ZOnWpdz6+60s2bSEL059wd60vdzR9w50Gh0AQ12HUqPUMMF7AvOC55nCYd9EfoOlmSUH0w/y0bGP2Ju6l2Huw7DQWgBqzv+7U9/FSe/ELX3UMEcfpz6mEVh/5/7YmduZQh9JxUn42fmZ7v1V/lepo4WSNJYdXWaqkllQUcCZ/DOM9hzd4D3VeegZpRmEZYRx/5b7ueOPO9iZspNvI78ltiAWO3M73KzcGO0xmoraCval7eOJXU8ghCC/Mp/tydv5NvJbpv80nbyKPK4JuobBroMprynHXGPOg0MfJCovylQfqI4dKTsAOFt4tsmQT2JRIks2LyG3PJewzDCKq4pN+yJyIojOi26wxmJzwmbg3FxFRG4EPjY+DHAe0OABLAczDtJaDmWonUD90tIdIejyAReSLodOo8NJ79TkPnsLe9Zdtw5LnaVJ1Bb3X8zTe57mzbA3sTSzxFnvzCiPUQ2OuyH0Bvam7W3woGwvGy9GeY7iYPpBzDRmpknipnh90uvotXp8bX3xtvGmqKqIO/regb2FvWnRVFO2fjvnW57f9zz70/ZzS+9bWBuzlvfC32OSzySW9F9iajszYCaHMw/z3NjnEEIQ6qh2SJW1lTw24jGOZB7hkxOfADA/dH6D60z3m84032lNzn9oNVrGeI5hb9peFEUhsSjRtAoYYKb/TH6M/pHbN9xObkUuCgoPDn3QVC3
z/PtoaWaJo4Ujx7OO8+GxD3G0cGTpkKVU1Fbw+cnPKakuIcQhBCEEwz2GoxEaHtn+CAoKy6Yt44UDL/DB0Q9IKkpinPc4HhyihrR0WrVjm+A9Qa0YGvUdD2x5gKv8rmJ+6HzGeY1jZ/JOnPRO5FXkcSjjEHOD5jawbVvSNsprynlh3As8u+9ZjmQeYYrvFI5lHWPhxoWm79QUnyn8EvuLKQ0zJj8Gg2LgVM4pBrkOwsfWhz8T/yS3XF2ZfH6YqCXUCfrZgrMUVxVjo7Phj/g/cNI74Wbl1urztRQp6JJux/me+7VB1+Jl7YW/nb8pc+Z8pvpO5Zbet3BjrxsbbL8+5HoOph9koMtArHRWzV6zftjlqdFPUW2oxtLMkpt63XRBW3s59uK7Od9RVl2GjbkNk3wmsSFuA8+MeQatRmtqF2gfyOczz6Ug2lvY42ntSVZZFnOD5rKw30KOZR0jPCucBaELGl3nQpPZ47zG8WfinxzPPk5eRR7+dv6mfUPdhuKsdya3IhdvG2/Wxazj/sH3cyjjULOVOz2sPdiRsgOBYMXsFfja+nI67zSfn/ycxKJEU6dpZ27HGM8xpBSn8PKElxniNoT5IfP5+PjH+Nv58/bkt033fIjrEHo59uKW3rdgrbNm3bx1rIhYwarTq9iStIW+Tn2Jyovi4WEP8+WpLzmYfrCRoB/MOEigfSBzg+by8sGXOZh+kCm+U/g97nfMhBl5FXmYa8xNo46CygIAymrKOJh+kPTSdBb1X4SVmRW1Si21tbX0cepDdF40OeU5Fyx/AeqE7bakbfRx6kNsQSyjPUdzMP0gJ7NPUlhVyJHMIzw79tlWr/FoDVLQJd0eIYRpgVRz6LQ6nhnzTKPt0/2m42rp2qpH8tUN2VuKRmiwMbcxHdvS42f6z6S8ptwkJMPchzHMfVirrg2qoAP8EP0DQANB12q0PDf2OcpqyrDWWfPgtgfZmbyTw5mHGeo21OQ518fLxouovCgm+07G19YXUDsudyt3MssyG2RPLZu+DDNhZupwbup1ExG5ESwdsrRBB2qls2LNdWtMf9uZ2/Hg0Ae5f9D9bEzYyKsHXwXUjvlk9kmTBxyWEcZPZ37i8ZGPE54ZznXB12GuNWeo21AOZhykulat+DkjYAYTvCeQWpKKjbmNydMf6TGSwxmH+fLUl4A6QqhLVwX428C/8djOxziccZjZgbMBNXTy8oGXeW7sc7hbuwNq2uvjOx9nf/p+U+G5uwfczaH0Q2xL3sb25O30derLDSE3tPrzaw1S0CVXNJZmlmxasMkUy+5KPDbysTY5j6eNJ4H2gabJzwC7gAb76yaBaw21eFh78OzeZymuLm52gVldHP2OvneYtgkhmOijrgQOcQgxbT//vrpauZomqVuCTqvjuuDrGOY2jKi8KIIdghnvPZ5tydu48487ichVH4ySUZpBeU25KUQ02nM074W/xzeR31BYWcg1Qdc06Ei9rL3Iq8jj+pDrCcsI40D6AXxtffG38zeF8pz1zkz3m46tzpatSVtNgv75yc/ZnbqbbyK/4fGRjwPw0oGXOJx5mCUDlrAqehU2OhtGeYwi2CGYVadXodfq+WDaBw1GZe3BRX1/IcSXQogsIcSpZvZPEUIUCiGOGX+ebXszJZL2w1xr3q75912BcV7jqDHUoBGaZhe2aDVanhn9DJN8JzEveB7XBF3TZLtrgq/hngH3MNqj4YTpjb1uZKzn2FbVAWopPrY+zPCfAajzIY+NeIz0knRGuo9kVsAsUx39kR7qxPZE74kAvBv+Lo4Wjo0Wy9WlDo7yGGUaZdSteXCzcsNCa8Eg10GYacy4pc8tbE7YzIH0A+SU57A+dj1mGjPWxKyhpKqEakM1e1L3MC94Ho8Of5SV16xk+YzlmGnMGOY2DI3Q8MbkNxqE7dqLlnjoXwPLgG8u0Ga3oihNf/oSiaTTGec1ju+jvsfT2vOCdUQm+05msu/kC56rv3P/JmPr/Z378+nMTy/b1othpjFjUf9F3NXvLkB9YMq2pG0E2AeYso16O/Vmw/wNRORG4G3j3WikMMpjFJmlmbhbudPLsRdJxUkmQdcIDU+Pfto00vj7oL/zZ8KfPLf3OQLsA6g2VPP6pNd5YtcTrItdxwCXAZRUlzDeezygzofUsXToUuaHzjcVmGtvLiroiqLsEkIEdIAtEomknRjhPgIzjVmD+Hl3p25U5WHtwf8m/c80T1GHn52fKUXzfG7ufTM3974ZUOcmwjLDTN49NMwk0pvpeWnCS/x71785lHGI60OuZ3bgbFadXsXXp75mduBsNELTKCMIwFHfdEpreyFasozWKOi/K4rSqJsRQkwB1gApQBrwmKIoTZamE0LcB9wH4OfnNzwxMbGpZhKJpB1YEbECP1s/U8xcolJrqKWitqJFq6cVRTF1JOGZ4SzatAiN0DDAZQDfz/m+vU0FQAhxRFGUJrMA2iJ/JhzwVxRlMPAB8EtzDRVF+VRRlBGKooxwdW06vUwikbQPi/ovkmLeBFqNtsWlMOrPtQxzH8ZVfldhUAymTKLO5rIFXVGUIkVRSoyv/wB0QogLJ2xKJBJJD+DR4Y+qz98NmN3ZpgBtkLYohPAAMhVFUYQQo1A7iZY//FEikUi6Kb52vqy+dnVnm2HiooIuhPgRmAK4CCFSgOcAHYCiKMuBG4EHhBA1QDlwq9JdHn8ukUgkPYiWZLncdpH9y1DTGiUSiUTSichqixKJRNJDkIIukUgkPQQp6BKJRNJDkIIukUgkPQQp6BKJRNJD6HaCnlFYwZojKVRU13a2KRKJRNKl6HaCHp6Uz//9dJzYrKaf6i2RSCRXKt1O0Hu5q08DOZNZfJGWEolEcmXR7QQ9wNkKc62G05nFRKUXMfzFv4jOKOpssyQSiaTT6XaCbqbVEORqzZmMYrZFZ5FbWsXXexM62yyJRCLpdLqdoIMadjmTWcKRxHwAfj2WRlFFdSdbJZFIJJ1LtxT03h62pBaUcyg+j0E+9pRX17L2SEqTbTedymD8a9v4/URak/uj0ot4fn0EBoOsJyaRSLo33VLQQ93UR02VVNZw5xh/BvvY893BJM4v8vjt/gTu/+4IGUUVPPdrBIVljb34n8JS+HpfAol5ZR1iu0QikbQX3VLQe3vYml6P8HfkjjH+xGaVcDA+j3+tOsbdXx8G4Jv9iQz1c2DNA+PIL6virb9ONzpXZHohAKcz2iZrprSyhpT8jusc1oankFlU0Wj78eQCsprY3hVQFIUNJ9LlWoJO5J8/hPPcr6c62wxJG9MtBd3X0Qq9ToOztTmBLtZcO8gLO70Z/1l7knVHU9kWncXBuFxiskqYO9CTIb4O3Djch1WHk6mpNZjOoygKkWlqhkxLBP10RjEJOaUXbPP+1hiu/WAPtR0QwskqquDR1cf5aHtso32LvzrEO1vOtLsNl8KZzBL++UM4f5xM72xTrlj2n83lYHxeZ5txRXLtB3v4Yk98u5y7Wwq6RiMYGeDE5N6uCCGwNNdy0whf4nNK8XWyBOA/604CMKW3GwCjAp2prDE0CK2k5JdTVF
EDXDivvarGwL9/PsHV7+7iri8PXTDeHptVQn5ZNWezGy58+isyk1nv7qK8qu280ihjJ7TzTHaD7aWVNeSXVXfZxVd1I5j0wouPIA7F53HP14eprtcRSy6PwvJq8kqrSMoraxSmlLQvxRXVnEwtpKqmfb7P3VLQAb5cPJL/LRhk+nvR2ABC3Wx4/9ahDPF1IC5bFfdgV/Xhr32MYZr6nnhkuuqdu9tZcPoCgv5XZCarwpIZG+RMUl4Zu2Kym22bkl8OwLHkggbb1x1NITqjmANxuSiK0iZZOdFG+xNyyxqMHDKMoZb4i4wmOos0o5BntEDQ/4rMYGt01kVHRpKWk5ir3suyqlpySqo62Zori+Q8VR/8nKza5fzdVtB1Wg1m2nPm+zlb8dejkxnq58g1gzwBmNrbzfSU7hA3GzQCousJekRaERoB1w7yIj6nlMqaWpMHXWtQOJWqxtdPpBag0wo+WzQCFxtzvjuQZDpH/dCKoigm7/N4PUGvNSjsjVUfs7rjdBarw5IZ+dIWkls4EWswKKw8lERBWcN/vtMZxVjqtAANOplMo1DmlFRRWN710jnTC9QvdUa9GP+GE+k8uvpYo7Z1nVJXHW10RxJyz33vkvJa11FKj/7ySDbqQ10koa3ptoJ+Ia4b4kWwqzXzh3qbtul1WgKc1QVJdUSmFRHkasNgXwdqDQpPrjnJ4Bf+ZE9MDi/+Hsk1H+zhZEohp1IL6e1hi42FGTeP8GVbdCZns0vYdzaHIS/8yU9hyYA6lC01dgj1PfRTqYUUlldjZa5l2+ksPtkZR2WNgTXhTadans/R5HyeXHuSv60Io7LmXMgmKqOY0UFO+Dtbse5oKv/bFE10RlEDoexoz1ZRlEbhpvOp88zrT9quO5rK2vDURl57nBT0Nqf+dyIxt+UT+CWVNYx5dSvrjrbseytpTJ0T5+soPfQW42arZ+v/TWGon2OD7b3cbRuEViLTCunnaWfKmll3NJVag8K934Tx9b4EALZFZ3EqtYiB3vYALBzrj52ljts+PcA/vg+nuKKGZ3+NID6n1BRuCXGzITqjmJySSqIzitgTmwPAvRODSM4rJy6nFFu9GWvCU1qU/340qQCAsMR8nlqrZiZU1xo4m1VCHw87pvZ242hSAR/tOMs3+xMbCHpbhV1+O55GVvHFQySbIzKZ/tZOYrOaD2GlFTb20KOM4aNDCecm6mpqDSQZBSf2Ip1EZ1JZU8tT6052aHbT5ZCQW4qztTlCQFIr0nW3RmWSWVTJ8eTCdrSufVEUhS/2xDebAbbvbA63frq/3WLcKfnl2FiY4WCla5fz90hBb47eHrYk5JZSXlVLVHoRaYUVjAhwJNDFGp1W4Gprwa//HI+1hRkjAxzp52nH6rBkCsurGejtAICnvSWr7htrOueq+8ZgbqbhyTUnTP/Qcwd6UmtQmPnOLma9u5vlO8/S19OOG4f7AGrM/tlr+pGcV86B+NyL2n00uQBvB0senBbCmvAUdpzOIj6nlKpaA308bHnkqlC+WDSCAd52xGWXkFlYgZW5Fo1QPdzHfzrOV3vVWfXqWkOrF1GlF5bz4I9H+Wj72Yu2DU9SV++eSGn+n75uMjS7uJJag0JhWTWpxjDMoXr3IyW/nBqjrV3ZQz+eXMgPB5P49kBiZ5vSIhJzywh1t8HTTm/qMFvChhNqVlKa8bPqjpzNLuHF3yNZE57a5P6NJzM4EJfX6lBUS0nKK8PH0dIUCm5ruqegK4r600p6e9iiKKo4rA1PQacVXDPIC51Ww8vzB/LZXSMY4G3P9scm88O9Y5jWx80kNHUeet15Nj8yiU0PT2J0kDP3TAjkYHweJ40x97oYvgBuH+1HaWUNM/q64etkxQ3DvHn86j5cM8gLWwszbv/sICNe+otX/ohi/9lc4prwRI8lFTDEz4Gl00IIcrXm2V8jOBCnCl8fT1scrMyZ3tedvh52nM0uJaOoAi8HS3wcrdh8KoOfjqTwyc44DAaFOz47yJNrT7TofqXkl2EwKKYSC9uisy4aQ41IU+9B3VzF+dkpiqKQXliBrYUZBgVySiqJMhZXs7Ew43B8vqlt3ehisK8DZ7NLWtURfbrrLM9eYp51Ta3BlM5aWVPLplPpF3zfMcbRyJ8RmZcUY94dk90oy6ol2VDhSfmXlP2TkFNKgLM1fs5WLV5QV1xRzQ5jNlXdCKs5jiUXsON0VoPw4IX4ZOdZ7v/2SIvaXi5R6ep9Tm5mNHXK+P1tzcilNSTnleHbThOi0B0FPeIXeMkN8uJafWhdaGXf2RzWHU1jam83nKzNAbh5hC9DfB0AsNXr0Gk1TO7tCoBOK+jlYdPgXI7W5njY6wEYH+IMwLrwVGwszAhxs+G7e0az4aGJvDJ/IAf+M52l00IBePvmIdw43AdLcy1fLB7JYzN7McLfiS/2xHPbZweY9tZO/rP2hGnRTVZRBakF5Qz1dcDCTMtL8waQlFfGs79GoNMKglzO2RXsZkN2cSUxWSV42OkJdLE2hZgyiir4Yk88hxLyiEhrXJ2yqsbQYKFPQk4pU97YwXcHEwlPLADUL/mFQjiKonAqVT13dEYxx5IL6P/cZo4mnRPp3NIqqmoMDDbe64zCCpN43jjch9OZxeSXqpO/dfHzGX3dqKg2mDrXlrD+eBo/HkqirKqmxcfU8d2BROa8v5tTqYWs2JfA/d+FE24MezVFTKbaCcfnlJpGErUGpdEkdlNEZxSx5KvDvPJHlGnb4YQ8Bj6/+YJVRN/56ww3fLSP97bEmLZtOpXOwi8OXnDBVlFFNbmlVQS4WOPnZNUi4correLTXXFU1agjwrSCC4feHvjuCIu/OszE17dfNExXa1BDIJsiMsgpqSS/tMr0fWgP6rLcmkpIqKk1mEJ/rZlbaClq0kR5u2W4QHcUdL091FZBcUarDw1wtmaQjz2vbowmp6SSBcYQSHMM8XXA1sKM3h62WJhpm203yMcBK3MtaYUVpuHUhFAXk+C72ekxN2t8q0cFOrF0WijLFw5n77+n8cPfRnPfpCB+PJTMLZ8eoKiimqPGydWhfg4AjAtxYd0/xvHM3L68edPgBucNdlXFPS67FHejoANc3d8dCzMNr2+KBjAJ46ZTGaZh9JNrT7Dwi4Omc/18JIUag8IvR1MJT8o3zcpvi85q9j6k5JdTWF6NuZmG0xlFbDyZTlWNgeU7z4Vq6iY9695PZlEFUelFuNiYM2egOrIJM44I4nNKsLfUMTLACWh5HN1gUIjNKqG6VrmkxTMbT6nfre8PJrLysDrhfSKloNn2sVkleDuo92dzRAY1tQYWf3WIGe/suqCXWjcRX2NQOJ5cYPLuVx9OpsagcDCuadu/PZDIe1tjsLEw44dDSSYB/3RXHLtjcvh2fyJf7Y1n6At/8u+fT5Bez6OuC7EEOFvh72xNdnHlBTu9EykFjH9tGx9si2WwrwNzB3qSV1rV7AiisLya9MIKpvZ2Jau4ku0X+L4AHIzPJau4ElDXHLy4IZLrP9pLbknlBY+7VOo6yaY6sricUiqq1RFPYm4ZWcUVLNsWc0mLBGtqD
bzyR1SDCeickirKq2vxdWyfDBfojoJuq/7TU9z6VYZajWDVfWO5bZQvg3zsmWpcdNQcOq2Gp+f2ZenUkIu2qxMdn0v8sDzs9YwLceGpOX1ZfucwItMKufPzg3x3IBGdVtDf61zIZ6ifI3+bGMS8Id4NzlGXc6+ez4JQd1Xg75sUxLQ+btQYFPQ6DQVl1ZRW1vDe1hhe2hCJoijsPJ1NWGI+heXVGAwKa8NT0GoE4UkFnEotZM4AT0LdbPjlWCofbI0xTXquP57G1ig11FDn+c/o605mUSW/n0hHCPgzMtPk2dfFX+tGQ5lFFUSmF9HX045BPvZYmGnYa5xEjs8pJdDFmhBj7Z6zLYyjJ+eXmf4x98bkNNiXkFPKplPNf3fyS6s4nJCHhZmG1WEpxGWrdp9MKSQ5r4zbPj3QqNRCTJaabTTE14FvDyTyzx/C2R2TQ3ZxJXvOu3591oancCy5gPEhzuSXVZOYW0ZFdS2bjB1KXQjvfH4OS2awjz0f3zmMvNIqfj+RTnJeGeFJBeh1Gt7dcoYXf4/E1daCdcdSeeWPaNOxdZ+Dv7O1yVNszhvNKank/m+P4GRtzi//HM+6B8bhY+zYmwu71I1Q7hjtj6uthSldF9TO/Pz4+2/H07Ey12JlrmXn6Wz+jMikqsbAj4eSaIpPd51l/9kLzzulFZQ36MTqUxdySc0vp7yqljnv7TZ9H+rChdbmWpLyylh9OJk3/zzDseT8Js91IY4lF/DprjhWGzPgoH7KovTQz2Hrrv4uybykwy3Ntbx6wyDWL53QpNd8PreO8mPWAM+LthsbrIZd6jy1y2HWAE+W3T6M2KwSdsfkMCbIGb2u+RFCHX5OVui06mSLh52eBcN8+OHe0Qz3d+KmET7otIK7xwcC6pc+IaeU9MIKdp7JJre0CkVR47L743JJK6zgkelqmKjGoDDUz5Gr+rlzKrWIt/46w3/WniQms5iHVx7lnhVhXP/hXv6KzESrEcwb4gWoI4G7xwei02i447MDzHp3FxuMy/0Hetuj1QhS8suJySyhn6cdep2WMUHOppz6+OxSglyscbaxwM3WwpQtdDHOGEMgjla6Rse8/dcZ7v8u3DQyOZ+t0VkYFHh6bl9qDQo2FmaMC3bmRGoha8NT2R+XaxLcxNxSiiqqySyqJNTNlqfm9MXRypzNEZncMdoPe0tds9epy7bo42HL03P6AXWx52yKK2twsNKZ1kGczig2ee+F5epKwym93ZgQ4kKomw2f7jrLdwfVCdlltw2jrLqWUDdb1v1jPHMHerIvNsd0fN2iIn9nK/p72QHnRkTn89afp8kpreKThcMZ4uuARiPwsjcKej1hziqq4MXfI9kXm2Pq6EPdbRgf7My+sznGeZNyrvlgN7d+esDk8ZZV1bDxVDpX9XVnRIATa8JTKKmswdXWgm8PJDaaH8gpqeTVjdF8tKNxqYs6fgpLZtpbO/h7EzH5ogp18t3f2Yoag8LW6Ewi04vYHq1+306lFqHXaRgb7EJibqkpzHYovvWCXve9q5++bEpZlIJeD70DmOkvyUNvT8YGqYLu00b5pVf39+Dk81cT9cIsViwZ1aJjzLQaApxVL93dTo9ep2VcsAsA0/q4c+zZmUzro45KjiYVUG4cqi/bdu4f5HB8HisPJ2OnN+PeSUH09VT/6Yf5O/Dw9FB+WzqB/8zuw+GEfB788SiWOi0vzutPXE4pa8JTCHG1MXnfADeN8OFfM3rh72xNYXk1vx5Lw1yrwcUo0qvCkqmqNTDGeP8m93IlLruUvyIzSSusoL9xMvqusf7sOJ3NyQtkz9RRN8F42yg/ojOKCU/Kp6iiGkVR2G+cTH7i58bPpc0orODXY6l42OlZOMafyb1cWTTOn9GBzpzNLuHX42pmxO6YbA7E5TL5jR28tlH1fkPdbBgV6MSmRyax599Teen6AVzd352/IjObDLscjM8jOqOYJeMD6OVug6VOy7HkAtaGp+BiY87to/yIySphS2QmV7+7i++MGTSH4vMwKKoDIYTgiVl9iM8p5ZOdcQz3Vzvd1X8fy/f3jsbawoyxwc7kllaZOrmE3DLc7SywMjcj0MUaXydLdp5ueuXz7pgcpvZ2ZUC9hAAvo8OSXlBBYVk17/x1hqlv7uCLPfEs2x5LbFYJFmYafBytGBfiQk5JFcdTCnngu3BTuYG/IjNIzC3lho/2UVheza2jfBkd6ESNQcHRSsfL1w8gs6iSj3ecbTDJvD06C0VR5xiauqeH4vN4/OcTmGs1nEgpNM3FgDoPUBc/n9lPdQp/OaqW1I42fl9OpRbS19OOIFdrkvPLTXM/h1qQiXY+daPMEymFpg6sbgR7qaP4ltD9BF0IsPW4pBh6ezLIx55nr+nHvKFebXZOrUatU6PRtDzFqS6OXhe/r4+1hZnpH7K+5xqWmI+9pY7BPvZsisjgj5Pp3DLSF71Oy/2Tg5g/1Bs3W7WDGOhjz6JxAbjZWhCdUczCsf4sHBvA6r+PxdNez8RQF1xtLXC00uFuZ0Fvd1semBLMj/eN4fNFIzA30+Bhr0ejEbjb6Skoq2aonwNTjBPQk3qpv/+95gR6nYYFw9Sw0l3jArDTm/HeVnUSsKK6lrXhKby5+XSjsMaZzGK87PWmmPwNH+1j7vu7ickqIbu4koemh2Kh03L/d0corVTjx78eS2XMq1vZHZPDDcO8EUKw4u5RPH51Hwb52KMo6tyEtbmW/Wdz+Xa/KrA/HFRDA3VhIVA7dSEEcwZ6UlxZw+qwFCqqa1l3NMXkpX25Jx4HKx3zhnhjptUw0Mee346n8WdkJreN8jMtdnvh90gA3tkSQ2F5NfvO5mBhpjHNQczo584P944hyMXaNPoaGeCEi40FcM7R2H9WvUcJOaX4Gzt9IQSTe7my72wOp1ILmfD6NlOHmVpQTkp+OaMDnRvcWw97PUJAfG4pc97fzXtbY5gY6sq1g70IS8jnREohwa42aDWC8SGqM3Hn5wc5llzA+7cNxdfJkreNE7rphRV8vWQU44JdTB36rAGeXNXXnRn93Hn7rzPc+cVBk7BujcoyfvYG00R9fbZFZ2GmESy7fRiAKRPsdEYxo17ewkM/HjXeMw8Adp5RzxeTWUy1MbOpv5cdfk5WVNUYyC+rxlZvRlhCfqvi6KWVNRxNUlONSypriMsuITmvjBX7Epg70BMrc7MWn6u1dD9BB7DpeoIuhODuCYG42TYW0o4k2O2ch94UbrYWaDWCfcY4ZJ03PcLfkVGBTsRll6IoCneNDQBg3hBv3rllSINz6HVaHr4qFEcrHfdODAKgr6cde/89jf/M6YsQgoVjA7hvUnCDfNv+XvZ8cudwHr+6t9FGVXSenNXH1C7Y1RpvB0vySqtYMMwHBys1C8lOr+NvE4PYEpXJRztiue/bIzy6+jjLtsfyyKpjDTI7zmSWEOpuywBve767ZzQPTw8lOa+clzaomSQ3DPXmg9uGEpddwn/WqkXcvtmfSJCrNWseGMdjM3s3eL/1PdQHp4dSWlXLhpPpDPZRt5ubaZocRo8PcWGAtx3/75dTDH/x
L/616jg3Lt/HK39E8WdkJkvGBZpCaUN9HcgtrcLf2Yp/Tg0xXTMpr4wpvV3JL6vipd8j2ROTw8gApwaT9CMDnNj22BTmDmocGvR1ssLXydL0eSfklhHofG6uZXIvN8qqarlnxWFS8sv5Yo+aPVbnlY4OcmpwPp1Wg7utnp/CkkktKOfjO4axfOFwbhzuQ1WtgYPxeabOzdvBkhA3GwyKwicLh3PNIC/uHh/ImcwS9Dota/8xjsnGDnywjz33TQri75OC0GgEny4czovz+nMqtYj5H+3jP2tPsDsmm+sGe6ER5zqo6lqDKfa9/2wOQ/0cGBvsjJW51jQa+/FQEkKo6x7s9GYM83PATCOorlXQaQVlVbVsjsiguLKGkQHqyus67hzjT3FlTYOMo/TCcv7x/RGyixtO3B5JzOPaD/bw5Fp1ovvvk9X/jaPJBbz4eyQaIXh6bt9Gn1Fb0j0FvQt66F2Fm0f48sSs3rjZWjS530yrwcNOT05JJeZmGm4wesDDAxwZYZzYnTXA46JxvjtG+3PkmRkmTxDUKpha42ji0Rm9uGdCYKPjpvZx49rBXiZbH7kqlNFB57xAIYQpXXTJ+IAGx/59chBzB3nyv02n2R2TzSvzB/LtPaPIKak0lV+oNailB3oZJ4QnhLrw4LQQPO317DqTjYedHn9nK8aHuPDw9F6sP57G6rBkjiTms2CYD8P9HRuNiFxtLfCy1xPiZsPto/1M7/Gl6wcyvY8bQ3wcTNvqo9NqWPeP8Twzt6/qcd48mKoaA5/uiuPawV4snXZusn1ssDNajeCV+QPR67R42etNKbXPXtOPxeMC+OlICjFZJab5mpYyLsiFA3G5FFVUk1NSib/Luc92bLAzOq0gs6gSH0dL/jiVQX5pFQfj8rDVm9HHw67R+bwc9OSUVOFuZ8HM/qq3OyrAyTQnFVpvtPLV4pH8+a9JXG1sd9soP56Y1ZufHxhrGk2C+r18ak5fAlzOjR4Wjg1g35PTTJlfpVW1XD/Ui0E+Duw9m0utQeFfq44x9/09rA1P4WRqIWODXdBpNYwKdGLf2VzTSG72AE/W/WM8yxcOx0yrwdsY9qiz/0tjOdtxwS74O6k22FqYcfsoP0AN59Txyc44/jiZ0WDidtOpdG799AAJOaX8djwNCzMNNw33xVZvxntbYvgzMpOHrwo1jZDbi/bz/dsTW0+I3drZVnRJ/J2t+ceUC2fleDtYqpNDTlbM6OfO9weSmNnPA3c7C6b2duUh42ToxWhNKKgppvd1Z3pf90bbH5keyvQ+boS42TbYbmGm5YNbh9LP044AZ2vmDvJEURSG+Tnw0Y6znM4sVlfQ1hgIdT93rJlWw22j/Hj7rzOm2DOoHcTKw0k8ZfTSrxvcfLjs5fkDsTTXYqfXMTLAkeKKGgZ42/HxncMxXGAxkU6r4W/GUQyoo5QtUZncNymoQScwuZcr4c/MwN64JFwIwZTerhSV1xDkasNz1/bnusFebDiRzk0jLpxuez7jQpxZFZbMr8fUmHFAPQ/dxsKMiaGuZBVX8NoNg7jmgz38eDiJg/F5jApwarKj8nSwhKQC5g/1Me23NNcyMsCRvbG5puwqaDwBqNdpL/r9rI+1hRlPzemLp72ePyMyGRfswpHEfJbvjGPeh3s4lVqEtbmWZ345hUGB8cbOblywM6/8Ec3/Np2mqKKGW0f6MtDn3EjLz8mKxNwybhnhy4YT6YQnFdDHwxZXWwtqag2YaQRD/BzwdbLCz8mKVYeTuX20HxVVBlPmyuqwZJZODUGjEbzzVwzBrjasvG8MhxPyqaypxdJcy2AfB/bE5jClt6tpNNuedFNBd4eqYqgsAQubi7eXNMDLQQ3HBLpY42lvyeZ/TTLt+6qFE7DtiZudnunNhIw0GsE/66WRCiF4+KpeLPryEL8eTSPAxZqZ/dxNMfk6bh3py1d7402eIqji8shVofx7zUmGGf95m2Nqn3Mprh/fMRzFeG1zs9Z1ar09bBs8cav++7A/r77H2zcPaTApONTPsVF9opYwpZcbWo0weaH1BR3gozuGoSiqKA/3d+R/m9Qne902yrfJ8/kYvcwbhzdMm50Y6moU9Mbv73JZMj6QJcY5gptH+BKbVUJGYQX/ntUHFxtzHv9ZnXMZYpxbmNLbjVc3RvPl3ngCXaxNMfo6ApytORiXx6hAJ3ydLEnOKzclEJhpNSwZH8Bwf3XE+v+u6ce934Tx3K8R6HVayqpquX9yMMt3nuVAXC6+Tlaczizmmbl9cbAyZ0a/c07KnIGeFFdU894tQ5vsHNuabiroxlhhSaYU9EugbthXt/CouzO5lyvHnp2BnV7X7KjBzU5P+P+b0aiGxoJhPuw8k8315+X0XwhHYyikI2iLmh/2VjpGG0MQQIMYMdAgJfaThcPZeCqD7KIKbhzetKAvHOtPX0+7RiOoRWMDCHKxbhBKaQ/8na35ZOEI0981tQY+3nmWIBdr09xCL3dbDj99FVlFlbjZWTT6XvxjajBzBnqi12np7W5Lcl65acU3wNNz+5lez+jnzuJxAaaCfVf1deeRq0L54WAi3x1MNE0cT+vTeF3L7aP9uH20X5u994vRTQXd6GUVp4NzcOfa0g2pE/SAHiLogGny9EI0JY5mWg0f3TG8PUzqUszo586+s7m42lpgbdH8v72LjQULx/hf8Fw+jlZNpudammtNMemOxEyrYd0D49GcNyPoYmPRYI6nPp72lngac+oHGcMiowKdmmwLqpc+s7871uZm9PW0w9xMw6JxAXywLZZjSQUEuVgT1M4dWUvoppOidatF5cTopRBkXFHa1NBf0jO5yjhXEejcczrx+thb6bDVX1pJ2vsmBbH5kUkXPF6rEYwLdmGwr4Np8nfptBB6uduQVljRpHfeGXRPQbcxxqikoF8SY4Oc+W3pBIZdQjxW0j3xdbJici9XxoW0LkPmSkCv05py81uDhZmWt24agpe9nuuHtjxk1550z5CL3h7MLLvcatHughCiwYy/5Mpgxd2dP+Hd0xjoY8++/0zvbDNMXNRDF0J8KYTIEkI0WVxaqLwvhIgVQpwQQgxrezMbXVSNoxd0jwcKSCQSSUfQkpDL18CsC+yfDYQaf+4DPr58s1pA8DSI+g2i/+iQy0kkEklX56KCrijKLuBCRaXnAd8oKgcAByHExcsTXi5Xvwxew2DtvbBvGVS0X1F8iUQi6Q60RQzdG0iu93eKcVv7Brh1lnDrD7DuPvjzadjyHHiPgDEPQHk+7HgVnIIheCq49QP3fuAQQKPcJolEIukhdOikqBDiPtSwDH5+bZBsb+cJi36D1CMQvUENwfy0SN3nOwYqi2H7y+fa66zAvb8arul1Nbj2AXNrqCqD4z+C11Dwbv8pAIlEImkPREseaiuECAB+VxRlQBP7PgF2KIryo/Hv08AURVEu6KGPGDFCCQsLuySjm8VQCyd/gppKGLpQ9cYrSyD7NGRFQuYpSDsGKYdAMRbPt/eF6nIoy1HrrF/7PviOBHs/0HbPJCCJRNJzEUIcURRlRFP72kKx1gNLhRArgdFA4cX
EvN3QaGHwrQ23WdiAz3D1p46SbEjcCzkxkHMGairUDmDX/9QQDoBLb7jhE9Vrl0gkkm7ARQVdCPEjMAVwEUKkAM8BOgBFUZYDfwBzgFigDFjSXsa2GTau0P/6xtsDJ8LZbeqCpV1vwKdT1RDM4Ntg+GLQXtpKNIlEIukIWhRyaQ/aJeTSlpTlweEvIPp3SD+mTrDO/0QNx0gkEkkncaGQi0z5aA4rJ5j8ONy3A25fDYZq+GoWHOiYNHuJRCJpLVLQL4YQakbM33dD6NWw6UnY9BQYDBc/ViKRSDoQKegtxdIBbvkWRv0dDnwIa+5Ws2kkEomkiyDz8lqDRguzXwd7b/jrWTXP/fbV6naJRCLpZKSH3lqEgPEPw9y3IXYL7Hitsy2SSCQSQHrol87Ie9QVqrveAAc/GLawsy2SSCRXONJDvxzmvAlBk2H9Utj8NHRSCqhEIpGAFPTLw9wK7lgDI++F/ctg95udbZFEIrmCkSGXy0VrBnPegIoC2PaSWgNm8C2dbZVEIrkCkYLeFggB8z5USwb8+k+1CmTgpM62SiKRXGHIkEtbYWah5qk7B8PKOyD9RGdbJJFIrjCkoLcllo5wx89gYQvfLYC8uM62SCKRXEFIQW9rHHxh4Tow1MA316thGIlEIukApKC3B669VU+9NEf11KvKOtsiiURyBSAFvb3wGQ43r1CfkvTX/+tsayQSyRWAFPT2JHQGjF0Khz9Xn3kqkUgk7YgU9PZm+rPgORjW/l19tqlEIpG0E1LQ2xszC7jle/X3j7dBeUFnWySRSHooUtA7AgdfNUe9IAnW3AOG2s62SCKR9ECkoHcU/uPUEgGxW2D7y51tjUQi6YHIpf8dyYglkBoGu9+GoClg5QK2HurzSyUSieQykYLe0cx6HRL3wzfzQDGAUxAs2agKu0QikVwGMuTS0VjYwM3fwIAb4ar/QnGmuqK0orCzLZNIJN0cKeidgccAWPAZTHgEbvsRcs7A74+qD51O2CsnTSUSySUhQy6dTdBkmPIf2P4SxG2HslzwnwDzP1YfbSeRSCQtRHroXYGJj0Kv2eDSC6b9P0g/Bh+Ogb3vQW1NZ1snkUi6CdJD7wpotHD7ynN/D7wJNj0Jfz0LyYdgwReg03eefRKJpFsgPfSuiKO/Gluf9TpE/w4rroXMiM62SiKRdHGkh96VGXM/2LjBhv+D5ROh92zodbX6II3KYrB2g5Dp6iPwAMrzwdxWfc6pRCK54pD/+V2dATeoi5D2vAPHV6oee30G3QJz34bSLFg+CZwC4IbPwa0PpIbD8R9hxgugs+wM6yUSSQciBb07YOUEM1+E6c9BUQpUFKn57Cd/hh2vQlYUmOlVT70oDT6dDBP+BQc/gfI8cA6F0fd19ruQSCTtjIyhdye0ZuAYAJ6D1BWmk5+A21dDbiykHILZr8MD+yFwkir0Gi14DIS970JNVWdbL5FI2hnpoXd3QmfA3ZsgJQwG36Z66bevhshfwK2/WuHx+wWw+T9qeMZ3VNtcd/dbcOhzmPsW9JnTNueUSCSXhVAUpVMuPGLECCUsLKxTrn1FoShqHfYzG9W/r30fhi+6vHMe+gz+eAz0DlBRANe8qxYek0gk7Y4Q4oiiKCOa2idDLj0dIdQc98fjIHiaKsQ/LYG3+0PivtafL+kAbHwCes+BRyPBewTsX6Z2HBKJpFORgn6lYO2sLlCy84KYP8FQA6vuhPyExm1Lc8/lvRsM5+LvFUWw9l6w94X5n4C5NQy7S43hp4V32FuRSCRNI2PoVxJWTnD/XmM2TDp8Pg0+nQJj/gGj7lXz28vy4MuZqkj7jYX8RPXY+3aoK1cLU2DJJtDbqdv7zVO9/hM/gffwznpnEokE6aFfeVjYqJ61Swgs/gN8x6hPUHpnIKy5F765DgqSYdxDUJqtZtSU58NXs+DESpj0OPiNPnc+SwfoNQtO/SwzaSSSTqZFgi6EmCWEOC2EiBVCPNnE/sVCiGwhxDHjz9/a3lRJm+MxQI2v378Hes2EpP1Qkg03fKrmvT94BG5fpaZD5sWp8fJJTzQ+z4glqvhvef7ctoJkKMnqsLcikUhaEHIRQmiBD4EZQApwWAixXlGUyPOarlIUZWk72ChpbzwGwo1fNr9/2F2qJ+43tumyAsHTYNTf4cCHYKiG6nI49gOY26jPUR1087nyBE1RUQgWdhduI5FILkpLPPRRQKyiKHGKolQBK4F57WuWpEshhBort3Frvs3MFyHkKjj8hVpuYPhitfzAuvvgp0Xq5GtdKWCDARL2QFUZxGyB/wXDx+Mg7Es1b14ikVwSLZkU9QaS6/2dAoxuot0CIcQk4AzwL0VRks9vIIS4D7gPwM9PPryhR2FmAXeuUcXaUANm5uqTl/a+B9tfgchfQaOD/ter5QkS94Kdtxqfdw5Rz/H7v9Tffa+Fac9CTQXo7aEwGXa8po4SRt6rPhSkrTEY1LCRrXvbn1si6SAuurBICHEjMEtRlL8Z/14IjK4fXhFCOAMliqJUCiH+DtyiKMq0C51XLiy6gsg+rQp4ZiScWAUImPAwRP2mVo1c/Ifq/WdFqcK/911VzOtj6wW1leoTnQYsUMM/yYdUsdeYqemYhhoQWrB2Bbe+amdQlKp2NjbuasfhGAglGXD0ezVjx8oRJj6mVrQ8tQbG/lN9gpSFTWfcKYnkolxoYVFLBH0s8LyiKFcb//4PgKIorzbTXgvkKYpif6HzSkG/QqkuV3/XVX80GEBzXuQv9yzE7wRLJ3UlqqEGBt8OQgP73oed/1Nj9bZeak0bQzUUp6sjAEON6mlXlzV9fVHvWraeUJwBOiuoKgb/8WrHY6ZX5wWG3qmWVDj6HdRWqR1CnzmgswYLW/Xaejv1WbAlWWonZG4Nrn3Awbd190VRVLvN9Oq55XyCpBkuV9DNUMMo04FU4DBwu6IoEfXaeCqKkm58PR/4t6IoYy50XinokkumMAVqq9VCZU0Jn8GgZuVUlaiLoGor1bz73BjIiVGLlg1dqIpu0gH4dak6RzDtGVXAT62BiHWqJw/Q5xpV/JMPQsaJltnoPVw9rrb6XPgpL17tfGqr1ZFFfqJqo99YdSSRHa0ea+Wsbht9v1p7J+0YnN6glkMuTFH3u/dXbQ6c3HiiWlHg7Da1c3LtC9WlUJwJ/uPAbwxodQ3bFyQDinyGbTfhsgTdeII5wLuAFvhSUZSXhRAvAGGKoqwXQrwKXAfUAHnAA4qiRF/onFLQJV2a2mpVFG09wHPwue1leerv8nxVoKtKVIG08VAfE1hRqArvka/VDqQ+Nu7GkYlQOxMHfzUclLBHHY30maOKcXY0xG6Bkkw1hKTUqmElr2Fg76OGnVLD1VGFlbO6lqC2Sh39VJep+wsSm35f5rbqHIT3MDWzKO2YOomNohZ3cwwwHp+sjhRs3NR74NJbXW2cE6OG0Goq1A7Fvb86wgHIOaMuSAuaAmaWao1+M716HY3mXHkIIdQRTVUpOAWqdtdWqSGyplAUdQRWlKaOsDRata1jwCV8sN2fyxb09kAKuqRHoyhQWaSGZ2qrAEUNx7
SU6nI11FOcDh6DVJG0dKi3v0IV/Yi16tyETq9eS2cJ5lZqyGjgTWqnY2Gjdhjxu9SyD/E7z5V80JrD8CWqyIZ9qdpqbqOObKpK1U6ltrKhbUKrdjCm7UIV7hpjOM3cVhXdigLjNYxzGKXZ6nvwGqraXlsF7gMh76zagfa9Bqxc1LCZpYMaDss5o3YilUWN75HnYHUC3WOQWpaiJEM9piRL7XBrKtT5lMpiSD+uPoTdzkvNpPIZAW791HuSGaGOYoKnqQ9rD5yodrS11eooKnaLen88BqlrNyxs1VFgaphqq88oyDwF+fHqvfEcrF4nP1G1v7JI/dtMb+zUFHXE5+jf8u9D/dsvBV0ikTSgvECN/Vs5nQvB1NaowiU050JZiqKOSrIiVJF06QVOwapYntkMxWmq8FeVqnMKToEQ8QsoBlUADdWqyBZnqN5+QRKkHFaLuzn4wulNqkhqzODEavU4jZl6LRs39XouvcC1t9rJgCqiBUnqyuX04w3fl5le7TysnNTXxRmqOHsOVkc+pblg5wlpR9XzWLmooxWhVTu6urkXjZm6vxFCtUsxZkWB2inWnrdKum5k1RzjH4EZ/23551X/1FLQJRJJt0JRWjYxXJanhoCsnFQh19u37LjSXNWjd+17blK+ukL12FOPqAKts1Q9/OCpqminn1DnUApT1P1BU9XOMGk/eA4BryFqJ5l8SB3ZuISqTwuzdFDnSGprQKB2mA7+4Bx8SbdGCrpEIpH0EGQ9dIlEIrkCkIIukUgkPQQp6BKJRNJDkIIukUgkPQQp6BKJRNJDkIIukUgkPQQp6BKJRNJDkIIukUgkPYROW1gkhMgGmqkgdEFcgJw2NqctkHa1nq5qm7SrdXRVu6Dr2nY5dvkriuLa1I5OE/RLRQgR1twqqc5E2tV6uqpt0q7W0VXtgq5rW3vZJUMuEolE0kOQgi6RSCQ9hO4o6J92tgHNIO1qPV3VNmlX6+iqdkHXta1d7Op2MXSJRCKRNE139NAlEolE0gRS0CUSiaSH0G0EXQgxSwhxWggRK4R4soOv7SuE2C6EiBRCRAghHjZuf14IkSqEOGb8mVPvmP8YbT0thLi6ne1LEEKcNNoQZtzmJIT4SwgRY/ztaNwuhBDvG207IYQY1k429a53X44JIYqEEI90xj0TQnwphMgSQpyqt63V90cIscjYPkYIsagdbXtDCBFtvP46IYSDcXuAEKK83r1bXu+Y4cbvQKzR/hY8tqfVdrX6s2vr/9tm7FpVz6YEIcQx4/aOvF/NaUTHfs8URenyP4AWOAsEAebAcaBfB17fExhmfG0LnAH6Ac8DjzXRvp/RRgsg0Gi7th3tSwBcztv2P+BJ4+sngdeNr+cAG1EfhjUGONhBn18G4N8Z9wyYBAwDTl3q/QGcgDjjb0fja8d2sm0mYGZ8/Xo92wLqtzvvPIeM9gqj/bPbwa5WfXbt8X/blF3n7X8LeLYT7ldzGtGh37Pu4qGPAmIVRYlTFKUKWAnM66iLK4qSrihKuPF1MRAFeF/gkHnASkVRKhVFiQdiUd9DRzIPWGF8vQK4vt72bxSVA4CDEMKznW2ZDpxVFOVCK4Pb7Z4pirILyGvieq25P1cDfymKkqcoSj7wFzCrPWxTFOVPRVHqnlB8APC50DmM9tkpinJAUVXhm3rvp83sugDNfXZt/n97IbuMXvbNwI8XOkc73a/mNKJDv2fdRdC9geR6f6dwYUFtN4QQAcBQ4KBx01LjkOnLuuEUHW+vAvwphDgihLjPuM1dUZR04+sMwL2TbAO4lYb/ZF3hnrX2/nTWd/BuVE+ujkAhxFEhxE4hxETjNm+jPR1hW2s+u46+ZxOBTEVRYupt6/D7dZ5GdOj3rLsIepdACGEDrAEeURSlCPgYCAaGAOmow73OYIKiKMOA2cA/hRCT6u80eiGdkp8qhDAHrgN+Mm7qKvfMRGfenwshhHgaqAG+N25KB/wURRkKPAr8IISw60CTutxndx630dBx6PD71YRGmOiI71l3EfRUwLfe3z7GbR2GEEKH+kF9ryjKWgBFUTIVRalVFMUAfMa5EEGH2qsoSqrxdxawzmhHZl0oxfg7qzNsQ+1kwhVFyTTa2CXuGa2/Px1qnxBiMXANcIdRCDCGNHKNr4+gxqd7Ge2oH5ZpF9su4bPrsHsmhDADbgBW1bO3Q+9XUxpBB3/PuougHwZChRCBRo/vVmB9R13cGJv7AohSFOXtetvrx57nA3Uz7+uBW4UQFkKIQCAUdRKmPWyzFkLY1r1GnVA7ZbShboZ8EfBrPdvuMs6yjwEK6w0J24MGXlNXuGf1rtea+7MZmCmEcDSGGmYat7U5QohZwBPAdYqilNXb7iqE0BpfB6HeozijfUVCiDHG7+pd9d5PW9rV2s+uI/9vrwKiFUUxhVI68n41pxF09PfscmZ2O/IHdVb4DGov+3QHX3sC6lDpBHDM+DMH+BY4ady+HvCsd8zTRltPc5kz6BexLQg1e+A4EFF3bwBnYCsQA2wBnIzbBfCh0baTwIh2tM0ayAXs623r8HuG2qGkA9WoMcl7LuX+oMazY40/S9rRtljUOGrdd225se0C42d8DAgHrq13nhGoAnsWWIZxFXgb29Xqz66t/2+bssu4/Wvg/vPaduT9ak4jOvR7Jpf+SyQSSQ+hu4RcJBKJRHIRpKBLJBJJD0EKukQikfQQpKBLJBJJD0EKukQikfQQpKBLrmiEWgHSqrPtkEjaApm2KLmiEUIkoOYA53S2LRLJ5SI9dMkVg3FV7QYhxHEhxCkhxHOAF7BdCLHd2GamEGK/ECJcCPGTsTZHXc35/wm1hvYhIURIZ74XiaQppKBLriRmAWmKogxWFGUA8C6QBkxVFGWqEMIFeAa4SlGLnYWhFnWqo1BRlIGoKwvf7VDLJZIWIAVdciVxEpghhHhdCDFRUZTC8/aPQX0owV6hPvVmEepDOer4sd7vse1trETSWsw62wCJpKNQFOWMUB/1NQd4SQix9bwmAvXhArc1d4pmXkskXQLpoUuuGIQQXkCZoijfAW+gPsqsGPWRYaA+HWh8XXzcGHPvVe8Ut9T7vb9jrJZIWo700CVXEgOBN4QQBtRqfQ+ghk42CSHSjHH0xcCPQggL4zHPoFYLBHAUQpwAKlHLAkskXQqZtiiRtACZ3ijpDsiQi0QikfQQpIcukUgkPQTpoUskEkkPQQq6RCKR9BCkoEskEkkPQQq6RCKR9BCkoEskEkkP4f8DGXMhkWX7MfAAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "plugin.loss_history.plot()" + ] + }, + { + "cell_type": "code", + "execution_count": 51, + "id": "af9d6df1", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
fixed acidityvolatile aciditycitric acidresidual sugarchloridesfree sulfur dioxidetotal sulfur dioxidedensitypHsulphatesalcoholquality
053.753993-2.4752390.404968406.8983861.788962450.0737241221.5510775.69717712.45137714.83544581.5156960.0
1241.769932-33.9059337.4401881722.53605348.0345561312.2644572141.11936831.25907483.65474928.591759489.1726743.0
225.3449040.769463-11.237007-335.794326-3.595284-234.179124382.9075157.63768417.7483003.38029673.7010481.0
315.635557-28.371864-19.808469800.08844661.404066-596.053591-1749.79750528.376345-71.868790-14.556346-38.3151791.0
4-0.796959-8.546869-4.726590128.3430281.083628-288.3521041184.6802738.08150023.0128282.16859736.6728400.0
5-31.203381-39.052177-57.6510321269.158981-22.793850101.490751-661.9978235.01273819.61582226.791456-63.7736783.0
6-120.526480-49.314650-67.642982650.13681665.155843598.106999-3468.7530373.75056652.556860-108.310847-91.8163103.0
713.172627-7.196406-20.153565746.262383-30.8466881592.8153971610.699379-15.57666027.31969245.376814135.8714220.0
\n", + "
" + ], + "text/plain": [ + " fixed acidity volatile acidity citric acid residual sugar chlorides \\\n", + "0 53.753993 -2.475239 0.404968 406.898386 1.788962 \n", + "1 241.769932 -33.905933 7.440188 1722.536053 48.034556 \n", + "2 25.344904 0.769463 -11.237007 -335.794326 -3.595284 \n", + "3 15.635557 -28.371864 -19.808469 800.088446 61.404066 \n", + "4 -0.796959 -8.546869 -4.726590 128.343028 1.083628 \n", + "5 -31.203381 -39.052177 -57.651032 1269.158981 -22.793850 \n", + "6 -120.526480 -49.314650 -67.642982 650.136816 65.155843 \n", + "7 13.172627 -7.196406 -20.153565 746.262383 -30.846688 \n", + "\n", + " free sulfur dioxide total sulfur dioxide density pH \\\n", + "0 450.073724 1221.551077 5.697177 12.451377 \n", + "1 1312.264457 2141.119368 31.259074 83.654749 \n", + "2 -234.179124 382.907515 7.637684 17.748300 \n", + "3 -596.053591 -1749.797505 28.376345 -71.868790 \n", + "4 -288.352104 1184.680273 8.081500 23.012828 \n", + "5 101.490751 -661.997823 5.012738 19.615822 \n", + "6 598.106999 -3468.753037 3.750566 52.556860 \n", + "7 1592.815397 1610.699379 -15.576660 27.319692 \n", + "\n", + " sulphates alcohol quality \n", + "0 14.835445 81.515696 0.0 \n", + "1 28.591759 489.172674 3.0 \n", + "2 3.380296 73.701048 1.0 \n", + "3 -14.556346 -38.315179 1.0 \n", + "4 2.168597 36.672840 0.0 \n", + "5 26.791456 -63.773678 3.0 \n", + "6 -108.310847 -91.816310 3.0 \n", + "7 45.376814 135.871422 0.0 " + ] + }, + "execution_count": 51, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "plugin.model.generate(8)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "be62c2f0", + "metadata": {}, + "source": [ + "### Conditional data generation\n", + "\n", + "A conditional variable `cond` can be provided to the `fit` method. It can be either a column name in the dataset or a custom array. The model will then learn the conditional distribution of the dataset given `cond`. In this case, an array must be provided as the `cond` argument of the `generate` method." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 43, + "id": "56a1fc7e", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[2023-03-27T18:03:45.005934+0200][38480][INFO] Step 100: MLoss: 0.9066 GLoss: 1.0013 Sum: 1.9079000000000002\n", + "[2023-03-27T18:03:51.387087+0200][38480][INFO] Step 200: MLoss: 0.4735 GLoss: 1.0112 Sum: 1.4847000000000001\n", + "[2023-03-27T18:03:59.107456+0200][38480][INFO] Step 300: MLoss: 0.4567 GLoss: 1.001 Sum: 1.4577\n", + "[2023-03-27T18:04:05.835508+0200][38480][INFO] Step 400: MLoss: 0.2715 GLoss: 0.9856 Sum: 1.2571\n", + "[2023-03-27T18:04:12.739590+0200][38480][INFO] Step 500: MLoss: 0.2193 GLoss: 0.9046 Sum: 1.1239\n", + "[2023-03-27T18:04:19.417762+0200][38480][INFO] Step 600: MLoss: 0.0143 GLoss: 0.8463 Sum: 0.8606\n", + "[2023-03-27T18:04:26.022729+0200][38480][INFO] Step 700: MLoss: 0.0048 GLoss: 0.7509 Sum: 0.7557\n", + "[2023-03-27T18:04:32.757598+0200][38480][INFO] Step 800: MLoss: 0.0083 GLoss: 0.7102 Sum: 0.7185\n", + "[2023-03-27T18:04:39.550873+0200][38480][INFO] Step 900: MLoss: 0.0029 GLoss: 0.675 Sum: 0.6779000000000001\n", + "[2023-03-27T18:04:46.573464+0200][38480][INFO] Step 1000: MLoss: 0.0039 GLoss: 0.6414 Sum: 0.6453\n", + "[2023-03-27T18:04:53.438631+0200][38480][INFO] Step 1100: MLoss: 0.003 GLoss: 0.6046 Sum: 0.6076\n", + "[2023-03-27T18:05:01.283222+0200][38480][INFO] Step 1200: MLoss: 0.0013 GLoss: 0.6297 Sum: 0.631\n", + "[2023-03-27T18:05:08.559280+0200][38480][INFO] Step 1300: MLoss: 0.0012 GLoss: 0.5479 Sum: 0.5491\n", + "[2023-03-27T18:05:15.536738+0200][38480][INFO] Step 1400: MLoss: 0.0067 GLoss: 0.5275 Sum: 0.5342\n", + "[2023-03-27T18:05:22.391711+0200][38480][INFO] Step 1500: MLoss: 0.0007 GLoss: 0.5252 Sum: 0.5259\n", + "[2023-03-27T18:05:29.285959+0200][38480][INFO] Step 1600: MLoss: 0.0018 GLoss: 0.5017 Sum: 0.5035000000000001\n", + "[2023-03-27T18:05:36.288634+0200][38480][INFO] Step 1700: MLoss: 0.0012 GLoss: 0.5013 Sum: 0.5025\n", + "[2023-03-27T18:05:43.485831+0200][38480][INFO] Step 1800: MLoss: 0.0009 GLoss: 0.4927 Sum: 0.49360000000000004\n", + "[2023-03-27T18:05:50.629387+0200][38480][INFO] Step 1900: MLoss: 0.0009 GLoss: 0.4931 Sum: 0.494\n", + "[2023-03-27T18:05:58.709478+0200][38480][INFO] Step 2000: MLoss: 0.0006 GLoss: 0.4864 Sum: 0.487\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 43, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "plugin.fit(loader, cond='quality')" + ] + }, + { + "cell_type": "code", + "execution_count": 44, + "id": "3fcb9493", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 44, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": 
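Once the model is fitted with `cond='quality'`, generation must be steered with an explicit array of quality values, as stated above. A minimal sketch (the target score 6 is arbitrary):

```python
import numpy as np

# Ask for 5 synthetic wines conditioned on quality == 6.
quality_cond = np.full(5, 6)
samples = plugin.generate(len(quality_cond), cond=quality_cond)
```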
"iVBORw0KGgoAAAANSUhEUgAAAXQAAAEGCAYAAAB1iW6ZAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/P9b71AAAACXBIWXMAAAsTAAALEwEAmpwYAABKvklEQVR4nO3dd3wc1dXw8d/dvupdsiXZsnGvuFKMC5hqeg0lYAcCIYWEh5KXhBQeyEMoCWkQegfTQgndVBvbgHvv3ZItq/ey9b5/zK4sybsq9qqf78f+aHdmdvZoJB1dnblFaa0RQgjR85m6OgAhhBCRIQldCCF6CUnoQgjRS0hCF0KIXkISuhBC9BKWrnrjlJQUnZOT01VvL4QQPdKqVauKtdapofZ1WULPyclh5cqVXfX2QgjRIyml9oXbJyUXIYToJSShCyFELyEJXQgheokuq6ELIURLPB4PeXl51NfXd3UoXcLhcJCVlYXVam3zayShCyG6pby8PGJjY8nJyUEp1dXhdCqtNSUlJeTl5TFo0KA2v05KLkKIbqm+vp7k5OQ+l8wBlFIkJye3+68TSehCiG6rLybzoKP53CWhB3x74FtyK3O7OgwhhDhqktADfrPkN7yw6YWuDkMI0cO88MIL/OIXv+jqMABJ6A3cPjdlrrKuDkMIIY6aJPQAn/ZR6a7s6jCEEN3I3r17GTFiBPPmzWPYsGFcc801fPHFF0ybNo2hQ4eyfPnyI44/7bTTGDduHLNnz2b//v0AvPXWW4wZM4bx48czY8YMADZt2sTUqVM5/vjjGTduHDt27DjmeKXbYoDX76XSJQldiO7ofz/YxOaDkf35HNU/jj+eP7rV43bu3Mlbb73Fc889x5QpU5g/fz5Llizh/fff5/777+eiiy5qOPaWW25h7ty5zJ07l+eee45f/vKXvPfee9x7770sWLCAzMxMysvLAXjiiSf41a9+xTXXXIPb7cbn8x3z5yQt9ACv3ystdCHEEQYNGsTYsWMxmUyMHj2a2bNno5Ri7Nix7N27t8mx3333HVdffTUA1157LUuWLAFg2rRpzJs3j6effrohcZ900kncf//9PPjgg+zbtw+n03nMsUoLHfBrPxpNhauiq0MRQoTQlpZ0R7Hb7Q2PTSZTw3OTyYTX623TOZ544gmWLVvGRx99xKRJk1i1ahVXX301J5xwAh999BFz5szhySef5LTTTjumWHtcC93n91HhqsDrb9uFbOs5Aao91RE9rxCibzn55JN5/fXXAXj11VeZPn06ALt27eKEE07g3nvvJTU1ldzcXHbv3s3gwYP55S9/yYUXXsj69euP+f1bTehKqWyl1NdKqc1KqU1KqV+FOGaWUqpCKbU28P8PxxxZGAv2LuCU109hf9X+iJ3Tqw8n8Sp3VcTOK4ToW/71r3/x/PPPM27cOF5++WX+8Y9/AHDnnXcyduxYxowZw8knn8z48eN58803GTNmDMcffzwbN27kuuuuO+b3V1rrlg9Qqh/QT2u9WikVC6wCLtJab250zCzgDq31eW1948mTJ+ujWeBi6YGl3PzFzbx8zsscn3Z8u18fSpW7ipNfOxmADy/+kIFxAyNyXiHE0duyZQsjR47s6jC6VKhroJRapbWeHOr4VlvoWut8rfXqwOMqYAuQGYFYj0q8PR6Acld5xM4ZLLkAUkcXQvRY7aqhK6VygAnAshC7T1JKrVNKfaKUCnkHQyl1k1JqpVJqZVFRUfuj5XBCj2TibVxykZ4uQoieqs0JXSkVA7wN3Kq1bp71VgMDtdbjgX8B74U6h9b6Ka31ZK315NTUkGuctqojErq00IUQvUGbErpSyoqRzF/VWr/TfL/WulJrXR14/DFgVUqlRDTSgBhrDCZlosItLXQhhGisLb1cFPAssEVr/UiYYzICx6GUmho4b0kkAw0yKRPxtnhpoQshRDNtGVg0DbgW2KCUWhvY9ltgAIDW+gngMuCnSikvUAdcqVvrPnMM4u2RTejSQhdC9AatJnSt9RKgxZnWtdaPAo9GKqjWxNnjpIUuhOgy8+bN47zzzuOyyy7r6lCa6HEjRQES7AmR7baoDyd0aaELIXqqHpnQ423xEU28jYf7y4yLQojG7rvvPoYPH84pp5zCVVddxV/+8pcm+7/88ksmTJjA2LFjuf7663G5XADcddddjBo1inHjxnHHHXcAoafRjaQeOTlXxGvogYTutDilhS5Ed/TJXXBoQ2TPmTEWznmgxUNWrFjB22+/zbp16/B4PEycOJFJkyY17K+vr2fevHl8+eWXDBs2jOuuu47HH3+ca6+9lnfffZetW7eilGqYMjfUNLqR1CNb6HH2OKo91Xj8noicL1hySXIkSQ1dCNFg6dKlXHjhhTgcDmJjYzn//POb7N+2bRuDBg1i2LBhAMydO5dvvvmG+Ph4HA4HN9xwA++88w5RUVFA6Gl0I6lHttAT7AmAUR5JdiYf8/mCN0WTHElsL9t+zOcTQkRYKy3p7sZisbB8+XK+/PJL/vOf//Doo4/y1VdfhZxGNzn52HNYUI9socfbAqNFIzS4KFhySXIk4fK5qPfWR+S8Qoiebdq0aXzwwQfU19dTXV3Nhx9+2GT/8OHD2bt3Lzt37gTg5ZdfZubMmVRXV1NRUcGcOXP429/+xrp164DQ0+hGUo9soUd6+H+wH3qSI6nhvA6LIyLnFkL0XFOmTOGCCy5g3LhxpKenM3bsWOLj4xv2OxwOnn/+eS6//HK8Xi9Tpkzh5ptvprS0lAsvvJD6+nq01jzyiDEm884772THjh1orZk9ezbjx4+PaLw9MqEHSy6RSujBkktaVBoAJfUlpEenR+TcQoie7Y477uCee+6htraWGTNmMGnSJG688caG/bNnz2bNmjVNXtOvX78jFpAGeOedI2ZOiagemdDj7HFABBN64KZoMIkX1xVH5LxCiJ7vpptuYvPmzdTX1zN37lwmTpzY1SGF1SMTeqTnRA/W0NOjjIReUtch09AIIXqg+fPnd3UIbdYjb4rGWmMxK3PEa+jBhC4tdCFET9QjE7pSijhbHJXuStw+Nx6/h2OZCyxYQ4+2RhNrjZWELoTokXpkQgej7PLOjneY9MokJr48kfPePY8XNr6A1++l3lvPX1b8haLatq2KFKyhW0wWkp3JktCFED1Sj6yhA1w89GLWFq5lZNJIUPD9we/566q/YrfYsZlsvLj5RQbGD+TyYZe3eq5gDd2szKQ4UyShCyF6pB6b0K8fc32T5zePu5lrPr6GVza/QrQ1GoBDNYfadK6GhG4yEvqW0i2RDVYI0SPFxMRQXV3d1WG0WY8tuTSnlOK6Udexv2p/Q0Jua0JvXHKRFroQoqfqNQkd4PSBp9Mvuh9RliiGJg5te0IP3BS1KKOGXuOpodZT25GhCiF6EK01d955J2PGjGHs2LG88cYbAOTn5zNjxgyOP/54xowZw+LFi/H5fMybN6/h2L/97W+dFmePLbmEYjFZ+PP0P1PlruKj3R+xuWRzm14X7LZoNplJdaYCRl/0KGtUh8UqhGi7B5c/yNbSrRE954
ikEfy/qf+vTce+8847rF27lnXr1lFcXMyUKVOYMWMG8+fP56yzzuLuu+/G5/NRW1vL2rVrOXDgABs3bgTokGlyw+lVLXSASemTmJU9i37R/ThUc6hN3Rmb3xQFKK6XsosQwrBkyRKuuuoqzGYz6enpzJw5kxUrVjBlyhSef/557rnnHjZs2EBsbCyDBw9m9+7d3HLLLXz66afExcV1Wpy9qoXeWHp0Om6/m9L60lan2A3W0JskdKmjC9FttLUl3dlmzJjBN998w0cffcS8efO47bbbuO6661i3bh0LFizgiSee4M033+S5557rlHh6XQs9qF90P6BtN0Z9fh8WZUEp1ZD8JaELIYKmT5/OG2+8gc/no6ioiG+++YapU6eyb98+0tPTufHGG/nxj3/M6tWrKS4uxu/3c+mll/KnP/2J1atXd1qcvbaFnhGdARgJfXTK6BaP9WovZpMZgER7IiZlkoQuhGhw8cUX89133zF+/HiUUjz00ENkZGTw4osv8vDDD2O1WomJieGll17iwIED/OhHP8Lv9wPw5z//udPi7P0JvfYQawvXMiJpRNg5zr1+L2ZlJHSzyUySI0kSuhCioQ+6UoqHH36Yhx9+uMn+uXPnMnfu3CNe15mt8sZ6bckl0Z6I3Wzn490fc+0n1/Lh7g/DHuvz+7CYDv9uy4jK4GD1wc4IUwghIqbXJnSlFBnRGawvXg/QYoL26aYJPTs2m7yqvA6PUQghIqnXJnQwWtpBhbWFYY9rXHIByIrNIr8mH4/f06HxCSFadiyzqPZ0R/O59+qEPixpGNmx2QxPHE5RXfiZF33a13BTFIwWuk/72jzSVAgReQ6Hg5KSkj6Z1LXWlJSU4HC0b23jXntTFOD2Sbdzy4Rb+M3i37Cvcl/Y40K10AFyq3LJjs1u9X2K64opry9nSOKQYw9aCAFAVlYWeXl5FBW1bRrs3sbhcJCVldWu17Sa0JVS2cBLQDqggae01v9odowC/gHMAWqBeVrrrrnN24jZZMZpcpLqTGVlwcqwx/n8Pqwma8PzYBJvax399oW3U1xXzEeXfHRsAQshGlitVgYNGtTVYfQobWmhe4HbtdarlVKxwCql1Oda68YTpZwDDA38PwF4PPCxW0iLSqPCVYHL58Juth+x36ubttBTnalYTdY2JfRVBatYXbi6YcpeIYToKq3W0LXW+cHWtta6CtgCZDY77ELgJW34HkhQSvWLeLRHKTXKmHAr3I1Rn79pDd1sMpMZk0ledesJ/ZkNzwBQ46lpmBNGCCG6QrtuiiqlcoAJwLJmuzKB3EbP8zgy6aOUukkptVIptbIz62JpzjSAsEvSNW+hg1F2ya3KDXl80KGaQyw5sIS0KOP8NZ6aCEQrhBBHp80JXSkVA7wN3Kq1rjyaN9NaP6W1nqy1npyamno0pzgqDS30uvAt9MY1dDBujOZV5bV4h73CVQHA6GRjaoFK91FdFiGEiIg2JXSllBUjmb+qtX4nxCEHgMbdQbIC27qFYAu6xRa66cgWerWnmnJXedjzun1ugIYJvarcVRGIVgghjk6rCT3Qg+VZYIvW+pEwh70PXKcMJwIVWuv8CMZ5TOJscdjN9rAJ3ef3HVFy6R/dH4D8mvCfhsvnAiDZIQldCNH12tLLZRpwLbBBKbU2sO23wAAArfUTwMcYXRZ3YnRb/FHEIz0GSilSnakU1BaE3O/1e7FbmvZ+Cba6y+rLwp7X7Tda6ME51CWhCyG6UqsJXWu9BFCtHKOBn0cqqI6QFpUWdrRo87lcABIdiQCU1peGPafHZ0wNEEz+UkMXQnSlXj30v7HUqNTwNXS/F4tqmtCTHElAywk9WHKRFroQojvoMwk9IyqD/Jp8fH7fEft8+sgaeow1BqvJSkl9SdhzBksuifZEFEpa6EKILtVnEvrQxKG4fC72VR05p0vzgUVg1N2THEkt19ADvVwcFgextlhpoQshulSfSejDk4YDsL10+xH7vPrIkgsYZZeWSi7BhG41WSWhCyG6XJ9J6IPjB2NRFraVbTtin9fvPeKmKAQSel3rNXS72U6cLU4SuhCiS/WZhG4z2xiUMIjtZUe20JvPhx7UWgs9uACGzWyTFroQosv1mYQOMDxxONtKj2yhhxpYBK0n9GALPVhykZuiQoiu1OcSekFtQcMcLEHhSi6JjkTqffXUempDns/tc2Mz2VBKSQtdCNHl+lRCH5Y0DOCIVrpXh6+hQ/i+6G6fu2F+dUnoQoiu1qcS+vBEo6fLuqJ1TbaHK7kER4C2lNCtZmOWxlhbLLXeWpkTXQjRZfpUQk92JjMpfRL/2f6fJom3pZuiED6hu3wubGYbYEwABlDtro502EII0SZ9KqEDXDvyWg7WHOTr3K8btoUa+g+HE3q4wUVuf9OSC8jwfyFE1+lzCX1W9iwyYzJ5efPLAGitQ07OBYcn6Ao3/N/tczcsjBFrNRJ6pUd6ugghukafS+hmk5nzjzufNYVrcPlc+LQxt0uoGrrT4sRpcbb5pihIC10I0XX6XEKHw4tXFNYWHk7oIWro0HJfdLfP3VBDDyb0Spe00IUQXaNPJvT0qHTAWJIuOPtiqBo6GKsRtVRDb35TVAYXCSG6Sp9M6A2LRtcWNgzfD1VDB6OO3mIvF5OR0FOcKZiUqcUl64QQoiP1yYQeXDS6zSWXMBN0Na6hW81W+kX3I68qrwMiFkKI1vXJhB5cNLqwtrCh5BLqpigcrqEbq+w11XhgEUBWbJYkdCFEl+mTCV0pRVpUGoV1h1vo4UouSY4kvNobsjbeuB86QFZMFrlVuR0TtBBCtKJPJnSAVKexxmiwhh6uhR7six7qxmhwcq6g7NhsylxlMlpUCNEl+mxCT49Kb1JyCddCT3aEn8+lcbdFMEouAHnVUnYRQnS+PpvQU6NSKaorapjTZdG2EpbvOTJpJznDz+fSeC4XMFrogNTRhRBdos8m9LSoNOq8dZS7ygH4dGMh/1l1ZP073ARdWms8fk+TGnowoedW5fLlvi8pqQs9ZYAQQnSEPp3QgYZ+436/iYo6zxHHJdqNGnrzhN54+bmgWFss8fZ43t/1PrcuvJU3t73ZIbELIUQofT6hH6w+CIDPr0ImdKvZWF6ueUJvvPxcY9kx2ews3wnAvqp9EY9bCCHC6bsJ3dm8ha4orz0yoYNxY7R5Qnf73ABNSi5wuOzitDjJrZQujEKIzhO6a0cfEBz+H0zoXp+JyhAtdAg9QVcwoTcuuQCcmXMmFpMFm9nGl/u/jHTYQggRVp9toTssDhLtiYcHAunQNXQIPfzf7Q+d0E8feDr3T7+fnLgcyl3lRyxILYQQHaXVhK6Uek4pVaiU2hhm/yylVIVSam3g/x8iH2bHyIzJbKiha22mxu3D4/MfcVyiI5EyV9OBRcEaeuOBRY1lx0kXRiFE52pLC/0F4OxWjlmstT4+8P/eYw+rc2TGZjYM/Q9eilCt9CRHEmX1ZQ2DkAA8PuO45jX0oGAtfX/V/ghGLIQQ4bWa0LXW3wChpxvs4TJjMg8/0S0ndI1u6LMOjXq5mK1HHA/GvC6Az
O0ihOg0kaqhn6SUWqeU+kQpNTrcQUqpm5RSK5VSK4uKiiL01kevSUJvqYUeYrRosIYeroUeZY0izZnG/kppoQshOkckEvpqYKDWejzwL+C9cAdqrZ/SWk/WWk9OTU2NwFsfm2ArGjjcQg/RdTEjKgNoWg9v6OUSpoYORh1dWuhCiM5yzAlda12pta4OPP4YsCqlUo45sjDW5ZZz51vrKK52HfO5+sf0b3istTHbYqgW+rDEYSgUW0u3NmwL122xsQGxA9hetp1/rP6H9EkXQnS4Y07oSqkMpZQKPJ4aOGeHTWJSWOXirVV5HCyvO+Zz9Y/pj0IZT1qooUdZo8iJz2FL6ZaGbQ29XFpI6CdnnoxSimc2PMPzm54/5niFEKIlbem2+BrwHTBcKZWnlLpBKXWzUurmwCGXARuVUuuAfwJX6lDL+0RISoyRQCPRQreZbQ0DjIKXItxo0ZFJI5sk9HAjRRs7O+dsvr3qW07odwIbi0P2+hRCiIhpdaSo1vqqVvY/CjwasYhakRJjJNDiKndEzpcVk0VhbWGLLXSAUcmj+HjPx5TWl5LkSGpTySVoTPIYXtz0Ii6fq8VfAEIIcSx63EjR1FgjIRZFoIUOh3u6tFRDB6OFDrC1xKijB3u5NJ+cK5SxKWPxam+TGrwQQkRaj0voDquZWLuFoqoIJfTYYNfFllvoI5JHALC5dDNwuIbelhb36BSjJ2ew7FLtrmbB3gUhF54WQoij1eMSOkBKrD0iNXSAS4ZcwrVDfwV+BwAVdaFLOXG2OLJisthcYiT04EjRtpRc0qPSSXGmsKl4EwCvbHmFOxbdwfyt8yPxKQghBNBTE3qMLWIJvV9MP2ZnXgJAtM0ctoUOMD5tPKsKVuHXflw+FxaTBZNq/RIqpRiTPIaNJUYLPTgL4yMrH2Fb6bYIfBZCCNFjE7o9YiUXAI/PKH0kxdhaTOjT+k+jtL6ULSVbcPvdLQ4qam5s6lj2VOxhyYElbC3dyg1jbiDGFsPj6x4/5viFEAJ6cEIvro5MLxcAn99I6MnR9rDdFgGmZU5DoVh8YDFun7tdPVYuGXoJsdZYblt4GwCXDruUWdmzWH5oeZNJv4QQ4mj12IReUefB7T1yqtuj4W1I6DZcXj/1ntAJNsmRxKjkUSw9sBS3zx12Yq6QMTtTuGXiLdR56xiRNILs2GymZkylyl3FtjIpuwghjl3PTOixRqmjpCYyZRdvYA705MCgpZbKLqdknsL64vXk1+S3u0/5FcOu4Jycc7hu1HUATM2YCsDy/OVHE7YQQjTRIxN6aoQHFwVb6BlxRk+Xlurzpw88HYDv879vVw0dwGwy89DMhzj/uPMBYxm8QfGDWHZo2dGELYQQTfTIhJ7SMLioPiLnC9bQ+yU4jfO2kNBHJI1g/rnzuXzY5Vwy9JJjfu+pGVNZXbAajz/8XwVB3x38jre3v33M7ymE6J165CLRHdVC7xdvtNALq1r+RTE6eTSjTwo77Xu7nNz/ZN7Y9gYLcxdyxsAzwh63t2Ivt359Kx6/h3MGnUOUNSoi7y+E6D16Zgs9JrLD/4M19H7xRgu9sDJyXSJbMzNrJjlxOfx77b9x+VysOLQCr9/bsH9PxR6e2fAMv/r6V7h9bjx+DysLVlLrqeVA9YFOi1MI0f31yBa602Ym2mZm8Y4iVu8rIzHaxpj+cZw2Ip0ByUbLtbCyntRYO4GZfVsUbKFH2czEO60URrCPe2vMJjM/O/5n/PqbX3PO2+dQVFfE0MSh/Hbqb8mKzWLep/MaJgR7ZNYj/PqbX7P0wFI+2PUBSw8s5YvLv5DWuhAC6KEJHYw6+ve7S0mLtaOB/6zK476PtvDqj08gzmHl/EeX8PR1kzhtRHqr5wrW0K1mE2mx9lZLLpF2Vs5ZvLDpBcrqy/ifSf/D61tf50cLfkSSIwmXz8V7F77HcQnHATAlYwqf7PmEclc5Gs2CvQu4eOjFnRqvEKJ76rEJfd7JORRXu/jFqUNx2szsLa7hkse/5cVv95IWa8fn12wvqG5TQg+20M0mRVqcvVNb6AAmZeLlc17GpExYTBauGnEVz2x4hje3vcmfT/lzQzIHY3DT4gOLibJEkehI5N2d7zZJ6Fpr6rx10moXog/qkTV0gB9NG8SdZ43AaTOmvc1JiebSiZl8vrmAd9YYteW2rmoUrKFbTIq0WEen1tCDbGYbFpPx+9VpcXLLhFtYfOViTh1wapPjpmdOB+CqEVdxxfArWFO4hvd2vseWEmPxjSfWPcHst2ZT6a7s3E9ACNHlemwLPZQfTMnm6cV7qKr3YjOb2pzQgyUXs1mRFmvME6O1blP9vbMNiBvA6+e+zrDEYVS4K3hszWP8funvUSjmjZ7HS5tfwqd9fH/we87MOROA0vpSHGaHtNqF6OV6bAs9lCFpsZwwKImsRCenDE3hQHnbauHBkovVZCI11o7b529xtGhXG50yGqvZSoozhQ8u/oA3znuDaZnTeH7T8yQ7kom1xbLkwBLAKMFc89E13PPdPV0btBCiw/WqFjrAv6+ZiMvr54lFu1i5t7RNr/E1qaEH+6K7SIhq30jQrtA/pj/9Y/rzj1P/wb/X/ptZ2bN4efPLLD2wFK01m0o2kVedR0l9CbWeWjaVbCLGGsPI5JFdHboQIsJ6VQsdIDnGTv8EJ/0TnFTWe6mqb72l7WlSQzf6uHdFHf1Y2Mw2bp10K8enHc8pmadQWFfI9rLtDXOv13nr+GjPR9zy1S08sPyBLo5WCNERel1CD+ofGMafX9F62cXn15gUmBon9E7uuhhJp2SeAsCHuz/ky/1fMjl9Mgn2BB5Y9gA1nhq2lm7FryMzU6UQovvotQk9M8EonRxow41Rr19jMRmXonHJpadKjUrlnJxzeGHTC+yp2MMZA8/gtAGn4fa7SbAnUOutZX/l/q4OUwgRYb02oQdb6G3p6eL1+TGbjB4tMXYLUTZzjyu5NHf/9Pu5YtgVxNpimT1gNhcPuZjMmEz+eNIfAdhSuqWLIxRCRFqvTehpsQ7MJsX2Q1Xc/PIqNh2sCHus0UI/3EUxI87Bocq2dXnsriwmC78/6fcs+sEi0qPTOT7teD699FNmZs/EarI29FsXQvQeva6XS5DZpMiIc/Dqsv14/Zrx2QmM7h8f8lifX2MxH07oWUlR5Jb27IQeZDVZj3g+NHEom0s3d1FEQoiO0vNa6K5q2Pxf0LrVQzMTnA19zAsqw9/k9Po1ZtPhS5Gd6CS3rPbYY+2mRiaNZGvpVnQbrqEQoufoeQl9y/vw5nWQt7LVQwenRhPnsJAR52g5ofv8TUouA5KiKK/1dOvBRcdiVPIoKlwVHKw52NWhCCEiqOcl9BHngtkGm95p9dDfnDOSj381nePSotvQQm+a0AFyS3tnK3186nhA1jIVorfpeQndEQ9DzoBN74G/5b7U8VFWshKjSI9zUNBCrxWfX2NtVEPPDiT0vF5adhmWOIyM6AwW5i7s6lCEEBHUakJXSj2nlCpUSm0Ms18ppf6plNqplFqvlJoY+TCbGXMJVB2E3O/bdHh6nIPCqnr8/tA1
4+Yt9GBC39+WFrrPw/df/Ie3332rTbF0B0opZmbN5Lv876j39twBVEKIptrSy+UF4FHgpTD7zwGGBv6fADwe+Nhxhp0NFid8fT+c/r+gfcZNUnss7FsK9RWQMQ76jQelmFL/LWt0CeUFo0jKGAjNZlF0uoq53v0feOEvUFtK/KgLudFRyPCNn0C/i+DAKtj4Ngw4AQaeAlFJ4EiA3Qvxf/9vTqwLzBkTtRbGXgYmq1EWciaA1Qm1JVBTDN56SB4KMWmHY9AafG6w2Dv0kjV3avapvLHtDZYfWs6MrBmd+t5CiI6h2tLTQSmVA3yotR4TYt+TwEKt9WuB59uAWVrr/JbOOXnyZL1yZes3NsP67t/w5b3gbWf3QosDlBksNsicBJ563PuXY9ZezJkTjP37lgLgR2EicH2yT4SCjeCubnK6rXHTeKR4KtPNm7jW/FnbYnAmQmIO+L1QkQd1ZZAxFhIGgtkKk2+AjDHGL5Kk44xjIzyVr9vnZvrr0zl38Ln84aQ/RPTcQoiOo5RapbWeHGpfJPqhZwK5jZ7nBbYdkdCVUjcBNwEMGDDg2N71pJ/B+Cth5xdGa1kpqC2FrMkQnQqHNkD+OvC52W4fw73vruT3J9kYbisxXu+qNHrK2GL4JuYc3racy+M3/sDYV3GAu99dx6oiM59eZIaYVKO176k/nIDryiizpnLe0/nYLSY+c0/h8hvuwFFfAn4P+DxGPJ5aI57oVDCZoXgHFG2B8lwjeWdOgqgUyF0GpXugphA2vQvKBMH5VuKzYeQFkDnReOyIh5ShxvmOks1sY1rmNBblLsJ/oh+T6nm3U4QQTXXqwCKt9VPAU2C00I/5hFFJMO6K0Ptyphn/gZjyOpb4XaxOH8vwqUf+Innx2WXUuLyHN8RnEpNeye4de/EfdyamYH3d6oCUIQ2HHcqvxOs/yEkDE1m8o5iyxHH0i3e2HPOQ2S3v99TByuehrhQGngylu2HH57D8KeMXRVB0Koy5DKbfZpRwjsKp2afy+b7P2VKyhdEpo4/qHEKI7iMSCf0AkN3oeVZgW7eRGmtHKTgUZuZFj8/fMDlX0ICkKNw+P4cq6xvmhWnO7fU3nB+gos7TekJvjdVp/PURdNxpMOXHxl8Hpbuh8iDUFMH2T2HF07D6JTj3r3D8Vcbxfr9RFnLEtfpW0zOnY1Imvs79WhK6EL1AJP7Ofh+4LtDb5USgorX6eWezmk0kR9vDTonbfOg/QL94Y9bFlvqvuwIJPS3WOLaitgMHIlkdkD4Khp5uJO8rXoSfLzfKMO/dDJ//EXZ8Ac+fA38dDgWtD+1PcCQwIW2CdF8UopdoS7fF14DvgOFKqTyl1A1KqZuVUjcHDvkY2A3sBJ4GfhbmVF0qI94etoXevNsiQFK00eourXGHPae7IaEfbqF3quTj4IfvwLgfwNK/w6uXQtFW48buuz8Bb/jYg2ZlzWJb2baGFY6EED1XqyUXrfVVrezXwM8jFlEHSY91cDBcQvc1nW0RIDnaWH6upDp8UnR5fUDTkkuns9jgkqfg1LuhaBv0Px5yl8Mb18B7PzXKMc6EsC8/e9DZPL/peW7+4mbOGHgGj8x6pNNCF0JEVp/p2pAe7+BQRegujs0n5wJIjgkk9BZa6K5mLfTKem/YYztc4kAYdqZxg3TkeXDq74zpEf4+Dh47AT75f1BTcsTLMqIz+PTST/nhyB/y+b7P2Va6rQuCF0JEQp9J6ENSYyir9YSso/v8/iZD/wGibBacVjMl1eGnDAiWXJJjurCFHs7MO+HHX8DoC43+7cufhn9OgG//Bd6mn5PT4uTm8TfjtDiZv3V+FwUshDhWvXY+9OZG9zd6fWw6WEnacEeTfaFq6GC00ltuoRsllyibmViHhcrulNDB6OOeOcl4XLgVPv89fPY7WPEMTL/D6A2zZzEcXEP8Zc9y3uDzeH/X+xTVFuG0OPnDSX8g3h56DnkhRPfTZ1roIwMJffPByiP2haqhg1FHbymhB1voNouJeKe1e7XQm0sbAde8ZdxEtUbB+7+AT+8yBmBpH7z3M64Zcgl+7Wd3xW4W5i7khx//kLyqvK6OXAjRRn2mhR7nsJKd5GRz/pEJ3Reihg5GKaUt3RbtPSGhBw2ZDYNmQv5aY9RpbLrRSn/xPI779gkWX/YlUY4EVh/8jl9+/Ut++NHVPHb649JPXYgeoM+00AFG94sP3UIPUUMHSIq2tdht0dWTWuiNmS3GFAmx6cbzQdNh2q9gzctEPz8H9dV9THrvNl7etwd7bSk/XnA9eyr2dG3MQohW9amEPqp/HHtLaqh2Ne2N4muphl7tDts/uyGhm03EOXpQQg/ljHvhyteM+WOW/B3qyhh85kM8X+7G6q7h1s9/SnWzicm6yq7yXRTVFnV1GEJ0O30qoY/uH4fWsLVZ2cXTQg3d7fMf8QsgyOX1YbOYUEr1rBZ6OCPmwM+Xwe8K4bbNMOUG+s/7lL9U+dlXncfl757Ptwe/7eooufmLm7lj0R1dHYYQ3U6fSuijAjdG1+aWN9luDP0PUUMPjBYNN7jI7fVjtxivi4/qBQk9yGw5PJNj0mCmXvMBT1eBuSqfn3z+E3707sU8tuZR3t7+Nh6fB601tZ7OWd2pqLaIQzWHWF24mk0lmzrlPYXoKfpUQs+IczC6fxxvrsxtUkbx+v2hW+gNg4tC90V3ef3YLUbii3dacXv91Ht8HRB5F0s+jik/WcbbQ+ZxV6WLA6VbeWL9k9zz3T1c8d4FXPrfiznl9VP4Yt8XHR7KltItDY9f3fxqh7+fED1Jn0roSinmnZzD9oJqvt11eNRk2Bp6O1rocU4rQPfrix4ptmjss+7imp9u4LOp97HOPp5/FZXjLd2DtXAzQ+pquGPhbTy25lF2lu3ssHlhNpcYk45d5Mzmk72fUFxX3CHvI0RP1KcSOsD54/uTHG3j+aV7AdBah6+htzL839W45BJI6L2m7BKOxQ5jL8N05SvM+tk6Phh7K2+M/jnPx01kWm0tT6x/kovfv5hz50/jD5/9lJc3vkCtpxatNfsr9+PxH9v12Vy4jhyPh+t3LMPr9/LhZhnZKkRQn+mHHuSwmrlkYibPLtmDx+fHFFjaLVQNPSkwQVe4rosuj3FTFPpQQm8sKglONCbdjNa/5LEVz1Cw50sWlW/na38hi1yLeDd/Ce9seI706H4sLdtMkiWKi3LO5dqJPyPFmdLut9xStJ4JLjeDZv8f49Y9wn+3vcncibegIrxEnxA9UZ9L6ACDU2PwayiscpESaIWHKrk4rGZi7BaKw8zn4vb1wRZ6OErB1BtJn3ojVwBX1BTDvm/5ds1T/L+63eTXl/DTiiq22Wp5YcebvLrzbS4beBbnj52LRVkoqC1gTcFqluYuYlrWdG6e8HPs5sMLZ28r3YZGc8hTySi/GabeyIXb3+Q+TxFbijYyKm1s133uQnQTfTKhZ8QZc7kcqqgjIZCIQ5VcoOXBRS6Pv2+30FsSnQKjLuDkURfwwdYP0HVlJI66GCoOsHfZozy7/1Pe2Psxr+77pOElZg3D3G6eqdjJ61v
mo5SJQdH9sJmsrCw/PAvkyLTjwWTmrAk38+DKe3lt5SPcN+f5Lvgkhehe+mZCjw8mdBdD0oybd6Fa6HB4cFEobp8fp9XccBwYrX7RVMKI8w8/SRtBzvmPcl9tKT9b/jgb9y9E1xSTarIzOG4g8UNO5Ns1z/E5xZiBrTXl5JvN3F5ZRb7Fwla7lbHjLgQgftTF/GDpPbxctJIB3z/Aj6begcXUJ7+lhQD6akIPttAr6/H5jYRuDVFDB6OnS15Z6D7WLq+voYUf57CSEGVlf2nn9Mfu8aKS6Dfrbvpx9xG7Tp76E04u2Gj0ha8rA3cNpI4wFsrevQiGzzEONFu5/awnKVlwI//c9iqPb5vP9ORx/O60v5Ealdri23v9Xqrd1Xi1F6fFSbQ1uiM+SyE6VZ9M6AlRVmwWEwWV9Xj9xvD9sC30aBvr88pD7nN7D5dcAAYmRZErCf3Y2aIge+qR28/96xGbzIOm86fLP2TWgv9hfdlW/uNfw0Vvnc60+KGMzZzGmAEzGZE8Eqfl8OLd3x5Yyl0Lb6fMW9OwbWbcEP545pOkRqc1Ob/X72Vb2TZGJY2SG6+i2+uTCV0pRUacg0MV9Xh9Rgs9XA09OcaooWutj/iBbtxtESA7KYr1eRUdF7gIyZo8hHOu/oBzPPVc8e1feXTbfFaXbOKTim2w+TnMwBB7MiPiBlFRX8Y3lbsY7HFzU1UNVouDApuTl/zbufTts3h2zqsMTRkFgNvn5vZFt7MwdyFXjbiKu6behUn1uZ6+ogfpkwkdjLJL45JLuBZ6UrQNr19TWeclPsraZF/jm6IAA5Oj+GTjIbw+f8hukKKDWR0Mmnk3f53xWyjZRdGOT9i4fyEbSrewqTaPJTWFxPv9XOozcfukO4keezk44sHv47wv7ubG3P9y00fX8P9OuQ8Pmte2vMqGkk2cHJPDa1tfo7L6EPed+lesJmvrsQjRBfpsQk+Pd7A+rxxvKzX0lMDycsU1riMSutFt0dzwfEBSFD6/5mB5PQOSozooctEqpSBlCKkpt3DqSbdwqtZQUwRl+yAm1ZgH3nT464bJzOAzH+CpxQlcv+Ml7lzyGwAyfX4eKCllzp79PBMfxz/5mop3L+WS0dcyNusUMmL6hfzLrb382k9exX7KyncxPmf2MZ1L9G19NqFnxNn5rKIer6/lGnrjwUXHNbvP5vL4mpRcBiQZN9b2l9ZKQu9OlDIWz45Ja/Gw46bfxSdJw9i/6H485fsYnT0d07nzIGMMN7qqiPv0Fh6o2s2SZffCMhhhTeSgv54UZzL/N/NhxqSMaXdotZ5arn/vYjbVHgTguerfMmXMVUfzWQrRhxN6vBOX109xoEtiSzV0IORi0W5f05JLMInvK61hsicRe2BqXdFzRI2+hBEjL4Sa4sMLgAT8YO7XnLf9E/YWrGHp3s/5rjKf2V4v37mq+OGHVzHFmc6IhKHYzQ5OHXklozNPaPG9tNb87sNr2VJzgDu8Tp401/LWhmcloYuj1ncTeqDrYrBLYriad3CCruJmfdF9fmMOmMYll4w4BzazieV7Snngk63cPWckV04d0BHhi45kMh+RzIPbo0ecx+gR5zF65u+5qaYYyvdTsfsrnt3xFosr85hfewivgicPfMEJ5gTOG3Yp6UlD8bgq8biryEo/HpSJ57+7n++qdlOKj9tNqcydt4D8N8/lDVc+pVUHSYrt3+mf9rHyaz8Hqw+SFZvV1aH0WX03occbifpAeR3Q8khROHI+l8YLRAeZTYqsRCf/XWv8+bz+QAVXRjZs0Z1Ep0B0CvGZE7lt+h3cVlcO5fuorinkjdWP8lbVDn6/5dnQL/X7me2zcnLyeObMeRIsNi4bewOvrv4/Xvn2T9xy5mM97q+7t3e8zZ++/xPz58yXNWi7SJ9N6OmBFvqBMiOhh6uh2ywm4hyWI0ou7kYLRDc2IDmK3cVG/+a9xTWIPsSZAM4EYoAbhpzB9XXlbFn9LHXeGuz2eMxWJ7sK1lJRW8x5U35J/MBTmrx8yOgrmL7s/3j60GK+fXUap/Y7CYfFSW75Lk4ePIcZo6/CrMxszV2Cx1vH2EFnNEn6+dX5rC5czanZpxJlbd89nBpPDS9tfonLh11+VJOmASzYswC/9vPEuif41+x/HdU5xLHpswk9LdaBUpBX1nILHSA5xn7EFLour7GQha1ZQh+eHsvKvWVMGJDA7iJJ6H2ZciYwatrtTbaNbOkFJhP/uOAtPlj4W16p2MKjeZ8B4PD7eaN8I5ZVD+EEqgLfqhOXRDE+/jjSYzJJSR/H/eufoNRTSZTZzhWDL+CS0deyt3A9abHZjEqf0GKL/9+r/s5L215nWe43PDPn5bBTKCxc9QSvbH6JEl89d5/wWyaPvAyAsvoyVhasJMWZwsK8hWwu2cyo5FFtvVQiQvpsQrdZTKTF2huG6rfUbzw5+sj5XFxhWui/On0oc0/O4c2VuSzZWUy9x4fDakaItrCmjeCSK97hEq+Lij0L8flcxKWO5ptV/2Z98QYqfC7GJY2kzlvHq8UreKVsPZ7yDZD3KVkeD78vLefz6Che3P4mL+x4q+G8yVjIMDtJMjvIsMVz8fibOG7ADA6V76IazfxtbzDM5WZVyUZ++8EPuWj8j5mcPQOb2dZwjmc++Sn/KFxCf68fE3DDsnv4df4KrjntQRbmLsSnfTxYb+dWZeHP3/yG5y74D1az9NnvTG1K6Eqps4F/AGbgGa31A832zwMeBg4ENj2qtX4mgnF2iKzEKFbvLwPCl1zAqKPvK2k6pL8hoTdL1lE2C1E2C4NSotEacktrGZoeG+HIRa9nsRM/9KyGp6ed+VdOa3bI1VqjXdUU5K9k+96vGJ84gviU4ZzurefGnZ+wsmQjw5JHk1u2nRXl2ynRFZSoMlZZCnhr6a9RSzQ60GqP9fl5uv/ZPHdoCa+UbeSTRf9DjFaMNEdT7/dSrb3sUV7mqFj+75pPcdUW89v/XsEDuR+z7+0drPeU0d/rZUr5Pv5gdnOn3s19H83lypN+y4D4HGJsMZ148fquVhO6UsoMPAacAeQBK5RS72utNzc79A2t9S86IMYOk53oZNU+I6G3VnIJJv6ghpuiYVr2A5ONPul7SyShiw6iFMoRS8agU8kYdGqTXUNyTmFI4PFE4MJG+2pK9/DfpfdR4Sony55MxcFVjIzuT9KcR7hD+/nZnoWs3Po2nxevZZ+7hjhlIcNk5dyoQfz4wlcw26KwOOL469Vfc8+b5/Ja9Q4AfmZKRv38I84GNr8xh+fLNvDux1dhRTEtbghn5JxNdkx/Ssp3k5N9CoPSxmM2tf+vV7/2U1JX0uoEbEdzXq31UcXUXbSlhT4V2Km13g2glHod4/ujeULvcbISD984aqmFnhyYE93v15gCxwVr6HZr6ISeE+iTLjdGRXcTnTSIq89/LsxeM1FDzmDGkDOY0cp5LPZY/vTDRdx+aC02s43olJFgMn4e/ufqrzht5WOU5K9hVeEaPvduYWHljsMv3vw0URqGW2Lx+DzU+D1oBfXaTy1+6hWkahMZ5ijMJjMJZgf9rX
EkxfTjw+I1bPdVM1Q5mRV7HKNTxpCSOoqElOGgNd+seQq7xcEF036H3RaDX/vxaz8+7aOweCvb9nzBwH6TGJh5IvmFG4lPyGFd6Sb+97v/pbiumFhrDCfFDeHcnLOZNfqqNs3fU1SxH7e3jszk4W37InSQtiT0TCC30fM8INSIiUuVUjOA7cD/aK1zmx+glLoJuAlgwICu75+dlXh4Br5wQ//BGFzk11Be52noxthQcgnzuoQoGwlRVvaWSEIXvZhSJPabcORmq53jT7oNgNl+P3ccWsemvV9R5qkmOS6bXXlL2VC6he31FcSbbPS3OFEaHBYrUWYHDrOdQ/WlFHir8Wg/2ylnkakAV81OBni8/MycxLf+Sp6r2ICvciPsPjK0v8//CB9QG6qxtv3FIzYNx84VNT7y/YdYXFfBZyVrSV7xAE4U0VoRj6IcTYnyU600qdpEvLJQo73sNRlTiGT5TQy0RJNmiyfRHk9BXQnl3lqSrdEk2xKIt8ejlImxWacwZdy1x3TpQ4nUTdEPgNe01i6l1E+AF+GIkh9a66eApwAmT57cMcvCt0ObW+iB+VxKql0NCb2h22KYFjoYZRdJ6KLPM5kw9Z/A2P6HE//oCddzQTtPo31eKkq2Ehs3ELMjlp8CdbWl7N7/DaXFWyivPkC9p44Thl9CQcV+3t/2OtFmB/G2WCzKhAlFgiOJYZknsTN/BYeqD9I/NouKwo2YKw5wuSUeW8ZE6Dceb9oIvtj1MQsLVgCaGnyUax/9lYWxJgexZjsF7gqqtIcMk4OL43Kwm2ysLNtKvqeaHZ4KSusUKX5NImZ2ecopqT+IJ3DP4vq64i5L6AeA7EbPszh88xMArXVJo6fPAA8de2gdLzvpcAu9pRr64VGldQ31cFdDDT18vS0nOYrvdpWwen8Z4zLjZQZGIY6BMltISGs6X44zKonRIy4CLmqyfQAwZcpPw55r7PjrWnwvC3D2cbM5u50xXtPosXbXoqxOYy4hQHs91NcWgdZY2jlOoK3akmFWAEOVUoOUUjbgSuD9xgcopfo1enoBsCVyIXacfvHO4LVusYU+qn8cSsG6RgtdtKWFPqZ/PIVVLi7597c8+vXOiMQshOgZlC2qIZkDKIsVZ1x/nPGZWKMSO+Q9W03oWmsv8AtgAUaiflNrvUkpda9SKvhX0y+VUpuUUuuAXwLzOiTaCLNZTA2t75Zq6DF2C0NSY5osXtFwU9QS/nU/nj6IBbfOYEhaTENvGiGE6ChtqqFrrT8GPm627Q+NHv8G+E1kQ+sc2YlR5FfUt9hCBxiXlcCi7YUN81+7Qszl0pxSiuEZsUzJSeTjDYciMne2EEKE0+eLusGeLi3V0AGOz46nuNrdMJnX4blcWu+zOjYzgYo6jywgLYToUJLQgwnd3HoLHWgou4SbyyX0a+ObvLai1sN/VuXh93d5Rx8hRC/S5xP6JROzuPOs4cTYW64+jegXi81sYl1uORB+tsVQhqUbr91wwEjoz3+7hzveWsdzS/ccW/BCCNFIn0/oOSnR/PzUIa3Wtu0WMyP7xzVMAeDy+jGp1ks1YLTiR/aPY32gl8yXWwoBeOjTbWw+WHlsn4AQQgT0+YTeHtOOS2b1/nIq6jy4vcbyc229yTkuM56NByrJLa1lw4EKbpw+iBiHhX9+uaP1FwshRBtIQm+H00ak4fNrFu8owuX1t+mGaNDpo9Kpdnn58YsrAbh8cjanjUhj2Z4SqaULISJCEno7TBiQSEKUla+2FuLy+tp0QzRo5rBULp6QybaCKrKTnAxNi+GEQUmU1XrYUVjdgVELIfoKSejtYDYpZgxNZdG2Iuo9/jbdEG3snvNHk5ng5ILx/VFKceLgZACW7Slp5ZWGeo+P8lp36wcKIfokSejtdNqINEpq3Hy9rbBdLXSA+CgrC++cxR1nGlNsZiU66R/vYNnu0lZf6/drfvziSi58bClaS4lGCHEkSejtdPaYDC6flIXb6yczwdn6C5qxmg/fSFVKccLgZJbtKWk1Sb+ybB9Ldhazr6SWbQVV5FfU8f3utrXsi6td1Li8gNHd0uvztztuIUT3Jwm9nRxWMw9fPp41fziD5+ZNOebznTg4ieJqNyvDzPVSWuPmr59t4/6PtzBhQAIAC7cV8dt3NvDDZ5aR28roU601l/z7W25/cx1aa656+nt+/Z/1xxy3EKL7kYR+lOwWc4sTerXVueP6kx5n574PN1Ne6+b15ft5c0Uuu4qqcXv9XPPMMh79eienDEnlyR9OYkRGLG+uyGXh9iK8fs3ji3axaHsR936wmcp6Dx+uP8htb65tGMm68UAl+0tr+XxLAZ9uPMSqfWV8s6NIyjZC9EKRWuBCHKUYu4XfnDOSW99Yy0l//oo6j5GIHVYTM4amsiW/kievncRZozMAmDk8lScX7cZiUswelcZbK3N5Y0UuPr/mw/UHKaxyATB5YBJXnzCAzzYfwqTA59fc8dY6AIqr3eSW1jEguWPmZBZCdA1poXcDFx7fn9NHpjMuK553f3YyX9w2kyFpMXy2uYBLJ2Y1JHOAWcPSAJgzth+/O3cUJqU4cXASz/9oChaT4vJJWYzPiuffC3fi8fn5bFMBUwclMW1IMjVuH1NyjHmYmy96LYTo+VRX/ek9efJkvXLlyi55756g2uXl3dV5XDQhk1iHtWG71+fnoQXbuGrqAAalRFNc7SIxyobZpBqm5/1ySwE3vLiS88b148P1+fz+vFHkJEfx01dX89+fT+PSx7/l8klZ/O+FY1qIQAjRHSmlVmmtJ4faJyWXbirGbuHak3KO2G4xm/jtnJENz1MC650CDb1nThuRxgXj+/PxhnysZsWZo9LJTopi/R/PxGE1Mz4rgdX7yzv6UxBCdDJJ6L2QUop/XjWBhy4bR0Wdh/TAqkwOqzFVwcSBCTy5aDd1bh9OW9unLxBCdG9SQ+/FHFZzQzJvbNLARLx+zZpcqaML0ZtIQu+Dpg5KxmpWLNpe1NWhCCEiSBJ6HxRjtzB5YBKLtklCF6I3kYTeR80ansrWQ1Ucqqjv6lCEEBEiCb2Pmjk8FYBF2wu7OBIhRKRIQu+jhqfHkhHnkDq6EL2IJPQ+SinFzGGpLN5RLLMvCtFLSELvw2YNT6Wq3sua3PKuDkUIEQGS0Puwk4ekYDYpFm4rxO/X1AcmBhNC9EwyUrQPi3damTQgkc83F7B0Zwl1bh/v3zKtXYtfCyG6D2mh93Ezh6eyvaCadXnlbCuo4rkleymtcZNfUdfVoYW1s7CK4mpXV4chmvH5NTsLq7o6jD5NEnofd/aYDJKjbTx46TjOGJXO37/Yzkl//pKz/76Ywqru10f9QHkd5/9rKT98ZpnczO1mnlm8m9Mf+YaVe1tfI1d0jDYldKXU2UqpbUqpnUqpu0Lstyul3gjsX6aUyol4pKJDHJcaw8rfnc4Vk7P5w3mjyEp0cu64ftR5fPzxv5u6Orwj3PP+Jtw+P1sPVTF/+f4Of7+Ve0u58611HCzvvn+xdAcur49nl+wB4IFPtvbIFbFcXh8+f8+Lu7FWa+hKKTPwGHAGk
AesUEq9r7Xe3OiwG4AyrfUQpdSVwIPADzoiYBF5wWl3s5Oi+PL2WYCR6B9esI0T7/+ShCgrEwYkkpMcRUqMnYQoK1sPVbG3uIYR/eLIL69je2E1OclRDEuP5bjUGGIdFlxeP9UuL5kJDspqPazLLSc9zkFarDHlb4zDgs+v2V9aS2WdlxqXl1q3D7vVRJzDSrzTSpzTQo3Lx+IdRWw8WMm63HLuOmcEi3cU8fCCbewvqWXiwERGZMRSVuvGpBSDU2LQGD+YTpuZGpePWrcXk1KYlKKgsp4Ve0sxKUVGvIPBqdGYlMLl8eO0mVm4rZClO4txWM18trkAn1+zdGcxD142jpH94jArhdevqahz89XWQjw+zZyx/UiPazSVMarR9Q11zY2PtS4f2wqqiHdaGZ4eS1W9F7/WJERZG74ubVHt8vLnj7fw/e4Szhydwekj0xnVLw4VWK1KAw6LCUuzZRO9Pj8r9pbx+eYC1uWVc/64flw+ORuvXxPnsLQ5hvfXGqtlBefgf215LldMzqLG5aOy3oPNYiIlxo7Z1PbPKZx6j4/CShf9ExxHfD5Ho7Lew8vf7ePRr3ZiMStmDE1l7sk5TMlJbNfXoDtodYELpdRJwD1a67MCz38DoLX+c6NjFgSO+U4pZQEOAam6hZPLAhfdm8fn57Gvd3KgrI6CKhdr9pVR5fI2OSYp2kZpjRubxcRxqTHkltZS3eyYSImxWxibGc/EgQncevowDpTV8eu317M2txy3N/Kll+NSo6n3+DlhcBI/mJzNz+ev6fC6vc1iavhcLCaFKZD8FAR+GRm/fJUyngc/mhTUuX3UenxMGpDImtzysC1Nm9mEw2rCaTNjUorKOg81bh82i4mBSVHsKKxuONZuMZEUbUNr8Ovgr8jQKus8DEqJ5oNbTuHCR5eyOb+yyecDYDWrhsVYquq91Ht8WMwKi8kU+Nj0sVLGoi3B9w3GkV9Rj8+vsZlNxEdZURi/IBUq8PFwI0WppvuC15PAMV6/n7yyOrSGM0alkxRl47PNhyir9RBrt5AcY2uIwx94/1Ca5/3Gv9BDHXP11AH8ZOZxLVzR8Fpa4KItCf0y4Gyt9Y8Dz68FTtBa/6LRMRsDx+QFnu8KHFPc7Fw3ATcBDBgwYNK+ffuO6hMSnU9rTbXLS3G1m9IaFwOSokmNtVNYWU+sw4rTZkZrzYHyOvYW11Lj9mKzmIixW8grq8VptTBxYAJFVS7Kaz0AVNV7UQoGJEWRGGUj2m4mymbB5fVRWeelst5DZZ1x7LisBGyWI1tjbq+fzfmVbC+oIjXGjsfnZ19JLRazwq+hzu0lxm4hymZBY/xQxjosTB2UhNVk4kB5HbuLa1AYCayy3suofnGM6h/X5H0qaj2szi1jX3ENAGazCbvFxEmDkzGbjFWiat1Gt8/GP1HBH6/G6bDxj5zNbGJIegzFVS625FeREW/HbDJRUu3CrwOvM/7h9+uGbVrTJMkoBRcen8mUnCRKql2s2FvGrqJqTEoRbMTWe/zUeXxG8nd70RqibGZOOi6Z6UNTibKZWbyjmA0HKrBbTBRWuSirMf7qMZmAEEmqscsmZTFpYCL1Hh9fby1k2Z5S+ic4SIiy4fL6OVBWR0WdG69PE+uw4rCa8Pk1Hp/G5/fj8Wu8Pj9ev8brO3yRgknaeKzITHCSlehkT3ENlfWewLVodF04/JyG57rJ16bx1+C41BhOGZrCpIHG8ox1bh//XXuArYeqKKlxA2AK/hINdRl0i08D79d062kj07lgfP8Wr2c43SahNyYtdCGEaL+WEnpbClAHgOxGz7MC20IeEyi5xAMl7Q9VCCHE0WpLQl8BDFVKDVJK2YArgfebHfM+MDfw+DLgq5bq50IIISKv1V4uWmuvUuoXwALADDyntd6klLoXWKm1fh94FnhZKbUTKMVI+kIIITpRm4b+a60/Bj5utu0PjR7XA5dHNjQhhBDtISNFhRCil5CELoQQvYQkdCGE6CUkoQshRC/R6sCiDntjpYqAoxkqmgKEHbDUhSSu9uuusUlc7dNd44LuG9uxxDVQa50aakeXJfSjpZRaGW6UVFeSuNqvu8YmcbVPd40Lum9sHRWXlFyEEKKXkIQuhBC9RE9M6E91dQBhSFzt111jk7jap7vGBd03tg6Jq8fV0IUQQoTWE1voQgghQpCELoQQvUSPSeitLVTdwe+drZT6Wim1WSm1SSn1q8D2e5RSB5RSawP/5zR6zW8CsW5TSp3VwfHtVUptCMSwMrAtSSn1uVJqR+BjYmC7Ukr9MxDbeqXUxA6KaXij67JWKVWplLq1K66ZUuo5pVRhYCGW4LZ2Xx+l1NzA8TuUUnNDvVeEYntYKbU18P7vKqUSAttzlFJ1ja7dE41eMynwPbAzEP8xLYYZJq52f+0i/XMbJq43GsW0Vym1NrC9M69XuBzRud9nWutu/x9j2t5dwGDABqwDRnXi+/cDJgYexwLbgVHAPcAdIY4fFYjRDgwKxG7uwPj2AinNtj0E3BV4fBfwYODxHOATjIW0TgSWddLX7xAwsCuuGTADmAhsPNrrAyQBuwMfEwOPEzsotjMBS+Dxg41iy2l8XLPzLA/EqwLxn9MBcbXra9cRP7eh4mq2/6/AH7rgeoXLEZ36fdZTWuhTgZ1a691aazfwOnBhZ7251jpfa7068LgK2AJktvCSC4HXtdYurfUeYCfG59CZLgReDDx+Ebio0faXtOF7IEEp1a+DY5kN7NJatzQyuMOumdb6G4x5+pu/X3uuz1nA51rrUq11GfA5cHZHxKa1/kxrHVxt+3uMVcLCCsQXp7X+XhtZ4aVGn0/E4mpBuK9dxH9uW4or0Mq+AnitpXN00PUKlyM69fuspyT0TCC30fM8Wk6oHUYplQNMAJYFNv0i8CfTc8E/p+j8eDXwmVJqlTIW4gZI11rnBx4fAtK7KDYwFjxp/EPWHa5Ze69PV30PXo/RkgsapJRao5RapJSaHtiWGYinM2Jrz9eus6/ZdKBAa72j0bZOv17NckSnfp/1lITeLSilYoC3gVu11pXA48BxwPFAPsafe13hFK31ROAc4OdKqRmNdwZaIV3SP1UZyxZeALwV2NRdrlmDrrw+LVFK3Q14gVcDm/KBAVrrCcBtwHylVFwnhtTtvnbNXEXThkOnX68QOaJBZ3yf9ZSE3paFqjuUUsqK8YV6VWv9DoDWukBr7dNa+4GnOVwi6NR4tdYHAh8LgXcDcRQESymBj4VdERvGL5nVWuuCQIzd4prR/uvTqfEppeYB5wHXBBIBgZJGSeDxKoz69LBAHI3LMh0S21F87TrtmiljcfpLgDcaxdup1ytUjqCTv896SkJvy0LVHSZQm3sW2KK1fqTR9sa154uB4J3394ErlVJ2pdQgYCjGTZiOiC1aKRUbfIxxQ20jTRfungv8t1Fs1wXusp8IVDT6k7AjNGk1dYdr1uj92nN9FgBnKqUSA6WGMwPbIk4pdTbwa+ACrXVto+2pSilz4PFgjGu0OxBfpVLqxMD36nWNPp9IxtXer11n/tyeDmzVWjeUUjrzeoXL
EXT299mx3NntzP8Yd4W3Y/yWvbuT3/sUjD+V1gNrA//nAC8DGwLb3wf6NXrN3YFYt3GMd9BbiW0wRu+BdcCm4LUBkoEvgR3AF0BSYLsCHgvEtgGY3IGxRQMlQHyjbZ1+zTB+oeQDHoya5A1Hc30w6tk7A/9/1IGx7cSoowa/154IHHtp4Gu8FlgNnN/oPJMxEuwu4FECo8AjHFe7v3aR/rkNFVdg+wvAzc2O7czrFS5HdOr3mQz9F0KIXqKnlFyEEEK0QhK6EEL0EpLQhRCil5CELoQQvYQkdCGE6CUkoYs+TRkzQEZ1dRxCRIJ0WxR9mlJqL0Yf4OKujkWIYyUtdNFnBEbVfqSUWqeU2qiU+iPQH/haKfV14JgzlVLfKaVWK6XeCszNEZxz/iFlzKG9XCk1pCs/FyFCkYQu+pKzgYNa6/Fa6zHA34GDwKla61OVUinA74DTtTHZ2UqMSZ2CKrTWYzFGFv69UyMXog0koYu+ZANwhlLqQaXUdK11RbP9J2IsSrBUGavezMVYlCPotUYfT+roYIVoL0tXByBEZ9Fab1fGUl9zgD8ppb5sdojCWFzgqnCnCPNYiG5BWuiiz1BK9QdqtdavAA9jLGVWhbFkGBirA00L1scDNfdhjU7xg0Yfv+ucqIVoO2mhi75kLPCwUsqPMVvfTzFKJ58qpQ4G6ujzgNeUUvbAa36HMVsgQKJSaj3gwpgWWIhuRbotCtEG0r1R9ARSchFCiF5CWuhCCNFLSAtdCCF6CUnoQgjRS0hCF0KIXkISuhBC9BKS0IUQopf4/+EJcmPE6FgSAAAAAElFTkSuQmCC", + "text/plain": [ + "
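+  {
+   "cell_type": "markdown",
+   "id": "f00dcafe",
+   "metadata": {},
+   "source": [
+    "The stderr stream of the next cell is produced by constrained generation. Below is a minimal sketch of the kind of call behind it, assuming synthcity's `Constraints` helper and its `(feature, operation, value)` rule format; `count=7` is only inferred from the `prev length 7` entries in the log, and the fitted `plugin` comes from the earlier cells:\n",
+    "\n",
+    "```python\n",
+    "from synthcity.plugins.core.constraints import Constraints\n",
+    "\n",
+    "# Rows violating a rule are dropped and re-sampled; every filtering pass\n",
+    "# is logged as a 'quality loss for constraints' line, as shown below.\n",
+    "constraints = Constraints(\n",
+    "    rules=[\n",
+    "        (\"alcohol\", \"ge\", 8.0),\n",
+    "        (\"alcohol\", \"le\", 14.2),\n",
+    "        (\"residual sugar\", \"ge\", 0.6),\n",
+    "    ]\n",
+    ")\n",
+    "X_syn = plugin.generate(count=7, constraints=constraints)\n",
+    "```"
+   ]
+  },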
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "plugin.loss_history.plot()" + ] + }, + { + "cell_type": "code", + "execution_count": 45, + "id": "2ea981cd", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([3, 4, 5, 6, 7, 8, 9])" + ] + }, + "execution_count": 45, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "outcome = np.array([3, 4, 5, 6, 7, 8, 9])\n", + "outcome" + ] + }, + { + "cell_type": "code", + "execution_count": 46, + "id": "bbd33233", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[2023-03-27T18:05:59.734678+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:05:59.737612+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", + "[2023-03-27T18:06:00.157952+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:00.160095+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:00.484737+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:00.485757+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:00.786487+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 1. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:00.788466+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 1. Original dtype float64.\n", + "[2023-03-27T18:06:01.100020+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:01.102261+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:06:01.460078+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:01.462163+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", + "[2023-03-27T18:06:01.805568+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:01.807568+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:06:02.183897+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:02.185904+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", + "[2023-03-27T18:06:02.569835+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:02.571874+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. 
Original dtype float64.\n", + "[2023-03-27T18:06:03.033272+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:03.035146+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:06:03.579187+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:03.582312+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:04.128201+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:04.131216+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:04.909594+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:04.912681+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", + "[2023-03-27T18:06:05.506491+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:05.509890+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", + "[2023-03-27T18:06:06.285555+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:06.287092+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:06:06.748144+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:06.751143+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:06:07.239364+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:07.241364+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", + "[2023-03-27T18:06:07.833861+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:07.835862+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", + "[2023-03-27T18:06:08.270020+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:08.273103+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", + "[2023-03-27T18:06:08.579762+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:08.581664+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:08.995746+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. 
Remaining 1. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:08.996750+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 1. Original dtype float64.\n", + "[2023-03-27T18:06:09.387130+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:09.389133+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:09.913255+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:09.915271+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:06:10.414403+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:10.417511+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:10.986099+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:10.988092+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:11.384006+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 0. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:11.699391+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 1. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:11.700392+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 1. Original dtype float64.\n", + "[2023-03-27T18:06:12.138923+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:12.140923+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:12.604077+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:12.606674+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", + "[2023-03-27T18:06:12.997333+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:12.999663+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:06:13.547570+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:13.550541+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:13.954516+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:13.956516+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. 
Original dtype float64.\n", + "[2023-03-27T18:06:14.445116+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:14.452112+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", + "[2023-03-27T18:06:14.829066+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:14.832071+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", + "[2023-03-27T18:06:15.312829+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:15.315831+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:15.757355+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:15.759926+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:06:16.143136+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:16.145134+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", + "[2023-03-27T18:06:16.560027+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:16.562025+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:16.861918+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:16.863918+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:17.183558+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 0. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:17.637582+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:17.640281+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", + "[2023-03-27T18:06:17.997687+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:17.998689+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", + "[2023-03-27T18:06:18.345381+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:18.347383+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:18.676026+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:18.678607+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. 
Remaining 0. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:06:19.007549+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:19.010506+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:06:19.346424+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:19.348531+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:19.696186+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:19.697186+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:20.073809+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 6. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:20.077249+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 6. Original dtype float64.\n", + "[2023-03-27T18:06:20.472414+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:20.475399+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", + "[2023-03-27T18:06:20.942265+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:20.944268+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:21.302342+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 1. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:21.304314+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 1. Original dtype float64.\n", + "[2023-03-27T18:06:21.665401+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:21.666987+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:22.067854+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 1. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:22.069371+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 1. Original dtype float64.\n", + "[2023-03-27T18:06:22.392718+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:22.395677+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", + "[2023-03-27T18:06:22.716515+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:22.717515+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. 
Original dtype float64.\n", + "[2023-03-27T18:06:23.047434+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:23.049434+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", + "[2023-03-27T18:06:23.399152+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:23.401151+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:23.745625+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:23.747624+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:24.098540+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:24.099540+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", + "[2023-03-27T18:06:24.421854+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:24.422839+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:24.738758+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:24.739667+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:25.058648+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:25.060550+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:25.399681+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:25.401599+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:06:25.737806+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:25.738793+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:26.069784+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:26.071290+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:06:26.416549+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:26.418554+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:26.801542+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. 
Remaining 4. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:26.803529+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", + "[2023-03-27T18:06:27.139240+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:27.141225+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", + "[2023-03-27T18:06:27.488070+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:27.490052+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:06:27.823788+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:27.824814+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:28.163857+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:28.166838+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", + "[2023-03-27T18:06:28.499341+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:28.501342+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", + "[2023-03-27T18:06:28.823408+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:28.825499+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", + "[2023-03-27T18:06:29.125222+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:29.128129+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:06:29.492914+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:29.496428+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:06:29.833079+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:29.835167+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:06:30.217776+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:30.219777+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", + "[2023-03-27T18:06:30.536676+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. 
Original dtype float64.\n", + "[2023-03-27T18:06:30.538667+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", + "[2023-03-27T18:06:30.861816+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:30.863812+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:31.177127+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:31.180126+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:31.606751+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:31.607978+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", + "[2023-03-27T18:06:31.949768+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:31.951785+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", + "[2023-03-27T18:06:32.289786+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:32.291671+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:06:32.629730+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 0. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:32.942556+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:32.945560+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 1. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:06:32.949202+0200][38480][INFO] [residual sugar] quality loss for constraints ge = 0.6. Remaining 0. prev length 1. Original dtype float64.\n", + "[2023-03-27T18:06:33.286281+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:33.287280+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:33.620445+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:33.622445+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:33.945427+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:33.947494+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:06:34.298877+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:34.300955+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. 
Remaining 0. prev length 5. Original dtype float64.\n", + "[2023-03-27T18:06:34.618880+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:34.620789+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", + "[2023-03-27T18:06:34.959467+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:34.961383+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", + "[2023-03-27T18:06:35.296247+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:35.298303+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:35.763113+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:35.765112+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:06:36.178981+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:36.181338+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:06:36.555008+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:36.555991+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:36.880093+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:36.881104+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:37.299044+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:37.301205+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:06:37.708557+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:37.711544+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:06:38.087165+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:38.089166+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", + "[2023-03-27T18:06:38.482563+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 1. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:38.483562+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 1. 
Original dtype float64.\n",
+ "[2023-03-27T18:06:38.941184+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n",
+ "[2023-03-27T18:06:38.942166+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n",
+ "... (the pair of messages above repeats for every sampling batch over the next 52 seconds: per batch the alcohol le = 14.2 check leaves 1-6 of the 7 candidate rows, the ge = 8.0 check then leaves 0, except twice where a [residual sugar] bound, le = 65.8 or ge = 0.6, removes the last remaining row) ...\n",
+ "[2023-03-27T18:07:31.139806+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n",
+ "[2023-03-27T18:07:31.140809+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n"
+ ]
+ },
+ {
+ "ename": "KeyboardInterrupt",
+ "evalue": "",
+ "output_type": "error",
+ "traceback": [
+ "---------------------------------------------------------------------------",
+ "KeyboardInterrupt                         Traceback (most recent call last)",
+ "<ipython-input> in <module>\n----> 1 plugin.generate(7, cond=outcome)\n",
+ "d:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd in pydantic.decorator.validate_arguments.validate.wrapper_function()\n",
+ "d:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd in pydantic.decorator.ValidatedFunction.call()\n",
+ "d:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd in pydantic.decorator.ValidatedFunction.execute()\n",
+ "d:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\core\\plugin.py in generate(self, count, constraints, random_state, **kwargs)\n    337         syn_schema = Schema.from_constraints(gen_constraints)\n    338 \n--> 339         X_syn = self._generate(count=count, syn_schema=syn_schema, **kwargs)\n    340 \n    341         if X_syn.is_tabular():\n",
+ "d:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_ddpm.py in _generate(self, count, syn_schema, **kwargs)\n    246             return data\n    247 \n--> 248         return self._safe_generate(callback, count, syn_schema, **kwargs)\n    249 \n    250 \n",
+ "d:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd in pydantic.decorator.validate_arguments.validate.wrapper_function()\n",
+ "d:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd in pydantic.decorator.ValidatedFunction.call()\n",
+ "d:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd in pydantic.decorator.ValidatedFunction.execute()\n",
+ "d:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\core\\plugin.py in _safe_generate(self, gen_cbk, count, syn_schema, **kwargs)\n    391         for it in range(self.sampling_patience):\n    392             # sample\n--> 393             iter_samples = gen_cbk(count, **kwargs)\n    394             iter_samples_df = pd.DataFrame(\n    395                 iter_samples, columns=self.training_schema().features()\n",
+ "d:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_ddpm.py in callback(count)\n    241 \n    242         def callback(count):  # type: ignore\n--> 243             data = self.model.generate(count, cond=cond)\n    244             if self.is_classification:\n    245                 data = np.insert(data, self.target_iloc, cond, axis=1)\n",
+ "d:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\core\\models\\tabular_ddpm\\__init__.py in generate(self, count, cond)\n    211         if cond is not None:\n    212             cond = torch.tensor(cond, dtype=torch.long, device=self.device)\n--> 213         sample = self.diffusion.sample_all(count, cond).detach().cpu().numpy()\n    214         sample = sample[:, self._col_perm]\n    215         return sample\n",
+ "d:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\core\\models\\tabular_ddpm\\gaussian_multinomial_diffsuion.py in sample_all(self, num_samples, cond, max_batch_size, ddim)\n    951 \n    952         for b in bs:\n--> 953             sample = sample_fn(b, cond)\n    954             if torch.any(sample.isnan()).item():\n    955                 raise ValueError(\"found NaNs in sample\")\n",
+ "d:\\DevTools\\Miniconda\\lib\\site-packages\\torch\\autograd\\grad_mode.py in decorate_context(*args, **kwargs)\n     25     def decorate_context(*args, **kwargs):\n     26         with self.clone():\n---> 27             return func(*args, **kwargs)\n     28     return cast(F, decorate_context)\n     29 \n",
+ "d:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\core\\models\\tabular_ddpm\\gaussian_multinomial_diffsuion.py in sample(self, num_samples, cond)\n    918             debug(f\"Sample timestep {i:4d}\", end=\"\\r\")\n    919             t = torch.full((b,), i, device=device, dtype=torch.long)\n--> 920             model_out = self.denoise_fn(\n    921                 torch.cat([z_norm, log_z], dim=1).float(), t, y=cond\n    922             )\n",
+ "d:\\DevTools\\Miniconda\\lib\\site-packages\\torch\\nn\\modules\\module.py in _call_impl(self, *input, **kwargs)\n   1192         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks\n   1193                 or _global_forward_hooks or _global_forward_pre_hooks):\n-> 1194             return forward_call(*input, **kwargs)\n   1195         # Do not call functions when jit is used\n   1196         full_backward_hooks, non_full_backward_hooks = [], []\n",
+ "d:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\core\\models\\tabular_ddpm\\modules.py in forward(self, x, t, y)\n    111             emb += self.emb_nonlin(self.label_emb(y))\n    112         x = self.proj(x) + emb\n--> 113         return self.model(x)\n    114 \n    115 \n",
+ "d:\\DevTools\\Miniconda\\lib\\site-packages\\torch\\nn\\modules\\module.py in _call_impl(self, *input, **kwargs)\n-> 1194             return forward_call(*input, **kwargs)\n",
+ "d:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd in pydantic.decorator.validate_arguments.validate.wrapper_function()\n",
+ "d:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd in pydantic.decorator.ValidatedFunction.call()\n",
+ "d:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd in pydantic.decorator.ValidatedFunction.execute()\n",
+ "d:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\core\\models\\mlp.py in forward(self, X)\n    398     @validate_arguments(config=dict(arbitrary_types_allowed=True))\n    399     def forward(self, X: torch.Tensor) -> torch.Tensor:\n--> 400         return self.model(X.float())\n    401 \n    402     def _train_epoch(self, loader: DataLoader) -> float:\n",
+ "d:\\DevTools\\Miniconda\\lib\\site-packages\\torch\\nn\\modules\\module.py in _call_impl(self, *input, **kwargs)\n-> 1194             return forward_call(*input, **kwargs)\n",
+ "d:\\DevTools\\Miniconda\\lib\\site-packages\\torch\\nn\\modules\\container.py in forward(self, input)\n    202     def forward(self, input):\n    203         for module in self:\n--> 204             input = module(input)\n    205         return input\n    206 \n",
+ "d:\\DevTools\\Miniconda\\lib\\site-packages\\torch\\nn\\modules\\module.py in _call_impl(self, *input, **kwargs)\n-> 1194             return forward_call(*input, **kwargs)\n",
+ "d:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd in pydantic.decorator.validate_arguments.validate.wrapper_function()\n",
+ "d:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd in pydantic.decorator.ValidatedFunction.call()\n",
+ "d:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd in pydantic.decorator.ValidatedFunction.execute()\n",
+ "d:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\core\\models\\mlp.py in forward(self, X)\n    112     @validate_arguments(config=dict(arbitrary_types_allowed=True))\n    113     def forward(self, X: torch.Tensor) -> torch.Tensor:\n--> 114         return self.model(X.float()).to(self.device)\n    115 \n    116 \n",
+ "d:\\DevTools\\Miniconda\\lib\\site-packages\\torch\\nn\\modules\\module.py in _call_impl(self, *input, **kwargs)\n-> 1194             return forward_call(*input, **kwargs)\n",
+ "d:\\DevTools\\Miniconda\\lib\\site-packages\\torch\\nn\\modules\\container.py in forward(self, input)\n--> 204             input = module(input)\n",
+ "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m 1192\u001b[0m if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks\n\u001b[0;32m 1193\u001b[0m or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[1;32m-> 1194\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m 
\u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 1195\u001b[0m \u001b[1;31m# Do not call functions when jit is used\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1196\u001b[0m \u001b[0mfull_backward_hooks\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mnon_full_backward_hooks\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\torch\\nn\\modules\\linear.py\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, input)\u001b[0m\n\u001b[0;32m 112\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 113\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mforward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m:\u001b[0m \u001b[0mTensor\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m->\u001b[0m \u001b[0mTensor\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 114\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlinear\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mweight\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mbias\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 115\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 116\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mextra_repr\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m->\u001b[0m \u001b[0mstr\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;31mKeyboardInterrupt\u001b[0m: " + ] + } + ], + "source": [ + "plugin.generate(len(outcome), cond=outcome)" + ] + }, + { + "cell_type": "markdown", + "id": "ea5abc50", + "metadata": {}, + "source": [ + "## Congratulations!\n", + "\n", + "Congratulations on completing this notebook tutorial! If you enjoyed this and would like to join the movement towards Machine learning and AI for medicine, you can do so in the following ways!\n", + "\n", + "### Star [Synthcity](https://github.com/vanderschaarlab/synthcity) on GitHub\n", + "\n", + "- The easiest way to help our community is just by starring the Repos! 
This helps raise awareness of the tools we're building.\n", + "\n", + "\n", + "### Checkout other projects from vanderschaarlab\n", + "- [HyperImpute](https://github.com/vanderschaarlab/hyperimpute)\n", + "- [AutoPrognosis](https://github.com/vanderschaarlab/autoprognosis)\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.1" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From bcdce4b793cc5ad38b1687b28d5821a4bdea579e Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Mon, 27 Mar 2023 20:53:58 +0200 Subject: [PATCH 28/95] add TabDDPM tutorial --- ...al8_tabular_modelling_with_diffusion.ipynb | 1936 +++++++++++++++++ 1 file changed, 1936 insertions(+) create mode 100644 tutorials/tutorial8_tabular_modelling_with_diffusion.ipynb diff --git a/tutorials/tutorial8_tabular_modelling_with_diffusion.ipynb b/tutorials/tutorial8_tabular_modelling_with_diffusion.ipynb new file mode 100644 index 00000000..97e38401 --- /dev/null +++ b/tutorials/tutorial8_tabular_modelling_with_diffusion.ipynb @@ -0,0 +1,1936 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "id": "97e2d93c", + "metadata": {}, + "source": [ + "# Tutorial 8: Modelling tabular data with diffusion models\n", + "\n", + "This tutorial demonstrates hot to use a denoising diffusion probabilistic model (DDPM) to synthesize tabular data. The algorithm was proposed in [TabDDPM: Modelling Tabular Data with Diffusion Models](https://arxiv.org/abs/2209.15421)." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "696e0157", + "metadata": { + "scrolled": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[KeOps] Warning : \n", + " The default C++ compiler could not be found on your system.\n", + " You need to either define the CXX environment variable or a symlink to the g++ command.\n", + " For example if g++-8 is the command you can do\n", + " import os\n", + " os.environ['CXX'] = 'g++-8'\n", + " \n", + "[KeOps] Warning : Cuda libraries were not detected on the system ; using cpu only mode\n" + ] + } + ], + "source": [ + "# stdlib\n", + "import sys\n", + "import warnings\n", + "sys.path.insert(0, '../src')\n", + "\n", + "# third party\n", + "import numpy as np\n", + "from sklearn.datasets import load_iris, load_diabetes\n", + "\n", + "# synthcity absolute\n", + "import synthcity.logger as log\n", + "from synthcity.plugins import Plugins\n", + "from synthcity.plugins.core.dataloader import GenericDataLoader\n", + "\n", + "log.add(sink=sys.stderr, level=\"INFO\")\n", + "warnings.filterwarnings(\"ignore\")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "54ce9a10", + "metadata": {}, + "source": [ + "## Synthesize a classification dataset\n", + "\n", + "For classification datasets, TabDDPM automatically uses the labels as the conditional variable during training. You should not provide an additional `cond` argument to the `fit` method." + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "51076cdc", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
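+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "id": "aa11bb22",
+   "metadata": {},
+   "source": [
+    "Before fixing the hyper-parameters below, it can help to see which ones the plugin exposes. The next cell is a sketch added for illustration (it is not part of the original tutorial) and assumes the `hyperparameter_space()` method of the common synthcity plugin interface."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "bb22cc33",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Sketch (assumed API): list the tunable hyper-parameters exposed by the\n",
+    "# ddpm plugin, e.g. to decide what to override in plugin_params below.\n",
+    "for hp in Plugins().get(\"ddpm\").hyperparameter_space():\n",
+    "    print(hp)"
+   ]
+  },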
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
sepal length (cm)sepal width (cm)petal length (cm)petal width (cm)target
05.13.51.40.20
14.93.01.40.20
24.73.21.30.20
34.63.11.50.20
45.03.61.40.20
\n", + "
" + ], + "text/plain": [ + " sepal length (cm) sepal width (cm) petal length (cm) petal width (cm) \\\n", + "0 5.1 3.5 1.4 0.2 \n", + "1 4.9 3.0 1.4 0.2 \n", + "2 4.7 3.2 1.3 0.2 \n", + "3 4.6 3.1 1.5 0.2 \n", + "4 5.0 3.6 1.4 0.2 \n", + "\n", + " target \n", + "0 0 \n", + "1 0 \n", + "2 0 \n", + "3 0 \n", + "4 0 " + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Note: preprocessing data with OneHotEncoder or StandardScaler is not needed or recommended. Synthcity handles feature encoding and standardization internally.\n", + "\n", + "X, y = load_iris(return_X_y=True, as_frame=True)\n", + "X[\"target\"] = y\n", + "\n", + "loader = GenericDataLoader(X, target_column=\"target\", sensitive_columns=[])\n", + "\n", + "loader.dataframe().head()" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "52397e4a", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "0 50\n", + "1 50\n", + "2 50\n", + "Name: target, dtype: int64" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "y.value_counts()" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "cda52bea", + "metadata": {}, + "source": [ + "### Model fitting" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "3bf24be4", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[2023-03-27T15:19:24.516935+0200][30696][INFO] Step 100: MLoss: 0.0 GLoss: 0.2235 Sum: 0.2235\n", + "[2023-03-27T15:19:25.913968+0200][30696][INFO] Step 200: MLoss: 0.0 GLoss: 0.2298 Sum: 0.2298\n", + "[2023-03-27T15:19:27.191123+0200][30696][INFO] Step 300: MLoss: 0.0 GLoss: 0.2305 Sum: 0.2305\n", + "[2023-03-27T15:19:28.432055+0200][30696][INFO] Step 400: MLoss: 0.0 GLoss: 0.2273 Sum: 0.2273\n", + "[2023-03-27T15:19:29.766838+0200][30696][INFO] Step 500: MLoss: 0.0 GLoss: 0.2333 Sum: 0.2333\n", + "[2023-03-27T15:19:31.280538+0200][30696][INFO] Step 600: MLoss: 0.0 GLoss: 0.221 Sum: 0.221\n", + "[2023-03-27T15:19:33.034999+0200][30696][INFO] Step 700: MLoss: 0.0 GLoss: 0.2123 Sum: 0.2123\n", + "[2023-03-27T15:19:34.519078+0200][30696][INFO] Step 800: MLoss: 0.0 GLoss: 0.2212 Sum: 0.2212\n", + "[2023-03-27T15:19:36.020932+0200][30696][INFO] Step 900: MLoss: 0.0 GLoss: 0.2014 Sum: 0.2014\n", + "[2023-03-27T15:19:38.330664+0200][30696][INFO] Step 1000: MLoss: 0.0 GLoss: 0.2069 Sum: 0.2069\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# define the model hyper-parameters\n", + "plugin_params = dict(\n", + " is_classification = True,\n", + " n_iter = 1000, # epochs\n", + " lr = 0.002,\n", + " weight_decay = 1e-4,\n", + " batch_size = 1000,\n", + " model_type = \"mlp\", # or \"resnet\"\n", + " num_timesteps = 500, # timesteps in diffusion\n", + " n_layers_hidden = 3,\n", + " dim_hidden = 256,\n", + " dim_embed = 128,\n", + " dropout = 0.0,\n", + " # performance logging\n", + " log_interval = 10,\n", + " print_interval = 100,\n", + ")\n", + "\n", + "plugin = Plugins().get(\"ddpm\", **plugin_params)\n", + "plugin.fit(loader)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "e1a270c9", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "TabDDPM(\n", + " (diffusion): GaussianMultinomialDiffusion(\n", + " (denoise_fn): MLPDiffusion(\n", + " (emb_nonlin): SiLU()\n", + " (proj): Linear(in_features=4, 
out_features=128, bias=True)\n", + " (time_emb): TimeStepEmbedding(\n", + " (fc): Sequential(\n", + " (0): Linear(in_features=128, out_features=128, bias=True)\n", + " (1): SiLU()\n", + " (2): Linear(in_features=128, out_features=128, bias=True)\n", + " )\n", + " )\n", + " (label_emb): Embedding(3, 128)\n", + " (model): MLP(\n", + " (model): Sequential(\n", + " (0): LinearLayer(\n", + " (model): Sequential(\n", + " (0): Linear(in_features=128, out_features=256, bias=True)\n", + " (1): ReLU()\n", + " )\n", + " )\n", + " (1): LinearLayer(\n", + " (model): Sequential(\n", + " (0): Linear(in_features=256, out_features=256, bias=True)\n", + " (1): ReLU()\n", + " )\n", + " )\n", + " (2): LinearLayer(\n", + " (model): Sequential(\n", + " (0): Linear(in_features=256, out_features=256, bias=True)\n", + " (1): ReLU()\n", + " )\n", + " )\n", + " (3): Linear(in_features=256, out_features=4, bias=True)\n", + " )\n", + " (loss): MSELoss()\n", + " )\n", + " )\n", + " )\n", + " (ema_model): MLPDiffusion(\n", + " (emb_nonlin): SiLU()\n", + " (proj): Linear(in_features=4, out_features=128, bias=True)\n", + " (time_emb): TimeStepEmbedding(\n", + " (fc): Sequential(\n", + " (0): Linear(in_features=128, out_features=128, bias=True)\n", + " (1): SiLU()\n", + " (2): Linear(in_features=128, out_features=128, bias=True)\n", + " )\n", + " )\n", + " (label_emb): Embedding(3, 128)\n", + " (model): MLP(\n", + " (model): Sequential(\n", + " (0): LinearLayer(\n", + " (model): Sequential(\n", + " (0): Linear(in_features=128, out_features=256, bias=True)\n", + " (1): ReLU()\n", + " )\n", + " )\n", + " (1): LinearLayer(\n", + " (model): Sequential(\n", + " (0): Linear(in_features=256, out_features=256, bias=True)\n", + " (1): ReLU()\n", + " )\n", + " )\n", + " (2): LinearLayer(\n", + " (model): Sequential(\n", + " (0): Linear(in_features=256, out_features=256, bias=True)\n", + " (1): ReLU()\n", + " )\n", + " )\n", + " (3): Linear(in_features=256, out_features=4, bias=True)\n", + " )\n", + " (loss): MSELoss()\n", + " )\n", + " )\n", + ")" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "plugin.model" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "49b18ada", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAXQAAAEGCAYAAAB1iW6ZAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/P9b71AAAACXBIWXMAAAsTAAALEwEAmpwYAAAwsklEQVR4nO3dd3gU5drH8e+9m7IBAtKkI1EBpYOAKE0EpChgP4Ag2FCPiOdYjlhe5WA7iL2CekDFgiCIKGikqFTpRaqEZkKR0AnJkrL3+8cuOUkIyQJJNru5P9eVi52ZZ2fu2Qm/zD6zO4+oKsYYY4KfI9AFGGOMKRgW6MYYEyIs0I0xJkRYoBtjTIiwQDfGmBARFqgNV6pUSevUqROozRtjTFBasWLFflWtnNuygAV6nTp1WL58eaA2b4wxQUlEdp5umXW5GGNMiLBAN8aYEGGBbowxISJgfejGGJOXtLQ0EhIScLvdgS4lIFwuFzVr1iQ8PNzv51igG2OKpYSEBKKjo6lTpw4iEuhyipSqcuDAARISEoiJifH7eX51uYhIdxHZLCJxIjI8l+W1ReRnEVklImtFpOcZ1G6MMadwu91UrFixxIU5gIhQsWLFM353km+gi4gTeBfoATQA+olIgxzNngYmqWpzoC/w3hlVYYwxuSiJYX7S2ey7P2forYE4Vd2mqqnARKBPjjYKlPU9LgfsPuNK/LRyzae8NfVWMtJTC2sTxhgTlPwJ9BpAfJbpBN+8rEYAA0QkAZgJPJjbikRkiIgsF5HliYmJZ1Eu/J4wnw+PbcTtPnhWzzfGmIL08ccfM3To0ECXARTcxxb7AR+rak2gJzBBRE5Zt6p+oKotVbVl5cq5fnM1X66wKABS3IfPvlpjjAlB/gT6LqBWlumavnlZ3QVMAlDVxYALqFQQBebkCi8FgNsC3RhTyHbs2MEll1zC4MGDqVevHrfddhuzZ8+mbdu21K1bl6VLl57S/uqrr6ZJkyZ07tyZP//8E4DJkyfTqFEjmjZtSocOHQBYv349rVu3plmzZjRp0oQtW7acc73+fGxxGVBXRGLwBnlfoH+ONn8CnYGPReRSvIF+dn0q+YgKLwOA+8TRwli9MaYY+vd369mwu2D/zzeoXpZnezXMt11cXByTJ09m3LhxtGrVii+++IIFCxYwffp0XnzxRa6//vrMtg8++CCDBg1i0KBBjBs3jmHDhjFt2jRGjhxJbGwsNWrU4PDhwwCMGTOGhx56iNtuu43U1FQyMjLOeZ/yPUNX1XRgKBALbMT7aZb1IjJSRHr7mj0C3CMia4AvgcFaSIOVuiKiAXCfOFIYqzfGmGxiYmJo3LgxDoeDhg0b0rlzZ0SExo0bs2PHjmxtFy9eTP/+3vPdgQMHsmDBAgDatm3L4MGD+fDDDzOD+4orruDFF19k1KhR7Ny5k6ioqHOu1a8vFqnqTLwXO7POeybL4w1A23Ouxg9REd4z9JTUY0WxOWNMMeDPmXRhiYyMzHzscDgypx0OB+np6X6tY8yYMSxZsoQZM2Zw2WWXsWLFCvr378/ll1/OjBkz6NmzJ2PHjuXqq68+p1qD7l4urgjvpyNTTligG2OKlyuvvJKJEycC8Pnnn9O+fXsAtm7dyuWXX87IkSOpXLky8fHxbNu2jQsvvJBhw4bRp08f1q5de87bD7qv/rsifV0udoZujClm3n77be644w5Gjx5N5cqVGT9+PACPPfYYW7ZsQVXp3LkzTZs2ZdSoUUyYMIHw8HCqVq3Kk08+ec7bl0Lq6s5Xy5Yt9WwGuIiPX0zPuUN4oVYvel/9YiFUZowpDjZu3Mill14a6DICKrfXQERWqGrL3NoHXZdLlOs8ANxpxwNbiDHGFDNBF+guX6CnpKcEthBjjClmgi7QI13lAHBboBtjTDZBF+jh4aUIU7VAN8aYHIIu0AGiFNwZJwJdhjHGFCtBGeguhRQLdGOMySY4Ax3B7UkLdBnGmBJo8ODBfP3114EuI1dBGehR4iDFYwNcGGNMVkEZ6C6cdoZujCl0zz33HPXr16ddu3b069ePV155JdvyOXPm0Lx5cxo3bsydd97JiRPeruDhw4fToEEDmjRpwqOPPgrkfgvdghZ0X/0HiHI4ceu532rSGBMkfhgOe38v2HVWbQw9/nPaxcuWLWPKlCmsWbOGtLQ0WrRowWWXXZa53O12M3jwYObMmUO9evW4/fbbef/99xk4cCDffPMNmzZtQkQyb5eb2y10C1pwnqFLGCkW6MaYQrRw4UL69OmDy+UiOjqaXr16ZVu+efNmYmJiqFevHgCDBg1i3rx5lCtXDpfLxV133cXUqVMpVco7KE9ut9AtaEF5hu5yhONOt6/+G1Ni5HEmXdyEhYWxdOlS5syZw9dff80777zD3Llzc72FbsWKFQt0236doYtIdxHZLCJxIjI8l+Wvi8hq388fInK4QKvMweWIwI2nMDdhjCnh2rZty3fffYfb7SYpKYnvv/8+2/L69euzY8cO4uLiAJgwYQIdO3YkKSmJI0eO0LNnT15//XXWrFkD5H4L3YKW7xm6iDiBd4GuQAKwTESm+wa1AEBV/5ml/YNA8wKvNAuXMwK3FOYWjDElXatWrejduzdNmjShSpUqNG7cmHLlymUud7lcjB8/nltuuYX09HRatWrFfffdx8GDB+nTpw9utxtV5bXXXgNyv4VuQfOny6U1EKeq2wBEZCLQB9hwmvb9gGcLprzcRTkjcRfmBowxBnj00UcZMWIEycnJdOjQgcsuu4x77rknc3nnzp1ZtWpVtudUq1btlMGjAaZOnVro9foT6DWArO8NEoDLc2soIhcAMcDccy/t9FxOFykOQT0exBGU13WNMUFgyJAhbNiwAbfbzaBBg2jRokWgS8pTQV8U7Qt8rZr7R1BEZAgwBKB27dpnvRFXmAuAEyeO4Ioqf9brMcaYvHzxxReBLuGM+HN6uwuolWW6pm9ebvoCX55uRar6gaq2VNWWlStX9r/KHFxh3tGxU1IOnvU6jDEm1PgT6MuAuiISIyIReEN7es5GInIJUB5YXLAlnioq3Pu5Trf7cGFvyhhjgka+ga6q6cBQIBbYCExS1fUiMlJEemdp2heYqEUwSGlUeBkAUk4cKexNGWNM0PCrD11VZwIzc8x7Jsf0iIIrK2+uCG+gu08cLapNGmNMsReUHxGxQDfGFIUyZcoEuoQzEpSBHhURDYA79ViAKzHGmOIjKAPd5Qv0lLSkAFdijCkJVJXHHnuMRo0a0bhxY7766isA9uzZQ4cOHWjWrBmNGjVi/vz5ZGRkMHjw4My2r7/+epHVGZw354r0fv3WnWo36DKmJBi1dBSbDm4q0HVeUuESHm/9uF9tp06dyurVq1mzZg379++nVatWdOjQgS+++IJu3brx1FNPkZGRQXJyMqtXr2bXrl2sW7cOoNBulZub4DxDd/kCPc0C3RhT+BYsWEC/fv1wOp1UqVKFjh07smzZMlq1asX48eMZMWIEv//+O9HR0Vx44YVs27aNBx98kB9//JGyZcsWWZ1BeYYe5ToPAHd6cmALMc
YUCX/PpItahw4dmDdvHjNmzGDw4ME8/PDD3H777axZs4bY2FjGjBnDpEmTGDduXJHUE6Rn6N6v+6ekpwS4EmNMSdC+fXu++uorMjIySExMZN68ebRu3ZqdO3dSpUoV7rnnHu6++25WrlzJ/v378Xg83HTTTTz//POsXLmyyOoMyjP0iIhoRNUC3RhTJG644QYWL15M06ZNERFefvllqlatyieffMLo0aMJDw+nTJkyfPrpp+zatYs77rgDj8c7ZsNLL71UZHUGZaCLw4FLwZ1uN9E1xhSepCTvJ+lEhNGjRzN69OhsywcNGsSgQYNOeV5RnpVnFZRdLgBRgDvjRKDLMMaYYiNoA92lgtuTGugyjDGm2AjaQI9CSMlIC3QZxphCVAT3+iu2zmbfgzbQXeLArRboxoQql8vFgQMHSmSoqyoHDhzA5XKd0fOC8qIogEvCcHvSA12GMaaQ1KxZk4SEBBITEwNdSkC4XC5q1qx5Rs8J4kB3ctT60I0JWeHh4cTExAS6jKAStF0uUY5wUvAEugxjjCk2/Ap0EekuIptFJE5Ehp+mza0iskFE1otIoY+s6nKE41YLdGOMOSnfLhcRcQLvAl2BBGCZiExX1Q1Z2tQFngDaquohETm/sAo+yeWMIIWSd7HEGGNOx58z9NZAnKpuU9VUYCLQJ0ebe4B3VfUQgKruK9gyT+VyRuKWwt6KMcYED38CvQYQn2U6wTcvq3pAPRFZKCK/iUj33FYkIkNEZLmILD/XK9dRvkBXj3W7GGMMFNxF0TCgLnAV0A/4UETOy9lIVT9Q1Zaq2rJy5crntEFXmIsMEdLT7AZdxhgD/gX6LqBWlumavnlZJQDTVTVNVbcDf+AN+ELjCosCIMV9sDA3Y4wxQcOfQF8G1BWRGBGJAPoC03O0mYb37BwRqYS3C2ZbwZV5KldYKQDc7iOFuRljjAka+Qa6qqYDQ4FYYCMwSVXXi8hIEentaxYLHBCRDcDPwGOqeqCwigaICi8NgPvE4cLcjDHGBA2/vimqqjOBmTnmPZPlsQIP+36KRFR4GQBSTtgZujHGQBB/U9QV4Q1094ljAa7EGGOKhyAO9GgA3KlHA1yJMcYUD0Eb6FGRJwM9KcCVGGNM8RC0ge6KKAtASqp1uRhjDARzoLvKAZCSdjzAlRhjTPEQvIEeeR4A7rTkwBZijDHFRNAGepTrPADc6fbVf2OMgSAO9EgLdGOMySZoA90ZFkGEKikZ7kCXYowxxULQBjqAS8GdfiLQZRhjTLEQ/IHusUA3xhgI8kAvheDOSAt0GcYYUywEdaC7xEGKxwLdGGMg6APdSYpaoBtjDAR9oIfh9mQEugxjjCkWgj/QsUA3xhjwM9BFpLuIbBaROBEZnsvywSKSKCKrfT93F3ypp4pyhONWT1Fsyhhjir18RywSESfwLtAV72DQy0RkuqpuyNH0K1UdWgg1npbLGU4KWpSbNMaYYsufM/TWQJyqblPVVGAi0Kdwy/KPyxGJWwJdhTHGFA/+BHoNID7LdIJvXk43ichaEflaRGrltiIRGSIiy0VkeWJi4lmUm50rzALdGGNOKqiLot8BdVS1CTAL+CS3Rqr6gaq2VNWWlStXPueNRjldpIqQkZ56zusyxphg50+g7wKynnHX9M3LpKoHVPXkd/A/Ai4rmPLy5gqLAuCE+3BRbM4YY4o1fwJ9GVBXRGJEJALoC0zP2kBEqmWZ7A1sLLgST+9koKdYoBtjTP6BrqrpwFAgFm9QT1LV9SIyUkR6+5oNE5H1IrIGGAYMLqyCs4p2lQfgyLH4fFoaY0zoy/djiwCqOhOYmWPeM1kePwE8UbCl5a9mxUtgJyTsW8uFMZ2LevPGGFOsBPU3RWtW83bVxx/8I8CVGGNM4AV1oFesUI8oj5JwLCHQpRhjTMAFdaCLw0EtnMS79we6FGOMCbigDnSAWmFliE8/HugyjDEm4II+0Gu6KpPg8ODJSA90KcYYE1BBH+i1ytYmVYR9iesCXYoxxgRU8Ad6hfoAJOxdHdhCjDEmwII/0Ks2AyD+QM67+RpjTMkS9IFetWpznKrEH9kR6FKMMSaggj7Qw8NLUdUjJCT/FehSjDEmoII+0AFqOaOITzsa6DKMMSagQiPQIyuQQFqgyzDGmIAKjUAvU4PDDuHY0V35NzbGmBAVGoF+3sUAxO9eFuBKjDEmcEIj0M9vDEB84u8BrsQYYwInJAK9ZvWWAMQf3hrgSowxJnD8CnQR6S4im0UkTkSG59HuJhFREWlZcCXmr3SZqlTwKAlJu4tys8YYU6zkG+gi4gTeBXoADYB+ItIgl3bRwEPAkoIu0h81iSAh9VAgNm2MMcWCP2forYE4Vd2mqqnARKBPLu2eA0YB7gKsz2+1IsoRnxGQTRtjTLHgT6DXALKOwpzgm5dJRFoAtVR1Rl4rEpEhIrJcRJYnJiaecbF5qVWqKnsdStoJuze6MaZkOueLoiLiAF4DHsmvrap+oKotVbVl5cqVz3XT2dQsdwEeET6YeTdHj8Tn/wRjjAkx/gT6LqBWlumavnknRQONgF9EZAfQBphe1BdGO7d6iLaUYszRdXSd2oPXvr6BtLTkoizBGGMCyp9AXwbUFZEYEYkA+gLTTy5U1SOqWklV66hqHeA3oLeqLi+Uik+jTHQ1xgxawqQ2z9M+vALjj8fx44IXi7IEY4wJqHwDXVXTgaFALLARmKSq60VkpIj0LuwCz9Sl9fvwcr+5VMxQ5u+aH+hyjDGmyIT500hVZwIzc8x75jRtrzr3ss6NwxlGO1dV5p7YS3qam7BwV6BLMsaYQhcS3xTNTftaHTnmENZumBToUowxpkiEbKBf2fROwlSZFzc9/8bGGBMCQjbQo8vWoDku5h3dEuhSjDGmSIRsoAN0qNSMLQ4Pe3avCHQpxhhT6EI70Bv2A2D+758GuBJjjCl8IR3oMRd0okYGzPtraaBLMcaYQhfSgS4OBx1K12ZJxjHcKXYnRmNMaAvpQAdoH9MNt0NY9vuEQJdijDGFKuQDvXWTQbg8yvztsYEuxRhjClXIB3qkqxyXO6OZd/xP1OMJdDnGGFNoQj7QATpUac0uJ2zf+UugSzHGmEJTIgK9fePbAZi/4csAV2KMMYWnRAR6teqXcbHHwfz9qwNdijHGFJoSEegA7ctexApNIenYnkCXYowxhaLEBHqHi3qTLsJvaz4OdCnGGFMo/Ap0EekuIptFJE5Ehuey/D4R+V1EVovIAhFpUPClnptmDfsS7VHmxc8JdCnGGFMo8g10EXEC7wI9gAZAv1wC+wtVbayqzYCX8Q4aXayEhbu4MrwC81P22scXjTEhyZ8z9NZAnKpuU9VUYCLQJ2sDVT2aZbI0oAVXYsHpUL0d+53Chj+mBboUY4wpcP4Eeg0gPst0gm9eNiLygIhsxXuGPqxgyitY7ZrdicujPLVoBHv3rg50OcYYU6AK7KKoqr6rqhcBjwNP59ZGRIaIyHIRWZ6YmFhQm/ZbhQoX816zf/KXeBg4cwDbtlt/ujEmdIhq3r0jInIFMEJVu/mmnwBQ1ZdO094BH
FLVcnmtt2XLlrp8+fKzKvpcbdo8nfsWPkm6QMuwciSmp3BQ07i71jXc1PXVgNRkjDH+EJEVqtoyt2X+nKEvA+qKSIyIRAB9gWwDdYpI3SyT1wLFety3S+r3ZkLXD4mRCHakHSPK4USBsfGxZKSnBro8Y4w5K2H5NVDVdBEZCsQCTmCcqq4XkZHAclWdDgwVkS5AGnAIGFSYRReEWrWuYMLglZnTsxa8yMNbv2T+8re5qs0jAazMGGPOTr5dLoUlkF0uuUlLS6bbhNbUd5bh/UG/BbocY4zJ1bl2uZQI4eGluKl8YxZqEgkJFujGmOBjgZ7FTVc8jgCTl4wOdCnGGHPGLNCzqFq1GR0d5fjm6GZSTxwLdDnGGHNGLNBz+Nul/TjkEGYttrN0Y0xwsUDP4YoW91EjA6bt/LHA152cvL/A12mMMSdZoOfgcIbRo1x9lmkyhw5uzbZs5q/P8smMIWd1c69v5w7n8smd6D++OZ/MuIe9e1YVVMnGGANYoOfqmkYDyRBhzop3M+e5Uw7xwrYpvLJ/Me9+2/+M1nfo4FZG7/yeizOEdJRX9v9Gt9iBvDX1FtLSkgu6fGNMCWWBnotL6vaiVgb8tHtB5rzYxaM46hBaaARjj65n/Pd3+b2+N2Lv47jA6I6vMOmO1czo9D69I6ry4bFNDPzsSnbs+PWM6tu4+VsOHozLs4075VC+ozPt37+JGb88Y7cTNiZEWKDnQhwOrilXn6VZul0m7fyJmAzhv7ctpLuzPK8dWMrH399Nepo7z3WtWvsZU1P3MjC6HhdfdA0AtWu347n+s3nton7Ek86tPz/AouXv+VXbstXj6L/4Kf71Xb882z0yuSddv+7KjF+eOW2bd2c/xPCd3zB/2Zt+bbukUI+HH34dwcJl7wS6FGPOiAX6aXTzdbvMXfEemzZPZ60jjVurXUlYuIsX//YjnaQsrx5YQu8Jrfhm9mO5dp2kp7l5bsVoqmYo93Ubc8ryru2eZGrPL6iFk3/8/h5r13+VZ00JCb/x8KrXcABLcLNp8/Rc263bMJl5mkQEMHznNzz++VUcO7orW5vkpH3MTPHOe3X9+Hz/MAW7o0fi/bpPT3z8Qu6ZcDn/2jGFx9eNwZ1yqAiqM6ZgWKCfxv+6XeYzadV7RHqUXlc+CXi/VfrmgPm8VW8Q0eLkmV0/0n1Ca96ccjPbd/xCcvJ+vp71MP0+u5wtDg/DLxlIqTLn57qdKlWaMPa6iVRU4e9Ln2Pr1lm5tktO2sewWfeSAXzS+llKeZRPlr+ea9uPVrxBtEeZfuNMHjivKbFp+xn4dY9sn62P/W00yQ5hcOmL2eZUpsz917m9YH7Yv38Thw9tP2X+r7+9xnvf9Mu3Gyk36vHw119r87wWER+/mC5Te3DTpy2ZveClXLuYTriP8NF3g7lx9r2s96TQP+oCjjiEHxblelNRY4olu5dLHt6YcjMfH9tEhMI1kVV5vv/sU9qox8P8ZW8zcfOXLPIkkSFCpEc54RDqehwMrN2N6zv9B3Hk/bczPn4hA2fdixNoH1Wd4xlukjJScYoQIWHsTj/GRknn/Yb3c2WrBxg1uTcTj2/jh+6fUbVqs8z1xG39iRsWPMJ9ZRvxwA1fAjB30cs8tGUCj1S8nMHXfQTAgPEtOKbpTBu8mjs/bc1WdTPjpliioiryxaxhTP9rCc2iqtPtklto0WgAzrAIv16zXbuWMnbek1SKrMDtnUZxXvkYMtJT+XLWMN7au4AyCh90fC2z++nX317jH5vGkS5ClEfpG12Xbo0GsitxA9sObiJMnNx13fhcX7+tW2cxesHTLCSZCFXqahiNSlXnvs6vUanSJZnt/jmhHQvTD1NVHWx3Kg09TvpUa0fj2h25OKYLs357hbe3f8sep9BJyvJUt/c5v3Ijbvy4GeHi4KtBK3Pdvno8uN2HiSpVwa/XJhQlJ+3DFVUBhzPf+/yZApLXvVws0POwYdM0/rbk/wD4vOXTNGn4tzzb70/cyIylrxGftItrL+1Ps0b98w3yrDb/8T0PL3iCZJQyOCgtDjxAqnpIRxlUswu3XOM9K9+1ayk9Z93JoDJ1efjmbzLX8cTnnZiTmshP13/HeeVjMuff/0kbVnuS+L7XVA4d2cENCx7h0UpXMOjaD1i/aSp9lzxLD2cFtqcdZpPDwyUeBzvIwO0Qzs9QXm31BM0a33ba2tPT3HwWO5T3En/z1iwQpfC36LqsStrJakmjLaX4I+M4qQLvt36WlNQj3L/mDepqGP/X5v/4bOU7zExLxCOSbd3PVu/KzV3/N0xtcvJ+3vhuEJNSdlJKYWD5JqSkp7DheAKrNIVLNYJx/X8lIjKapas+4q61b/Jg+Wbc2eNDZswfwZgdM0hwetclqqgIl3qcPNL071zeYkjmdr6KHcbze3/ms8ueommjvtlqSk7axyNTerPOk8Skbp9Qrfplub4uf8T9wHuLn8eBg7JhUVSJqsztXV6ldJmqef4unAlPRjoHDmwmIyOV9IwTVKxQr0j+yBw+tJ0+03pxgUTyRu+vqFDh4kLf5klJx/bw0U8P8rcrnjjtax+qLNDPkno89Pq4KaXEedqztEB69LMOLEo7yKxbZlO6TFXi4xfTa849DCh9MY/eMi1b223b53Ljr8O40VUTlzOSL49vZU7vaZn/CZ/8/Gq+S0/k/AxleL3+dLlyOCnug8xb/h5vx03mkCjj2/6H+vWuA2DvnlW8+/Nj7Ek7QoonnX2axl6ncJVE82SXtzmecoCxi58nNv0gZRUer3M913UcScKuJdwzawiHxPt7V10djL/h28w/Pjt2/MqmP3+h9vlNqVPzSoZNuZZ1nhSmdZ9A1WrNSU9z8+AXV7FIk7jFVYsHrnmH8hUuytzP2PnP8ei2SVwfXoURt87k1gmtSFIP3/abhyuqPOA9rnv3rmLdtlg27ltF3YoN6dbu6VPOMo8n7aXz5C50iqjMS7f9nDn/wP4/eOC7W9ko6UQoNHOUYuyARac8f92Gydy75N8IUFEdHMXDfqdwc2R1nu0bm9kuIz2VibP+QZPanWjc8JbM+erxsHzNeEScNKzXO9eQTjtxnPsnXsUS/ncNpGKGMq7Tm1wY0znvX6A8qMfD4pVjaNbgVkqVqpRrm1GTevFF8nbCFSqp8G7H17gwpjMb/5jOj+s+ocH5zene4fQX5desm8iiuO/o1vTOM671nW/6Mvboei7MED696XvKlat9Rs8PZhbo5yAh4TfCwlzZujWKi9/XT6b/8pFcQRRlnS7iUg/zp8PDjz0mcn6VRqe0/8+kXnyZvJ0ohbbh5Xl1wPzMZUcO7+CH317lurZPUia6Wrbn7dm9goE/DiId+KTz+6zbPosXtk8lHagvEZSWcEo5IugZ05POVz6e7Q/f3r2rKRVVkbLlamXO2/fXOu6deRup6mH8tV/mWutJ8fGLuWn2PbR0luHdAYv491fdmZK6h2eq/e/dSk4n/7NfjosluHnlwlvp1v7//HxVs3vxq2v5OmUns66bSoUKF7Nh8zc8tngEiaKMvvQOEo8mMHLPbJ6qchV9u7+d
+byVaz7l7ytf5jwVPrrmQ2rWbAPA6Ml9+DR5G580e4wWTW/PVi/ANc7zGNruOf6In8+HcV+z2eHt7w9Tpb6GcVONTtzc5VXE4UA9nszX496yDakeXRNV5e34WBzAx13GULt2u7Pa75N/GC/1OHmrx8en/P7Hxy+k95x76RNZnRub3MWwJc9xQqCKOtjq9GZKpEf5pstYatVqe8r6Dx3cyg3T+nDA6X031sDjpFfVNlzX5vFs7yxTTxwjcf9GatRonTnvyOEddPvmOi4gjC2SThNcfNDvZyIio/Pcp9QTxwgPL11gJ2Z//bWW8b8+xdrjCbzd6ysqVqpXIOvNjwV6CHt4QntWpB+irAplJYzra1x12qA7cngH135zHUccwtgG93Flqwf83s627XMZ/Msw3ECKQ2jqCeelLm/n+p/VH+lpbjyetHz/EwJ8/sP9/GffAq4gisWkcE/0pQy7cdJp23sy0vnnFx2Z6znKZRrJ+NuXnvV/4m3b59Jn3kO0l9LszUhhi8PDeR7lnVZP07RRX9Tj4f4JV7DSc5wpXcbidEYybcmrfHz4d6qogw97fpotDJOT9nHDpM64ECbftpAFK97noS0T6B1+PjWizufjw7+T4vCGXJ0M4a4LelC+dBVW71rEoqNxbHBk0MNZgWev/4pvFzzHS3/N457oSxh24+TMbcRt/Yk75z1MpMLH3cZlC8OcUpIPsmr9l7RpcW/mOwxPRjo3f3oZRzWDJIHSCm9dOZKGl9yY+bxHPmvP/LRDzLj2Kyqf35A9u1fw9E/3koZyXbV2NL+oB7fPf5RGjlJ8MPC3bK+/ejz88/P2zMs4wnuNH2TLXyv57q8lbHRkEKFK57CKtDy/OUv2rWBh2iGOO4SX69xEj44jAHhr6i18dHQjU9q9wpZdi3l8x1S6Ocvz3I3TTtvVNHvBSzy75XNaOMvyWr/ZhIeXyly2Zt1EPJ50mjcZ4NfvxNats/hy+etMTfkTBRToFVGV53K5xlYYzjnQRaQ78CbeEYs+UtX/5Fj+MHA3kA4kAneq6s681mmBHhgzf32WH3fO5o3bfj3jC1kbNk3jiUXPcF2lZtzR4wPCwl2FVGV2nox0Bk+4nFWSSu/wyjzfd3a+AX08aS/v/nAvf2v9KBdc0P6ctn/vJ61ZRAqNPWH0rtaOHm0eodx5dTKX7927mht/GEAYcNjX/d/OUYaRPT/OdnH2pHlL3uSBTR9xY0RVYt17qEMYn/T7lUhXOfbv38SUhS9wQfm6dL1yeLaL0Z6MdMbNuIu3D66gukfY41DaO8ry5m3zTjmWmzZP585FT1JG4e32o6hf99pT6jh2dBdDp/ZmpaTyaKU2DLr2QwDmLPwP/4j7nJcuuJ56Na9k6LzHOCQwsNyl3NDqYQ4c2c7AFS9xf9lG/N134T03J69BvFi7N706vZA5/9u5w3k6fgYPV2zNHdf9N3P+5i0zmLLyPb5P3skxh1ApQ+kYVZ049342SirjWj5N7eqt6P5tH9qHV+CVAfMAGPfdnbx+cBmiSg2PcHFYNE3L16NVTHcuuqAjr824g8kndlErA+KdcH14FUb2/QlxOJg253FGxM8gQ4SeYRV57Jr3qVT50mz7kZy0j7idP7N060xmHljNFoeHMFWud9Xgrnb/ZtKSlxmftCXbtZbdu5cz5tfheBSiw6IoE16G6mVrUadyE2pXb02FChef9UnGOQW6iDiBP4CuQALeMUb7qeqGLG06AUtUNVlE7geuUtU8ryBaoJszsXfPKmJXvkf/Lm8QHlm6SLd99Eg8R44mUKvWFadt89P85xkTN5mu5RvSp/XDVK+e6/+3TI991oEfMw5R3qN8lcdF1dwsXfUR/1r1BuVxMOGW2FO6yE7asGkaDy56mmMCL9TtT9d2T2YuO3gwjvu+vZktkk49DeMPSeezNiNpUO96bv2kOSnqYdrAZYSFu9i/fxMjZ97Fr54jeEQo61EiFGbcOve0H8cF7x+gQRNas0NTmd7nW8qWrcW2HXMZOP9RLhEX/x2wKNdPT7lTDrFr93Ji6nTC4Qzj0MGt9J92PcmitIs8n+9S9/FN+9e56KKugPeMf9GK91i7azFbk+L5I/Uw233dPicvet9Rpi4PXjeBD2fezftH13FP9CW4wly8fWg1bYiiadkLGXdkHZEKXSKrcjzjBEcyUtjtOZF5AR2gmYbTvUoburV8MDP4jyftpfekLlSSML4YuJRdu5dy1+z7OCJQTiFJ4LiAZrnY//j5bRnQ49TvpvjjXAP9CmCEqnbzTT8BoKq5fkBXRJoD76hqnu/FLdBNSbY/cSPP/HAHdza5j5bNBp/x85OT9yM48v00S+K+9fxjxkDvF+Mia1ClVGU8qsw8sJbd4uH1hkNoXLcXN33TiyiEBy66iX/tmMLzta6lz9XZ3oizd+9qpi15hdgDa7n34pvo3uHZfOuM2/oTt8x/mCiFFIF0EUp7lCnX5N0VlNO27XMY8MtDHHMIPZwVeHlA3rfLOHgwjhUbJvH73mW0vbBH5qeX1ONh5KQefH1iNwDXhlXiuZu/JzyyNDt2/MqoXx9ngyeJcuqgnCOM88NKUzf6AupWbkzDC7ud9lrazF+f5fEdUxlc+mJmHN1CusDYK1/g0vp9AO8Ql7t3r2Dn3pX8eXAjrev2od7FPfze/6zONdBvBrqr6t2+6YHA5ao69DTt3wH2qurzuSwbAgwBqF279mU7d+bZK2OMKQCpJ47xwtQbmZq6N3NeeY/yWvNHM/+YLFv1X+5a8zoOoKpH+G7gkmz9zOfimzn/YuneZVR1VaRqmeq0qnc9F8Zcfcbr+W3FWF5f+z6jOr1JnTodz7qe9DQ3/5lyAxVd5bm316cF8hl69Xi489PWLJcTVMpQPur4v3cQBa3IAl1EBgBDgY6qeiKv9doZujFFK+3EcXAIDgnD4Qg7pQ/3ram38uGxjYyofg03dX01QFUGr+07fuHdBc/wYPsXzvm6TV7yCnR//jTtAmplma7pm5dzI12Ap/AjzI0xRS+/aw8P9P6M9hsm0azRmd0e2njF1LmKV+rMC2gN/lxmXQbUFZEYEYkA+gLZ7grl6zcfC/RW1X0FX6YxprA5wyJo3mRAsfsCnfFfvkdOVdPxdqPEAhuBSaq6XkRGikhvX7PRQBlgsoisFpHcbwNojDGm0Ph1NUBVZwIzc8x7JsvjLgVclzHGmDNk762MMSZEWKAbY0yIsEA3xpgQYYFujDEhwgLdGGNChAW6McaECAt0Y4wJERboxhgTIizQjTEmRFigG2NMiLBAN8aYEGGBbowxIcIC3RhjQoQFujHGhAgLdGOMCREW6MYYEyL8CnQR6S4im0UkTkSG57K8g4isFJF036DSxhhjili+gS4iTuBdoAfQAOgnIg1yNPsTGAx8UdAFGmOM8Y8/Q9C1BuJUdRuAiEwE+gAbTjZQ1R2+ZZ5CqNEYY4wf/OlyqQHEZ5lO8M07YyIyRESWi8jyxMTEs1mFMcaY0yjSi6Kq+oGqtlTVlpUrVy7KTRtjTMjzJ9B3AbWyTNf0zTPGGFOM+BPoy4C6IhI
jIhFAX2B64ZZljDHmTOUb6KqaDgwFYoGNwCRVXS8iI0WkN4CItBKRBOAWYKyIrC/Moo0xxpzKn0+5oKozgZk55j2T5fEyvF0xxhhjAsS+KWqMMSHCAt0YY0KEBboxxoQIC3RjjAkRFujGGBMiLNCNMSZEWKAbY0yIsEA3xpgQYYFujDEhwgLdGGNChAW6McaECAt0Y4wJERboxhgTIizQjTEmRFigG2NMiLBAN8aYEOFXoItIdxHZLCJxIjI8l+WRIvKVb/kSEalT4JUaY4zJU76BLiJO4F2gB9AA6CciDXI0uws4pKoXA68Dowq6UGOMMXnzZwi61kCcqm4DEJGJQB9gQ5Y2fYARvsdfA++IiKiqFmCtAPz7u/Vs2H20oFdrjDFFpkH1sjzbq2GBr9efLpcaQHyW6QTfvFzb+AaVPgJUzLkiERkiIstFZHliYuLZVWyMMSZXfg0SXVBU9QPgA4CWLVue1dl7YfxVM8aYUODPGfouoFaW6Zq+ebm2EZEwoBxwoCAKNMYY4x9/An0ZUFdEYkQkAugLTM/RZjowyPf4ZmBuYfSfG2OMOb18u1xUNV1EhgKxgBMYp6rrRWQksFxVpwP/BSaISBxwEG/oG2OMKUJ+9aGr6kxgZo55z2R57AZuKdjSjDHGnAn7pqgxxoQIC3RjjAkRFujGGBMiLNCNMSZESKA+XSgiicDOM3hKJWB/IZVTnJXE/S6J+wwlc79L4j7Due33BapaObcFAQv0MyUiy1W1ZaDrKGolcb9L4j5DydzvkrjPUHj7bV0uxhgTIizQjTEmRARToH8Q6AICpCTud0ncZyiZ+10S9xkKab+Dpg/dGGNM3oLpDN0YY0weLNCNMSZEBEWg5zdIdbASkVoi8rOIbBCR9SLykG9+BRGZJSJbfP+W980XEXnL9zqsFZEWgd2DsyciThFZJSLf+6ZjfAOMx/kGHI/wzQ+ZAchF5DwR+VpENonIRhG5ItSPtYj80/e7vU5EvhQRVygeaxEZJyL7RGRdlnlnfGxFZJCv/RYRGZTbtvJS7APdz0Gqg1U68IiqNgDaAA/49m04MEdV6wJzfNPgfQ3q+n6GAO8XfckF5iFgY5bpUcDrvoHGD+EdeBxCawDyN4EfVfUSoCne/Q/ZYy0iNYBhQEtVbYT39tt9Cc1j/THQPce8Mzq2IlIBeBa4HO9Yzs+e/CPgN1Ut1j/AFUBslukngCcCXVch7eu3QFdgM1DNN68asNn3eCzQL0v7zHbB9IN31Ks5wNXA94Dg/dZcWM5jjvc+/Ff4Hof52kmg9+Es9rkcsD1n7aF8rPnfWMMVfMfue6BbqB5roA6w7myPLdAPGJtlfrZ2/vwU+zN0/BukOuj53l42B5YAVVR1j2/RXqCK73GovBZvAP8CPL7pisBh9Q4wDtn3y68ByINADJAIjPd1NX0kIqUJ4WOtqruAV4A/gT14j90KQv9Yn3Smx/acj3kwBHrIE5EywBTgH6p6NOsy9f6pDpnPlorIdcA+VV0R6FqKWBjQAnhfVZsDx/nfW3AgJI91eaAP3j9m1YHSnNotUSIU1bENhkD3Z5DqoCUi4XjD/HNVneqb/ZeIVPMtrwbs880PhdeiLdBbRHYAE/F2u7wJnOcbYByy71eoDECeACSo6hLf9Nd4Az6Uj3UXYLuqJqpqGjAV7/EP9WN90pke23M+5sEQ6P4MUh2URETwjse6UVVfy7Io66Dbg/D2rZ+cf7vvKnkb4EiWt3RBQVWfUNWaqloH77Gcq6q3AT/jHWAcTt3noB+AXFX3AvEiUt83qzOwgRA+1ni7WtqISCnf7/rJfQ7pY53FmR7bWOAaESnve3dzjW+e/wJ9IcHPiw09gT+ArcBTga6nAPerHd63YWuB1b6fnnj7DecAW4DZQAVfe8H7iZ+twO94Pz0Q8P04h/2/Cvje9/hCYCkQB0wGIn3zXb7pON/yCwNd9znsbzNgue94TwPKh/qxBv4NbALWAROAyFA81sCXeK8TpOF9N3bX2Rxb4E7f/scBd5xpHfbVf2OMCRHB0OVijDHGDxboxhgTIizQjTEmRFigG2NMiLBAN8aYEGGBbko0EfmHiJQKdB3GFAT72KIp0XzfWG2pqvsDXYsx58rO0E2JISKlRWSGiKzx3Z/7Wbz3GPlZRH72tblGRBaLyEoRmey7zw4iskNEXhaR30VkqYhcHMh9MSY3FuimJOkO7FbVpuq9P/cbwG6gk6p2EpFKwNNAF1VtgfdbnQ9nef4RVW0MvON7rjHFigW6KUl+B7qKyCgRaa+qR3Isb4N3EJWFIrIa7/03Lsiy/Mss/15R2MUac6bC8m9iTGhQ1T98w331BJ4XkTk5mggwS1X7nW4Vp3lsTLFgZ+imxBCR6kCyqn4GjMZ7+9pjQLSvyW9A25P9474+93pZVvG3LP8uLpqqjfGfnaGbkqQxMFpEPHjvinc/3q6TH0Vkt68ffTDwpYhE+p7zNN47fQKUF5G1wAm8w4UZU6zYxxaN8YN9vNEEA+tyMcaYEGFn6MYYEyLsDN0YY0KEBboxxoQIC3RjjAkRFujGGBMiLNCNMSZE/D84fzE31QmjlAAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "# plot training curves\n", + "plugin.loss_history.plot()" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "cf5241cc", + "metadata": {}, + "source": [ + "### Data generation\n", + "\n", + "Since the model training is conditional to the labels, the data generation requires the labels as well. You can pass the labels as a `cond` argument to the `generate` method. If it is not provided, the model will randomly generate the labels following the multinomial distribution of the training labels." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "a2e81779", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
sepal length (cm)sepal width (cm)petal length (cm)petal width (cm)target
06.4421192.9347334.3269331.3725701
16.2854122.7214405.1209012.0575472
24.6963502.0427262.8569090.7889351
35.3360192.6885334.1632831.1920511
46.0818253.2216824.6457681.5052931
55.6901652.3360884.1056301.2966071
65.3989352.7577133.8099841.1613691
77.3582703.2834286.4965902.3172382
86.5953272.5985265.8056531.4513532
95.2247182.7962243.5009151.1252481
\n", + "
" + ], + "text/plain": [ + " sepal length (cm) sepal width (cm) petal length (cm) petal width (cm) \\\n", + "0 6.442119 2.934733 4.326933 1.372570 \n", + "1 6.285412 2.721440 5.120901 2.057547 \n", + "2 4.696350 2.042726 2.856909 0.788935 \n", + "3 5.336019 2.688533 4.163283 1.192051 \n", + "4 6.081825 3.221682 4.645768 1.505293 \n", + "5 5.690165 2.336088 4.105630 1.296607 \n", + "6 5.398935 2.757713 3.809984 1.161369 \n", + "7 7.358270 3.283428 6.496590 2.317238 \n", + "8 6.595327 2.598526 5.805653 1.451353 \n", + "9 5.224718 2.796224 3.500915 1.125248 \n", + "\n", + " target \n", + "0 1 \n", + "1 2 \n", + "2 1 \n", + "3 1 \n", + "4 1 \n", + "5 1 \n", + "6 1 \n", + "7 2 \n", + "8 2 \n", + "9 1 " + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "plugin.generate(10)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "f2d6c6cb", + "metadata": {}, + "source": [ + "### Conditional data generation" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "1f55ffdb", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
sepal length (cm)sepal width (cm)petal length (cm)petal width (cm)target
05.2009353.4104481.2944040.2501560
14.8921723.4047651.3739660.3176620
24.5464153.0013621.3792670.1460120
36.9123333.3724784.7320091.6384991
45.4792602.6232463.4961611.2651181
55.6916102.5684203.6208421.0259881
66.9353143.2469516.2097022.2368082
77.0824953.0612085.9071951.9507212
86.0660102.5531235.1930901.6390342
\n", + "
" + ], + "text/plain": [ + " sepal length (cm) sepal width (cm) petal length (cm) petal width (cm) \\\n", + "0 5.200935 3.410448 1.294404 0.250156 \n", + "1 4.892172 3.404765 1.373966 0.317662 \n", + "2 4.546415 3.001362 1.379267 0.146012 \n", + "3 6.912333 3.372478 4.732009 1.638499 \n", + "4 5.479260 2.623246 3.496161 1.265118 \n", + "5 5.691610 2.568420 3.620842 1.025988 \n", + "6 6.935314 3.246951 6.209702 2.236808 \n", + "7 7.082495 3.061208 5.907195 1.950721 \n", + "8 6.066010 2.553123 5.193090 1.639034 \n", + "\n", + " target \n", + "0 0 \n", + "1 0 \n", + "2 0 \n", + "3 1 \n", + "4 1 \n", + "5 1 \n", + "6 2 \n", + "7 2 \n", + "8 2 " + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "labels = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])\n", + "plugin.generate(len(labels), cond=labels)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "adf672a5", + "metadata": {}, + "source": [ + "## Synthesize a regression dataset\n", + "\n", + "For regression datasets, there is no conditional variable by default. The model learns the joint distribution of the whole dataset and generates new data points from it." + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "id": "13df0848", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
fixed acidityvolatile aciditycitric acidresidual sugarchloridesfree sulfur dioxidetotal sulfur dioxidedensitypHsulphatesalcoholquality
count4898.0000004898.0000004898.0000004898.0000004898.0000004898.0000004898.0000004898.0000004898.0000004898.0000004898.0000004898.000000
mean6.8547880.2782410.3341926.3914150.04577235.308085138.3606570.9940273.1882670.48984710.5142675.877909
std0.8438680.1007950.1210205.0720580.02184817.00713742.4980650.0029910.1510010.1141261.2306210.885639
min3.8000000.0800000.0000000.6000000.0090002.0000009.0000000.9871102.7200000.2200008.0000003.000000
25%6.3000000.2100000.2700001.7000000.03600023.000000108.0000000.9917233.0900000.4100009.5000005.000000
50%6.8000000.2600000.3200005.2000000.04300034.000000134.0000000.9937403.1800000.47000010.4000006.000000
75%7.3000000.3200000.3900009.9000000.05000046.000000167.0000000.9961003.2800000.55000011.4000006.000000
max14.2000001.1000001.66000065.8000000.346000289.000000440.0000001.0389803.8200001.08000014.2000009.000000
\n", + "
" + ], + "text/plain": [ + " fixed acidity volatile acidity citric acid residual sugar \\\n", + "count 4898.000000 4898.000000 4898.000000 4898.000000 \n", + "mean 6.854788 0.278241 0.334192 6.391415 \n", + "std 0.843868 0.100795 0.121020 5.072058 \n", + "min 3.800000 0.080000 0.000000 0.600000 \n", + "25% 6.300000 0.210000 0.270000 1.700000 \n", + "50% 6.800000 0.260000 0.320000 5.200000 \n", + "75% 7.300000 0.320000 0.390000 9.900000 \n", + "max 14.200000 1.100000 1.660000 65.800000 \n", + "\n", + " chlorides free sulfur dioxide total sulfur dioxide density \\\n", + "count 4898.000000 4898.000000 4898.000000 4898.000000 \n", + "mean 0.045772 35.308085 138.360657 0.994027 \n", + "std 0.021848 17.007137 42.498065 0.002991 \n", + "min 0.009000 2.000000 9.000000 0.987110 \n", + "25% 0.036000 23.000000 108.000000 0.991723 \n", + "50% 0.043000 34.000000 134.000000 0.993740 \n", + "75% 0.050000 46.000000 167.000000 0.996100 \n", + "max 0.346000 289.000000 440.000000 1.038980 \n", + "\n", + " pH sulphates alcohol quality \n", + "count 4898.000000 4898.000000 4898.000000 4898.000000 \n", + "mean 3.188267 0.489847 10.514267 5.877909 \n", + "std 0.151001 0.114126 1.230621 0.885639 \n", + "min 2.720000 0.220000 8.000000 3.000000 \n", + "25% 3.090000 0.410000 9.500000 5.000000 \n", + "50% 3.180000 0.470000 10.400000 6.000000 \n", + "75% 3.280000 0.550000 11.400000 6.000000 \n", + "max 3.820000 1.080000 14.200000 9.000000 " + ] + }, + "execution_count": 29, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import pandas as pd\n", + "\n", + "df = pd.read_csv(\"https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-white.csv\", sep=\";\")\n", + "\n", + "loader = GenericDataLoader(df, target_column=\"quality\", sensitive_columns=[])\n", + "loader.dataframe().describe()" + ] + }, + { + "cell_type": "code", + "execution_count": 47, + "id": "14bca1cd", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[2023-03-27T18:08:18.761007+0200][38480][INFO] Step 100: MLoss: 1.2836 GLoss: 0.9867 Sum: 2.2703\n", + "[2023-03-27T18:08:24.679745+0200][38480][INFO] Step 200: MLoss: 1.2622 GLoss: 0.9409 Sum: 2.2031\n", + "[2023-03-27T18:08:30.391531+0200][38480][INFO] Step 300: MLoss: 1.2059 GLoss: 0.7669 Sum: 1.9727999999999999\n", + "[2023-03-27T18:08:36.164268+0200][38480][INFO] Step 400: MLoss: 1.1645 GLoss: 0.6393 Sum: 1.8038\n", + "[2023-03-27T18:08:41.835318+0200][38480][INFO] Step 500: MLoss: 1.1717 GLoss: 0.6158 Sum: 1.7875\n", + "[2023-03-27T18:08:47.581383+0200][38480][INFO] Step 600: MLoss: 1.1946 GLoss: 0.5384 Sum: 1.733\n", + "[2023-03-27T18:08:53.378127+0200][38480][INFO] Step 700: MLoss: 1.1343 GLoss: 0.5135 Sum: 1.6478000000000002\n", + "[2023-03-27T18:08:59.698145+0200][38480][INFO] Step 800: MLoss: 1.1168 GLoss: 0.4788 Sum: 1.5956000000000001\n", + "[2023-03-27T18:09:05.752638+0200][38480][INFO] Step 900: MLoss: 1.1034 GLoss: 0.4734 Sum: 1.5768\n", + "[2023-03-27T18:09:12.070003+0200][38480][INFO] Step 1000: MLoss: 1.142 GLoss: 0.4692 Sum: 1.6112\n", + "[2023-03-27T18:09:18.112377+0200][38480][INFO] Step 1100: MLoss: 1.1691 GLoss: 0.4602 Sum: 1.6293\n", + "[2023-03-27T18:09:25.549484+0200][38480][INFO] Step 1200: MLoss: 1.1201 GLoss: 0.4578 Sum: 1.5779\n", + "[2023-03-27T18:09:31.574874+0200][38480][INFO] Step 1300: MLoss: 1.1436 GLoss: 0.4429 Sum: 1.5865\n", + "[2023-03-27T18:09:37.672797+0200][38480][INFO] Step 1400: MLoss: 1.1093 GLoss: 0.449 Sum: 1.5583\n", + 
"[2023-03-27T18:09:44.149652+0200][38480][INFO] Step 1500: MLoss: 1.1468 GLoss: 0.4347 Sum: 1.5815000000000001\n", + "[2023-03-27T18:09:49.923915+0200][38480][INFO] Step 1600: MLoss: 1.1545 GLoss: 0.4313 Sum: 1.5858\n", + "[2023-03-27T18:09:55.733558+0200][38480][INFO] Step 1700: MLoss: 1.102 GLoss: 0.4305 Sum: 1.5325000000000002\n", + "[2023-03-27T18:10:03.367053+0200][38480][INFO] Step 1800: MLoss: 1.0953 GLoss: 0.4267 Sum: 1.522\n", + "[2023-03-27T18:10:10.533359+0200][38480][INFO] Step 1900: MLoss: 1.1247 GLoss: 0.4223 Sum: 1.5470000000000002\n", + "[2023-03-27T18:10:17.355705+0200][38480][INFO] Step 2000: MLoss: 1.2767 GLoss: 0.4266 Sum: 1.7033\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 47, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# define the model hyper-parameters\n", + "plugin_params.update(\n", + " is_classification = False,\n", + " n_iter = 500, # epochs\n", + " lr = 5e-4,\n", + " weight_decay = 1e-4,\n", + " batch_size = 1250,\n", + " n_layers_hidden = 3,\n", + " dim_hidden = 256,\n", + " num_timesteps = 100, # timesteps in diffusion\n", + ")\n", + "plugin = Plugins().get(\"ddpm\", **plugin_params)\n", + "plugin.fit(loader)" + ] + }, + { + "cell_type": "code", + "execution_count": 48, + "id": "83064f94", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 48, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXQAAAEGCAYAAAB1iW6ZAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/P9b71AAAACXBIWXMAAAsTAAALEwEAmpwYAABiV0lEQVR4nO2dd3hU1fa/3z2TSSa9997ovfcqSFERsSsCevXqvViuX/V61Z967V67omIXK6CAoggovZcQagokpPfee+b8/jiTISEJJJDOfp8nTybn7HPOmjOTz1577bXXEYqiIJFIJJLuj6azDZBIJBJJ2yAFXSKRSHoIUtAlEomkhyAFXSKRSHoIUtAlEomkh2DWWRd2cXFRAgICOuvyEolE0i05cuRIjqIork3t6zRBDwgIICwsrLMuL5FIJN0SIURic/tkyEUikUh6CFLQJRKJpIcgBV0ikUh6CJ0WQ5dIJJILUV1dTUpKChUVFZ1tSqeg1+vx8fFBp9O1+Bgp6BKJpEuSkpKCra0tAQEBCCE625wORVEUcnNzSUlJITAwsMXHyZCLRCLpklRUVODs7HzFiTmAEAJnZ+dWj06koEskki7LlSjmdVzKe+92gn4m/wzvh79PfkV+Z5sikUgkXYpuJ+hJRUl8dvIzMssyO9sUiUQi4euvv2bp0qWdbQbQDQXd1twWgOKq4k62RCKRSLoW3VbQi6qKOtkSiUTS00lISKBPnz4sXryYXr16cccdd7BlyxbGjx9PaGgohw4datR+2rRpDBo0iOnTp5OUlATATz/9xIABAxg8eDCTJk0CICIiglGjRjFkyBAGDRpETEzMZdt70bRFIYQv8A3gDijAp4qivHdemynAr0C8cdNaRVFeuGzrmkB66BLJlcd/f4sgMq1tnbh+XnY8d23/i7aLjY3lp59+4ssvv2TkyJH88MMP7Nmzh/Xr1/PKK69w/fXXm9o++OCDLFq0iEWLFvHll1/y0EMP8csvv/DCCy+wefNmvL29KSgoAGD58uU8/PDD3HHHHVRVVVFbW3vZ76klHnoN8H+KovQDxgD/FEL0a6LdbkVRhhh/2kXMAezM7QAp6BKJpGMIDAxk4MCBaDQa+vfvz/Tp0xFCMHDgQBISEhq03b9/P7fffjsACxcuZM+ePQCMHz+exYsX89lnn5mEe+zYsbzyyiu8/vrrJCYmYmlpedm2XtRDVxQlHUg3vi4WQkQB3kDkZV/9ErDR2QBS0CWSK4mWeNLthYWFhem1RqMx/a3RaKipqWnROZYvX87BgwfZsGEDw4cP58iRI9x+++2MHj2aDRs2MGfOHD755BOmTZt2Wba2KoYuhAgAhgIHm9g9VghxXAixUQjR5N0XQtwnhAgTQoRlZ2e33lpAq9Fio7ORgi6RSLoc48aNY+XKlQB8//33TJw4EYCzZ88yevRoXnjhBVxdXUlOTiYuLo6goCAeeugh5s2bx4kTJy77+i0WdCGEDbAGeERRlPODWeGAv6Iog4EPgF+aOoeiKJ8qijJCUZQRrq5N1mdvEbbmtnJSVCKRdDk++OADvvrqKwYNGsS3337Le++p042PP/44AwcOZMCAAYwbN47BgwezevVqBgwYwJAhQzh16hR33XXXZV9fKIpy8UZC6IDfgc2KorzdgvYJwAhFUXKaazNixAjlUh9wsWD9ArxsvPhg2geXdLxEIun6REVF0bdv3842o1Np6h4IIY4oijKiqfYX9dCFuv70CyCqOTEXQngY2yGEGGU8b24rbW8xduZ2MuQikUgk59GSaovjgYXASSHEMeO2pwA/AEVRlgM3Ag8IIWqAcuBWpSWu/yVia25Laklqe51eIpFIuiUtyXLZA1ywSoyiKMuAZW1l1MWwNbeVHrpEIpGcR7dbKQoy5CKRSCRN0S0F3dbclpLqEmoNl7+ySiKRSHoK3VbQAUqqSzrZEolEIuk6dEtBr1v+L3PRJRJJZ7B48WJ+/vnnzjajEd1S0GWBLolEImmMFHSJRCK5AC+++CK9e/dmwoQJ3Hbbbbz55psN9m/dupWhQ4cycOBA7r
77biorKwF48skn6devH4MGDeKxxx4Dmi6j25a0JA+9y1G/4qJBMaAR3bJfkkgkLWXjk5Bxsm3P6TEQZr92wSaHDx9mzZo1HD9+nOrqaoYNG8bw4cNN+ysqKli8eDFbt26lV69e3HXXXXz88ccsXLiQdevWER0djRDCVDK3qTK6bUm3VMI6D31v2l5Gfz+a03mnO9kiiUTSE9m7dy/z5s1Dr9dja2vLtdde22D/6dOnCQwMpFevXgAsWrSIXbt2YW9vj16v55577mHt2rVYWVkBTZfRbUu6tYf+29nfqKytZP3Z9Tzu9HgnWyWRSNqNi3jSXQ0zMzMOHTrE1q1b+fnnn1m2bBnbtm1rsoyus7Nzm123W3roVjorNEJDZa0aq9qcsBmDYuhkqyQSSU9j/Pjx/Pbbb1RUVFBSUsLvv//eYH/v3r1JSEggNjYWgG+//ZbJkydTUlJCYWEhc+bM4Z133uH48eNA02V025Ju6aFrhAYbnQ1FVUWM9x7P3tS9HM8+zlC3oZ1tmkQi6UGMHDmS6667jkGDBuHu7s7AgQOxt7c37dfr9Xz11VfcdNNN1NTUMHLkSO6//37y8vKYN28eFRUVKIrC22+rdQ0ff/xxYmJiUBSF6dOnM3jw4Da1t0Xlc9uDyymfCzBrzSwySzPZuGAj16y7hhtCb+Cp0U81aldaXcr6s+uZ6T8TZ8u2G9pIJJL2pauUzy0pKcHGxoaysjImTZrEp59+yrBhwzrk2m1ePrer4mPjw0SfiXhYezDZZzKb4jdRXVvdoE14ZjjXrbuOVw6+wuuHX+8kSyUSSXfmvvvuY8iQIQwbNowFCxZ0mJhfCt0y5ALw7tR3TemK80Lm8Wfin+xO3c1kn8lUG6rRm+l558g7aDQa5gTO4Y/4P1jSfwl9nRv2dnGFcfx+9neWDl0q0x8lEkkjfvjhh842ocV0WwWzMbfBSqemAo3zGoez3pkfo3/ktg23cdfGu8guy+Z49nEWhC7gmTHPYG9hz3tH32t0nrVn1vLZyc9ILm7byQmJRCLpaLqtoNfHTGPGtcHXciD9AFF5UUTlRfHCgRdQUJjuNx1bc1sWhC7gQNoBU2ZMHVF5UQ1+SyQSSXelRwg6wI29biTALoD/Tfof7lbu7EjegZ+tHyEOIQD0depLrVJLQmGC6RhFUYjKVYU8Oje6E6yWSCSStqPHCLq/nT+/zf+N2YGzuaPvHQBM95uO8VGnBDsEAxBbEGs6JqUkheJqtR5MdJ4UdIlE0r3ptpOiF+KmXjdxOv80N/W6ybQtwC4AM2HG2YKzpm113nmIQ4gUdIlE0ggbGxtKSrrPcxd6jIdeHxtzG16b+Bq+dr6mbTqtDj87vwYeenReNGZCjb/nVuSSXZbdGeZKJBJJm9AjBb05gh2CG3jokXmRBDsEM8hlEAB/xP/ByuiV1BhqGhxXXVvNz2d+ZnvSdgorCzvUZolE0vkoisLjjz/OgAEDGDhwIKtWrQIgPT2dSZMmMWTIEAYMGMDu3bupra1l8eLFprbvvPNOh9nZI0MuzRHiEMKWxC1U1FRgrjUnKjeKid4T6e3UG4A3w9Q6xynFKcwPnc+e1D3c0fcOtiZv5b/7/wvAMLdhrJi9otPeg0RyJfL6odfbPCzax6kP/x717xa1Xbt2LceOHeP48ePk5OQwcuRIJk2axA8//MDVV1/N008/TW1tLWVlZRw7dozU1FROnToF0C5lcpvjihN0BYW4wjiKqorIq8hjjNcYU1qjTqOj2lDNisgVfBf1HbVKLaEOoRzPOo6lmSULQhfwfdT35JTn4GLp0tlvRyKRdBB79uzhtttuQ6vV4u7uzuTJkzl8+DAjR47k7rvvprq6muuvv54hQ4YQFBREXFwcDz74IHPnzmXmzJkdZucVJ+gAp/NOsy9tH3bmdszwnwHA8+OeB6DaUE1FbQV6rZ5fz/7KwYyDHMs6Rn/n/swLmcd3Ud+xO2U380Pnd9bbkEiuOFrqSXc0kyZNYteuXWzYsIHFixfz6KOPctddd3H8+HE2b97M8uXLWb16NV9++WWH2HNFxdD97PzwtvHm3fB32Zq0lWuDr8VCa9GgjU6j47WJr/H8uOcZ5DKIXSm7iM6LZrDrYHo79sbD2oPtyds76R1IJJLOYOLEiaxatYra2lqys7PZtWsXo0aNIjExEXd3d+69917+9re/ER4eTk5ODgaDgQULFvDSSy8RHh7eYXZeUR66mcaM5VctZ9GmRVQbqlkQuuCC7Ud5jmL58eUADHYdjBCCyT6TWX92PRU1FejN9B1htkQi6WTmz5/P/v37GTxY1YH//e9/eHh4sGLFCt544w10Oh02NjZ88803pKamsmTJEgwG9RkNr776aofZ2W3L514OcQVxROZFck3QNRdsdzjjMHdvvhuAnbfsxEnvxN7Uvdy/5X5em/gac4PmdoS5EskVSVcpn9uZXDHlcy+HIIegi4o5qF65hdYCP1s/nPROAIzxHEOoYygfHvuwUbleiUQi6UyuSEFvKeZac27ve3uDFadajZZHhj1CcnEyHx3/iLiCuE60UCKRSM4hBf0iPDr8URYPWNxg20TviYzzGsfnJz9n3q/z+D3u3HMGT+WcYmX0yg62UiLpmXRWSLgrcCnvXQr6JSCEYNm0ZXw/53tCHUP5/MTnppv/Q9QPvHbotUZleiUSSevQ6/Xk5uZekaKuKAq5ubno9a1LvLiislzaEp1WxyDXQSzpv4Sn9jzFntQ9TPSZSGJxoqlMb90KVIlE0np8fHxISUkhO/vKrLGk1+vx8fFp1TFS0C+TWQGzePfIu3wb+S0TfSaSVJQEQExBjBR0ieQy0Ol0BAYGdrYZ3QoZcrlMdFodswNnE5YZRm55LgWVBQDE5sde+ECJRCJpY6SgtwH9XfpTbahusIK0fpleiUQi6QikoLcBfZ3UxP9NCZsACHUMlYIukUg6HCnobYCfnR9WZlYczjgMwFTfqaSWpFJaXdrJlkkkkiuJiwq6EMJXCLFdCBEphIgQQjzcRBshhHhfCBErhDghhBjWPuZ2TTRCQx+nPhgUAx7WHvR37g/Q4GEaEolE0t60xEOvAf5PUZR+wBjgn0KIfue1mQ2EGn/uAz5uUyu7Af2c1VviZ+tHqEMoAF+c/ILj2ccB2JywmXm/zJP56RKJpN24qKAripKuKEq48XUxEAV4n9dsHvCNonIAcBBCeLa5tV0Yk6Db+eFt682sgFnsS9vH4o2LSS5O5ouTXxBXGGd6MLVEIpG0Na2KoQshAoChwMHzdnkDyfX+TqGx6COEuE8IESaECOtpiwXqJkb9bf3RCA1vTH6D3+f/jhCCp3Y/RVSeKuSnck51ppkSiaQH02JBF0LYAGuARxRFKbqUiymK8qmiKCMURRnh6up6KafosgQ7BPP82OeZFzLPtM3d2p0bQm/gWPYxLM0scdI7cSLnRCdaKZFIejItEnQhhA5VzL9XFGVtE01SAd96f/sYt10xCCFY0GsBjnrHBtuXDFiCmTBjduBshrsPlx66RCJpN1qS5SKAL4AoRVHebqbZeuAuY7bLG
[... base64 PNG data omitted: loss-history plot ...]",
+      "text/plain": [
+       "<Figure ...>"
+      ]
+     },
+     "metadata": {
+      "needs_background": "light"
+     },
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "plugin.loss_history.plot()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 51,
+   "id": "af9d6df1",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
fixed acidityvolatile aciditycitric acidresidual sugarchloridesfree sulfur dioxidetotal sulfur dioxidedensitypHsulphatesalcoholquality
053.753993-2.4752390.404968406.8983861.788962450.0737241221.5510775.69717712.45137714.83544581.5156960.0
1241.769932-33.9059337.4401881722.53605348.0345561312.2644572141.11936831.25907483.65474928.591759489.1726743.0
225.3449040.769463-11.237007-335.794326-3.595284-234.179124382.9075157.63768417.7483003.38029673.7010481.0
315.635557-28.371864-19.808469800.08844661.404066-596.053591-1749.79750528.376345-71.868790-14.556346-38.3151791.0
4-0.796959-8.546869-4.726590128.3430281.083628-288.3521041184.6802738.08150023.0128282.16859736.6728400.0
5-31.203381-39.052177-57.6510321269.158981-22.793850101.490751-661.9978235.01273819.61582226.791456-63.7736783.0
6-120.526480-49.314650-67.642982650.13681665.155843598.106999-3468.7530373.75056652.556860-108.310847-91.8163103.0
713.172627-7.196406-20.153565746.262383-30.8466881592.8153971610.699379-15.57666027.31969245.376814135.8714220.0
\n", + "
" + ], + "text/plain": [ + " fixed acidity volatile acidity citric acid residual sugar chlorides \\\n", + "0 53.753993 -2.475239 0.404968 406.898386 1.788962 \n", + "1 241.769932 -33.905933 7.440188 1722.536053 48.034556 \n", + "2 25.344904 0.769463 -11.237007 -335.794326 -3.595284 \n", + "3 15.635557 -28.371864 -19.808469 800.088446 61.404066 \n", + "4 -0.796959 -8.546869 -4.726590 128.343028 1.083628 \n", + "5 -31.203381 -39.052177 -57.651032 1269.158981 -22.793850 \n", + "6 -120.526480 -49.314650 -67.642982 650.136816 65.155843 \n", + "7 13.172627 -7.196406 -20.153565 746.262383 -30.846688 \n", + "\n", + " free sulfur dioxide total sulfur dioxide density pH \\\n", + "0 450.073724 1221.551077 5.697177 12.451377 \n", + "1 1312.264457 2141.119368 31.259074 83.654749 \n", + "2 -234.179124 382.907515 7.637684 17.748300 \n", + "3 -596.053591 -1749.797505 28.376345 -71.868790 \n", + "4 -288.352104 1184.680273 8.081500 23.012828 \n", + "5 101.490751 -661.997823 5.012738 19.615822 \n", + "6 598.106999 -3468.753037 3.750566 52.556860 \n", + "7 1592.815397 1610.699379 -15.576660 27.319692 \n", + "\n", + " sulphates alcohol quality \n", + "0 14.835445 81.515696 0.0 \n", + "1 28.591759 489.172674 3.0 \n", + "2 3.380296 73.701048 1.0 \n", + "3 -14.556346 -38.315179 1.0 \n", + "4 2.168597 36.672840 0.0 \n", + "5 26.791456 -63.773678 3.0 \n", + "6 -108.310847 -91.816310 3.0 \n", + "7 45.376814 135.871422 0.0 " + ] + }, + "execution_count": 51, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "plugin.model.generate(8)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "be62c2f0", + "metadata": {}, + "source": [ + "### Conditional data generation\n", + "\n", + "A conditional variable `cond` can be provided to the `fit` method. It can be either a column name in the dataset or a custom array. The model will then learn the conditional distribution of the dataset given `cond`. In this case, an array must be provided as the `cond` argument of the `generate` method." 
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 43,
+   "id": "56a1fc7e",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "[2023-03-27T18:03:45.005934+0200][38480][INFO] Step 100: MLoss: 0.9066 GLoss: 1.0013 Sum: 1.9079000000000002\n",
+      "[2023-03-27T18:03:51.387087+0200][38480][INFO] Step 200: MLoss: 0.4735 GLoss: 1.0112 Sum: 1.4847000000000001\n",
+      "[2023-03-27T18:03:59.107456+0200][38480][INFO] Step 300: MLoss: 0.4567 GLoss: 1.001 Sum: 1.4577\n",
+      "[2023-03-27T18:04:05.835508+0200][38480][INFO] Step 400: MLoss: 0.2715 GLoss: 0.9856 Sum: 1.2571\n",
+      "[2023-03-27T18:04:12.739590+0200][38480][INFO] Step 500: MLoss: 0.2193 GLoss: 0.9046 Sum: 1.1239\n",
+      "[2023-03-27T18:04:19.417762+0200][38480][INFO] Step 600: MLoss: 0.0143 GLoss: 0.8463 Sum: 0.8606\n",
+      "[2023-03-27T18:04:26.022729+0200][38480][INFO] Step 700: MLoss: 0.0048 GLoss: 0.7509 Sum: 0.7557\n",
+      "[2023-03-27T18:04:32.757598+0200][38480][INFO] Step 800: MLoss: 0.0083 GLoss: 0.7102 Sum: 0.7185\n",
+      "[2023-03-27T18:04:39.550873+0200][38480][INFO] Step 900: MLoss: 0.0029 GLoss: 0.675 Sum: 0.6779000000000001\n",
+      "[2023-03-27T18:04:46.573464+0200][38480][INFO] Step 1000: MLoss: 0.0039 GLoss: 0.6414 Sum: 0.6453\n",
+      "[2023-03-27T18:04:53.438631+0200][38480][INFO] Step 1100: MLoss: 0.003 GLoss: 0.6046 Sum: 0.6076\n",
+      "[2023-03-27T18:05:01.283222+0200][38480][INFO] Step 1200: MLoss: 0.0013 GLoss: 0.6297 Sum: 0.631\n",
+      "[2023-03-27T18:05:08.559280+0200][38480][INFO] Step 1300: MLoss: 0.0012 GLoss: 0.5479 Sum: 0.5491\n",
+      "[2023-03-27T18:05:15.536738+0200][38480][INFO] Step 1400: MLoss: 0.0067 GLoss: 0.5275 Sum: 0.5342\n",
+      "[2023-03-27T18:05:22.391711+0200][38480][INFO] Step 1500: MLoss: 0.0007 GLoss: 0.5252 Sum: 0.5259\n",
+      "[2023-03-27T18:05:29.285959+0200][38480][INFO] Step 1600: MLoss: 0.0018 GLoss: 0.5017 Sum: 0.5035000000000001\n",
+      "[2023-03-27T18:05:36.288634+0200][38480][INFO] Step 1700: MLoss: 0.0012 GLoss: 0.5013 Sum: 0.5025\n",
+      "[2023-03-27T18:05:43.485831+0200][38480][INFO] Step 1800: MLoss: 0.0009 GLoss: 0.4927 Sum: 0.49360000000000004\n",
+      "[2023-03-27T18:05:50.629387+0200][38480][INFO] Step 1900: MLoss: 0.0009 GLoss: 0.4931 Sum: 0.494\n",
+      "[2023-03-27T18:05:58.709478+0200][38480][INFO] Step 2000: MLoss: 0.0006 GLoss: 0.4864 Sum: 0.487\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       ""
+      ]
+     },
+     "execution_count": 43,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "plugin.fit(loader, cond='quality')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 44,
+   "id": "3fcb9493",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       ""
+      ]
+     },
+     "execution_count": 44,
+     "metadata": {},
+     "output_type": "execute_result"
+    },
+    {
+     "data": {
"iVBORw0KGgoAAAANSUhEUgAAAXQAAAEGCAYAAAB1iW6ZAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/P9b71AAAACXBIWXMAAAsTAAALEwEAmpwYAABKvklEQVR4nO3dd3wc1dXw8d/dvupdsiXZsnGvuFKMC5hqeg0lYAcCIYWEh5KXhBQeyEMoCWkQegfTQgndVBvbgHvv3ZItq/ey9b5/zK4sybsq9qqf78f+aHdmdvZoJB1dnblFaa0RQgjR85m6OgAhhBCRIQldCCF6CUnoQgjRS0hCF0KIXkISuhBC9BKWrnrjlJQUnZOT01VvL4QQPdKqVauKtdapofZ1WULPyclh5cqVXfX2QgjRIyml9oXbJyUXIYToJSShCyFELyEJXQgheokuq6ELIURLPB4PeXl51NfXd3UoXcLhcJCVlYXVam3zayShCyG6pby8PGJjY8nJyUEp1dXhdCqtNSUlJeTl5TFo0KA2v05KLkKIbqm+vp7k5OQ+l8wBlFIkJye3+68TSehCiG6rLybzoKP53CWhB3x74FtyK3O7OgwhhDhqktADfrPkN7yw6YWuDkMI0cO88MIL/OIXv+jqMABJ6A3cPjdlrrKuDkMIIY6aJPQAn/ZR6a7s6jCEEN3I3r17GTFiBPPmzWPYsGFcc801fPHFF0ybNo2hQ4eyfPnyI44/7bTTGDduHLNnz2b//v0AvPXWW4wZM4bx48czY8YMADZt2sTUqVM5/vjjGTduHDt27DjmeKXbYoDX76XSJQldiO7ofz/YxOaDkf35HNU/jj+eP7rV43bu3Mlbb73Fc889x5QpU5g/fz5Llizh/fff5/777+eiiy5qOPaWW25h7ty5zJ07l+eee45f/vKXvPfee9x7770sWLCAzMxMysvLAXjiiSf41a9+xTXXXIPb7cbn8x3z5yQt9ACv3ystdCHEEQYNGsTYsWMxmUyMHj2a2bNno5Ri7Nix7N27t8mx3333HVdffTUA1157LUuWLAFg2rRpzJs3j6effrohcZ900kncf//9PPjgg+zbtw+n03nMsUoLHfBrPxpNhauiq0MRQoTQlpZ0R7Hb7Q2PTSZTw3OTyYTX623TOZ544gmWLVvGRx99xKRJk1i1ahVXX301J5xwAh999BFz5szhySef5LTTTjumWHtcC93n91HhqsDrb9uFbOs5Aao91RE9rxCibzn55JN5/fXXAXj11VeZPn06ALt27eKEE07g3nvvJTU1ldzcXHbv3s3gwYP55S9/yYUXXsj69euP+f1bTehKqWyl1NdKqc1KqU1KqV+FOGaWUqpCKbU28P8PxxxZGAv2LuCU109hf9X+iJ3Tqw8n8Sp3VcTOK4ToW/71r3/x/PPPM27cOF5++WX+8Y9/AHDnnXcyduxYxowZw8knn8z48eN58803GTNmDMcffzwbN27kuuuuO+b3V1rrlg9Qqh/QT2u9WikVC6wCLtJab250zCzgDq31eW1948mTJ+ujWeBi6YGl3PzFzbx8zsscn3Z8u18fSpW7ipNfOxmADy/+kIFxAyNyXiHE0duyZQsjR47s6jC6VKhroJRapbWeHOr4VlvoWut8rfXqwOMqYAuQGYFYj0q8PR6Acld5xM4ZLLkAUkcXQvRY7aqhK6VygAnAshC7T1JKrVNKfaKUCnkHQyl1k1JqpVJqZVFRUfuj5XBCj2TibVxykZ4uQoieqs0JXSkVA7wN3Kq1bp71VgMDtdbjgX8B74U6h9b6Ka31ZK315NTUkGuctqojErq00IUQvUGbErpSyoqRzF/VWr/TfL/WulJrXR14/DFgVUqlRDTSgBhrDCZlosItLXQhhGisLb1cFPAssEVr/UiYYzICx6GUmho4b0kkAw0yKRPxtnhpoQshRDNtGVg0DbgW2KCUWhvY9ltgAIDW+gngMuCnSikvUAdcqVvrPnMM4u2RTejSQhdC9AatJnSt9RKgxZnWtdaPAo9GKqjWxNnjpIUuhOgy8+bN47zzzuOyyy7r6lCa6HEjRQES7AmR7baoDyd0aaELIXqqHpnQ423xEU28jYf7y4yLQojG7rvvPoYPH84pp5zCVVddxV/+8pcm+7/88ksmTJjA2LFjuf7663G5XADcddddjBo1inHjxnHHHXcAoafRjaQeOTlXxGvogYTutDilhS5Ed/TJXXBoQ2TPmTEWznmgxUNWrFjB22+/zbp16/B4PEycOJFJkyY17K+vr2fevHl8+eWXDBs2jOuuu47HH3+ca6+9lnfffZetW7eilGqYMjfUNLqR1CNb6HH2OKo91Xj8noicL1hySXIkSQ1dCNFg6dKlXHjhhTgcDmJjYzn//POb7N+2bRuDBg1i2LBhAMydO5dvvvmG+Ph4HA4HN9xwA++88w5RUVFA6Gl0I6lHttAT7AmAUR5JdiYf8/mCN0WTHElsL9t+zOcTQkRYKy3p7sZisbB8+XK+/PJL/vOf//Doo4/y1VdfhZxGNzn52HNYUI9socfbAqNFIzS4KFhySXIk4fK5qPfWR+S8Qoiebdq0aXzwwQfU19dTXV3Nhx9+2GT/8OHD2bt3Lzt37gTg5ZdfZubMmVRXV1NRUcGcOXP429/+xrp164DQ0+hGUo9soUd6+H+wH3qSI6nhvA6LIyLnFkL0XFOmTOGCCy5g3LhxpKenM3bsWOLj4xv2OxwOnn/+eS6//HK8Xi9Tpkzh5ptvprS0lAsvvJD6+nq01jzyiDEm884772THjh1orZk9ezbjx4+PaLw9MqEHSy6RSujBkktaVBoAJfUlpEenR+TcQoie7Y477uCee+6htraWGTNmMGnSJG688caG/bNnz2bNmjVNXtOvX78jFpAGeOedI2ZOiagemdDj7HFABBN64KZoMIkX1xVH5LxCiJ7vpptuYvPmzdTX1zN37lwmTpzY1SGF1SMTeqTnRA/W0NOjjIReUtch09AIIXqg+fPnd3UIbdYjb4rGWmMxK3PEa+jBhC4tdCFET9QjE7pSijhbHJXuStw+Nx6/h2OZCyxYQ4+2RhNrjZWELoTokXpkQgej7PLOjneY9MokJr48kfPePY8XNr6A1++l3lvPX1b8haLatq2KFKyhW0wWkp3JktCFED1Sj6yhA1w89GLWFq5lZNJIUPD9we/566q/YrfYsZlsvLj5RQbGD+TyYZe3eq5gDd2szKQ4UyShCyF6pB6b0K8fc32T5zePu5lrPr6GVza/QrQ1GoBDNYfadK6GhG4yEvqW0i2RDVYI0SPFxMRQXV3d1WG0WY8tuTSnlOK6Udexv2p/Q0Jua0JvXHKRFroQoqfqNQkd4PSBp9Mvuh9RliiGJg5te0IP3BS1KKOGXuOpodZT25GhCiF6EK01d955J2PGjGHs2LG88cYbAOTn5zNjxgyOP/54xowZw+LFi/H5fMybN6/h2L/97W+dFmePLbmEYjFZ+PP0P1PlruKj3R+xuWRzm14X7LZoNplJdaYCRl/0KGtUh8UqhGi7B5c/yNbSrRE954
ikEfy/qf+vTce+8847rF27lnXr1lFcXMyUKVOYMWMG8+fP56yzzuLuu+/G5/NRW1vL2rVrOXDgABs3bgTokGlyw+lVLXSASemTmJU9i37R/ThUc6hN3Rmb3xQFKK6XsosQwrBkyRKuuuoqzGYz6enpzJw5kxUrVjBlyhSef/557rnnHjZs2EBsbCyDBw9m9+7d3HLLLXz66afExcV1Wpy9qoXeWHp0Om6/m9L60lan2A3W0JskdKmjC9FttLUl3dlmzJjBN998w0cffcS8efO47bbbuO6661i3bh0LFizgiSee4M033+S5557rlHh6XQs9qF90P6BtN0Z9fh8WZUEp1ZD8JaELIYKmT5/OG2+8gc/no6ioiG+++YapU6eyb98+0tPTufHGG/nxj3/M6tWrKS4uxu/3c+mll/KnP/2J1atXd1qcvbaFnhGdARgJfXTK6BaP9WovZpMZgER7IiZlkoQuhGhw8cUX89133zF+/HiUUjz00ENkZGTw4osv8vDDD2O1WomJieGll17iwIED/OhHP8Lv9wPw5z//udPi7P0JvfYQawvXMiJpRNg5zr1+L2ZlJHSzyUySI0kSuhCioQ+6UoqHH36Yhx9+uMn+uXPnMnfu3CNe15mt8sZ6bckl0Z6I3Wzn490fc+0n1/Lh7g/DHuvz+7CYDv9uy4jK4GD1wc4IUwghIqbXJnSlFBnRGawvXg/QYoL26aYJPTs2m7yqvA6PUQghIqnXJnQwWtpBhbWFYY9rXHIByIrNIr8mH4/f06HxCSFadiyzqPZ0R/O59+qEPixpGNmx2QxPHE5RXfiZF33a13BTFIwWuk/72jzSVAgReQ6Hg5KSkj6Z1LXWlJSU4HC0b23jXntTFOD2Sbdzy4Rb+M3i37Cvcl/Y40K10AFyq3LJjs1u9X2K64opry9nSOKQYw9aCAFAVlYWeXl5FBW1bRrs3sbhcJCVldWu17Sa0JVS2cBLQDqggae01v9odowC/gHMAWqBeVrrrrnN24jZZMZpcpLqTGVlwcqwx/n8Pqwma8PzYBJvax399oW3U1xXzEeXfHRsAQshGlitVgYNGtTVYfQobWmhe4HbtdarlVKxwCql1Oda68YTpZwDDA38PwF4PPCxW0iLSqPCVYHL58Juth+x36ubttBTnalYTdY2JfRVBatYXbi6YcpeIYToKq3W0LXW+cHWtta6CtgCZDY77ELgJW34HkhQSvWLeLRHKTXKmHAr3I1Rn79pDd1sMpMZk0ledesJ/ZkNzwBQ46lpmBNGCCG6QrtuiiqlcoAJwLJmuzKB3EbP8zgy6aOUukkptVIptbIz62JpzjSAsEvSNW+hg1F2ya3KDXl80KGaQyw5sIS0KOP8NZ6aCEQrhBBHp80JXSkVA7wN3Kq1rjyaN9NaP6W1nqy1npyamno0pzgqDS30uvAt9MY1dDBujOZV5bV4h73CVQHA6GRjaoFK91FdFiGEiIg2JXSllBUjmb+qtX4nxCEHgMbdQbIC27qFYAu6xRa66cgWerWnmnJXedjzun1ugIYJvarcVRGIVgghjk6rCT3Qg+VZYIvW+pEwh70PXKcMJwIVWuv8CMZ5TOJscdjN9rAJ3ef3HVFy6R/dH4D8mvCfhsvnAiDZIQldCNH12tLLZRpwLbBBKbU2sO23wAAArfUTwMcYXRZ3YnRb/FHEIz0GSilSnakU1BaE3O/1e7FbmvZ+Cba6y+rLwp7X7Tda6ME51CWhCyG6UqsJXWu9BFCtHKOBn0cqqI6QFpUWdrRo87lcABIdiQCU1peGPafHZ0wNEEz+UkMXQnSlXj30v7HUqNTwNXS/F4tqmtCTHElAywk9WHKRFroQojvoMwk9IyqD/Jp8fH7fEft8+sgaeow1BqvJSkl9SdhzBksuifZEFEpa6EKILtVnEvrQxKG4fC72VR05p0vzgUVg1N2THEkt19ADvVwcFgextlhpoQshulSfSejDk4YDsL10+xH7vPrIkgsYZZeWSi7BhG41WSWhCyG6XJ9J6IPjB2NRFraVbTtin9fvPeKmKAQSel3rNXS72U6cLU4SuhCiS/WZhG4z2xiUMIjtZUe20JvPhx7UWgs9uACGzWyTFroQosv1mYQOMDxxONtKj2yhhxpYBK0n9GALPVhykZuiQoiu1OcSekFtQcMcLEHhSi6JjkTqffXUempDns/tc2Mz2VBKSQtdCNHl+lRCH5Y0DOCIVrpXh6+hQ/i+6G6fu2F+dUnoQoiu1qcS+vBEo6fLuqJ1TbaHK7kER4C2lNCtZmOWxlhbLLXeWpkTXQjRZfpUQk92JjMpfRL/2f6fJom3pZuiED6hu3wubGYbYEwABlDtro502EII0SZ9KqEDXDvyWg7WHOTr3K8btoUa+g+HE3q4wUVuf9OSC8jwfyFE1+lzCX1W9iwyYzJ5efPLAGitQ07OBYcn6Ao3/N/tczcsjBFrNRJ6pUd6ugghukafS+hmk5nzjzufNYVrcPlc+LQxt0uoGrrT4sRpcbb5pihIC10I0XX6XEKHw4tXFNYWHk7oIWro0HJfdLfP3VBDDyb0Spe00IUQXaNPJvT0qHTAWJIuOPtiqBo6GKsRtVRDb35TVAYXCSG6Sp9M6A2LRtcWNgzfD1VDB6OO3mIvF5OR0FOcKZiUqcUl64QQoiP1yYQeXDS6zSWXMBN0Na6hW81W+kX3I68qrwMiFkKI1vXJhB5cNLqwtrCh5BLqpigcrqEbq+w11XhgEUBWbJYkdCFEl+mTCV0pRVpUGoV1h1vo4UouSY4kvNobsjbeuB86QFZMFrlVuR0TtBBCtKJPJnSAVKexxmiwhh6uhR7six7qxmhwcq6g7NhsylxlMlpUCNEl+mxCT49Kb1JyCddCT3aEn8+lcbdFMEouAHnVUnYRQnS+PpvQU6NSKaorapjTZdG2EpbvOTJpJznDz+fSeC4XMFrogNTRhRBdos8m9LSoNOq8dZS7ygH4dGMh/1l1ZP073ARdWms8fk+TGnowoedW5fLlvi8pqQs9ZYAQQnSEPp3QgYZ+436/iYo6zxHHJdqNGnrzhN54+bmgWFss8fZ43t/1PrcuvJU3t73ZIbELIUQofT6hH6w+CIDPr0ImdKvZWF6ueUJvvPxcY9kx2ews3wnAvqp9EY9bCCHC6bsJ3dm8ha4orz0yoYNxY7R5Qnf73ABNSi5wuOzitDjJrZQujEKIzhO6a0cfEBz+H0zoXp+JyhAtdAg9QVcwoTcuuQCcmXMmFpMFm9nGl/u/jHTYQggRVp9toTssDhLtiYcHAunQNXQIPfzf7Q+d0E8feDr3T7+fnLgcyl3lRyxILYQQHaXVhK6Uek4pVaiU2hhm/yylVIVSam3g/x8iH2bHyIzJbKiha22mxu3D4/MfcVyiI5EyV9OBRcEaeuOBRY1lx0kXRiFE52pLC/0F4OxWjlmstT4+8P/eYw+rc2TGZjYM/Q9eilCt9CRHEmX1ZQ2DkAA8PuO45jX0oGAtfX/V/ghGLIQQ4bWa0LXW3wChpxvs4TJjMg8/0S0ndI1u6LMOjXq5mK1HHA/GvC6Az
O0ihOg0kaqhn6SUWqeU+kQpNTrcQUqpm5RSK5VSK4uKiiL01kevSUJvqYUeYrRosIYeroUeZY0izZnG/kppoQshOkckEvpqYKDWejzwL+C9cAdqrZ/SWk/WWk9OTU2NwFsfm2ArGjjcQg/RdTEjKgNoWg9v6OUSpoYORh1dWuhCiM5yzAlda12pta4OPP4YsCqlUo45sjDW5ZZz51vrKK52HfO5+sf0b3istTHbYqgW+rDEYSgUW0u3NmwL122xsQGxA9hetp1/rP6H9EkXQnS4Y07oSqkMpZQKPJ4aOGeHTWJSWOXirVV5HCyvO+Zz9Y/pj0IZT1qooUdZo8iJz2FL6ZaGbQ29XFpI6CdnnoxSimc2PMPzm54/5niFEKIlbem2+BrwHTBcKZWnlLpBKXWzUurmwCGXARuVUuuAfwJX6lDL+0RISoyRQCPRQreZbQ0DjIKXItxo0ZFJI5sk9HAjRRs7O+dsvr3qW07odwIbi0P2+hRCiIhpdaSo1vqqVvY/CjwasYhakRJjJNDiKndEzpcVk0VhbWGLLXSAUcmj+HjPx5TWl5LkSGpTySVoTPIYXtz0Ii6fq8VfAEIIcSx63EjR1FgjIRZFoIUOh3u6tFRDB6OFDrC1xKijB3u5NJ+cK5SxKWPxam+TGrwQQkRaj0voDquZWLuFoqoIJfTYYNfFllvoI5JHALC5dDNwuIbelhb36BSjJ2ew7FLtrmbB3gUhF54WQoij1eMSOkBKrD0iNXSAS4ZcwrVDfwV+BwAVdaFLOXG2OLJisthcYiT04EjRtpRc0qPSSXGmsKl4EwCvbHmFOxbdwfyt8yPxKQghBNBTE3qMLWIJvV9MP2ZnXgJAtM0ctoUOMD5tPKsKVuHXflw+FxaTBZNq/RIqpRiTPIaNJUYLPTgL4yMrH2Fb6bYIfBZCCNFjE7o9YiUXAI/PKH0kxdhaTOjT+k+jtL6ULSVbcPvdLQ4qam5s6lj2VOxhyYElbC3dyg1jbiDGFsPj6x4/5viFEAJ6cEIvro5MLxcAn99I6MnR9rDdFgGmZU5DoVh8YDFun7tdPVYuGXoJsdZYblt4GwCXDruUWdmzWH5oeZNJv4QQ4mj12IReUefB7T1yqtuj4W1I6DZcXj/1ntAJNsmRxKjkUSw9sBS3zx12Yq6QMTtTuGXiLdR56xiRNILs2GymZkylyl3FtjIpuwghjl3PTOixRqmjpCYyZRdvYA705MCgpZbKLqdknsL64vXk1+S3u0/5FcOu4Jycc7hu1HUATM2YCsDy/OVHE7YQQjTRIxN6aoQHFwVb6BlxRk+Xlurzpw88HYDv879vVw0dwGwy89DMhzj/uPMBYxm8QfGDWHZo2dGELYQQTfTIhJ7SMLioPiLnC9bQ+yU4jfO2kNBHJI1g/rnzuXzY5Vwy9JJjfu+pGVNZXbAajz/8XwVB3x38jre3v33M7ymE6J165CLRHdVC7xdvtNALq1r+RTE6eTSjTwo77Xu7nNz/ZN7Y9gYLcxdyxsAzwh63t2Ivt359Kx6/h3MGnUOUNSoi7y+E6D16Zgs9JrLD/4M19H7xRgu9sDJyXSJbMzNrJjlxOfx77b9x+VysOLQCr9/bsH9PxR6e2fAMv/r6V7h9bjx+DysLVlLrqeVA9YFOi1MI0f31yBa602Ym2mZm8Y4iVu8rIzHaxpj+cZw2Ip0ByUbLtbCyntRYO4GZfVsUbKFH2czEO60URrCPe2vMJjM/O/5n/PqbX3PO2+dQVFfE0MSh/Hbqb8mKzWLep/MaJgR7ZNYj/PqbX7P0wFI+2PUBSw8s5YvLv5DWuhAC6KEJHYw6+ve7S0mLtaOB/6zK476PtvDqj08gzmHl/EeX8PR1kzhtRHqr5wrW0K1mE2mx9lZLLpF2Vs5ZvLDpBcrqy/ifSf/D61tf50cLfkSSIwmXz8V7F77HcQnHATAlYwqf7PmEclc5Gs2CvQu4eOjFnRqvEKJ76rEJfd7JORRXu/jFqUNx2szsLa7hkse/5cVv95IWa8fn12wvqG5TQg+20M0mRVqcvVNb6AAmZeLlc17GpExYTBauGnEVz2x4hje3vcmfT/lzQzIHY3DT4gOLibJEkehI5N2d7zZJ6Fpr6rx10moXog/qkTV0gB9NG8SdZ43AaTOmvc1JiebSiZl8vrmAd9YYteW2rmoUrKFbTIq0WEen1tCDbGYbFpPx+9VpcXLLhFtYfOViTh1wapPjpmdOB+CqEVdxxfArWFO4hvd2vseWEmPxjSfWPcHst2ZT6a7s3E9ACNHlemwLPZQfTMnm6cV7qKr3YjOb2pzQgyUXs1mRFmvME6O1blP9vbMNiBvA6+e+zrDEYVS4K3hszWP8funvUSjmjZ7HS5tfwqd9fH/we87MOROA0vpSHGaHtNqF6OV6bAs9lCFpsZwwKImsRCenDE3hQHnbauHBkovVZCI11o7b529xtGhXG50yGqvZSoozhQ8u/oA3znuDaZnTeH7T8yQ7kom1xbLkwBLAKMFc89E13PPdPV0btBCiw/WqFjrAv6+ZiMvr54lFu1i5t7RNr/E1qaEH+6K7SIhq30jQrtA/pj/9Y/rzj1P/wb/X/ptZ2bN4efPLLD2wFK01m0o2kVedR0l9CbWeWjaVbCLGGsPI5JFdHboQIsJ6VQsdIDnGTv8EJ/0TnFTWe6mqb72l7WlSQzf6uHdFHf1Y2Mw2bp10K8enHc8pmadQWFfI9rLtDXOv13nr+GjPR9zy1S08sPyBLo5WCNERel1CD+ofGMafX9F62cXn15gUmBon9E7uuhhJp2SeAsCHuz/ky/1fMjl9Mgn2BB5Y9gA1nhq2lm7FryMzU6UQovvotQk9M8EonRxow41Rr19jMRmXonHJpadKjUrlnJxzeGHTC+yp2MMZA8/gtAGn4fa7SbAnUOutZX/l/q4OUwgRYb02oQdb6G3p6eL1+TGbjB4tMXYLUTZzjyu5NHf/9Pu5YtgVxNpimT1gNhcPuZjMmEz+eNIfAdhSuqWLIxRCRFqvTehpsQ7MJsX2Q1Xc/PIqNh2sCHus0UI/3EUxI87Bocq2dXnsriwmC78/6fcs+sEi0qPTOT7teD699FNmZs/EarI29FsXQvQeva6XS5DZpMiIc/Dqsv14/Zrx2QmM7h8f8lifX2MxH07oWUlR5Jb27IQeZDVZj3g+NHEom0s3d1FEQoiO0vNa6K5q2Pxf0LrVQzMTnA19zAsqw9/k9Po1ZtPhS5Gd6CS3rPbYY+2mRiaNZGvpVnQbrqEQoufoeQl9y/vw5nWQt7LVQwenRhPnsJAR52g5ofv8TUouA5KiKK/1dOvBRcdiVPIoKlwVHKw52NWhCCEiqOcl9BHngtkGm95p9dDfnDOSj381nePSotvQQm+a0AFyS3tnK3186nhA1jIVorfpeQndEQ9DzoBN74G/5b7U8VFWshKjSI9zUNBCrxWfX2NtVEPPDiT0vF5adhmWOIyM6AwW5i7s6lCEEBHUakJXSj2nlCpUSm0Ms18ppf6plNqplFqvlJoY+TCbGXMJVB2E3O/bdHh6nIPCqnr8/tA1
4+Yt9GBC39+WFrrPw/df/Ie3332rTbF0B0opZmbN5Lv876j39twBVEKIptrSy+UF4FHgpTD7zwGGBv6fADwe+Nhxhp0NFid8fT+c/r+gfcZNUnss7FsK9RWQMQ76jQelmFL/LWt0CeUFo0jKGAjNZlF0uoq53v0feOEvUFtK/KgLudFRyPCNn0C/i+DAKtj4Ngw4AQaeAlFJ4EiA3Qvxf/9vTqwLzBkTtRbGXgYmq1EWciaA1Qm1JVBTDN56SB4KMWmHY9AafG6w2Dv0kjV3avapvLHtDZYfWs6MrBmd+t5CiI6h2tLTQSmVA3yotR4TYt+TwEKt9WuB59uAWVrr/JbOOXnyZL1yZes3NsP67t/w5b3gbWf3QosDlBksNsicBJ563PuXY9ZezJkTjP37lgLgR2EicH2yT4SCjeCubnK6rXHTeKR4KtPNm7jW/FnbYnAmQmIO+L1QkQd1ZZAxFhIGgtkKk2+AjDHGL5Kk44xjIzyVr9vnZvrr0zl38Ln84aQ/RPTcQoiOo5RapbWeHGpfJPqhZwK5jZ7nBbYdkdCVUjcBNwEMGDDg2N71pJ/B+Cth5xdGa1kpqC2FrMkQnQqHNkD+OvC52W4fw73vruT3J9kYbisxXu+qNHrK2GL4JuYc3racy+M3/sDYV3GAu99dx6oiM59eZIaYVKO176k/nIDryiizpnLe0/nYLSY+c0/h8hvuwFFfAn4P+DxGPJ5aI57oVDCZoXgHFG2B8lwjeWdOgqgUyF0GpXugphA2vQvKBMH5VuKzYeQFkDnReOyIh5ShxvmOks1sY1rmNBblLsJ/oh+T6nm3U4QQTXXqwCKt9VPAU2C00I/5hFFJMO6K0Ptyphn/gZjyOpb4XaxOH8vwqUf+Innx2WXUuLyHN8RnEpNeye4de/EfdyamYH3d6oCUIQ2HHcqvxOs/yEkDE1m8o5iyxHH0i3e2HPOQ2S3v99TByuehrhQGngylu2HH57D8KeMXRVB0Koy5DKbfZpRwjsKp2afy+b7P2VKyhdEpo4/qHEKI7iMSCf0AkN3oeVZgW7eRGmtHKTgUZuZFj8/fMDlX0ICkKNw+P4cq6xvmhWnO7fU3nB+gos7TekJvjdVp/PURdNxpMOXHxl8Hpbuh8iDUFMH2T2HF07D6JTj3r3D8Vcbxfr9RFnLEtfpW0zOnY1Imvs79WhK6EL1AJP7Ofh+4LtDb5USgorX6eWezmk0kR9vDTonbfOg/QL94Y9bFlvqvuwIJPS3WOLaitgMHIlkdkD4Khp5uJO8rXoSfLzfKMO/dDJ//EXZ8Ac+fA38dDgWtD+1PcCQwIW2CdF8UopdoS7fF14DvgOFKqTyl1A1KqZuVUjcHDvkY2A3sBJ4GfhbmVF0qI94etoXevNsiQFK00eourXGHPae7IaEfbqF3quTj4IfvwLgfwNK/w6uXQtFW48buuz8Bb/jYg2ZlzWJb2baGFY6EED1XqyUXrfVVrezXwM8jFlEHSY91cDBcQvc1nW0RIDnaWH6upDp8UnR5fUDTkkuns9jgkqfg1LuhaBv0Px5yl8Mb18B7PzXKMc6EsC8/e9DZPL/peW7+4mbOGHgGj8x6pNNCF0JEVp/p2pAe7+BQRegujs0n5wJIjgkk9BZa6K5mLfTKem/YYztc4kAYdqZxg3TkeXDq74zpEf4+Dh47AT75f1BTcsTLMqIz+PTST/nhyB/y+b7P2Va6rQuCF0JEQp9J6ENSYyir9YSso/v8/iZD/wGibBacVjMl1eGnDAiWXJJjurCFHs7MO+HHX8DoC43+7cufhn9OgG//Bd6mn5PT4uTm8TfjtDiZv3V+FwUshDhWvXY+9OZG9zd6fWw6WEnacEeTfaFq6GC00ltuoRsllyibmViHhcrulNDB6OOeOcl4XLgVPv89fPY7WPEMTL/D6A2zZzEcXEP8Zc9y3uDzeH/X+xTVFuG0OPnDSX8g3h56DnkhRPfTZ1roIwMJffPByiP2haqhg1FHbymhB1voNouJeKe1e7XQm0sbAde8ZdxEtUbB+7+AT+8yBmBpH7z3M64Zcgl+7Wd3xW4W5i7khx//kLyqvK6OXAjRRn2mhR7nsJKd5GRz/pEJ3Reihg5GKaUt3RbtPSGhBw2ZDYNmQv5aY9RpbLrRSn/xPI779gkWX/YlUY4EVh/8jl9+/Ut++NHVPHb649JPXYgeoM+00AFG94sP3UIPUUMHSIq2tdht0dWTWuiNmS3GFAmx6cbzQdNh2q9gzctEPz8H9dV9THrvNl7etwd7bSk/XnA9eyr2dG3MQohW9amEPqp/HHtLaqh2Ne2N4muphl7tDts/uyGhm03EOXpQQg/ljHvhyteM+WOW/B3qyhh85kM8X+7G6q7h1s9/SnWzicm6yq7yXRTVFnV1GEJ0O30qoY/uH4fWsLVZ2cXTQg3d7fMf8QsgyOX1YbOYUEr1rBZ6OCPmwM+Xwe8K4bbNMOUG+s/7lL9U+dlXncfl757Ptwe/7eooufmLm7lj0R1dHYYQ3U6fSuijAjdG1+aWN9luDP0PUUMPjBYNN7jI7fVjtxivi4/qBQk9yGw5PJNj0mCmXvMBT1eBuSqfn3z+E3707sU8tuZR3t7+Nh6fB601tZ7OWd2pqLaIQzWHWF24mk0lmzrlPYXoKfpUQs+IczC6fxxvrsxtUkbx+v2hW+gNg4tC90V3ef3YLUbii3dacXv91Ht8HRB5F0s+jik/WcbbQ+ZxV6WLA6VbeWL9k9zz3T1c8d4FXPrfiznl9VP4Yt8XHR7KltItDY9f3fxqh7+fED1Jn0roSinmnZzD9oJqvt11eNRk2Bp6O1rocU4rQPfrix4ptmjss+7imp9u4LOp97HOPp5/FZXjLd2DtXAzQ+pquGPhbTy25lF2lu3ssHlhNpcYk45d5Mzmk72fUFxX3CHvI0RP1KcSOsD54/uTHG3j+aV7AdBah6+htzL839W45BJI6L2m7BKOxQ5jL8N05SvM+tk6Phh7K2+M/jnPx01kWm0tT6x/kovfv5hz50/jD5/9lJc3vkCtpxatNfsr9+PxH9v12Vy4jhyPh+t3LMPr9/LhZhnZKkRQn+mHHuSwmrlkYibPLtmDx+fHFFjaLVQNPSkwQVe4rosuj3FTFPpQQm8sKglONCbdjNa/5LEVz1Cw50sWlW/na38hi1yLeDd/Ce9seI706H4sLdtMkiWKi3LO5dqJPyPFmdLut9xStJ4JLjeDZv8f49Y9wn+3vcncibegIrxEnxA9UZ9L6ACDU2PwayiscpESaIWHKrk4rGZi7BaKw8zn4vb1wRZ6OErB1BtJn3ojVwBX1BTDvm/5ds1T/L+63eTXl/DTiiq22Wp5YcebvLrzbS4beBbnj52LRVkoqC1gTcFqluYuYlrWdG6e8HPs5sMLZ28r3YZGc8hTySi/GabeyIXb3+Q+TxFbijYyKm1s133uQnQTfTKhZ8QZc7kcqqgjIZCIQ5VcoOXBRS6Pv2+30FsSnQKjLuDkURfwwdYP0HVlJI66GCoOsHfZozy7/1Pe2Psxr+77pOElZg3D3G6eqdjJ61v
mo5SJQdH9sJmsrCw/PAvkyLTjwWTmrAk38+DKe3lt5SPcN+f5Lvgkhehe+mZCjw8mdBdD0oybd6Fa6HB4cFEobp8fp9XccBwYrX7RVMKI8w8/SRtBzvmPcl9tKT9b/jgb9y9E1xSTarIzOG4g8UNO5Ns1z/E5xZiBrTXl5JvN3F5ZRb7Fwla7lbHjLgQgftTF/GDpPbxctJIB3z/Aj6begcXUJ7+lhQD6akIPttAr6/H5jYRuDVFDB6OnS15Z6D7WLq+voYUf57CSEGVlf2nn9Mfu8aKS6Dfrbvpx9xG7Tp76E04u2Gj0ha8rA3cNpI4wFsrevQiGzzEONFu5/awnKVlwI//c9iqPb5vP9ORx/O60v5Ealdri23v9Xqrd1Xi1F6fFSbQ1uiM+SyE6VZ9M6AlRVmwWEwWV9Xj9xvD9sC30aBvr88pD7nN7D5dcAAYmRZErCf3Y2aIge+qR28/96xGbzIOm86fLP2TWgv9hfdlW/uNfw0Vvnc60+KGMzZzGmAEzGZE8Eqfl8OLd3x5Yyl0Lb6fMW9OwbWbcEP545pOkRqc1Ob/X72Vb2TZGJY2SG6+i2+uTCV0pRUacg0MV9Xh9Rgs9XA09OcaooWutj/iBbtxtESA7KYr1eRUdF7gIyZo8hHOu/oBzPPVc8e1feXTbfFaXbOKTim2w+TnMwBB7MiPiBlFRX8Y3lbsY7HFzU1UNVouDApuTl/zbufTts3h2zqsMTRkFgNvn5vZFt7MwdyFXjbiKu6behUn1uZ6+ogfpkwkdjLJL45JLuBZ6UrQNr19TWeclPsraZF/jm6IAA5Oj+GTjIbw+f8hukKKDWR0Mmnk3f53xWyjZRdGOT9i4fyEbSrewqTaPJTWFxPv9XOozcfukO4keezk44sHv47wv7ubG3P9y00fX8P9OuQ8Pmte2vMqGkk2cHJPDa1tfo7L6EPed+lesJmvrsQjRBfpsQk+Pd7A+rxxvKzX0lMDycsU1riMSutFt0dzwfEBSFD6/5mB5PQOSozooctEqpSBlCKkpt3DqSbdwqtZQUwRl+yAm1ZgH3nT464bJzOAzH+CpxQlcv+Ml7lzyGwAyfX4eKCllzp79PBMfxz/5mop3L+WS0dcyNusUMmL6hfzLrb382k9exX7KyncxPmf2MZ1L9G19NqFnxNn5rKIer6/lGnrjwUXHNbvP5vL4mpRcBiQZN9b2l9ZKQu9OlDIWz45Ja/Gw46bfxSdJw9i/6H485fsYnT0d07nzIGMMN7qqiPv0Fh6o2s2SZffCMhhhTeSgv54UZzL/N/NhxqSMaXdotZ5arn/vYjbVHgTguerfMmXMVUfzWQrRhxN6vBOX109xoEtiSzV0IORi0W5f05JLMInvK61hsicRe2BqXdFzRI2+hBEjL4Sa4sMLgAT8YO7XnLf9E/YWrGHp3s/5rjKf2V4v37mq+OGHVzHFmc6IhKHYzQ5OHXklozNPaPG9tNb87sNr2VJzgDu8Tp401/LWhmcloYuj1ncTeqDrYrBLYriad3CCruJmfdF9fmMOmMYll4w4BzazieV7Snngk63cPWckV04d0BHhi45kMh+RzIPbo0ecx+gR5zF65u+5qaYYyvdTsfsrnt3xFosr85hfewivgicPfMEJ5gTOG3Yp6UlD8bgq8biryEo/HpSJ57+7n++qdlOKj9tNqcydt4D8N8/lDVc+pVUHSYrt3+mf9rHyaz8Hqw+SFZvV1aH0WX03occbifpAeR3Q8khROHI+l8YLRAeZTYqsRCf/XWv8+bz+QAVXRjZs0Z1Ep0B0CvGZE7lt+h3cVlcO5fuorinkjdWP8lbVDn6/5dnQL/X7me2zcnLyeObMeRIsNi4bewOvrv4/Xvn2T9xy5mM97q+7t3e8zZ++/xPz58yXNWi7SJ9N6OmBFvqBMiOhh6uh2ywm4hyWI0ou7kYLRDc2IDmK3cVG/+a9xTWIPsSZAM4EYoAbhpzB9XXlbFn9LHXeGuz2eMxWJ7sK1lJRW8x5U35J/MBTmrx8yOgrmL7s/3j60GK+fXUap/Y7CYfFSW75Lk4ePIcZo6/CrMxszV2Cx1vH2EFnNEn6+dX5rC5czanZpxJlbd89nBpPDS9tfonLh11+VJOmASzYswC/9vPEuif41+x/HdU5xLHpswk9LdaBUpBX1nILHSA5xn7EFLour7GQha1ZQh+eHsvKvWVMGJDA7iJJ6H2ZciYwatrtTbaNbOkFJhP/uOAtPlj4W16p2MKjeZ8B4PD7eaN8I5ZVD+EEqgLfqhOXRDE+/jjSYzJJSR/H/eufoNRTSZTZzhWDL+CS0deyt3A9abHZjEqf0GKL/9+r/s5L215nWe43PDPn5bBTKCxc9QSvbH6JEl89d5/wWyaPvAyAsvoyVhasJMWZwsK8hWwu2cyo5FFtvVQiQvpsQrdZTKTF2huG6rfUbzw5+sj5XFxhWui/On0oc0/O4c2VuSzZWUy9x4fDakaItrCmjeCSK97hEq+Lij0L8flcxKWO5ptV/2Z98QYqfC7GJY2kzlvHq8UreKVsPZ7yDZD3KVkeD78vLefz6Che3P4mL+x4q+G8yVjIMDtJMjvIsMVz8fibOG7ADA6V76IazfxtbzDM5WZVyUZ++8EPuWj8j5mcPQOb2dZwjmc++Sn/KFxCf68fE3DDsnv4df4KrjntQRbmLsSnfTxYb+dWZeHP3/yG5y74D1az9NnvTG1K6Eqps4F/AGbgGa31A832zwMeBg4ENj2qtX4mgnF2iKzEKFbvLwPCl1zAqKPvK2k6pL8hoTdL1lE2C1E2C4NSotEacktrGZoeG+HIRa9nsRM/9KyGp6ed+VdOa3bI1VqjXdUU5K9k+96vGJ84gviU4ZzurefGnZ+wsmQjw5JHk1u2nRXl2ynRFZSoMlZZCnhr6a9RSzQ60GqP9fl5uv/ZPHdoCa+UbeSTRf9DjFaMNEdT7/dSrb3sUV7mqFj+75pPcdUW89v/XsEDuR+z7+0drPeU0d/rZUr5Pv5gdnOn3s19H83lypN+y4D4HGJsMZ148fquVhO6UsoMPAacAeQBK5RS72utNzc79A2t9S86IMYOk53oZNU+I6G3VnIJJv6ghpuiYVr2A5ONPul7SyShiw6iFMoRS8agU8kYdGqTXUNyTmFI4PFE4MJG+2pK9/DfpfdR4Sony55MxcFVjIzuT9KcR7hD+/nZnoWs3Po2nxevZZ+7hjhlIcNk5dyoQfz4wlcw26KwOOL469Vfc8+b5/Ja9Q4AfmZKRv38I84GNr8xh+fLNvDux1dhRTEtbghn5JxNdkx/Ssp3k5N9CoPSxmM2tf+vV7/2U1JX0uoEbEdzXq31UcXUXbSlhT4V2Km13g2glHod4/ujeULvcbISD984aqmFnhyYE93v15gCxwVr6HZr6ISeE+iTLjdGRXcTnTSIq89/LsxeM1FDzmDGkDOY0cp5LPZY/vTDRdx+aC02s43olJFgMn4e/ufqrzht5WOU5K9hVeEaPvduYWHljsMv3vw0URqGW2Lx+DzU+D1oBfXaTy1+6hWkahMZ5ijMJjMJZgf9rX
EkxfTjw+I1bPdVM1Q5mRV7HKNTxpCSOoqElOGgNd+seQq7xcEF036H3RaDX/vxaz8+7aOweCvb9nzBwH6TGJh5IvmFG4lPyGFd6Sb+97v/pbiumFhrDCfFDeHcnLOZNfqqNs3fU1SxH7e3jszk4W37InSQtiT0TCC30fM8INSIiUuVUjOA7cD/aK1zmx+glLoJuAlgwICu75+dlXh4Br5wQ//BGFzk11Be52noxthQcgnzuoQoGwlRVvaWSEIXvZhSJPabcORmq53jT7oNgNl+P3ccWsemvV9R5qkmOS6bXXlL2VC6he31FcSbbPS3OFEaHBYrUWYHDrOdQ/WlFHir8Wg/2ylnkakAV81OBni8/MycxLf+Sp6r2ICvciPsPjK0v8//CB9QG6qxtv3FIzYNx84VNT7y/YdYXFfBZyVrSV7xAE4U0VoRj6IcTYnyU600qdpEvLJQo73sNRlTiGT5TQy0RJNmiyfRHk9BXQnl3lqSrdEk2xKIt8ejlImxWacwZdy1x3TpQ4nUTdEPgNe01i6l1E+AF+GIkh9a66eApwAmT57cMcvCt0ObW+iB+VxKql0NCb2h22KYFjoYZRdJ6KLPM5kw9Z/A2P6HE//oCddzQTtPo31eKkq2Ehs3ELMjlp8CdbWl7N7/DaXFWyivPkC9p44Thl9CQcV+3t/2OtFmB/G2WCzKhAlFgiOJYZknsTN/BYeqD9I/NouKwo2YKw5wuSUeW8ZE6Dceb9oIvtj1MQsLVgCaGnyUax/9lYWxJgexZjsF7gqqtIcMk4OL43Kwm2ysLNtKvqeaHZ4KSusUKX5NImZ2ecopqT+IJ3DP4vq64i5L6AeA7EbPszh88xMArXVJo6fPAA8de2gdLzvpcAu9pRr64VGldQ31cFdDDT18vS0nOYrvdpWwen8Z4zLjZQZGIY6BMltISGs6X44zKonRIy4CLmqyfQAwZcpPw55r7PjrWnwvC3D2cbM5u50xXtPosXbXoqxOYy4hQHs91NcWgdZY2jlOoK3akmFWAEOVUoOUUjbgSuD9xgcopfo1enoBsCVyIXacfvHO4LVusYU+qn8cSsG6RgtdtKWFPqZ/PIVVLi7597c8+vXOiMQshOgZlC2qIZkDKIsVZ1x/nPGZWKMSO+Q9W03oWmsv8AtgAUaiflNrvUkpda9SKvhX0y+VUpuUUuuAXwLzOiTaCLNZTA2t75Zq6DF2C0NSY5osXtFwU9QS/nU/nj6IBbfOYEhaTENvGiGE6ChtqqFrrT8GPm627Q+NHv8G+E1kQ+sc2YlR5FfUt9hCBxiXlcCi7YUN81+7Qszl0pxSiuEZsUzJSeTjDYciMne2EEKE0+eLusGeLi3V0AGOz46nuNrdMJnX4blcWu+zOjYzgYo6jywgLYToUJLQgwnd3HoLHWgou4SbyyX0a+ObvLai1sN/VuXh93d5Rx8hRC/S5xP6JROzuPOs4cTYW64+jegXi81sYl1uORB+tsVQhqUbr91wwEjoz3+7hzveWsdzS/ccW/BCCNFIn0/oOSnR/PzUIa3Wtu0WMyP7xzVMAeDy+jGp1ks1YLTiR/aPY32gl8yXWwoBeOjTbWw+WHlsn4AQQgT0+YTeHtOOS2b1/nIq6jy4vcbyc229yTkuM56NByrJLa1lw4EKbpw+iBiHhX9+uaP1FwshRBtIQm+H00ak4fNrFu8owuX1t+mGaNDpo9Kpdnn58YsrAbh8cjanjUhj2Z4SqaULISJCEno7TBiQSEKUla+2FuLy+tp0QzRo5rBULp6QybaCKrKTnAxNi+GEQUmU1XrYUVjdgVELIfoKSejtYDYpZgxNZdG2Iuo9/jbdEG3snvNHk5ng5ILx/VFKceLgZACW7Slp5ZWGeo+P8lp36wcKIfokSejtdNqINEpq3Hy9rbBdLXSA+CgrC++cxR1nGlNsZiU66R/vYNnu0lZf6/drfvziSi58bClaS4lGCHEkSejtdPaYDC6flIXb6yczwdn6C5qxmg/fSFVKccLgZJbtKWk1Sb+ybB9Ldhazr6SWbQVV5FfU8f3utrXsi6td1Li8gNHd0uvztztuIUT3Jwm9nRxWMw9fPp41fziD5+ZNOebznTg4ieJqNyvDzPVSWuPmr59t4/6PtzBhQAIAC7cV8dt3NvDDZ5aR28roU601l/z7W25/cx1aa656+nt+/Z/1xxy3EKL7kYR+lOwWc4sTerXVueP6kx5n574PN1Ne6+b15ft5c0Uuu4qqcXv9XPPMMh79eienDEnlyR9OYkRGLG+uyGXh9iK8fs3ji3axaHsR936wmcp6Dx+uP8htb65tGMm68UAl+0tr+XxLAZ9uPMSqfWV8s6NIyjZC9EKRWuBCHKUYu4XfnDOSW99Yy0l//oo6j5GIHVYTM4amsiW/kievncRZozMAmDk8lScX7cZiUswelcZbK3N5Y0UuPr/mw/UHKaxyATB5YBJXnzCAzzYfwqTA59fc8dY6AIqr3eSW1jEguWPmZBZCdA1poXcDFx7fn9NHpjMuK553f3YyX9w2kyFpMXy2uYBLJ2Y1JHOAWcPSAJgzth+/O3cUJqU4cXASz/9oChaT4vJJWYzPiuffC3fi8fn5bFMBUwclMW1IMjVuH1NyjHmYmy96LYTo+VRX/ek9efJkvXLlyi55756g2uXl3dV5XDQhk1iHtWG71+fnoQXbuGrqAAalRFNc7SIxyobZpBqm5/1ySwE3vLiS88b148P1+fz+vFHkJEfx01dX89+fT+PSx7/l8klZ/O+FY1qIQAjRHSmlVmmtJ4faJyWXbirGbuHak3KO2G4xm/jtnJENz1MC650CDb1nThuRxgXj+/PxhnysZsWZo9LJTopi/R/PxGE1Mz4rgdX7yzv6UxBCdDJJ6L2QUop/XjWBhy4bR0Wdh/TAqkwOqzFVwcSBCTy5aDd1bh9OW9unLxBCdG9SQ+/FHFZzQzJvbNLARLx+zZpcqaML0ZtIQu+Dpg5KxmpWLNpe1NWhCCEiSBJ6HxRjtzB5YBKLtklCF6I3kYTeR80ansrWQ1Ucqqjv6lCEEBEiCb2Pmjk8FYBF2wu7OBIhRKRIQu+jhqfHkhHnkDq6EL2IJPQ+SinFzGGpLN5RLLMvCtFLSELvw2YNT6Wq3sua3PKuDkUIEQGS0Puwk4ekYDYpFm4rxO/X1AcmBhNC9EwyUrQPi3damTQgkc83F7B0Zwl1bh/v3zKtXYtfCyG6D2mh93Ezh6eyvaCadXnlbCuo4rkleymtcZNfUdfVoYW1s7CK4mpXV4chmvH5NTsLq7o6jD5NEnofd/aYDJKjbTx46TjOGJXO37/Yzkl//pKz/76Ywqru10f9QHkd5/9rKT98ZpnczO1mnlm8m9Mf+YaVe1tfI1d0jDYldKXU2UqpbUqpnUqpu0Lstyul3gjsX6aUyol4pKJDHJcaw8rfnc4Vk7P5w3mjyEp0cu64ftR5fPzxv5u6Orwj3PP+Jtw+P1sPVTF/+f4Of7+Ve0u58611HCzvvn+xdAcur49nl+wB4IFPtvbIFbFcXh8+f8+Lu7FWa+hKKTPwGHAGk
[... remainder of the base64-encoded PNG data omitted: the image is the inline rendering of the loss-history plot produced by the plugin.loss_history.plot() cell below ...]",
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "plugin.loss_history.plot()" + ] + }, + { + "cell_type": "code", + "execution_count": 45, + "id": "2ea981cd", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([3, 4, 5, 6, 7, 8, 9])" + ] + }, + "execution_count": 45, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "outcome = np.array([3, 4, 5, 6, 7, 8, 9])\n", + "outcome" + ] + }, + { + "cell_type": "code", + "execution_count": 46, + "id": "bbd33233", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[2023-03-27T18:05:59.734678+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:05:59.737612+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", + "[2023-03-27T18:06:00.157952+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:00.160095+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:00.484737+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:00.485757+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:06:00.786487+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 1. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:00.788466+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 1. Original dtype float64.\n", + "[2023-03-27T18:06:01.100020+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:01.102261+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:06:01.460078+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:01.462163+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", + "[2023-03-27T18:06:01.805568+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:01.807568+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:06:02.183897+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:02.185904+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", + "[2023-03-27T18:06:02.569835+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:06:02.571874+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. 
[... several hundred near-identical INFO lines omitted: each sampling round repeats the same pair of [alcohol] checks (le = 14.2, then ge = 8.0) over fresh batches of 7 candidate rows, with "Remaining" varying from 0 to 6; a few rounds additionally log [residual sugar] checks (ge = 0.6, le = 65.8) ...]
+       "[2023-03-27T18:07:20.365288+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n",
+       "[2023-03-27T18:07:20.367291+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. 
Original dtype float64.\n", + "[2023-03-27T18:07:20.677852+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:07:20.680692+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:07:20.988636+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:07:20.990732+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:07:21.326922+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 1. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:07:21.329905+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 1. Original dtype float64.\n", + "[2023-03-27T18:07:21.682149+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:07:21.684150+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:07:22.042272+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 1. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:07:22.043272+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 1. Original dtype float64.\n", + "[2023-03-27T18:07:22.417916+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:07:22.418916+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:07:22.749237+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:07:22.751237+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", + "[2023-03-27T18:07:23.090475+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:07:23.091459+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", + "[2023-03-27T18:07:23.470508+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:07:23.473305+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", + "[2023-03-27T18:07:23.821072+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:07:23.823567+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 1. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:07:23.827191+0200][38480][INFO] [residual sugar] quality loss for constraints ge = 0.6. Remaining 0. prev length 1. Original dtype float64.\n", + "[2023-03-27T18:07:24.193607+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:07:24.194590+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. 
Remaining 0. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:07:24.532529+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:07:24.534525+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", + "[2023-03-27T18:07:24.876586+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:07:24.878585+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", + "[2023-03-27T18:07:25.216076+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:07:25.217076+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:07:25.599528+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:07:25.601333+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:07:26.159795+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:07:26.161982+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:07:26.541276+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:07:26.542274+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", + "[2023-03-27T18:07:26.869887+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:07:26.872038+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", + "[2023-03-27T18:07:27.183814+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:07:27.186139+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:07:27.522592+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:07:27.524574+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", + "[2023-03-27T18:07:27.885528+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:07:27.886547+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", + "[2023-03-27T18:07:28.236311+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 6. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:07:28.237310+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 6. 
Original dtype float64.\n", + "[2023-03-27T18:07:28.569622+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:07:28.571622+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:07:28.889372+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:07:28.890372+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", + "[2023-03-27T18:07:29.200272+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:07:29.202272+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:07:29.533137+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:07:29.535216+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", + "[2023-03-27T18:07:29.936280+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:07:29.939026+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:07:30.369796+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:07:30.371797+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", + "[2023-03-27T18:07:30.718054+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:07:30.720128+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", + "[2023-03-27T18:07:31.139806+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", + "[2023-03-27T18:07:31.140809+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. 
Original dtype float64.\n" + ] + }, + { + "ename": "KeyboardInterrupt", + "evalue": "", + "output_type": "error", + "traceback": [ + "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[1;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", + "\u001b[1;32m\u001b[0m in \u001b[0;36m\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mplugin\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mgenerate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m7\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcond\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0moutcome\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd\u001b[0m in \u001b[0;36mpydantic.decorator.validate_arguments.validate.wrapper_function\u001b[1;34m()\u001b[0m\n", + "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd\u001b[0m in \u001b[0;36mpydantic.decorator.ValidatedFunction.call\u001b[1;34m()\u001b[0m\n", + "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd\u001b[0m in \u001b[0;36mpydantic.decorator.ValidatedFunction.execute\u001b[1;34m()\u001b[0m\n", + "\u001b[1;32md:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\core\\plugin.py\u001b[0m in \u001b[0;36mgenerate\u001b[1;34m(self, count, constraints, random_state, **kwargs)\u001b[0m\n\u001b[0;32m 337\u001b[0m \u001b[0msyn_schema\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mSchema\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfrom_constraints\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mgen_constraints\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 338\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 339\u001b[1;33m \u001b[0mX_syn\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_generate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mcount\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mcount\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0msyn_schema\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0msyn_schema\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 340\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 341\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mX_syn\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mis_tabular\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32md:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_ddpm.py\u001b[0m in \u001b[0;36m_generate\u001b[1;34m(self, count, syn_schema, **kwargs)\u001b[0m\n\u001b[0;32m 246\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mdata\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 247\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 248\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_safe_generate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mcallback\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcount\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0msyn_schema\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 249\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 250\u001b[0m 
\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd\u001b[0m in \u001b[0;36mpydantic.decorator.validate_arguments.validate.wrapper_function\u001b[1;34m()\u001b[0m\n", + "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd\u001b[0m in \u001b[0;36mpydantic.decorator.ValidatedFunction.call\u001b[1;34m()\u001b[0m\n", + "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd\u001b[0m in \u001b[0;36mpydantic.decorator.ValidatedFunction.execute\u001b[1;34m()\u001b[0m\n", + "\u001b[1;32md:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\core\\plugin.py\u001b[0m in \u001b[0;36m_safe_generate\u001b[1;34m(self, gen_cbk, count, syn_schema, **kwargs)\u001b[0m\n\u001b[0;32m 391\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mit\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msampling_patience\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 392\u001b[0m \u001b[1;31m# sample\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 393\u001b[1;33m \u001b[0miter_samples\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mgen_cbk\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mcount\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 394\u001b[0m iter_samples_df = pd.DataFrame(\n\u001b[0;32m 395\u001b[0m \u001b[0miter_samples\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcolumns\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtraining_schema\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfeatures\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32md:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_ddpm.py\u001b[0m in \u001b[0;36mcallback\u001b[1;34m(count)\u001b[0m\n\u001b[0;32m 241\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 242\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mcallback\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mcount\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m \u001b[1;31m# type: ignore\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 243\u001b[1;33m \u001b[0mdata\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmodel\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mgenerate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mcount\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcond\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mcond\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 244\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mis_classification\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 245\u001b[0m \u001b[0mdata\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0minsert\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtarget_iloc\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcond\u001b[0m\u001b[1;33m,\u001b[0m 
\u001b[0maxis\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32md:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\core\\models\\tabular_ddpm\\__init__.py\u001b[0m in \u001b[0;36mgenerate\u001b[1;34m(self, count, cond)\u001b[0m\n\u001b[0;32m 211\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mcond\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 212\u001b[0m \u001b[0mcond\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtensor\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mcond\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlong\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdevice\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdevice\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 213\u001b[1;33m \u001b[0msample\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdiffusion\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msample_all\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mcount\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcond\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdetach\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcpu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mnumpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 214\u001b[0m \u001b[0msample\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0msample\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_col_perm\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 215\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0msample\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32md:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\core\\models\\tabular_ddpm\\gaussian_multinomial_diffsuion.py\u001b[0m in \u001b[0;36msample_all\u001b[1;34m(self, num_samples, cond, max_batch_size, ddim)\u001b[0m\n\u001b[0;32m 951\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 952\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mb\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mbs\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 953\u001b[1;33m \u001b[0msample\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0msample_fn\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mb\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcond\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 954\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0many\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0msample\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0misnan\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mitem\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 955\u001b[0m \u001b[1;32mraise\u001b[0m \u001b[0mValueError\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"found NaNs in 
sample\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\torch\\autograd\\grad_mode.py\u001b[0m in \u001b[0;36mdecorate_context\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m 25\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mdecorate_context\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 26\u001b[0m \u001b[1;32mwith\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mclone\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 27\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 28\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mcast\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mF\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdecorate_context\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 29\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32md:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\core\\models\\tabular_ddpm\\gaussian_multinomial_diffsuion.py\u001b[0m in \u001b[0;36msample\u001b[1;34m(self, num_samples, cond)\u001b[0m\n\u001b[0;32m 918\u001b[0m \u001b[0mdebug\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34mf\"Sample timestep {i:4d}\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mend\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m\"\\r\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 919\u001b[0m \u001b[0mt\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfull\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mb\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mi\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdevice\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mdevice\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlong\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 920\u001b[1;33m model_out = self.denoise_fn(\n\u001b[0m\u001b[0;32m 921\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcat\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mz_norm\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mlog_z\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdim\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfloat\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mt\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0my\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mcond\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 922\u001b[0m )\n", + "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m 1192\u001b[0m if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks\n\u001b[0;32m 
1193\u001b[0m or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[1;32m-> 1194\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 1195\u001b[0m \u001b[1;31m# Do not call functions when jit is used\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1196\u001b[0m \u001b[0mfull_backward_hooks\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mnon_full_backward_hooks\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32md:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\core\\models\\tabular_ddpm\\modules.py\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, x, t, y)\u001b[0m\n\u001b[0;32m 111\u001b[0m \u001b[0memb\u001b[0m \u001b[1;33m+=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0memb_nonlin\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlabel_emb\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0my\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 112\u001b[0m \u001b[0mx\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mproj\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m+\u001b[0m \u001b[0memb\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 113\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmodel\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 114\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 115\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m 1192\u001b[0m if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks\n\u001b[0;32m 1193\u001b[0m or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[1;32m-> 1194\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 1195\u001b[0m \u001b[1;31m# Do not call functions when jit is used\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1196\u001b[0m \u001b[0mfull_backward_hooks\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mnon_full_backward_hooks\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd\u001b[0m in \u001b[0;36mpydantic.decorator.validate_arguments.validate.wrapper_function\u001b[1;34m()\u001b[0m\n", + 
"\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd\u001b[0m in \u001b[0;36mpydantic.decorator.ValidatedFunction.call\u001b[1;34m()\u001b[0m\n", + "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd\u001b[0m in \u001b[0;36mpydantic.decorator.ValidatedFunction.execute\u001b[1;34m()\u001b[0m\n", + "\u001b[1;32md:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\core\\models\\mlp.py\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, X)\u001b[0m\n\u001b[0;32m 398\u001b[0m \u001b[1;33m@\u001b[0m\u001b[0mvalidate_arguments\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mdict\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0marbitrary_types_allowed\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mTrue\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 399\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mforward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mX\u001b[0m\u001b[1;33m:\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mTensor\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m->\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mTensor\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 400\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmodel\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mX\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfloat\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 401\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 402\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0m_train_epoch\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mloader\u001b[0m\u001b[1;33m:\u001b[0m \u001b[0mDataLoader\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m->\u001b[0m \u001b[0mfloat\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m 1192\u001b[0m if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks\n\u001b[0;32m 1193\u001b[0m or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[1;32m-> 1194\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 1195\u001b[0m \u001b[1;31m# Do not call functions when jit is used\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1196\u001b[0m \u001b[0mfull_backward_hooks\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mnon_full_backward_hooks\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\torch\\nn\\modules\\container.py\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, input)\u001b[0m\n\u001b[0;32m 202\u001b[0m \u001b[1;32mdef\u001b[0m 
\u001b[0mforward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 203\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mmodule\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 204\u001b[1;33m \u001b[0minput\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mmodule\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 205\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 206\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m 1192\u001b[0m if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks\n\u001b[0;32m 1193\u001b[0m or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[1;32m-> 1194\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 1195\u001b[0m \u001b[1;31m# Do not call functions when jit is used\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1196\u001b[0m \u001b[0mfull_backward_hooks\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mnon_full_backward_hooks\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd\u001b[0m in \u001b[0;36mpydantic.decorator.validate_arguments.validate.wrapper_function\u001b[1;34m()\u001b[0m\n", + "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd\u001b[0m in \u001b[0;36mpydantic.decorator.ValidatedFunction.call\u001b[1;34m()\u001b[0m\n", + "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd\u001b[0m in \u001b[0;36mpydantic.decorator.ValidatedFunction.execute\u001b[1;34m()\u001b[0m\n", + "\u001b[1;32md:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\core\\models\\mlp.py\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, X)\u001b[0m\n\u001b[0;32m 112\u001b[0m \u001b[1;33m@\u001b[0m\u001b[0mvalidate_arguments\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mdict\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0marbitrary_types_allowed\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mTrue\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 113\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mforward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mX\u001b[0m\u001b[1;33m:\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mTensor\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m->\u001b[0m 
\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mTensor\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 114\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmodel\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mX\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfloat\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdevice\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 115\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 116\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m 1192\u001b[0m if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks\n\u001b[0;32m 1193\u001b[0m or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[1;32m-> 1194\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 1195\u001b[0m \u001b[1;31m# Do not call functions when jit is used\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1196\u001b[0m \u001b[0mfull_backward_hooks\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mnon_full_backward_hooks\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\torch\\nn\\modules\\container.py\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, input)\u001b[0m\n\u001b[0;32m 202\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mforward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 203\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mmodule\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 204\u001b[1;33m \u001b[0minput\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mmodule\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 205\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 206\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m 1192\u001b[0m if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks\n\u001b[0;32m 1193\u001b[0m or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[1;32m-> 1194\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m 
\u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 1195\u001b[0m \u001b[1;31m# Do not call functions when jit is used\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1196\u001b[0m \u001b[0mfull_backward_hooks\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mnon_full_backward_hooks\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\torch\\nn\\modules\\linear.py\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, input)\u001b[0m\n\u001b[0;32m 112\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 113\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mforward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m:\u001b[0m \u001b[0mTensor\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m->\u001b[0m \u001b[0mTensor\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 114\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlinear\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mweight\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mbias\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 115\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 116\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mextra_repr\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m->\u001b[0m \u001b[0mstr\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;31mKeyboardInterrupt\u001b[0m: " + ] + } + ], + "source": [ + "plugin.generate(len(outcome), cond=outcome)" + ] + }, + { + "cell_type": "markdown", + "id": "ea5abc50", + "metadata": {}, + "source": [ + "## Congratulations!\n", + "\n", + "Congratulations on completing this notebook tutorial! If you enjoyed this and would like to join the movement towards Machine learning and AI for medicine, you can do so in the following ways!\n", + "\n", + "### Star [Synthcity](https://github.com/vanderschaarlab/synthcity) on GitHub\n", + "\n", + "- The easiest way to help our community is just by starring the Repos! 
This helps raise awareness of the tools we're building.\n", + "\n", + "\n", + "### Check out other projects from vanderschaarlab\n", + "- [HyperImpute](https://github.com/vanderschaarlab/hyperimpute)\n", + "- [AutoPrognosis](https://github.com/vanderschaarlab/autoprognosis)\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.1" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 8120e975dc0279e294847f6f2a2e44f1990775cc Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Thu, 30 Mar 2023 16:01:53 +0200 Subject: [PATCH 29/95] major update of FeatureEncoder and TabularEncoder --- .../plugins/core/models/data_encoder.py | 198 ++++++++----- .../gaussian_multinomial_diffsuion.py | 7 +- .../plugins/core/models/tabular_encoder.py | 280 +++++++----------- ...al8_tabular_modelling_with_diffusion.ipynb | 1 - 4 files changed, 230 insertions(+), 256 deletions(-) diff --git a/src/synthcity/plugins/core/models/data_encoder.py b/src/synthcity/plugins/core/models/data_encoder.py index 9f432d9c..57fdbc1c 100644 --- a/src/synthcity/plugins/core/models/data_encoder.py +++ b/src/synthcity/plugins/core/models/data_encoder.py @@ -1,6 +1,5 @@ # stdlib -from functools import wraps -from typing import Any, List, Optional, Union +from typing import Any, List, Type, Union # third party import numpy as np @@ -15,68 +14,111 @@ StandardScaler, ) +FeatureEncoder = Any -class _DataEncoder(TransformerMixin, BaseEstimator): - """Base data encoder, with sklearn-style API""" - @validate_arguments(config=dict(arbitrary_types_allowed=True)) - def fit(self, X: Any) -> Any: - return self._fit(X) +class FeatureEncoder(TransformerMixin, BaseEstimator): # type: ignore + """Base feature encoder, with sklearn-style API""" - def _fit(self, X: Any) -> Any: - return self + def __new__(cls, *args: Any, **kwargs: Any) -> FeatureEncoder: + obj = super().__new__(cls) + obj.__dict__.update(kwargs) # auto set all parameters as attributes + return obj @validate_arguments(config=dict(arbitrary_types_allowed=True)) - def transform(self, X: Any) -> Any: - return self._transform(X) + def fit(self, x: pd.Series, y: Any = None, **kwargs: Any) -> FeatureEncoder: + self.feature_name_in = x.name + out = self._fit(x, **kwargs)._transform(x) - def _transform(self, X: Any) -> Any: - return X + if np.ndim(out) == 1: + self.n_features_out = 1 + else: + self.n_features_out = np.shape(out)[1] - @validate_arguments(config=dict(arbitrary_types_allowed=True)) - def inverse_transform(self, X: Any) -> Any: - return self._inverse_transform(X) + self.feature_names_out = self.get_feature_names_out() - def _inverse_transform(self, X: Any) -> Any: - return X + return self + + def _fit(self, x: pd.Series, **kwargs: Any) -> FeatureEncoder: + return self + + @validate_arguments(config=dict(arbitrary_types_allowed=True)) + def transform(self, x: pd.Series) -> Any: + out = self._transform(x) + if isinstance(out, np.ndarray): + if out.ndim == 1: + return pd.Series(out, name=self.feature_name_in) + else: + return pd.DataFrame(out, columns=self.feature_names_out) + return out + + def _transform(self, x: pd.Series) -> Any: + return x + + def get_feature_names_out(self) -> List[str]: + n = self.n_features_out + if n == 1: + return [self.feature_name_in] 
+ else: + return [self.feature_name_in + str(i) for i in range(n)] @validate_arguments(config=dict(arbitrary_types_allowed=True)) - def fit_transform(self, X: Any) -> Any: - return self.fit(X).transform(X) + def inverse_transform(self, data: Any) -> pd.Series: + x = self._inverse_transform(data) + return pd.Series(np.ravel(x), name=self.feature_name_in) + + def _inverse_transform(self, data: Any) -> pd.Series: + return data @classmethod - def wraps(cls, encoder_class: TransformerMixin) -> type: - """Wraps sklearn encoder to DataEncoder.""" + def wraps(cls, encoder_class: TransformerMixin) -> Type[FeatureEncoder]: + """Wraps sklearn transformer to FeatureEncoder.""" - @wraps(encoder_class) - class WrappedEncoder(_DataEncoder): + class WrappedEncoder(FeatureEncoder): def __init__(self, *args: Any, **kwargs: Any) -> None: self.encoder = encoder_class(*args, **kwargs) - def _fit(self, X: Any) -> _DataEncoder: - self.encoder.fit(X) + def _fit(self, x: pd.Series, **kwargs: Any) -> FeatureEncoder: + self.encoder.fit(x.values.reshape(-1, 1), **kwargs) return self - def _transform(self, X: Any) -> Any: - return self.encoder.transform(X) + def _transform(self, x: pd.Series) -> Any: + return self.encoder.transform(x.values.reshape(-1, 1)) - def _inverse_transform(self, X: Any) -> Any: - return self.encoder.inverse_transform(X) + def _inverse_transform(self, x: pd.Series) -> Any: + return self.encoder.inverse_transform(np.reshape(x, (len(x), -1))) + + def get_feature_names_out(self) -> List[str]: + return self.encoder.get_feature_names_out([self.feature_name_in]) + + for attr in ( + "__module__", + "__name__", + "__qualname__", + "__doc__", + "__annotations__", + ): + setattr(WrappedEncoder, attr, getattr(encoder_class, attr)) return WrappedEncoder -class DatetimeEncoder(_DataEncoder): +OneHotEncoder = FeatureEncoder.wraps(OneHotEncoder) +StandardScaler = FeatureEncoder.wraps(StandardScaler) +MinMaxScaler = FeatureEncoder.wraps(MinMaxScaler) + + +class DatetimeEncoder(FeatureEncoder): """Datetime variables encoder""" - def _transform(self, X: pd.Series) -> pd.Series: - return pd.to_numeric(X).astype(float) + def _transform(self, x: pd.Series) -> pd.Series: + return pd.to_numeric(x).astype(float) - def _inverse_transform(self, X: pd.Series) -> pd.Series: - return pd.to_datetime(X) + def _inverse_transform(self, data: pd.Series) -> pd.Series: + return pd.to_datetime(data) -class BayesianGMMEncoder(_DataEncoder): +class BayesianGMMEncoder(FeatureEncoder): """Bayesian Gaussian Mixture encoder""" def __init__( @@ -84,70 +126,72 @@ def __init__( n_components: int = 10, random_state: int = 0, weight_threshold: float = 0.005, + clip_output: bool = True, + std_multiplier: int = 4, ) -> None: self.model = BayesianGaussianMixture( n_components=n_components, random_state=random_state, weight_concentration_prior=1e-3, ) - self.n_components = n_components - self.weight_threshold = weight_threshold - self.weights: Optional[List[float]] = None - self.std_multiplier = 4 + self.n_components = n_components + self.weight_threshold = weight_threshold + self.clip_output = clip_output + self.std_multiplier = std_multiplier + self.weights: List[float] - def _fit(self, X: pd.DataFrame) -> Any: - self.min_value = X.min() - self.max_value = X.max() + def _fit(self, x: pd.Series, **kwargs: Any) -> "BayesianGMMEncoder": + self.min_value = x.min() + self.max_value = x.max() - self.model.fit(X.values.reshape(-1, 1)) + self.model.fit(x.values.reshape(-1, 1)) self.weights = self.model.weights_ - self.n_components = len(self.model.weights_) + self.means = self.model.means_.reshape(-1) + self.stds = np.sqrt(self.model.covariances_).reshape(-1) return self - def _transform(self, X: pd.DataFrame) -> pd.DataFrame: - name = X.name - X = X.values.reshape(-1, 1) 
- means = self.model.means_.reshape(1, self.n_components) + def _transform(self, x: pd.Series) -> np.ndarray: + x = x.values.reshape(-1, 1) + means = self.means.reshape(1, -1) + stds = self.stds.reshape(1, -1) # predict cluster value - stds = np.sqrt(self.model.covariances_).reshape(1, self.n_components) - - normalized_values = (X - means) / (self.std_multiplier * stds) + normalized_values = (x - means) / (self.std_multiplier * stds) # predict cluster - component_probs = self.model.predict_proba(X) + component_probs = self.model.predict_proba(x) components = np.argmax(component_probs, axis=1) - aranged = np.arange(len(X)) - normalized = normalized_values[aranged, components].reshape([-1, 1]) - normalized = np.clip(normalized, -0.99, 0.99).squeeze(axis=1) - out = np.stack([normalized, components], axis=1) + normalized = normalized_values[np.arange(len(x)), components] + if self.clip_output: + normalized = np.clip(normalized, -0.99, 0.99) + normalized = normalized.reshape(-1, 1) - return pd.DataFrame(out, columns=[f"{name}.value", f"{name}.component"]) + components = np.eye(self.n_components)[components] # onehot + return np.hstack([normalized, components]) + + def get_feature_names_out(self) -> List[str]: + name = self.feature_name_in + return [f"{name}.value"] + [ + f"{name}.component_{i}" for i in range(self.n_features_out - 1) + ] + + def _inverse_transform(self, data: pd.DataFrame) -> pd.Series: + if self.clip_output: + data = data.clip(-1, 1) - def _inverse_transform(self, X: pd.DataFrame) -> pd.DataFrame: - normalized = np.clip(X.values[:, 0], -1, 1) means = self.model.means_.reshape([-1]) stds = np.sqrt(self.model.covariances_).reshape([-1]) - selected_component = X.values[:, 1].astype(int) + components = np.argmax(data.values[:, 1:], axis=1) # recreate data - std_t = stds[selected_component] - mean_t = means[selected_component] - reversed_data = normalized * self.std_multiplier * std_t + mean_t + std_t = stds[components] + mean_t = means[components] + reversed_data = data.values[:, 0] * self.std_multiplier * std_t + mean_t # clip values return np.clip(reversed_data, self.min_value, self.max_value) -OneHotEncoder = _DataEncoder.wraps(OneHotEncoder) -StandardScaler = _DataEncoder.wraps(StandardScaler) -MinMaxScaler = _DataEncoder.wraps(MinMaxScaler) - - -@_DataEncoder.wraps +@FeatureEncoder.wraps class GaussianQuantileTransformer(QuantileTransformer): """Quantile transformer with Gaussian distribution""" @@ -168,12 +212,12 @@ def __init__( copy=copy, ) - def fit(self, X: pd.DataFrame, y: Any = None) -> "GaussianQuantileTransformer": - self.n_quantiles = max(min(len(X) // 30, 1000), 10) - return super().fit(X, y) + def fit(self, x: pd.Series, y: Any = None) -> "GaussianQuantileTransformer": + self.n_quantiles = max(min(len(x) // 30, 1000), 10) + return super().fit(x, y) -REGISTRY = { +ENCODERS = { "datetime": DatetimeEncoder, "onehot": OneHotEncoder, "standard": StandardScaler, @@ -183,7 +227,7 @@ def fit(self, X: pd.DataFrame, y: Any = None) -> "GaussianQuantileTransformer": } -def get_encoder(encoder: Union[str, type]) -> TransformerMixin: +def get_encoder(encoder: Union[str, type]) -> Type[FeatureEncoder]: """Get a registered encoder. 
Supported encoders: @@ -198,5 +242,5 @@ def get_encoder(encoder: Union[str, type]) -> TransformerMixin: - bayesian_gmm """ if isinstance(encoder, type): # custom encoder - return encoder - return REGISTRY[encoder] + return FeatureEncoder.wraps(encoder) + return ENCODERS[encoder] diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py index db55aedb..990d6cbb 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py @@ -946,11 +946,12 @@ def sample_all( else: sample_fn = self.sample - bs = np.diff([*range(0, num_samples, max_batch_size), num_samples]) + indices = [*range(0, num_samples, max_batch_size), num_samples] all_samples = [] - for b in bs: - sample = sample_fn(b, cond) + for i, b in enumerate(np.diff(indices)): + c = None if cond is None else cond[indices[i] : indices[i + 1]] + sample = sample_fn(b, c) if torch.any(sample.isnan()).item(): raise ValueError("found NaNs in sample") all_samples.append(sample) diff --git a/src/synthcity/plugins/core/models/tabular_encoder.py b/src/synthcity/plugins/core/models/tabular_encoder.py index a9bb0e82..0eb2a096 100644 --- a/src/synthcity/plugins/core/models/tabular_encoder.py +++ b/src/synthcity/plugins/core/models/tabular_encoder.py @@ -2,7 +2,7 @@ """ # stdlib -from typing import Any, List, Optional, Sequence, Tuple +from typing import Any, List, Optional, Sequence, Tuple, Union # third party import numpy as np @@ -16,13 +16,13 @@ from synthcity.utils.serialization import dataframe_hash # synthcity relative -from .data_encoder import get_encoder +from .data_encoder import FeatureEncoder, get_encoder class FeatureInfo(BaseModel): name: str feature_type: str - transform: Any + transform: FeatureEncoder output_dimensions: int transformed_features: List[str] @@ -56,95 +56,70 @@ class TabularEncoder(TransformerMixin, BaseEstimator): Discrete columns are encoded using a scikit-learn OneHotEncoder. """ + categorical_encoder: Union[str, type] = "onehot" + continuous_encoder: Union[str, type] = "bayesian_gmm" + cat_encoder_params: dict = dict(handle_unknown="ignore", sparse=False) + cont_encoder_params: dict = dict(n_components=10) + @validate_arguments(config=dict(arbitrary_types_allowed=True)) def __init__( self, - max_clusters: int = 10, + *, + whitelist: tuple = (), categorical_limit: int = 10, - whitelist: list = [], - categorical_encoder: str = "onehot", - continuous_encoder: str = "bayesian_gmm", + categorical_encoder: Optional[Union[str, type]] = None, + continuous_encoder: Optional[Union[str, type]] = None, + cat_encoder_params: Optional[dict] = None, + cont_encoder_params: Optional[dict] = None, ) -> None: """Create a data transformer. Args: - max_clusters (int): - Maximum number of Gaussian distributions in Bayesian GMM. + whitelist (tuple): + Columns that will not be transformed. """ - self.max_clusters = max_clusters self.categorical_limit = categorical_limit self.whitelist = whitelist - self.categorical_encoder = categorical_encoder - self.continuous_encoder = continuous_encoder - - @validate_arguments(config=dict(arbitrary_types_allowed=True)) - def _fit_continuous(self, data: pd.Series) -> FeatureInfo: - """Fit the continuous encoder on a continuous column. - - Args: - data (pd.DataFrame): - A dataframe containing a column. - - Returns: - namedtuple: - A ``FeatureInfo`` object. 
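The `sample_all` batching fix above is worth spelling out: previously every batch was conditioned on the full `cond` vector, whereas now each batch sees only its own slice. A minimal sketch of the pattern, assuming nothing beyond NumPy (`sample_fn` is a stand-in for the diffusion sampler):

    import numpy as np

    def sample_all(sample_fn, num_samples, cond=None, max_batch_size=512):
        # batch boundaries: [0, B, 2B, ..., num_samples]
        indices = [*range(0, num_samples, max_batch_size), num_samples]
        batches = []
        for i, b in enumerate(np.diff(indices)):
            # each batch is conditioned on its own slice, not the full vector
            c = None if cond is None else cond[indices[i] : indices[i + 1]]
            batches.append(sample_fn(b, c))
        return np.concatenate(batches)

    # toy sampler that echoes its condition, to check the alignment
    out = sample_all(lambda n, c: c.reshape(-1, 1), 10, np.arange(10), max_batch_size=4)
    assert (out.ravel() == np.arange(10)).all()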
- """ - name = data.name - - if self.continuous_encoder == "bayesian_gmm": - encoder = get_encoder("bayesian_gmm")( - n_components=min(self.max_clusters, len(data)), - ) - n_components = encoder.n_components - dim_out = 1 + n_components - transformed_features = [f"{name}.value"] + [ - f"{name}.component_{i}" for i in range(n_components) - ] + if categorical_encoder is not None: + self.categorical_encoder = categorical_encoder + if continuous_encoder is not None: + self.continuous_encoder = continuous_encoder + if cat_encoder_params is not None: + self.cat_encoder_params = cat_encoder_params else: - encoder = get_encoder(self.continuous_encoder)() - dim_out = 1 - transformed_features = [name] - - encoder.fit(data) - - return FeatureInfo( - name=name, - feature_type="continuous", - transform=encoder, - output_dimensions=dim_out, - transformed_features=transformed_features, - ) + self.cat_encoder_params = self.cat_encoder_params.copy() + if cont_encoder_params is not None: + self.cont_encoder_params = cont_encoder_params + else: + self.cont_encoder_params = self.cont_encoder_params.copy() @validate_arguments(config=dict(arbitrary_types_allowed=True)) - def _fit_discrete(self, data: pd.Series) -> FeatureInfo: - """Fit one hot encoder for discrete column. + def _fit_feature(self, feature: pd.Series, feature_type: str) -> FeatureInfo: + """Fit the feature encoder on a column. Args: - data (pd.DataFrame): - A dataframe containing a column. + feature (pd.Series): + A column of a dataframe. + feature_type (str): + Type of the feature ('discrete' or 'continuous'). Returns: - namedtuple: - A ``FeatureInfo`` object. + FeatureInfo: + Information of the fitted feature encoder. """ - name = data.name - - if self.categorical_encoder == "onehot": - encoder = get_encoder("onehot")(handle_unknown="ignore", sparse=False) + if feature_type == "discrete": + encoder = get_encoder(self.categorical_encoder)(**self.cat_encoder_params) else: - raise ValueError(f"Unknown categorical encoder {self.categorical_encoder}") + encoder = get_encoder(self.continuous_encoder)(**self.cont_encoder_params) - encoder.fit(data.values.reshape(-1, 1)) - num_categories = len(encoder.categories_[0]) - - transformed_features = list(encoder.get_feature_names_out([data.name])) + encoder.fit(feature) return FeatureInfo( - name=name, - feature_type="discrete", + name=feature.name, + feature_type=feature_type, transform=encoder, - output_dimensions=num_categories, - transformed_features=transformed_features, + output_dimensions=encoder.n_features_out, + transformed_features=encoder.feature_names_out, ) @validate_arguments(config=dict(arbitrary_types_allowed=True)) @@ -161,81 +136,51 @@ def fit( self.output_dimensions = 0 self._column_raw_dtypes = raw_data.infer_objects().dtypes - self._column_transform_info = [] + self._column_transform_info_list = [] for name in raw_data.columns: if name in self.whitelist: continue column_hash = dataframe_hash(raw_data[[name]]) log.info(f"Encoding {name} {column_hash}") - if name in discrete_columns: - column_transform_info = self._fit_discrete(raw_data[name]) + ftype = "discrete" else: - column_transform_info = self._fit_continuous(raw_data[name]) + ftype = "continuous" + column_transform_info = self._fit_feature(raw_data[name], ftype) self.output_dimensions += column_transform_info.output_dimensions - self._column_transform_info.append(column_transform_info) + self._column_transform_info_list.append(column_transform_info) return self - def _transform_continuous( - self, column_transform_info: 
FeatureInfo, data: pd.Series - ) -> pd.DataFrame: - name = data.name - encoder = column_transform_info.transform - transformed = encoder.transform(data) - - # Converts the transformed data to the appropriate output format. - if self.continuous_encoder == "bayesian_gmm": - output = np.zeros( - (len(transformed), column_transform_info.output_dimensions) - ) - output[:, 0] = transformed[f"{name}.value"].to_numpy() - index = transformed[f"{name}.component"].to_numpy().astype(int) - output[np.arange(index.size), index + 1] = 1 - else: - output = transformed.to_numpy().reshape(-1, 1) - - return pd.DataFrame( - output, - columns=column_transform_info.transformed_features, - ) - - def _transform_discrete( - self, column_transform_info: FeatureInfo, data: pd.Series + def _transform_feature( + self, column_transform_info: FeatureInfo, feature: pd.Series ) -> pd.DataFrame: encoder = column_transform_info.transform return pd.DataFrame( - encoder.transform(data.to_frame().values), + encoder.transform(feature).values, columns=column_transform_info.transformed_features, ) @validate_arguments(config=dict(arbitrary_types_allowed=True)) def transform(self, raw_data: pd.DataFrame) -> pd.DataFrame: """Take raw data and output a matrix data.""" - if len(self._column_transform_info) == 0: + if len(self._column_transform_info_list) == 0: return pd.DataFrame(np.zeros((len(raw_data), 0))) column_data_list = [] for name in self.whitelist: if name not in raw_data.columns: continue - data = raw_data[name] - column_data_list.append(data) + feature = raw_data[name] + column_data_list.append(feature) - for column_transform_info in self._column_transform_info: - name = column_transform_info.name - data = raw_data[name] - - if column_transform_info.feature_type == "continuous": - column_data_list.append( - self._transform_continuous(column_transform_info, data) - ) - else: - column_data_list.append( - self._transform_discrete(column_transform_info, data) - ) + for column_transform_info in self._column_transform_info_list: + feature = raw_data[column_transform_info.name] + column_data_list.append( + self._transform_feature(column_transform_info, feature) + ) result = pd.concat(column_data_list, axis=1) result.index = raw_data.index @@ -243,31 +188,13 @@ def transform(self, raw_data: pd.DataFrame) -> pd.DataFrame: return result @validate_arguments(config=dict(arbitrary_types_allowed=True)) - def _inverse_transform_continuous( + def _inverse_transform_feature( self, column_transform_info: FeatureInfo, column_data: pd.DataFrame, - ) -> pd.DataFrame: - encoder = column_transform_info.transform - if self.continuous_encoder == "bayesian_gmm": - data = pd.DataFrame( - column_data.values[:, :2], columns=["value", "component"] - ) - data.iloc[:, 1] = np.argmax(column_data.values[:, 1:], axis=1) - else: - data = column_data - return encoder.inverse_transform(data) - - @validate_arguments(config=dict(arbitrary_types_allowed=True)) - def _inverse_transform_discrete( - self, column_transform_info: FeatureInfo, column_data: pd.DataFrame - ) -> pd.DataFrame: + ) -> pd.Series: encoder = column_transform_info.transform - column = column_transform_info.name - return pd.DataFrame( - encoder.inverse_transform(column_data), - columns=[column], - ) + return encoder.inverse_transform(column_data) @validate_arguments(config=dict(arbitrary_types_allowed=True)) def inverse_transform(self, data: pd.DataFrame) -> pd.DataFrame: @@ -275,39 +202,32 @@ def inverse_transform(self, data: pd.DataFrame) -> pd.DataFrame: Output uses the same type as input to 
the transform function. """ - if len(self._column_transform_info) == 0: + if len(self._column_transform_info_list) == 0: return pd.DataFrame(np.zeros((len(data), 0))) st = 0 - recovered_column_data_list = [] names = [] feature_types = [] + recovered_feature_list = [] for name in self.whitelist: if name not in data.columns: continue - local_data = data[name] names.append(name) feature_types.append(self._column_raw_dtypes) - recovered_column_data_list.append(local_data) + recovered_feature_list.append(data[name]) - for column_transform_info in self._column_transform_info: + for column_transform_info in self._column_transform_info_list: dim = column_transform_info.output_dimensions column_data = data.iloc[:, list(range(st, st + dim))] - if column_transform_info.feature_type == "continuous": - recovered_column_data = self._inverse_transform_continuous( - column_transform_info, column_data - ) - else: - recovered_column_data = self._inverse_transform_discrete( - column_transform_info, column_data - ) - - recovered_column_data_list.append(recovered_column_data) + recovered_feature = self._inverse_transform_feature( + column_transform_info, column_data + ) + recovered_feature_list.append(recovered_feature) names.append(column_transform_info.name) st += dim - recovered_data = np.column_stack(recovered_column_data_list) + recovered_data = np.column_stack(recovered_feature_list) recovered_data = pd.DataFrame( recovered_data, columns=names, index=data.index ).astype(self._column_raw_dtypes.filter(names)) @@ -320,18 +240,16 @@ def layout(self) -> List[Tuple]: - continuous, and with length 1 + number of GMM clusters. - discrete, and with length , the length of the one-hot encoding. """ - return self._column_transform_info + return self._column_transform_info_list def n_features(self) -> int: return np.sum( - [ - column_transform_info.output_dimensions - for column_transform_info in self._column_transform_info - ] + column_transform_info.output_dimensions + for column_transform_info in self._column_transform_info_list ) def get_column_info(self, name: str) -> FeatureInfo: - for column_transform_info in self._column_transform_info: + for column_transform_info in self._column_transform_info_list: if column_transform_info.name == name: return column_transform_info @@ -348,7 +266,7 @@ def activation_layout( - discrete, and with length , the length of the one-hot encoding. """ out = [] - for column_transform_info in self._column_transform_info: + for column_transform_info in self._column_transform_info_list: if column_transform_info.feature_type == "continuous": out.extend( [ @@ -374,26 +292,38 @@ class BinEncoder(TabularEncoder): Discrete columns are encoded using a scikit-learn OneHotEncoder. 
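The binning idea `BinEncoder` applies to continuous columns reduces to assigning each value to the most likely of a small number of Gaussian components. A rough standalone equivalent using scikit-learn directly (illustrative only; the real encoder also clips its outputs and wires in the inverse transform shown in the diff below):

    import numpy as np
    from sklearn.mixture import BayesianGaussianMixture

    rng = np.random.default_rng(0)
    x = np.concatenate([rng.normal(-3, 1, 500), rng.normal(3, 1, 500)])  # bimodal column

    bgm = BayesianGaussianMixture(n_components=2, random_state=0).fit(x.reshape(-1, 1))
    bins = bgm.predict_proba(x.reshape(-1, 1)).argmax(axis=1)  # one bin index per row

    # inverting a bin uses the component mean, as in _inverse_transform_feature
    recovered = bgm.means_.reshape(-1)[bins]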
""" - def _transform_continuous( - self, column_transform_info: FeatureInfo, data: pd.Series - ) -> pd.Series: - name = data.name - encoder = column_transform_info.transform - transformed = encoder.transform(data) - return transformed[f"{name}.component"].to_numpy().astype(int) + continuous_encoder = "bayesian_gmm" + cont_encoder_params = dict(n_components=2) + categorical_encoder = "onehot" + cat_encoder_params = dict(handle_unknown="ignore", sparse=False) - @validate_arguments(config=dict(arbitrary_types_allowed=True)) - def transform(self, raw_data: pd.DataFrame) -> pd.DataFrame: - """Take raw data and output a matrix data.""" - output = raw_data.copy() - - for column_transform_info in self._column_transform_info: - name = column_transform_info.name - output[name] = self._transform_continuous( - column_transform_info, raw_data[name] + # TODO: check if this is correct + def _transform_feature( + self, column_transform_info: FeatureInfo, feature: pd.Series + ) -> pd.DataFrame: + if column_transform_info.feature_type == "discrete": + return super()._transform_feature(column_transform_info, feature) + bgm = column_transform_info.transform + out = bgm.transform(feature) + if out.shape != (len(feature), 3): + raise ValueError( + "BinEncoder should transform continuous features using a " + "BayesianGMM with 2 components" ) + # encoded as a binary vector corresponding to the first component + return pd.DataFrame(out.values[:, [1]], columns=[bgm.feature_name_in]) - return output + def _inverse_transform_feature( + self, column_transform_info: FeatureInfo, column_data: pd.DataFrame + ) -> pd.Series: + if column_transform_info == "discrete": + return super()._inverse_transform_feature( + column_transform_info, column_data + ) + bgm = column_transform_info.transform + components = column_data.values.reshape(-1) + features = bgm.means[components] + return pd.Series(features, name=bgm.feature_name_in) class TimeSeriesTabularEncoder(TransformerMixin, BaseEstimator): diff --git a/tutorials/tutorial8_tabular_modelling_with_diffusion.ipynb b/tutorials/tutorial8_tabular_modelling_with_diffusion.ipynb index 97e38401..b520308e 100644 --- a/tutorials/tutorial8_tabular_modelling_with_diffusion.ipynb +++ b/tutorials/tutorial8_tabular_modelling_with_diffusion.ipynb @@ -38,7 +38,6 @@ "# stdlib\n", "import sys\n", "import warnings\n", - "sys.path.insert(0, '../src')\n", "\n", "# third party\n", "import numpy as np\n", From 2750791100fb9d4e30d46621d8f3b2fbc5e8c1a7 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Thu, 30 Mar 2023 16:30:54 +0200 Subject: [PATCH 30/95] add LogDistribution and LogIntDistribution --- src/synthcity/plugins/core/distribution.py | 41 ++++++++++++++++--- .../plugins/core/models/data_encoder.py | 4 +- src/synthcity/plugins/generic/plugin_ddpm.py | 20 +++++---- 3 files changed, 50 insertions(+), 15 deletions(-) diff --git a/src/synthcity/plugins/core/distribution.py b/src/synthcity/plugins/core/distribution.py index fb486e0a..3aff6f74 100644 --- a/src/synthcity/plugins/core/distribution.py +++ b/src/synthcity/plugins/core/distribution.py @@ -260,7 +260,7 @@ def max(self) -> Any: return self.high def __eq__(self, other: Any) -> bool: - if not isinstance(other, FloatDistribution): + if not isinstance(other, type(self)): return False return ( @@ -273,6 +273,21 @@ def dtype(self) -> str: return "float" +class LogDistribution(FloatDistribution): + low: float = np.iinfo(np.int64).min + high: float = np.iinfo(np.int64).max + base: float = 10.0 + _log_low: float = np.log(low) / 
np.log(base) + _log_high: float = np.log(high) / np.log(base) + + def sample(self, count: int = 1) -> Any: + np.random.seed(self.random_state) + msamples = self.sample_marginal(count) + if msamples is not None: + return msamples + return self.base ** np.random.uniform(self._log_low, self._log_high, count) + + class IntegerDistribution(Distribution): """ .. inheritance-diagram:: synthcity.plugins.core.distribution.IntegerDistribution @@ -345,7 +360,20 @@ def dtype(self) -> str: return "int" -OFFSET = 120 +class LogIntDistribution(FloatDistribution): + low: int = np.iinfo(np.int64).min + high: int = np.iinfo(np.int64).max + base: float = 10.0 + _log_low: float = np.log(low) / np.log(base) + _log_high: float = np.log(high) / np.log(base) + + def sample(self, count: int = 1) -> Any: + np.random.seed(self.random_state) + msamples = self.sample_marginal(count) + if msamples is not None: + return msamples + s = self.base ** np.random.uniform(self._log_low, self._log_high, count) + return s.astype(int) class DatetimeDistribution(Distribution): @@ -356,6 +384,7 @@ class DatetimeDistribution(Distribution): low: datetime = datetime.utcfromtimestamp(0) high: datetime = datetime.now() + offset: int = 120 @validator("low", always=True) def _validate_low_thresh(cls: Any, v: datetime, values: Dict) -> datetime: @@ -363,7 +392,7 @@ def _validate_low_thresh(cls: Any, v: datetime, values: Dict) -> datetime: if mkey in values and values[mkey] is not None: v = values[mkey].index.min() - return v - timedelta(seconds=OFFSET) + return v - timedelta(seconds=cls.offset) @validator("high", always=True) def _validate_high_thresh(cls: Any, v: datetime, values: Dict) -> datetime: @@ -371,7 +400,7 @@ def _validate_high_thresh(cls: Any, v: datetime, values: Dict) -> datetime: if mkey in values and values[mkey] is not None: v = values[mkey].index.max() - return v + timedelta(seconds=OFFSET) + return v + timedelta(seconds=cls.offset) def get(self) -> List[Any]: return [self.name, self.low, self.high] @@ -397,8 +426,8 @@ def has(self, val: datetime) -> bool: def includes(self, other: "Distribution") -> bool: return self.min() - timedelta( - seconds=OFFSET - ) <= other.min() and other.max() <= self.max() + timedelta(seconds=OFFSET) + seconds=self.offset + ) <= other.min() and other.max() <= self.max() + timedelta(seconds=self.offset) def as_constraint(self) -> Constraints: return Constraints( diff --git a/src/synthcity/plugins/core/models/data_encoder.py b/src/synthcity/plugins/core/models/data_encoder.py index 57fdbc1c..90ed7e1e 100644 --- a/src/synthcity/plugins/core/models/data_encoder.py +++ b/src/synthcity/plugins/core/models/data_encoder.py @@ -14,7 +14,7 @@ StandardScaler, ) -FeatureEncoder = Any +FeatureEncoder = Any # tried to use ForwardRef but it didn't work under mypy class FeatureEncoder(TransformerMixin, BaseEstimator): # type: ignore @@ -224,6 +224,7 @@ def fit(self, x: pd.Series, y: Any = None) -> "GaussianQuantileTransformer": "minmax": MinMaxScaler, "quantile": GaussianQuantileTransformer, "bayesian_gmm": BayesianGMMEncoder, + "passthrough": FeatureEncoder, } @@ -240,6 +241,7 @@ def get_encoder(encoder: Union[str, type]) -> Type[FeatureEncoder]: - minmax - quantile - bayesian_gmm + - Passthrough """ if isinstance(encoder, type): # custom encoder return FeatureEncoder.wraps(encoder) diff --git a/src/synthcity/plugins/generic/plugin_ddpm.py b/src/synthcity/plugins/generic/plugin_ddpm.py index 855eb4ec..b7f24974 100644 --- a/src/synthcity/plugins/generic/plugin_ddpm.py +++ 
b/src/synthcity/plugins/generic/plugin_ddpm.py @@ -15,7 +15,12 @@ # synthcity absolute from synthcity.plugins.core.dataloader import DataLoader -from synthcity.plugins.core.distribution import CategoricalDistribution, Distribution +from synthcity.plugins.core.distribution import ( + Distribution, + IntegerDistribution, + LogDistribution, + LogIntDistribution, +) from synthcity.plugins.core.models.tabular_ddpm import TabDDPM from synthcity.plugins.core.plugin import Plugin from synthcity.plugins.core.schema import Schema @@ -174,13 +179,12 @@ def hyperparameter_space(**kwargs: Any) -> List[Distribution]: Gaussian diffusion loss MSE """ return [ - # TODO: change to loguniform distribution - CategoricalDistribution(name="lr", choices=[1e-5, 1e-4, 1e-3, 2e-3, 3e-3]), - CategoricalDistribution(name="batch_size", choices=[256, 4096]), - CategoricalDistribution(name="num_timesteps", choices=[100, 1000]), - CategoricalDistribution(name="n_iter", choices=[5000, 10000, 20000]), - CategoricalDistribution(name="n_layers_hidden", choices=[2, 4, 6, 8]), - CategoricalDistribution(name="dim_hidden", choices=[128, 256, 512, 1024]), + LogDistribution(name="lr", low=1e-5, high=1e-1), + LogIntDistribution(name="batch_size", low=256, high=4096), + IntegerDistribution(name="num_timesteps", choices=[100, 1000]), + LogIntDistribution(name="n_iter", low=1000, high=10000), + IntegerDistribution(name="n_layers_hidden", low=2, high=8), + LogIntDistribution(name="dim_hidden", low=128, high=1024), ] def _fit(self, X: DataLoader, *args: Any, **kwargs: Any) -> "TabDDPMPlugin": From 52011d30127cc906bc2705991544102db12a5579 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Thu, 30 Mar 2023 16:37:46 +0200 Subject: [PATCH 31/95] update DDPM to use TabularEncoder --- src/synthcity/plugins/generic/plugin_ddpm.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/synthcity/plugins/generic/plugin_ddpm.py b/src/synthcity/plugins/generic/plugin_ddpm.py index b7f24974..1588253f 100644 --- a/src/synthcity/plugins/generic/plugin_ddpm.py +++ b/src/synthcity/plugins/generic/plugin_ddpm.py @@ -22,6 +22,7 @@ LogIntDistribution, ) from synthcity.plugins.core.models.tabular_ddpm import TabDDPM +from synthcity.plugins.core.models.tabular_encoder import TabularEncoder from synthcity.plugins.core.plugin import Plugin from synthcity.plugins.core.schema import Schema from synthcity.utils.callbacks import Callback @@ -200,11 +201,6 @@ def _fit(self, X: DataLoader, *args: Any, **kwargs: Any) -> "TabDDPMPlugin": cond = kwargs.pop("cond", None) self.loss_history = None - # note that the TabularEncoder is not used in this plugin, because the - # Gaussian multinomial diffusion module needs to know the number of classes - # for each discrete feature before it applies torch.nn.functional.one_hot - # on these features, and it also preprocesses the continuous features differently. 
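The move from categorical grids to `LogDistribution`/`LogIntDistribution` above means hyperparameters such as `lr`, `batch_size`, `n_iter` and `dim_hidden` are now sampled log-uniformly. The core rule is exponentiating a uniform draw in log space; a self-contained sketch of that rule (note that the class-level defaults of `np.iinfo(np.int64).min` would feed a negative value to `np.log`, so in practice `low` and `high` should always be passed explicitly, as `hyperparameter_space` does):

    import numpy as np

    def log_uniform(low, high, count, base=10.0, seed=0):
        # base ** U(log_b(low), log_b(high)) is log-uniform on [low, high]
        rng = np.random.default_rng(seed)
        lo, hi = np.log(low) / np.log(base), np.log(high) / np.log(base)
        return base ** rng.uniform(lo, hi, count)

    lrs = log_uniform(1e-5, 1e-1, 5)                     # LogDistribution-style
    batch_sizes = log_uniform(256, 4096, 5).astype(int)  # LogIntDistribution-style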
- if args: raise ValueError("Only keyword arguments are allowed") @@ -219,6 +215,11 @@ def _fit(self, X: DataLoader, *args: Any, **kwargs: Any) -> "TabDDPMPlugin": self.target_name = cond.name self.target_iloc = list(X.columns).index(cond.name) + self.encoder = TabularEncoder( + categorical_encoder="passthrough", continuous_encoder="quantile" + ) + df = self.encoder.fit_transform(df) + if cond is not None: if type(cond) is str: cond = df[cond] @@ -245,6 +246,7 @@ def _generate(self, count: int, syn_schema: Schema, **kwargs: Any) -> DataLoader def callback(count): # type: ignore data = self.model.generate(count, cond=cond) + data = self.encoder.inverse_transform(data) if self.is_classification: data = np.insert(data, self.target_iloc, cond, axis=1) return data From 0ee6c8b58336dae255be2ce6994fcb4b0634de37 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Fri, 31 Mar 2023 00:55:57 +0200 Subject: [PATCH 32/95] update test_tabular_encoder and debug --- .gitignore | 1 - .../plugins/core/models/data_encoder.py | 186 ++++++++++++------ .../core/models/tabular_ddpm/__init__.py | 14 +- .../plugins/core/models/tabular_encoder.py | 54 +++-- src/synthcity/plugins/generic/plugin_ddpm.py | 20 +- .../core/models/test_tabular_encoder.py | 73 +++---- ...al8_tabular_modelling_with_diffusion.ipynb | 62 +++--- 7 files changed, 225 insertions(+), 185 deletions(-) diff --git a/.gitignore b/.gitignore index b2bc0daa..41f36b84 100644 --- a/.gitignore +++ b/.gitignore @@ -67,4 +67,3 @@ lightning_logs generated MNIST cifar-10* -src/test.py diff --git a/src/synthcity/plugins/core/models/data_encoder.py b/src/synthcity/plugins/core/models/data_encoder.py index 90ed7e1e..97cdcebb 100644 --- a/src/synthcity/plugins/core/models/data_encoder.py +++ b/src/synthcity/plugins/core/models/data_encoder.py @@ -1,5 +1,5 @@ # stdlib -from typing import Any, List, Type, Union +from typing import Any, List, Optional, Type, Union # third party import numpy as np @@ -8,51 +8,84 @@ from sklearn.base import BaseEstimator, TransformerMixin from sklearn.mixture import BayesianGaussianMixture from sklearn.preprocessing import ( + LabelEncoder, MinMaxScaler, OneHotEncoder, QuantileTransformer, + RobustScaler, StandardScaler, ) + +def validate_shape(x: np.ndarray, n_dim: int) -> np.ndarray: + if n_dim == 1: + if x.ndim == 2: + x = np.squeeze(x, axis=1) + if x.ndim != 1: + raise ValueError("array must be 1D") + return x + elif n_dim == 2: + if x.ndim == 1: + x = x.reshape(-1, 1) + if x.ndim != 2: + raise ValueError("array must be 2D") + return x + else: + raise ValueError("n_dim must be 1 or 2") + + FeatureEncoder = Any # tried to use ForwardRef but it didn't work under mypy class FeatureEncoder(TransformerMixin, BaseEstimator): # type: ignore - """Base feature encoder, with sklearn-style API""" + """ + Base feature encoder with sklearn-style API. 
+ """ - def __new__(cls, **kwargs: Any) -> FeatureEncoder: - obj = super().__new__() - obj.__dict__.update(kwargs) # auto set all parameters as attributes - return obj + n_dim_in: int = 1 + n_dim_out: int = 2 + n_features_out: int + feature_name_in: str + feature_names_out: List[str] + feature_types_out: List[str] + categorical: bool = False # used by get_feature_types_out + + def __init__( + self, n_dim_in: Optional[int] = None, n_dim_out: Optional[int] = None + ) -> None: + super().__init__() + if n_dim_in is not None: + self.n_dim_in = n_dim_in + if n_dim_out is not None: + self.n_dim_out = n_dim_out @validate_arguments(config=dict(arbitrary_types_allowed=True)) def fit(self, x: pd.Series, y: Any = None, **kwargs: Any) -> FeatureEncoder: self.feature_name_in = x.name - out = self._fit(x, **kwargs)._transform(x) - - if np.ndim(out) == 1: - self.n_features_out = 1 - else: - self.n_features_out = np.shape(out)[1] - + self.feature_type_in = self._get_feature_type(x) + input = validate_shape(x.values, self.n_dim_in) + output = self._fit(input, **kwargs)._transform(input) + self._out_shape = (-1, *output.shape[1:]) # for inverse_transform + output = validate_shape(output, self.n_dim_out) self.feature_names_out = self.get_feature_names_out() - + self.n_features_out = len(self.feature_names_out) + self.feature_types_out = self.get_feature_types_out(output) return self - def _fit(self, x: pd.Series, **kwargs: Any) -> FeatureEncoder: + def _fit(self, x: np.ndarray, **kwargs: Any) -> FeatureEncoder: return self @validate_arguments(config=dict(arbitrary_types_allowed=True)) - def transform(self, x: pd.Series) -> Any: - out = self._transform(x) - if isinstance(out, np.ndarray): - if out.ndim == 1: - return pd.Series(out, self.feature_name_in) - else: - return pd.DataFrame(out, columns=self.feature_names_out) - return out - - def _transform(self, x: pd.Series) -> Any: + def transform(self, x: pd.Series) -> Union[pd.DataFrame, pd.Series]: + data = validate_shape(x.values, self.n_dim_in) + out = self._transform(data) + out = validate_shape(out, self.n_dim_out) + if self.n_dim_out == 1: + return pd.Series(out, name=self.feature_name_in) + else: + return pd.DataFrame(out, columns=self.feature_names_out) + + def _transform(self, x: np.ndarray) -> np.ndarray: return x def get_feature_names_out(self) -> List[str]: @@ -60,61 +93,79 @@ def get_feature_names_out(self) -> List[str]: if n == 1: return [self.feature_name_in] else: - return [self.feature_name_in + str(i) for i in range(n)] + return [f"{self.feature_name_in}_{i}" for i in range(n)] + + def get_feature_types_out(self, output: np.ndarray) -> List[str]: + t = self._get_feature_type(output) + return [t] * self.n_features_out + + def _get_feature_type(self, x: Any) -> str: + if self.categorical: + return "discrete" + elif np.issubdtype(x.dtype, np.floating): + return "continuous" + else: + return "discrete" @validate_arguments(config=dict(arbitrary_types_allowed=True)) - def inverse_transform(self, data: Any) -> pd.Series: - x = self._inverse_transform(data) + def inverse_transform(self, df: Union[pd.DataFrame, pd.Series]) -> pd.Series: + y = df.values.reshape(self._out_shape) + x = self._inverse_transform(y) + x = validate_shape(x, 1) return pd.Series(x, name=self.feature_name_in) - def _inverse_transform(self, data: Any) -> pd.Series: + def _inverse_transform(self, data: np.ndarray) -> np.ndarray: return data @classmethod - def wraps(cls, encoder_class: TransformerMixin) -> Type[FeatureEncoder]: + def wraps( + cls, encoder_class: TransformerMixin, 
**params: Any + ) -> Type[FeatureEncoder]: """Wraps sklearn transformer to FeatureEncoder.""" class WrappedEncoder(FeatureEncoder): + n_dim_in = 2 # most sklearn transformers accept 2D input + def __init__(self, *args: Any, **kwargs: Any) -> None: self.encoder = encoder_class(*args, **kwargs) - def _fit(self, x: pd.Series, **kwargs: Any) -> FeatureEncoder: + def _fit(self, x: np.ndarray, **kwargs: Any) -> FeatureEncoder: self.encoder.fit(x, **kwargs) return self - def _transform(self, x: pd.Series) -> Any: + def _transform(self, x: np.ndarray) -> np.ndarray: return self.encoder.transform(x) - def _inverse_transform(self, x: pd.Series) -> Any: - return self.encoder.inverse_transform(x) + def _inverse_transform(self, data: np.ndarray) -> np.ndarray: + return self.encoder.inverse_transform(data) def get_feature_names_out(self) -> List[str]: - return self.encoder.get_feature_names_out([self.feature_name_in]) - - for attr in ( - "__module__", - "__name__", - "__qualname__", - "__doc__", - "__annotations__", - ): + return list(self.encoder.get_feature_names_out([self.feature_name_in])) + + for attr in ("__name__", "__qualname__", "__doc__"): setattr(WrappedEncoder, attr, getattr(encoder_class, attr)) + for attr, val in params.items(): + setattr(WrappedEncoder, attr, val) return WrappedEncoder -OneHotEncoder = FeatureEncoder.wraps(OneHotEncoder) +OneHotEncoder = FeatureEncoder.wraps(OneHotEncoder, categorical=True) +LabelEncoder = FeatureEncoder.wraps(LabelEncoder, n_dim_out=1, categorical=True) StandardScaler = FeatureEncoder.wraps(StandardScaler) MinMaxScaler = FeatureEncoder.wraps(MinMaxScaler) +RobustScaler = FeatureEncoder.wraps(RobustScaler) class DatetimeEncoder(FeatureEncoder): """Datetime variables encoder""" - def _transform(self, x: pd.Series) -> pd.Series: + n_dim_out = 1 + + def _transform(self, x: np.ndarray) -> np.ndarray: return pd.to_numeric(x).astype(float) - def _inverse_transform(self, data: pd.Series) -> pd.Series: + def _inverse_transform(self, data: np.ndarray) -> np.ndarray: return pd.to_datetime(data) @@ -129,6 +180,11 @@ def __init__( clip_output: bool = True, std_multiplier: int = 4, ) -> None: + self.n_components = n_components + self.random_state = random_state + self.weight_threshold = weight_threshold + self.clip_output = clip_output + self.std_multiplier = std_multiplier self.model = BayesianGaussianMixture( n_components=n_components, random_state=random_state, @@ -136,19 +192,19 @@ def __init__( ) self.weights: List[float] - def _fit(self, x: pd.Series, **kwargs: Any) -> "BayesianGaussianMixture": + def _fit(self, x: np.ndarray, **kwargs: Any) -> "BayesianGaussianMixture": self.min_value = x.min() self.max_value = x.max() - self.model.fit(x.values.reshape(-1, 1)) + self.model.fit(x.reshape(-1, 1)) self.weights = self.model.weights_ self.means = self.model.means_.reshape(-1) self.stds = np.sqrt(self.model.covariances_).reshape(-1) return self - def _transform(self, x: pd.Series) -> pd.DataFrame: - x = x.values.reshape(-1, 1) + def _transform(self, x: np.ndarray) -> np.ndarray: + x = x.reshape(-1, 1) means = self.means.reshape(1, -1) stds = self.stds.reshape(1, -1) @@ -161,30 +217,32 @@ def _transform(self, x: pd.Series) -> pd.DataFrame: components = np.argmax(component_probs, axis=1) normalized = normalized_values[np.arange(len(x)), components] - if self.clip_output: + if self.clip_output: # why use 0.99 instead of 1? 
normalized = np.clip(normalized, -0.99, 0.99) normalized = normalized.reshape(-1, 1) - components = np.eye(self.n_components)[components] # onehot + components = np.eye(self.n_components, dtype=int)[components] return np.hstack([normalized, components]) def get_feature_names_out(self) -> List[str]: name = self.feature_name_in return [f"{name}.value"] + [ - f"{name}.component_{i}" for i in range(self.n_features_out - 1) + f"{name}.component_{i}" for i in range(self.n_components) ] - def _inverse_transform(self, data: pd.DataFrame) -> pd.Series: - if self.clip_output: - data = np.clip(data.values[:, 0], -1, 1) + def get_feature_types_out(self, output: np.ndarray) -> List[str]: + return ["continuous"] + ["discrete"] * self.n_components + + def _inverse_transform(self, data: np.ndarray) -> np.ndarray: + components = np.argmax(data[:, 1:], axis=1) - means = self.model.means_.reshape([-1]) - stds = np.sqrt(self.model.covariances_).reshape([-1]) - components = np.argmax(data.values[:, 1:], axis=1) + data = data[:, 0] + if self.clip_output: + data = np.clip(data, -1.0, 1.0) # recreate data - std_t = stds[components] - mean_t = means[components] + mean_t = self.means[components] + std_t = self.stds[components] reversed_data = data * self.std_multiplier * std_t + mean_t # clip values @@ -212,7 +270,7 @@ def __init__( copy=copy, ) - def fit(self, x: pd.Series, y: Any = None) -> "GaussianQuantileTransformer": + def fit(self, x: np.ndarray, y: Any = None) -> "GaussianQuantileTransformer": self.n_quantiles = max(min(len(x) // 30, 1000), 10) return super().fit(x, y) @@ -220,8 +278,10 @@ def fit(self, x: pd.Series, y: Any = None) -> "GaussianQuantileTransformer": ENCODERS = { "datetime": DatetimeEncoder, "onehot": OneHotEncoder, + "label": LabelEncoder, "standard": StandardScaler, "minmax": MinMaxScaler, + "robust": RobustScaler, "quantile": GaussianQuantileTransformer, "bayesian_gmm": BayesianGMMEncoder, "passthrough": FeatureEncoder, @@ -236,9 +296,11 @@ def get_encoder(encoder: Union[str, type]) -> Type[FeatureEncoder]: - datetime - Categorical - onehot + - label - Continuous - standard - minmax + - robust - quantile - bayesian_gmm - Passthrough diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py index d6141f81..0332fa03 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py @@ -83,19 +83,17 @@ def fit( else: self.n_classes = 0 + self.feature_names = X.columns cat_cols = discrete_columns(X, return_counts=True) if cat_cols: - ini_cols = X.columns cat_cols, cat_counts = zip(*cat_cols) # reorder the columns so that the categorical ones go to the end X = X[np.hstack([X.columns[~X.keys().isin(cat_cols)], cat_cols])] - cur_cols = X.columns - # find the permutation from the reordered columns to the original ones - self._col_perm = np.argsort(cur_cols)[np.argsort(np.argsort(ini_cols))] + self.feature_names_out = X.columns else: cat_counts = [0] - self._col_perm = np.arange(X.shape[1]) + self.feature_names_out = self.feature_names model_params = dict( num_classes=self.n_classes, @@ -207,10 +205,10 @@ def fit( return self - def generate(self, count: int, cond: Any = None) -> np.ndarray: + def generate(self, count: int, cond: Any = None) -> pd.DataFrame: self.diffusion.eval() if cond is not None: cond = torch.tensor(cond, dtype=torch.long, device=self.device) sample = self.diffusion.sample_all(count, cond).detach().cpu().numpy() - sample = sample[:, 
self._col_perm] - return sample + df = pd.DataFrame(sample, columns=self.feature_names_out) + return df[self.feature_names] diff --git a/src/synthcity/plugins/core/models/tabular_encoder.py b/src/synthcity/plugins/core/models/tabular_encoder.py index 0eb2a096..e5f260e3 100644 --- a/src/synthcity/plugins/core/models/tabular_encoder.py +++ b/src/synthcity/plugins/core/models/tabular_encoder.py @@ -9,6 +9,7 @@ import pandas as pd from pydantic import BaseModel, validate_arguments, validator from sklearn.base import BaseEstimator, TransformerMixin +from sklearn.preprocessing import MinMaxScaler # synthcity absolute import synthcity.logger as log @@ -16,15 +17,16 @@ from synthcity.utils.serialization import dataframe_hash # synthcity relative -from .data_encoder import FeatureEncoder, get_encoder +from .data_encoder import get_encoder class FeatureInfo(BaseModel): name: str feature_type: str - transform: FeatureEncoder + transform: Any output_dimensions: int transformed_features: List[str] + trans_feature_types: List[str] @validator("feature_type") def _feature_type_validator(cls: Any, v: str) -> str: @@ -66,6 +68,7 @@ def __init__( self, *, whitelist: tuple = (), + max_clusters: int = 10, categorical_limit: int = 10, categorical_encoder: Optional[Union[str, type]] = None, continuous_encoder: Optional[Union[str, type]] = None, @@ -78,8 +81,9 @@ def __init__( whitelist (tuple): Columns that will not be transformed. """ - self.categorical_limit = categorical_limit self.whitelist = whitelist + self.categorical_limit = categorical_limit + self.max_clusters = max_clusters # for compatibility if categorical_encoder is not None: self.categorical_encoder = categorical_encoder if continuous_encoder is not None: @@ -92,6 +96,8 @@ def __init__( self.cont_encoder_params = cont_encoder_params else: self.cont_encoder_params = self.cont_encoder_params.copy() + if self.continuous_encoder == "bayesian_gmm": + self.cont_encoder_params["n_components"] = max_clusters @validate_arguments(config=dict(arbitrary_types_allowed=True)) def _fit_feature(self, feature: pd.Series, feature_type: str) -> FeatureInfo: @@ -120,6 +126,7 @@ def _fit_feature(self, feature: pd.Series, feature_type: str) -> FeatureInfo: transform=encoder, output_dimensions=encoder.n_features_out, transformed_features=encoder.feature_names_out, + trans_feature_types=encoder.feature_types_out, ) @validate_arguments(config=dict(arbitrary_types_allowed=True)) @@ -136,7 +143,7 @@ def fit( self.output_dimensions = 0 self._column_raw_dtypes = raw_data.infer_objects().dtypes - self._column_transform_info_list = [] + self._column_transform_info_list: Sequence[FeatureInfo] = [] for name in raw_data.columns: if name in self.whitelist: @@ -233,7 +240,7 @@ def inverse_transform(self, data: pd.DataFrame) -> pd.DataFrame: ).astype(self._column_raw_dtypes.filter(names)) return recovered_data - def layout(self) -> List[Tuple]: + def layout(self) -> Sequence[FeatureInfo]: """Get the layout of the encoded dataset. Returns a list of tuple, describing each column as: @@ -258,7 +265,7 @@ def get_column_info(self, name: str) -> FeatureInfo: @validate_arguments(config=dict(arbitrary_types_allowed=True)) def activation_layout( self, discrete_activation: str, continuous_activation: str - ) -> Sequence[Tuple]: + ) -> Sequence[Tuple[str, int]]: """Get the layout of the activations. 
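After this refactor (continued below), the activation layout follows directly from each encoder's `feature_types_out`: one activation per transformed output column. For example, a `bayesian_gmm` column with three components reports `["continuous", "discrete", "discrete", "discrete"]`, mapping to one `tanh` followed by three `softmax` entries. A sketch of the mapping, not the actual method:

    from typing import List, Tuple

    def activation_layout(feature_types: List[str],
                          discrete_activation: str = "softmax",
                          continuous_activation: str = "tanh") -> List[Tuple[str, int]]:
        # one (activation, width=1) entry per transformed output column
        return [
            (discrete_activation if t == "discrete" else continuous_activation, 1)
            for t in feature_types
        ]

    assert activation_layout(["continuous", "discrete", "discrete", "discrete"]) == [
        ("tanh", 1), ("softmax", 1), ("softmax", 1), ("softmax", 1)
    ]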
Returns a list of tuple, describing each column as: @@ -267,21 +274,9 @@ def activation_layout( """ out = [] for column_transform_info in self._column_transform_info_list: - if column_transform_info.feature_type == "continuous": - out.extend( - [ - (continuous_activation, 1), - ( - discrete_activation, - column_transform_info.output_dimensions - 1, - ), - ] - ) - else: - out.append( - (discrete_activation, column_transform_info.output_dimensions) - ) - + for t in column_transform_info.trans_feature_types: + act = discrete_activation if t == "discrete" else continuous_activation + out.append((act, 1)) return out @@ -305,13 +300,9 @@ def _transform_feature( return super()._transform_feature(column_transform_info, feature) bgm = column_transform_info.transform out = bgm.transform(feature) - if out.shape != (len(feature), 3): - raise ValueError( - "BinEncoder should transform continuous features using a " - "BayesianGMM with 2 components" - ) - # encoded as a binary vector corresponding to the first component - return pd.DataFrame(out.values[:, [1]], columns=[bgm.feature_name_in]) + return pd.DataFrame( + out.values[:, 1:].argmax(axis=1), columns=[bgm.feature_name_in] + ) def _inverse_transform_feature( self, column_transform_info: FeatureInfo, column_data: pd.DataFrame @@ -339,12 +330,10 @@ def __init__( max_clusters: int = 10, categorical_limit: int = 10, whitelist: list = [], - encoder: str = "minmax", ) -> None: self.max_clusters = max_clusters self.categorical_limit = categorical_limit self.whitelist = whitelist - self.encoder = encoder def fit_temporal( self, @@ -369,8 +358,9 @@ def fit_temporal( self.temporal_encoder.fit(temporal_df) # Temporal horizons - self.observation_times_encoder = get_encoder(self.encoder) - self.observation_times_encoder.fit(np.asarray(observation_times).reshape(-1, 1)) + self.observation_times_encoder = MinMaxScaler().fit( + np.asarray(observation_times).reshape(-1, 1) + ) return self diff --git a/src/synthcity/plugins/generic/plugin_ddpm.py b/src/synthcity/plugins/generic/plugin_ddpm.py index 1588253f..b1150ac9 100644 --- a/src/synthcity/plugins/generic/plugin_ddpm.py +++ b/src/synthcity/plugins/generic/plugin_ddpm.py @@ -154,6 +154,12 @@ def __init__( dim_embed=dim_embed, ) + self.encoder = TabularEncoder( + categorical_encoder="passthrough", + continuous_encoder="quantile", + cont_encoder_params=dict(random_state=random_state), + ) + @staticmethod def name() -> str: return "ddpm" @@ -198,6 +204,8 @@ def _fit(self, X: DataLoader, *args: Any, **kwargs: Any) -> "TabDDPMPlugin": If the task is regression, the target variable is not specially treated. There is no condition by default, but can be given by the user, either as a column name or an array-like. 
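The plugin now standardises continuous features with `GaussianQuantileTransformer` before diffusion, which maps arbitrary marginals to roughly N(0, 1) and round-trips cleanly. Its behaviour reduces to scikit-learn's `QuantileTransformer` with the adaptive `n_quantiles` rule from this patch; a sketch:

    import numpy as np
    from sklearn.preprocessing import QuantileTransformer

    x = np.random.default_rng(0).lognormal(size=(500, 1))  # skewed continuous column

    qt = QuantileTransformer(
        n_quantiles=max(min(len(x) // 30, 1000), 10),  # adaptive rule from the patch
        output_distribution="normal",
    )
    z = qt.fit_transform(x)           # approximately standard normal
    x_back = qt.inverse_transform(z)  # back on the original scale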
""" df = X.dataframe() + self.feature_names = df.columns + cond = kwargs.pop("cond", None) self.loss_history = None @@ -213,11 +221,7 @@ def _fit(self, X: DataLoader, *args: Any, **kwargs: Any) -> "TabDDPMPlugin": self._labels, self._cond_dist = np.unique(cond, return_counts=True) self._cond_dist = self._cond_dist / self._cond_dist.sum() self.target_name = cond.name - self.target_iloc = list(X.columns).index(cond.name) - self.encoder = TabularEncoder( - categorical_encoder="passthrough", continuous_encoder="quantile" - ) df = self.encoder.fit_transform(df) if cond is not None: @@ -245,11 +249,11 @@ def _generate(self, count: int, syn_schema: Schema, **kwargs: Any) -> DataLoader raise ValueError("The length of cond is less than the required count") def callback(count): # type: ignore - data = self.model.generate(count, cond=cond) - data = self.encoder.inverse_transform(data) + df = self.model.generate(count, cond=cond) + df = self.encoder.inverse_transform(df) if self.is_classification: - data = np.insert(data, self.target_iloc, cond, axis=1) - return data + df = df.join(pd.Series(cond, name=self.target_name)) + return df[self.feature_names] # reorder columns return self._safe_generate(callback, count, syn_schema, **kwargs) diff --git a/tests/plugins/core/models/test_tabular_encoder.py b/tests/plugins/core/models/test_tabular_encoder.py index 948399b5..6837190a 100644 --- a/tests/plugins/core/models/test_tabular_encoder.py +++ b/tests/plugins/core/models/test_tabular_encoder.py @@ -78,7 +78,6 @@ def test_encoder_fit_transform(max_clusters: int) -> None: assert set(encoded[f"{column.name}_{val}"].unique()).issubset( set([0, 1]) ) - else: assert f"{column.name}.value" in encoded.columns for enc_col in encoded.columns: @@ -102,6 +101,27 @@ def test_encoder_inverse_transform(max_clusters: int) -> None: assert np.abs(X - recovered).sum().sum() < 5 +def check_equal_layouts( + layout: list, act_layout: list, disc_act: str, cont_act: str +) -> None: + expected_act_layout = [] + for col_info in layout: + if col_info.feature_type == "continuous": + expected_act_layout.append(cont_act) + for _ in range(col_info.output_dimensions - 1): + expected_act_layout.append(disc_act) + else: + for _ in range(col_info.output_dimensions): + expected_act_layout.append(disc_act) + + expanded_act_layout = [] + for act, num in act_layout: + for _ in range(num): + expanded_act_layout.append(act) + + assert expanded_act_layout == expected_act_layout + + def test_encoder_activation_layout() -> None: X, _ = load_diabetes(return_X_y=True, as_frame=True) net = TabularEncoder() @@ -113,20 +133,7 @@ def test_encoder_activation_layout() -> None: layout = net.layout() assert len(layout) <= len(act_layout) - - act_step = 0 - - for col_info in layout: - if col_info.feature_type == "continuous": - assert act_layout[act_step] == ("tanh", 1) - assert act_layout[act_step + 1] == ( - "softmax", - col_info.output_dimensions - 1, - ) - act_step += 2 - else: - assert act_layout[act_step] == ("softmax", col_info.output_dimensions) - act_step += 1 + check_equal_layouts(layout, act_layout, "softmax", "tanh") def test_bin_encoder() -> None: @@ -138,6 +145,8 @@ def test_bin_encoder() -> None: binned = net.transform(X) for col in X.columns: + # ! the target column is transformed by OneHotEncoder to target_0, target_1, target_2 + # ! 
will result in a KeyError assert len(binned[col].unique()) <= 10 @@ -272,35 +281,5 @@ def test_ts_encoder_activation_layout(source: Any) -> None: assert len(static_layout) <= len(static_act_layout) assert len(temporal_layout) <= len(temporal_act_layout) - - act_step = 0 - for col_info in static_layout: - if col_info.feature_type == "continuous": - assert static_act_layout[act_step] == ("tanh", 1) - assert static_act_layout[act_step + 1] == ( - "softmax", - col_info.output_dimensions - 1, - ) - act_step += 2 - else: - assert static_act_layout[act_step] == ( - "softmax", - col_info.output_dimensions, - ) - act_step += 1 - - act_step = 0 - for col_info in temporal_layout: - if col_info.feature_type == "continuous": - assert temporal_act_layout[act_step] == ("tanh", 1) - assert temporal_act_layout[act_step + 1] == ( - "softmax", - col_info.output_dimensions - 1, - ) - act_step += 2 - else: - assert temporal_act_layout[act_step] == ( - "softmax", - col_info.output_dimensions, - ) - act_step += 1 + check_equal_layouts(static_layout, static_act_layout, "softmax", "tanh") + check_equal_layouts(temporal_layout, temporal_act_layout, "softmax", "tanh") diff --git a/tutorials/tutorial8_tabular_modelling_with_diffusion.ipynb b/tutorials/tutorial8_tabular_modelling_with_diffusion.ipynb index b520308e..197b5737 100644 --- a/tutorials/tutorial8_tabular_modelling_with_diffusion.ipynb +++ b/tutorials/tutorial8_tabular_modelling_with_diffusion.ipynb @@ -65,7 +65,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 2, "id": "51076cdc", "metadata": {}, "outputs": [ @@ -158,7 +158,7 @@ "4 0 " ] }, - "execution_count": 12, + "execution_count": 2, "metadata": {}, "output_type": "execute_result" } @@ -176,7 +176,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 3, "id": "52397e4a", "metadata": {}, "outputs": [ @@ -189,7 +189,7 @@ "Name: target, dtype: int64" ] }, - "execution_count": 13, + "execution_count": 3, "metadata": {}, "output_type": "execute_result" } @@ -209,7 +209,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 4, "id": "3bf24be4", "metadata": {}, "outputs": [ @@ -217,27 +217,35 @@ "name": "stderr", "output_type": "stream", "text": [ - "[2023-03-27T15:19:24.516935+0200][30696][INFO] Step 100: MLoss: 0.0 GLoss: 0.2235 Sum: 0.2235\n", - "[2023-03-27T15:19:25.913968+0200][30696][INFO] Step 200: MLoss: 0.0 GLoss: 0.2298 Sum: 0.2298\n", - "[2023-03-27T15:19:27.191123+0200][30696][INFO] Step 300: MLoss: 0.0 GLoss: 0.2305 Sum: 0.2305\n", - "[2023-03-27T15:19:28.432055+0200][30696][INFO] Step 400: MLoss: 0.0 GLoss: 0.2273 Sum: 0.2273\n", - "[2023-03-27T15:19:29.766838+0200][30696][INFO] Step 500: MLoss: 0.0 GLoss: 0.2333 Sum: 0.2333\n", - "[2023-03-27T15:19:31.280538+0200][30696][INFO] Step 600: MLoss: 0.0 GLoss: 0.221 Sum: 0.221\n", - "[2023-03-27T15:19:33.034999+0200][30696][INFO] Step 700: MLoss: 0.0 GLoss: 0.2123 Sum: 0.2123\n", - "[2023-03-27T15:19:34.519078+0200][30696][INFO] Step 800: MLoss: 0.0 GLoss: 0.2212 Sum: 0.2212\n", - "[2023-03-27T15:19:36.020932+0200][30696][INFO] Step 900: MLoss: 0.0 GLoss: 0.2014 Sum: 0.2014\n", - "[2023-03-27T15:19:38.330664+0200][30696][INFO] Step 1000: MLoss: 0.0 GLoss: 0.2069 Sum: 0.2069\n" + "[2023-03-31T00:29:29.172830+0200][10148][INFO] Encoding sepal length (cm) 8461685668942494555\n" ] }, { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 14, - "metadata": {}, - "output_type": "execute_result" + "ename": "TypeError", + "evalue": "__init__() got an unexpected keyword 
argument 'n_components'", + "output_type": "error", + "traceback": [ + "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[1;31mTypeError\u001b[0m Traceback (most recent call last)", + "\u001b[1;32m\u001b[0m in \u001b[0;36m\u001b[1;34m\u001b[0m\n\u001b[0;32m 18\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 19\u001b[0m \u001b[0mplugin\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mPlugins\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mget\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"ddpm\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mplugin_params\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 20\u001b[1;33m \u001b[0mplugin\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfit\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mloader\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd\u001b[0m in \u001b[0;36mpydantic.decorator.validate_arguments.validate.wrapper_function\u001b[1;34m()\u001b[0m\n", + "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd\u001b[0m in \u001b[0;36mpydantic.decorator.ValidatedFunction.call\u001b[1;34m()\u001b[0m\n", + "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd\u001b[0m in \u001b[0;36mpydantic.decorator.ValidatedFunction.execute\u001b[1;34m()\u001b[0m\n", + "\u001b[1;32mD:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\core\\plugin.py\u001b[0m in \u001b[0;36mfit\u001b[1;34m(self, X, *args, **kwargs)\u001b[0m\n\u001b[0;32m 242\u001b[0m )\n\u001b[0;32m 243\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 244\u001b[1;33m \u001b[0moutput\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_fit\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mX\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 245\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfitted\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;32mTrue\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 246\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", + "\u001b[1;32mD:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_ddpm.py\u001b[0m in \u001b[0;36m_fit\u001b[1;34m(self, X, *args, **kwargs)\u001b[0m\n\u001b[0;32m 221\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtarget_name\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mcond\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mname\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 222\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 223\u001b[1;33m \u001b[0mdf\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mencoder\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfit_transform\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdf\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 224\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 225\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mcond\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mnot\u001b[0m 
None:\n",
+      "~\\AppData\\Roaming\\Python\\Python39\\site-packages\\sklearn\\utils\\_set_output.py in wrapped(self, X, *args, **kwargs)\n    140     @wraps(f)\n    141     def wrapped(self, X, *args, **kwargs):\n--> 142         data_to_wrap = f(self, X, *args, **kwargs)\n    143         if isinstance(data_to_wrap, tuple):\n    144             # only wrap the first output for cross decomposition\n",
+      "~\\AppData\\Roaming\\Python\\Python39\\site-packages\\sklearn\\base.py in fit_transform(self, X, y, **fit_params)\n    857         if y is None:\n    858             # fit method of arity 1 (unsupervised transformation)\n--> 859             return self.fit(X, **fit_params).transform(X)\n    860         else:\n    861             # fit method of arity 2 (supervised transformation)\n",
+      "d:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd in pydantic.decorator.validate_arguments.validate.wrapper_function()\n",
+      "d:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd in pydantic.decorator.ValidatedFunction.call()\n",
+      "d:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd in pydantic.decorator.ValidatedFunction.execute()\n",
+      "D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\core\\models\\tabular_encoder.py in fit(self, raw_data, discrete_columns)\n    155             else:\n    156                 ftype = \"continuous\"\n--> 157             column_transform_info = self._fit_feature(raw_data[name], ftype)\n    158 \n    159             self.output_dimensions += column_transform_info.output_dimensions\n",
+      "d:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd in pydantic.decorator.validate_arguments.validate.wrapper_function()\n",
+      "d:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd in pydantic.decorator.ValidatedFunction.call()\n",
+      "d:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd in pydantic.decorator.ValidatedFunction.execute()\n",
+      "D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\core\\models\\tabular_encoder.py in _fit_feature(self, feature, feature_type)\n    117             encoder = get_encoder(self.categorical_encoder)(**self.cat_encoder_params)\n    118         else:\n--> 119             encoder = get_encoder(self.continuous_encoder)(**self.cont_encoder_params)\n    120 \n    121         encoder.fit(feature)\n",
+      "D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\core\\models\\data_encoder.py in __init__(self, *args, **kwargs)\n    122 \n    123     def __init__(self, *args: Any, **kwargs: Any) -> None:\n--> 124         self.encoder = encoder_class(*args, **kwargs)\n    125 \n    126     def _fit(self, x: np.ndarray, **kwargs: Any) -> FeatureEncoder:\n",
+      "TypeError: __init__() got an unexpected keyword argument 'n_components'"
+     ]
+    }
+   ],
   "source": [
@@ -265,7 +273,7 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 15,
+  "execution_count": null,
   "id": "e1a270c9",
   "metadata": {},
   "outputs": [
@@ -361,7 +369,7 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 6,
+  "execution_count": null,
   "id": "49b18ada",
   "metadata": {},
   "outputs": [
@@ -406,7 +414,7 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 7,
+  "execution_count": null,
   "id": "a2e81779",
   "metadata": {},
   "outputs": [
@@ -569,7 +577,7 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 8,
+  "execution_count": null,
   "id": "1f55ffdb",
   "metadata": {},
   "outputs": [

From 244854d7c0c6137f5de9f84436644875e8d65756 Mon Sep 17 00:00:00 2001
From: TZCai <13818704679@163.com>
Date: Fri, 31 Mar 2023 01:11:44 +0200
Subject: [PATCH 33/95] debug and DDPM tutorial OK

---
 .../plugins/core/models/data_encoder.py      |    2 +-
 src/synthcity/plugins/generic/plugin_ddpm.py |    3 +-
 ...al8_tabular_modelling_with_diffusion.ipynb | 1345 +++++++----------
 3 files changed, 508 insertions(+), 842 deletions(-)

diff --git a/src/synthcity/plugins/core/models/data_encoder.py b/src/synthcity/plugins/core/models/data_encoder.py
index 97cdcebb..518400fa 100644
--- a/src/synthcity/plugins/core/models/data_encoder.py
+++ b/src/synthcity/plugins/core/models/data_encoder.py
@@ -67,8 +67,8 @@ def fit(self, x: pd.Series, y: Any = None, **kwargs: Any) -> FeatureEncoder:
         output = self._fit(input, **kwargs)._transform(input)
         self._out_shape = (-1, *output.shape[1:])  # for inverse_transform
         output = validate_shape(output, self.n_dim_out)
+        self.n_features_out = output.shape[1]
         self.feature_names_out = self.get_feature_names_out()
-        self.n_features_out = len(self.feature_names_out)
         self.feature_types_out = self.get_feature_types_out(output)
         return self

diff --git a/src/synthcity/plugins/generic/plugin_ddpm.py b/src/synthcity/plugins/generic/plugin_ddpm.py
index b1150ac9..9ac18878 100644
--- a/src/synthcity/plugins/generic/plugin_ddpm.py
+++ b/src/synthcity/plugins/generic/plugin_ddpm.py
@@ -155,9 +155,10 @@ def __init__(
         )

         self.encoder = TabularEncoder(
-            categorical_encoder="passthrough",
             continuous_encoder="quantile",
+            categorical_encoder="passthrough",
             cont_encoder_params=dict(random_state=random_state),
+            cat_encoder_params=dict(),
         )

     @staticmethod
diff --git a/tutorials/tutorial8_tabular_modelling_with_diffusion.ipynb b/tutorials/tutorial8_tabular_modelling_with_diffusion.ipynb
index 197b5737..d73d0f60 100644
--- a/tutorials/tutorial8_tabular_modelling_with_diffusion.ipynb
+++ b/tutorials/tutorial8_tabular_modelling_with_diffusion.ipynb
@@ -217,35 +217,31 @@
     "name": "stderr",
     "output_type": "stream",
     "text": [
-     "[2023-03-31T00:29:29.172830+0200][10148][INFO] Encoding sepal length (cm) 8461685668942494555\n"
+     "[2023-03-31T01:04:28.062034+0200][12004][INFO] Encoding sepal length (cm) 8461685668942494555\n",
+     "[2023-03-31T01:04:28.068034+0200][12004][INFO] Encoding sepal width (cm) 7372477013158199918\n",
+     "[2023-03-31T01:04:28.074037+0200][12004][INFO] Encoding petal length (cm) 8795408021141068254\n",
+     "[2023-03-31T01:04:28.081036+0200][12004][INFO] Encoding petal width (cm) 1839870727438321343\n",
+     "[2023-03-31T01:04:29.905425+0200][12004][INFO] Step 100: MLoss: 0.0 GLoss: 0.3103 Sum: 0.3103\n",
+     "[2023-03-31T01:04:31.486761+0200][12004][INFO] Step 200: MLoss: 0.0 GLoss: 0.3111 Sum: 0.3111\n",
+     "[2023-03-31T01:04:33.076905+0200][12004][INFO] Step 300: MLoss: 0.0 GLoss: 0.317 Sum: 0.317\n",
+     "[2023-03-31T01:04:34.611746+0200][12004][INFO] Step 400: MLoss: 0.0 GLoss: 0.3009 Sum: 0.3009\n",
+     "[2023-03-31T01:04:36.176039+0200][12004][INFO] Step 500: MLoss: 0.0 GLoss: 0.3154 Sum: 0.3154\n",
+     "[2023-03-31T01:04:37.956754+0200][12004][INFO] Step 600: MLoss: 0.0 GLoss: 0.3055 Sum: 0.3055\n",
+     "[2023-03-31T01:04:39.561269+0200][12004][INFO] Step 700: MLoss: 0.0 GLoss: 0.2917 Sum: 0.2917\n",
+     "[2023-03-31T01:04:41.195544+0200][12004][INFO] Step 800: MLoss: 0.0 GLoss: 0.2817 Sum: 0.2817\n",
+     "[2023-03-31T01:04:42.967236+0200][12004][INFO] Step 900: MLoss: 0.0 GLoss: 0.266 Sum: 0.266\n",
+     "[2023-03-31T01:04:44.913448+0200][12004][INFO] Step 1000: MLoss: 0.0 GLoss: 0.2793 Sum: 0.2793\n"
     ]
    },
    {
-    "ename": "TypeError",
-    "evalue": "__init__() got an unexpected keyword argument 'n_components'",
-    "output_type": "error",
-    "traceback": [
-     "---------------------------------------------------------------------------",
-     "TypeError                                 Traceback (most recent call last)",
-     [removed traceback frames, ANSI colour codes stripped; the call chain ran from the notebook cell (plugin = Plugins().get("ddpm", **plugin_params); plugin.fit(loader)) through the pydantic validate_arguments wrappers, plugin.py fit (output = self._fit(X, *args, **kwargs)), plugin_ddpm.py _fit (df = self.encoder.fit_transform(df)), sklearn's _set_output.py wrapped and base.py fit_transform, tabular_encoder.py fit and _fit_feature, into data_encoder.py __init__ (self.encoder = encoder_class(*args, **kwargs))],
-     "TypeError: __init__() got an unexpected keyword argument 'n_components'"
-    ]
+    "data": {
+     "text/plain": [
+      ""
+     ]
+    },
+    "execution_count": 4,
+    "metadata": {},
+    "output_type": "execute_result"
    }
   ],
   "source": [
@@ -273,7 +269,7 @@
  },
  {
   "cell_type": "code",
-  "execution_count": null,
+  "execution_count": 5,
   "id": "e1a270c9",
   "metadata": {},
   "outputs": [
@@ -358,7 +354,7 @@
     ")"
    ]
   },
-  "execution_count": 15,
+  "execution_count": 5,
   "metadata": {},
   "output_type": "execute_result"
  }
@@ -369,7 +365,7 @@
  },
  {
   "cell_type": "code",
-  "execution_count": null,
+  "execution_count": 6,
   "id": "49b18ada",
   "metadata": {},
   "outputs": [
@@ -385,7 +381,7 @@
   },
   {
    "data": {
-    "image/png": "[base64 PNG data for the previous plot omitted]",
+    "image/png": "[base64 PNG data for the regenerated plot omitted]",
    "text/plain": [
     ""
    ]
@@ -414,7 +410,7 @@
  },
  {
   "cell_type": "code",
-  "execution_count": null,
+  "execution_count": 7,
   "id": "a2e81779",
   "metadata": {},
   "outputs": [
@@ -449,82 +445,82 @@
    [stripped text/html table markup omitted; the ten regenerated iris samples repeat in the text/plain diff below]
    "text/plain": [
     "   sepal length (cm)  sepal width (cm)  petal length (cm)  petal width (cm)  \\\n",
-    "0           6.442119          2.934733           4.326933          1.372570   \n",
-    "1           6.285412          2.721440           5.120901          2.057547   \n",
-    "2           4.696350          2.042726           2.856909          0.788935   \n",
-    "3           5.336019          2.688533           4.163283          1.192051   \n",
-    "4           6.081825          3.221682           4.645768          1.505293   \n",
-    "5           5.690165          2.336088           4.105630          1.296607   \n",
-    "6           5.398935          2.757713           3.809984          1.161369   \n",
-    "7           7.358270          3.283428           6.496590          2.317238   \n",
-    "8           6.595327          2.598526           5.805653          1.451353   \n",
-    "9           5.224718          2.796224           3.500915          1.125248   \n",
+    "0           6.491386          2.937301           4.396537          1.363964   \n",
+    "1           6.272807          2.878930           5.028617          1.973149   \n",
+    "2           4.912787          2.239502           2.384605          0.845205   \n",
+    "3           5.115768          2.636920           3.933653          1.100583   \n",
+    "4           5.946947          2.976103           4.557983          1.417799   \n",
+    "5           5.528565          2.197114           4.133016          1.296019   \n",
+    "6           5.275113          2.565652           3.698843          1.068934   \n",
+    "7           7.900000          4.400000           6.899995          2.500000   \n",
+    "8           6.899334          2.847685           6.243627          1.561012   \n",
+    "9           5.267148          2.780006           3.565531          1.128439   \n",
     "\n",
     "   target  \n",
     "0       1  \n",
     "1       2  \n",
     "2       1  \n",
     "3       1  \n",
     "4       1  \n",
     "5       1  \n",
     "6       1  \n",
     "7       2  \n",
     "8       2  \n",
     "9       1  \n"
    ]
   },
@@ -577,7 +573,7 @@
  },
  {
   "cell_type": "code",
-  "execution_count": null,
+  "execution_count": 8,
   "id": "1f55ffdb",
   "metadata": {},
   "outputs": [
@@ -612,74 +608,74 @@
    [stripped text/html table markup omitted; the nine conditionally generated samples repeat in the text/plain diff below]
    "text/plain": [
     "   sepal length (cm)  sepal width (cm)  petal length (cm)  petal width (cm)  \\\n",
-    "0           5.200935          3.410448           1.294404          0.250156   \n",
-    "1           4.892172          3.404765           1.373966          0.317662   \n",
-    "2           4.546415          3.001362           1.379267          0.146012   \n",
-    "3           6.912333          3.372478           4.732009          1.638499   \n",
-    "4           5.479260          2.623246           3.496161          1.265118   \n",
-    "5           5.691610          2.568420           3.620842          1.025988   \n",
-    "6           6.935314          3.246951           6.209702          2.236808   \n",
-    "7           7.082495          3.061208           5.907195          1.950721   \n",
-    "8           6.066010          2.553123           5.193090          1.639034   \n",
+    "0           5.230361          3.371515           1.408195          0.201252   \n",
+    "1           4.705658          3.064075           1.388975          0.386298   \n",
+    "2           4.711709          3.056369           1.451635          0.195365   \n",
+    "3           6.981074          3.274333           4.803886          1.623058   \n",
+    "4           5.999308          2.927207           4.040594          1.389657   \n",
+    "5           5.698102          2.521559           3.288451          0.966808   \n",
+    "6           6.776549          3.012238           6.285867          2.134174   \n",
+    "7           7.900000          4.400000           6.896603          2.500000   \n",
+    "8           7.900000          4.400000           6.898989          2.500000   \n",
     "\n",
     "   target  \n",
     "0       0  \n",
     "1       0  \n",
     "2       0  \n",
     "3       1  \n",
     "4       1  \n",
     "5       1  \n",
     "6       2  \n",
     "7       2  \n",
     "8       2  \n"
    ]
   },
@@ -733,7 +729,7 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 29,
+  "execution_count": 9,
   "id": "13df0848",
   "metadata": {},
   "outputs": [
@@ -929,7 +925,7 @@
    "max      3.820000     1.080000   14.200000    9.000000  "
    ]
   },
-  "execution_count": 29,
+  "execution_count": 9,
   "metadata": {},
   "output_type": "execute_result"
  }
@@ -945,7 +941,7 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 47,
+  "execution_count": 10,
   "id": "14bca1cd",
   "metadata": {},
   "outputs": [
   {
    "name": "stderr",
    "output_type": "stream",
    "text": [
-    "[2023-03-27T18:08:18.761007+0200][38480][INFO] Step 100: MLoss: 1.2836 GLoss: 0.9867 Sum: 2.2703\n",
-    "[2023-03-27T18:08:24.679745+0200][38480][INFO] Step 200: MLoss: 1.2622 GLoss: 0.9409 Sum: 2.2031\n",
-    "[2023-03-27T18:08:30.391531+0200][38480][INFO] Step 300: MLoss: 1.2059 GLoss: 0.7669 Sum: 1.9727999999999999\n",
-    "[2023-03-27T18:08:36.164268+0200][38480][INFO] Step 400: MLoss: 1.1645 GLoss: 0.6393 Sum: 1.8038\n",
-    "[2023-03-27T18:08:41.835318+0200][38480][INFO] Step 500: MLoss: 1.1717 GLoss: 0.6158 Sum: 1.7875\n",
-    "[2023-03-27T18:08:47.581383+0200][38480][INFO] Step 600: MLoss: 1.1946 GLoss: 0.5384 Sum: 1.733\n",
-    "[2023-03-27T18:08:53.378127+0200][38480][INFO] Step 700: MLoss: 1.1343 GLoss: 0.5135 Sum: 1.6478000000000002\n",
-    "[2023-03-27T18:08:59.698145+0200][38480][INFO] Step 800: MLoss: 1.1168 GLoss: 0.4788 Sum: 1.5956000000000001\n",
-    "[2023-03-27T18:09:05.752638+0200][38480][INFO] Step 900: MLoss: 1.1034 GLoss: 0.4734 Sum: 1.5768\n",
"[2023-03-27T18:09:12.070003+0200][38480][INFO] Step 1000: MLoss: 1.142 GLoss: 0.4692 Sum: 1.6112\n", - "[2023-03-27T18:09:18.112377+0200][38480][INFO] Step 1100: MLoss: 1.1691 GLoss: 0.4602 Sum: 1.6293\n", - "[2023-03-27T18:09:25.549484+0200][38480][INFO] Step 1200: MLoss: 1.1201 GLoss: 0.4578 Sum: 1.5779\n", - "[2023-03-27T18:09:31.574874+0200][38480][INFO] Step 1300: MLoss: 1.1436 GLoss: 0.4429 Sum: 1.5865\n", - "[2023-03-27T18:09:37.672797+0200][38480][INFO] Step 1400: MLoss: 1.1093 GLoss: 0.449 Sum: 1.5583\n", - "[2023-03-27T18:09:44.149652+0200][38480][INFO] Step 1500: MLoss: 1.1468 GLoss: 0.4347 Sum: 1.5815000000000001\n", - "[2023-03-27T18:09:49.923915+0200][38480][INFO] Step 1600: MLoss: 1.1545 GLoss: 0.4313 Sum: 1.5858\n", - "[2023-03-27T18:09:55.733558+0200][38480][INFO] Step 1700: MLoss: 1.102 GLoss: 0.4305 Sum: 1.5325000000000002\n", - "[2023-03-27T18:10:03.367053+0200][38480][INFO] Step 1800: MLoss: 1.0953 GLoss: 0.4267 Sum: 1.522\n", - "[2023-03-27T18:10:10.533359+0200][38480][INFO] Step 1900: MLoss: 1.1247 GLoss: 0.4223 Sum: 1.5470000000000002\n", - "[2023-03-27T18:10:17.355705+0200][38480][INFO] Step 2000: MLoss: 1.2767 GLoss: 0.4266 Sum: 1.7033\n" + "[2023-03-31T01:04:50.377220+0200][12004][INFO] Encoding fixed acidity 8821222230854998919\n", + "[2023-03-31T01:04:50.427480+0200][12004][INFO] Encoding volatile acidity 3689048099044143611\n", + "[2023-03-31T01:04:50.442050+0200][12004][INFO] Encoding citric acid 735380040632581265\n", + "[2023-03-31T01:04:50.457233+0200][12004][INFO] Encoding residual sugar 2442409671939919968\n", + "[2023-03-31T01:04:50.473234+0200][12004][INFO] Encoding chlorides 7195838597182208600\n", + "[2023-03-31T01:04:50.488234+0200][12004][INFO] Encoding free sulfur dioxide 3309873879720413309\n", + "[2023-03-31T01:04:50.501098+0200][12004][INFO] Encoding total sulfur dioxide 8059822526963442530\n", + "[2023-03-31T01:04:50.512236+0200][12004][INFO] Encoding density 3625281346475756911\n", + "[2023-03-31T01:04:50.523222+0200][12004][INFO] Encoding pH 4552002723230490789\n", + "[2023-03-31T01:04:50.532220+0200][12004][INFO] Encoding sulphates 4957484118723629481\n", + "[2023-03-31T01:04:50.540983+0200][12004][INFO] Encoding alcohol 3711001505059098944\n", + "[2023-03-31T01:04:50.547987+0200][12004][INFO] Encoding quality 3457201635469827215\n", + "[2023-03-31T01:04:58.399971+0200][12004][INFO] Step 100: MLoss: 1.3342 GLoss: 0.9783 Sum: 2.3125\n", + "[2023-03-31T01:05:04.973385+0200][12004][INFO] Step 200: MLoss: 1.2858 GLoss: 0.9031 Sum: 2.1889000000000003\n", + "[2023-03-31T01:05:11.741000+0200][12004][INFO] Step 300: MLoss: 1.186 GLoss: 0.7758 Sum: 1.9618\n", + "[2023-03-31T01:05:18.619270+0200][12004][INFO] Step 400: MLoss: 1.1481 GLoss: 0.6615 Sum: 1.8095999999999999\n", + "[2023-03-31T01:05:24.930108+0200][12004][INFO] Step 500: MLoss: 1.1661 GLoss: 0.6094 Sum: 1.7755\n", + "[2023-03-31T01:05:31.651906+0200][12004][INFO] Step 600: MLoss: 1.1902 GLoss: 0.5381 Sum: 1.7283\n", + "[2023-03-31T01:05:38.246164+0200][12004][INFO] Step 700: MLoss: 1.1305 GLoss: 0.5087 Sum: 1.6392000000000002\n", + "[2023-03-31T01:05:44.776216+0200][12004][INFO] Step 800: MLoss: 1.1131 GLoss: 0.4832 Sum: 1.5963\n", + "[2023-03-31T01:05:51.917105+0200][12004][INFO] Step 900: MLoss: 1.1014 GLoss: 0.4786 Sum: 1.58\n", + "[2023-03-31T01:05:59.098745+0200][12004][INFO] Step 1000: MLoss: 1.1479 GLoss: 0.4707 Sum: 1.6185999999999998\n", + "[2023-03-31T01:06:05.690366+0200][12004][INFO] Step 1100: MLoss: 1.1712 GLoss: 0.4693 Sum: 1.6405\n", + 
"[2023-03-31T01:06:12.549553+0200][12004][INFO] Step 1200: MLoss: 1.1199 GLoss: 0.4611 Sum: 1.581\n", + "[2023-03-31T01:06:19.575478+0200][12004][INFO] Step 1300: MLoss: 1.1525 GLoss: 0.4614 Sum: 1.6139000000000001\n", + "[2023-03-31T01:06:26.641319+0200][12004][INFO] Step 1400: MLoss: 1.1164 GLoss: 0.4671 Sum: 1.5835000000000001\n", + "[2023-03-31T01:06:33.249503+0200][12004][INFO] Step 1500: MLoss: 1.1356 GLoss: 0.4577 Sum: 1.5933\n", + "[2023-03-31T01:06:40.025759+0200][12004][INFO] Step 1600: MLoss: 1.1367 GLoss: 0.4541 Sum: 1.5908\n", + "[2023-03-31T01:06:46.754777+0200][12004][INFO] Step 1700: MLoss: 1.0896 GLoss: 0.4524 Sum: 1.5419999999999998\n", + "[2023-03-31T01:06:54.036939+0200][12004][INFO] Step 1800: MLoss: 1.075 GLoss: 0.4471 Sum: 1.5221\n", + "[2023-03-31T01:07:00.554405+0200][12004][INFO] Step 1900: MLoss: 1.1154 GLoss: 0.4495 Sum: 1.5649\n", + "[2023-03-31T01:07:07.289610+0200][12004][INFO] Step 2000: MLoss: 1.266 GLoss: 0.454 Sum: 1.72\n" ] }, { "data": { "text/plain": [ - "" + "" ] }, - "execution_count": 47, + "execution_count": 10, "metadata": {}, "output_type": "execute_result" } @@ -1004,7 +1012,7 @@ }, { "cell_type": "code", - "execution_count": 48, + "execution_count": 11, "id": "83064f94", "metadata": {}, "outputs": [ @@ -1014,13 +1022,13 @@ "" ] }, - "execution_count": 48, + "execution_count": 11, "metadata": {}, "output_type": "execute_result" }, { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXQAAAEGCAYAAAB1iW6ZAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/P9b71AAAACXBIWXMAAAsTAAALEwEAmpwYAABiV0lEQVR4nO2dd3hU1fa/3z2TSSa9997ovfcqSFERsSsCevXqvViuX/V61Z967V67omIXK6CAoggovZcQagokpPfee+b8/jiTISEJJJDOfp8nTybn7HPOmjOTz1577bXXEYqiIJFIJJLuj6azDZBIJBJJ2yAFXSKRSHoIUtAlEomkhyAFXSKRSHoIUtAlEomkh2DWWRd2cXFRAgICOuvyEolE0i05cuRIjqIork3t6zRBDwgIICwsrLMuL5FIJN0SIURic/tkyEUikUh6CFLQJRKJpIcgBV0ikUh6CJ0WQ5dIJJILUV1dTUpKChUVFZ1tSqeg1+vx8fFBp9O1+Bgp6BKJpEuSkpKCra0tAQEBCCE625wORVEUcnNzSUlJITAwsMXHyZCLRCLpklRUVODs7HzFiTmAEAJnZ+dWj06koEskki7LlSjmdVzKe+92gn4m/wzvh79PfkV+Z5sikUgkXYpuJ+hJRUl8dvIzMssyO9sUiUQi4euvv2bp0qWdbQbQDQXd1twWgOKq4k62RCKRSLoW3VbQi6qKOtkSiUTS00lISKBPnz4sXryYXr16cccdd7BlyxbGjx9PaGgohw4datR+2rRpDBo0iOnTp5OUlATATz/9xIABAxg8eDCTJk0CICIiglGjRjFkyBAGDRpETEzMZdt70bRFIYQv8A3gDijAp4qivHdemynAr0C8cdNaRVFeuGzrmkB66BLJlcd/f4sgMq1tnbh+XnY8d23/i7aLjY3lp59+4ssvv2TkyJH88MMP7Nmzh/Xr1/PKK69w/fXXm9o++OCDLFq0iEWLFvHll1/y0EMP8csvv/DCCy+wefNmvL29KSgoAGD58uU8/PDD3HHHHVRVVVFbW3vZ76klHnoN8H+KovQDxgD/FEL0a6LdbkVRhhh/2kXMAezM7QAp6BKJpGMIDAxk4MCBaDQa+vfvz/Tp0xFCMHDgQBISEhq03b9/P7fffjsACxcuZM+ePQCMHz+exYsX89lnn5mEe+zYsbzyyiu8/vrrJCYmYmlpedm2XtRDVxQlHUg3vi4WQkQB3kDkZV/9ErDR2QBS0CWSK4mWeNLthYWFhem1RqMx/a3RaKipqWnROZYvX87BgwfZsGEDw4cP58iRI9x+++2MHj2aDRs2MGfOHD755BOmTZt2Wba2KoYuhAgAhgIHm9g9VghxXAixUQjR5N0XQtwnhAgTQoRlZ2e33lpAq9Fio7ORgi6RSLoc48aNY+XKlQB8//33TJw4EYCzZ88yevRoXnjhBVxdXUlOTiYuLo6goCAeeugh5s2bx4kTJy77+i0WdCGEDbAGeERRlPODWeGAv6Iog4EPgF+aOoeiKJ8qijJCUZQRrq5N1mdvEbbmtnJSVCKRdDk++OADvvrqKwYNGsS3337Le++p042PP/44AwcOZMCAAYwbN47BgwezevVqBgwYwJAhQzh16hR33XXXZV9fKIpy8UZC6IDfgc2KorzdgvYJwAhFUXKaazNixAjlUh9wsWD9ArxsvPhg2geXdLxEIun6REVF0bdv3842o1Np6h4IIY4oijKiqfYX9dCFuv70CyCqOTEXQngY2yGEGGU8b24rbW8xduZ2MuQikUgk59GSaovjgYXASSHEMeO2pwA/AEVRlgM3Ag8IIWqAcuBWpSWu/yVia25Laklqe51eIpFIuiUtyXLZA1ywSoyiKMuAZW1l1MWwNbeVHrpEIpGcR7dbKQoy5CKRSCRN0S0F3dbclpLqEmoNl7+ySiKRSHoK3VbQAUqqSzrZEolEIuk6dEtBr1v+L3PRJRJJZ7B48WJ+/vnnzjajEd1S0GWBLolEImmMFHSJRCK5AC+++CK9e/dmwoQJ3Hbbbbz55psN9m/dupWhQ4cycOBA7r77biorKwF48skn6devH4MGDeKxxx4Dmi6j25a0JA+9y1G/4qJBMaAR3bJfkkgkLWXjk5Bxsm3P6TEQZ
r92wSaHDx9mzZo1HD9+nOrqaoYNG8bw4cNN+ysqKli8eDFbt26lV69e3HXXXXz88ccsXLiQdevWER0djRDCVDK3qTK6bUm3VMI6D31v2l5Gfz+a03mnO9kiiUTSE9m7dy/z5s1Dr9dja2vLtdde22D/6dOnCQwMpFevXgAsWrSIXbt2YW9vj16v55577mHt2rVYWVkBTZfRbUu6tYf+29nfqKytZP3Z9Tzu9HgnWyWRSNqNi3jSXQ0zMzMOHTrE1q1b+fnnn1m2bBnbtm1rsoyus7Nzm123W3roVjorNEJDZa0aq9qcsBmDYuhkqyQSSU9j/Pjx/Pbbb1RUVFBSUsLvv//eYH/v3r1JSEggNjYWgG+//ZbJkydTUlJCYWEhc+bM4Z133uH48eNA02V025Ju6aFrhAYbnQ1FVUWM9x7P3tS9HM8+zlC3oZ1tmkQi6UGMHDmS6667jkGDBuHu7s7AgQOxt7c37dfr9Xz11VfcdNNN1NTUMHLkSO6//37y8vKYN28eFRUVKIrC22+rdQ0ff/xxYmJiUBSF6dOnM3jw4Da1t0Xlc9uDyymfCzBrzSwySzPZuGAj16y7hhtCb+Cp0U81aldaXcr6s+uZ6T8TZ8u2G9pIJJL2pauUzy0pKcHGxoaysjImTZrEp59+yrBhwzrk2m1ePrer4mPjw0SfiXhYezDZZzKb4jdRXVvdoE14ZjjXrbuOVw6+wuuHX+8kSyUSSXfmvvvuY8iQIQwbNowFCxZ0mJhfCt0y5ALw7tR3TemK80Lm8Wfin+xO3c1kn8lUG6rRm+l558g7aDQa5gTO4Y/4P1jSfwl9nRv2dnGFcfx+9neWDl0q0x8lEkkjfvjhh842ocV0WwWzMbfBSqemAo3zGoez3pkfo3/ktg23cdfGu8guy+Z49nEWhC7gmTHPYG9hz3tH32t0nrVn1vLZyc9ILm7byQmJRCLpaLqtoNfHTGPGtcHXciD9AFF5UUTlRfHCgRdQUJjuNx1bc1sWhC7gQNoBU2ZMHVF5UQ1+SyQSSXelRwg6wI29biTALoD/Tfof7lbu7EjegZ+tHyEOIQD0depLrVJLQmGC6RhFUYjKVYU8Oje6E6yWSCSStqPHCLq/nT+/zf+N2YGzuaPvHQBM95uO8VGnBDsEAxBbEGs6JqUkheJqtR5MdJ4UdIlE0r3ptpOiF+KmXjdxOv80N/W6ybQtwC4AM2HG2YKzpm113nmIQ4gUdIlE0ggbGxtKSrrPcxd6jIdeHxtzG16b+Bq+dr6mbTqtDj87vwYeenReNGZCjb/nVuSSXZbdGeZKJBJJm9AjBb05gh2CG3jokXmRBDsEM8hlEAB/xP/ByuiV1BhqGhxXXVvNz2d+ZnvSdgorCzvUZolE0vkoisLjjz/OgAEDGDhwIKtWrQIgPT2dSZMmMWTIEAYMGMDu3bupra1l8eLFprbvvPNOh9nZI0MuzRHiEMKWxC1U1FRgrjUnKjeKid4T6e3UG4A3w9Q6xynFKcwPnc+e1D3c0fcOtiZv5b/7/wvAMLdhrJi9otPeg0RyJfL6odfbPCzax6kP/x717xa1Xbt2LceOHeP48ePk5OQwcuRIJk2axA8//MDVV1/N008/TW1tLWVlZRw7dozU1FROnToF0C5lcpvjihN0BYW4wjiKqorIq8hjjNcYU1qjTqOj2lDNisgVfBf1HbVKLaEOoRzPOo6lmSULQhfwfdT35JTn4GLp0tlvRyKRdBB79uzhtttuQ6vV4u7uzuTJkzl8+DAjR47k7rvvprq6muuvv54hQ4YQFBREXFwcDz74IHPnzmXmzJkdZucVJ+gAp/NOsy9tH3bmdszwnwHA8+OeB6DaUE1FbQV6rZ5fz/7KwYyDHMs6Rn/n/swLmcd3Ud+xO2U380Pnd9bbkEiuOFrqSXc0kyZNYteuXWzYsIHFixfz6KOPctddd3H8+HE2b97M8uXLWb16NV9++WWH2HNFxdD97PzwtvHm3fB32Zq0lWuDr8VCa9GgjU6j47WJr/H8uOcZ5DKIXSm7iM6LZrDrYHo79sbD2oPtyds76R1IJJLOYOLEiaxatYra2lqys7PZtWsXo0aNIjExEXd3d+69917+9re/ER4eTk5ODgaDgQULFvDSSy8RHh7eYXZeUR66mcaM5VctZ9GmRVQbqlkQuuCC7Ud5jmL58eUADHYdjBCCyT6TWX92PRU1FejN9B1htkQi6WTmz5/P/v37GTxY1YH//e9/eHh4sGLFCt544w10Oh02NjZ88803pKamsmTJEgwG9RkNr776aofZ2W3L514OcQVxROZFck3QNRdsdzjjMHdvvhuAnbfsxEnvxN7Uvdy/5X5em/gac4PmdoS5EskVSVcpn9uZXDHlcy+HIIegi4o5qF65hdYCP1s/nPROAIzxHEOoYygfHvuwUbleiUQi6UyuSEFvKeZac27ve3uDFadajZZHhj1CcnEyHx3/iLiCuE60UCKRSM4hBf0iPDr8URYPWNxg20TviYzzGsfnJz9n3q/z+D3u3HMGT+WcYmX0yg62UiLpmXRWSLgrcCnvXQr6JSCEYNm0ZXw/53tCHUP5/MTnppv/Q9QPvHbotUZleiUSSevQ6/Xk5uZekaKuKAq5ubno9a1LvLiislzaEp1WxyDXQSzpv4Sn9jzFntQ9TPSZSGJxoqlMb90KVIlE0np8fHxISUkhO/vKrLGk1+vx8fFp1TFS0C+TWQGzePfIu3wb+S0TfSaSVJQEQExBjBR0ieQy0Ol0BAYGdrYZ3QoZcrlMdFodswNnE5YZRm55LgWVBQDE5sde+ECJRCJpY6SgtwH9XfpTbahusIK0fpleiUQi6QikoLcBfZ3UxP9NCZsACHUMlYIukUg6HCnobYCfnR9WZlYczjgMwFTfqaSWpFJaXdrJlkkkkiuJiwq6EMJXCLFdCBEphIgQQjzcRBshhHhfCBErhDghhBjWPuZ2TTRCQx+nPhgUAx7WHvR37g/Q4GEaEolE0t60xEOvAf5PUZR+wBjgn0KIfue1mQ2EGn/uAz5uUyu7Af2c1VviZ+tHqEMoAF+c/ILj2ccB2JywmXm/zJP56RKJpN24qKAripKuKEq48XUxEAV4n9dsHvCNonIAcBBCeLa5tV0Yk6Db+eFt682sgFnsS9vH4o2LSS5O5ouTXxBXGGd6MLVEIpG0Na2KoQshAoChwMHzdnkDyfX+TqGx6COEuE8IESaECOtpiwXqJkb9bf3RCA1vTH6D3+f/jhCCp3Y/RVSeKuSnck51ppkSiaQH02JBF0LYAGuARxRFKbqUiymK8qmiKCMURRnh6up6KafosgQ7BPP82OeZFzLPtM3d2p0bQm/gWPYxLM0scdI7cSLnRCdaKZFIejItEnQhhA5VzL9XFGVtE01SAd96f/sYt10xCCFY0GsBjnrHBtuXDFiCmTBjduBshrsPlx66RCJpN1qS5SKAL4AoRVHebqbZeuAuY7bLGKBQUZT0NrSz2+Jt483Ka1by+IjHGeAygOTiZPIr8imrLuts0yQSSQ+jJbVcxgMLgZNCiGPGbU8BfgCK
oiwH/gDmALFAGbCkzS3txtTVdBnoMhCA+7fcT3xhPGuuXYOvne+FDpVIJJIWc1FBVxRlDyAu0kYB/tlWRvVU+jn3QyCIzI1EIzT8ePpHnhj5RGebJZFIeghypWgHYq2z5qFhD/H6xNe52v9qfon5RYZeJBJJmyEFvYP528C/MSdoDrf3vZ3i6mJ+PftrZ5skkUh6CFLQO4nBroMZ5jaMd468Q0RORGebI5FIegBS0DsJIQRvTXkLJ70T/9j6D3LKc1p9jsraSmavmc3G+I3tYKFEIuluSEHvRFwsXVg2bRlFlUUsP7681cefzjtNSkkK25K2Ndq35swaTmafbAszJRJJN0EKeicT4hjCgl4L+PnMz3wf9T3/b+//a+StV9ZW8tqh10gpTmmwvW6R0rHsYw22K4rCKwdfYUXkina1XSKRdC2koHcB7h98PxZaC1479Bq/xP7Cf/f9t8GTzg+lH+L7qO95es/TGBSDaXtErhp7zyjNIKM0w7Q9ryKPKkMVZ/LPdNybkEgknY4U9C6Ai6ULH07/kGXTlvHYiMfYkbKDX2J/Me0/nKk+OCM8K5yfz/xs2h6RE4GblRsA+9P2M//X+ayLWUdGmSruiUWJslyvRHIFIQW9izDCYwSTfSezsN9ChrsP542wN0yhl7CMMIa4DmG052jeOfIOOeU5lFWXEVcYx7zgeei1et4Ie4PYglgOZxw2eesGxSAfsiGRXEFIQe9iaISG58Y+R0VNBW8cfoPS6lIicyMZ6TGSp0c/TUVNBe+Hv09kbiQKCkPchtDfpT/FVcUAJBYnNgi/yLCLRHLl0JJaLpIOJtA+kLsH3M0nJz7BTGNGrVLLSI+RBNoHcme/O/k64muTUPd37s8oj1GczD7JcPfhROdFk1mWiU6jQyM0xOTHdPK7kUgkHYUU9C7KvYPu5UT2CdafXY+ZxozBroMB+Pugv5NQmEBMQQwTvCfgbOnMPQPv4fqQ69mcsJn96fuJzY/F3codOws76aFLJFcQUtC7KBZaC5ZNX8Z/9/8XM40ZVjorAGzMbfhg+geN2nrZeOFn6weok6d9nfriY+vDjuQdfHD0A0Z6jGSM55iOfhsSiaQDkYLehTHXmvPyhJdb3L6uFG9pdSke1h70duzNL7G/8OmJT9mdspvV165uL1MlEkkXQE6K9iB8bHxMrz2sPZgfOp+Xxr/E3QPuJiovitSSrvEQqcLKQuasnSNr2EgkbYwU9B6Elc4KN0s1L93DygNrnTXzQuZxY+iNAGxL2sbq06vZkbyj84wEYgtiSS5O5nj28U61QyLpaciQSw/D186XrPIsPKw9Gmzr5diLz058Rn5lPsPchjHFd0qn2ZhVlgVAdnl2p9kgkfREpIfew6ibGK0v6ADT/aaTX5kPQHpp6x73alAMvHTgJVOxr+raakCtGfNd5Het9rRNgl52TtCTi5PZlbKrUdtN8ZuYvGoy5TXlrbqGRHIlIgW9hxHsEIyZMMPTxrPB9lv73Mq9A+/l9j63k1mWSbWh+oLnySnP4eqfr2Z70nZOZJ9g1elVvHroVXLLc5m5ZiaP7niU76O+5/XDr/Ph0Q9Nx8Xkx3DL77dQUFHQ7LnrBL1+EbLlx5fz8PaHGwl3WGYYeRV5xObHtvQWSFrA1sSt7E/b39lmSNoYKeg9jJt738x3c7/DztyuwXYnvRMPDXuIPk59MCgGMkszee3Qa7y4/8Umz/ND1A+klabxfdT3bEncAsDJnJP8/a+/k1+Rz1+Jf/H64dex0FpwOPMwJVUlAOxI3kFkbiRHMo80a2NTIZeInAhqDDWmCpJ1JBQlABBTIBdItSVvHXmLj49/3NlmSNoYKeg9DEszS/o79292f53nnl6azpbELWyM34hBMfB22Ns8tvMxAMqqy1h1ehUWWgsOZhzkt7jfGO05Gncrd07nn+buAXfzyoRXmOA9gTcmvUGNoYa9aXsBiMqLAiAyL7JZG8730Ovq0gAcyzrWoG1CYQKAXPHahlTXVpNWkkZycXJnm3JFciD9AGklae1ybinoVxje1t4AROVGkVmWSXF1MWcLzrL+7Hq2Jm2loqaCdbHrKKoq4sXxqveeV5HHrIBZPDnqSSZ6T+TeQfdybfC1fHzVx0zymYSDhYMpcyYyN9J0/mpDNZsSNlFrqG1gQ2ZZpum81bXVROVFoaAgEIRnhZvalVWXmdpKQW87UkpSqFVqTUXeJB2HQTHwjy3/YOXple1yfinoVxge1h4IBDtTdpq2rYlZQ25Frink8Uf8H/R16svswNmM9hiNQDDFdwpX+V/FR1d9hKWZpelYrUbLJJ9J7ErZRW55LqklqWiEhsjcSH47+xuP73ycvxL/MrVXFIXssmxszW0ByK3INXUCk30nczz7uKnme1JxEgD2FvacyT/ToEZ8VyOtJK1L21efxKJE0+uUkpQLtJS0NVllWVQbqhusGWlLpKBfYei0Otys3AjPVD1hC61FgxrrW5O2cjL7JFN9pwLw+MjHeWH8C7hYujR7zqv8rqKoqoiPjn0EwCTvSeRW5PJt5LcA/BH/h6ltQWUBVYYq+jn3A9RMl4hcta77DP8ZFFcVm0r+1oVb6jJ0cityW/w+M0ozTMe3NynFKcxeO5sN8Rs65HqXS31Bb03YRVEUlh1dJieoL4O6xX3eNt7tcn4p6FcgXjZe1Cg12JrbMtZzLJW1lfjZ+hFsH8zq06tRUJjkMwmA3k69uT7k+gueb5LPJHxtfVl9Ri0tcEPoDYC6gMhWZ8vu1N0UVhYC5+LndXH+7PJsInIi6O/cn6GuQwE4mnUUgPiieASC6X7TgdaVAn75wMvcv+X+S/KaFUUxlSAuripmRcQKagw1zbaPzovGoBjYFL+p1dfqDBKLEtFr9QCNHmt4ISJyI/jkxCfdpuPqitTdbynokjbDy8YLgF6OvRjiNgSAMZ5jGOo+lCpDFc56Z/o6923x+bQaLXf2vRNQv6ijPdUwDcDTY56mxlDDhrgNlFSVmAR9gMsAAOIK40goSqCfcz98bH1w1jubBD2hMAFPa09T29bE0etKHdRlybSGtTFrmb1mNolFiaw+vZo3w940jWiaom5Cd3/afkqrS1t9vYzSDIqqilp93KWSVJREL6de2JrbtspD35q0FaDdJvQ6AkVR+CbimwZrIM7fX1Vb1W7XTy1JRSBM/4NtjRT0KxAva/XLFOoQygiPEQCM9x7PUDfVQ57oMxGNaN1X4/qQ67Ezt2OAywCsdFYEOwQzwHkAcwLn4Gfrx6uHXmXiqomm8E4fpz4IhOnvcV7jEEIwzH2YSdATixLxt/PHSe+Ep7UnB9IPtMiWoqoi02TqvrR9rXofAOvPrqdGqeGP+D9MIlb3/NamiCuMQyM0VBmq2J26u1XXSi5K5ob1N/DygXNF2KJyo5i2ehrJRc2LbVJREv/d/1/iC+NbdT1QU0ED7ALws/W7JEGv/wCVtqCwsrDDMm7iC+N5I+wNfov7rcn9v8f9ztTVU9ttsji1JBU3KzfMtebtcn4p6FcgdcO9UMdQBrsO5udrf2aq71RGe4zGysyKOYF
zWn1OK50V3875lidHPQnA21Pe5q0pbyGE4I3Jb/DEyCfwtfVlW/I2QK0146R3IrUkFW8bbwa6DARgiOsQUktSSStJI74wngD7AADmhcxjb+reFoUI6mK8AtFI0GPzY1kZvbJR5k0dGaUZhGeFmzqbkznq6tiI3AiqDdVsiNvQ6Ni4gjhGeYzCSe/E5vjNGBQDn5/8nOf2PXfBkE95TTn/2vEviquKOZxx2NR25emVZJdnE5YZ1uRx25K2sWD9An4+8zOfn/zctH1TwiYWb1pMRU0F4ZnhPLL9EU5kn2h0zcyyTPxs/fC19W2xkMYVxhFfGI+5xvyiK41PZp9k/dn1jVJQm+PFAy+yeOPiDplUrlvP0NwoY2/aXoqqitqtg0kpTmm3cAtIQb8i6ePcB63QmsItvZ16I4TA3dqdA7cfYKzX2Es6b5B9kGnyNNA+0DSs7Ofcj4X9FprE3knvhE6rw9XKFYCZATMRQg3R1I0S3gx7k7KaMiZ4TwBgQegCNELTYAK3OWILVEGf7DuZwxmHGwyhPzj6AS8ffJmHtz/cwAsrrCxkY/xG08O57+x3pyk81NuxNxE5Efx29jee3P1kg+JmBsVAQlECIQ4hzAmcw5akLcz4aQbvhb/H2pi1F4z7fx/1PafzT3OV31Vkl2eTWpJKWXWZKRbf3LFfnfoKD2sPZvjP4K/Ev0xhnu8iv+NI5hG+jviaZ/c9y9akrdzxxx38dOYn07FJRWrmkL+9P762vqSXpF9w1XBJVQlP7X6KpVuXAjA7cDZZZVkXnFP4145/8fSep1m4caEpg6k5Kmoq2JWyi6zyLJKLk6k2VLdr+KkubNdcdk9dBdB2E/SSFHxs2yfDBaSgX5H0d+7P3tv20suxV6N9dcLaHozzGsdM/5mm+Hyd+F8dcLWpTR/nPui1ev5K/AsfGx+ToHtYezDZZzJrY9ZSUVOBoigczTrKJ8c/aeSFn8k/g43OhhtCbqC8ppy7Nt7Fx8c+ptZQy+HMwwTaB7I7dTeP7XzM5G2viFjBE7ue4MNjH9LXqS9/G/g3tEJLiEMIswNnk1KSwspoNXd4X9o+yqrLePPwm0TmRlJeU06QQxD/N+L/+O+4/2JnYcc/Bv8DrdCyKaHpidJaQy0/nf6JUR6juH/w/YA6GbwlaQtlNWXY6GyaFPSSqhJO5pxkhv8M7up3F+U15fyZ8CepJakczz6OpZklHx77kMSiRN6Y/AZ9nfqy9sxa0/F1GS7+tqqg1yg1ZJQ0HUKpMdTw2M7H2Bi/EX87fx4a+hBD3YZSq9Q2G4MuqCggsyyTW3rfYrpXF+Jg+kFTuYfj2cd5P/x9rl13bbO1e2oMNZflydd19k156MVVxaY5l/YQ9KraKrLLststZRGkoF+xWOusO+W6b0x+g4+nq0vO+zv3p69TX/o59TPt12l0DHRVwy+39L6lQSx/Uf9F5Ffm88mJT1h+Yjl3bbyLZceW8fSepxt44bEFsYQ4hDDOexzXh1xPZW0lHx//mK1JWymuKub+Qffz9Oin2Z26m/eOvgfAoYxDBNoHcm3QtTww+AGc9E78a/i/eHDog/R3UTNyovKiTGGctTFrWRG5gv/s/g+gjk7MNGbcEHoD6+at44EhDzDKYxSb4jc1KUB7UveQVprGzb1vJsQhBBudDUcyj7AyeiW+tr5cHXA1Z/LPUFpdyqsHXyWzVJ0TCMsMo1apZYznGAa7DibALoCfzvzEb2fVmPCbk99EK7SM9x7PrIBZTPWbSkRuBPkVamG2utx+fzt/gh2CATiRc6KRfQCfnfyMvWl7eWbMM3x81cfcO+hePK3PrTRuirqQxlTfqYQ4hHAo/ZBp36H0QxxMP9ig/fbk7VjrrLHR2RCeFc6GuA3kVeSxMX5jo3MbFAM3rL+BD45+0Ghffcqqy0ylKBrZl38u5FJjqOGhbQ+Z5mbqjyaSi5OJyIlgyaYllzTRXVRVxJRVU9ietN20La0kDQUFb1sZcpH0EDRCYxoFLB26lJXXrGw0KhjrORZrnXWjdMlh7sO4PuR6vjr1FR8d+4i5QXN5c/Kb5JTnmHLdFUUhJj+GEMcQLLQWvDj+RT6crhYPe+XgKwCM8hzFzb1vZkHoAr4+9TWx+bFE5EQw3W86r0x8hal+ag7+ov6LmOY3zZQzD2onk1KSYopd13l0QfZBjd7rrMBZpJSkmCZUI3IjKK4qRlEUfoz+ERdLF6b5TUOr0TLYbTC/xv7KyZyT3DfoPno59qKgsoAvTn7BD9E/8PTepzEoBg6kH0Cv1TPYbTBCCJYMWMLJnJN8eOxDBrkMYpLPJFZes5K3Jr8FwHiv8SgoJiFNKEzAzdINK50VA1wG4Grp2mDhV322JG5hlMcoFvRaYNrmYaNW8UwrPefhnsg+wdKtS9kQt8EkmCEOIYz2HM3RrKNU1VYRkx/DA1se4PGdj5s63+KqYrYnb2ei90QGuw5mQ9wGssuz0Wl0rIxe2agjjMqNIr4wnm1J25q0F9SQydx1c3l4+8ON9pXXlJNcnIyT3onK2koOZxxme/J2/kz4E8BUR6hubmFzwmbCMsMadEot5XD6YXIrchss4GvvHHSQgi7pZJrKplk8YDF/3PAHDnqHRvseG/EYjnpH+jr15fmxzzPTfyahjqF8deorVkSs4NVDr1JUVUSoQ6jpGC8bL8Z5jSO3IpdQx1BTqOe+QfcB6qRcjVLDSPeRTdpoZ25HgF0AvR17s7DfQkBd4frwsIfRa/U4WjjiqHdsdNx0v+lYmVnxz63/ZOnWpdz6+60s2bSEL059wd60vdzR9w50Gh0AQ12HUqPUMMF7AvOC55nCYd9EfoOlmSUH0w/y0bGP2Ju6l2Huw7DQWgBqzv+7U9/FSe/ELX3UMEcfpz6mEVh/5/7YmduZQh9JxUn42fmZ7v1V/lepo4WSNJYdXWaqkllQUcCZ/DOM9hzd4D3VeegZpRmEZYRx/5b7ueOPO9iZspNvI78ltiAWO3M73KzcGO0xmoraCval7eOJXU8ghCC/Mp/tydv5NvJbpv80nbyKPK4JuobBroMprynHXGPOg0MfJCovylQfqI4dKTsAOFt4tsmQT2JRIks2LyG3PJewzDCKq4pN+yJyIojOi26wxmJzwmbg3FxFRG4EPjY+DHAe0OABLAczDtJaDmWonUD90tIdIejyAReSLodOo8NJ79TkPnsLe9Zdtw5LnaVJ1Bb3X8zTe57mzbA3sTSzxFnvzCiPUQ2OuyH0Bvam7W3woGwvGy9GeY7iYPpBzDRmpknipnh90uvotXp8bX3xtvGmqKqIO/regb2FvWnRVFO2fjvnW57f9zz70/ZzS+9bWBuzlvfC32OSzySW9F9iajszYCaHMw/z3NjnEEIQ6qh2SJW1lTw24jGOZB7hkxOfADA/dH6D60z3m84032lNzn9oNVrGeI5hb9peFEUhsSjRtAoYYKb/TH6M/pHbN9xObkUuCgoPDn3QVC3z/PtoaWaJo4Ujx7OO8+GxD3G0cGTpkKVU1Fbw+cnPKakuIcQhBCEEwz2GoxEaHtn+CAoKy6Yt44UDL/
DB0Q9IKkpinPc4HhyihrR0WrVjm+A9Qa0YGvUdD2x5gKv8rmJ+6HzGeY1jZ/JOnPRO5FXkcSjjEHOD5jawbVvSNsprynlh3As8u+9ZjmQeYYrvFI5lHWPhxoWm79QUnyn8EvuLKQ0zJj8Gg2LgVM4pBrkOwsfWhz8T/yS3XF2ZfH6YqCXUCfrZgrMUVxVjo7Phj/g/cNI74Wbl1urztRQp6JJux/me+7VB1+Jl7YW/nb8pc+Z8pvpO5Zbet3BjrxsbbL8+5HoOph9koMtArHRWzV6zftjlqdFPUW2oxtLMkpt63XRBW3s59uK7Od9RVl2GjbkNk3wmsSFuA8+MeQatRmtqF2gfyOczz6Ug2lvY42ntSVZZFnOD5rKw30KOZR0jPCucBaELGl3nQpPZ47zG8WfinxzPPk5eRR7+dv6mfUPdhuKsdya3IhdvG2/Wxazj/sH3cyjjULOVOz2sPdiRsgOBYMXsFfja+nI67zSfn/ycxKJEU6dpZ27HGM8xpBSn8PKElxniNoT5IfP5+PjH+Nv58/bkt033fIjrEHo59uKW3rdgrbNm3bx1rIhYwarTq9iStIW+Tn2Jyovi4WEP8+WpLzmYfrCRoB/MOEigfSBzg+by8sGXOZh+kCm+U/g97nfMhBl5FXmYa8xNo46CygIAymrKOJh+kPTSdBb1X4SVmRW1Si21tbX0cepDdF40OeU5Fyx/AeqE7bakbfRx6kNsQSyjPUdzMP0gJ7NPUlhVyJHMIzw79tlWr/FoDVLQJd0eIYRpgVRz6LQ6nhnzTKPt0/2m42rp2qpH8tUN2VuKRmiwMbcxHdvS42f6z6S8ptwkJMPchzHMfVirrg2qoAP8EP0DQANB12q0PDf2OcpqyrDWWfPgtgfZmbyTw5mHGeo21OQ518fLxouovCgm+07G19YXUDsudyt3MssyG2RPLZu+DDNhZupwbup1ExG5ESwdsrRBB2qls2LNdWtMf9uZ2/Hg0Ae5f9D9bEzYyKsHXwXUjvlk9kmTBxyWEcZPZ37i8ZGPE54ZznXB12GuNWeo21AOZhykulat+DkjYAYTvCeQWpKKjbmNydMf6TGSwxmH+fLUl4A6QqhLVwX428C/8djOxziccZjZgbMBNXTy8oGXeW7sc7hbuwNq2uvjOx9nf/p+U+G5uwfczaH0Q2xL3sb25O30derLDSE3tPrzaw1S0CVXNJZmlmxasMkUy+5KPDbysTY5j6eNJ4H2gabJzwC7gAb76yaBaw21eFh78OzeZymuLm52gVldHP2OvneYtgkhmOijrgQOcQgxbT//vrpauZomqVuCTqvjuuDrGOY2jKi8KIIdghnvPZ5tydu48487ichVH4ySUZpBeU25KUQ02nM074W/xzeR31BYWcg1Qdc06Ei9rL3Iq8jj+pDrCcsI40D6AXxtffG38zeF8pz1zkz3m46tzpatSVtNgv75yc/ZnbqbbyK/4fGRjwPw0oGXOJx5mCUDlrAqehU2OhtGeYwi2CGYVadXodfq+WDaBw1GZe3BRX1/IcSXQogsIcSpZvZPEUIUCiGOGX+ebXszJZL2w1xr3q75912BcV7jqDHUoBGaZhe2aDVanhn9DJN8JzEveB7XBF3TZLtrgq/hngH3MNqj4YTpjb1uZKzn2FbVAWopPrY+zPCfAajzIY+NeIz0knRGuo9kVsAsUx39kR7qxPZE74kAvBv+Lo4Wjo0Wy9WlDo7yGGUaZdSteXCzcsNCa8Eg10GYacy4pc8tbE7YzIH0A+SU57A+dj1mGjPWxKyhpKqEakM1e1L3MC94Ho8Of5SV16xk+YzlmGnMGOY2DI3Q8MbkNxqE7dqLlnjoXwPLgG8u0Ga3oihNf/oSiaTTGec1ju+jvsfT2vOCdUQm+05msu/kC56rv3P/JmPr/Z378+nMTy/b1othpjFjUf9F3NXvLkB9YMq2pG0E2AeYso16O/Vmw/wNRORG4G3j3WikMMpjFJmlmbhbudPLsRdJxUkmQdcIDU+Pfto00vj7oL/zZ8KfPLf3OQLsA6g2VPP6pNd5YtcTrItdxwCXAZRUlzDeezygzofUsXToUuaHzjcVmGtvLiroiqLsEkIEdIAtEomknRjhPgIzjVmD+Hl3p25U5WHtwf8m/c80T1GHn52fKUXzfG7ufTM3974ZUOcmwjLDTN49NMwk0pvpeWnCS/x71785lHGI60OuZ3bgbFadXsXXp75mduBsNELTKCMIwFHfdEpreyFasozWKOi/K4rSqJsRQkwB1gApQBrwmKIoTZamE0LcB9wH4OfnNzwxMbGpZhKJpB1YEbECP1s/U8xcolJrqKWitqJFq6cVRTF1JOGZ4SzatAiN0DDAZQDfz/m+vU0FQAhxRFGUJrMA2iJ/JhzwVxRlMPAB8EtzDRVF+VRRlBGKooxwdW06vUwikbQPi/ovkmLeBFqNtsWlMOrPtQxzH8ZVfldhUAymTKLO5rIFXVGUIkVRSoyv/wB0QogLJ2xKJBJJD+DR4Y+qz98NmN3ZpgBtkLYohPAAMhVFUYQQo1A7iZY//FEikUi6Kb52vqy+dnVnm2HiooIuhPgRmAK4CCFSgOcAHYCiKMuBG4EHhBA1QDlwq9JdHn8ukUgkPYiWZLncdpH9y1DTGiUSiUTSichqixKJRNJDkIIukUgkPQQp6BKJRNJDkIIukUgkPQQp6BKJRNJD6HaCnlFYwZojKVRU13a2KRKJRNKl6HaCHp6Uz//9dJzYrKaf6i2RSCRXKt1O0Hu5q08DOZNZfJGWEolEcmXR7QQ9wNkKc62G05nFRKUXMfzFv4jOKOpssyQSiaTT6XaCbqbVEORqzZmMYrZFZ5FbWsXXexM62yyJRCLpdLqdoIMadjmTWcKRxHwAfj2WRlFFdSdbJZFIJJ1LtxT03h62pBaUcyg+j0E+9pRX17L2SEqTbTedymD8a9v4/URak/uj0ot4fn0EBoOsJyaRSLo33VLQQ93UR02VVNZw5xh/BvvY893BJM4v8vjt/gTu/+4IGUUVPPdrBIVljb34n8JS+HpfAol5ZR1iu0QikbQX3VLQe3vYml6P8HfkjjH+xGaVcDA+j3+tOsbdXx8G4Jv9iQz1c2DNA+PIL6virb9ONzpXZHohAKcz2iZrprSyhpT8jusc1oankFlU0Wj78eQCsprY3hVQFIUNJ9LlWoJO5J8/hPPcr6c62wxJG9MtBd3X0Qq9ToOztTmBLtZcO8gLO70Z/1l7knVHU9kWncXBuFxiskqYO9CTIb4O3Djch1WHk6mpNZjOoygKkWlqhkxLBP10RjEJOaUXbPP+1hiu/WAPtR0QwskqquDR1cf5aHtso32LvzrEO1vOtLsNl8KZzBL++UM4f5xM72xTrlj2n83lYHxeZ5txRXLtB3v4Yk98u5y7Wwq6RiMYGeDE5N6uCCGwNNdy0whf4nNK8XWyBOA/604CMKW3GwCjAp2prDE0CK2k5JdTVFEDXDivvarGwL9/PsHV7+7iri8PXTDeHptVQn5ZNWezGy58+isyk1nv7qK8qu280ihjJ7TzTHaD7aWVN
eSXVXfZxVd1I5j0wouPIA7F53HP14eprtcRSy6PwvJq8kqrSMoraxSmlLQvxRXVnEwtpKqmfb7P3VLQAb5cPJL/LRhk+nvR2ABC3Wx4/9ahDPF1IC5bFfdgV/Xhr32MYZr6nnhkuuqdu9tZcPoCgv5XZCarwpIZG+RMUl4Zu2Kym22bkl8OwLHkggbb1x1NITqjmANxuSiK0iZZOdFG+xNyyxqMHDKMoZb4i4wmOos0o5BntEDQ/4rMYGt01kVHRpKWk5ir3suyqlpySqo62Zori+Q8VR/8nKza5fzdVtB1Wg1m2nPm+zlb8dejkxnq58g1gzwBmNrbzfSU7hA3GzQCousJekRaERoB1w7yIj6nlMqaWpMHXWtQOJWqxtdPpBag0wo+WzQCFxtzvjuQZDpH/dCKoigm7/N4PUGvNSjsjVUfs7rjdBarw5IZ+dIWkls4EWswKKw8lERBWcN/vtMZxVjqtAANOplMo1DmlFRRWN710jnTC9QvdUa9GP+GE+k8uvpYo7Z1nVJXHW10RxJyz33vkvJa11FKj/7ySDbqQ10koa3ptoJ+Ia4b4kWwqzXzh3qbtul1WgKc1QVJdUSmFRHkasNgXwdqDQpPrjnJ4Bf+ZE9MDi/+Hsk1H+zhZEohp1IL6e1hi42FGTeP8GVbdCZns0vYdzaHIS/8yU9hyYA6lC01dgj1PfRTqYUUlldjZa5l2+ksPtkZR2WNgTXhTadans/R5HyeXHuSv60Io7LmXMgmKqOY0UFO+Dtbse5oKv/bFE10RlEDoexoz1ZRlEbhpvOp88zrT9quO5rK2vDURl57nBT0Nqf+dyIxt+UT+CWVNYx5dSvrjrbseytpTJ0T5+soPfQW42arZ+v/TWGon2OD7b3cbRuEViLTCunnaWfKmll3NJVag8K934Tx9b4EALZFZ3EqtYiB3vYALBzrj52ljts+PcA/vg+nuKKGZ3+NID6n1BRuCXGzITqjmJySSqIzitgTmwPAvRODSM4rJy6nFFu9GWvCU1qU/340qQCAsMR8nlqrZiZU1xo4m1VCHw87pvZ242hSAR/tOMs3+xMbCHpbhV1+O55GVvHFQySbIzKZ/tZOYrOaD2GlFTb20KOM4aNDCecm6mpqDSQZBSf2Ip1EZ1JZU8tT6052aHbT5ZCQW4qztTlCQFIr0nW3RmWSWVTJ8eTCdrSufVEUhS/2xDebAbbvbA63frq/3WLcKfnl2FiY4WCla5fz90hBb47eHrYk5JZSXlVLVHoRaYUVjAhwJNDFGp1W4Gprwa//HI+1hRkjAxzp52nH6rBkCsurGejtAICnvSWr7htrOueq+8ZgbqbhyTUnTP/Qcwd6UmtQmPnOLma9u5vlO8/S19OOG4f7AGrM/tlr+pGcV86B+NyL2n00uQBvB0senBbCmvAUdpzOIj6nlKpaA308bHnkqlC+WDSCAd52xGWXkFlYgZW5Fo1QPdzHfzrOV3vVWfXqWkOrF1GlF5bz4I9H+Wj72Yu2DU9SV++eSGn+n75uMjS7uJJag0JhWTWpxjDMoXr3IyW/nBqjrV3ZQz+eXMgPB5P49kBiZ5vSIhJzywh1t8HTTm/qMFvChhNqVlKa8bPqjpzNLuHF3yNZE57a5P6NJzM4EJfX6lBUS0nKK8PH0dIUCm5ruqegK4r600p6e9iiKKo4rA1PQacVXDPIC51Ww8vzB/LZXSMY4G3P9scm88O9Y5jWx80kNHUeet15Nj8yiU0PT2J0kDP3TAjkYHweJ40x97oYvgBuH+1HaWUNM/q64etkxQ3DvHn86j5cM8gLWwszbv/sICNe+otX/ohi/9lc4prwRI8lFTDEz4Gl00IIcrXm2V8jOBCnCl8fT1scrMyZ3tedvh52nM0uJaOoAi8HS3wcrdh8KoOfjqTwyc44DAaFOz47yJNrT7TofqXkl2EwKKYSC9uisy4aQ41IU+9B3VzF+dkpiqKQXliBrYUZBgVySiqJMhZXs7Ew43B8vqlt3ehisK8DZ7NLWtURfbrrLM9eYp51Ta3BlM5aWVPLplPpF3zfMcbRyJ8RmZcUY94dk90oy6ol2VDhSfmXlP2TkFNKgLM1fs5WLV5QV1xRzQ5jNlXdCKs5jiUXsON0VoPw4IX4ZOdZ7v/2SIvaXi5R6ep9Tm5mNHXK+P1tzcilNSTnleHbThOi0B0FPeIXeMkN8uJafWhdaGXf2RzWHU1jam83nKzNAbh5hC9DfB0AsNXr0Gk1TO7tCoBOK+jlYdPgXI7W5njY6wEYH+IMwLrwVGwszAhxs+G7e0az4aGJvDJ/IAf+M52l00IBePvmIdw43AdLcy1fLB7JYzN7McLfiS/2xHPbZweY9tZO/rP2hGnRTVZRBakF5Qz1dcDCTMtL8waQlFfGs79GoNMKglzO2RXsZkN2cSUxWSV42OkJdLE2hZgyiir4Yk88hxLyiEhrXJ2yqsbQYKFPQk4pU97YwXcHEwlPLADUL/mFQjiKonAqVT13dEYxx5IL6P/cZo4mnRPp3NIqqmoMDDbe64zCCpN43jjch9OZxeSXqpO/dfHzGX3dqKg2mDrXlrD+eBo/HkqirKqmxcfU8d2BROa8v5tTqYWs2JfA/d+FE24MezVFTKbaCcfnlJpGErUGpdEkdlNEZxSx5KvDvPJHlGnb4YQ8Bj6/+YJVRN/56ww3fLSP97bEmLZtOpXOwi8OXnDBVlFFNbmlVQS4WOPnZNUi4correLTXXFU1agjwrSCC4feHvjuCIu/OszE17dfNExXa1BDIJsiMsgpqSS/tMr0fWgP6rLcmkpIqKk1mEJ/rZlbaClq0kR5u2W4QHcUdL091FZBcUarDw1wtmaQjz2vbowmp6SSBcYQSHMM8XXA1sKM3h62WJhpm203yMcBK3MtaYUVpuHUhFAXk+C72ekxN2t8q0cFOrF0WijLFw5n77+n8cPfRnPfpCB+PJTMLZ8eoKiimqPGydWhfg4AjAtxYd0/xvHM3L68edPgBucNdlXFPS67FHejoANc3d8dCzMNr2+KBjAJ46ZTGaZh9JNrT7Dwi4Omc/18JIUag8IvR1MJT8o3zcpvi85q9j6k5JdTWF6NuZmG0xlFbDyZTlWNgeU7z4Vq6iY9695PZlEFUelFuNiYM2egOrIJM44I4nNKsLfUMTLACWh5HN1gUIjNKqG6VrmkxTMbT6nfre8PJrLysDrhfSKloNn2sVkleDuo92dzRAY1tQYWf3WIGe/suqCXWjcRX2NQOJ5cYPLuVx9OpsagcDCuadu/PZDIe1tjsLEw44dDSSYB/3RXHLtjcvh2fyJf7Y1n6At/8u+fT5Bez6OuC7EEOFvh72xNdnHlBTu9EykFjH9tGx9si2WwrwNzB3qSV1rV7AiisLya9MIKpvZ2Jau4ku0X+L4AHIzPJau4ElDXHLy4IZLrP9pLbknlBY+7VOo6yaY6sricUiqq1RFPYm4ZWcUVLNsWc0mLBGtqDbzyR1SDCeickirKq2vxdWyfDBfojoJuq/7TU9z6VYZajWDVfWO5bZQvg3zsmWpcdNQcOq2Gp+f2ZenU
kIu2qxMdn0v8sDzs9YwLceGpOX1ZfucwItMKufPzg3x3IBGdVtDf61zIZ6ifI3+bGMS8Id4NzlGXc6+ez4JQd1Xg75sUxLQ+btQYFPQ6DQVl1ZRW1vDe1hhe2hCJoijsPJ1NWGI+heXVGAwKa8NT0GoE4UkFnEotZM4AT0LdbPjlWCofbI0xTXquP57G1ig11FDn+c/o605mUSW/n0hHCPgzMtPk2dfFX+tGQ5lFFUSmF9HX045BPvZYmGnYa5xEjs8pJdDFmhBj7Z6zLYyjJ+eXmf4x98bkNNiXkFPKplPNf3fyS6s4nJCHhZmG1WEpxGWrdp9MKSQ5r4zbPj3QqNRCTJaabTTE14FvDyTyzx/C2R2TQ3ZxJXvOu3591oancCy5gPEhzuSXVZOYW0ZFdS2bjB1KXQjvfH4OS2awjz0f3zmMvNIqfj+RTnJeGeFJBeh1Gt7dcoYXf4/E1daCdcdSeeWPaNOxdZ+Dv7O1yVNszhvNKank/m+P4GRtzi//HM+6B8bhY+zYmwu71I1Q7hjtj6uthSldF9TO/Pz4+2/H07Ey12JlrmXn6Wz+jMikqsbAj4eSaIpPd51l/9kLzzulFZQ36MTqUxdySc0vp7yqljnv7TZ9H+rChdbmWpLyylh9OJk3/zzDseT8Js91IY4lF/DprjhWGzPgoH7KovTQz2Hrrv4uybykwy3Ntbx6wyDWL53QpNd8PreO8mPWAM+LthsbrIZd6jy1y2HWAE+W3T6M2KwSdsfkMCbIGb2u+RFCHX5OVui06mSLh52eBcN8+OHe0Qz3d+KmET7otIK7xwcC6pc+IaeU9MIKdp7JJre0CkVR47L743JJK6zgkelqmKjGoDDUz5Gr+rlzKrWIt/46w3/WniQms5iHVx7lnhVhXP/hXv6KzESrEcwb4gWoI4G7xwei02i447MDzHp3FxuMy/0Hetuj1QhS8suJySyhn6cdep2WMUHOppz6+OxSglyscbaxwM3WwpQtdDHOGEMgjla6Rse8/dcZ7v8u3DQyOZ+t0VkYFHh6bl9qDQo2FmaMC3bmRGoha8NT2R+XaxLcxNxSiiqqySyqJNTNlqfm9MXRypzNEZncMdoPe0tds9epy7bo42HL03P6AXWx52yKK2twsNKZ1kGczig2ee+F5epKwym93ZgQ4kKomw2f7jrLdwfVCdlltw2jrLqWUDdb1v1jPHMHerIvNsd0fN2iIn9nK/p72QHnRkTn89afp8kpreKThcMZ4uuARiPwsjcKej1hziqq4MXfI9kXm2Pq6EPdbRgf7My+sznGeZNyrvlgN7d+esDk8ZZV1bDxVDpX9XVnRIATa8JTKKmswdXWgm8PJDaaH8gpqeTVjdF8tKNxqYs6fgpLZtpbO/h7EzH5ogp18t3f2Yoag8LW6Ewi04vYHq1+306lFqHXaRgb7EJibqkpzHYovvWCXve9q5++bEpZlIJeD70DmOkvyUNvT8YGqYLu00b5pVf39+Dk81cT9cIsViwZ1aJjzLQaApxVL93dTo9ep2VcsAsA0/q4c+zZmUzro45KjiYVUG4cqi/bdu4f5HB8HisPJ2OnN+PeSUH09VT/6Yf5O/Dw9FB+WzqB/8zuw+GEfB788SiWOi0vzutPXE4pa8JTCHG1MXnfADeN8OFfM3rh72xNYXk1vx5Lw1yrwcUo0qvCkqmqNTDGeP8m93IlLruUvyIzSSusoL9xMvqusf7sOJ3NyQtkz9RRN8F42yg/ojOKCU/Kp6iiGkVR2G+cTH7i58bPpc0orODXY6l42OlZOMafyb1cWTTOn9GBzpzNLuHX42pmxO6YbA7E5TL5jR28tlH1fkPdbBgV6MSmRyax599Teen6AVzd352/IjObDLscjM8jOqOYJeMD6OVug6VOy7HkAtaGp+BiY87to/yIySphS2QmV7+7i++MGTSH4vMwKKoDIYTgiVl9iM8p5ZOdcQz3Vzvd1X8fy/f3jsbawoyxwc7kllaZOrmE3DLc7SywMjcj0MUaXydLdp5ueuXz7pgcpvZ2ZUC9hAAvo8OSXlBBYVk17/x1hqlv7uCLPfEs2x5LbFYJFmYafBytGBfiQk5JFcdTCnngu3BTuYG/IjNIzC3lho/2UVheza2jfBkd6ESNQcHRSsfL1w8gs6iSj3ecbTDJvD06C0VR5xiauqeH4vN4/OcTmGs1nEgpNM3FgDoPUBc/n9lPdQp/OaqW1I42fl9OpRbS19OOIFdrkvPLTXM/h1qQiXY+daPMEymFpg6sbgR7qaP4ltD9BF0IsPW4pBh6ezLIx55nr+nHvKFebXZOrUatU6PRtDzFqS6OXhe/r4+1hZnpH7K+5xqWmI+9pY7BPvZsisjgj5Pp3DLSF71Oy/2Tg5g/1Bs3W7WDGOhjz6JxAbjZWhCdUczCsf4sHBvA6r+PxdNez8RQF1xtLXC00uFuZ0Fvd1semBLMj/eN4fNFIzA30+Bhr0ejEbjb6Skoq2aonwNTjBPQk3qpv/+95gR6nYYFw9Sw0l3jArDTm/HeVnUSsKK6lrXhKby5+XSjsMaZzGK87PWmmPwNH+1j7vu7ickqIbu4koemh2Kh03L/d0corVTjx78eS2XMq1vZHZPDDcO8EUKw4u5RPH51Hwb52KMo6tyEtbmW/Wdz+Xa/KrA/HFRDA3VhIVA7dSEEcwZ6UlxZw+qwFCqqa1l3NMXkpX25Jx4HKx3zhnhjptUw0Mee346n8WdkJreN8jMtdnvh90gA3tkSQ2F5NfvO5mBhpjHNQczo584P944hyMXaNPoaGeCEi40FcM7R2H9WvUcJOaX4Gzt9IQSTe7my72wOp1ILmfD6NlOHmVpQTkp+OaMDnRvcWw97PUJAfG4pc97fzXtbY5gY6sq1g70IS8jnREohwa42aDWC8SGqM3Hn5wc5llzA+7cNxdfJkreNE7rphRV8vWQU44JdTB36rAGeXNXXnRn93Hn7rzPc+cVBk7BujcoyfvYG00R9fbZFZ2GmESy7fRiAKRPsdEYxo17ewkM/HjXeMw8Adp5RzxeTWUy1MbOpv5cdfk5WVNUYyC+rxlZvRlhCfqvi6KWVNRxNUlONSypriMsuITmvjBX7Epg70BMrc7MWn6u1dD9BB7DpeoIuhODuCYG42TYW0o4k2O2ch94UbrYWaDWCfcY4ZJ03PcLfkVGBTsRll6IoCneNDQBg3hBv3rllSINz6HVaHr4qFEcrHfdODAKgr6cde/89jf/M6YsQgoVjA7hvUnCDfNv+XvZ8cudwHr+6t9FGVXSenNXH1C7Y1RpvB0vySqtYMMwHBys1C8lOr+NvE4PYEpXJRztiue/bIzy6+jjLtsfyyKpjDTI7zmSWEOpuywBve767ZzQPTw8lOa+clzaomSQ3DPXmg9uGEpddwn/WqkXcvtmfSJCrNWseGMdjM3s3eL/1PdQHp4dSWlXLhpPpDPZRt5ubaZocRo8PcWGAtx3/75dTDH/xL/616jg3Lt/HK39E8WdkJkvGBZpCaUN9HcgtrcLf2Yp/Tg0xXTMpr4wpvV3JL6vipd8j2ROTw8gApwa
T9CMDnNj22BTmDmocGvR1ssLXydL0eSfklhHofG6uZXIvN8qqarlnxWFS8sv5Yo+aPVbnlY4OcmpwPp1Wg7utnp/CkkktKOfjO4axfOFwbhzuQ1WtgYPxeabOzdvBkhA3GwyKwicLh3PNIC/uHh/ImcwS9Dota/8xjsnGDnywjz33TQri75OC0GgEny4czovz+nMqtYj5H+3jP2tPsDsmm+sGe6ER5zqo6lqDKfa9/2wOQ/0cGBvsjJW51jQa+/FQEkKo6x7s9GYM83PATCOorlXQaQVlVbVsjsiguLKGkQHqyus67hzjT3FlTYOMo/TCcv7x/RGyixtO3B5JzOPaD/bw5Fp1ovvvk9X/jaPJBbz4eyQaIXh6bt9Gn1Fb0j0FvQt66F2Fm0f48sSs3rjZWjS530yrwcNOT05JJeZmGm4wesDDAxwZYZzYnTXA46JxvjtG+3PkmRkmTxDUKpha42ji0Rm9uGdCYKPjpvZx49rBXiZbH7kqlNFB57xAIYQpXXTJ+IAGx/59chBzB3nyv02n2R2TzSvzB/LtPaPIKak0lV+oNailB3oZJ4QnhLrw4LQQPO317DqTjYedHn9nK8aHuPDw9F6sP57G6rBkjiTms2CYD8P9HRuNiFxtLfCy1xPiZsPto/1M7/Gl6wcyvY8bQ3wcTNvqo9NqWPeP8Twzt6/qcd48mKoaA5/uiuPawV4snXZusn1ssDNajeCV+QPR67R42etNKbXPXtOPxeMC+OlICjFZJab5mpYyLsiFA3G5FFVUk1NSib/Luc92bLAzOq0gs6gSH0dL/jiVQX5pFQfj8rDVm9HHw67R+bwc9OSUVOFuZ8HM/qq3OyrAyTQnFVpvtPLV4pH8+a9JXG1sd9soP56Y1ZufHxhrGk2C+r18ak5fAlzOjR4Wjg1g35PTTJlfpVW1XD/Ui0E+Duw9m0utQeFfq44x9/09rA1P4WRqIWODXdBpNYwKdGLf2VzTSG72AE/W/WM8yxcOx0yrwdsY9qiz/0tjOdtxwS74O6k22FqYcfsoP0AN59Txyc44/jiZ0WDidtOpdG799AAJOaX8djwNCzMNNw33xVZvxntbYvgzMpOHrwo1jZDbi/bz/dsTW0+I3drZVnRJ/J2t+ceUC2fleDtYqpNDTlbM6OfO9weSmNnPA3c7C6b2duUh42ToxWhNKKgppvd1Z3pf90bbH5keyvQ+boS42TbYbmGm5YNbh9LP044AZ2vmDvJEURSG+Tnw0Y6znM4sVlfQ1hgIdT93rJlWw22j/Hj7rzOm2DOoHcTKw0k8ZfTSrxvcfLjs5fkDsTTXYqfXMTLAkeKKGgZ42/HxncMxXGAxkU6r4W/GUQyoo5QtUZncNymoQScwuZcr4c/MwN64JFwIwZTerhSV1xDkasNz1/bnusFebDiRzk0jLpxuez7jQpxZFZbMr8fUmHFAPQ/dxsKMiaGuZBVX8NoNg7jmgz38eDiJg/F5jApwarKj8nSwhKQC5g/1Me23NNcyMsCRvbG5puwqaDwBqNdpL/r9rI+1hRlPzemLp72ePyMyGRfswpHEfJbvjGPeh3s4lVqEtbmWZ345hUGB8cbOblywM6/8Ec3/Np2mqKKGW0f6MtDn3EjLz8mKxNwybhnhy4YT6YQnFdDHwxZXWwtqag2YaQRD/BzwdbLCz8mKVYeTuX20HxVVBlPmyuqwZJZODUGjEbzzVwzBrjasvG8MhxPyqaypxdJcy2AfB/bE5jClt6tpNNuedFNBd4eqYqgsAQubi7eXNMDLQQ3HBLpY42lvyeZ/TTLt+6qFE7DtiZudnunNhIw0GsE/66WRCiF4+KpeLPryEL8eTSPAxZqZ/dxNMfk6bh3py1d7402eIqji8shVofx7zUmGGf95m2Nqn3Mprh/fMRzFeG1zs9Z1ar09bBs8cav++7A/r77H2zcPaTApONTPsVF9opYwpZcbWo0weaH1BR3gozuGoSiqKA/3d+R/m9Qne902yrfJ8/kYvcwbhzdMm50Y6moU9Mbv73JZMj6QJcY5gptH+BKbVUJGYQX/ntUHFxtzHv9ZnXMZYpxbmNLbjVc3RvPl3ngCXaxNMfo6ApytORiXx6hAJ3ydLEnOKzclEJhpNSwZH8Bwf3XE+v+u6ce934Tx3K8R6HVayqpquX9yMMt3nuVAXC6+Tlaczizmmbl9cbAyZ0a/c07KnIGeFFdU894tQ5vsHNuabiroxlhhSaYU9EugbthXt/CouzO5lyvHnp2BnV7X7KjBzU5P+P+b0aiGxoJhPuw8k8315+X0XwhHYyikI2iLmh/2VjpGG0MQQIMYMdAgJfaThcPZeCqD7KIKbhzetKAvHOtPX0+7RiOoRWMDCHKxbhBKaQ/8na35ZOEI0981tQY+3nmWIBdr09xCL3dbDj99FVlFlbjZWTT6XvxjajBzBnqi12np7W5Lcl65acU3wNNz+5lez+jnzuJxAaaCfVf1deeRq0L54WAi3x1MNE0cT+vTeF3L7aP9uH20X5u994vRTQXd6GUVp4NzcOfa0g2pE/SAHiLogGny9EI0JY5mWg0f3TG8PUzqUszo586+s7m42lpgbdH8v72LjQULx/hf8Fw+jlZNpudammtNMemOxEyrYd0D49GcNyPoYmPRYI6nPp72lngac+oHGcMiowKdmmwLqpc+s7871uZm9PW0w9xMw6JxAXywLZZjSQUEuVgT1M4dWUvoppOidatF5cTopRBkXFHa1NBf0jO5yjhXEejcczrx+thb6bDVX1pJ2vsmBbH5kUkXPF6rEYwLdmGwr4Np8nfptBB6uduQVljRpHfeGXRPQbcxxqikoF8SY4Oc+W3pBIZdQjxW0j3xdbJici9XxoW0LkPmSkCv05py81uDhZmWt24agpe9nuuHtjxk1550z5CL3h7MLLvcatHughCiwYy/5Mpgxd2dP+Hd0xjoY8++/0zvbDNMXNRDF0J8KYTIEkI0WVxaqLwvhIgVQpwQQgxrezMbXVSNoxd0jwcKSCQSSUfQkpDL18CsC+yfDYQaf+4DPr58s1pA8DSI+g2i/+iQy0kkEklX56KCrijKLuBCRaXnAd8oKgcAByHExcsTXi5Xvwxew2DtvbBvGVS0X1F8iUQi6Q60RQzdG0iu93eKcVv7Brh1lnDrD7DuPvjzadjyHHiPgDEPQHk+7HgVnIIheCq49QP3fuAQQKPcJolEIukhdOikqBDiPtSwDH5+bZBsb+cJi36D1CMQvUENwfy0SN3nOwYqi2H7y+fa66zAvb8arul1Nbj2AXNrqCqD4z+C11Dwbv8pAIlEImkPREseaiuECAB+VxRlQBP7PgF2KIryo/Hv08AURVEu6KGPGDFCCQsLuySjm8VQCyd/gppKGLpQ9cYrSyD7NGRFQuYpSDsGKYdAMRbPt/eF6nIoy1HrrF/7PviOBHs/0HbPJCCJRNJzEUIcURRlRFP72kKx1gNLhRArgdFA4cXEvN3QaGHwrQ23WdiAz3D1p46SbEjcCzkxkHMGairUDmDX/9QQDoBLb7jhE9Vrl0gkkm7ARQVdCPEjMA
VwEUKkAM8BOgBFUZYDfwBzgFigDFjSXsa2GTau0P/6xtsDJ8LZbeqCpV1vwKdT1RDM4Ntg+GLQXtpKNIlEIukIWhRyaQ/aJeTSlpTlweEvIPp3SD+mTrDO/0QNx0gkEkkncaGQi0z5aA4rJ5j8ONy3A25fDYZq+GoWHOiYNHuJRCJpLVLQL4YQakbM33dD6NWw6UnY9BQYDBc/ViKRSDoQKegtxdIBbvkWRv0dDnwIa+5Ws2kkEomkiyDz8lqDRguzXwd7b/jrWTXP/fbV6naJRCLpZKSH3lqEgPEPw9y3IXYL7Hitsy2SSCQSQHrol87Ie9QVqrveAAc/GLawsy2SSCRXONJDvxzmvAlBk2H9Utj8NHRSCqhEIpGAFPTLw9wK7lgDI++F/ctg95udbZFEIrmCkSGXy0VrBnPegIoC2PaSWgNm8C2dbZVEIrkCkYLeFggB8z5USwb8+k+1CmTgpM62SiKRXGHIkEtbYWah5qk7B8PKOyD9RGdbJJFIrjCkoLcllo5wx89gYQvfLYC8uM62SCKRXEFIQW9rHHxh4Tow1MA316thGIlEIukApKC3B669VU+9NEf11KvKOtsiiURyBSAFvb3wGQ43r1CfkvTX/+tsayQSyRWAFPT2JHQGjF0Khz9Xn3kqkUgk7YgU9PZm+rPgORjW/l19tqlEIpG0E1LQ2xszC7jle/X3j7dBeUFnWySRSHooUtA7AgdfNUe9IAnW3AOG2s62SCKR9ECkoHcU/uPUEgGxW2D7y51tjUQi6YHIpf8dyYglkBoGu9+GoClg5QK2HurzSyUSieQykYLe0cx6HRL3wzfzQDGAUxAs2agKu0QikVwGMuTS0VjYwM3fwIAb4ar/QnGmuqK0orCzLZNIJN0cKeidgccAWPAZTHgEbvsRcs7A74+qD51O2CsnTSUSySUhQy6dTdBkmPIf2P4SxG2HslzwnwDzP1YfbSeRSCQtRHroXYGJj0Kv2eDSC6b9P0g/Bh+Ogb3vQW1NZ1snkUi6CdJD7wpotHD7ynN/D7wJNj0Jfz0LyYdgwReg03eefRKJpFsgPfSuiKO/Gluf9TpE/w4rroXMiM62SiKRdHGkh96VGXM/2LjBhv+D5ROh92zodbX6II3KYrB2g5Dp6iPwAMrzwdxWfc6pRCK54pD/+V2dATeoi5D2vAPHV6oee30G3QJz34bSLFg+CZwC4IbPwa0PpIbD8R9hxgugs+wM6yUSSQciBb07YOUEM1+E6c9BUQpUFKn57Cd/hh2vQlYUmOlVT70oDT6dDBP+BQc/gfI8cA6F0fd19ruQSCTtjIyhdye0ZuAYAJ6D1BWmk5+A21dDbiykHILZr8MD+yFwkir0Gi14DIS970JNVWdbL5FI2hnpoXd3QmfA3ZsgJQwG36Z66bevhshfwK2/WuHx+wWw+T9qeMZ3VNtcd/dbcOhzmPsW9JnTNueUSCSXhVAUpVMuPGLECCUsLKxTrn1FoShqHfYzG9W/r30fhi+6vHMe+gz+eAz0DlBRANe8qxYek0gk7Y4Q4oiiKCOa2idDLj0dIdQc98fjIHiaKsQ/LYG3+0PivtafL+kAbHwCes+BRyPBewTsX6Z2HBKJpFORgn6lYO2sLlCy84KYP8FQA6vuhPyExm1Lc8/lvRsM5+LvFUWw9l6w94X5n4C5NQy7S43hp4V32FuRSCRNI2PoVxJWTnD/XmM2TDp8Pg0+nQJj/gGj7lXz28vy4MuZqkj7jYX8RPXY+3aoK1cLU2DJJtDbqdv7zVO9/hM/gffwznpnEokE6aFfeVjYqJ61Swgs/gN8x6hPUHpnIKy5F765DgqSYdxDUJqtZtSU58NXs+DESpj0OPiNPnc+SwfoNQtO/SwzaSSSTqZFgi6EmCWEOC2EiBVCPNnE/sVCiGwhxDHjz9/a3lRJm+MxQI2v378Hes2EpP1Qkg03fKrmvT94BG5fpaZD5sWp8fJJTzQ+z4glqvhvef7ctoJkKMnqsLcikUhaEHIRQmiBD4EZQApwWAixXlGUyPOarlIUZWk72ChpbzwGwo1fNr9/2F2qJ+43tumyAsHTYNTf4cCHYKiG6nI49gOY26jPUR1087nyBE1RUQgWdhduI5FILkpLPPRRQKyiKHGKolQBK4F57WuWpEshhBort3Frvs3MFyHkKjj8hVpuYPhitfzAuvvgp0Xq5GtdKWCDARL2QFUZxGyB/wXDx+Mg7Es1b14ikVwSLZkU9QaS6/2dAoxuot0CIcQk4AzwL0VRks9vIIS4D7gPwM9PPryhR2FmAXeuUcXaUANm5uqTl/a+B9tfgchfQaOD/ter5QkS94Kdtxqfdw5Rz/H7v9Tffa+Fac9CTQXo7aEwGXa8po4SRt6rPhSkrTEY1LCRrXvbn1si6SAuurBICHEjMEtRlL8Z/14IjK4fXhFCOAMliqJUCiH+DtyiKMq0C51XLiy6gsg+rQp4ZiScWAUImPAwRP2mVo1c/Ifq/WdFqcK/911VzOtj6wW1leoTnQYsUMM/yYdUsdeYqemYhhoQWrB2Bbe+amdQlKp2NjbuasfhGAglGXD0ezVjx8oRJj6mVrQ8tQbG/lN9gpSFTWfcKYnkolxoYVFLBH0s8LyiKFcb//4PgKIorzbTXgvkKYpif6HzSkG/QqkuV3/XVX80GEBzXuQv9yzE7wRLJ3UlqqEGBt8OQgP73oed/1Nj9bZeak0bQzUUp6sjAEON6mlXlzV9fVHvWraeUJwBOiuoKgb/8WrHY6ZX5wWG3qmWVDj6HdRWqR1CnzmgswYLW/Xaejv1WbAlWWonZG4Nrn3Awbd190VRVLvN9Oq55XyCpBkuV9DNUMMo04FU4DBwu6IoEfXaeCqKkm58PR/4t6IoYy50XinokkumMAVqq9VCZU0Jn8GgZuVUlaiLoGor1bz73BjIiVGLlg1dqIpu0gH4dak6RzDtGVXAT62BiHWqJw/Q5xpV/JMPQsaJltnoPVw9rrb6XPgpL17tfGqr1ZFFfqJqo99YdSSRHa0ea+Wsbht9v1p7J+0YnN6glkMuTFH3u/dXbQ6c3HiiWlHg7Da1c3LtC9WlUJwJ/uPAbwxodQ3bFyQDinyGbTfhsgTdeII5wLuAFvhSUZSXhRAvAGGKoqwXQrwKXAfUAHnAA4qiRF/onFLQJV2a2mpVFG09wHPwue1leerv8nxVoKtKVIG08VAfE1hRqArvka/VDqQ+Nu7GkYlQOxMHfzUclLBHHY30maOKcXY0xG6Bkkw1hKTUqmElr2Fg76OGnVLD1VGFlbO6lqC2Sh39VJep+wsSm35f5rbqHIT3MDWzKO2YOomNohZ3cwwwHp+sjhRs3NR74NJbXW2cE6OG0Goq1A7Fvb86wgHIOaMuSAuaAmaWao1+M716HY3mXHkIIdQRTVUpOAWqdtdWqSGyplAUdQRWlKaOsDRata1jwCV8sN2fyxb09kAKuqRHoyhQWaSGZ2qrAEUNx7SU6nI11FOcDh6DVJG0dKi3v0IV/Yi16tyETq9eS2cJ5lZqyGjgTWqnY2Gjdhjxu9SyD/E7z5V80JrD8
CWqyIZ9qdpqbqOObKpK1U6ltrKhbUKrdjCm7UIV7hpjOM3cVhXdigLjNYxzGKXZ6nvwGqraXlsF7gMh76zagfa9Bqxc1LCZpYMaDss5o3YilUWN75HnYHUC3WOQWpaiJEM9piRL7XBrKtT5lMpiSD+uPoTdzkvNpPIZAW791HuSGaGOYoKnqQ9rD5yodrS11eooKnaLen88BqlrNyxs1VFgaphqq88oyDwF+fHqvfEcrF4nP1G1v7JI/dtMb+zUFHXE5+jf8u9D/dsvBV0ikTSgvECN/Vs5nQvB1NaowiU050JZiqKOSrIiVJF06QVOwapYntkMxWmq8FeVqnMKToEQ8QsoBlUADdWqyBZnqN5+QRKkHFaLuzn4wulNqkhqzODEavU4jZl6LRs39XouvcC1t9rJgCqiBUnqyuX04w3fl5le7TysnNTXxRmqOHsOVkc+pblg5wlpR9XzWLmooxWhVTu6urkXjZm6vxFCtUsxZkWB2inWnrdKum5k1RzjH4EZ/23551X/1FLQJRJJt0JRWjYxXJanhoCsnFQh19u37LjSXNWjd+17blK+ukL12FOPqAKts1Q9/OCpqminn1DnUApT1P1BU9XOMGk/eA4BryFqJ5l8SB3ZuISqTwuzdFDnSGprQKB2mA7+4Bx8SbdGCrpEIpH0EGQ9dIlEIrkCkIIukUgkPQQp6BKJRNJDkIIukUgkPQQp6BKJRNJDkIIukUgkPQQp6BKJRNJDkIIukUgkPYROW1gkhMgGmqkgdEFcgJw2NqctkHa1nq5qm7SrdXRVu6Dr2nY5dvkriuLa1I5OE/RLRQgR1twqqc5E2tV6uqpt0q7W0VXtgq5rW3vZJUMuEolE0kOQgi6RSCQ9hO4o6J92tgHNIO1qPV3VNmlX6+iqdkHXta1d7Op2MXSJRCKRNE139NAlEolE0gRS0CUSiaSH0G0EXQgxSwhxWggRK4R4soOv7SuE2C6EiBRCRAghHjZuf14IkSqEOGb8mVPvmP8YbT0thLi6ne1LEEKcNNoQZtzmJIT4SwgRY/ztaNwuhBDvG207IYQY1k429a53X44JIYqEEI90xj0TQnwphMgSQpyqt63V90cIscjYPkYIsagdbXtDCBFtvP46IYSDcXuAEKK83r1bXu+Y4cbvQKzR/hY8tqfVdrX6s2vr/9tm7FpVz6YEIcQx4/aOvF/NaUTHfs8URenyP4AWOAsEAebAcaBfB17fExhmfG0LnAH6Ac8DjzXRvp/RRgsg0Gi7th3tSwBcztv2P+BJ4+sngdeNr+cAG1EfhjUGONhBn18G4N8Z9wyYBAwDTl3q/QGcgDjjb0fja8d2sm0mYGZ8/Xo92wLqtzvvPIeM9gqj/bPbwa5WfXbt8X/blF3n7X8LeLYT7ldzGtGh37Pu4qGPAmIVRYlTFKUKWAnM66iLK4qSrihKuPF1MRAFeF/gkHnASkVRKhVFiQdiUd9DRzIPWGF8vQK4vt72bxSVA4CDEMKznW2ZDpxVFOVCK4Pb7Z4pirILyGvieq25P1cDfymKkqcoSj7wFzCrPWxTFOVPRVHqnlB8APC50DmM9tkpinJAUVXhm3rvp83sugDNfXZt/n97IbuMXvbNwI8XOkc73a/mNKJDv2fdRdC9geR6f6dwYUFtN4QQAcBQ4KBx01LjkOnLuuEUHW+vAvwphDgihLjPuM1dUZR04+sMwL2TbAO4lYb/ZF3hnrX2/nTWd/BuVE+ujkAhxFEhxE4hxETjNm+jPR1hW2s+u46+ZxOBTEVRYupt6/D7dZ5GdOj3rLsIepdACGEDrAEeURSlCPgYCAaGAOmow73OYIKiKMOA2cA/hRCT6u80eiGdkp8qhDAHrgN+Mm7qKvfMRGfenwshhHgaqAG+N25KB/wURRkKPAr8IISw60CTutxndx630dBx6PD71YRGmOiI71l3EfRUwLfe3z7GbR2GEEKH+kF9ryjKWgBFUTIVRalVFMUAfMa5EEGH2qsoSqrxdxawzmhHZl0oxfg7qzNsQ+1kwhVFyTTa2CXuGa2/Px1qnxBiMXANcIdRCDCGNHKNr4+gxqd7Ge2oH5ZpF9su4bPrsHsmhDADbgBW1bO3Q+9XUxpBB3/PuougHwZChRCBRo/vVmB9R13cGJv7AohSFOXtetvrx57nA3Uz7+uBW4UQFkKIQCAUdRKmPWyzFkLY1r1GnVA7ZbShboZ8EfBrPdvuMs6yjwEK6w0J24MGXlNXuGf1rtea+7MZmCmEcDSGGmYat7U5QohZwBPAdYqilNXb7iqE0BpfB6HeozijfUVCiDHG7+pd9d5PW9rV2s+uI/9vrwKiFUUxhVI68n41pxF09PfscmZ2O/IHdVb4DGov+3QHX3sC6lDpBHDM+DMH+BY4ady+HvCsd8zTRltPc5kz6BexLQg1e+A4EFF3bwBnYCsQA2wBnIzbBfCh0baTwIh2tM0ayAXs623r8HuG2qGkA9WoMcl7LuX+oMazY40/S9rRtljUOGrdd225se0C42d8DAgHrq13nhGoAnsWWIZxFXgb29Xqz66t/2+bssu4/Wvg/vPaduT9ak4jOvR7Jpf+SyQSSQ+hu4RcJBKJRHIRpKBLJBJJD0EKukQikfQQpKBLJBJJD0EKukQikfQQpKBLrmiEWgHSqrPtkEjaApm2KLmiEUIkoOYA53S2LRLJ5SI9dMkVg3FV7QYhxHEhxCkhxHOAF7BdCLHd2GamEGK/ECJcCPGTsTZHXc35/wm1hvYhIURIZ74XiaQppKBLriRmAWmKogxWFGUA8C6QBkxVFGWqEMIFeAa4SlGLnYWhFnWqo1BRlIGoKwvf7VDLJZIWIAVdciVxEpghhHhdCDFRUZTC8/aPQX0owV6hPvVmEepDOer4sd7vse1trETSWsw62wCJpKNQFOWMUB/1NQd4SQix9bwmAvXhArc1d4pmXkskXQLpoUuuGIQQXkCZoijfAW+gPsqsGPWRYaA+HWh8XXzcGHPvVe8Ut9T7vb9jrJZIWo700CVXEgOBN4QQBtRqfQ+ghk42CSHSjHH0xcCPQggL4zHPoFYLBHAUQpwAKlHLAkskXQqZtiiRtACZ3ijpDsiQi0QikfQQpIcukUgkPQTpoUskEkkPQQq6RCKR9BCkoEskEkkPQQq6RCKR9BCkoEskEkkP4f8DGXMhkWX7MfAAAAAASUVORK5CYII=", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAXQAAAEGCAYAAAB1iW6ZAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/P9b71AAAACXBIWXMAAAsTAAALEwEAmpwYAABjcElEQVR4nO2dd3gUx/nHP3NFvfeOJJAoQhQhwPRmbMAFA3GPDe6OjUvc4sRJXOKfe3DciR1X3G3ADQyYYnoTRVShhnrvvd3t74+9OyRUkECd+TyPHt3tzu6+u3f3nXfeeWdGKIqCRCKRSPo+mp42QCKRSCSdgxR0iUQi6SdIQZdIJJJ+ghR0iUQi6SdIQZdIJJJ+gq6nLuzh4aEEBwf31OUlEomkT3LgwIECRVE8W9rXY4IeHBxMTExMT11eIpFI+iRCiNTW9smQi0QikfQTpKBLJBJJP0EKukQikfQTeiyGLpFIJG1RX19PRkYGNTU1PW1Kj2BjY0NAQAB6vb7dx0hBl0gkvZKMjAwcHR0JDg5GCNHT5nQriqJQWFhIRkYGISEh7T5OhlwkEkmvpKamBnd394tOzAGEELi7u3e4dSIFXSKR9FouRjE3cz73fk5BF0IECiG2CCFOCCGOCyEeaqHMdCFEqRDisOnvnx22pJ3EF8fz5sE3Kakp6apLSCQSSZ+kPR56A/CooijDgEuA+4UQw1oot11RlFGmv+c61cpGpJWl8cHRD8ipyumqS0gkEkm7+eSTT1i6dGlPmwG0Q9AVRclWFOWg6XU5cBLw72rDWsPZ2hmAktqSnjJBIpFIeiUdiqELIYKB0cDeFnZPEELECiF+FUJEtHL83UKIGCFETH5+fsetBVysXQAp6BKJpOtJSUlhyJAhLFmyhPDwcG6++WY2btzIpEmTCAsLY9++fc3Kz5w5kxEjRjBr1izS0tIA+O677xg+fDgjR45k6tSpABw/fpxx48YxatQoRowYQUJCwgXb2+60RSGEA7ASeFhRlLKzdh8EBiiKUiGEmAf8AISdfQ5FUd4H3geIjo4+r7XvzIJeWlN6PodLJJI+yLM/H+dE1tmyc2EM83Pi6ata9D2bkJiYyHfffcdHH33E2LFj+fLLL9mxYwc//fQTL7zwAtdcc42l7AMPPMDixYtZvHgxH330EQ8++CA//PADzz33HOvXr8ff35+SkhIAli9fzkMPPcTNN99MXV0dBoPhgu+pXR66EEKPKuZfKIqy6uz9iqKUKYpSYXq9FtALITwu2LoWkB66RCLpTkJCQoiMjESj0RAREcGsWbMQQhAZGUlKSkqTsrt37+amm24C4JZbbmHHjh0ATJo0iSVLlvDBBx9YhHvChAm88MILvPzyy6SmpmJra3vBtp7TQxdq7syHwElFUZa1UsYHyFUURRFCjEOtKAov2LoW0Gv12OnspKBLJBcR7fGkuwpra2vLa41GY3mv0WhoaGho1zmWL1/O3r17WbNmDWPGjOHAgQPcdNNNjB8/njVr1jBv3jz++9//MnPmzAuytT0e+iTgFmBmo7TEeUKIe4UQ95rK/AE4JoSIBd4EblAU5bxCKu3BxdqFsrrObX5JJBLJhTJx4kS+/vprAL744gumTJkCQFJSEuPHj+e5557D09OT9PR0kpOTCQ0N5cEHH2T+/PkcOXLkgq9/Tg9dUZQdQJsZ7oqivA28fcHWtBNna2fpoUskkl7HW2+9xW233carr76Kp6cnH3/8MQCPP/44CQkJKIrCrFmzGDlyJC+//DIrVqxAr9fj4+PD3/72twu+vuhCR7pNoqOjlfNd4OKuDXdR1VDFF/O+6GSrJBJJb+HkyZMMHTq0p83oUVp6BkKIA4qiRLdUvk8O/XexdqG0Vma5SCQSSWP6pKDLkItEIpE0p08Kuou1C2W1ZRiMF563KZFIJP2FPivoCgrldeU9bYpEIpH0GvqkoMv5XCQSiaQ5fVLQzaNFD+Yd5LLvLyO1LLVnDZJIJJJeQJ8W9JUJK8muzGZt8tqeNUgikVxULFmyhO+//76nzWhGnxb0I/nqyKrN6ZtbLVtZX8mymGXSi5dIJP2ePinozjbOltcOegfiiuLIKM9oVq7WUMtDmx/i4+Mf86/d/6KnBlFJJJK+y7/+9S8GDx7M5MmTufHGG3nttdea7N+0aROjR48mMjKS22+/ndraWgCefPJJhg0bxogRI3jssceAlqfR7UzaPX1ub8JR74hWaDEoBu4acRevH3idzWmbuTXi1ibl3j70Nntz9jI9YDq/Z/zOzqydTPaf3KRMZX0lcUVxjPEe0523IJFIOsKvT0LO0c49p08kzH2pzSL79+9n5cqVxMbGUl9fT1RUFGPGnNGKmpoalixZwqZNmwgPD+fWW2/lvffe45ZbbmH16tXExcUhhLBMmdvSNLqdSZ/00IUQlkyXawZdQ7hrOOtS1jUrty1jG5P8J7Fs+jICHQN58+Cbzcp8FfcVt6+/naKaoi63WyKR9C127tzJ/PnzsbGxwdHRkauuuqrJ/lOnThESEkJ4eDgAixcvZtu2bTg7O2NjY8Mdd9zBqlWrsLOzA1qeRrcz6ZMeOqipi242brjZuLEwbCEv7XuJI/lHcLByoKahBl97X5JLk7lq4FXotXquHng17xx+h6r6Kuz0dpbznCo6hVExklSShJuPWw/ekUQiaZVzeNK9DZ1Ox759+9i0aRPff/89b7/9Nps3b25xGl13d/dOu26f9NAB7hh+B0tHqQuzXjPoGhz0Dvzn4H/449o/cs9v97A3R10lL8orCoBg52AA0svTm5wnsSQRgNOlp7vJcolE0leYNGkSP//8MzU1NVRUVPDLL7802T948GBSUlJITFR1ZMWKFUybNo2KigpKS0uZN28er7/+OrGxsUDL0+h2Jn3WQ58/aL7ltb3engVhC1hxYgU2WhvKDeW8dfAt9Bo9ER7qxPghTiEApJSlMNhtMAD1hnpSSlMASC5N7t4bkEgkvZ6xY8dy9dVXM2LECLy9vYmMjMTZ+UxSho2NDR9//DHXXnstDQ0NjB07lnvvvZeioiLmz59PTU0NiqKwbJm6NlBL0+h2Jn1W0M/m1mG3klicyH2j7uPJ7U+SVp5GlFcU1lp1dZFAx0CAJumLKWUpNCjqiiPJJVLQJRJJcx577DGeeeYZqqqqmDp1KmPGjOGuu+6y7J81axaHDh1qcoyvr2+zBaQBVq1qtoJnp9JnQy5n42Pvw/uXvc8or1EsDFsIQJR3lGW/nd4Obztvi0cOZ8It4a7h0kOXSCQtcvfddzNq1CiioqJYtGgRUVFR5z6oh+g3HnpjFoYt5LfU37h0wKVNtgc7Bzfx0BNLEtEKLbOCZvFe7HtU1FXgYOXQ3eZKJJJezJdfftnTJrSbfuOhN8bD1oPvrvqOCPemC8sGOwVzuuy0ZYBRYnEiQU5Blpi67BiVSCR9mX4p6K0xwGkA5XXlllkaE0sSGeQyiFDnUEB2jEokkr7NRSXowU7BgNoZujtrN2nlaYz0HEmgYyA6jY4Pj33IB0c+oN5Q37OGSiQSyXlwUQr6muQ1PLv7WY
Kdgrl+8PXoNDoeGfMItjpb3jz0Jks3LyW+OL5JB6pEIpH0dvplp2hr+Dn4MdJzJN+c+gaB4JM5n2CjswHglmG3cMuwW1idsJpndj/Dop8WAfDj/B8JdQml3lCPXqvvSfMlEkk34+DgQEVFRU+b0W4uKkHXarR8Pu9z4ovjKa0tbZLWaGZB2ALC3cI5ln+M5/c+T0xuDBX1FSxZt4T/zPgPUwM6f4Y0iUQi6QwuqpCLmXDXcMb6jG11f4R7BNcNvg53G3cO5x1mU9om6o31PLXjKXIrc7vRUolE0htQFIXHH3+c4cOHExkZyTfffANAdnY2U6dOZdSoUQwfPpzt27djMBhYsmSJpezrr7/ebXZeVB56RxBCMMprFIfyDuFo5UiIcwg5lTm8uO9F/jPjPz1tnkRyUfHyvpeJK4rr1HMOcRvCX8b9pV1lV61axeHDh4mNjaWgoICxY8cydepUvvzySy6//HKeeuopDAYDVVVVHD58mMzMTI4dOwbQJdPktsZF6aG3l9Feo8moyOBk0UmuDL2S6wdfz9aMrZTVlfW0aRKJpBvZsWMHN954I1qtFm9vb6ZNm8b+/fsZO3YsH3/8Mc888wxHjx7F0dGR0NBQkpOTeeCBB1i3bh1OTk7dZqf00NtgpOeZiXMm+k3EoBj45PgnbMvYxpWhV/agZRLJxUV7PenuZurUqWzbto01a9awZMkSHnnkEW699VZiY2NZv349y5cv59tvv+Wjjz7qFnukh94Gw9yHodfocbZ2ZqjbUCI9IvGy9WJT6qaeNk0ikXQjU6ZM4ZtvvsFgMJCfn8+2bdsYN24cqampeHt7c9ddd3HnnXdy8OBBCgoKMBqNLFq0iOeff56DBw92m53SQ28DK60Vs4Jm4W7rjlajBWBm0Ex+SPyh2UIZEomk/7JgwQJ2797NyJEjEULwyiuv4OPjw6effsqrr76KXq/HwcGBzz77jMzMTG677TaMRiMAL774YrfZKXpq4eTo6GglJiamR659IezL3scdG+7gpSkvcUXoFT1tjkTSbzl58iRDhw7taTN6lJaegRDigKIo0S2VlyGXDhLtE02QYxBfx30NQFpZGj1VKUokEkljpKB3EI3QcMOQGzicf5hndj3DFauvYHXi6p42SyKRSKSgnw/zB83HVmfLyoSVCAQr41f2tEkSSb/kYm79ns+9S0E/D5ysnLhv5H1cPfBqHox6kCMFRyxL2CWXJLM+ZX0PWyiR9H1sbGwoLCy8KEVdURQKCwuxsbHp0HEyy+U8WTJ8CQAF1QW8fehtfkj6gUfGPMKHxz5kbfJapgdOt6xnKpFIOk5AQAAZGRnk5+f3tCk9go2NDQEBAR06Rgr6BeJh68Fk/8msP72eR8Y8QnJJMg1KA/FF8UR6Rva0eRJJn0Wv1xMSEtLTZvQpZMilExjjPYasyiyKa4pJKk0C4EThiR62SiKRXGxIQe8EhrgNAeD39N+pbqgG4Hjh8R60SCKRXIycU9CFEIFCiC1CiBNCiONCiIdaKCOEEG8KIRKFEEeEEM0nGu/HmAV9zek1gNpperag51XlsSphVbfbJpFILh7a46E3AI8qijIMuAS4Xwgx7Kwyc4Ew09/dwHudamUvx9XGFW87b/Zl7wNgbshckkqSqGmosZT539H/8fSup0krS+spMyUSST/nnIKuKEq2oigHTa/LgZOA/1nF5gOfKSp7ABchhG+nW9uLGeo2FAUFNxs3JvhNwKAYOFV8ClBTkLZlbAPgUN6hnjRTIpH0YzoUQxdCBAOjgb1n7fIH0hu9z6C56COEuFsIESOEiOlvqUhD3NWwS6hzKBHuEQDsytwFQGJJIpkVmYAUdIlE0nW0W9CFEA7ASuBhRVHOa4UHRVHeVxQlWlGUaE9Pz/M5Ra/FHEcf6DIQH3sfZgTO4P2j73Oi8ARbM7YCqhd/ME+dStOoGHvMVolE0j9pl6ALIfSoYv6Foigt9exlAoGN3geYtl00RLhHoBEai7A/N/E53GzcuH/T/Xwd9zVD3YZyefDlnC49zfLY5Uz9ZiolNSU9a7REIulXtCfLRQAfAicVRVnWSrGfgFtN2S6XAKWKomR3op29Hh97H7676jvmD5oPgIuNC2/OfJMwlzCKa4q5MvRKorzV5J93Dr9DaW0pRwqO9KTJEomkn9GekaKTgFuAo0KIw6ZtfwOCABRFWQ6sBeYBiUAVcFunW9oHCHcNb/I+wj2C9y97H6NiRCM01BnqsNJY4WrjSn51PscKjjE1YGoPWSuRSPob5xR0RVF2AOIcZRTg/s4yqr+hEWpDyEprxbLpywh0CuTR3x/lWMGxHrZMIpH0J+RcLt3MtMBpgOq9b8/cjqIoqFEtiUQiuTDk0P8eYrjHcIpqisiuvKi6GiQSSRciBb2HiPRQZ2L89tS3LDuwzDIHTEeoN9az+NfFlkFLEonk4kaGXHqIcNdw9Bo9Hx77EAAfOx9uGnpTh86RVJLEwbyDBKYEys5ViUQiPfSeQq/VszhiMbcOu5URHiP47MRnGIyGDp3jeIE6AVhLnatZFVlU1ld2iq0SiaRvIAW9B3ko6iEeH/s4tw+/ncyKTNacXtPicluF1YXMWTmH+zbex97sM7MumGd0TC5NpqKuoskxt/56K28deqtrb0AikfQqpKD3AqYHTifEOYSndjzFgh8XkFeV12T/rqxdZFZkcqzgGA9sfqDJnOu2OlsUlCYLatQaasmtyuVk4cluvQ+JRNKzSEHvBWg1Wj6Z8wlPjnuSpNIkViesbrJ/X84+nK2deXXaq1Q3VPN7+u/UGeqIL45nbshcAI4VHmNDygayK7IpqC4A4HTp6e6+FYlE0oNIQe8luNm4cfPQm4nyimoWetmfs59o72jG+ozFy86LtclrSShJoMHYwES/iQQ6BvLZ8c94dOujrDi5gvwqdSbL4tpiimuKe+qWJBJJNyMFvZdxRegVnC49TVxRHACZFZlkVmQy1mcsGqFhXsg8dmTuYG3yWkAdoDTcfTiFNYUATTx0kF66RHIxIQW9l3F58OXoNDo+PvYx+VX5llWQxvqMBWBeyDwalAY+O/EZXnZe+Dv4c0XoFUzxn8JIz5FkVWaRX31mrvnk0uQeuQ+JRNL9yDz0XoaztTMLBi3gu/jv+DXlVwBcrV0Z5DIIgKHuQ3nv0vfQaXQMdRuKEIJpgdOYFjiNZ3Y9w5b0LeRX5aMVWnQaHcmlyeRX5WOrs8XByqEnb00ikXQxUtB7If+45B/cNOQmdmbtpLCmkBEeIywTfAFM9p/c4nF+Dn4U1RSRUZGBu407rjauxObH8oef/0C0dzT/nv5vsiuy0Wv1eNh6dNftSCSSbkIKei9ECMEg10EMch3UoeN87dVlXI/mH8XDzoMBjgMsXv7WjK1U1FVw2/rbGOw6mDdmvtHpdrcXRVFYlbCKuSFzsdPb9ZgdEkl/Q8bQ+xFmQc+oyMDT1pMQlxAARniMoNZQy4v7XiSzIpO08rSeNJP44nie2f0Mv6X+1qN2SCT9DSno/Qg/Bz/Law9bD2YEzmBawDTenvU2bjZu/JT0E0CPz
/CYW5UL0KTzViKRXDhS0PsRXnZelli7p50nQ9yG8Past3G1cWVm0ExA7WCtrK+kvK6cHxJ/YGX8ym6305wn3zi9UiKRXDhS0PsROo0OLzsvADxtPZvsWxS2iBDnEO6IvANQvfRPj3/K+0feB+Bw3mG2pm/tFjvzqtWpDdoj6Efyj/DEtidoMDZ0tVkXFVkVWZaKVdJ/kILez/CzV8MuZ2exDPcYzk/X/MRIz5EAZJRnkFKWQlZlFoXVhbyy/xX+ueufTUaoFtUUYVSM1Bvrufe3eztt3nXzXDWNBd1gNLQ4J/zGtI38evpXMsozOuXaEpXHtz3Oc7uf62kzJJ2MFPR+ho+9D9DcQzdj7jiNyY2xeL17s/dyovAERTVFpJSlABCbH8us72bxQ+IPHC84zs6snby6/9UOT/HbEi2FXN4/8j7X/HBNs/Onl6UDWOySdA6pZaly0FkPsTphtWWm1M5GCno/w9wx6mnXsqB72HqgEzp2Z+22bFtxYgUGRRXSg7kHqW6o5u87/k6DsYENqRvYk70HUEX1t7S2M1NicmK45MtLyKnMabVMSx56TG4MWZVZHCtsOre7OSMnpTSlzetK2k91QzWltaVkVWTJUFY3YzAaeHb3s2xM3dgl55eC3s+YETiDuSFzW/XQtRotXnZeJJYkAmqI5ljhMXQaHS7WLhzMO8g7h94hpSyF0V6j2Ze9jy3pWxjiNoRQ51DeO/xekwm/8qrymoRKfkv9jcr6SmJyYyitLeXlfS83m6vdnN1SWV9JVX0ViqJwqvgUANsztlvKKYpCern00Dsbc2XboDRYMo4k3UN+dT4GxWBpKXc2UtD7GSM8R/DK1FfQarStlmkclhnvOx6A4e7DifaOZkfmDr6I+4KFYQtZOmop9cZ6ThSeYILfBB6LfoyM8gyu/flaThWdorK+koU/LeSFvS9Yzm325o8XHGd9yno+P/m5ZXATQIOxgcLqQkusv7CmkNyqXEprSwHYkbnDUja/Ot9SWXRU0HMqc7rVq08pTWlxcZLeSOPWk7nCbA9Gxcg/dv6D2PzYrjDrosCcMtw4xbgzkYJ+EWIW9FDnUIZ7DAfUyb9Ge42mqKYIvUbP/aPuZ7T3aBz1jgBc4nMJUwKmsGLeCssPe2X8SkprS1l3eh1ldWXkVeVZ4rLHCo6xL0edWGxT2ibLtQuqC1BQGOo+1PI+vjgeUKc0OF543BKKSStTwy3edt4dFucX9r7A/ZvuP5/H02HSy9K5+oer+THpx2653oXSWNA70tkcXxzPD4k/NPk8JR0jqyILQHroks7D/GUKcQ5hnM84rDRWTA+cbpnR8ZZht+Bl54Veo2dywGSsNFaM9h4NwDD3YTwS/Qgni07yxsE38LX3pcZQw6/Jv1qWx4v2juZk0Un25+xHINibvZfyunLgTIfoULczgn6qSA233D78dgB2Zu4EzniPUwKmUFhTaDlHe0gqSSKtPI3cyo6HFBRF6dBxJ4tOoqDwS9Ivlm0lNSWWKZA7Snp5Ojf8csN52d4ecqpUQdcJXYc89AO5BwDOadfOzJ3nfe/9HbOHLgVd0mk0FvRg52D23byPEZ4jGOo+lI8u/4h7R95rKfvomEf57+z/YquztWybFzKPCPcI6ox1/GXsXxjsOphv479lXco6nK2dWRS+iFpDLUU1RSwMW0iDscESGzfnoA9zHwaYBL34FP4O/kR7R+Nk5cShvEOA2iGqEzom+E4A1MyM9lBvqCezIhOAQ/mHOvx8VieuZs7KOaSXpVNZX8k3cd9gVIytlje3Svbl7LNUWP/c9U8W/7qYemN9q8fVNNSw7MAypn8znTcOnplbZ3PaZo4XHudg3sFWjy2tLWVX1i5qDbWWbQdyD/B/e/6vTVtBFWQ3Gzf8Hf075KHH5MQAtNnhDfD0rqf5+46/tzsElVGeweG8w+2240Koqq9i0U+LLNNSn43BaGi2BGRnkl2RjbO1c5fNYSQF/SIkwDEAgDDXMIAm8faxPmPRa/SW99723kT7RDc5XiM0PDvxWW6LuI3pgdO5bvB1xBfHsy1jGxP9JjLCY4Sl7B2Rd+Bh68HL+1/mrg13WUQh3DUcrdCSX5XPqaJThLuGI4RguMdwjhWomS6pZan4O/pbpg5u72IdGRUZTbJ2OspPST/RoKgZPp+f+Jzn9z5vsbslkkuSLWu7bkjdQHJpMlvSt1DVUEVSSVKrx30V9xUfH/uYOmNdk87gmFz1Wq3d77envmXqN1O557d7+PT4p5btHxz9gK9Pfc36lPVt3l9OZQ4+9j4EOAaQUdE+QVcU5YyH3kZHar2xnryqPE4Vn+JE0YlWyzXm1f2vsnTz0m7pg4gvjie+ON7yjM/m2/hvuWLVFc068juL7MpsS/9RVyBnW7wImeA3gXdmvUO0d/S5C7fCYLfBDHYbDMAfwv/ASM+RVDVUEeYShr3eHicrJxytHAl0DOSx6MdYn7KemNwY9mTvQSu0eNh64GbjRkpZCmnlacwJmQOoA6A+PPohVfVVpJenE+gYSIBjABqhabegmz15Z2tni7ffXnIrcy2VwIbUDZTVlgFwKO8QjlaO3LH+Dj6f9zmhLqGWY5JLkxnjPYa8qjy+jvua3Vm70QotBsXAsYJjDHEb0uK1dmftZpDLIKYHTueTY59QZ6hDp9FZbG6t32BVwipCnUPRa/T8evpX7h5xN8U1xezJUjuk3z38LqO9RrM/Zz+nS08ze8BsS58FqIIe7ByMh60HR/KPtOu5JJcmU1xbjKetJ7lVuRgVY5MpnRs/PwVVmFcnrCbCPaLN8xoVIwfyDlBaW0pGRQZ+9n7UG+ux0dm0y66OYq5gW2tl7MzcSY2hhoyKjFY/twshuzKbQMfATj+vGemhX4RohIapAVMRQnTa+Qa7DWa012gcrBwQQnDrsFtZErEEUJfVe3Pmm/xr4r8AcLd1R6tRRf231N9QFIUZgTMAdWZIg2IgNj+W1LJUghyDsNJaEeUVxcqElZY4usFoIK4orkWvzizoV4ZeSXxxfJPY+6qEVSz6aRFfxX1FvaF5OGRD6gYUFBYMWsCJwhNkVGSgERoO5x/m15RfKa8vZ3P6Zkt5g9FASlkKA50H8uDoBymuLWZrxlYWhi3EycrJ0to4m1pDLQfzDnKJ7yUMcRtCg9JAQkkCSSVJlNaWohVaTpc1r8Cq6quIK4pjRuAMFoQtILEkkYTiBDambcSgGLhv5H2klKUw+/vZ/G3H3/jg6Ad8cPSDJufIqVI99EDHQMrryi0ZRi1hVIy8d/g9Xt3/KgBzQ+bSYGygqKaoxfKNY8Rrk9e2OPq3McklyZbrnyw8yXux73Hl6iupM9S1edz5Yk7XbUnQDUaDpTLPLM/s9GsrikJWRVaXZbiAFHRJF3HPyHu4YcgNTbbNGjCLJRFLLOJtnp7g5qE3W2LqER6qR/dazGtUN1QzPXA6AI+NfYzimmLei32PekM9j297nGt/vpYl65aQXNJ0xGNqWSou1i5MD5yOUTHy7O5nOVl4EoAfE38kuSSZF/a+wLIDyyzHZFZk8vye5/n0+KcMcRvC
XZF3AeCod2ReyDxi82Mtc93sytpFVkUW1/18Hb+n/06toZZQl1CmBU7j14W/8syEZ3go6iEiPSI5WnC0xedzOO8wtYZaJvhNsHQQxxXGWcIaUwOmklqW2iwefrzwOAbFwCivUcweMBuN0PBz8s+sSV5DsFMw9468lyURS7hnxD18f9X3XDbgsiY2lNeVU1lfiY+djyX01taI0aMFR3k39l0O5h1ktNdoS6uuNQ/XvP3OyDspry+3dHC3RuN+ghOFJ9iQuoHcqtxWp5k4kn+k1U7ZiroKlh1Y1mbnucVDN3UMfx//vSXzJKEkgfJ69VhzH8yFEFcU18RpKKsro6qhqss6REEKuqSbeTT6Uf5+yd8BNYYf5BjEA6MfsOz3sPXAz96P+OJ4BrsO5hLfSwB1MeyFYQtZcWIFk7+ezG+pv7EwbCHJpck8uvXRJp56alkqA5wGMMZ7DNeFX8eOzB3cueFOyurKOFJwhFuG3cKisEV8feprS5bHVye/4rv473CyduKuyLsIdApkeuB0bh52M5f4XkJ5XTnJpcm4WLtwKO8Q7x95n5NFJ3l297OAmgIK4GjlyKLwRThbOxPhEUFSSRJV9VXNnsPurN3ohI4x3mMIcAzAQe/AyaKTHMg9gJedF5P9J1PdUM3e7L1M/2a6ZWSvOQd8hMcIPGw9GOczjo+PfcyB3ANcNfAqhBA8Gv0oS0cvZbDbYEZ5jSKnMsfSWWsWXB97H6K9o7HR2vBjYuvplrsydyEQbFi0gc/mfmZJeW0s6EU1Rdzwyw2sOLHC4qFfGXolLtYuzVJWz24NxOTG4GnryVC3oWxK22QJq/2Q+EMzW2oNtdy54U5e2f9Ki7buyNrBx8c+5uNjH7d6P0mlZ0IuBdUFPLv7Wb6O+1q1xdRPohM6Misyic2P5YZfbuhQdpWZ9PJ0rvv5Or459Y1lm/mZSUGX9EsejnqYH+b/0KzH35wbvzhicZOw0BNjn+CJsU8wJ2QOr057lWcnPstj0Y+RWJJoGdAE6iCkAU4D0Gv0/GPCP3hl6iuU1ZXx3uH3aDA2MNZnLPeNug+d0PHWobcA2J+7nyivKFZdvYrLgi8D4K2Zb6n5+F6jLee+f9T9NBgbWJmwEke9I8W16qjZEOeQZvcX6RGJQTE0S+EzKkZ2ZO4g0jMSe729JWS1PWM7G9M2MjVgquV8r8W8RmFNIU/veprK+kpi82IJcQ7BxcYFgIeiHuK2iNt4Y8Yb3Bl5Z4s2ABYvvbGgO1s7c0XoFaxJXsP+nP08uPnBZlkvO7J2EOkRabmet703cKZjtLK+kj9t/BPHC4+zKW0T2ZXZuNm4Yae3Y1rANLZmbCWnMocHNz/IrO9mcfdvd1sqX0VROJh7kDHeYxjmPswyeGxm4Ex2ZO5oNhuneVqK3Vm7W8weMlcGX8Z92WIYyTxWwt3GneqGakt4JaEkAVArF38Hf0JdQsmqyLJkG7WWEdMW2zO2o6A0+V52dQ46SEGX9CBCCPRafbPtc0PmMt53PHOC5zTZbqe345Zht/DsxGct++aGzMXNxo3PT34OqDHmvKo8gp2CLcdN8J2Ak5UTX8V9hVZoifKOwsvOi5uH3syvp3/lVNEp4orimmXzmAl0DMTNxo0gxyCuGXQN1lprAF6f8TpOVk542HrgbO3c7DhzxfSX7X/h+T3PszltMzmVObyy/xVOFZ/i6oFXW8oOdRtKVmUWdjo7lo5aahH0+OJ4wlzDyKnM4cntT3Io/xCjPEc1ucYj0Y8wM2hmi52UQ9yGoBM6SyzfHF4JcgoC4MYhN1JjqOH29bezJX1Lk6yZ0tpSjhUcY6L/RMs2V2tXrLXWlorh2d3PcqroFBHuEZwoPEFmeaZFsGYFzaK8rpyb197M7qzdXOJ7CScKT3CiUM1+WXt6LblVuYzxHmMJO/k7+PPQmIcwKAZuXHMjL+97mZKaEkANdQGU15e3mOZ4uvQ09np7KusrLd8HUCuOrelbLR3Ak/wnAWdGJSeWJFqyeMZ4j8HPwY+MigyLnXtz9ja71rnYmaWGmg7mHrRMOJdVaRJ0h64TdJnlIul1XDrgUi4dcGm7ylpprbhh8A28G/su1/58raV5HOwcbCmj1+qZFTSL1YmrGeExAnu9PQDXDr6WD499yHN7nsOoGBnrPbbFawgheCz6Mez19tjobJgaMJXyunLG+47n2YnPttqp6GHrwUtTXmJ9ynp+Tvq5SfPbHPYxYxb/x6Ifw93WHUVRcNQ7Ul5fzsNRD3Oy8CT/PfJf6o31RHlHtevZANjobAhzDeNIgSpmcUVxeNl54WbjBqjZSpP9J5Neno6/gz+/JP/Cn8f8GTu9Hbuzd2NUjEzym9TkWXjbeZNTmcO60+v49fSvLB21FB97H/6+8+8cyjtkWcR8gt8EbHW25FXl8dq015joN5GZ385kZcJKDuQe4NWYVxnjPYarBl5l6QeZ5DeJUOdQlk1fxi9Jv/B13NesSV7Dy1NfZmfWToa7DyeuOI7tmdstA+HMpJSq8w/ZaG349PinLApbhI+9D9szt7N081LsdHaWa/yU9JMlvp9TmUNsfiwltSVEe0cTXxzP3uy9ljmHOuKh1xnqUFDYn7MfT1tP8qvziS+Ox9felxUnVuBn72d59l2BFHRJn+eWYbdQXl9Oelk6A5wGcG34tUzxn9KkzJzgOaxOXN3EC/d38Ge873j2Zu9Fr9EzwnPE2ae2cNXAqyyvX536qiU171wVzxWhV3BF6BXUG+qJzY8luTQZvUbP/EHzm4STLgu+zBITB1U4Q1xCyKnIYaLfRKYGTOXmoTdzrPBYh9NNIz0iWXt6LUbFSFxRXLN0vDdnvIlGaIjNj2XxusWsT1nPgrAFbE7djKOVo6WyMeNt782JwhPszNrJCM8R3BF5hyWzqMZQY4mz2+hseGTMI2g1Wi4Pvtxyn6sSVmFQDMweMJsXp7yItdaaIe5DuDL0Sq4bfB0AswfMZvaA2cQXx/OXbX/hoS0PUd1QzcNRD2Ovt2d7xnYeGfMI+VX5fBv/LXdG3klKWQrRPtHcNOQmtv+4nVf3v8q/p/+bL09+iaPekaqGKmx1tpYKMa86D53Q0aA0WCrbaO9oKusrqW6oprqhmiDHIJJKk8ivyrfMYFpeV87nJz5nccRiS7hQURS+ivuKV2NeZbTXaKobqnks+jH+tedf7M7ezZ6sPeRV5fHpnE9bbEl1FlLQJX0eBysHnhj7RJtlxvmO467Iu1gYtrDJ9msGXcPe7L1EekS2O/e5rYnPWkOv1RPtE91qWEev0VsmSjPz1PinaDA2oNOoP1MHKwdLJ3FHGOk1km/jv+VI/hFOl562ZBk1tg1gtNdoBjoP5LMTnxHtE82G1A3cNPQmy/XN+Nj5sD9nP45Wjrw05SV0Gh3BTsHY6mypbqhuEiM+O9Pp+sHX80vyL1w/+Hr+Ou6vlmep1+h5ccqLzWwPdw1n+aXLuWntTVQ3VDPJfxI6jY7XYl5jW8Y2PjvxGXuz9+Kgd6C6oZoQ5xACHAO4M/JO3jn8Di/vUz37+0fdT7hrONmV2XjaelqEfIL
fBLZnbmd9ynq87LwIcAxoklZ467BbeX7v8+zL2ccVoVcA8NGxj/jf0f/hbe9t+T4tj13Ou7HvEu4azv6c/VhprLgy9Eo+OvYRbx18iwalgecmPkekZ2SHP7+OIAVdclGg0+h4MOrBZtsvDbqU121fbyZyvQFzKueFMsV/ClqhZXnscgyKodUBM0IIHoh6gIe3PMwd69WlCv849I/NypnTHV+a8pJlkIxWo2WY+zAO5B5oM0Y8wnME267f1mKfQ2t423vzwWUfsCdrD4NdBxPoGMia5DU8sPkBjIoRrdDyyfFPAAhxUvsebh9+O4kliXx+8nN0Gh1/CP9Dk1W8vOy8yKrMYkrAFA7kHqCqoYox3mMQQuDv4K/ek9By1cCreOPQG2xK28QVoVdQWlvKV3FfAVgyrarqq/jsxGfMDJzJ6zNeZ1fWLspqy7DT2zHedzyrElbx1PinWBC2oN33fL5IQZdc1NjobFi3aF0zL7Q/4WrjSrRPtKWjrq0RkLOCZlkyX+aGzG1xEMwfh/2Ryf6Tm4WohrsPVwX9HFkcHRFzM6HOoZbUUHu9Pe9e+i63rbuNcNdwHK0cWZmgLnZu7ky20lrx6tRXmeQ3CQWl2ZKMPvY+ZFVmMchlEINcB3Ek/4gllGUW9IEuA7HT23HD4Bv44OgH7M3ey47MHVTWVzLFfwq7s3dTVlfGxtSNVNRXsDhiMRqhsfQhADwy5hEWhi20LP3Y1ZzzWyyE+Ai4EshTFGV4C/unAz8C5mFtqxRFkYsVSvoMLWXa9DdmB81mb/Ze7HR2Fg+7Nf467q/Y6my5PeL2Fvc7WTm12N8wM2gmO7N2tpjC2dl42Hqwev5qtELLrqxdrExYiZOVU5MORyFEq16xOf1ykMsgwlzCVEE3hcMcrBzwsvWyiPDdI+5mXco67tt4H3XGOuaFzOPmoTezPXM7W9O38t2p7xjoPLBJeqsZZ2vnbhNzaJ+H/gnwNvBZG2W2K4pyZadYJJFIOp1ZA2bxf3v/j8Fug8/ZKeds7czTE57u8DWivKNYPX/1+ZrYYcytqnE+43C0ciTEOaTd01mM9xlPYXUhrjauXBF6BQbFYAnXAHw05yNcrF0AtRX37MRneXb3s9w05CauH3y9Jdvnbzv+BsCT457stKk0LgTRnhnOhBDBwC9teOiPdVTQo6OjlZiY1mewk0gkncuymGUMdBnI/EHze9qUTmdj6kbs9fZM8JvQbdfcmr6VvTl7CXEOYcGgBd0WthNCHFAUpcXe9c4S9JVABpCFKu4tLmkthLgbuBsgKChoTGpq++a3lkgkEolKW4LeGQmRB4EBiqKMBN4CfmitoKIo7yuKEq0oSrSnZ8uLGEskEonk/LhgQVcUpUxRlArT67WAXgjhcY7DJBKJRNLJXLCgCyF8hKk3QAgxznTOwgs9r0QikUg6RnvSFr8CpgMeQogM4GlAD6AoynLgD8CfhBANQDVwg9Ida0lJJBKJpAnnFHRFUW48x/63UdMaJRKJRNKDyOlzJRKJpJ8gBV0ikUj6CVLQJRKJpJ8gBV0ikUj6CVLQJRKJpJ8gBV0ikUj6CX1a0BsMRn45koXBeP5p7wajQklVXSdaJZFIJD1DnxR0s4BvPJnL0i8P8cuRrFbLJudXcPnr21h3LLvF/d/GpDP55S1U1DZ0ia0SiUTSXfQ5QV93LIcRz6wnu7Sao5nqauvfxWS0WLa0qp47P43hVG45f111lIKK2mZlDqUVU1HbQHJ+RZfaLZFIJF1NnxP0AFdbKusM7DtdxPGsMgB2JhWQWVLdrOxrG06RXlzFCwsiqaw18NzPJ5qVScxThTw5v7JrDZdIJJIups8J+lBfJxysdew7XcSxzDLGh7ihKLDqQHMvfXdyIVPDPLlpfBA3jQ9i7dFs6g1Gy35FUUiwCLr00CUSSd+mzwm6ViOIGuDKhhO5FFTUMme4DxMHuvPF3jRqGwz8bfVR/vT5AUqr60nMq2B0kAsAkf7ONBgV0oqqLOfKK6+lvEaNnScVSA9dIpH0bfqcoAOMC3Ylv1yNh0f4OfOn6QPJKavhHz8c48u9aaw7nsNvJ3IBGB3kCsBALwcAkvLOeOLmcIujtU6GXCQSSZ+nbwp6iLvl9TA/JyYP8mBUoAvfxmTgaK1DUeD13+IRAkYGugAQ6mkPQFIj4U7ILQdgxhAvThdUYLyA9EdJx8gtq0HOsiyRdC59UtBHBDhjpdUQ4mGPg7UOIQQPXxoGwD+uGkawux2ZJdUM9nbEwVqdIdjJRo+XozVJjWLlifkVONnoGB/qRk29keyymhavl5hXzqSXNvPu74k0NIrBS86PzJJqJr60md9P5fe0KRctb29O4LPdKT1thqST6ZOCbqPXctVIP+YM97Fsmz7Yi+1PzOC66EAuj1C3m+PnZgZ6OjQR9ITcCsK8HQn1UMMxMSlF/ByrDlQ6lVPOsz8fp7rOwOa4PDJLqnll3Ske+vpwm7ZV1TWQ1ULGTVdgNCo8tfooseklzfbllddQXWfoFjs6yun8SgxGpclnIelevt6fzsoWEgkkXU9GcRU19V3z2+yTgg7w7+tG8pc5Q5psC3SzA2BupC8A0QPcmuwf6GVPcn4liqJgMKoZLoM8HRhoCsc88m0sD3x1iLs+i+Hm/+3l450pbI3P42BqCUFudtw7bSBrjmaTmFfeql1vbErgyrd2XNDo1faSUljJF3vT+Hp/WrN9C9/dxbLfTnW5DedDdqla4eW20iJqzNb4fOa/vYPaht5ZOfVFDEaF3LKaJgkCku5BURRmL9vGa+u75rfZZwW9LUYFurDqvolcM9q/yfaBng6UVtdTWFnHD4cyKaqsY2q4J56O1rjbW+Fub8U9U0PZcioPo6JgZ6Vle0IBB9OKiQpy4c4pIVjpNHy0M6XVa8fnlFNUWdcsDTK/vJYfDmV2atz4RLaah78/pbjJ9toGAxnF1RzJKO20a3UmOaWqkOeVNx/odTbb4/OJzSglpUCKT2dRUFFLvUGhuKqespr6njbnoqK4qp7qegN+LrZdcv5+KegAUUGuaDWiybaBnmpo5URWGct+iyfS35m5w30QQvD9nyay/uGp/HXeUL67ZwKr75vIhFB31hzNJq+8ltFBrng4WLNglD+rDmZQXFlHWU09T3wfy6mcMx672es5fFYY5H/bk3n4m8PE5ZQTk1LEtct3UXmB0w2YB1Yl5lVQVHlmPpqCCvV1Ui/N3DH3VTT20FfsSeUP7+1qVuGlmp7n6QIZnuksGocE06WX3q2Yn72/qxT0C8acuvinzw+QWVLNX+cOQWMS/RAPe1ztrQCIDnZjgLs9k8M8KKlSPRhzPP6OKSHUNhhZvi2JD7ef5tuYDB76+hB1DUYURSGjWP3AYjNKmlx7d3IhAGuPZrN8axL7U4o5mNbUs+4oJ7LKsNKqH+GB1GKLGOaZhLKgopbSqt7ngeWaPfSyMx7673F5xKQWcyq3aTjLLDi9tXLqi2SVnKlI0wrbL+i1DQaufGs7G47ndIVZFwVmffCXHvqF4+dsw5KJwVw+3IeXFkYycZBHm+Unm/Zb6zQM9X
[base64-encoded PNG plot data omitted]",
      "text/plain": [
" ] @@ -1037,7 +1045,7 @@ }, { "cell_type": "code", - "execution_count": 51, + "execution_count": 12, "id": "af9d6df1", "metadata": {}, "outputs": [ @@ -1079,123 +1087,123 @@ " \n", " \n", " 0\n", - " 53.753993\n", - " -2.475239\n", - " 0.404968\n", - " 406.898386\n", - " 1.788962\n", - " 450.073724\n", - " 1221.551077\n", - " 5.697177\n", - " 12.451377\n", - " 14.835445\n", - " 81.515696\n", - " 0.0\n", + " 7.400000\n", + " 0.080079\n", + " 0.000000\n", + " 65.800000\n", + " 0.025529\n", + " 67.119106\n", + " 440.000000\n", + " 1.038980\n", + " 2.720000\n", + " 1.080000\n", + " 8.0\n", + " 7\n", " \n", " \n", " 1\n", - " 241.769932\n", - " -33.905933\n", - " 7.440188\n", - " 1722.536053\n", - " 48.034556\n", - " 1312.264457\n", - " 2141.119368\n", - " 31.259074\n", - " 83.654749\n", - " 28.591759\n", - " 489.172674\n", - " 3.0\n", + " 5.088797\n", + " 0.112499\n", + " 0.370000\n", + " 0.763677\n", + " 0.009000\n", + " 288.824821\n", + " 98.000000\n", + " 0.987110\n", + " 3.240000\n", + " 0.220000\n", + " 14.2\n", + " 8\n", " \n", " \n", " 2\n", - " 25.344904\n", - " 0.769463\n", - " -11.237007\n", - " -335.794326\n", - " -3.595284\n", - " -234.179124\n", - " 382.907515\n", - " 7.637684\n", - " 17.748300\n", - " 3.380296\n", - " 73.701048\n", - " 1.0\n", + " 3.800000\n", + " 1.100000\n", + " 0.000000\n", + " 0.600000\n", + " 0.009000\n", + " 2.000000\n", + " 9.000000\n", + " 1.038980\n", + " 3.820000\n", + " 0.220000\n", + " 8.0\n", + " 4\n", " \n", " \n", " 3\n", - " 15.635557\n", - " -28.371864\n", - " -19.808469\n", - " 800.088446\n", - " 61.404066\n", - " -596.053591\n", - " -1749.797505\n", - " 28.376345\n", - " -71.868790\n", - " -14.556346\n", - " -38.315179\n", - " 1.0\n", + " 3.800000\n", + " 0.080000\n", + " 1.659603\n", + " 0.600000\n", + " 0.034734\n", + " 2.000000\n", + " 9.000000\n", + " 0.987110\n", + " 3.775879\n", + " 1.080000\n", + " 9.5\n", + " 7\n", " \n", " \n", " 4\n", - " -0.796959\n", - " -8.546869\n", - " -4.726590\n", - " 128.343028\n", - " 1.083628\n", - " -288.352104\n", - " 1184.680273\n", - " 8.081500\n", - " 23.012828\n", - " 2.168597\n", - " 36.672840\n", - " 0.0\n", + " 5.700000\n", + " 0.330000\n", + " 0.213874\n", + " 10.937306\n", + " 0.050000\n", + " 39.064968\n", + " 147.790987\n", + " 0.997247\n", + " 3.330984\n", + " 0.380000\n", + " 8.7\n", + " 6\n", " \n", " \n", " 5\n", - " -31.203381\n", - " -39.052177\n", - " -57.651032\n", - " 1269.158981\n", - " -22.793850\n", - " 101.490751\n", - " -661.997823\n", - " 5.012738\n", - " 19.615822\n", - " 26.791456\n", - " -63.773678\n", - " 3.0\n", + " 14.200000\n", + " 0.080000\n", + " 0.000000\n", + " 0.600000\n", + " 0.009000\n", + " 2.000000\n", + " 9.000000\n", + " 0.987110\n", + " 2.916428\n", + " 0.220055\n", + " 9.5\n", + " 5\n", " \n", " \n", " 6\n", - " -120.526480\n", - " -49.314650\n", - " -67.642982\n", - " 650.136816\n", - " 65.155843\n", - " 598.106999\n", - " -3468.753037\n", - " 3.750566\n", - " 52.556860\n", - " -108.310847\n", - " -91.816310\n", - " 3.0\n", + " 14.200000\n", + " 0.087887\n", + " 1.660000\n", + " 0.600000\n", + " 0.108297\n", + " 49.000000\n", + " 65.466909\n", + " 0.987117\n", + " 2.720090\n", + " 0.220006\n", + " 9.0\n", + " 6\n", " \n", " \n", " 7\n", - " 13.172627\n", - " -7.196406\n", - " -20.153565\n", - " 746.262383\n", - " -30.846688\n", - " 1592.815397\n", - " 1610.699379\n", - " -15.576660\n", - " 27.319692\n", - " 45.376814\n", - " 135.871422\n", - " 0.0\n", + " 8.870765\n", + " 1.099817\n", + " 1.657142\n", + " 12.921528\n", + " 0.025276\n", + " 288.846488\n", + " 
438.337342\n", + " 0.996196\n", + " 2.724725\n", + " 0.220049\n", + " 10.2\n", + " 5\n", " \n", " \n", "\n", @@ -1203,43 +1211,43 @@ ], "text/plain": [ " fixed acidity volatile acidity citric acid residual sugar chlorides \\\n", - "0 53.753993 -2.475239 0.404968 406.898386 1.788962 \n", - "1 241.769932 -33.905933 7.440188 1722.536053 48.034556 \n", - "2 25.344904 0.769463 -11.237007 -335.794326 -3.595284 \n", - "3 15.635557 -28.371864 -19.808469 800.088446 61.404066 \n", - "4 -0.796959 -8.546869 -4.726590 128.343028 1.083628 \n", - "5 -31.203381 -39.052177 -57.651032 1269.158981 -22.793850 \n", - "6 -120.526480 -49.314650 -67.642982 650.136816 65.155843 \n", - "7 13.172627 -7.196406 -20.153565 746.262383 -30.846688 \n", + "0 7.400000 0.080079 0.000000 65.800000 0.025529 \n", + "1 5.088797 0.112499 0.370000 0.763677 0.009000 \n", + "2 3.800000 1.100000 0.000000 0.600000 0.009000 \n", + "3 3.800000 0.080000 1.659603 0.600000 0.034734 \n", + "4 5.700000 0.330000 0.213874 10.937306 0.050000 \n", + "5 14.200000 0.080000 0.000000 0.600000 0.009000 \n", + "6 14.200000 0.087887 1.660000 0.600000 0.108297 \n", + "7 8.870765 1.099817 1.657142 12.921528 0.025276 \n", "\n", - " free sulfur dioxide total sulfur dioxide density pH \\\n", - "0 450.073724 1221.551077 5.697177 12.451377 \n", - "1 1312.264457 2141.119368 31.259074 83.654749 \n", - "2 -234.179124 382.907515 7.637684 17.748300 \n", - "3 -596.053591 -1749.797505 28.376345 -71.868790 \n", - "4 -288.352104 1184.680273 8.081500 23.012828 \n", - "5 101.490751 -661.997823 5.012738 19.615822 \n", - "6 598.106999 -3468.753037 3.750566 52.556860 \n", - "7 1592.815397 1610.699379 -15.576660 27.319692 \n", + " free sulfur dioxide total sulfur dioxide density pH sulphates \\\n", + "0 67.119106 440.000000 1.038980 2.720000 1.080000 \n", + "1 288.824821 98.000000 0.987110 3.240000 0.220000 \n", + "2 2.000000 9.000000 1.038980 3.820000 0.220000 \n", + "3 2.000000 9.000000 0.987110 3.775879 1.080000 \n", + "4 39.064968 147.790987 0.997247 3.330984 0.380000 \n", + "5 2.000000 9.000000 0.987110 2.916428 0.220055 \n", + "6 49.000000 65.466909 0.987117 2.720090 0.220006 \n", + "7 288.846488 438.337342 0.996196 2.724725 0.220049 \n", "\n", - " sulphates alcohol quality \n", - "0 14.835445 81.515696 0.0 \n", - "1 28.591759 489.172674 3.0 \n", - "2 3.380296 73.701048 1.0 \n", - "3 -14.556346 -38.315179 1.0 \n", - "4 2.168597 36.672840 0.0 \n", - "5 26.791456 -63.773678 3.0 \n", - "6 -108.310847 -91.816310 3.0 \n", - "7 45.376814 135.871422 0.0 " + " alcohol quality \n", + "0 8.0 7 \n", + "1 14.2 8 \n", + "2 8.0 4 \n", + "3 9.5 7 \n", + "4 8.7 6 \n", + "5 9.5 5 \n", + "6 9.0 6 \n", + "7 10.2 5 " ] }, - "execution_count": 51, + "execution_count": 12, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "plugin.model.generate(8)" + "plugin.generate(8)" ] }, { @@ -1255,7 +1263,7 @@ }, { "cell_type": "code", - "execution_count": 43, + "execution_count": 13, "id": "56a1fc7e", "metadata": {}, "outputs": [ @@ -1263,35 +1271,47 @@ "name": "stderr", "output_type": "stream", "text": [ - "[2023-03-27T18:03:45.005934+0200][38480][INFO] Step 100: MLoss: 0.9066 GLoss: 1.0013 Sum: 1.9079000000000002\n", - "[2023-03-27T18:03:51.387087+0200][38480][INFO] Step 200: MLoss: 0.4735 GLoss: 1.0112 Sum: 1.4847000000000001\n", - "[2023-03-27T18:03:59.107456+0200][38480][INFO] Step 300: MLoss: 0.4567 GLoss: 1.001 Sum: 1.4577\n", - "[2023-03-27T18:04:05.835508+0200][38480][INFO] Step 400: MLoss: 0.2715 GLoss: 0.9856 Sum: 1.2571\n", - 
"[2023-03-27T18:04:12.739590+0200][38480][INFO] Step 500: MLoss: 0.2193 GLoss: 0.9046 Sum: 1.1239\n", - "[2023-03-27T18:04:19.417762+0200][38480][INFO] Step 600: MLoss: 0.0143 GLoss: 0.8463 Sum: 0.8606\n", - "[2023-03-27T18:04:26.022729+0200][38480][INFO] Step 700: MLoss: 0.0048 GLoss: 0.7509 Sum: 0.7557\n", - "[2023-03-27T18:04:32.757598+0200][38480][INFO] Step 800: MLoss: 0.0083 GLoss: 0.7102 Sum: 0.7185\n", - "[2023-03-27T18:04:39.550873+0200][38480][INFO] Step 900: MLoss: 0.0029 GLoss: 0.675 Sum: 0.6779000000000001\n", - "[2023-03-27T18:04:46.573464+0200][38480][INFO] Step 1000: MLoss: 0.0039 GLoss: 0.6414 Sum: 0.6453\n", - "[2023-03-27T18:04:53.438631+0200][38480][INFO] Step 1100: MLoss: 0.003 GLoss: 0.6046 Sum: 0.6076\n", - "[2023-03-27T18:05:01.283222+0200][38480][INFO] Step 1200: MLoss: 0.0013 GLoss: 0.6297 Sum: 0.631\n", - "[2023-03-27T18:05:08.559280+0200][38480][INFO] Step 1300: MLoss: 0.0012 GLoss: 0.5479 Sum: 0.5491\n", - "[2023-03-27T18:05:15.536738+0200][38480][INFO] Step 1400: MLoss: 0.0067 GLoss: 0.5275 Sum: 0.5342\n", - "[2023-03-27T18:05:22.391711+0200][38480][INFO] Step 1500: MLoss: 0.0007 GLoss: 0.5252 Sum: 0.5259\n", - "[2023-03-27T18:05:29.285959+0200][38480][INFO] Step 1600: MLoss: 0.0018 GLoss: 0.5017 Sum: 0.5035000000000001\n", - "[2023-03-27T18:05:36.288634+0200][38480][INFO] Step 1700: MLoss: 0.0012 GLoss: 0.5013 Sum: 0.5025\n", - "[2023-03-27T18:05:43.485831+0200][38480][INFO] Step 1800: MLoss: 0.0009 GLoss: 0.4927 Sum: 0.49360000000000004\n", - "[2023-03-27T18:05:50.629387+0200][38480][INFO] Step 1900: MLoss: 0.0009 GLoss: 0.4931 Sum: 0.494\n", - "[2023-03-27T18:05:58.709478+0200][38480][INFO] Step 2000: MLoss: 0.0006 GLoss: 0.4864 Sum: 0.487\n" + "[2023-03-31T01:07:08.859587+0200][12004][INFO] Encoding fixed acidity 8821222230854998919\n", + "[2023-03-31T01:07:08.873767+0200][12004][INFO] Encoding volatile acidity 3689048099044143611\n", + "[2023-03-31T01:07:08.885765+0200][12004][INFO] Encoding citric acid 735380040632581265\n", + "[2023-03-31T01:07:08.896357+0200][12004][INFO] Encoding residual sugar 2442409671939919968\n", + "[2023-03-31T01:07:08.904579+0200][12004][INFO] Encoding chlorides 7195838597182208600\n", + "[2023-03-31T01:07:08.914577+0200][12004][INFO] Encoding free sulfur dioxide 3309873879720413309\n", + "[2023-03-31T01:07:08.922581+0200][12004][INFO] Encoding total sulfur dioxide 8059822526963442530\n", + "[2023-03-31T01:07:08.930580+0200][12004][INFO] Encoding density 3625281346475756911\n", + "[2023-03-31T01:07:08.939216+0200][12004][INFO] Encoding pH 4552002723230490789\n", + "[2023-03-31T01:07:08.947216+0200][12004][INFO] Encoding sulphates 4957484118723629481\n", + "[2023-03-31T01:07:08.956217+0200][12004][INFO] Encoding alcohol 3711001505059098944\n", + "[2023-03-31T01:07:08.964215+0200][12004][INFO] Encoding quality 3457201635469827215\n", + "[2023-03-31T01:07:17.078379+0200][12004][INFO] Step 100: MLoss: 0.9932 GLoss: 0.9775 Sum: 1.9707\n", + "[2023-03-31T01:07:24.055012+0200][12004][INFO] Step 200: MLoss: 0.2957 GLoss: 0.9254 Sum: 1.2211\n", + "[2023-03-31T01:07:32.461826+0200][12004][INFO] Step 300: MLoss: 0.0748 GLoss: 0.8407 Sum: 0.9155\n", + "[2023-03-31T01:07:39.522162+0200][12004][INFO] Step 400: MLoss: 0.0289 GLoss: 0.7444 Sum: 0.7733\n", + "[2023-03-31T01:07:47.110402+0200][12004][INFO] Step 500: MLoss: 0.0292 GLoss: 0.6655 Sum: 0.6947\n", + "[2023-03-31T01:07:54.622795+0200][12004][INFO] Step 600: MLoss: 0.0229 GLoss: 0.5844 Sum: 0.6073000000000001\n", + "[2023-03-31T01:08:01.951234+0200][12004][INFO] Step 700: MLoss: 0.0218 
GLoss: 0.5572 Sum: 0.5790000000000001\n", + "[2023-03-31T01:08:09.957993+0200][12004][INFO] Step 800: MLoss: 0.0091 GLoss: 0.531 Sum: 0.5401\n", + "[2023-03-31T01:08:18.931373+0200][12004][INFO] Step 900: MLoss: 0.0114 GLoss: 0.5286 Sum: 0.5399999999999999\n", + "[2023-03-31T01:08:26.898063+0200][12004][INFO] Step 1000: MLoss: 0.0099 GLoss: 0.5259 Sum: 0.5358\n", + "[2023-03-31T01:08:34.593930+0200][12004][INFO] Step 1100: MLoss: 0.0106 GLoss: 0.5196 Sum: 0.5302\n", + "[2023-03-31T01:08:41.818482+0200][12004][INFO] Step 1200: MLoss: 0.0105 GLoss: 0.5072 Sum: 0.5176999999999999\n", + "[2023-03-31T01:08:49.426481+0200][12004][INFO] Step 1300: MLoss: 0.0086 GLoss: 0.5112 Sum: 0.5198\n", + "[2023-03-31T01:08:56.953344+0200][12004][INFO] Step 1400: MLoss: 0.0106 GLoss: 0.516 Sum: 0.5266000000000001\n", + "[2023-03-31T01:09:04.509760+0200][12004][INFO] Step 1500: MLoss: 0.0075 GLoss: 0.5062 Sum: 0.5136999999999999\n", + "[2023-03-31T01:09:11.742216+0200][12004][INFO] Step 1600: MLoss: 0.0098 GLoss: 0.5012 Sum: 0.511\n", + "[2023-03-31T01:09:19.870988+0200][12004][INFO] Step 1700: MLoss: 0.0088 GLoss: 0.499 Sum: 0.5078\n", + "[2023-03-31T01:09:27.578035+0200][12004][INFO] Step 1800: MLoss: 0.0163 GLoss: 0.4956 Sum: 0.5119\n", + "[2023-03-31T01:09:34.406045+0200][12004][INFO] Step 1900: MLoss: 0.0046 GLoss: 0.4955 Sum: 0.5001\n", + "[2023-03-31T01:09:41.645411+0200][12004][INFO] Step 2000: MLoss: 0.017 GLoss: 0.5008 Sum: 0.5178\n" ] }, { "data": { "text/plain": [ - "" + "" ] }, - "execution_count": 43, + "execution_count": 13, "metadata": {}, "output_type": "execute_result" } @@ -1302,7 +1322,7 @@ }, { "cell_type": "code", - "execution_count": 44, + "execution_count": 14, "id": "3fcb9493", "metadata": {}, "outputs": [ @@ -1312,13 +1332,13 @@ "" ] }, - "execution_count": 44, + "execution_count": 14, "metadata": {}, "output_type": "execute_result" }, { "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAXQAAAEGCAYAAAB1iW6ZAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/P9b71AAAACXBIWXMAAAsTAAALEwEAmpwYAABKvklEQVR4nO3dd3wc1dXw8d/dvupdsiXZsnGvuFKMC5hqeg0lYAcCIYWEh5KXhBQeyEMoCWkQegfTQgndVBvbgHvv3ZItq/ey9b5/zK4sybsq9qqf78f+aHdmdvZoJB1dnblFaa0RQgjR85m6OgAhhBCRIQldCCF6CUnoQgjRS0hCF0KIXkISuhBC9BKWrnrjlJQUnZOT01VvL4QQPdKqVauKtdapofZ1WULPyclh5cqVXfX2QgjRIyml9oXbJyUXIYToJSShCyFELyEJXQgheokuq6ELIURLPB4PeXl51NfXd3UoXcLhcJCVlYXVam3zayShCyG6pby8PGJjY8nJyUEp1dXhdCqtNSUlJeTl5TFo0KA2v05KLkKIbqm+vp7k5OQ+l8wBlFIkJye3+68TSehCiG6rLybzoKP53CWhB3x74FtyK3O7OgwhhDhqktADfrPkN7yw6YWuDkMI0cO88MIL/OIXv+jqMABJ6A3cPjdlrrKuDkMIIY6aJPQAn/ZR6a7s6jCEEN3I3r17GTFiBPPmzWPYsGFcc801fPHFF0ybNo2hQ4eyfPnyI44/7bTTGDduHLNnz2b//v0AvPXWW4wZM4bx48czY8YMADZt2sTUqVM5/vjjGTduHDt27DjmeKXbYoDX76XSJQldiO7ofz/YxOaDkf35HNU/jj+eP7rV43bu3Mlbb73Fc889x5QpU5g/fz5Llizh/fff5/777+eiiy5qOPaWW25h7ty5zJ07l+eee45f/vKXvPfee9x7770sWLCAzMxMysvLAXjiiSf41a9+xTXXXIPb7cbn8x3z5yQt9ACv3ystdCHEEQYNGsTYsWMxmUyMHj2a2bNno5Ri7Nix7N27t8mx3333HVdffTUA1157LUuWLAFg2rRpzJs3j6effrohcZ900kncf//9PPjgg+zbtw+n03nMsUoLHfBrPxpNhauiq0MRQoTQlpZ0R7Hb7Q2PTSZTw3OTyYTX623TOZ544gmWLVvGRx99xKRJk1i1ahVXX301J5xwAh999BFz5szhySef5LTTTjumWHtcC93n91HhqsDrb9uFbOs5Aao91RE9rxCibzn55JN5/fXXAXj11VeZPn06ALt27eKEE07g3nvvJTU1ldzcXHbv3s3gwYP55S9/yYUXXsj69euP+f1bTehKqWyl1NdKqc1KqU1KqV+FOGaWUqpCKbU28P8PxxxZGAv2LuCU109hf9X+iJ3Tqw8n8Sp3VcTOK4ToW/71r3/x/PPPM27cOF5++WX+8Y9/AHDnnXcyduxYxowZw8knn8z48eN58803GTNmDMcffzwbN27kuuuuO+b3V1rrlg9Qqh/QT2u9WikVC6wCLtJab250zCzgDq31eW1948mTJ+ujWeBi6YGl3PzFzbx8zsscn3Z8u18fSpW7ipNfOxmADy/+kIFxAyNyXiHE0duyZQsjR47s6jC6VKhroJRapbWeHOr4VlvoWut8rfXqwOMqYAuQGYFYj0q8PR6Acld5xM4ZLLkAUkcXQvRY7aqhK6VygAnAshC7T1JKrVNKfaKUCnkHQyl1k1JqpVJqZVFRUfuj5XBCj2TibVxykZ4uQoieqs0JXSkVA7wN3Kq1bp71VgMDtdbjgX8B74U6h9b6Ka31ZK315NTUkGuctqojErq00IUQvUGbErpSyoqRzF/VWr/TfL/WulJrXR14/DFgVUqlRDTSgBhrDCZlosItLXQhhGisLb1cFPAssEVr/UiYYzICx6GUmho4b0kkAw0yKRPxtnhpoQshRDNtGVg0DbgW2KCUWhvY9ltgAIDW+gngMuCnSikvUAdcqVvrPnMM4u2RTejSQhdC9AatJnSt9RKgxZnWtdaPAo9GKqjWxNnjpIUuhOgy8+bN47zzzuOyyy7r6lCa6HEjRQES7AmR7baoDyd0aaELIXqqHpnQ423xEU28jYf7y4yLQojG7rvvPoYPH84pp5zCVVddxV/+8pcm+7/88ksmTJjA2LFjuf7663G5XADcddddjBo1inHjxnHHHXcAoafRjaQeOTlXxGvogYTutDilhS5Ed/TJXXBoQ2TPmTEWznmgxUNWrFjB22+/zbp16/B4PEycOJFJkyY17K+vr2fevHl8+eWXDBs2jOuuu47HH3+ca6+9lnfffZetW7eilGqYMjfUNLqR1CNb6HH2OKo91Xj8noicL1hySXIkSQ1dCNFg6dKlXHjhhTgcDmJjYzn//POb7N+2bRuDBg1i2LBhAMydO5dvvvmG+Ph4HA4HN9xwA++88w5RUVFA6Gl0I6lHttAT7AmAUR5JdiYf8/mCN0WTHElsL9t+zOcTQkRYKy3p7sZisbB8+XK+/PJL/vOf//Doo4/y1VdfhZxGNzn52HNYUI9socfbAqNFIzS4KFhySXIk4fK5qPfWR+S8Qoiebdq0aXzwwQfU19dTXV3Nhx9+2GT/8OHD2bt3Lzt37gTg5ZdfZubMmVRXV1NRUcGcOXP429/+xrp164DQ0+hGUo9soUd6+H+wH3qSI6nhvA6LIyLnFkL0XFOmTOGCCy5g3LhxpKenM3bsWOLj4xv2OxwOnn/+eS6//HK8Xi9Tpkzh5ptvprS0lAsvvJD6+nq01jzyiDEm884772THjh1orZk9ezbjx4+PaLw9MqEHSy6RSujBkktaVBoAJfUlpEenR+TcQoie7Y477uCee+6htraWGTNmMGnSJG688caG/bNnz2bNmjVNXtOvX78jFpAGeOedI2ZOiagemdDj7HFABBN64KZoMIkX1xVH5LxCiJ7vpptuYvPmzdTX1zN37lwmTpzY1SGF1SMTeqTnRA/W0NOjjIReUtch09AIIXqg+fPnd3UIbdYjb4rGWmMxK3PEa+jBhC4tdCFET9QjE7pSijhbHJXuStw+Nx6/h2OZCyxYQ4+2RhNrjZWELoTokXpkQgej7PLOjneY9MokJr48kfPePY8XNr6A1++l3lvPX1b8haLatq2KFKyhW0wWkp3JktCFED1Sj6yhA1w89GLWFq5lZNJIUPD9we/566q/YrfYsZlsvLj5RQbGD+TyYZe3eq5gDd2szKQ4UyShCyF6pB6b0K8fc32T5zePu5lrPr6GVza/QrQ1GoBDNYfadK6GhG4yEvqW0i2RDVYI0SPFxMRQXV3d1WG0WY8tuTSnlOK6Udexv2p/Q0Jua0JvXHKRFroQoqfqNQkd4PSBp9Mvuh9RliiGJg5te0IP3BS1KKOGXuOpodZT25GhCiF6EK01d955J2PGjGHs2LG88cYbAOTn5zNjxgyOP/54xowZw+LFi/H5fMybN6/h2L/97W+dFmePLbmEYjFZ+PP0P1PlruKj3R+xuWRzm14X7LZoNplJdaYCRl/0KGtUh8UqhGi7B5c/yNbSrRE954
ikEfy/qf+vTce+8847rF27lnXr1lFcXMyUKVOYMWMG8+fP56yzzuLuu+/G5/NRW1vL2rVrOXDgABs3bgTokGlyw+lVLXSASemTmJU9i37R/ThUc6hN3Rmb3xQFKK6XsosQwrBkyRKuuuoqzGYz6enpzJw5kxUrVjBlyhSef/557rnnHjZs2EBsbCyDBw9m9+7d3HLLLXz66afExcV1Wpy9qoXeWHp0Om6/m9L60lan2A3W0JskdKmjC9FttLUl3dlmzJjBN998w0cffcS8efO47bbbuO6661i3bh0LFizgiSee4M033+S5557rlHh6XQs9qF90P6BtN0Z9fh8WZUEp1ZD8JaELIYKmT5/OG2+8gc/no6ioiG+++YapU6eyb98+0tPTufHGG/nxj3/M6tWrKS4uxu/3c+mll/KnP/2J1atXd1qcvbaFnhGdARgJfXTK6BaP9WovZpMZgER7IiZlkoQuhGhw8cUX89133zF+/HiUUjz00ENkZGTw4osv8vDDD2O1WomJieGll17iwIED/OhHP8Lv9wPw5z//udPi7P0JvfYQawvXMiJpRNg5zr1+L2ZlJHSzyUySI0kSuhCioQ+6UoqHH36Yhx9+uMn+uXPnMnfu3CNe15mt8sZ6bckl0Z6I3Wzn490fc+0n1/Lh7g/DHuvz+7CYDv9uy4jK4GD1wc4IUwghIqbXJnSlFBnRGawvXg/QYoL26aYJPTs2m7yqvA6PUQghIqnXJnQwWtpBhbWFYY9rXHIByIrNIr8mH4/f06HxCSFadiyzqPZ0R/O59+qEPixpGNmx2QxPHE5RXfiZF33a13BTFIwWuk/72jzSVAgReQ6Hg5KSkj6Z1LXWlJSU4HC0b23jXntTFOD2Sbdzy4Rb+M3i37Cvcl/Y40K10AFyq3LJjs1u9X2K64opry9nSOKQYw9aCAFAVlYWeXl5FBW1bRrs3sbhcJCVldWu17Sa0JVS2cBLQDqggae01v9odowC/gHMAWqBeVrrrrnN24jZZMZpcpLqTGVlwcqwx/n8Pqwma8PzYBJvax399oW3U1xXzEeXfHRsAQshGlitVgYNGtTVYfQobWmhe4HbtdarlVKxwCql1Oda68YTpZwDDA38PwF4PPCxW0iLSqPCVYHL58Juth+x36ubttBTnalYTdY2JfRVBatYXbi6YcpeIYToKq3W0LXW+cHWtta6CtgCZDY77ELgJW34HkhQSvWLeLRHKTXKmHAr3I1Rn79pDd1sMpMZk0ledesJ/ZkNzwBQ46lpmBNGCCG6QrtuiiqlcoAJwLJmuzKB3EbP8zgy6aOUukkptVIptbIz62JpzjSAsEvSNW+hg1F2ya3KDXl80KGaQyw5sIS0KOP8NZ6aCEQrhBBHp80JXSkVA7wN3Kq1rjyaN9NaP6W1nqy1npyamno0pzgqDS30uvAt9MY1dDBujOZV5bV4h73CVQHA6GRjaoFK91FdFiGEiIg2JXSllBUjmb+qtX4nxCEHgMbdQbIC27qFYAu6xRa66cgWerWnmnJXedjzun1ugIYJvarcVRGIVgghjk6rCT3Qg+VZYIvW+pEwh70PXKcMJwIVWuv8CMZ5TOJscdjN9rAJ3ef3HVFy6R/dH4D8mvCfhsvnAiDZIQldCNH12tLLZRpwLbBBKbU2sO23wAAArfUTwMcYXRZ3YnRb/FHEIz0GSilSnakU1BaE3O/1e7FbmvZ+Cba6y+rLwp7X7Tda6ME51CWhCyG6UqsJXWu9BFCtHKOBn0cqqI6QFpUWdrRo87lcABIdiQCU1peGPafHZ0wNEEz+UkMXQnSlXj30v7HUqNTwNXS/F4tqmtCTHElAywk9WHKRFroQojvoMwk9IyqD/Jp8fH7fEft8+sgaeow1BqvJSkl9SdhzBksuifZEFEpa6EKILtVnEvrQxKG4fC72VR05p0vzgUVg1N2THEkt19ADvVwcFgextlhpoQshulSfSejDk4YDsL10+xH7vPrIkgsYZZeWSi7BhG41WSWhCyG6XJ9J6IPjB2NRFraVbTtin9fvPeKmKAQSel3rNXS72U6cLU4SuhCiS/WZhG4z2xiUMIjtZUe20JvPhx7UWgs9uACGzWyTFroQosv1mYQOMDxxONtKj2yhhxpYBK0n9GALPVhykZuiQoiu1OcSekFtQcMcLEHhSi6JjkTqffXUempDns/tc2Mz2VBKSQtdCNHl+lRCH5Y0DOCIVrpXh6+hQ/i+6G6fu2F+dUnoQoiu1qcS+vBEo6fLuqJ1TbaHK7kER4C2lNCtZmOWxlhbLLXeWpkTXQjRZfpUQk92JjMpfRL/2f6fJom3pZuiED6hu3wubGYbYEwABlDtro502EII0SZ9KqEDXDvyWg7WHOTr3K8btoUa+g+HE3q4wUVuf9OSC8jwfyFE1+lzCX1W9iwyYzJ5efPLAGitQ07OBYcn6Ao3/N/tczcsjBFrNRJ6pUd6ugghukafS+hmk5nzjzufNYVrcPlc+LQxt0uoGrrT4sRpcbb5pihIC10I0XX6XEKHw4tXFNYWHk7oIWro0HJfdLfP3VBDDyb0Spe00IUQXaNPJvT0qHTAWJIuOPtiqBo6GKsRtVRDb35TVAYXCSG6Sp9M6A2LRtcWNgzfD1VDB6OO3mIvF5OR0FOcKZiUqcUl64QQoiP1yYQeXDS6zSWXMBN0Na6hW81W+kX3I68qrwMiFkKI1vXJhB5cNLqwtrCh5BLqpigcrqEbq+w11XhgEUBWbJYkdCFEl+mTCV0pRVpUGoV1h1vo4UouSY4kvNobsjbeuB86QFZMFrlVuR0TtBBCtKJPJnSAVKexxmiwhh6uhR7six7qxmhwcq6g7NhsylxlMlpUCNEl+mxCT49Kb1JyCddCT3aEn8+lcbdFMEouAHnVUnYRQnS+PpvQU6NSKaorapjTZdG2EpbvOTJpJznDz+fSeC4XMFrogNTRhRBdos8m9LSoNOq8dZS7ygH4dGMh/1l1ZP073ARdWms8fk+TGnowoedW5fLlvi8pqQs9ZYAQQnSEPp3QgYZ+436/iYo6zxHHJdqNGnrzhN54+bmgWFss8fZ43t/1PrcuvJU3t73ZIbELIUQofT6hH6w+CIDPr0ImdKvZWF6ueUJvvPxcY9kx2ews3wnAvqp9EY9bCCHC6bsJ3dm8ha4orz0yoYNxY7R5Qnf73ABNSi5wuOzitDjJrZQujEKIzhO6a0cfEBz+H0zoXp+JyhAtdAg9QVcwoTcuuQCcmXMmFpMFm9nGl/u/jHTYQggRVp9toTssDhLtiYcHAunQNXQIPfzf7Q+d0E8feDr3T7+fnLgcyl3lRyxILYQQHaXVhK6Uek4pVaiU2hhm/yylVIVSam3g/x8iH2bHyIzJbKiha22mxu3D4/MfcVyiI5EyV9OBRcEaeuOBRY1lx0kXRiFE52pLC/0F4OxWjlmstT4+8P/eYw+rc2TGZjYM/Q9eilCt9CRHEmX1ZQ2DkAA8PuO45jX0oGAtfX/V/ghGLIQQ4bWa0LXW3wChpxvs4TJjMg8/0S0ndI1u6LMOjXq5mK1HHA/GvC6Az
O0ihOg0kaqhn6SUWqeU+kQpNTrcQUqpm5RSK5VSK4uKiiL01kevSUJvqYUeYrRosIYeroUeZY0izZnG/kppoQshOkckEvpqYKDWejzwL+C9cAdqrZ/SWk/WWk9OTU2NwFsfm2ArGjjcQg/RdTEjKgNoWg9v6OUSpoYORh1dWuhCiM5yzAlda12pta4OPP4YsCqlUo45sjDW5ZZz51vrKK52HfO5+sf0b3istTHbYqgW+rDEYSgUW0u3NmwL122xsQGxA9hetp1/rP6H9EkXQnS4Y07oSqkMpZQKPJ4aOGeHTWJSWOXirVV5HCyvO+Zz9Y/pj0IZT1qooUdZo8iJz2FL6ZaGbQ29XFpI6CdnnoxSimc2PMPzm54/5niFEKIlbem2+BrwHTBcKZWnlLpBKXWzUurmwCGXARuVUuuAfwJX6lDL+0RISoyRQCPRQreZbQ0DjIKXItxo0ZFJI5sk9HAjRRs7O+dsvr3qW07odwIbi0P2+hRCiIhpdaSo1vqqVvY/CjwasYhakRJjJNDiKndEzpcVk0VhbWGLLXSAUcmj+HjPx5TWl5LkSGpTySVoTPIYXtz0Ii6fq8VfAEIIcSx63EjR1FgjIRZFoIUOh3u6tFRDB6OFDrC1xKijB3u5NJ+cK5SxKWPxam+TGrwQQkRaj0voDquZWLuFoqoIJfTYYNfFllvoI5JHALC5dDNwuIbelhb36BSjJ2ew7FLtrmbB3gUhF54WQoij1eMSOkBKrD0iNXSAS4ZcwrVDfwV+BwAVdaFLOXG2OLJisthcYiT04EjRtpRc0qPSSXGmsKl4EwCvbHmFOxbdwfyt8yPxKQghBNBTE3qMLWIJvV9MP2ZnXgJAtM0ctoUOMD5tPKsKVuHXflw+FxaTBZNq/RIqpRiTPIaNJUYLPTgL4yMrH2Fb6bYIfBZCCNFjE7o9YiUXAI/PKH0kxdhaTOjT+k+jtL6ULSVbcPvdLQ4qam5s6lj2VOxhyYElbC3dyg1jbiDGFsPj6x4/5viFEAJ6cEIvro5MLxcAn99I6MnR9rDdFgGmZU5DoVh8YDFun7tdPVYuGXoJsdZYblt4GwCXDruUWdmzWH5oeZNJv4QQ4mj12IReUefB7T1yqtuj4W1I6DZcXj/1ntAJNsmRxKjkUSw9sBS3zx12Yq6QMTtTuGXiLdR56xiRNILs2GymZkylyl3FtjIpuwghjl3PTOixRqmjpCYyZRdvYA705MCgpZbKLqdknsL64vXk1+S3u0/5FcOu4Jycc7hu1HUATM2YCsDy/OVHE7YQQjTRIxN6aoQHFwVb6BlxRk+Xlurzpw88HYDv879vVw0dwGwy89DMhzj/uPMBYxm8QfGDWHZo2dGELYQQTfTIhJ7SMLioPiLnC9bQ+yU4jfO2kNBHJI1g/rnzuXzY5Vwy9JJjfu+pGVNZXbAajz/8XwVB3x38jre3v33M7ymE6J165CLRHdVC7xdvtNALq1r+RTE6eTSjTwo77Xu7nNz/ZN7Y9gYLcxdyxsAzwh63t2Ivt359Kx6/h3MGnUOUNSoi7y+E6D16Zgs9JrLD/4M19H7xRgu9sDJyXSJbMzNrJjlxOfx77b9x+VysOLQCr9/bsH9PxR6e2fAMv/r6V7h9bjx+DysLVlLrqeVA9YFOi1MI0f31yBa602Ym2mZm8Y4iVu8rIzHaxpj+cZw2Ip0ByUbLtbCyntRYO4GZfVsUbKFH2czEO60URrCPe2vMJjM/O/5n/PqbX3PO2+dQVFfE0MSh/Hbqb8mKzWLep/MaJgR7ZNYj/PqbX7P0wFI+2PUBSw8s5YvLv5DWuhAC6KEJHYw6+ve7S0mLtaOB/6zK476PtvDqj08gzmHl/EeX8PR1kzhtRHqr5wrW0K1mE2mx9lZLLpF2Vs5ZvLDpBcrqy/ifSf/D61tf50cLfkSSIwmXz8V7F77HcQnHATAlYwqf7PmEclc5Gs2CvQu4eOjFnRqvEKJ76rEJfd7JORRXu/jFqUNx2szsLa7hkse/5cVv95IWa8fn12wvqG5TQg+20M0mRVqcvVNb6AAmZeLlc17GpExYTBauGnEVz2x4hje3vcmfT/lzQzIHY3DT4gOLibJEkehI5N2d7zZJ6Fpr6rx10moXog/qkTV0gB9NG8SdZ43AaTOmvc1JiebSiZl8vrmAd9YYteW2rmoUrKFbTIq0WEen1tCDbGYbFpPx+9VpcXLLhFtYfOViTh1wapPjpmdOB+CqEVdxxfArWFO4hvd2vseWEmPxjSfWPcHst2ZT6a7s3E9ACNHlemwLPZQfTMnm6cV7qKr3YjOb2pzQgyUXs1mRFmvME6O1blP9vbMNiBvA6+e+zrDEYVS4K3hszWP8funvUSjmjZ7HS5tfwqd9fH/we87MOROA0vpSHGaHtNqF6OV6bAs9lCFpsZwwKImsRCenDE3hQHnbauHBkovVZCI11o7b529xtGhXG50yGqvZSoozhQ8u/oA3znuDaZnTeH7T8yQ7kom1xbLkwBLAKMFc89E13PPdPV0btBCiw/WqFjrAv6+ZiMvr54lFu1i5t7RNr/E1qaEH+6K7SIhq30jQrtA/pj/9Y/rzj1P/wb/X/ptZ2bN4efPLLD2wFK01m0o2kVedR0l9CbWeWjaVbCLGGsPI5JFdHboQIsJ6VQsdIDnGTv8EJ/0TnFTWe6mqb72l7WlSQzf6uHdFHf1Y2Mw2bp10K8enHc8pmadQWFfI9rLtDXOv13nr+GjPR9zy1S08sPyBLo5WCNERel1CD+ofGMafX9F62cXn15gUmBon9E7uuhhJp2SeAsCHuz/ky/1fMjl9Mgn2BB5Y9gA1nhq2lm7FryMzU6UQovvotQk9M8EonRxow41Rr19jMRmXonHJpadKjUrlnJxzeGHTC+yp2MMZA8/gtAGn4fa7SbAnUOutZX/l/q4OUwgRYb02oQdb6G3p6eL1+TGbjB4tMXYLUTZzjyu5NHf/9Pu5YtgVxNpimT1gNhcPuZjMmEz+eNIfAdhSuqWLIxRCRFqvTehpsQ7MJsX2Q1Xc/PIqNh2sCHus0UI/3EUxI87Bocq2dXnsriwmC78/6fcs+sEi0qPTOT7teD699FNmZs/EarI29FsXQvQeva6XS5DZpMiIc/Dqsv14/Zrx2QmM7h8f8lifX2MxH07oWUlR5Jb27IQeZDVZj3g+NHEom0s3d1FEQoiO0vNa6K5q2Pxf0LrVQzMTnA19zAsqw9/k9Po1ZtPhS5Gd6CS3rPbYY+2mRiaNZGvpVnQbrqEQoufoeQl9y/vw5nWQt7LVQwenRhPnsJAR52g5ofv8TUouA5KiKK/1dOvBRcdiVPIoKlwVHKw52NWhCCEiqOcl9BHngtkGm95p9dDfnDOSj381nePSotvQQm+a0AFyS3tnK3186nhA1jIVorfpeQndEQ9DzoBN74G/5b7U8VFWshKjSI9zUNBCrxWfX2NtVEPPDiT0vF5adhmWOIyM6AwW5i7s6lCEEBHUakJXSj2nlCpUSm0Ms18ppf6plNqplFqvlJoY+TCbGXMJVB2E3O/bdHh6nIPCqnr8/tA1
4+Yt9GBC39+WFrrPw/df/Ie3332rTbF0B0opZmbN5Lv876j39twBVEKIptrSy+UF4FHgpTD7zwGGBv6fADwe+Nhxhp0NFid8fT+c/r+gfcZNUnss7FsK9RWQMQ76jQelmFL/LWt0CeUFo0jKGAjNZlF0uoq53v0feOEvUFtK/KgLudFRyPCNn0C/i+DAKtj4Ngw4AQaeAlFJ4EiA3Qvxf/9vTqwLzBkTtRbGXgYmq1EWciaA1Qm1JVBTDN56SB4KMWmHY9AafG6w2Dv0kjV3avapvLHtDZYfWs6MrBmd+t5CiI6h2tLTQSmVA3yotR4TYt+TwEKt9WuB59uAWVrr/JbOOXnyZL1yZes3NsP67t/w5b3gbWf3QosDlBksNsicBJ563PuXY9ZezJkTjP37lgLgR2EicH2yT4SCjeCubnK6rXHTeKR4KtPNm7jW/FnbYnAmQmIO+L1QkQd1ZZAxFhIGgtkKk2+AjDHGL5Kk44xjIzyVr9vnZvrr0zl38Ln84aQ/RPTcQoiOo5RapbWeHGpfJPqhZwK5jZ7nBbYdkdCVUjcBNwEMGDDg2N71pJ/B+Cth5xdGa1kpqC2FrMkQnQqHNkD+OvC52W4fw73vruT3J9kYbisxXu+qNHrK2GL4JuYc3racy+M3/sDYV3GAu99dx6oiM59eZIaYVKO176k/nIDryiizpnLe0/nYLSY+c0/h8hvuwFFfAn4P+DxGPJ5aI57oVDCZoXgHFG2B8lwjeWdOgqgUyF0GpXugphA2vQvKBMH5VuKzYeQFkDnReOyIh5ShxvmOks1sY1rmNBblLsJ/oh+T6nm3U4QQTXXqwCKt9VPAU2C00I/5hFFJMO6K0Ptyphn/gZjyOpb4XaxOH8vwqUf+Innx2WXUuLyHN8RnEpNeye4de/EfdyamYH3d6oCUIQ2HHcqvxOs/yEkDE1m8o5iyxHH0i3e2HPOQ2S3v99TByuehrhQGngylu2HH57D8KeMXRVB0Koy5DKbfZpRwjsKp2afy+b7P2VKyhdEpo4/qHEKI7iMSCf0AkN3oeVZgW7eRGmtHKTgUZuZFj8/fMDlX0ICkKNw+P4cq6xvmhWnO7fU3nB+gos7TekJvjdVp/PURdNxpMOXHxl8Hpbuh8iDUFMH2T2HF07D6JTj3r3D8Vcbxfr9RFnLEtfpW0zOnY1Imvs79WhK6EL1AJP7Ofh+4LtDb5USgorX6eWezmk0kR9vDTonbfOg/QL94Y9bFlvqvuwIJPS3WOLaitgMHIlkdkD4Khp5uJO8rXoSfLzfKMO/dDJ//EXZ8Ac+fA38dDgWtD+1PcCQwIW2CdF8UopdoS7fF14DvgOFKqTyl1A1KqZuVUjcHDvkY2A3sBJ4GfhbmVF0qI94etoXevNsiQFK00eourXGHPae7IaEfbqF3quTj4IfvwLgfwNK/w6uXQtFW48buuz8Bb/jYg2ZlzWJb2baGFY6EED1XqyUXrfVVrezXwM8jFlEHSY91cDBcQvc1nW0RIDnaWH6upDp8UnR5fUDTkkuns9jgkqfg1LuhaBv0Px5yl8Mb18B7PzXKMc6EsC8/e9DZPL/peW7+4mbOGHgGj8x6pNNCF0JEVp/p2pAe7+BQRegujs0n5wJIjgkk9BZa6K5mLfTKem/YYztc4kAYdqZxg3TkeXDq74zpEf4+Dh47AT75f1BTcsTLMqIz+PTST/nhyB/y+b7P2Va6rQuCF0JEQp9J6ENSYyir9YSso/v8/iZD/wGibBacVjMl1eGnDAiWXJJjurCFHs7MO+HHX8DoC43+7cufhn9OgG//Bd6mn5PT4uTm8TfjtDiZv3V+FwUshDhWvXY+9OZG9zd6fWw6WEnacEeTfaFq6GC00ltuoRsllyibmViHhcrulNDB6OOeOcl4XLgVPv89fPY7WPEMTL/D6A2zZzEcXEP8Zc9y3uDzeH/X+xTVFuG0OPnDSX8g3h56DnkhRPfTZ1roIwMJffPByiP2haqhg1FHbymhB1voNouJeKe1e7XQm0sbAde8ZdxEtUbB+7+AT+8yBmBpH7z3M64Zcgl+7Wd3xW4W5i7khx//kLyqvK6OXAjRRn2mhR7nsJKd5GRz/pEJ3Reihg5GKaUt3RbtPSGhBw2ZDYNmQv5aY9RpbLrRSn/xPI779gkWX/YlUY4EVh/8jl9+/Ut++NHVPHb649JPXYgeoM+00AFG94sP3UIPUUMHSIq2tdht0dWTWuiNmS3GFAmx6cbzQdNh2q9gzctEPz8H9dV9THrvNl7etwd7bSk/XnA9eyr2dG3MQohW9amEPqp/HHtLaqh2Ne2N4muphl7tDts/uyGhm03EOXpQQg/ljHvhyteM+WOW/B3qyhh85kM8X+7G6q7h1s9/SnWzicm6yq7yXRTVFnV1GEJ0O30qoY/uH4fWsLVZ2cXTQg3d7fMf8QsgyOX1YbOYUEr1rBZ6OCPmwM+Xwe8K4bbNMOUG+s/7lL9U+dlXncfl757Ptwe/7eooufmLm7lj0R1dHYYQ3U6fSuijAjdG1+aWN9luDP0PUUMPjBYNN7jI7fVjtxivi4/qBQk9yGw5PJNj0mCmXvMBT1eBuSqfn3z+E3707sU8tuZR3t7+Nh6fB601tZ7OWd2pqLaIQzWHWF24mk0lmzrlPYXoKfpUQs+IczC6fxxvrsxtUkbx+v2hW+gNg4tC90V3ef3YLUbii3dacXv91Ht8HRB5F0s+jik/WcbbQ+ZxV6WLA6VbeWL9k9zz3T1c8d4FXPrfiznl9VP4Yt8XHR7KltItDY9f3fxqh7+fED1Jn0roSinmnZzD9oJqvt11eNRk2Bp6O1rocU4rQPfrix4ptmjss+7imp9u4LOp97HOPp5/FZXjLd2DtXAzQ+pquGPhbTy25lF2lu3ssHlhNpcYk45d5Mzmk72fUFxX3CHvI0RP1KcSOsD54/uTHG3j+aV7AdBah6+htzL839W45BJI6L2m7BKOxQ5jL8N05SvM+tk6Phh7K2+M/jnPx01kWm0tT6x/kovfv5hz50/jD5/9lJc3vkCtpxatNfsr9+PxH9v12Vy4jhyPh+t3LMPr9/LhZhnZKkRQn+mHHuSwmrlkYibPLtmDx+fHFFjaLVQNPSkwQVe4rosuj3FTFPpQQm8sKglONCbdjNa/5LEVz1Cw50sWlW/na38hi1yLeDd/Ce9seI706H4sLdtMkiWKi3LO5dqJPyPFmdLut9xStJ4JLjeDZv8f49Y9wn+3vcncibegIrxEnxA9UZ9L6ACDU2PwayiscpESaIWHKrk4rGZi7BaKw8zn4vb1wRZ6OErB1BtJn3ojVwBX1BTDvm/5ds1T/L+63eTXl/DTiiq22Wp5YcebvLrzbS4beBbnj52LRVkoqC1gTcFqluYuYlrWdG6e8HPs5sMLZ28r3YZGc8hTySi/GabeyIXb3+Q+TxFbijYyKm1s133uQnQTfTKhZ8QZc7kcqqgjIZCIQ5VcoOXBRS6Pv2+30FsSnQKjLuDkURfwwdYP0HVlJI66GCoOsHfZozy7/1Pe2Psxr+77pOElZg3D3G6eqdjJ61v
mo5SJQdH9sJmsrCw/PAvkyLTjwWTmrAk38+DKe3lt5SPcN+f5Lvgkhehe+mZCjw8mdBdD0oybd6Fa6HB4cFEobp8fp9XccBwYrX7RVMKI8w8/SRtBzvmPcl9tKT9b/jgb9y9E1xSTarIzOG4g8UNO5Ns1z/E5xZiBrTXl5JvN3F5ZRb7Fwla7lbHjLgQgftTF/GDpPbxctJIB3z/Aj6begcXUJ7+lhQD6akIPttAr6/H5jYRuDVFDB6OnS15Z6D7WLq+voYUf57CSEGVlf2nn9Mfu8aKS6Dfrbvpx9xG7Tp76E04u2Gj0ha8rA3cNpI4wFsrevQiGzzEONFu5/awnKVlwI//c9iqPb5vP9ORx/O60v5Ealdri23v9Xqrd1Xi1F6fFSbQ1uiM+SyE6VZ9M6AlRVmwWEwWV9Xj9xvD9sC30aBvr88pD7nN7D5dcAAYmRZErCf3Y2aIge+qR28/96xGbzIOm86fLP2TWgv9hfdlW/uNfw0Vvnc60+KGMzZzGmAEzGZE8Eqfl8OLd3x5Yyl0Lb6fMW9OwbWbcEP545pOkRqc1Ob/X72Vb2TZGJY2SG6+i2+uTCV0pRUacg0MV9Xh9Rgs9XA09OcaooWutj/iBbtxtESA7KYr1eRUdF7gIyZo8hHOu/oBzPPVc8e1feXTbfFaXbOKTim2w+TnMwBB7MiPiBlFRX8Y3lbsY7HFzU1UNVouDApuTl/zbufTts3h2zqsMTRkFgNvn5vZFt7MwdyFXjbiKu6behUn1uZ6+ogfpkwkdjLJL45JLuBZ6UrQNr19TWeclPsraZF/jm6IAA5Oj+GTjIbw+f8hukKKDWR0Mmnk3f53xWyjZRdGOT9i4fyEbSrewqTaPJTWFxPv9XOozcfukO4keezk44sHv47wv7ubG3P9y00fX8P9OuQ8Pmte2vMqGkk2cHJPDa1tfo7L6EPed+lesJmvrsQjRBfpsQk+Pd7A+rxxvKzX0lMDycsU1riMSutFt0dzwfEBSFD6/5mB5PQOSozooctEqpSBlCKkpt3DqSbdwqtZQUwRl+yAm1ZgH3nT464bJzOAzH+CpxQlcv+Ml7lzyGwAyfX4eKCllzp79PBMfxz/5mop3L+WS0dcyNusUMmL6hfzLrb382k9exX7KyncxPmf2MZ1L9G19NqFnxNn5rKIer6/lGnrjwUXHNbvP5vL4mpRcBiQZN9b2l9ZKQu9OlDIWz45Ja/Gw46bfxSdJw9i/6H485fsYnT0d07nzIGMMN7qqiPv0Fh6o2s2SZffCMhhhTeSgv54UZzL/N/NhxqSMaXdotZ5arn/vYjbVHgTguerfMmXMVUfzWQrRhxN6vBOX109xoEtiSzV0IORi0W5f05JLMInvK61hsicRe2BqXdFzRI2+hBEjL4Sa4sMLgAT8YO7XnLf9E/YWrGHp3s/5rjKf2V4v37mq+OGHVzHFmc6IhKHYzQ5OHXklozNPaPG9tNb87sNr2VJzgDu8Tp401/LWhmcloYuj1ncTeqDrYrBLYriad3CCruJmfdF9fmMOmMYll4w4BzazieV7Snngk63cPWckV04d0BHhi45kMh+RzIPbo0ecx+gR5zF65u+5qaYYyvdTsfsrnt3xFosr85hfewivgicPfMEJ5gTOG3Yp6UlD8bgq8biryEo/HpSJ57+7n++qdlOKj9tNqcydt4D8N8/lDVc+pVUHSYrt3+mf9rHyaz8Hqw+SFZvV1aH0WX03occbifpAeR3Q8khROHI+l8YLRAeZTYqsRCf/XWv8+bz+QAVXRjZs0Z1Ep0B0CvGZE7lt+h3cVlcO5fuorinkjdWP8lbVDn6/5dnQL/X7me2zcnLyeObMeRIsNi4bewOvrv4/Xvn2T9xy5mM97q+7t3e8zZ++/xPz58yXNWi7SJ9N6OmBFvqBMiOhh6uh2ywm4hyWI0ou7kYLRDc2IDmK3cVG/+a9xTWIPsSZAM4EYoAbhpzB9XXlbFn9LHXeGuz2eMxWJ7sK1lJRW8x5U35J/MBTmrx8yOgrmL7s/3j60GK+fXUap/Y7CYfFSW75Lk4ePIcZo6/CrMxszV2Cx1vH2EFnNEn6+dX5rC5czanZpxJlbd89nBpPDS9tfonLh11+VJOmASzYswC/9vPEuif41+x/HdU5xLHpswk9LdaBUpBX1nILHSA5xn7EFLour7GQha1ZQh+eHsvKvWVMGJDA7iJJ6H2ZciYwatrtTbaNbOkFJhP/uOAtPlj4W16p2MKjeZ8B4PD7eaN8I5ZVD+EEqgLfqhOXRDE+/jjSYzJJSR/H/eufoNRTSZTZzhWDL+CS0deyt3A9abHZjEqf0GKL/9+r/s5L215nWe43PDPn5bBTKCxc9QSvbH6JEl89d5/wWyaPvAyAsvoyVhasJMWZwsK8hWwu2cyo5FFtvVQiQvpsQrdZTKTF2huG6rfUbzw5+sj5XFxhWui/On0oc0/O4c2VuSzZWUy9x4fDakaItrCmjeCSK97hEq+Lij0L8flcxKWO5ptV/2Z98QYqfC7GJY2kzlvHq8UreKVsPZ7yDZD3KVkeD78vLefz6Che3P4mL+x4q+G8yVjIMDtJMjvIsMVz8fibOG7ADA6V76IazfxtbzDM5WZVyUZ++8EPuWj8j5mcPQOb2dZwjmc++Sn/KFxCf68fE3DDsnv4df4KrjntQRbmLsSnfTxYb+dWZeHP3/yG5y74D1az9NnvTG1K6Eqps4F/AGbgGa31A832zwMeBg4ENj2qtX4mgnF2iKzEKFbvLwPCl1zAqKPvK2k6pL8hoTdL1lE2C1E2C4NSotEacktrGZoeG+HIRa9nsRM/9KyGp6ed+VdOa3bI1VqjXdUU5K9k+96vGJ84gviU4ZzurefGnZ+wsmQjw5JHk1u2nRXl2ynRFZSoMlZZCnhr6a9RSzQ60GqP9fl5uv/ZPHdoCa+UbeSTRf9DjFaMNEdT7/dSrb3sUV7mqFj+75pPcdUW89v/XsEDuR+z7+0drPeU0d/rZUr5Pv5gdnOn3s19H83lypN+y4D4HGJsMZ148fquVhO6UsoMPAacAeQBK5RS72utNzc79A2t9S86IMYOk53oZNU+I6G3VnIJJv6ghpuiYVr2A5ONPul7SyShiw6iFMoRS8agU8kYdGqTXUNyTmFI4PFE4MJG+2pK9/DfpfdR4Sony55MxcFVjIzuT9KcR7hD+/nZnoWs3Po2nxevZZ+7hjhlIcNk5dyoQfz4wlcw26KwOOL469Vfc8+b5/Ja9Q4AfmZKRv38I84GNr8xh+fLNvDux1dhRTEtbghn5JxNdkx/Ssp3k5N9CoPSxmM2tf+vV7/2U1JX0uoEbEdzXq31UcXUXbSlhT4V2Km13g2glHod4/ujeULvcbISD984aqmFnhyYE93v15gCxwVr6HZr6ISeE+iTLjdGRXcTnTSIq89/LsxeM1FDzmDGkDOY0cp5LPZY/vTDRdx+aC02s43olJFgMn4e/ufqrzht5WOU5K9hVeEaPvduYWHljsMv3vw0URqGW2Lx+DzU+D1oBfXaTy1+6hWkahMZ5ijMJjMJZgf9rX
EkxfTjw+I1bPdVM1Q5mRV7HKNTxpCSOoqElOGgNd+seQq7xcEF036H3RaDX/vxaz8+7aOweCvb9nzBwH6TGJh5IvmFG4lPyGFd6Sb+97v/pbiumFhrDCfFDeHcnLOZNfqqNs3fU1SxH7e3jszk4W37InSQtiT0TCC30fM8INSIiUuVUjOA7cD/aK1zmx+glLoJuAlgwICu75+dlXh4Br5wQ//BGFzk11Be52noxthQcgnzuoQoGwlRVvaWSEIXvZhSJPabcORmq53jT7oNgNl+P3ccWsemvV9R5qkmOS6bXXlL2VC6he31FcSbbPS3OFEaHBYrUWYHDrOdQ/WlFHir8Wg/2ylnkakAV81OBni8/MycxLf+Sp6r2ICvciPsPjK0v8//CB9QG6qxtv3FIzYNx84VNT7y/YdYXFfBZyVrSV7xAE4U0VoRj6IcTYnyU600qdpEvLJQo73sNRlTiGT5TQy0RJNmiyfRHk9BXQnl3lqSrdEk2xKIt8ejlImxWacwZdy1x3TpQ4nUTdEPgNe01i6l1E+AF+GIkh9a66eApwAmT57cMcvCt0ObW+iB+VxKql0NCb2h22KYFjoYZRdJ6KLPM5kw9Z/A2P6HE//oCddzQTtPo31eKkq2Ehs3ELMjlp8CdbWl7N7/DaXFWyivPkC9p44Thl9CQcV+3t/2OtFmB/G2WCzKhAlFgiOJYZknsTN/BYeqD9I/NouKwo2YKw5wuSUeW8ZE6Dceb9oIvtj1MQsLVgCaGnyUax/9lYWxJgexZjsF7gqqtIcMk4OL43Kwm2ysLNtKvqeaHZ4KSusUKX5NImZ2ecopqT+IJ3DP4vq64i5L6AeA7EbPszh88xMArXVJo6fPAA8de2gdLzvpcAu9pRr64VGldQ31cFdDDT18vS0nOYrvdpWwen8Z4zLjZQZGIY6BMltISGs6X44zKonRIy4CLmqyfQAwZcpPw55r7PjrWnwvC3D2cbM5u50xXtPosXbXoqxOYy4hQHs91NcWgdZY2jlOoK3akmFWAEOVUoOUUjbgSuD9xgcopfo1enoBsCVyIXacfvHO4LVusYU+qn8cSsG6RgtdtKWFPqZ/PIVVLi7597c8+vXOiMQshOgZlC2qIZkDKIsVZ1x/nPGZWKMSO+Q9W03oWmsv8AtgAUaiflNrvUkpda9SKvhX0y+VUpuUUuuAXwLzOiTaCLNZTA2t75Zq6DF2C0NSY5osXtFwU9QS/nU/nj6IBbfOYEhaTENvGiGE6ChtqqFrrT8GPm627Q+NHv8G+E1kQ+sc2YlR5FfUt9hCBxiXlcCi7YUN81+7Qszl0pxSiuEZsUzJSeTjDYciMne2EEKE0+eLusGeLi3V0AGOz46nuNrdMJnX4blcWu+zOjYzgYo6jywgLYToUJLQgwnd3HoLHWgou4SbyyX0a+ObvLai1sN/VuXh93d5Rx8hRC/S5xP6JROzuPOs4cTYW64+jegXi81sYl1uORB+tsVQhqUbr91wwEjoz3+7hzveWsdzS/ccW/BCCNFIn0/oOSnR/PzUIa3Wtu0WMyP7xzVMAeDy+jGp1ks1YLTiR/aPY32gl8yXWwoBeOjTbWw+WHlsn4AQQgT0+YTeHtOOS2b1/nIq6jy4vcbyc229yTkuM56NByrJLa1lw4EKbpw+iBiHhX9+uaP1FwshRBtIQm+H00ak4fNrFu8owuX1t+mGaNDpo9Kpdnn58YsrAbh8cjanjUhj2Z4SqaULISJCEno7TBiQSEKUla+2FuLy+tp0QzRo5rBULp6QybaCKrKTnAxNi+GEQUmU1XrYUVjdgVELIfoKSejtYDYpZgxNZdG2Iuo9/jbdEG3snvNHk5ng5ILx/VFKceLgZACW7Slp5ZWGeo+P8lp36wcKIfokSejtdNqINEpq3Hy9rbBdLXSA+CgrC++cxR1nGlNsZiU66R/vYNnu0lZf6/drfvziSi58bClaS4lGCHEkSejtdPaYDC6flIXb6yczwdn6C5qxmg/fSFVKccLgZJbtKWk1Sb+ybB9Ldhazr6SWbQVV5FfU8f3utrXsi6td1Li8gNHd0uvztztuIUT3Jwm9nRxWMw9fPp41fziD5+ZNOebznTg4ieJqNyvDzPVSWuPmr59t4/6PtzBhQAIAC7cV8dt3NvDDZ5aR28roU601l/z7W25/cx1aa656+nt+/Z/1xxy3EKL7kYR+lOwWc4sTerXVueP6kx5n574PN1Ne6+b15ft5c0Uuu4qqcXv9XPPMMh79eienDEnlyR9OYkRGLG+uyGXh9iK8fs3ji3axaHsR936wmcp6Dx+uP8htb65tGMm68UAl+0tr+XxLAZ9uPMSqfWV8s6NIyjZC9EKRWuBCHKUYu4XfnDOSW99Yy0l//oo6j5GIHVYTM4amsiW/kievncRZozMAmDk8lScX7cZiUswelcZbK3N5Y0UuPr/mw/UHKaxyATB5YBJXnzCAzzYfwqTA59fc8dY6AIqr3eSW1jEguWPmZBZCdA1poXcDFx7fn9NHpjMuK553f3YyX9w2kyFpMXy2uYBLJ2Y1JHOAWcPSAJgzth+/O3cUJqU4cXASz/9oChaT4vJJWYzPiuffC3fi8fn5bFMBUwclMW1IMjVuH1NyjHmYmy96LYTo+VRX/ek9efJkvXLlyi55756g2uXl3dV5XDQhk1iHtWG71+fnoQXbuGrqAAalRFNc7SIxyobZpBqm5/1ySwE3vLiS88b148P1+fz+vFHkJEfx01dX89+fT+PSx7/l8klZ/O+FY1qIQAjRHSmlVmmtJ4faJyWXbirGbuHak3KO2G4xm/jtnJENz1MC650CDb1nThuRxgXj+/PxhnysZsWZo9LJTopi/R/PxGE1Mz4rgdX7yzv6UxBCdDJJ6L2QUop/XjWBhy4bR0Wdh/TAqkwOqzFVwcSBCTy5aDd1bh9OW9unLxBCdG9SQ+/FHFZzQzJvbNLARLx+zZpcqaML0ZtIQu+Dpg5KxmpWLNpe1NWhCCEiSBJ6HxRjtzB5YBKLtklCF6I3kYTeR80ansrWQ1Ucqqjv6lCEEBEiCb2Pmjk8FYBF2wu7OBIhRKRIQu+jhqfHkhHnkDq6EL2IJPQ+SinFzGGpLN5RLLMvCtFLSELvw2YNT6Wq3sua3PKuDkUIEQGS0Puwk4ekYDYpFm4rxO/X1AcmBhNC9EwyUrQPi3damTQgkc83F7B0Zwl1bh/v3zKtXYtfCyG6D2mh93Ezh6eyvaCadXnlbCuo4rkleymtcZNfUdfVoYW1s7CK4mpXV4chmvH5NTsLq7o6jD5NEnofd/aYDJKjbTx46TjOGJXO37/Yzkl//pKz/76Ywqru10f9QHkd5/9rKT98ZpnczO1mnlm8m9Mf+YaVe1tfI1d0jDYldKXU2UqpbUqpnUqpu0Lstyul3gjsX6aUyol4pKJDHJcaw8rfnc4Vk7P5w3mjyEp0cu64ftR5fPzxv5u6Orwj3PP+Jtw+P1sPVTF/+f4Of7+Ve0u58611HCzvvn+xdAcur49nl+wB4IFPtvbIFbFcXh8+f8+Lu7FWa+hKKTPwGHAGk
AesUEq9r7Xe3OiwG4AyrfUQpdSVwIPADzoiYBF5wWl3s5Oi+PL2WYCR6B9esI0T7/+ShCgrEwYkkpMcRUqMnYQoK1sPVbG3uIYR/eLIL69je2E1OclRDEuP5bjUGGIdFlxeP9UuL5kJDspqPazLLSc9zkFarDHlb4zDgs+v2V9aS2WdlxqXl1q3D7vVRJzDSrzTSpzTQo3Lx+IdRWw8WMm63HLuOmcEi3cU8fCCbewvqWXiwERGZMRSVuvGpBSDU2LQGD+YTpuZGpePWrcXk1KYlKKgsp4Ve0sxKUVGvIPBqdGYlMLl8eO0mVm4rZClO4txWM18trkAn1+zdGcxD142jpH94jArhdevqahz89XWQjw+zZyx/UiPazSVMarR9Q11zY2PtS4f2wqqiHdaGZ4eS1W9F7/WJERZG74ubVHt8vLnj7fw/e4Szhydwekj0xnVLw4VWK1KAw6LCUuzZRO9Pj8r9pbx+eYC1uWVc/64flw+ORuvXxPnsLQ5hvfXGqtlBefgf215LldMzqLG5aOy3oPNYiIlxo7Z1PbPKZx6j4/CShf9ExxHfD5Ho7Lew8vf7ePRr3ZiMStmDE1l7sk5TMlJbNfXoDtodYELpdRJwD1a67MCz38DoLX+c6NjFgSO+U4pZQEOAam6hZPLAhfdm8fn57Gvd3KgrI6CKhdr9pVR5fI2OSYp2kZpjRubxcRxqTHkltZS3eyYSImxWxibGc/EgQncevowDpTV8eu317M2txy3N/Kll+NSo6n3+DlhcBI/mJzNz+ev6fC6vc1iavhcLCaFKZD8FAR+GRm/fJUyngc/mhTUuX3UenxMGpDImtzysC1Nm9mEw2rCaTNjUorKOg81bh82i4mBSVHsKKxuONZuMZEUbUNr8Ovgr8jQKus8DEqJ5oNbTuHCR5eyOb+yyecDYDWrhsVYquq91Ht8WMwKi8kU+Nj0sVLGoi3B9w3GkV9Rj8+vsZlNxEdZURi/IBUq8PFwI0WppvuC15PAMV6/n7yyOrSGM0alkxRl47PNhyir9RBrt5AcY2uIwx94/1Ca5/3Gv9BDHXP11AH8ZOZxLVzR8Fpa4KItCf0y4Gyt9Y8Dz68FTtBa/6LRMRsDx+QFnu8KHFPc7Fw3ATcBDBgwYNK+ffuO6hMSnU9rTbXLS3G1m9IaFwOSokmNtVNYWU+sw4rTZkZrzYHyOvYW11Lj9mKzmIixW8grq8VptTBxYAJFVS7Kaz0AVNV7UQoGJEWRGGUj2m4mymbB5fVRWeelst5DZZ1x7LisBGyWI1tjbq+fzfmVbC+oIjXGjsfnZ19JLRazwq+hzu0lxm4hymZBY/xQxjosTB2UhNVk4kB5HbuLa1AYCayy3suofnGM6h/X5H0qaj2szi1jX3ENAGazCbvFxEmDkzGbjFWiat1Gt8/GP1HBH6/G6bDxj5zNbGJIegzFVS625FeREW/HbDJRUu3CrwOvM/7h9+uGbVrTJMkoBRcen8mUnCRKql2s2FvGrqJqTEoRbMTWe/zUeXxG8nd70RqibGZOOi6Z6UNTibKZWbyjmA0HKrBbTBRWuSirMf7qMZmAEEmqscsmZTFpYCL1Hh9fby1k2Z5S+ic4SIiy4fL6OVBWR0WdG69PE+uw4rCa8Pk1Hp/G5/fj8Wu8Pj9ev8brO3yRgknaeKzITHCSlehkT3ENlfWewLVodF04/JyG57rJ16bx1+C41BhOGZrCpIHG8ox1bh//XXuArYeqKKlxA2AK/hINdRl0i08D79d062kj07lgfP8Wr2c43SahNyYtdCGEaL+WEnpbClAHgOxGz7MC20IeEyi5xAMl7Q9VCCHE0WpLQl8BDFVKDVJK2YArgfebHfM+MDfw+DLgq5bq50IIISKv1V4uWmuvUuoXwALADDyntd6klLoXWKm1fh94FnhZKbUTKMVI+kIIITpRm4b+a60/Bj5utu0PjR7XA5dHNjQhhBDtISNFhRCil5CELoQQvYQkdCGE6CUkoQshRC/R6sCiDntjpYqAoxkqmgKEHbDUhSSu9uuusUlc7dNd44LuG9uxxDVQa50aakeXJfSjpZRaGW6UVFeSuNqvu8YmcbVPd40Lum9sHRWXlFyEEKKXkIQuhBC9RE9M6E91dQBhSFzt111jk7jap7vGBd03tg6Jq8fV0IUQQoTWE1voQgghQpCELoQQvUSPSeitLVTdwe+drZT6Wim1WSm1SSn1q8D2e5RSB5RSawP/5zR6zW8CsW5TSp3VwfHtVUptCMSwMrAtSSn1uVJqR+BjYmC7Ukr9MxDbeqXUxA6KaXij67JWKVWplLq1K66ZUuo5pVRhYCGW4LZ2Xx+l1NzA8TuUUnNDvVeEYntYKbU18P7vKqUSAttzlFJ1ja7dE41eMynwPbAzEP8xLYYZJq52f+0i/XMbJq43GsW0Vym1NrC9M69XuBzRud9nWutu/x9j2t5dwGDABqwDRnXi+/cDJgYexwLbgVHAPcAdIY4fFYjRDgwKxG7uwPj2AinNtj0E3BV4fBfwYODxHOATjIW0TgSWddLX7xAwsCuuGTADmAhsPNrrAyQBuwMfEwOPEzsotjMBS+Dxg41iy2l8XLPzLA/EqwLxn9MBcbXra9cRP7eh4mq2/6/AH7rgeoXLEZ36fdZTWuhTgZ1a691aazfwOnBhZ7251jpfa7068LgK2AJktvCSC4HXtdYurfUeYCfG59CZLgReDDx+Ebio0faXtOF7IEEp1a+DY5kN7NJatzQyuMOumdb6G4x5+pu/X3uuz1nA51rrUq11GfA5cHZHxKa1/kxrHVxt+3uMVcLCCsQXp7X+XhtZ4aVGn0/E4mpBuK9dxH9uW4or0Mq+AnitpXN00PUKlyM69fuspyT0TCC30fM8Wk6oHUYplQNMAJYFNv0i8CfTc8E/p+j8eDXwmVJqlTIW4gZI11rnBx4fAtK7KDYwFjxp/EPWHa5Ze69PV30PXo/RkgsapJRao5RapJSaHtiWGYinM2Jrz9eus6/ZdKBAa72j0bZOv17NckSnfp/1lITeLSilYoC3gVu11pXA48BxwPFAPsafe13hFK31ROAc4OdKqRmNdwZaIV3SP1UZyxZeALwV2NRdrlmDrrw+LVFK3Q14gVcDm/KBAVrrCcBtwHylVFwnhtTtvnbNXEXThkOnX68QOaJBZ3yf9ZSE3paFqjuUUsqK8YV6VWv9DoDWukBr7dNa+4GnOVwi6NR4tdYHAh8LgXcDcRQESymBj4VdERvGL5nVWuuCQIzd4prR/uvTqfEppeYB5wHXBBIBgZJGSeDxKoz69LBAHI3LMh0S21F87TrtmiljcfpLgDcaxdup1ytUjqCTv896SkJvy0LVHSZQm3sW2KK1fqTR9sa154uB4J3394ErlVJ2pdQgYCjGTZiOiC1aKRUbfIxxQ20jTRfungv8t1Fs1wXusp8IVDT6k7AjNGk1dYdr1uj92nN9FgBnKqUSA6WGMwPbIk4pdTbwa+ACrXVto+2pSilz4PFgjGu0OxBfpVLqxMD36nWNPp9IxtXer11n/tyeDmzVWjeUUjrzeoXL
EXT299mx3NntzP8Yd4W3Y/yWvbuT3/sUjD+V1gNrA//nAC8DGwLb3wf6NXrN3YFYt3GMd9BbiW0wRu+BdcCm4LUBkoEvgR3AF0BSYLsCHgvEtgGY3IGxRQMlQHyjbZ1+zTB+oeQDHoya5A1Hc30w6tk7A/9/1IGx7cSoowa/154IHHtp4Gu8FlgNnN/oPJMxEuwu4FECo8AjHFe7v3aR/rkNFVdg+wvAzc2O7czrFS5HdOr3mQz9F0KIXqKnlFyEEEK0QhK6EEL0EpLQhRCil5CELoQQvYQkdCGE6CUkoYs+TRkzQEZ1dRxCRIJ0WxR9mlJqL0Yf4OKujkWIYyUtdNFnBEbVfqSUWqeU2qiU+iPQH/haKfV14JgzlVLfKaVWK6XeCszNEZxz/iFlzKG9XCk1pCs/FyFCkYQu+pKzgYNa6/Fa6zHA34GDwKla61OVUinA74DTtTHZ2UqMSZ2CKrTWYzFGFv69UyMXog0koYu+ZANwhlLqQaXUdK11RbP9J2IsSrBUGavezMVYlCPotUYfT+roYIVoL0tXByBEZ9Fab1fGUl9zgD8ppb5sdojCWFzgqnCnCPNYiG5BWuiiz1BK9QdqtdavAA9jLGVWhbFkGBirA00L1scDNfdhjU7xg0Yfv+ucqIVoO2mhi75kLPCwUsqPMVvfTzFKJ58qpQ4G6ujzgNeUUvbAa36HMVsgQKJSaj3gwpgWWIhuRbotCtEG0r1R9ARSchFCiF5CWuhCCNFLSAtdCCF6CUnoQgjRS0hCF0KIXkISuhBC9BKS0IUQopf4/+EJcmPE6FgSAAAAAElFTkSuQmCC", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXQAAAEGCAYAAAB1iW6ZAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/P9b71AAAACXBIWXMAAAsTAAALEwEAmpwYAABHmElEQVR4nO3dd3ib1dn48e/RtizvHY84e5G9EwiEsCmrQNkkjPLCWyhddMFbeFt+BQodb0vL3quUPQIEwkoCZJO9nG073tuSrXl+f0h2nOCZ2JZl35/r0mVZOtJz67F86+h+znOO0lojhBAi8hnCHYAQQojuIQldCCH6CUnoQgjRT0hCF0KIfkISuhBC9BOmcG04OTlZ5+bmhmvzQggRkdatW1eutU5p7b6wJfTc3FzWrl0brs0LIUREUkodaOs+KbkIIUQ/IQldCCH6CUnoQgjRT4Sthi6EEO3xer0UFBTQ2NgY7lDCwmazkZWVhdls7vRjJKELIfqkgoICYmJiyM3NRSkV7nB6ldaaiooKCgoKGDJkSKcfJyUXIUSf1NjYSFJS0oBL5gBKKZKSkrr87aTDhK6UylZKfa6U2qaU2qqUur2VNqcopWqUUhtCl991KQohhGjFQEzmTY7ltXem5OIDfq61Xq+UigHWKaU+0VpvO6rdcq3197ocQRflVeXx4b4PuXbstcTb4nt6c0IIETE67KFrrYu01utD1+uA7UBmTwfWloO1B3li8xMUOYvCFYIQQjR79tlnufXWW8MdBtDFGrpSKheYDKxq5e7ZSqmNSqkPlVLj2nj8TUqptUqptWVlZV2PFkiMSgSgsrHymB4vhBD9VacTulLKAbwB/ERrXXvU3euBwVrricA/gLdbew6t9eNa62la62kpKa1ORdChRJskdCFE79i/fz+jR49m0aJFjBw5kquuuoqlS5cyd+5cRowYwerVq7/T/tRTT2XChAksWLCAgwcPAvDaa69xwgknMHHiRObNmwfA1q1bmTFjBpMmTWLChAnk5eUdd7ydGraolDITTOYvaa3fPPr+lglea/2BUupfSqlkrXX5cUd4FEnoQgw8//veVrYdOrofeXzGDorl7vNaLSYcYffu3bz22ms8/fTTTJ8+nZdffpkVK1bw7rvv8sc//pELL7ywue1tt93GwoULWbhwIU8//TQ//vGPefvtt/n973/PkiVLyMzMpLq6GoBHH32U22+/nauuugqPx4Pf7z/u19SZUS4KeArYrrX+Sxtt0kPtUErNCD1vxXFH1wqH2YHZYKaisUeeXgghjjBkyBDGjx+PwWBg3LhxLFiwAKUU48ePZ//+/Ue0/eabb7jyyisBuOaaa1ixYgUAc+fOZdGiRTzxxBPNiXv27Nn88Y9/5IEHHuDAgQNERUUdd6yd6aHPBa4BNiulNoRu+y2QA6C1fhS4BLhFKeUDGoDLdQ+tPq2UItGWSGWD9NCFGCg605PuKVartfm6wWBo/t1gMODz+Tr1HI8++iirVq1i8eLFTJ06lXXr1nHllVcyc+ZMFi9ezDnnnMNjjz3GqaeeelyxdpjQtdYrgHYHRGqtHwYePq5IuiDRliglFyFEnzNnzhz+/e9/c8011/DSSy9x0kknAbBnzx5mzpzJzJkz+fDDD8nPz6empoahQ4fy4x//mIMHD7Jp06aeT+h9UVJUkpRchBB9zj/+8Q+uu+46HnzwQVJSUnjmmWcAuOOOO8jLy0NrzYIFC5g4cSIPPPAAL7zwAmazmfT0dH77298e9/ZVD1VGOjRt2jR9rAtc3LniTlYXr+aTSz7p5qiEEH3F9u3bGTNmTLjDCKvW9oFSap3Welpr7SNyLpckWxKVDZWE68NICCH6oohM6Im2RDwBD06vM9yhCCFEnxGZCV3OFhVCiO+IzITe4uSiQ/WHwhyNEEL0DRGd0N/e/TZnvnEm60rWhTkiIYQIv4hO6O/teQ+Aj/Z9FM5whBCiT4johO4JeAD49OCnBHQgnCEJIQaQRYsW8frrr4c7jO+IyIRuMVqIMccAcPGIiylrKGNT2aYwRyWEEOEVkQkdgiNdUqJS+OnUn2IymFh6YGm4QxJC9EN/+MMfGDVqFCeeeCJXXHEFDz300BH3f/rpp0yePJnx48dz/fXX43a7Afj1r3/N2LFjmTBhAr/4xS+A1qfR7U4Reeo/wFVjrsJhdhBnjWPOoDksObCEn037GQYVsZ9RQoi2fPhrKN7cvc+ZPh7Ovr/dJmvWrOGNN95g48aNeL1epkyZwtSpU5vvb2xsZNGiRXz66aeMHDmSa6+9lkceeYRrrrmGt956ix07dqCUap4yt7VpdLtTxGa/K0ZfwXnDzgPg3CHnUuwsZn3J+jBHJYToT7766isuuOACbDYbMTExnHfeeUfcv3PnToYMGcLIkSMBWLhwIcuWLSMuLg6bzcYNN9zAm2++id1uB1qfRrc7RWwPvaX5OfOxm+y8v/d9pqW3OsWBECKSddCT7mtMJhOrV6/m008/5fXXX+fhhx/ms88+a3Ua3aSkpG7bbsT20FuKMkVx2uDT+Hj/x+yt2YvH7wl3SEKIfmDu3Lm89957NDY2Ul9fz/vvv3/E/aNGjWL//v3s3r0bgBdeeIGTTz6Z+vp6ampqOOecc/jrX//Kxo0bgcPT6P7+978nJSWF/Pz8bo23X/TQAc4bdh7v7nmXC96+gCmpU3ju7OfCHZIQIsJNnz6d888/
[... remainder of the base64-encoded image/png payload for the notebook's plot output elided ...]", "text/plain": [ "
" ] @@ -1335,7 +1355,7 @@ }, { "cell_type": "code", - "execution_count": 45, + "execution_count": 15, "id": "2ea981cd", "metadata": {}, "outputs": [ @@ -1345,7 +1365,7 @@ "array([3, 4, 5, 6, 7, 8, 9])" ] }, - "execution_count": 45, + "execution_count": 15, "metadata": {}, "output_type": "execute_result" } @@ -1357,542 +1377,187 @@ }, { "cell_type": "code", - "execution_count": 46, + "execution_count": 16, "id": "bbd33233", "metadata": {}, "outputs": [ { - "name": "stderr", - "output_type": "stream", - "text": [ - "[2023-03-27T18:05:59.734678+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:05:59.737612+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", - "[2023-03-27T18:06:00.157952+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:06:00.160095+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", - "[2023-03-27T18:06:00.484737+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:06:00.485757+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", - "[2023-03-27T18:06:00.786487+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 1. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:06:00.788466+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 1. Original dtype float64.\n", - "[2023-03-27T18:06:01.100020+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:06:01.102261+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", - "[2023-03-27T18:06:01.460078+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:06:01.462163+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", - "[2023-03-27T18:06:01.805568+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:06:01.807568+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", - "[2023-03-27T18:06:02.183897+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:06:02.185904+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", - "[2023-03-27T18:06:02.569835+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:06:02.571874+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", - "[2023-03-27T18:06:03.033272+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. 
Original dtype float64.\n",
- [... hundreds of further near-identical "-" stderr lines elided from this hunk: repeated "[alcohol] quality loss for constraints le = 14.2" and "ge = 8.0" entries differing only in timestamp and row counts, plus an occasional "[residual sugar] quality loss for constraints ge = 0.6" entry ...]
- "[2023-03-27T18:06:54.835486+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. 
Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:06:54.837487+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", - "[2023-03-27T18:06:55.132594+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:06:55.134593+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", - "[2023-03-27T18:06:55.465635+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:06:55.467185+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", - "[2023-03-27T18:06:55.807745+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:06:55.810517+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", - "[2023-03-27T18:06:56.300336+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:06:56.302923+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", - "[2023-03-27T18:06:56.604424+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:06:56.605423+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", - "[2023-03-27T18:06:56.898530+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:06:56.900544+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", - "[2023-03-27T18:06:57.205520+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 1. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:06:57.206520+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 1. Original dtype float64.\n", - "[2023-03-27T18:06:57.503438+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:06:57.505437+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", - "[2023-03-27T18:06:57.819558+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 6. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:06:57.821581+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 6. Original dtype float64.\n", - "[2023-03-27T18:06:58.160813+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:06:58.163417+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", - "[2023-03-27T18:06:58.462315+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. 
Original dtype float64.\n", - "[2023-03-27T18:06:58.463303+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", - "[2023-03-27T18:06:58.815614+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:06:58.817596+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", - "[2023-03-27T18:06:59.129940+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:06:59.130934+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", - "[2023-03-27T18:06:59.577632+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 1. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:06:59.580621+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 1. Original dtype float64.\n", - "[2023-03-27T18:06:59.909210+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:06:59.910211+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", - "[2023-03-27T18:07:00.263906+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:00.265906+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", - "[2023-03-27T18:07:00.573175+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:00.574177+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", - "[2023-03-27T18:07:00.866210+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:00.868793+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", - "[2023-03-27T18:07:01.205344+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:01.207327+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", - "[2023-03-27T18:07:01.606906+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:01.608906+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", - "[2023-03-27T18:07:02.102300+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 1. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:02.105211+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 1. Original dtype float64.\n", - "[2023-03-27T18:07:02.503969+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:02.506485+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. 
Remaining 0. prev length 2. Original dtype float64.\n", - "[2023-03-27T18:07:02.906864+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:02.908864+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", - "[2023-03-27T18:07:03.298141+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:03.300142+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", - "[2023-03-27T18:07:03.619687+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:03.621670+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", - "[2023-03-27T18:07:03.942307+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:03.946964+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", - "[2023-03-27T18:07:04.383317+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:04.384318+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", - "[2023-03-27T18:07:04.685032+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:04.687584+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", - "[2023-03-27T18:07:04.985829+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 6. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:04.986829+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 6. Original dtype float64.\n", - "[2023-03-27T18:07:05.266858+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:05.269157+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", - "[2023-03-27T18:07:05.580166+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:05.582149+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", - "[2023-03-27T18:07:05.889785+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:05.892186+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", - "[2023-03-27T18:07:06.211209+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:06.213722+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. 
Original dtype float64.\n", - "[2023-03-27T18:07:06.513714+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:06.515729+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", - "[2023-03-27T18:07:06.832167+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:06.834177+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", - "[2023-03-27T18:07:07.144798+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:07.146797+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", - "[2023-03-27T18:07:07.479304+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:07.481835+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", - "[2023-03-27T18:07:07.846999+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:07.848997+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", - "[2023-03-27T18:07:08.195789+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:08.197813+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 1. prev length 3. Original dtype float64.\n", - "[2023-03-27T18:07:08.200813+0200][38480][INFO] [residual sugar] quality loss for constraints le = 65.8. Remaining 0. prev length 1. Original dtype float64.\n", - "[2023-03-27T18:07:08.691113+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:08.694249+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", - "[2023-03-27T18:07:09.231893+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:09.235438+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", - "[2023-03-27T18:07:09.713446+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 6. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:09.716162+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 6. Original dtype float64.\n", - "[2023-03-27T18:07:10.805837+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:10.809012+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", - "[2023-03-27T18:07:11.446846+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. 
Original dtype float64.\n", - "[2023-03-27T18:07:11.450600+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", - "[2023-03-27T18:07:12.110297+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:12.114136+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", - "[2023-03-27T18:07:12.587219+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:12.589217+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", - "[2023-03-27T18:07:13.186604+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:13.188628+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", - "[2023-03-27T18:07:13.765722+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:13.767730+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", - "[2023-03-27T18:07:14.222493+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:14.225273+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", - "[2023-03-27T18:07:14.581621+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:14.582622+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", - "[2023-03-27T18:07:14.916005+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:14.917005+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", - "[2023-03-27T18:07:15.232768+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:15.233771+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", - "[2023-03-27T18:07:15.587426+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 1. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:15.589426+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 1. Original dtype float64.\n", - "[2023-03-27T18:07:15.937914+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:15.939914+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", - "[2023-03-27T18:07:16.341209+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:16.343228+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. 
Remaining 0. prev length 4. Original dtype float64.\n", - "[2023-03-27T18:07:16.667291+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:16.669292+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", - "[2023-03-27T18:07:16.989838+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:16.991912+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", - "[2023-03-27T18:07:17.306825+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:17.308797+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", - "[2023-03-27T18:07:17.659105+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:17.661131+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", - "[2023-03-27T18:07:18.018946+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:18.019947+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", - "[2023-03-27T18:07:18.393086+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 6. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:18.396311+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 6. Original dtype float64.\n", - "[2023-03-27T18:07:18.830421+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:18.833527+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", - "[2023-03-27T18:07:19.232926+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:19.236012+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", - "[2023-03-27T18:07:19.669845+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:19.672139+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", - "[2023-03-27T18:07:20.034654+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:20.035654+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", - "[2023-03-27T18:07:20.365288+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:20.367291+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. 
Original dtype float64.\n", - "[2023-03-27T18:07:20.677852+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:20.680692+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", - "[2023-03-27T18:07:20.988636+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:20.990732+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", - "[2023-03-27T18:07:21.326922+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 1. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:21.329905+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 1. Original dtype float64.\n", - "[2023-03-27T18:07:21.682149+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:21.684150+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", - "[2023-03-27T18:07:22.042272+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 1. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:22.043272+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 1. Original dtype float64.\n", - "[2023-03-27T18:07:22.417916+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:22.418916+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", - "[2023-03-27T18:07:22.749237+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:22.751237+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", - "[2023-03-27T18:07:23.090475+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:23.091459+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", - "[2023-03-27T18:07:23.470508+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:23.473305+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", - "[2023-03-27T18:07:23.821072+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:23.823567+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 1. prev length 3. Original dtype float64.\n", - "[2023-03-27T18:07:23.827191+0200][38480][INFO] [residual sugar] quality loss for constraints ge = 0.6. Remaining 0. prev length 1. Original dtype float64.\n", - "[2023-03-27T18:07:24.193607+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:24.194590+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. 
Remaining 0. prev length 2. Original dtype float64.\n", - "[2023-03-27T18:07:24.532529+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:24.534525+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", - "[2023-03-27T18:07:24.876586+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:24.878585+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", - "[2023-03-27T18:07:25.216076+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:25.217076+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", - "[2023-03-27T18:07:25.599528+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:25.601333+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", - "[2023-03-27T18:07:26.159795+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:26.161982+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", - "[2023-03-27T18:07:26.541276+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:26.542274+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", - "[2023-03-27T18:07:26.869887+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:26.872038+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", - "[2023-03-27T18:07:27.183814+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:27.186139+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", - "[2023-03-27T18:07:27.522592+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:27.524574+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", - "[2023-03-27T18:07:27.885528+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:27.886547+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. Original dtype float64.\n", - "[2023-03-27T18:07:28.236311+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 6. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:28.237310+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 6. 
Original dtype float64.\n", - "[2023-03-27T18:07:28.569622+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:28.571622+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", - "[2023-03-27T18:07:28.889372+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:28.890372+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", - "[2023-03-27T18:07:29.200272+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:29.202272+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", - "[2023-03-27T18:07:29.533137+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 4. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:29.535216+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 4. Original dtype float64.\n", - "[2023-03-27T18:07:29.936280+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:29.939026+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", - "[2023-03-27T18:07:30.369796+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 2. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:30.371797+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 2. Original dtype float64.\n", - "[2023-03-27T18:07:30.718054+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 3. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:30.720128+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 3. Original dtype float64.\n", - "[2023-03-27T18:07:31.139806+0200][38480][INFO] [alcohol] quality loss for constraints le = 14.2. Remaining 5. prev length 7. Original dtype float64.\n", - "[2023-03-27T18:07:31.140809+0200][38480][INFO] [alcohol] quality loss for constraints ge = 8.0. Remaining 0. prev length 5. 
Original dtype float64.\n" - ] - }, - { - "ename": "KeyboardInterrupt", - "evalue": "", - "output_type": "error", - "traceback": [ - "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[1;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", - "\u001b[1;32m\u001b[0m in \u001b[0;36m\u001b[1;34m\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mplugin\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mgenerate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m7\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcond\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0moutcome\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m", - "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd\u001b[0m in \u001b[0;36mpydantic.decorator.validate_arguments.validate.wrapper_function\u001b[1;34m()\u001b[0m\n", - "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd\u001b[0m in \u001b[0;36mpydantic.decorator.ValidatedFunction.call\u001b[1;34m()\u001b[0m\n", - "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd\u001b[0m in \u001b[0;36mpydantic.decorator.ValidatedFunction.execute\u001b[1;34m()\u001b[0m\n", - "\u001b[1;32md:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\core\\plugin.py\u001b[0m in \u001b[0;36mgenerate\u001b[1;34m(self, count, constraints, random_state, **kwargs)\u001b[0m\n\u001b[0;32m 337\u001b[0m \u001b[0msyn_schema\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mSchema\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfrom_constraints\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mgen_constraints\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 338\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 339\u001b[1;33m \u001b[0mX_syn\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_generate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mcount\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mcount\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0msyn_schema\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0msyn_schema\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 340\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 341\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mX_syn\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mis_tabular\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;32md:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_ddpm.py\u001b[0m in \u001b[0;36m_generate\u001b[1;34m(self, count, syn_schema, **kwargs)\u001b[0m\n\u001b[0;32m 246\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mdata\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 247\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 248\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_safe_generate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mcallback\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcount\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0msyn_schema\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 249\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 250\u001b[0m 
\u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd\u001b[0m in \u001b[0;36mpydantic.decorator.validate_arguments.validate.wrapper_function\u001b[1;34m()\u001b[0m\n", - "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd\u001b[0m in \u001b[0;36mpydantic.decorator.ValidatedFunction.call\u001b[1;34m()\u001b[0m\n", - "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd\u001b[0m in \u001b[0;36mpydantic.decorator.ValidatedFunction.execute\u001b[1;34m()\u001b[0m\n", - "\u001b[1;32md:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\core\\plugin.py\u001b[0m in \u001b[0;36m_safe_generate\u001b[1;34m(self, gen_cbk, count, syn_schema, **kwargs)\u001b[0m\n\u001b[0;32m 391\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mit\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msampling_patience\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 392\u001b[0m \u001b[1;31m# sample\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 393\u001b[1;33m \u001b[0miter_samples\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mgen_cbk\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mcount\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 394\u001b[0m iter_samples_df = pd.DataFrame(\n\u001b[0;32m 395\u001b[0m \u001b[0miter_samples\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcolumns\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtraining_schema\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfeatures\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;32md:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_ddpm.py\u001b[0m in \u001b[0;36mcallback\u001b[1;34m(count)\u001b[0m\n\u001b[0;32m 241\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 242\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mcallback\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mcount\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m \u001b[1;31m# type: ignore\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 243\u001b[1;33m \u001b[0mdata\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmodel\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mgenerate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mcount\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcond\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mcond\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 244\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mis_classification\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 245\u001b[0m \u001b[0mdata\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0minsert\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtarget_iloc\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcond\u001b[0m\u001b[1;33m,\u001b[0m 
\u001b[0maxis\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;32md:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\core\\models\\tabular_ddpm\\__init__.py\u001b[0m in \u001b[0;36mgenerate\u001b[1;34m(self, count, cond)\u001b[0m\n\u001b[0;32m 211\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mcond\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 212\u001b[0m \u001b[0mcond\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtensor\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mcond\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlong\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdevice\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdevice\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 213\u001b[1;33m \u001b[0msample\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdiffusion\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msample_all\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mcount\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcond\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdetach\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcpu\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mnumpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 214\u001b[0m \u001b[0msample\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0msample\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_col_perm\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 215\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0msample\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;32md:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\core\\models\\tabular_ddpm\\gaussian_multinomial_diffsuion.py\u001b[0m in \u001b[0;36msample_all\u001b[1;34m(self, num_samples, cond, max_batch_size, ddim)\u001b[0m\n\u001b[0;32m 951\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 952\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mb\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mbs\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 953\u001b[1;33m \u001b[0msample\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0msample_fn\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mb\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcond\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 954\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0many\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0msample\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0misnan\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mitem\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 955\u001b[0m \u001b[1;32mraise\u001b[0m \u001b[0mValueError\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"found NaNs in 
sample\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\torch\\autograd\\grad_mode.py\u001b[0m in \u001b[0;36mdecorate_context\u001b[1;34m(*args, **kwargs)\u001b[0m\n\u001b[0;32m 25\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mdecorate_context\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 26\u001b[0m \u001b[1;32mwith\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mclone\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 27\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0margs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 28\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0mcast\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mF\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdecorate_context\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 29\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;32md:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\core\\models\\tabular_ddpm\\gaussian_multinomial_diffsuion.py\u001b[0m in \u001b[0;36msample\u001b[1;34m(self, num_samples, cond)\u001b[0m\n\u001b[0;32m 918\u001b[0m \u001b[0mdebug\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34mf\"Sample timestep {i:4d}\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mend\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m\"\\r\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 919\u001b[0m \u001b[0mt\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfull\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mb\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mi\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdevice\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mdevice\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlong\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 920\u001b[1;33m model_out = self.denoise_fn(\n\u001b[0m\u001b[0;32m 921\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcat\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mz_norm\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mlog_z\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdim\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfloat\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mt\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0my\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mcond\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 922\u001b[0m )\n", - "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m 1192\u001b[0m if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks\n\u001b[0;32m 
1193\u001b[0m or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[1;32m-> 1194\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 1195\u001b[0m \u001b[1;31m# Do not call functions when jit is used\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1196\u001b[0m \u001b[0mfull_backward_hooks\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mnon_full_backward_hooks\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;32md:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\core\\models\\tabular_ddpm\\modules.py\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, x, t, y)\u001b[0m\n\u001b[0;32m 111\u001b[0m \u001b[0memb\u001b[0m \u001b[1;33m+=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0memb_nonlin\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlabel_emb\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0my\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 112\u001b[0m \u001b[0mx\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mproj\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m+\u001b[0m \u001b[0memb\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 113\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmodel\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 114\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 115\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m 1192\u001b[0m if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks\n\u001b[0;32m 1193\u001b[0m or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[1;32m-> 1194\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 1195\u001b[0m \u001b[1;31m# Do not call functions when jit is used\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1196\u001b[0m \u001b[0mfull_backward_hooks\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mnon_full_backward_hooks\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd\u001b[0m in \u001b[0;36mpydantic.decorator.validate_arguments.validate.wrapper_function\u001b[1;34m()\u001b[0m\n", - 
"\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd\u001b[0m in \u001b[0;36mpydantic.decorator.ValidatedFunction.call\u001b[1;34m()\u001b[0m\n", - "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd\u001b[0m in \u001b[0;36mpydantic.decorator.ValidatedFunction.execute\u001b[1;34m()\u001b[0m\n", - "\u001b[1;32md:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\core\\models\\mlp.py\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, X)\u001b[0m\n\u001b[0;32m 398\u001b[0m \u001b[1;33m@\u001b[0m\u001b[0mvalidate_arguments\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mdict\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0marbitrary_types_allowed\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mTrue\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 399\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mforward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mX\u001b[0m\u001b[1;33m:\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mTensor\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m->\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mTensor\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 400\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmodel\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mX\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfloat\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 401\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 402\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0m_train_epoch\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mloader\u001b[0m\u001b[1;33m:\u001b[0m \u001b[0mDataLoader\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m->\u001b[0m \u001b[0mfloat\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m 1192\u001b[0m if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks\n\u001b[0;32m 1193\u001b[0m or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[1;32m-> 1194\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 1195\u001b[0m \u001b[1;31m# Do not call functions when jit is used\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1196\u001b[0m \u001b[0mfull_backward_hooks\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mnon_full_backward_hooks\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\torch\\nn\\modules\\container.py\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, input)\u001b[0m\n\u001b[0;32m 202\u001b[0m \u001b[1;32mdef\u001b[0m 
\u001b[0mforward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 203\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mmodule\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 204\u001b[1;33m \u001b[0minput\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mmodule\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 205\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 206\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m 1192\u001b[0m if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks\n\u001b[0;32m 1193\u001b[0m or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[1;32m-> 1194\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 1195\u001b[0m \u001b[1;31m# Do not call functions when jit is used\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1196\u001b[0m \u001b[0mfull_backward_hooks\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mnon_full_backward_hooks\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd\u001b[0m in \u001b[0;36mpydantic.decorator.validate_arguments.validate.wrapper_function\u001b[1;34m()\u001b[0m\n", - "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd\u001b[0m in \u001b[0;36mpydantic.decorator.ValidatedFunction.call\u001b[1;34m()\u001b[0m\n", - "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\pydantic\\decorator.cp39-win_amd64.pyd\u001b[0m in \u001b[0;36mpydantic.decorator.ValidatedFunction.execute\u001b[1;34m()\u001b[0m\n", - "\u001b[1;32md:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\core\\models\\mlp.py\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, X)\u001b[0m\n\u001b[0;32m 112\u001b[0m \u001b[1;33m@\u001b[0m\u001b[0mvalidate_arguments\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mconfig\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mdict\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0marbitrary_types_allowed\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mTrue\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 113\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mforward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mX\u001b[0m\u001b[1;33m:\u001b[0m \u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mTensor\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m->\u001b[0m 
\u001b[0mtorch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mTensor\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 114\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmodel\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mX\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfloat\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdevice\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 115\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 116\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m 1192\u001b[0m if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks\n\u001b[0;32m 1193\u001b[0m or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[1;32m-> 1194\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 1195\u001b[0m \u001b[1;31m# Do not call functions when jit is used\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1196\u001b[0m \u001b[0mfull_backward_hooks\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mnon_full_backward_hooks\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\torch\\nn\\modules\\container.py\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, input)\u001b[0m\n\u001b[0;32m 202\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mforward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 203\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mmodule\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 204\u001b[1;33m \u001b[0minput\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mmodule\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 205\u001b[0m \u001b[1;32mreturn\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 206\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m 1192\u001b[0m if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks\n\u001b[0;32m 1193\u001b[0m or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[1;32m-> 1194\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m 
\u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 1195\u001b[0m \u001b[1;31m# Do not call functions when jit is used\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 1196\u001b[0m \u001b[0mfull_backward_hooks\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mnon_full_backward_hooks\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;32md:\\DevTools\\Miniconda\\lib\\site-packages\\torch\\nn\\modules\\linear.py\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, input)\u001b[0m\n\u001b[0;32m 112\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 113\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mforward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m:\u001b[0m \u001b[0mTensor\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m->\u001b[0m \u001b[0mTensor\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 114\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mF\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlinear\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mweight\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mbias\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 115\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 116\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0mextra_repr\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m->\u001b[0m \u001b[0mstr\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", - "\u001b[1;31mKeyboardInterrupt\u001b[0m: " - ] + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
fixed acidityvolatile aciditycitric acidresidual sugarchloridesfree sulfur dioxidetotal sulfur dioxidedensitypHsulphatesalcoholquality
03.81.100.0065.80.009000289.050.1049971.0388933.820.2200008.05
114.20.081.660.60.251377289.09.0000000.9872913.821.0800008.06
23.81.100.000.60.0090002.09.0000000.9871103.821.08000014.27
33.80.081.660.60.0090002.09.0000000.9871103.821.08000014.26
43.81.101.660.60.009000289.0440.0000000.9871103.821.08000014.26
53.81.100.000.60.0090002.09.0000000.9871103.821.0799758.06
614.20.081.660.60.009000289.09.0000000.9871103.821.08000014.27
\n", + "
" + ], + "text/plain": [ + " fixed acidity volatile acidity citric acid residual sugar chlorides \\\n", + "0 3.8 1.10 0.00 65.8 0.009000 \n", + "1 14.2 0.08 1.66 0.6 0.251377 \n", + "2 3.8 1.10 0.00 0.6 0.009000 \n", + "3 3.8 0.08 1.66 0.6 0.009000 \n", + "4 3.8 1.10 1.66 0.6 0.009000 \n", + "5 3.8 1.10 0.00 0.6 0.009000 \n", + "6 14.2 0.08 1.66 0.6 0.009000 \n", + "\n", + " free sulfur dioxide total sulfur dioxide density pH sulphates \\\n", + "0 289.0 50.104997 1.038893 3.82 0.220000 \n", + "1 289.0 9.000000 0.987291 3.82 1.080000 \n", + "2 2.0 9.000000 0.987110 3.82 1.080000 \n", + "3 2.0 9.000000 0.987110 3.82 1.080000 \n", + "4 289.0 440.000000 0.987110 3.82 1.080000 \n", + "5 2.0 9.000000 0.987110 3.82 1.079975 \n", + "6 289.0 9.000000 0.987110 3.82 1.080000 \n", + "\n", + " alcohol quality \n", + "0 8.0 5 \n", + "1 8.0 6 \n", + "2 14.2 7 \n", + "3 14.2 6 \n", + "4 14.2 6 \n", + "5 8.0 6 \n", + "6 14.2 7 " + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" } ], "source": [ From 428177b34d48ea816aaba47937d298528caa530d Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Fri, 31 Mar 2023 11:58:19 +0200 Subject: [PATCH 34/95] debug LogDistribution and LogIntDistribution --- src/synthcity/plugins/core/distribution.py | 46 +++++++++----------- src/synthcity/plugins/generic/plugin_ddpm.py | 2 +- 2 files changed, 21 insertions(+), 27 deletions(-) diff --git a/src/synthcity/plugins/core/distribution.py b/src/synthcity/plugins/core/distribution.py index 3aff6f74..96ce24db 100644 --- a/src/synthcity/plugins/core/distribution.py +++ b/src/synthcity/plugins/core/distribution.py @@ -157,7 +157,7 @@ def sample(self, count: int = 1) -> Any: if msamples is not None: return msamples - return np.random.choice(self.choices, count).tolist() + return np.random.choice(self.choices, count) def has(self, val: Any) -> bool: return val in self.choices @@ -209,8 +209,8 @@ class FloatDistribution(Distribution): :parts: 1 """ - low: float = np.iinfo(np.int64).min - high: float = np.iinfo(np.int64).max + low: float = np.finfo(np.float64).min + high: float = np.finfo(np.float64).max @validator("low", always=True) def _validate_low_thresh(cls: Any, v: float, values: Dict) -> float: @@ -274,18 +274,18 @@ def dtype(self) -> str: class LogDistribution(FloatDistribution): - low: float = np.iinfo(np.int64).min - high: float = np.iinfo(np.int64).max - base: float = 10.0 - _log_low: float = np.log(low) / np.log(base) - _log_high: float = np.log(high) / np.log(base) + low: float = np.finfo(np.float64).tiny + high: float = np.finfo(np.float64).max + base: float = 2.0 def sample(self, count: int = 1) -> Any: np.random.seed(self.random_state) msamples = self.sample_marginal(count) if msamples is not None: return msamples - return self.base ** np.random.uniform(self._log_low, self._log_high, count) + lo = np.log2(self.low) / np.log2(self.base) + hi = np.log2(self.high) / np.log2(self.base) + return self.base ** np.random.uniform(lo, hi, count) class IntegerDistribution(Distribution): @@ -322,8 +322,9 @@ def sample(self, count: int = 1) -> Any: if msamples is not None: return msamples - choices = [val for val in range(self.low, self.high + 1, self.step)] - return np.random.choice(choices, count).tolist() + high = (self.high + 1 - self.low) // self.step + s = np.random.choice(high, count) + return s * self.step + self.low def has(self, val: Any) -> bool: return self.low <= val and val <= self.high @@ -361,18 +362,18 @@ def dtype(self) -> str: class 
LogIntDistribution(FloatDistribution): - low: int = np.iinfo(np.int64).min - high: int = np.iinfo(np.int64).max - base: float = 10.0 - _log_low: float = np.log(low) / np.log(base) - _log_high: float = np.log(high) / np.log(base) + low: float = 1.0 + high: float = float(np.iinfo(np.int64).max) + base: float = 2.0 def sample(self, count: int = 1) -> Any: np.random.seed(self.random_state) msamples = self.sample_marginal(count) if msamples is not None: return msamples - s = self.base ** np.random.uniform(self._log_low, self._log_high, count) + lo = np.log2(self.low) / np.log2(self.base) + hi = np.log2(self.high) / np.log2(self.base) + s = self.base ** np.random.uniform(lo, hi, count) return s.astype(int) @@ -411,15 +412,8 @@ def sample(self, count: int = 1) -> Any: if msamples is not None: return msamples - samples = np.random.uniform( - datetime.timestamp(self.low), datetime.timestamp(self.high), count - ) - - samples_dt = [] - for s in samples: - samples_dt.append(datetime.fromtimestamp(s)) - - return samples_dt + delta = self.high - self.low + return self.low + delta * np.random.rand(count) def has(self, val: datetime) -> bool: return self.low <= val and val <= self.high diff --git a/src/synthcity/plugins/generic/plugin_ddpm.py b/src/synthcity/plugins/generic/plugin_ddpm.py index 9ac18878..8eda1ea9 100644 --- a/src/synthcity/plugins/generic/plugin_ddpm.py +++ b/src/synthcity/plugins/generic/plugin_ddpm.py @@ -189,7 +189,7 @@ def hyperparameter_space(**kwargs: Any) -> List[Distribution]: return [ LogDistribution(name="lr", low=1e-5, high=1e-1), LogIntDistribution(name="batch_size", low=256, high=4096), - IntegerDistribution(name="num_timesteps", choices=[100, 1000]), + IntegerDistribution(name="num_timesteps", low=10, high=1000), LogIntDistribution(name="n_iter", low=1000, high=10000), IntegerDistribution(name="n_layers_hidden", low=2, high=8), LogIntDistribution(name="dim_hidden", low=128, high=1024), From 4705319e082c98e8f6405e479bd8a582e25318bc Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Sat, 1 Apr 2023 21:57:01 +0200 Subject: [PATCH 35/95] change discrete encoding of BinEncoder to passthrough; passed all tests in test_tabular_encoder --- .gitignore | 1 + src/synthcity/plugins/core/models/tabular_encoder.py | 12 +++++------- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/.gitignore b/.gitignore index b2bc0daa..c41e0784 100644 --- a/.gitignore +++ b/.gitignore @@ -68,3 +68,4 @@ generated MNIST cifar-10* src/test.py +.tmp.py diff --git a/src/synthcity/plugins/core/models/tabular_encoder.py b/src/synthcity/plugins/core/models/tabular_encoder.py index e5f260e3..74b72142 100644 --- a/src/synthcity/plugins/core/models/tabular_encoder.py +++ b/src/synthcity/plugins/core/models/tabular_encoder.py @@ -83,7 +83,7 @@ def __init__( """ self.whitelist = whitelist self.categorical_limit = categorical_limit - self.max_clusters = max_clusters # for compatibility + self.max_clusters = max_clusters if categorical_encoder is not None: self.categorical_encoder = categorical_encoder if continuous_encoder is not None: @@ -150,10 +150,7 @@ def fit( continue column_hash = dataframe_hash(raw_data[[name]]) log.info(f"Encoding {name} {column_hash}") - if name in discrete_columns: - ftype = "discrete" - else: - ftype = "continuous" + ftype = "discrete" if name in discrete_columns else "continuous" column_transform_info = self._fit_feature(raw_data[name], ftype) self.output_dimensions += column_transform_info.output_dimensions @@ -289,8 +286,9 @@ class BinEncoder(TabularEncoder): 
continuous_encoder = "bayesian_gmm" cont_encoder_params = dict(n_components=2) - categorical_encoder = "onehot" - cat_encoder_params = dict(handle_unknown="ignore", sparse=False) + categorical_encoder = "passthrough" # "onehot" + # ! onehot encoder does not pass the tests + cat_encoder_params = dict() # dict(handle_unknown="ignore", sparse=False) # TODO: check if this is correct def _transform_feature( From d9d73f14026d87753e153afe3140c94d7175b8cc Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Sun, 2 Apr 2023 20:36:29 +0200 Subject: [PATCH 36/95] add tabnet to plugins/core/models --- src/synthcity/plugins/core/models/tabnet.py | 1030 +++++++++++++++++++ 1 file changed, 1030 insertions(+) create mode 100644 src/synthcity/plugins/core/models/tabnet.py diff --git a/src/synthcity/plugins/core/models/tabnet.py b/src/synthcity/plugins/core/models/tabnet.py new file mode 100644 index 00000000..25383cb9 --- /dev/null +++ b/src/synthcity/plugins/core/models/tabnet.py @@ -0,0 +1,1030 @@ +# third party +import numpy as np +import torch +from torch.autograd import Function +from torch.nn import BatchNorm1d, Linear, ReLU + + +# credits to Yandex https://github.com/Qwicen/node/blob/master/lib/nn_utils.py +def _make_ix_like(input, dim=0): + d = input.size(dim) + rho = torch.arange(1, d + 1, device=input.device, dtype=input.dtype) + view = [1] * input.dim() + view[0] = -1 + return rho.view(view).transpose(0, dim) + + +class SparsemaxFunction(Function): + """ + An implementation of sparsemax (Martins & Astudillo, 2016). See + :cite:`DBLP:journals/corr/MartinsA16` for detailed description. + By Ben Peters and Vlad Niculae + """ + + @staticmethod + def forward(ctx, input, dim=-1): + """sparsemax: normalizing sparse transform (a la softmax) + + Parameters + ---------- + ctx : torch.autograd.function._ContextMethodMixin + input : torch.Tensor + any shape + dim : int + dimension along which to apply sparsemax + + Returns + ------- + output : torch.Tensor + same shape as input + + """ + ctx.dim = dim + max_val, _ = input.max(dim=dim, keepdim=True) + input -= max_val # same numerical stability trick as for softmax + tau, supp_size = SparsemaxFunction._threshold_and_support(input, dim=dim) + output = torch.clamp(input - tau, min=0) + ctx.save_for_backward(supp_size, output) + return output + + @staticmethod + def backward(ctx, grad_output): + supp_size, output = ctx.saved_tensors + dim = ctx.dim + grad_input = grad_output.clone() + grad_input[output == 0] = 0 + + v_hat = grad_input.sum(dim=dim) / supp_size.to(output.dtype).squeeze() + v_hat = v_hat.unsqueeze(dim) + grad_input = torch.where(output != 0, grad_input - v_hat, grad_input) + return grad_input, None + + @staticmethod + def _threshold_and_support(input, dim=-1): + """Sparsemax building block: compute the threshold + + Parameters + ---------- + input: torch.Tensor + any dimension + dim : int + dimension along which to apply the sparsemax + + Returns + ------- + tau : torch.Tensor + the threshold value + support_size : torch.Tensor + + """ + + input_srt, _ = torch.sort(input, descending=True, dim=dim) + input_cumsum = input_srt.cumsum(dim) - 1 + rhos = _make_ix_like(input, dim) + support = rhos * input_srt > input_cumsum + + support_size = support.sum(dim=dim).unsqueeze(dim) + tau = input_cumsum.gather(dim, support_size - 1) + tau /= support_size.to(input.dtype) + return tau, support_size + + +sparsemax = SparsemaxFunction.apply + + +def initialize_non_glu(module, input_dim, output_dim): + gain_value = np.sqrt((input_dim + output_dim) / 
np.sqrt(4 * input_dim)) + torch.nn.init.xavier_normal_(module.weight, gain=gain_value) + # torch.nn.init.zeros_(module.bias) + return + + +def initialize_glu(module, input_dim, output_dim): + gain_value = np.sqrt((input_dim + output_dim) / np.sqrt(input_dim)) + torch.nn.init.xavier_normal_(module.weight, gain=gain_value) + # torch.nn.init.zeros_(module.bias) + return + + +class GBN(torch.nn.Module): + """ + Ghost Batch Normalization + https://arxiv.org/abs/1705.08741 + """ + + def __init__(self, input_dim, virtual_batch_size=128, momentum=0.01): + super(GBN, self).__init__() + + self.input_dim = input_dim + self.virtual_batch_size = virtual_batch_size + self.bn = BatchNorm1d(self.input_dim, momentum=momentum) + + def forward(self, x): + chunks = x.chunk(int(np.ceil(x.shape[0] / self.virtual_batch_size)), 0) + res = [self.bn(x_) for x_ in chunks] + + return torch.cat(res, dim=0) + + +class TabNetEncoder(torch.nn.Module): + def __init__( + self, + input_dim, + output_dim, + n_d=8, + n_a=8, + n_steps=3, + gamma=1.3, + n_independent=2, + n_shared=2, + epsilon=1e-15, + virtual_batch_size=128, + momentum=0.02, + mask_type="sparsemax", + group_attention_matrix=None, + ): + """ + Defines main part of the TabNet network without the embedding layers. + + Parameters + ---------- + input_dim : int + Number of features + output_dim : int or list of int for multi task classification + Dimension of network output + examples : one for regression, 2 for binary classification etc... + n_d : int + Dimension of the prediction layer (usually between 4 and 64) + n_a : int + Dimension of the attention layer (usually between 4 and 64) + n_steps : int + Number of successive steps in the network (usually between 3 and 10) + gamma : float + Float above 1, scaling factor for attention updates (usually between 1.0 to 2.0) + n_independent : int + Number of independent GLU layer in each GLU block (default 2) + n_shared : int + Number of independent GLU layer in each GLU block (default 2) + epsilon : float + Avoid log(0), this should be kept very low + virtual_batch_size : int + Batch size for Ghost Batch Normalization + momentum : float + Float value between 0 and 1 which will be used for momentum in all batch norm + mask_type : str + Either "sparsemax" or "entmax" : this is the masking function to use + group_attention_matrix : torch matrix + Matrix of size (n_groups, input_dim), m_ij = importance within group i of feature j + """ + super(TabNetEncoder, self).__init__() + self.input_dim = input_dim + self.output_dim = output_dim + self.is_multi_task = isinstance(output_dim, list) + self.n_d = n_d + self.n_a = n_a + self.n_steps = n_steps + self.gamma = gamma + self.epsilon = epsilon + self.n_independent = n_independent + self.n_shared = n_shared + self.virtual_batch_size = virtual_batch_size + self.mask_type = mask_type + self.initial_bn = BatchNorm1d(self.input_dim, momentum=0.01) + self.group_attention_matrix = group_attention_matrix + + if self.group_attention_matrix is None: + # no groups + self.group_attention_matrix = torch.eye(self.input_dim) + self.attention_dim = self.input_dim + else: + self.attention_dim = self.group_attention_matrix.shape[0] + + if self.n_shared > 0: + shared_feat_transform = torch.nn.ModuleList() + for i in range(self.n_shared): + if i == 0: + shared_feat_transform.append( + Linear(self.input_dim, 2 * (n_d + n_a), bias=False) + ) + else: + shared_feat_transform.append( + Linear(n_d + n_a, 2 * (n_d + n_a), bias=False) + ) + + else: + shared_feat_transform = None + + self.initial_splitter = 
FeatTransformer( + self.input_dim, + n_d + n_a, + shared_feat_transform, + n_glu_independent=self.n_independent, + virtual_batch_size=self.virtual_batch_size, + momentum=momentum, + ) + + self.feat_transformers = torch.nn.ModuleList() + self.att_transformers = torch.nn.ModuleList() + + for step in range(n_steps): + transformer = FeatTransformer( + self.input_dim, + n_d + n_a, + shared_feat_transform, + n_glu_independent=self.n_independent, + virtual_batch_size=self.virtual_batch_size, + momentum=momentum, + ) + attention = AttentiveTransformer( + n_a, + self.attention_dim, + group_matrix=group_attention_matrix, + virtual_batch_size=self.virtual_batch_size, + momentum=momentum, + mask_type=self.mask_type, + ) + self.feat_transformers.append(transformer) + self.att_transformers.append(attention) + + def forward(self, x, prior=None): + x = self.initial_bn(x) + + bs = x.shape[0] # batch size + if prior is None: + prior = torch.ones((bs, self.attention_dim)).to(x.device) + + M_loss = 0 + att = self.initial_splitter(x)[:, self.n_d :] + steps_output = [] + for step in range(self.n_steps): + M = self.att_transformers[step](prior, att) + M_loss += torch.mean( + torch.sum(torch.mul(M, torch.log(M + self.epsilon)), dim=1) + ) + # update prior + prior = torch.mul(self.gamma - M, prior) + # output + M_feature_level = torch.matmul(M, self.group_attention_matrix) + masked_x = torch.mul(M_feature_level, x) + out = self.feat_transformers[step](masked_x) + d = ReLU()(out[:, : self.n_d]) + steps_output.append(d) + # update attention + att = out[:, self.n_d :] + + M_loss /= self.n_steps + return steps_output, M_loss + + def forward_masks(self, x): + x = self.initial_bn(x) + bs = x.shape[0] # batch size + prior = torch.ones((bs, self.attention_dim)).to(x.device) + M_explain = torch.zeros(x.shape).to(x.device) + att = self.initial_splitter(x)[:, self.n_d :] + masks = {} + + for step in range(self.n_steps): + M = self.att_transformers[step](prior, att) + M_feature_level = torch.matmul(M, self.group_attention_matrix) + masks[step] = M_feature_level + # update prior + prior = torch.mul(self.gamma - M, prior) + # output + masked_x = torch.mul(M_feature_level, x) + out = self.feat_transformers[step](masked_x) + d = ReLU()(out[:, : self.n_d]) + # explain + step_importance = torch.sum(d, dim=1) + M_explain += torch.mul(M_feature_level, step_importance.unsqueeze(dim=1)) + # update attention + att = out[:, self.n_d :] + + return M_explain, masks + + +class TabNetDecoder(torch.nn.Module): + def __init__( + self, + input_dim, + n_d=8, + n_steps=3, + n_independent=1, + n_shared=1, + virtual_batch_size=128, + momentum=0.02, + ): + """ + Defines main part of the TabNet network without the embedding layers. + + Parameters + ---------- + input_dim : int + Number of features + output_dim : int or list of int for multi task classification + Dimension of network output + examples : one for regression, 2 for binary classification etc... 
+ n_d : int + Dimension of the prediction layer (usually between 4 and 64) + n_steps : int + Number of successive steps in the network (usually between 3 and 10) + gamma : float + Float above 1, scaling factor for attention updates (usually between 1.0 to 2.0) + n_independent : int + Number of independent GLU layer in each GLU block (default 1) + n_shared : int + Number of independent GLU layer in each GLU block (default 1) + virtual_batch_size : int + Batch size for Ghost Batch Normalization + momentum : float + Float value between 0 and 1 which will be used for momentum in all batch norm + """ + super(TabNetDecoder, self).__init__() + self.input_dim = input_dim + self.n_d = n_d + self.n_steps = n_steps + self.n_independent = n_independent + self.n_shared = n_shared + self.virtual_batch_size = virtual_batch_size + + self.feat_transformers = torch.nn.ModuleList() + + if self.n_shared > 0: + shared_feat_transform = torch.nn.ModuleList() + for i in range(self.n_shared): + if i == 0: + shared_feat_transform.append(Linear(n_d, 2 * n_d, bias=False)) + else: + shared_feat_transform.append(Linear(n_d, 2 * n_d, bias=False)) + + else: + shared_feat_transform = None + + for step in range(n_steps): + transformer = FeatTransformer( + n_d, + n_d, + shared_feat_transform, + n_glu_independent=self.n_independent, + virtual_batch_size=self.virtual_batch_size, + momentum=momentum, + ) + self.feat_transformers.append(transformer) + + self.reconstruction_layer = Linear(n_d, self.input_dim, bias=False) + initialize_non_glu(self.reconstruction_layer, n_d, self.input_dim) + + def forward(self, steps_output): + res = 0 + for step_nb, step_output in enumerate(steps_output): + x = self.feat_transformers[step_nb](step_output) + res = torch.add(res, x) + res = self.reconstruction_layer(res) + return res + + +class TabNetPretraining(torch.nn.Module): + def __init__( + self, + input_dim, + pretraining_ratio=0.2, + n_d=8, + n_a=8, + n_steps=3, + gamma=1.3, + cat_idxs=[], + cat_dims=[], + cat_emb_dim=1, + n_independent=2, + n_shared=2, + epsilon=1e-15, + virtual_batch_size=128, + momentum=0.02, + mask_type="sparsemax", + n_shared_decoder=1, + n_indep_decoder=1, + group_attention_matrix=None, + ): + super(TabNetPretraining, self).__init__() + + self.cat_idxs = cat_idxs or [] + self.cat_dims = cat_dims or [] + self.cat_emb_dim = cat_emb_dim + + self.input_dim = input_dim + self.n_d = n_d + self.n_a = n_a + self.n_steps = n_steps + self.gamma = gamma + self.epsilon = epsilon + self.n_independent = n_independent + self.n_shared = n_shared + self.mask_type = mask_type + self.pretraining_ratio = pretraining_ratio + self.n_shared_decoder = n_shared_decoder + self.n_indep_decoder = n_indep_decoder + + if self.n_steps <= 0: + raise ValueError("n_steps should be a positive integer.") + if self.n_independent == 0 and self.n_shared == 0: + raise ValueError("n_shared and n_independent can't be both zero.") + + self.virtual_batch_size = virtual_batch_size + self.embedder = EmbeddingGenerator( + input_dim, cat_dims, cat_idxs, cat_emb_dim, group_attention_matrix + ) + self.post_embed_dim = self.embedder.post_embed_dim + + self.masker = RandomObfuscator( + self.pretraining_ratio, group_matrix=self.embedder.embedding_group_matrix + ) + self.encoder = TabNetEncoder( + input_dim=self.post_embed_dim, + output_dim=self.post_embed_dim, + n_d=n_d, + n_a=n_a, + n_steps=n_steps, + gamma=gamma, + n_independent=n_independent, + n_shared=n_shared, + epsilon=epsilon, + virtual_batch_size=virtual_batch_size, + momentum=momentum, + mask_type=mask_type, 
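+            # the group attention matrix here spans the post-embedding columns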
+ group_attention_matrix=self.embedder.embedding_group_matrix, + ) + self.decoder = TabNetDecoder( + self.post_embed_dim, + n_d=n_d, + n_steps=n_steps, + n_independent=self.n_indep_decoder, + n_shared=self.n_shared_decoder, + virtual_batch_size=virtual_batch_size, + momentum=momentum, + ) + + def forward(self, x): + """ + Returns: res, embedded_x, obf_vars + res : output of reconstruction + embedded_x : embedded input + obf_vars : which variable where obfuscated + """ + embedded_x = self.embedder(x) + if self.training: + masked_x, obfuscated_groups, obfuscated_vars = self.masker(embedded_x) + # set prior of encoder with obfuscated groups + prior = 1 - obfuscated_groups + steps_out, _ = self.encoder(masked_x, prior=prior) + res = self.decoder(steps_out) + return res, embedded_x, obfuscated_vars + else: + steps_out, _ = self.encoder(embedded_x) + res = self.decoder(steps_out) + return res, embedded_x, torch.ones(embedded_x.shape).to(x.device) + + def forward_masks(self, x): + embedded_x = self.embedder(x) + return self.encoder.forward_masks(embedded_x) + + +class TabNetNoEmbeddings(torch.nn.Module): + def __init__( + self, + input_dim, + output_dim, + n_d=8, + n_a=8, + n_steps=3, + gamma=1.3, + n_independent=2, + n_shared=2, + epsilon=1e-15, + virtual_batch_size=128, + momentum=0.02, + mask_type="sparsemax", + group_attention_matrix=None, + ): + """ + Defines main part of the TabNet network without the embedding layers. + + Parameters + ---------- + input_dim : int + Number of features + output_dim : int or list of int for multi task classification + Dimension of network output + examples : one for regression, 2 for binary classification etc... + n_d : int + Dimension of the prediction layer (usually between 4 and 64) + n_a : int + Dimension of the attention layer (usually between 4 and 64) + n_steps : int + Number of successive steps in the network (usually between 3 and 10) + gamma : float + Float above 1, scaling factor for attention updates (usually between 1.0 to 2.0) + n_independent : int + Number of independent GLU layer in each GLU block (default 2) + n_shared : int + Number of independent GLU layer in each GLU block (default 2) + epsilon : float + Avoid log(0), this should be kept very low + virtual_batch_size : int + Batch size for Ghost Batch Normalization + momentum : float + Float value between 0 and 1 which will be used for momentum in all batch norm + mask_type : str + Either "sparsemax" or "entmax" : this is the masking function to use + group_attention_matrix : torch matrix + Matrix of size (n_groups, input_dim), m_ij = importance within group i of feature j + """ + super(TabNetNoEmbeddings, self).__init__() + self.input_dim = input_dim + self.output_dim = output_dim + self.is_multi_task = isinstance(output_dim, list) + self.n_d = n_d + self.n_a = n_a + self.n_steps = n_steps + self.gamma = gamma + self.epsilon = epsilon + self.n_independent = n_independent + self.n_shared = n_shared + self.virtual_batch_size = virtual_batch_size + self.mask_type = mask_type + self.initial_bn = BatchNorm1d(self.input_dim, momentum=0.01) + + self.encoder = TabNetEncoder( + input_dim=input_dim, + output_dim=output_dim, + n_d=n_d, + n_a=n_a, + n_steps=n_steps, + gamma=gamma, + n_independent=n_independent, + n_shared=n_shared, + epsilon=epsilon, + virtual_batch_size=virtual_batch_size, + momentum=momentum, + mask_type=mask_type, + group_attention_matrix=group_attention_matrix, + ) + + if self.is_multi_task: + self.multi_task_mappings = torch.nn.ModuleList() + for task_dim in output_dim: + 
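                # one independent linear output head per task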
task_mapping = Linear(n_d, task_dim, bias=False) + initialize_non_glu(task_mapping, n_d, task_dim) + self.multi_task_mappings.append(task_mapping) + else: + self.final_mapping = Linear(n_d, output_dim, bias=False) + initialize_non_glu(self.final_mapping, n_d, output_dim) + + def forward(self, x): + res = 0 + steps_output, M_loss = self.encoder(x) + res = torch.sum(torch.stack(steps_output, dim=0), dim=0) + + if self.is_multi_task: + # Result will be in list format + out = [] + for task_mapping in self.multi_task_mappings: + out.append(task_mapping(res)) + else: + out = self.final_mapping(res) + return out, M_loss + + def forward_masks(self, x): + return self.encoder.forward_masks(x) + + +class TabNet(torch.nn.Module): + def __init__( + self, + input_dim, + output_dim, + n_d=8, + n_a=8, + n_steps=3, + gamma=1.3, + cat_idxs=[], + cat_dims=[], + cat_emb_dim=1, + n_independent=2, + n_shared=2, + epsilon=1e-15, + virtual_batch_size=128, + momentum=0.02, + mask_type="sparsemax", + group_attention_matrix=[], + ): + """ + Defines TabNet network + + Parameters + ---------- + input_dim : int + Initial number of features + output_dim : int + Dimension of network output + examples : one for regression, 2 for binary classification etc... + n_d : int + Dimension of the prediction layer (usually between 4 and 64) + n_a : int + Dimension of the attention layer (usually between 4 and 64) + n_steps : int + Number of successive steps in the network (usually between 3 and 10) + gamma : float + Float above 1, scaling factor for attention updates (usually between 1.0 to 2.0) + cat_idxs : list of int + Index of each categorical column in the dataset + cat_dims : list of int + Number of categories in each categorical column + cat_emb_dim : int or list of int + Size of the embedding of categorical features + if int, all categorical features will have same embedding size + if list of int, every corresponding feature will have specific size + n_independent : int + Number of independent GLU layer in each GLU block (default 2) + n_shared : int + Number of independent GLU layer in each GLU block (default 2) + epsilon : float + Avoid log(0), this should be kept very low + virtual_batch_size : int + Batch size for Ghost Batch Normalization + momentum : float + Float value between 0 and 1 which will be used for momentum in all batch norm + mask_type : str + Either "sparsemax" or "entmax" : this is the masking function to use + group_attention_matrix : torch matrix + Matrix of size (n_groups, input_dim), m_ij = importance within group i of feature j + """ + super(TabNet, self).__init__() + self.cat_idxs = cat_idxs or [] + self.cat_dims = cat_dims or [] + self.cat_emb_dim = cat_emb_dim + + self.input_dim = input_dim + self.output_dim = output_dim + self.n_d = n_d + self.n_a = n_a + self.n_steps = n_steps + self.gamma = gamma + self.epsilon = epsilon + self.n_independent = n_independent + self.n_shared = n_shared + self.mask_type = mask_type + + if self.n_steps <= 0: + raise ValueError("n_steps should be a positive integer.") + if self.n_independent == 0 and self.n_shared == 0: + raise ValueError("n_shared and n_independent can't be both zero.") + + self.virtual_batch_size = virtual_batch_size + self.embedder = EmbeddingGenerator( + input_dim, cat_dims, cat_idxs, cat_emb_dim, group_attention_matrix + ) + self.post_embed_dim = self.embedder.post_embed_dim + + self.tabnet = TabNetNoEmbeddings( + self.post_embed_dim, + output_dim, + n_d, + n_a, + n_steps, + gamma, + n_independent, + n_shared, + epsilon, + virtual_batch_size, + 
momentum, + mask_type, + self.embedder.embedding_group_matrix, + ) + + def forward(self, x): + x = self.embedder(x) + return self.tabnet(x) + + def forward_masks(self, x): + x = self.embedder(x) + return self.tabnet.forward_masks(x) + + +class AttentiveTransformer(torch.nn.Module): + def __init__( + self, + input_dim, + group_dim, + group_matrix, + virtual_batch_size=128, + momentum=0.02, + mask_type="sparsemax", + ): + """ + Initialize an attention transformer. + + Parameters + ---------- + input_dim : int + Input size + group_dim : int + Number of groups for features + virtual_batch_size : int + Batch size for Ghost Batch Normalization + momentum : float + Float value between 0 and 1 which will be used for momentum in batch norm + mask_type : str + Either "sparsemax" or "entmax" : this is the masking function to use + """ + super(AttentiveTransformer, self).__init__() + self.fc = Linear(input_dim, group_dim, bias=False) + initialize_non_glu(self.fc, input_dim, group_dim) + self.bn = GBN( + group_dim, virtual_batch_size=virtual_batch_size, momentum=momentum + ) + + if mask_type == "sparsemax": + # Sparsemax + self.selector = sparsemax.Sparsemax(dim=-1) + elif mask_type == "entmax": + # Entmax + self.selector = sparsemax.Entmax15(dim=-1) + else: + raise NotImplementedError( + "Please choose either sparsemax" + "or entmax as masktype" + ) + + def forward(self, priors, processed_feat): + x = self.fc(processed_feat) + x = self.bn(x) + x = torch.mul(x, priors) + x = self.selector(x) + return x + + +class FeatTransformer(torch.nn.Module): + def __init__( + self, + input_dim, + output_dim, + shared_layers, + n_glu_independent, + virtual_batch_size=128, + momentum=0.02, + ): + super(FeatTransformer, self).__init__() + """ + Initialize a feature transformer. 
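+        It applies the GLU block shared across all steps, then the step-specific GLU layers.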
+ + Parameters + ---------- + input_dim : int + Input size + output_dim : int + Output_size + shared_layers : torch.nn.ModuleList + The shared block that should be common to every step + n_glu_independent : int + Number of independent GLU layers + virtual_batch_size : int + Batch size for Ghost Batch Normalization within GLU block(s) + momentum : float + Float value between 0 and 1 which will be used for momentum in batch norm + """ + + params = { + "n_glu": n_glu_independent, + "virtual_batch_size": virtual_batch_size, + "momentum": momentum, + } + + if shared_layers is None: + # no shared layers + self.shared = torch.nn.Identity() + is_first = True + else: + self.shared = GLU_Block( + input_dim, + output_dim, + first=True, + shared_layers=shared_layers, + n_glu=len(shared_layers), + virtual_batch_size=virtual_batch_size, + momentum=momentum, + ) + is_first = False + + if n_glu_independent == 0: + # no independent layers + self.specifics = torch.nn.Identity() + else: + spec_input_dim = input_dim if is_first else output_dim + self.specifics = GLU_Block( + spec_input_dim, output_dim, first=is_first, **params + ) + + def forward(self, x): + x = self.shared(x) + x = self.specifics(x) + return x + + +class GLU_Block(torch.nn.Module): + """ + Independent GLU block, specific to each step + """ + + def __init__( + self, + input_dim, + output_dim, + n_glu=2, + first=False, + shared_layers=None, + virtual_batch_size=128, + momentum=0.02, + ): + super(GLU_Block, self).__init__() + self.first = first + self.shared_layers = shared_layers + self.n_glu = n_glu + self.glu_layers = torch.nn.ModuleList() + + params = {"virtual_batch_size": virtual_batch_size, "momentum": momentum} + + fc = shared_layers[0] if shared_layers else None + self.glu_layers.append(GLU_Layer(input_dim, output_dim, fc=fc, **params)) + for glu_id in range(1, self.n_glu): + fc = shared_layers[glu_id] if shared_layers else None + self.glu_layers.append(GLU_Layer(output_dim, output_dim, fc=fc, **params)) + + def forward(self, x): + scale = torch.sqrt(torch.FloatTensor([0.5]).to(x.device)) + if self.first: # the first layer of the block has no scale multiplication + x = self.glu_layers[0](x) + layers_left = range(1, self.n_glu) + else: + layers_left = range(self.n_glu) + + for glu_id in layers_left: + x = torch.add(x, self.glu_layers[glu_id](x)) + x = x * scale + return x + + +class GLU_Layer(torch.nn.Module): + def __init__( + self, input_dim, output_dim, fc=None, virtual_batch_size=128, momentum=0.02 + ): + super(GLU_Layer, self).__init__() + + self.output_dim = output_dim + if fc: + self.fc = fc + else: + self.fc = Linear(input_dim, 2 * output_dim, bias=False) + initialize_glu(self.fc, input_dim, 2 * output_dim) + + self.bn = GBN( + 2 * output_dim, virtual_batch_size=virtual_batch_size, momentum=momentum + ) + + def forward(self, x): + x = self.fc(x) + x = self.bn(x) + out = torch.mul(x[:, : self.output_dim], torch.sigmoid(x[:, self.output_dim :])) + return out + + +class EmbeddingGenerator(torch.nn.Module): + """ + Classical embeddings generator + """ + + def __init__(self, input_dim, cat_dims, cat_idxs, cat_emb_dims, group_matrix): + """This is an embedding module for an entire set of features + + Parameters + ---------- + input_dim : int + Number of features coming as input (number of columns) + cat_dims : list of int + Number of modalities for each categorial features + If the list is empty, no embeddings will be done + cat_idxs : list of int + Positional index for each categorical features in inputs + cat_emb_dim : list of int + 
Embedding dimension for each categorical features + If int, the same embedding dimension will be used for all categorical features + group_matrix : torch matrix + Original group matrix before embeddings + """ + super(EmbeddingGenerator, self).__init__() + + if cat_dims == [] and cat_idxs == []: + self.skip_embedding = True + self.post_embed_dim = input_dim + self.embedding_group_matrix = group_matrix.to(group_matrix.device) + return + else: + self.skip_embedding = False + + self.post_embed_dim = int(input_dim + np.sum(cat_emb_dims) - len(cat_emb_dims)) + + self.embeddings = torch.nn.ModuleList() + + for cat_dim, emb_dim in zip(cat_dims, cat_emb_dims): + self.embeddings.append(torch.nn.Embedding(cat_dim, emb_dim)) + + # record continuous indices + self.continuous_idx = torch.ones(input_dim, dtype=torch.bool) + self.continuous_idx[cat_idxs] = 0 + + # update group matrix + n_groups = group_matrix.shape[0] + self.embedding_group_matrix = torch.empty( + (n_groups, self.post_embed_dim), device=group_matrix.device + ) + for group_idx in range(n_groups): + post_emb_idx = 0 + cat_feat_counter = 0 + for init_feat_idx in range(input_dim): + if self.continuous_idx[init_feat_idx] == 1: + # this means that no embedding is applied to this column + self.embedding_group_matrix[group_idx, post_emb_idx] = group_matrix[ + group_idx, init_feat_idx + ] # noqa + post_emb_idx += 1 + else: + # this is a categorical feature which creates multiple embeddings + n_embeddings = cat_emb_dims[cat_feat_counter] + self.embedding_group_matrix[ + group_idx, post_emb_idx : post_emb_idx + n_embeddings + ] = ( + group_matrix[group_idx, init_feat_idx] / n_embeddings + ) # noqa + post_emb_idx += n_embeddings + cat_feat_counter += 1 + + def forward(self, x): + """ + Apply embeddings to inputs + Inputs should be (batch_size, input_dim) + Outputs will be of size (batch_size, self.post_embed_dim) + """ + if self.skip_embedding: + # no embeddings required + return x + + cols = [] + cat_feat_counter = 0 + for feat_init_idx, is_continuous in enumerate(self.continuous_idx): + # Enumerate through continuous idx boolean mask to apply embeddings + if is_continuous: + cols.append(x[:, feat_init_idx].float().view(-1, 1)) + else: + cols.append( + self.embeddings[cat_feat_counter](x[:, feat_init_idx].long()) + ) + cat_feat_counter += 1 + # concat + post_embeddings = torch.cat(cols, dim=1) + return post_embeddings + + +class RandomObfuscator(torch.nn.Module): + """ + Create and applies obfuscation masks. + The obfuscation is done at group level to match attention. + """ + + def __init__(self, pretraining_ratio, group_matrix): + """ + This create random obfuscation for self suppervised pretraining + Parameters + ---------- + pretraining_ratio : float + Ratio of feature to randomly discard for reconstruction + + """ + super(RandomObfuscator, self).__init__() + self.pretraining_ratio = pretraining_ratio + # group matrix is set to boolean here to pass all posssible information + self.group_matrix = (group_matrix > 0) + 0.0 + self.num_groups = group_matrix.shape[0] + + def forward(self, x): + """ + Generate random obfuscation mask. + + Returns + ------- + masked input and obfuscated variables. 
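+        Each feature group is masked independently with probability pretraining_ratio.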
+ """ + bs = x.shape[0] + + obfuscated_groups = torch.bernoulli( + self.pretraining_ratio * torch.ones((bs, self.num_groups), device=x.device) + ) + obfuscated_vars = torch.matmul(obfuscated_groups, self.group_matrix) + masked_input = torch.mul(1 - obfuscated_vars, x) + return masked_input, obfuscated_groups, obfuscated_vars From d29ef37a94d6123a7ec0b3250799164b574a1fdc Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Sun, 2 Apr 2023 21:13:24 +0200 Subject: [PATCH 37/95] add factory.py, let DDPM use TabNet, refactor --- .gitignore | 3 +- src/synthcity/plugins/core/dataloader.py | 2 +- src/synthcity/plugins/core/models/convnet.py | 4 +- src/synthcity/plugins/core/models/factory.py | 137 ++ .../{data_encoder.py => feature_encoder.py} | 39 +- .../plugins/core/models/functions.py | 152 ++ src/synthcity/plugins/core/models/layers.py | 136 +- src/synthcity/plugins/core/models/mlp.py | 145 +- src/synthcity/plugins/core/models/tabnet.py | 1246 ++++++++--------- .../core/models/tabular_ddpm/__init__.py | 14 +- .../gaussian_multinomial_diffsuion.py | 36 +- .../core/models/tabular_ddpm/modules.py | 31 +- .../plugins/core/models/tabular_encoder.py | 10 +- src/synthcity/plugins/core/models/ts_model.py | 4 +- src/synthcity/plugins/generic/plugin_ddpm.py | 30 +- tests/plugins/core/models/test_mlp.py | 8 +- tests/plugins/generic/test_ddpm.py | 6 +- 17 files changed, 1077 insertions(+), 926 deletions(-) create mode 100644 src/synthcity/plugins/core/models/factory.py rename src/synthcity/plugins/core/models/{data_encoder.py => feature_encoder.py} (91%) create mode 100644 src/synthcity/plugins/core/models/functions.py diff --git a/.gitignore b/.gitignore index c41e0784..5195f6c2 100644 --- a/.gitignore +++ b/.gitignore @@ -67,5 +67,4 @@ lightning_logs generated MNIST cifar-10* -src/test.py -.tmp.py +local_test.py diff --git a/src/synthcity/plugins/core/dataloader.py b/src/synthcity/plugins/core/dataloader.py index 099e85c5..2a13424b 100644 --- a/src/synthcity/plugins/core/dataloader.py +++ b/src/synthcity/plugins/core/dataloader.py @@ -16,7 +16,7 @@ # synthcity absolute from synthcity.plugins.core.constraints import Constraints from synthcity.plugins.core.dataset import FlexibleDataset, TensorDataset -from synthcity.plugins.core.models.data_encoder import DatetimeEncoder +from synthcity.plugins.core.models.feature_encoder import DatetimeEncoder from synthcity.utils.compression import compress_dataset, decompress_dataset from synthcity.utils.serialization import dataframe_hash diff --git a/src/synthcity/plugins/core/models/convnet.py b/src/synthcity/plugins/core/models/convnet.py index e9a7719c..ae4260e6 100644 --- a/src/synthcity/plugins/core/models/convnet.py +++ b/src/synthcity/plugins/core/models/convnet.py @@ -69,8 +69,8 @@ class ConvNet(nn.Module): @validate_arguments(config=dict(arbitrary_types_allowed=True)) def __init__( self, - task_type: str, - model: nn.Module, # classification/regression + task_type: str, # classification/regression + model: nn.Module, lr: float = 1e-3, weight_decay: float = 1e-3, opt_betas: tuple = (0.9, 0.999), diff --git a/src/synthcity/plugins/core/models/factory.py b/src/synthcity/plugins/core/models/factory.py new file mode 100644 index 00000000..e2d69525 --- /dev/null +++ b/src/synthcity/plugins/core/models/factory.py @@ -0,0 +1,137 @@ +# stdlib +from importlib import import_module +from typing import Any, Union + +# third party +from pydantic import validate_arguments +from torch import nn + +# synthcity relative +from .feature_encoder import ( + 
BayesianGMMEncoder, + DatetimeEncoder, + FeatureEncoder, + GaussianQuantileTransformer, + LabelEncoder, + MinMaxScaler, + OneHotEncoder, + RobustScaler, + StandardScaler, +) +from .layers import GumbelSoftmax + +# should only contain nn modules that can be used as building blocks in larger models +MODELS = dict( + mlp=".mlp.MLP", + rnn=nn.RNN, + gru=nn.GRU, + lstm=nn.LSTM, + transformer=".transformer.TransformerModel", + tabnet=".tabnet.TabNet", +) + +ACTIVATIONS = dict( + none=nn.Identity, + elu=nn.ELU, + relu=nn.ReLU, + leakyrelu=nn.LeakyReLU, + selu=nn.SELU, + tanh=nn.Tanh, + sigmoid=nn.Sigmoid, + softmax=nn.Softmax, + gumbelsoftmax=GumbelSoftmax, + gelu=nn.GELU, + silu=nn.SiLU, + swish=nn.SiLU, + hardtanh=nn.Hardtanh, + relu6=nn.ReLU6, + celu=nn.CELU, + glu=nn.GLU, + logsigmoid=nn.LogSigmoid, + softplus=nn.Softplus, +) + +FEATURE_ENCODERS = dict( + datetime=DatetimeEncoder, + onehot=OneHotEncoder, + label=LabelEncoder, + standard=StandardScaler, + minmax=MinMaxScaler, + robust=RobustScaler, + quantile=GaussianQuantileTransformer, + bayesiangmm=BayesianGMMEncoder, + none=FeatureEncoder, + passthrough=FeatureEncoder, +) + + +def _factory(type_: Union[str, type], params: dict, registry: dict) -> Any: + if isinstance(type_, type): + return type_(**params) + type_ = type_.lower().replace("_", "").replace("-", "") + if type_ in registry: + cls = registry[type_] + if isinstance(cls, str): + cls = registry[type_] = _dynamic_import(cls) + return cls(**params) + raise ValueError + + +def _dynamic_import(path: str) -> type: + """Avoid circular imports by importing dynamically.""" + if path.startswith("."): + package = __name__.rsplit(".", 1)[0] + else: + package = None + mod_path, cls = path.rsplit(".", 1) + module = import_module(mod_path, package) + return getattr(module, cls) + + +@validate_arguments(config=dict(arbitrary_types_allowed=True)) +def get_model(block: Union[str, type], params: dict) -> Any: + """Get a model from a name or a class. + + Named models: + - mlp + - rnn + - lstm + - transformer + - tabnet + """ + try: + return _factory(block, params, MODELS) + except ValueError: + raise ValueError(f"Unknown nn model: {block}") + + +@validate_arguments(config=dict(arbitrary_types_allowed=True)) +def get_nonlin(nonlin: Union[str, nn.Module], params: dict = {}) -> Any: + """Get a nonlinearity layer from a name or a class.""" + try: + return _factory(nonlin, params, ACTIVATIONS) + except ValueError: + raise ValueError(f"Unknown nonlinearity: {nonlin}") + + +@validate_arguments(config=dict(arbitrary_types_allowed=True)) +def get_feature_encoder(encoder: Union[str, type], params: dict = {}) -> Any: + """Get a feature encoder from a name or a class. 
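+    A custom encoder class is first wrapped with FeatureEncoder.wraps and
+    instantiated directly, e.g. get_feature_encoder("minmax") returns a
+    ready-to-fit MinMaxScaler instance.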
+ + Named encoders: + - datetime + - onehot + - label + - standard + - minmax + - robust + - quantile + - bayesian_gmm + - passthrough + """ + if isinstance(encoder, type): # custom encoder + encoder = FeatureEncoder.wraps(encoder) + try: + return _factory(encoder, params, FEATURE_ENCODERS) + except ValueError: + raise ValueError(f"Unknown feature encoder: {encoder}") diff --git a/src/synthcity/plugins/core/models/data_encoder.py b/src/synthcity/plugins/core/models/feature_encoder.py similarity index 91% rename from src/synthcity/plugins/core/models/data_encoder.py rename to src/synthcity/plugins/core/models/feature_encoder.py index 518400fa..995a65a6 100644 --- a/src/synthcity/plugins/core/models/data_encoder.py +++ b/src/synthcity/plugins/core/models/feature_encoder.py @@ -119,7 +119,7 @@ def _inverse_transform(self, data: np.ndarray) -> np.ndarray: @classmethod def wraps( - cls, encoder_class: TransformerMixin, **params: Any + cls: type, encoder_class: TransformerMixin, **params: Any ) -> Type[FeatureEncoder]: """Wraps sklearn transformer to FeatureEncoder.""" @@ -260,7 +260,7 @@ def __init__( subsample: int = 10000, random_state: Any = None, copy: bool = True, - ): + ) -> None: super().__init__( n_quantiles=None, output_distribution="normal", @@ -273,38 +273,3 @@ def __init__( def fit(self, x: np.ndarray, y: Any = None) -> "GaussianQuantileTransformer": self.n_quantiles = max(min(len(x) // 30, 1000), 10) return super().fit(x, y) - - -ENCODERS = { - "datetime": DatetimeEncoder, - "onehot": OneHotEncoder, - "label": LabelEncoder, - "standard": StandardScaler, - "minmax": MinMaxScaler, - "robust": RobustScaler, - "quantile": GaussianQuantileTransformer, - "bayesian_gmm": BayesianGMMEncoder, - "passthrough": FeatureEncoder, -} - - -def get_encoder(encoder: Union[str, type]) -> Type[FeatureEncoder]: - """Get a registered encoder. - - Supported encoders: - - Datetime - - datetime - - Categorical - - onehot - - label - - Continuous - - standard - - minmax - - robust - - quantile - - bayesian_gmm - - Passthrough - """ - if isinstance(encoder, type): # custom encoder - return FeatureEncoder.wraps(encoder) - return ENCODERS[encoder] diff --git a/src/synthcity/plugins/core/models/functions.py b/src/synthcity/plugins/core/models/functions.py new file mode 100644 index 00000000..27801034 --- /dev/null +++ b/src/synthcity/plugins/core/models/functions.py @@ -0,0 +1,152 @@ +""" +Custom differentiable tensor functions. +""" +# stdlib +from typing import Any + +# third party +import torch +from torch.autograd import Function + + +# credits to Yandex https://github.com/Qwicen/node/blob/master/lib/nn_utils.py +def _make_ix_like(input: torch.Tensor, dim: int = 0) -> torch.Tensor: + d = input.size(dim) + rho = torch.arange(1, d + 1, device=input.device, dtype=input.dtype) + view = [1] * input.dim() + view[0] = -1 + return rho.view(view).transpose(0, dim) + + +class SparsemaxFunction(Function): + """ + An implementation of sparsemax (Martins & Astudillo, 2016). See + :cite:`DBLP:journals/corr/MartinsA16` for detailed description. 
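+    Sparsemax is the Euclidean projection onto the probability simplex,
+    so it can produce exactly sparse probability vectors.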
+ By Ben Peters and Vlad Niculae + """ + + @staticmethod + def forward( + ctx: Any, + input: torch.Tensor, + dim: int = -1, + ) -> torch.Tensor: + """sparsemax: normalizing sparse transform (a la softmax) + + Parameters + ---------- + ctx : torch.autograd.function._ContextMethodMixin + input : torch.Tensor + any shape + dim : int + dimension along which to apply sparsemax + + Returns + ------- + output : torch.Tensor + same shape as input + + """ + ctx.dim = dim + max_val, _ = input.max(dim=dim, keepdim=True) + input -= max_val # same numerical stability trick as for softmax + tau, supp_size = SparsemaxFunction._threshold_and_support(input, dim=dim) + output = torch.clamp(input - tau, min=0) + ctx.save_for_backward(supp_size, output) + return output + + @staticmethod + def backward(ctx: Any, grad_output: torch.Tensor) -> tuple[torch.Tensor, None]: + supp_size, output = ctx.saved_tensors + dim = ctx.dim + grad_input = grad_output.clone() + grad_input[output == 0] = 0 + + v_hat = grad_input.sum(dim=dim) / supp_size.to(output.dtype).squeeze() + v_hat = v_hat.unsqueeze(dim) + grad_input = torch.where(output != 0, grad_input - v_hat, grad_input) + return grad_input, None + + @staticmethod + def _threshold_and_support( + input: torch.Tensor, dim: int = -1 + ) -> tuple[torch.Tensor, torch.Tensor]: + """Sparsemax building block: compute the threshold + + Parameters + ---------- + input: torch.Tensor + any dimension + dim : int + dimension along which to apply the sparsemax + + Returns + ------- + tau : torch.Tensor + the threshold value + support_size : torch.Tensor + + """ + + input_srt, _ = torch.sort(input, descending=True, dim=dim) + input_cumsum = input_srt.cumsum(dim) - 1 + rhos = _make_ix_like(input, dim) + support = rhos * input_srt > input_cumsum + + support_size = support.sum(dim=dim).unsqueeze(dim) + tau = input_cumsum.gather(dim, support_size - 1) + tau /= support_size.to(input.dtype) + return tau, support_size + + +class EntmaxFunction(Function): + """ + An implementation of exact Entmax with alpha=1.5 (B. Peters, V. Niculae, A. Martins). See + :cite:`https://arxiv.org/abs/1905.05702 for detailed description. + Source: https://github.com/deep-spin/entmax + """ + + @staticmethod + def forward(ctx: Any, input: torch.Tensor, dim: int = -1) -> torch.Tensor: + ctx.dim = dim + + max_val, _ = input.max(dim=dim, keepdim=True) + input = input - max_val # same numerical stability trick as for softmax + input = input / 2 # divide by 2 to solve actual Entmax + + tau_star, _ = EntmaxFunction._threshold_and_support(input, dim) + output = torch.clamp(input - tau_star, min=0) ** 2 + ctx.save_for_backward(output) + return output + + @staticmethod + def backward(ctx: Any, grad_output: torch.Tensor) -> torch.Tensor: + (Y,) = ctx.saved_tensors + gppr = Y.sqrt() # = 1 / g'' (Y) + dX = grad_output * gppr + q = dX.sum(ctx.dim) / gppr.sum(ctx.dim) + q = q.unsqueeze(ctx.dim) + dX -= q * gppr + return dX, None + + @staticmethod + def _threshold_and_support( + input: torch.Tensor, dim: int = -1 + ) -> tuple[torch.Tensor, torch.Tensor]: + Xsrt, _ = torch.sort(input, descending=True, dim=dim) + + rho = _make_ix_like(input, dim) + mean = Xsrt.cumsum(dim) / rho + mean_sq = (Xsrt**2).cumsum(dim) / rho + ss = rho * (mean_sq - mean**2) + delta = (1 - ss) / rho + + # NOTE this is not exactly the same as in reference algo + # Fortunately it seems the clamped values never wrongly + # get selected by tau <= sorted_z. Prove this! 
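+        # clamping at zero keeps the sqrt argument non-negative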
+ delta_nz = torch.clamp(delta, 0) + tau = mean - torch.sqrt(delta_nz) + + support_size = (tau <= Xsrt).sum(dim).unsqueeze(dim) + tau_star = tau.gather(dim, support_size - 1) + return tau_star, support_size diff --git a/src/synthcity/plugins/core/models/layers.py b/src/synthcity/plugins/core/models/layers.py index fb4a8ea6..9be97abd 100644 --- a/src/synthcity/plugins/core/models/layers.py +++ b/src/synthcity/plugins/core/models/layers.py @@ -1,10 +1,18 @@ # stdlib -from typing import Any, Optional +from typing import Any, List, Optional, Tuple, Type # third party +import numpy as np import torch +from pydantic import validate_arguments from torch import nn +# synthcity absolute +from synthcity.utils.constants import DEVICE + +# synthcity relative +from .functions import EntmaxFunction, SparsemaxFunction + class Permute(nn.Module): def __init__(self, *dims: Any) -> None: @@ -34,3 +42,129 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: return x.transpose(*self.dims).contiguous() else: return x.transpose(*self.dims) + + +def SkipConnection(cls: Type[nn.Module]) -> Type[nn.Module]: + """Wraps a model to add a skip connection from the input to the output. + + Example: + >>> ResidualBlock = SkipConnection(MLP) + >>> ResidualBlock(n_units_in=10, n_units_out=3, n_units_hidden=64) + SkipConnection(MLP)( + (model): Sequential( + (0): LinearLayer( + (model): Sequential( + (0): Linear(in_features=10, out_features=64, bias=True) + (1): ReLU() + ) + ) + (1): Linear(in_features=64, out_features=3, bias=True) + ) + (loss): MSELoss() + ) + """ + + class WrappedModule(cls): # type: ignore + device: torch.device = DEVICE + + @validate_arguments(config=dict(arbitrary_types_allowed=True)) + def forward(self, X: torch.Tensor) -> torch.Tensor: + # if X.shape[-1] == 0: + # return torch.zeros((*X.shape[:-1], self.n_units_out)).to(self.device) + X = X.float().to(self.device) + out = super().forward(X) + return torch.cat([out, X], dim=-1) + + WrappedModule.__name__ = f"SkipConnection({cls.__name__})" + WrappedModule.__qualname__ = f"SkipConnection({cls.__qualname__})" + WrappedModule.__doc__ = f"""(With skipped connection) {cls.__doc__}""" + return WrappedModule + + +# class GLU(nn.Module): +# """Gated Linear Unit (GLU).""" + +# def __init__(self, activation: Union[str, nn.Module] = "sigmoid") -> None: +# super().__init__() +# if type(activation) == str: +# self.non_lin = get_nonlin(activation) +# else: +# self.non_lin = activation + +# def forward(self, x: Tensor) -> Tensor: +# if x.shape[-1] % 2: +# raise ValueError("The last dimension of the input tensor must be even.") +# a, b = x.chunk(2, dim=-1) +# return a * self.non_lin(b) + + +class GumbelSoftmax(nn.Module): + def __init__( + self, tau: float = 0.2, hard: bool = False, eps: float = 1e-10, dim: int = -1 + ) -> None: + super(GumbelSoftmax, self).__init__() + + self.tau = tau + self.hard = hard + self.eps = eps + self.dim = dim + + def forward(self, logits: torch.Tensor) -> torch.Tensor: + return nn.functional.gumbel_softmax( + logits, tau=self.tau, hard=self.hard, eps=self.eps, dim=self.dim + ) + + +class MultiActivationHead(nn.Module): + """Final layer with multiple activations. 
Useful for tabular data.""" + + def __init__( + self, + activations: List[Tuple[nn.Module, int]], + device: Any = DEVICE, + ) -> None: + super(MultiActivationHead, self).__init__() + self.activations = [] + self.activation_lengths = [] + self.device = device + + for activation, length in activations: + self.activations.append(activation) + self.activation_lengths.append(length) + + @validate_arguments(config=dict(arbitrary_types_allowed=True)) + def forward(self, X: torch.Tensor) -> torch.Tensor: + if X.shape[-1] != np.sum(self.activation_lengths): + raise RuntimeError( + f"Shape mismatch for the activations: expected {np.sum(self.activation_lengths)}. Got shape {X.shape}." + ) + + split = 0 + out = torch.zeros(X.shape).to(self.device) + + for activation, step in zip(self.activations, self.activation_lengths): + out[..., split : split + step] = activation(X[..., split : split + step]) + + split += step + + return out + + +class Sparsemax(nn.Module): + def __init__(self, dim: int = -1) -> None: + super(Sparsemax, self).__init__() + self.dim = dim + + @validate_arguments(config=dict(arbitrary_types_allowed=True)) + def forward(self, input: torch.Tensor) -> torch.Tensor: + return SparsemaxFunction.apply(input, self.dim) + + +class Entmax(nn.Module): + def __init__(self, dim: int = -1) -> None: + super(Entmax, self).__init__() + self.dim = dim + + @validate_arguments(config=dict(arbitrary_types_allowed=True)) + def forward(self, input: torch.Tensor) -> torch.Tensor: + return EntmaxFunction.apply(input, self.dim) diff --git a/src/synthcity/plugins/core/models/mlp.py b/src/synthcity/plugins/core/models/mlp.py index 5ab63464..5d85c1c8 100644 --- a/src/synthcity/plugins/core/models/mlp.py +++ b/src/synthcity/plugins/core/models/mlp.py @@ -1,86 +1,25 @@ # stdlib -from typing import Any, Callable, List, Optional, Tuple, Union +from typing import Any, Callable, List, Optional, Tuple # third party import numpy as np import torch from pydantic import validate_arguments -from torch import Tensor, nn +from torch import nn from torch.utils.data import DataLoader, TensorDataset # synthcity absolute import synthcity.logger as log +from synthcity.plugins.core.models.factory import get_nonlin +from synthcity.plugins.core.models.layers import ( + GumbelSoftmax, + MultiActivationHead, + SkipConnection, +) from synthcity.utils.constants import DEVICE from synthcity.utils.reproducibility import enable_reproducible_results -class GumbelSoftmax(nn.Module): - def __init__( - self, tau: float = 0.2, hard: bool = False, eps: float = 1e-10, dim: int = -1 - ) -> None: - super(GumbelSoftmax, self).__init__() - - self.tau = tau - self.hard = hard - self.eps = eps - self.dim = dim - - def forward(self, logits: torch.Tensor) -> torch.Tensor: - return nn.functional.gumbel_softmax( - logits, tau=self.tau, hard=self.hard, eps=self.eps, dim=self.dim - ) - - -class GLU(nn.Module): - """Gated Linear Unit (GLU).""" - - def __init__(self, activation: Union[str, nn.Module] = "sigmoid") -> None: - super().__init__() - if type(activation) == str: - self.non_lin = get_nonlin(activation) - else: - self.non_lin = activation - - def forward(self, x: Tensor) -> Tensor: - if x.shape[-1] % 2: - raise ValueError("The last dimension of the input tensor must be even.") - a, b = x.chunk(2, dim=-1) - return a * self.non_lin(b) - - -def get_nonlin(name: Union[str, nn.Module]) -> nn.Module: - if isinstance(name, nn.Module): - return name - elif name == "none": - return nn.Identity() - elif name == "elu": - return nn.ELU() - elif name == "relu": - 
return nn.ReLU() - elif name == "leaky_relu": - return nn.LeakyReLU() - elif name == "selu": - return nn.SELU() - elif name == "tanh": - return nn.Tanh() - elif name == "sigmoid": - return nn.Sigmoid() - elif name == "softmax": - return GumbelSoftmax() - elif name == "gelu": - return nn.GELU() - elif name == "glu": - return GLU() - elif name == "reglu": - return GLU("relu") - elif name == "geglu": - return GLU("gelu") - elif name in ("silu", "swish"): - return nn.SiLU() - else: - raise ValueError(f"Unknown nonlinearity {name}") - - class LinearLayer(nn.Module): @validate_arguments(config=dict(arbitrary_types_allowed=True)) def __init__( @@ -114,70 +53,7 @@ def forward(self, X: torch.Tensor) -> torch.Tensor: return self.model(X.float()).to(self.device) -class ResidualLayer(LinearLayer): - @validate_arguments(config=dict(arbitrary_types_allowed=True)) - def __init__( - self, - n_units_in: int, - n_units_out: int, - dropout: float = 0, - batch_norm: bool = False, - nonlin: Optional[str] = "relu", - device: Any = DEVICE, - ) -> None: - super(ResidualLayer, self).__init__( - n_units_in, - n_units_out, - dropout=dropout, - batch_norm=batch_norm, - nonlin=nonlin, - device=device, - ) - self.device = device - self.n_units_out = n_units_out - - @validate_arguments(config=dict(arbitrary_types_allowed=True)) - def forward(self, X: torch.Tensor) -> torch.Tensor: - if X.shape[-1] == 0: - return torch.zeros((*X.shape[:-1], self.n_units_out)).to(self.device) - - out = self.model(X.float()) - return torch.cat([out, X], dim=-1).to(self.device) - - -class MultiActivationHead(nn.Module): - """Final layer with multiple activations. Useful for tabular data.""" - - def __init__( - self, - activations: List[Tuple[nn.Module, int]], - device: Any = DEVICE, - ) -> None: - super(MultiActivationHead, self).__init__() - self.activations = [] - self.activation_lengths = [] - self.device = device - - for activation, length in activations: - self.activations.append(activation) - self.activation_lengths.append(length) - - @validate_arguments(config=dict(arbitrary_types_allowed=True)) - def forward(self, X: torch.Tensor) -> torch.Tensor: - if X.shape[-1] != np.sum(self.activation_lengths): - raise RuntimeError( - f"Shape mismatch for the activations: expected {np.sum(self.activation_lengths)}. Got shape {X.shape}." 
- ) - - split = 0 - out = torch.zeros(X.shape).to(self.device) - - for activation, step in zip(self.activations, self.activation_lengths): - out[..., split : split + step] = activation(X[..., split : split + step]) - - split += step - - return out +ResidualLayer = SkipConnection(LinearLayer) class MLP(nn.Module): @@ -235,9 +111,10 @@ class MLP(nn.Module): @validate_arguments(config=dict(arbitrary_types_allowed=True)) def __init__( self, - task_type: str, # classification/regression + *, n_units_in: int, n_units_out: int, + task_type: str = "regression", # classification/regression n_layers_hidden: int = 1, n_units_hidden: int = 100, nonlin: str = "relu", diff --git a/src/synthcity/plugins/core/models/tabnet.py b/src/synthcity/plugins/core/models/tabnet.py index 25383cb9..5a4f3051 100644 --- a/src/synthcity/plugins/core/models/tabnet.py +++ b/src/synthcity/plugins/core/models/tabnet.py @@ -1,105 +1,215 @@ +# stdlib +from typing import List, Optional, Tuple + # third party import numpy as np import torch -from torch.autograd import Function from torch.nn import BatchNorm1d, Linear, ReLU +# synthcity relative +from .layers import Entmax, Sparsemax + +# class TabNet(torch.nn.Module): +# def __init__( +# self, +# input_dim, +# output_dim, +# n_d=8, +# n_a=8, +# n_steps=3, +# gamma=1.3, +# n_independent=2, +# n_shared=2, +# epsilon=1e-15, +# virtual_batch_size=128, +# momentum=0.02, +# mask_type="sparsemax", +# group_attention_matrix=None, +# ): +# """ +# Defines TabNet network + +# Parameters +# ---------- +# input_dim : int +# Initial number of features +# output_dim : int +# Dimension of network output +# examples : one for regression, 2 for binary classification etc... +# n_d : int +# Dimension of the prediction layer (usually between 4 and 64) +# n_a : int +# Dimension of the attention layer (usually between 4 and 64) +# n_steps : int +# Number of successive steps in the network (usually between 3 and 10) +# gamma : float +# Float above 1, scaling factor for attention updates (usually between 1.0 to 2.0) +# n_independent : int +# Number of independent GLU layer in each GLU block (default 2) +# n_shared : int +# Number of independent GLU layer in each GLU block (default 2) +# epsilon : float +# Avoid log(0), this should be kept very low +# virtual_batch_size : int +# Batch size for Ghost Batch Normalization +# momentum : float +# Float value between 0 and 1 which will be used for momentum in all batch norm +# mask_type : str +# Either "sparsemax" or "entmax" : this is the masking function to use +# group_attention_matrix : torch matrix +# Matrix of size (n_groups, input_dim), m_ij = importance within group i of feature j +# """ +# super(TabNet, self).__init__() + +# if group_attention_matrix is None: +# group_attention_matrix = torch.Tensor([]) + +# self.input_dim = input_dim +# self.output_dim = output_dim +# self.n_d = n_d +# self.n_a = n_a +# self.n_steps = n_steps +# self.gamma = gamma +# self.epsilon = epsilon +# self.n_independent = n_independent +# self.n_shared = n_shared +# self.mask_type = mask_type + +# self.virtual_batch_size = virtual_batch_size +# self.post_embed_dim = self.embedder.post_embed_dim + +# self.tabnet = TabNetNoEmbeddings( +# self.post_embed_dim, +# output_dim, +# n_d, +# n_a, +# n_steps, +# gamma, +# n_independent, +# n_shared, +# epsilon, +# virtual_batch_size, +# momentum, +# mask_type, +# self.embedder.embedding_group_matrix, +# ) + +# def forward(self, x): +# x = self.embedder(x) +# return self.tabnet(x) + +# def forward_masks(self, x): +# x = self.embedder(x) 
+# return self.tabnet.forward_masks(x) -# credits to Yandex https://github.com/Qwicen/node/blob/master/lib/nn_utils.py -def _make_ix_like(input, dim=0): - d = input.size(dim) - rho = torch.arange(1, d + 1, device=input.device, dtype=input.dtype) - view = [1] * input.dim() - view[0] = -1 - return rho.view(view).transpose(0, dim) - - -class SparsemaxFunction(Function): - """ - An implementation of sparsemax (Martins & Astudillo, 2016). See - :cite:`DBLP:journals/corr/MartinsA16` for detailed description. - By Ben Peters and Vlad Niculae - """ - - @staticmethod - def forward(ctx, input, dim=-1): - """sparsemax: normalizing sparse transform (a la softmax) - - Parameters - ---------- - ctx : torch.autograd.function._ContextMethodMixin - input : torch.Tensor - any shape - dim : int - dimension along which to apply sparsemax - - Returns - ------- - output : torch.Tensor - same shape as input +class TabNet(torch.nn.Module): + def __init__( + self, + input_dim: int, + output_dim: int, + n_d: int = 8, + n_a: int = 8, + n_steps: int = 3, + gamma: float = 1.3, + n_independent: int = 2, + n_shared: int = 2, + epsilon: float = 1e-15, + virtual_batch_size: int = 128, + momentum: float = 0.02, + mask_type: str = "sparsemax", + group_attention_matrix: Optional[torch.Tensor] = None, + ) -> None: """ - ctx.dim = dim - max_val, _ = input.max(dim=dim, keepdim=True) - input -= max_val # same numerical stability trick as for softmax - tau, supp_size = SparsemaxFunction._threshold_and_support(input, dim=dim) - output = torch.clamp(input - tau, min=0) - ctx.save_for_backward(supp_size, output) - return output - - @staticmethod - def backward(ctx, grad_output): - supp_size, output = ctx.saved_tensors - dim = ctx.dim - grad_input = grad_output.clone() - grad_input[output == 0] = 0 - - v_hat = grad_input.sum(dim=dim) / supp_size.to(output.dtype).squeeze() - v_hat = v_hat.unsqueeze(dim) - grad_input = torch.where(output != 0, grad_input - v_hat, grad_input) - return grad_input, None - - @staticmethod - def _threshold_and_support(input, dim=-1): - """Sparsemax building block: compute the threshold + Defines main part of the TabNet network without the embedding layers. Parameters ---------- - input: torch.Tensor - any dimension - dim : int - dimension along which to apply the sparsemax + input_dim : int + Number of features + output_dim : int or list of int for multi task classification + Dimension of network output + examples : one for regression, 2 for binary classification etc... 
+ n_d : int + Dimension of the prediction layer (usually between 4 and 64) + n_a : int + Dimension of the attention layer (usually between 4 and 64) + n_steps : int + Number of successive steps in the network (usually between 3 and 10) + gamma : float + Float above 1, scaling factor for attention updates (usually between 1.0 to 2.0) + n_independent : int + Number of independent GLU layer in each GLU block (default 2) + n_shared : int + Number of independent GLU layer in each GLU block (default 2) + epsilon : float + Avoid log(0), this should be kept very low + virtual_batch_size : int + Batch size for Ghost Batch Normalization + momentum : float + Float value between 0 and 1 which will be used for momentum in all batch norm + mask_type : str + Either "sparsemax" or "entmax" : this is the masking function to use + group_attention_matrix : torch matrix + Matrix of size (n_groups, input_dim), m_ij = importance within group i of feature j + """ - Returns - ------- - tau : torch.Tensor - the threshold value - support_size : torch.Tensor + if n_steps <= 0: + raise ValueError("n_steps should be a positive integer.") + if n_independent == 0 and n_shared == 0: + raise ValueError("n_shared and n_independent can't be both zero.") - """ + super(TabNet, self).__init__() + self.input_dim = input_dim + self.output_dim = output_dim + self.n_d = n_d + self.n_a = n_a + self.n_steps = n_steps + self.gamma = gamma + self.epsilon = epsilon + self.n_independent = n_independent + self.n_shared = n_shared + self.virtual_batch_size = virtual_batch_size + self.mask_type = mask_type + self.initial_bn = BatchNorm1d(self.input_dim, momentum=0.01) - input_srt, _ = torch.sort(input, descending=True, dim=dim) - input_cumsum = input_srt.cumsum(dim) - 1 - rhos = _make_ix_like(input, dim) - support = rhos * input_srt > input_cumsum + self.encoder = TabNetEncoder( + input_dim=input_dim, + output_dim=output_dim, + n_d=n_d, + n_a=n_a, + n_steps=n_steps, + gamma=gamma, + n_independent=n_independent, + n_shared=n_shared, + epsilon=epsilon, + virtual_batch_size=virtual_batch_size, + momentum=momentum, + mask_type=mask_type, + group_attention_matrix=group_attention_matrix, + ) - support_size = support.sum(dim=dim).unsqueeze(dim) - tau = input_cumsum.gather(dim, support_size - 1) - tau /= support_size.to(input.dtype) - return tau, support_size + self.final_mapping = Linear(n_d, output_dim, bias=False) + initialize_non_glu(self.final_mapping, n_d, output_dim) + def forward(self, x: torch.Tensor) -> torch.Tensor: + steps_output, M_loss = self.encoder(x) + self.M_loss = M_loss + res = torch.sum(torch.stack(steps_output, dim=0), dim=0) + return self.final_mapping(res) -sparsemax = SparsemaxFunction.apply + def forward_masks(self, x: torch.Tensor) -> torch.Tensor: + return self.encoder.forward_masks(x) -def initialize_non_glu(module, input_dim, output_dim): +def initialize_non_glu(module: Linear, input_dim: int, output_dim: int) -> None: gain_value = np.sqrt((input_dim + output_dim) / np.sqrt(4 * input_dim)) torch.nn.init.xavier_normal_(module.weight, gain=gain_value) # torch.nn.init.zeros_(module.bias) return -def initialize_glu(module, input_dim, output_dim): +def initialize_glu(module: Linear, input_dim: int, output_dim: int) -> None: gain_value = np.sqrt((input_dim + output_dim) / np.sqrt(input_dim)) torch.nn.init.xavier_normal_(module.weight, gain=gain_value) # torch.nn.init.zeros_(module.bias) @@ -112,14 +222,16 @@ class GBN(torch.nn.Module): https://arxiv.org/abs/1705.08741 """ - def __init__(self, input_dim, 
virtual_batch_size=128, momentum=0.01): + def __init__( + self, input_dim: int, virtual_batch_size: int = 128, momentum: float = 0.01 + ) -> None: super(GBN, self).__init__() self.input_dim = input_dim self.virtual_batch_size = virtual_batch_size self.bn = BatchNorm1d(self.input_dim, momentum=momentum) - def forward(self, x): + def forward(self, x: torch.Tensor) -> torch.Tensor: chunks = x.chunk(int(np.ceil(x.shape[0] / self.virtual_batch_size)), 0) res = [self.bn(x_) for x_ in chunks] @@ -129,20 +241,20 @@ def forward(self, x): class TabNetEncoder(torch.nn.Module): def __init__( self, - input_dim, - output_dim, - n_d=8, - n_a=8, - n_steps=3, - gamma=1.3, - n_independent=2, - n_shared=2, - epsilon=1e-15, - virtual_batch_size=128, - momentum=0.02, - mask_type="sparsemax", - group_attention_matrix=None, - ): + input_dim: int, + output_dim: int, + n_d: int = 8, + n_a: int = 8, + n_steps: int = 3, + gamma: float = 1.3, + n_independent: int = 2, + n_shared: int = 2, + epsilon: float = 1e-15, + virtual_batch_size: int = 128, + momentum: float = 0.02, + mask_type: str = "sparsemax", + group_attention_matrix: Optional[torch.Tensor] = None, + ) -> None: """ Defines main part of the TabNet network without the embedding layers. @@ -173,8 +285,6 @@ def __init__( Float value between 0 and 1 which will be used for momentum in all batch norm mask_type : str Either "sparsemax" or "entmax" : this is the masking function to use - group_attention_matrix : torch matrix - Matrix of size (n_groups, input_dim), m_ij = importance within group i of feature j """ super(TabNetEncoder, self).__init__() self.input_dim = input_dim @@ -210,7 +320,6 @@ def __init__( shared_feat_transform.append( Linear(n_d + n_a, 2 * (n_d + n_a), bias=False) ) - else: shared_feat_transform = None @@ -238,7 +347,6 @@ def __init__( attention = AttentiveTransformer( n_a, self.attention_dim, - group_matrix=group_attention_matrix, virtual_batch_size=self.virtual_batch_size, momentum=momentum, mask_type=self.mask_type, @@ -246,14 +354,16 @@ def __init__( self.feat_transformers.append(transformer) self.att_transformers.append(attention) - def forward(self, x, prior=None): + def forward( + self, x: torch.Tensor, prior: Optional[torch.Tensor] = None + ) -> Tuple[List[torch.Tensor], torch.Tensor]: x = self.initial_bn(x) bs = x.shape[0] # batch size if prior is None: prior = torch.ones((bs, self.attention_dim)).to(x.device) - M_loss = 0 + M_loss = 0.0 att = self.initial_splitter(x)[:, self.n_d :] steps_output = [] for step in range(self.n_steps): @@ -275,18 +385,18 @@ def forward(self, x, prior=None): M_loss /= self.n_steps return steps_output, M_loss - def forward_masks(self, x): + def forward_masks(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]: x = self.initial_bn(x) bs = x.shape[0] # batch size prior = torch.ones((bs, self.attention_dim)).to(x.device) M_explain = torch.zeros(x.shape).to(x.device) att = self.initial_splitter(x)[:, self.n_d :] - masks = {} + masks = [] for step in range(self.n_steps): M = self.att_transformers[step](prior, att) M_feature_level = torch.matmul(M, self.group_attention_matrix) - masks[step] = M_feature_level + masks.append(M_feature_level) # update prior prior = torch.mul(self.gamma - M, prior) # output @@ -302,423 +412,200 @@ def forward_masks(self, x): return M_explain, masks -class TabNetDecoder(torch.nn.Module): - def __init__( - self, - input_dim, - n_d=8, - n_steps=3, - n_independent=1, - n_shared=1, - virtual_batch_size=128, - momentum=0.02, - ): - """ - Defines main part of the TabNet 
network without the embedding layers. - - Parameters - ---------- - input_dim : int - Number of features - output_dim : int or list of int for multi task classification - Dimension of network output - examples : one for regression, 2 for binary classification etc... - n_d : int - Dimension of the prediction layer (usually between 4 and 64) - n_steps : int - Number of successive steps in the network (usually between 3 and 10) - gamma : float - Float above 1, scaling factor for attention updates (usually between 1.0 to 2.0) - n_independent : int - Number of independent GLU layer in each GLU block (default 1) - n_shared : int - Number of independent GLU layer in each GLU block (default 1) - virtual_batch_size : int - Batch size for Ghost Batch Normalization - momentum : float - Float value between 0 and 1 which will be used for momentum in all batch norm - """ - super(TabNetDecoder, self).__init__() - self.input_dim = input_dim - self.n_d = n_d - self.n_steps = n_steps - self.n_independent = n_independent - self.n_shared = n_shared - self.virtual_batch_size = virtual_batch_size - - self.feat_transformers = torch.nn.ModuleList() - - if self.n_shared > 0: - shared_feat_transform = torch.nn.ModuleList() - for i in range(self.n_shared): - if i == 0: - shared_feat_transform.append(Linear(n_d, 2 * n_d, bias=False)) - else: - shared_feat_transform.append(Linear(n_d, 2 * n_d, bias=False)) - - else: - shared_feat_transform = None - - for step in range(n_steps): - transformer = FeatTransformer( - n_d, - n_d, - shared_feat_transform, - n_glu_independent=self.n_independent, - virtual_batch_size=self.virtual_batch_size, - momentum=momentum, - ) - self.feat_transformers.append(transformer) - - self.reconstruction_layer = Linear(n_d, self.input_dim, bias=False) - initialize_non_glu(self.reconstruction_layer, n_d, self.input_dim) - - def forward(self, steps_output): - res = 0 - for step_nb, step_output in enumerate(steps_output): - x = self.feat_transformers[step_nb](step_output) - res = torch.add(res, x) - res = self.reconstruction_layer(res) - return res - - -class TabNetPretraining(torch.nn.Module): - def __init__( - self, - input_dim, - pretraining_ratio=0.2, - n_d=8, - n_a=8, - n_steps=3, - gamma=1.3, - cat_idxs=[], - cat_dims=[], - cat_emb_dim=1, - n_independent=2, - n_shared=2, - epsilon=1e-15, - virtual_batch_size=128, - momentum=0.02, - mask_type="sparsemax", - n_shared_decoder=1, - n_indep_decoder=1, - group_attention_matrix=None, - ): - super(TabNetPretraining, self).__init__() - - self.cat_idxs = cat_idxs or [] - self.cat_dims = cat_dims or [] - self.cat_emb_dim = cat_emb_dim - - self.input_dim = input_dim - self.n_d = n_d - self.n_a = n_a - self.n_steps = n_steps - self.gamma = gamma - self.epsilon = epsilon - self.n_independent = n_independent - self.n_shared = n_shared - self.mask_type = mask_type - self.pretraining_ratio = pretraining_ratio - self.n_shared_decoder = n_shared_decoder - self.n_indep_decoder = n_indep_decoder - - if self.n_steps <= 0: - raise ValueError("n_steps should be a positive integer.") - if self.n_independent == 0 and self.n_shared == 0: - raise ValueError("n_shared and n_independent can't be both zero.") - - self.virtual_batch_size = virtual_batch_size - self.embedder = EmbeddingGenerator( - input_dim, cat_dims, cat_idxs, cat_emb_dim, group_attention_matrix - ) - self.post_embed_dim = self.embedder.post_embed_dim - - self.masker = RandomObfuscator( - self.pretraining_ratio, group_matrix=self.embedder.embedding_group_matrix - ) - self.encoder = TabNetEncoder( - 
input_dim=self.post_embed_dim, - output_dim=self.post_embed_dim, - n_d=n_d, - n_a=n_a, - n_steps=n_steps, - gamma=gamma, - n_independent=n_independent, - n_shared=n_shared, - epsilon=epsilon, - virtual_batch_size=virtual_batch_size, - momentum=momentum, - mask_type=mask_type, - group_attention_matrix=self.embedder.embedding_group_matrix, - ) - self.decoder = TabNetDecoder( - self.post_embed_dim, - n_d=n_d, - n_steps=n_steps, - n_independent=self.n_indep_decoder, - n_shared=self.n_shared_decoder, - virtual_batch_size=virtual_batch_size, - momentum=momentum, - ) - - def forward(self, x): - """ - Returns: res, embedded_x, obf_vars - res : output of reconstruction - embedded_x : embedded input - obf_vars : which variable where obfuscated - """ - embedded_x = self.embedder(x) - if self.training: - masked_x, obfuscated_groups, obfuscated_vars = self.masker(embedded_x) - # set prior of encoder with obfuscated groups - prior = 1 - obfuscated_groups - steps_out, _ = self.encoder(masked_x, prior=prior) - res = self.decoder(steps_out) - return res, embedded_x, obfuscated_vars - else: - steps_out, _ = self.encoder(embedded_x) - res = self.decoder(steps_out) - return res, embedded_x, torch.ones(embedded_x.shape).to(x.device) - - def forward_masks(self, x): - embedded_x = self.embedder(x) - return self.encoder.forward_masks(embedded_x) - - -class TabNetNoEmbeddings(torch.nn.Module): - def __init__( - self, - input_dim, - output_dim, - n_d=8, - n_a=8, - n_steps=3, - gamma=1.3, - n_independent=2, - n_shared=2, - epsilon=1e-15, - virtual_batch_size=128, - momentum=0.02, - mask_type="sparsemax", - group_attention_matrix=None, - ): - """ - Defines main part of the TabNet network without the embedding layers. - - Parameters - ---------- - input_dim : int - Number of features - output_dim : int or list of int for multi task classification - Dimension of network output - examples : one for regression, 2 for binary classification etc... 
- n_d : int - Dimension of the prediction layer (usually between 4 and 64) - n_a : int - Dimension of the attention layer (usually between 4 and 64) - n_steps : int - Number of successive steps in the network (usually between 3 and 10) - gamma : float - Float above 1, scaling factor for attention updates (usually between 1.0 to 2.0) - n_independent : int - Number of independent GLU layer in each GLU block (default 2) - n_shared : int - Number of independent GLU layer in each GLU block (default 2) - epsilon : float - Avoid log(0), this should be kept very low - virtual_batch_size : int - Batch size for Ghost Batch Normalization - momentum : float - Float value between 0 and 1 which will be used for momentum in all batch norm - mask_type : str - Either "sparsemax" or "entmax" : this is the masking function to use - group_attention_matrix : torch matrix - Matrix of size (n_groups, input_dim), m_ij = importance within group i of feature j - """ - super(TabNetNoEmbeddings, self).__init__() - self.input_dim = input_dim - self.output_dim = output_dim - self.is_multi_task = isinstance(output_dim, list) - self.n_d = n_d - self.n_a = n_a - self.n_steps = n_steps - self.gamma = gamma - self.epsilon = epsilon - self.n_independent = n_independent - self.n_shared = n_shared - self.virtual_batch_size = virtual_batch_size - self.mask_type = mask_type - self.initial_bn = BatchNorm1d(self.input_dim, momentum=0.01) - - self.encoder = TabNetEncoder( - input_dim=input_dim, - output_dim=output_dim, - n_d=n_d, - n_a=n_a, - n_steps=n_steps, - gamma=gamma, - n_independent=n_independent, - n_shared=n_shared, - epsilon=epsilon, - virtual_batch_size=virtual_batch_size, - momentum=momentum, - mask_type=mask_type, - group_attention_matrix=group_attention_matrix, - ) - - if self.is_multi_task: - self.multi_task_mappings = torch.nn.ModuleList() - for task_dim in output_dim: - task_mapping = Linear(n_d, task_dim, bias=False) - initialize_non_glu(task_mapping, n_d, task_dim) - self.multi_task_mappings.append(task_mapping) - else: - self.final_mapping = Linear(n_d, output_dim, bias=False) - initialize_non_glu(self.final_mapping, n_d, output_dim) - - def forward(self, x): - res = 0 - steps_output, M_loss = self.encoder(x) - res = torch.sum(torch.stack(steps_output, dim=0), dim=0) - - if self.is_multi_task: - # Result will be in list format - out = [] - for task_mapping in self.multi_task_mappings: - out.append(task_mapping(res)) - else: - out = self.final_mapping(res) - return out, M_loss - - def forward_masks(self, x): - return self.encoder.forward_masks(x) - - -class TabNet(torch.nn.Module): - def __init__( - self, - input_dim, - output_dim, - n_d=8, - n_a=8, - n_steps=3, - gamma=1.3, - cat_idxs=[], - cat_dims=[], - cat_emb_dim=1, - n_independent=2, - n_shared=2, - epsilon=1e-15, - virtual_batch_size=128, - momentum=0.02, - mask_type="sparsemax", - group_attention_matrix=[], - ): - """ - Defines TabNet network - - Parameters - ---------- - input_dim : int - Initial number of features - output_dim : int - Dimension of network output - examples : one for regression, 2 for binary classification etc... 
- n_d : int - Dimension of the prediction layer (usually between 4 and 64) - n_a : int - Dimension of the attention layer (usually between 4 and 64) - n_steps : int - Number of successive steps in the network (usually between 3 and 10) - gamma : float - Float above 1, scaling factor for attention updates (usually between 1.0 to 2.0) - cat_idxs : list of int - Index of each categorical column in the dataset - cat_dims : list of int - Number of categories in each categorical column - cat_emb_dim : int or list of int - Size of the embedding of categorical features - if int, all categorical features will have same embedding size - if list of int, every corresponding feature will have specific size - n_independent : int - Number of independent GLU layer in each GLU block (default 2) - n_shared : int - Number of independent GLU layer in each GLU block (default 2) - epsilon : float - Avoid log(0), this should be kept very low - virtual_batch_size : int - Batch size for Ghost Batch Normalization - momentum : float - Float value between 0 and 1 which will be used for momentum in all batch norm - mask_type : str - Either "sparsemax" or "entmax" : this is the masking function to use - group_attention_matrix : torch matrix - Matrix of size (n_groups, input_dim), m_ij = importance within group i of feature j - """ - super(TabNet, self).__init__() - self.cat_idxs = cat_idxs or [] - self.cat_dims = cat_dims or [] - self.cat_emb_dim = cat_emb_dim - - self.input_dim = input_dim - self.output_dim = output_dim - self.n_d = n_d - self.n_a = n_a - self.n_steps = n_steps - self.gamma = gamma - self.epsilon = epsilon - self.n_independent = n_independent - self.n_shared = n_shared - self.mask_type = mask_type - - if self.n_steps <= 0: - raise ValueError("n_steps should be a positive integer.") - if self.n_independent == 0 and self.n_shared == 0: - raise ValueError("n_shared and n_independent can't be both zero.") - - self.virtual_batch_size = virtual_batch_size - self.embedder = EmbeddingGenerator( - input_dim, cat_dims, cat_idxs, cat_emb_dim, group_attention_matrix - ) - self.post_embed_dim = self.embedder.post_embed_dim - - self.tabnet = TabNetNoEmbeddings( - self.post_embed_dim, - output_dim, - n_d, - n_a, - n_steps, - gamma, - n_independent, - n_shared, - epsilon, - virtual_batch_size, - momentum, - mask_type, - self.embedder.embedding_group_matrix, - ) - - def forward(self, x): - x = self.embedder(x) - return self.tabnet(x) - - def forward_masks(self, x): - x = self.embedder(x) - return self.tabnet.forward_masks(x) +# class TabNetDecoder(torch.nn.Module): +# def __init__( +# self, +# input_dim, +# n_d=8, +# n_steps=3, +# n_independent=1, +# n_shared=1, +# virtual_batch_size=128, +# momentum=0.02, +# ): +# """ +# Defines main part of the TabNet network without the embedding layers. + +# Parameters +# ---------- +# input_dim : int +# Number of features +# output_dim : int or list of int for multi task classification +# Dimension of network output +# examples : one for regression, 2 for binary classification etc... 
+# n_d : int +# Dimension of the prediction layer (usually between 4 and 64) +# n_steps : int +# Number of successive steps in the network (usually between 3 and 10) +# gamma : float +# Float above 1, scaling factor for attention updates (usually between 1.0 to 2.0) +# n_independent : int +# Number of independent GLU layer in each GLU block (default 1) +# n_shared : int +# Number of independent GLU layer in each GLU block (default 1) +# virtual_batch_size : int +# Batch size for Ghost Batch Normalization +# momentum : float +# Float value between 0 and 1 which will be used for momentum in all batch norm +# """ +# super(TabNetDecoder, self).__init__() +# self.input_dim = input_dim +# self.n_d = n_d +# self.n_steps = n_steps +# self.n_independent = n_independent +# self.n_shared = n_shared +# self.virtual_batch_size = virtual_batch_size + +# self.feat_transformers = torch.nn.ModuleList() + +# if self.n_shared > 0: +# shared_feat_transform = torch.nn.ModuleList() +# for i in range(self.n_shared): +# if i == 0: +# shared_feat_transform.append(Linear(n_d, 2 * n_d, bias=False)) +# else: +# shared_feat_transform.append(Linear(n_d, 2 * n_d, bias=False)) + +# else: +# shared_feat_transform = None + +# for step in range(n_steps): +# transformer = FeatTransformer( +# n_d, +# n_d, +# shared_feat_transform, +# n_glu_independent=self.n_independent, +# virtual_batch_size=self.virtual_batch_size, +# momentum=momentum, +# ) +# self.feat_transformers.append(transformer) + +# self.reconstruction_layer = Linear(n_d, self.input_dim, bias=False) +# initialize_non_glu(self.reconstruction_layer, n_d, self.input_dim) + +# def forward(self, steps_output): +# res = 0 +# for step_nb, step_output in enumerate(steps_output): +# x = self.feat_transformers[step_nb](step_output) +# res = torch.add(res, x) +# res = self.reconstruction_layer(res) +# return res + + +# class TabNetPretraining(torch.nn.Module): +# def __init__( +# self, +# input_dim, +# pretraining_ratio=0.2, +# n_d=8, +# n_a=8, +# n_steps=3, +# gamma=1.3, +# cat_idxs=[], +# cat_dims=[], +# cat_emb_dim=1, +# n_independent=2, +# n_shared=2, +# epsilon=1e-15, +# virtual_batch_size=128, +# momentum=0.02, +# mask_type="sparsemax", +# n_shared_decoder=1, +# n_indep_decoder=1, +# group_attention_matrix=None, +# ): +# super(TabNetPretraining, self).__init__() + +# self.cat_idxs = cat_idxs or [] +# self.cat_dims = cat_dims or [] +# self.cat_emb_dim = cat_emb_dim + +# self.input_dim = input_dim +# self.n_d = n_d +# self.n_a = n_a +# self.n_steps = n_steps +# self.gamma = gamma +# self.epsilon = epsilon +# self.n_independent = n_independent +# self.n_shared = n_shared +# self.mask_type = mask_type +# self.pretraining_ratio = pretraining_ratio +# self.n_shared_decoder = n_shared_decoder +# self.n_indep_decoder = n_indep_decoder + +# if self.n_steps <= 0: +# raise ValueError("n_steps should be a positive integer.") +# if self.n_independent == 0 and self.n_shared == 0: +# raise ValueError("n_shared and n_independent can't be both zero.") + +# self.virtual_batch_size = virtual_batch_size +# self.embedder = EmbeddingGenerator( +# input_dim, cat_dims, cat_idxs, cat_emb_dim, group_attention_matrix +# ) +# self.post_embed_dim = self.embedder.post_embed_dim + +# self.masker = RandomObfuscator( +# self.pretraining_ratio, group_matrix=self.embedder.embedding_group_matrix +# ) +# self.encoder = TabNetEncoder( +# input_dim=self.post_embed_dim, +# output_dim=self.post_embed_dim, +# n_d=n_d, +# n_a=n_a, +# n_steps=n_steps, +# gamma=gamma, +# n_independent=n_independent, +# 
n_shared=n_shared, +# epsilon=epsilon, +# virtual_batch_size=virtual_batch_size, +# momentum=momentum, +# mask_type=mask_type, +# group_attention_matrix=self.embedder.embedding_group_matrix, +# ) +# self.decoder = TabNetDecoder( +# self.post_embed_dim, +# n_d=n_d, +# n_steps=n_steps, +# n_independent=self.n_indep_decoder, +# n_shared=self.n_shared_decoder, +# virtual_batch_size=virtual_batch_size, +# momentum=momentum, +# ) + +# def forward(self, x): +# """ +# Returns: res, embedded_x, obf_vars +# res : output of reconstruction +# embedded_x : embedded input +# obf_vars : which variable where obfuscated +# """ +# embedded_x = self.embedder(x) +# if self.training: +# masked_x, obfuscated_groups, obfuscated_vars = self.masker(embedded_x) +# # set prior of encoder with obfuscated groups +# prior = 1 - obfuscated_groups +# steps_out, _ = self.encoder(masked_x, prior=prior) +# res = self.decoder(steps_out) +# return res, embedded_x, obfuscated_vars +# else: +# steps_out, _ = self.encoder(embedded_x) +# res = self.decoder(steps_out) +# return res, embedded_x, torch.ones(embedded_x.shape).to(x.device) + +# def forward_masks(self, x): +# embedded_x = self.embedder(x) +# return self.encoder.forward_masks(embedded_x) class AttentiveTransformer(torch.nn.Module): def __init__( self, - input_dim, - group_dim, - group_matrix, - virtual_batch_size=128, - momentum=0.02, - mask_type="sparsemax", - ): + input_dim: int, + group_dim: int, + virtual_batch_size: int = 128, + momentum: float = 0.02, + mask_type: str = "sparsemax", + ) -> None: """ Initialize an attention transformer. @@ -743,17 +630,17 @@ def __init__( ) if mask_type == "sparsemax": - # Sparsemax - self.selector = sparsemax.Sparsemax(dim=-1) + self.selector = Sparsemax() elif mask_type == "entmax": - # Entmax - self.selector = sparsemax.Entmax15(dim=-1) + self.selector = Entmax() else: raise NotImplementedError( - "Please choose either sparsemax" + "or entmax as masktype" + "Please choose either sparsemax or entmax as masktype" ) - def forward(self, priors, processed_feat): + def forward( + self, priors: torch.Tensor, processed_feat: torch.Tensor + ) -> torch.Tensor: x = self.fc(processed_feat) x = self.bn(x) x = torch.mul(x, priors) @@ -764,13 +651,13 @@ def forward(self, priors, processed_feat): class FeatTransformer(torch.nn.Module): def __init__( self, - input_dim, - output_dim, - shared_layers, - n_glu_independent, - virtual_batch_size=128, - momentum=0.02, - ): + input_dim: int, + output_dim: int, + shared_layers: torch.nn.ModuleList, + n_glu_independent: int, + virtual_batch_size: int = 128, + momentum: float = 0.02, + ) -> None: super(FeatTransformer, self).__init__() """ Initialize a feature transformer. 
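The sparsemax/entmax swap above only changes where the selector comes from, not what it computes: both behave like a softmax whose output may contain exact zeros, which is what makes the TabNet attention masks sparse. A quick sanity check of the new modules (a sketch, assuming the patched layers module is importable):

    import torch

    from synthcity.plugins.core.models.layers import Entmax, Sparsemax

    logits = torch.tensor([[1.0, 0.5, 0.1, -1.0]])
    print(torch.softmax(logits, dim=-1))  # dense: all four entries > 0
    print(Sparsemax(dim=-1)(logits))      # [0.75, 0.25, 0, 0]: two exact zeros
    print(Entmax(dim=-1)(logits))         # ~[0.61, 0.28, 0.11, 0]: one exact zero
    # Each row sums to 1, so all three are valid attention masks; alpha=1.5
    # entmax sits between softmax (alpha=1) and sparsemax (alpha=2) in sparsity.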
@@ -819,10 +706,10 @@ def __init__( else: spec_input_dim = input_dim if is_first else output_dim self.specifics = GLU_Block( - spec_input_dim, output_dim, first=is_first, **params + spec_input_dim, output_dim, first=is_first, **params # type: ignore ) - def forward(self, x): + def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.shared(x) x = self.specifics(x) return x @@ -835,14 +722,15 @@ class GLU_Block(torch.nn.Module): def __init__( self, - input_dim, - output_dim, - n_glu=2, - first=False, - shared_layers=None, - virtual_batch_size=128, - momentum=0.02, - ): + input_dim: int, + output_dim: int, + *, + n_glu: int = 2, + first: bool = False, + shared_layers: Optional[torch.nn.ModuleList] = None, + virtual_batch_size: int = 128, + momentum: float = 0.02, + ) -> None: super(GLU_Block, self).__init__() self.first = first self.shared_layers = shared_layers @@ -852,12 +740,12 @@ def __init__( params = {"virtual_batch_size": virtual_batch_size, "momentum": momentum} fc = shared_layers[0] if shared_layers else None - self.glu_layers.append(GLU_Layer(input_dim, output_dim, fc=fc, **params)) + self.glu_layers.append(GLU_Layer(input_dim, output_dim, fc=fc, **params)) # type: ignore for glu_id in range(1, self.n_glu): fc = shared_layers[glu_id] if shared_layers else None - self.glu_layers.append(GLU_Layer(output_dim, output_dim, fc=fc, **params)) + self.glu_layers.append(GLU_Layer(output_dim, output_dim, fc=fc, **params)) # type: ignore - def forward(self, x): + def forward(self, x: torch.Tensor) -> torch.Tensor: scale = torch.sqrt(torch.FloatTensor([0.5]).to(x.device)) if self.first: # the first layer of the block has no scale multiplication x = self.glu_layers[0](x) @@ -873,8 +761,14 @@ def forward(self, x): class GLU_Layer(torch.nn.Module): def __init__( - self, input_dim, output_dim, fc=None, virtual_batch_size=128, momentum=0.02 - ): + self, + input_dim: int, + output_dim: int, + *, + fc: Optional[Linear] = None, + virtual_batch_size: int = 128, + momentum: float = 0.02, + ) -> None: super(GLU_Layer, self).__init__() self.output_dim = output_dim @@ -888,143 +782,143 @@ def __init__( 2 * output_dim, virtual_batch_size=virtual_batch_size, momentum=momentum ) - def forward(self, x): + def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.fc(x) x = self.bn(x) out = torch.mul(x[:, : self.output_dim], torch.sigmoid(x[:, self.output_dim :])) return out -class EmbeddingGenerator(torch.nn.Module): - """ - Classical embeddings generator - """ - - def __init__(self, input_dim, cat_dims, cat_idxs, cat_emb_dims, group_matrix): - """This is an embedding module for an entire set of features - - Parameters - ---------- - input_dim : int - Number of features coming as input (number of columns) - cat_dims : list of int - Number of modalities for each categorial features - If the list is empty, no embeddings will be done - cat_idxs : list of int - Positional index for each categorical features in inputs - cat_emb_dim : list of int - Embedding dimension for each categorical features - If int, the same embedding dimension will be used for all categorical features - group_matrix : torch matrix - Original group matrix before embeddings - """ - super(EmbeddingGenerator, self).__init__() - - if cat_dims == [] and cat_idxs == []: - self.skip_embedding = True - self.post_embed_dim = input_dim - self.embedding_group_matrix = group_matrix.to(group_matrix.device) - return - else: - self.skip_embedding = False - - self.post_embed_dim = int(input_dim + np.sum(cat_emb_dims) - len(cat_emb_dims)) - - 
self.embeddings = torch.nn.ModuleList() - - for cat_dim, emb_dim in zip(cat_dims, cat_emb_dims): - self.embeddings.append(torch.nn.Embedding(cat_dim, emb_dim)) - - # record continuous indices - self.continuous_idx = torch.ones(input_dim, dtype=torch.bool) - self.continuous_idx[cat_idxs] = 0 - - # update group matrix - n_groups = group_matrix.shape[0] - self.embedding_group_matrix = torch.empty( - (n_groups, self.post_embed_dim), device=group_matrix.device - ) - for group_idx in range(n_groups): - post_emb_idx = 0 - cat_feat_counter = 0 - for init_feat_idx in range(input_dim): - if self.continuous_idx[init_feat_idx] == 1: - # this means that no embedding is applied to this column - self.embedding_group_matrix[group_idx, post_emb_idx] = group_matrix[ - group_idx, init_feat_idx - ] # noqa - post_emb_idx += 1 - else: - # this is a categorical feature which creates multiple embeddings - n_embeddings = cat_emb_dims[cat_feat_counter] - self.embedding_group_matrix[ - group_idx, post_emb_idx : post_emb_idx + n_embeddings - ] = ( - group_matrix[group_idx, init_feat_idx] / n_embeddings - ) # noqa - post_emb_idx += n_embeddings - cat_feat_counter += 1 - - def forward(self, x): - """ - Apply embeddings to inputs - Inputs should be (batch_size, input_dim) - Outputs will be of size (batch_size, self.post_embed_dim) - """ - if self.skip_embedding: - # no embeddings required - return x - - cols = [] - cat_feat_counter = 0 - for feat_init_idx, is_continuous in enumerate(self.continuous_idx): - # Enumerate through continuous idx boolean mask to apply embeddings - if is_continuous: - cols.append(x[:, feat_init_idx].float().view(-1, 1)) - else: - cols.append( - self.embeddings[cat_feat_counter](x[:, feat_init_idx].long()) - ) - cat_feat_counter += 1 - # concat - post_embeddings = torch.cat(cols, dim=1) - return post_embeddings - - -class RandomObfuscator(torch.nn.Module): - """ - Create and applies obfuscation masks. - The obfuscation is done at group level to match attention. - """ - - def __init__(self, pretraining_ratio, group_matrix): - """ - This create random obfuscation for self suppervised pretraining - Parameters - ---------- - pretraining_ratio : float - Ratio of feature to randomly discard for reconstruction - - """ - super(RandomObfuscator, self).__init__() - self.pretraining_ratio = pretraining_ratio - # group matrix is set to boolean here to pass all posssible information - self.group_matrix = (group_matrix > 0) + 0.0 - self.num_groups = group_matrix.shape[0] - - def forward(self, x): - """ - Generate random obfuscation mask. - - Returns - ------- - masked input and obfuscated variables. 
- """ - bs = x.shape[0] - - obfuscated_groups = torch.bernoulli( - self.pretraining_ratio * torch.ones((bs, self.num_groups), device=x.device) - ) - obfuscated_vars = torch.matmul(obfuscated_groups, self.group_matrix) - masked_input = torch.mul(1 - obfuscated_vars, x) - return masked_input, obfuscated_groups, obfuscated_vars +# class EmbeddingGenerator(torch.nn.Module): +# """ +# Classical embeddings generator +# """ + +# def __init__(self, input_dim, cat_dims, cat_idxs, cat_emb_dims, group_matrix): +# """This is an embedding module for an entire set of features + +# Parameters +# ---------- +# input_dim : int +# Number of features coming as input (number of columns) +# cat_dims : list of int +# Number of modalities for each categorial features +# If the list is empty, no embeddings will be done +# cat_idxs : list of int +# Positional index for each categorical features in inputs +# cat_emb_dim : list of int +# Embedding dimension for each categorical features +# If int, the same embedding dimension will be used for all categorical features +# group_matrix : torch matrix +# Original group matrix before embeddings +# """ +# super(EmbeddingGenerator, self).__init__() + +# if cat_dims == [] and cat_idxs == []: +# self.skip_embedding = True +# self.post_embed_dim = input_dim +# self.embedding_group_matrix = group_matrix.to(group_matrix.device) +# return +# else: +# self.skip_embedding = False + +# self.post_embed_dim = int(input_dim + np.sum(cat_emb_dims) - len(cat_emb_dims)) + +# self.embeddings = torch.nn.ModuleList() + +# for cat_dim, emb_dim in zip(cat_dims, cat_emb_dims): +# self.embeddings.append(torch.nn.Embedding(cat_dim, emb_dim)) + +# # record continuous indices +# self.continuous_idx = torch.ones(input_dim, dtype=torch.bool) +# self.continuous_idx[cat_idxs] = 0 + +# # update group matrix +# n_groups = group_matrix.shape[0] +# self.embedding_group_matrix = torch.empty( +# (n_groups, self.post_embed_dim), device=group_matrix.device +# ) +# for group_idx in range(n_groups): +# post_emb_idx = 0 +# cat_feat_counter = 0 +# for init_feat_idx in range(input_dim): +# if self.continuous_idx[init_feat_idx] == 1: +# # this means that no embedding is applied to this column +# self.embedding_group_matrix[group_idx, post_emb_idx] = group_matrix[ +# group_idx, init_feat_idx +# ] # noqa +# post_emb_idx += 1 +# else: +# # this is a categorical feature which creates multiple embeddings +# n_embeddings = cat_emb_dims[cat_feat_counter] +# self.embedding_group_matrix[ +# group_idx, post_emb_idx : post_emb_idx + n_embeddings +# ] = ( +# group_matrix[group_idx, init_feat_idx] / n_embeddings +# ) # noqa +# post_emb_idx += n_embeddings +# cat_feat_counter += 1 + +# def forward(self, x): +# """ +# Apply embeddings to inputs +# Inputs should be (batch_size, input_dim) +# Outputs will be of size (batch_size, self.post_embed_dim) +# """ +# if self.skip_embedding: +# # no embeddings required +# return x + +# cols = [] +# cat_feat_counter = 0 +# for feat_init_idx, is_continuous in enumerate(self.continuous_idx): +# # Enumerate through continuous idx boolean mask to apply embeddings +# if is_continuous: +# cols.append(x[:, feat_init_idx].float().view(-1, 1)) +# else: +# cols.append( +# self.embeddings[cat_feat_counter](x[:, feat_init_idx].long()) +# ) +# cat_feat_counter += 1 +# # concat +# post_embeddings = torch.cat(cols, dim=1) +# return post_embeddings + + +# class RandomObfuscator(torch.nn.Module): +# """ +# Create and applies obfuscation masks. +# The obfuscation is done at group level to match attention. 
+# """ + +# def __init__(self, pretraining_ratio, group_matrix): +# """ +# This create random obfuscation for self suppervised pretraining +# Parameters +# ---------- +# pretraining_ratio : float +# Ratio of feature to randomly discard for reconstruction + +# """ +# super(RandomObfuscator, self).__init__() +# self.pretraining_ratio = pretraining_ratio +# # group matrix is set to boolean here to pass all posssible information +# self.group_matrix = (group_matrix > 0) + 0.0 +# self.num_groups = group_matrix.shape[0] + +# def forward(self, x): +# """ +# Generate random obfuscation mask. + +# Returns +# ------- +# masked input and obfuscated variables. +# """ +# bs = x.shape[0] + +# obfuscated_groups = torch.bernoulli( +# self.pretraining_ratio * torch.ones((bs, self.num_groups), device=x.device) +# ) +# obfuscated_vars = torch.matmul(obfuscated_groups, self.group_matrix) +# masked_input = torch.mul(1 - obfuscated_vars, x) +# return masked_input, obfuscated_groups, obfuscated_vars diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py index 0332fa03..20a293b4 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py @@ -40,7 +40,7 @@ def __init__( print_interval: int = 100, # model params model_type: str = "mlp", - mlp_params: Optional[dict] = None, + model_params: Optional[dict] = None, dim_embed: int = 128, # early stopping n_iter_min: int = 100, @@ -95,13 +95,6 @@ def fit( cat_counts = [0] self.feature_names_out = self.feature_names - model_params = dict( - num_classes=self.n_classes, - conditional=cond is not None, - mlp_params=self.mlp_params, - dim_emb=self.dim_embed, - ) - dataset = TensorDataset( torch.tensor(X.values, dtype=torch.float32, device=self.device), torch.tensor([torch.nan] * len(X), dtype=torch.float32, device=self.device) @@ -117,11 +110,14 @@ def fit( self.diffusion = GaussianMultinomialDiffusion( model_type=self.model_type, - model_params=model_params, + model_params=self.model_params, num_categorical_features=cat_counts, num_numerical_features=X.shape[1] - len(cat_cols), gaussian_loss_type=self.gaussian_loss_type, num_timesteps=self.num_timesteps, + num_classes=self.n_classes, + conditional=cond is not None, + dim_emb=self.dim_embed, scheduler=self.scheduler, device=self.device, ).to(self.device) diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py index 2e29aec3..6c11c5be 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py @@ -18,7 +18,7 @@ from synthcity.logger import debug, info, warning # synthcity relative -from .modules import MLPDiffusion, ResNetDiffusion +from .modules import DiffusionModel from .utils import ( discretized_gaussian_log_likelihood, index_to_log_onehot, @@ -67,11 +67,15 @@ def alpha_bar(t: float) -> float: class GaussianMultinomialDiffusion(torch.nn.Module): def __init__( self, + *, num_numerical_features: int, num_categorical_features: tuple, - model_type: str = "mlp", - model_params: Optional[dict] = None, + model_type: str, + model_params: dict, num_timesteps: int = 1000, + num_classes: int = 0, + conditional: bool = False, + dim_emb: int = 128, gaussian_loss_type: str = "mse", gaussian_parametrization: str = "eps", multinomial_loss_type: str 
= "vb_stochastic", @@ -110,24 +114,14 @@ def __init__( self.slices_for_classes.append(np.arange(offsets[i - 1], offsets[i])) self.offsets = torch.from_numpy(np.append([0], offsets)).to(device).long() - if model_params is None: - model_params = dict( - dim_in=self.dim_input, num_classes=0, conditional=False, mlp_params=None - ) - else: - model_params["dim_in"] = self.dim_input - - if model_params["mlp_params"] is None: - model_params["mlp_params"] = dict( - n_units_hidden=256, n_layers_hidden=3, dropout=0.0 - ) - - if model_type == "mlp": - self.denoise_fn = MLPDiffusion(**model_params) - elif model_type == "resnet": - self.denoise_fn = ResNetDiffusion(**model_params) - else: - raise NotImplementedError(f"unknown model type: {model_type}") + self.denoise_fn = DiffusionModel( + dim_in=self.dim_input, + dim_emb=dim_emb, + num_classes=num_classes, + conditional=conditional, + model_type=model_type, + model_params=model_params, + ) self.gaussian_loss_type = gaussian_loss_type self.gaussian_parametrization = gaussian_parametrization diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/modules.py b/src/synthcity/plugins/core/models/tabular_ddpm/modules.py index 8d3a777b..5e49f76e 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/modules.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/modules.py @@ -11,7 +11,7 @@ from torch import Tensor, nn # synthcity absolute -from synthcity.plugins.core.models.mlp import MLP, get_nonlin +from synthcity.plugins.core.models.factory import get_model, get_nonlin class TimeStepEmbedding(nn.Module): @@ -58,15 +58,14 @@ def forward(self, timesteps: Tensor) -> Tensor: return self.fc(emb) -class MLPDiffusion(nn.Module): - add_residual = False - +class DiffusionModel(nn.Module): def __init__( self, dim_in: int, dim_emb: int = 128, *, - mlp_params: dict = {}, + model_type: str = "mlp", + model_params: dict = {}, conditional: bool = False, num_classes: int = 0, emb_nonlin: Union[str, nn.Module] = "silu", @@ -91,13 +90,17 @@ def __init__( elif self.num_classes == 0: # regression self.label_emb = nn.Linear(1, dim_emb) - self.model = MLP( - n_units_in=dim_emb, - n_units_out=dim_in, - task_type="/", - residual=self.add_residual, - **mlp_params, - ) + if not model_params: + model_params = {} # avoid changing the default dict + + if model_type == "mlp": + if not model_params: + model_params = dict(n_units_hidden=256, n_layers_hidden=3, dropout=0.0) + model_params.update(n_units_in=dim_emb, n_units_out=dim_in) + elif model_type == "tabnet": + model_params.update(input_dim=dim_emb, output_dim=dim_in) + + self.model = get_model(model_type, model_params) def forward(self, x: Tensor, t: Tensor, y: Optional[Tensor] = None) -> Tensor: emb = self.time_emb(t) @@ -111,7 +114,3 @@ def forward(self, x: Tensor, t: Tensor, y: Optional[Tensor] = None) -> Tensor: emb += self.emb_nonlin(self.label_emb(y)) x = self.proj(x) + emb return self.model(x) - - -class ResNetDiffusion(MLPDiffusion): - add_residual = True diff --git a/src/synthcity/plugins/core/models/tabular_encoder.py b/src/synthcity/plugins/core/models/tabular_encoder.py index 74b72142..ad07a06c 100644 --- a/src/synthcity/plugins/core/models/tabular_encoder.py +++ b/src/synthcity/plugins/core/models/tabular_encoder.py @@ -17,7 +17,7 @@ from synthcity.utils.serialization import dataframe_hash # synthcity relative -from .data_encoder import get_encoder +from .factory import get_feature_encoder class FeatureInfo(BaseModel): @@ -114,9 +114,13 @@ def _fit_feature(self, feature: pd.Series, feature_type: str) -> 
FeatureInfo: Information of the fitted feature encoder. """ if feature_type == "discrete": - encoder = get_encoder(self.categorical_encoder)(**self.cat_encoder_params) + encoder = get_feature_encoder( + self.categorical_encoder, self.cat_encoder_params + ) else: - encoder = get_encoder(self.continuous_encoder)(**self.cont_encoder_params) + encoder = get_feature_encoder( + self.continuous_encoder, self.cont_encoder_params + ) encoder.fit(feature) diff --git a/src/synthcity/plugins/core/models/ts_model.py b/src/synthcity/plugins/core/models/ts_model.py index a7026146..34f6ac4c 100644 --- a/src/synthcity/plugins/core/models/ts_model.py +++ b/src/synthcity/plugins/core/models/ts_model.py @@ -20,7 +20,9 @@ # synthcity absolute import synthcity.logger as log -from synthcity.plugins.core.models.mlp import MLP, MultiActivationHead, get_nonlin +from synthcity.plugins.core.models.factory import get_nonlin +from synthcity.plugins.core.models.layers import MultiActivationHead +from synthcity.plugins.core.models.mlp import MLP from synthcity.utils.constants import DEVICE from synthcity.utils.reproducibility import enable_reproducible_results from synthcity.utils.samplers import ImbalancedDatasetSampler diff --git a/src/synthcity/plugins/generic/plugin_ddpm.py b/src/synthcity/plugins/generic/plugin_ddpm.py index 8eda1ea9..88df67f2 100644 --- a/src/synthcity/plugins/generic/plugin_ddpm.py +++ b/src/synthcity/plugins/generic/plugin_ddpm.py @@ -48,14 +48,16 @@ class TabDDPMPlugin(Plugin): L2 weight decay. batch_size: int = 1024 Size of mini-batches. - model_type: str = "mlp" - Type of model to use. Either "mlp" or "resnet". num_timesteps: int = 1000 Number of timesteps to use in the diffusion process. gaussian_loss_type: str = "mse" Type of loss to use for the Gaussian diffusion process. Either "mse" or "kl". scheduler: str = "cosine" The scheduler of forward process variance 'beta' to use. Either "cosine" or "linear". + model_type: str = "mlp" + Type of diffusion model to use ("mlp", "resnet", or "tabnet"). + model_params: dict = dict(n_layers_hidden=3, n_units_hidden=256, dropout=0.0) + Parameters of the diffusion model. Should be different for different model types. device: Any = DEVICE Device to use for training. 
callbacks: Sequence[Callback] = () @@ -101,7 +103,6 @@ def __init__( lr: float = 0.002, weight_decay: float = 1e-4, batch_size: int = 1024, - model_type: str = "mlp", num_timesteps: int = 1000, gaussian_loss_type: str = "mse", scheduler: str = "cosine", @@ -109,11 +110,11 @@ def __init__( callbacks: Sequence[Callback] = (), log_interval: int = 100, print_interval: int = 500, - # model params - n_layers_hidden: int = 3, - dim_hidden: int = 256, - dropout: float = 0.0, + model_type: str = "mlp", + model_params: dict = {}, dim_embed: int = 128, + continuous_encoder: str = "quantile", + cont_encoder_params: dict = {}, # core plugin arguments random_state: int = 0, workspace: Path = Path("workspace"), @@ -132,10 +133,6 @@ def __init__( self.is_classification = is_classification - mlp_params = dict( - n_layers_hidden=n_layers_hidden, n_units_hidden=dim_hidden, dropout=dropout - ) - self.model = TabDDPM( n_iter=n_iter, lr=lr, @@ -150,14 +147,17 @@ def __init__( log_interval=log_interval, print_interval=print_interval, model_type=model_type, - mlp_params=mlp_params, + model_params=model_params.copy(), dim_embed=dim_embed, ) + cont_encoder_params = cont_encoder_params.copy() + cont_encoder_params.update(random_state=random_state) + self.encoder = TabularEncoder( - continuous_encoder="quantile", - categorical_encoder="passthrough", - cont_encoder_params=dict(random_state=random_state), + continuous_encoder=continuous_encoder, + cont_encoder_params=cont_encoder_params, + categorical_encoder="none", cat_encoder_params=dict(), ) diff --git a/tests/plugins/core/models/test_mlp.py b/tests/plugins/core/models/test_mlp.py index e70b76de..ac9a2db3 100644 --- a/tests/plugins/core/models/test_mlp.py +++ b/tests/plugins/core/models/test_mlp.py @@ -5,12 +5,8 @@ from sklearn.datasets import load_diabetes, load_digits # synthcity absolute -from synthcity.plugins.core.models.mlp import ( - MLP, - LinearLayer, - MultiActivationHead, - ResidualLayer, -) +from synthcity.plugins.core.models.layers import MultiActivationHead +from synthcity.plugins.core.models.mlp import MLP, LinearLayer, ResidualLayer def test_network_config() -> None: diff --git a/tests/plugins/generic/test_ddpm.py b/tests/plugins/generic/test_ddpm.py index 2f9afeae..d7f93da3 100644 --- a/tests/plugins/generic/test_ddpm.py +++ b/tests/plugins/generic/test_ddpm.py @@ -19,10 +19,12 @@ plugin_name = "ddpm" plugin_params = dict( n_iter=1000, - batch_size=200, - num_timesteps=500, + batch_size=1000, + num_timesteps=100, log_interval=10, print_interval=100, + model_type="tabnet", + # model_params=dict() ) From 6e58cf3b97fd79e303e72834793dfd553fc2a515 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Sun, 2 Apr 2023 23:20:38 +0200 Subject: [PATCH 38/95] update docstrings and refactor --- src/synthcity/plugins/core/models/tabnet.py | 491 ++++++++----------- src/synthcity/plugins/generic/plugin_ddpm.py | 18 +- tests/plugins/generic/test_ddpm.py | 11 +- 3 files changed, 210 insertions(+), 310 deletions(-) diff --git a/src/synthcity/plugins/core/models/tabnet.py b/src/synthcity/plugins/core/models/tabnet.py index 5a4f3051..c7e41c52 100644 --- a/src/synthcity/plugins/core/models/tabnet.py +++ b/src/synthcity/plugins/core/models/tabnet.py @@ -9,98 +9,19 @@ # synthcity relative from .layers import Entmax, Sparsemax -# class TabNet(torch.nn.Module): -# def __init__( -# self, -# input_dim, -# output_dim, -# n_d=8, -# n_a=8, -# n_steps=3, -# gamma=1.3, -# n_independent=2, -# n_shared=2, -# epsilon=1e-15, -# virtual_batch_size=128, -# momentum=0.02, -# 
mask_type="sparsemax", -# group_attention_matrix=None, -# ): -# """ -# Defines TabNet network -# Parameters -# ---------- -# input_dim : int -# Initial number of features -# output_dim : int -# Dimension of network output -# examples : one for regression, 2 for binary classification etc... -# n_d : int -# Dimension of the prediction layer (usually between 4 and 64) -# n_a : int -# Dimension of the attention layer (usually between 4 and 64) -# n_steps : int -# Number of successive steps in the network (usually between 3 and 10) -# gamma : float -# Float above 1, scaling factor for attention updates (usually between 1.0 to 2.0) -# n_independent : int -# Number of independent GLU layer in each GLU block (default 2) -# n_shared : int -# Number of independent GLU layer in each GLU block (default 2) -# epsilon : float -# Avoid log(0), this should be kept very low -# virtual_batch_size : int -# Batch size for Ghost Batch Normalization -# momentum : float -# Float value between 0 and 1 which will be used for momentum in all batch norm -# mask_type : str -# Either "sparsemax" or "entmax" : this is the masking function to use -# group_attention_matrix : torch matrix -# Matrix of size (n_groups, input_dim), m_ij = importance within group i of feature j -# """ -# super(TabNet, self).__init__() - -# if group_attention_matrix is None: -# group_attention_matrix = torch.Tensor([]) - -# self.input_dim = input_dim -# self.output_dim = output_dim -# self.n_d = n_d -# self.n_a = n_a -# self.n_steps = n_steps -# self.gamma = gamma -# self.epsilon = epsilon -# self.n_independent = n_independent -# self.n_shared = n_shared -# self.mask_type = mask_type - -# self.virtual_batch_size = virtual_batch_size -# self.post_embed_dim = self.embedder.post_embed_dim - -# self.tabnet = TabNetNoEmbeddings( -# self.post_embed_dim, -# output_dim, -# n_d, -# n_a, -# n_steps, -# gamma, -# n_independent, -# n_shared, -# epsilon, -# virtual_batch_size, -# momentum, -# mask_type, -# self.embedder.embedding_group_matrix, -# ) +def initialize_non_glu(module: Linear, input_dim: int, output_dim: int) -> None: + gain_value = np.sqrt((input_dim + output_dim) / np.sqrt(4 * input_dim)) + torch.nn.init.xavier_normal_(module.weight, gain=gain_value) + # torch.nn.init.zeros_(module.bias) + return -# def forward(self, x): -# x = self.embedder(x) -# return self.tabnet(x) -# def forward_masks(self, x): -# x = self.embedder(x) -# return self.tabnet.forward_masks(x) +def initialize_glu(module: Linear, input_dim: int, output_dim: int) -> None: + gain_value = np.sqrt((input_dim + output_dim) / np.sqrt(input_dim)) + torch.nn.init.xavier_normal_(module.weight, gain=gain_value) + # torch.nn.init.zeros_(module.bias) + return class TabNet(torch.nn.Module): @@ -121,7 +42,7 @@ def __init__( group_attention_matrix: Optional[torch.Tensor] = None, ) -> None: """ - Defines main part of the TabNet network without the embedding layers. + Defines main part of the TabNet network. Parameters ---------- @@ -131,9 +52,9 @@ def __init__( Dimension of network output examples : one for regression, 2 for binary classification etc... 
n_d : int - Dimension of the prediction layer (usually between 4 and 64) + Dimension of the prediction layer (usually between 4 and 64) n_a : int - Dimension of the attention layer (usually between 4 and 64) + Dimension of the attention layer (usually between 4 and 64) n_steps : int Number of successive steps in the network (usually between 3 and 10) gamma : float @@ -202,20 +123,6 @@ def forward_masks(self, x: torch.Tensor) -> torch.Tensor: return self.encoder.forward_masks(x) -def initialize_non_glu(module: Linear, input_dim: int, output_dim: int) -> None: - gain_value = np.sqrt((input_dim + output_dim) / np.sqrt(4 * input_dim)) - torch.nn.init.xavier_normal_(module.weight, gain=gain_value) - # torch.nn.init.zeros_(module.bias) - return - - -def initialize_glu(module: Linear, input_dim: int, output_dim: int) -> None: - gain_value = np.sqrt((input_dim + output_dim) / np.sqrt(input_dim)) - torch.nn.init.xavier_normal_(module.weight, gain=gain_value) - # torch.nn.init.zeros_(module.bias) - return - - class GBN(torch.nn.Module): """ Ghost Batch Normalization @@ -412,191 +319,6 @@ def forward_masks(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tenso return M_explain, masks -# class TabNetDecoder(torch.nn.Module): -# def __init__( -# self, -# input_dim, -# n_d=8, -# n_steps=3, -# n_independent=1, -# n_shared=1, -# virtual_batch_size=128, -# momentum=0.02, -# ): -# """ -# Defines main part of the TabNet network without the embedding layers. - -# Parameters -# ---------- -# input_dim : int -# Number of features -# output_dim : int or list of int for multi task classification -# Dimension of network output -# examples : one for regression, 2 for binary classification etc... -# n_d : int -# Dimension of the prediction layer (usually between 4 and 64) -# n_steps : int -# Number of successive steps in the network (usually between 3 and 10) -# gamma : float -# Float above 1, scaling factor for attention updates (usually between 1.0 to 2.0) -# n_independent : int -# Number of independent GLU layer in each GLU block (default 1) -# n_shared : int -# Number of independent GLU layer in each GLU block (default 1) -# virtual_batch_size : int -# Batch size for Ghost Batch Normalization -# momentum : float -# Float value between 0 and 1 which will be used for momentum in all batch norm -# """ -# super(TabNetDecoder, self).__init__() -# self.input_dim = input_dim -# self.n_d = n_d -# self.n_steps = n_steps -# self.n_independent = n_independent -# self.n_shared = n_shared -# self.virtual_batch_size = virtual_batch_size - -# self.feat_transformers = torch.nn.ModuleList() - -# if self.n_shared > 0: -# shared_feat_transform = torch.nn.ModuleList() -# for i in range(self.n_shared): -# if i == 0: -# shared_feat_transform.append(Linear(n_d, 2 * n_d, bias=False)) -# else: -# shared_feat_transform.append(Linear(n_d, 2 * n_d, bias=False)) - -# else: -# shared_feat_transform = None - -# for step in range(n_steps): -# transformer = FeatTransformer( -# n_d, -# n_d, -# shared_feat_transform, -# n_glu_independent=self.n_independent, -# virtual_batch_size=self.virtual_batch_size, -# momentum=momentum, -# ) -# self.feat_transformers.append(transformer) - -# self.reconstruction_layer = Linear(n_d, self.input_dim, bias=False) -# initialize_non_glu(self.reconstruction_layer, n_d, self.input_dim) - -# def forward(self, steps_output): -# res = 0 -# for step_nb, step_output in enumerate(steps_output): -# x = self.feat_transformers[step_nb](step_output) -# res = torch.add(res, x) -# res = self.reconstruction_layer(res) -# 
return res - - -# class TabNetPretraining(torch.nn.Module): -# def __init__( -# self, -# input_dim, -# pretraining_ratio=0.2, -# n_d=8, -# n_a=8, -# n_steps=3, -# gamma=1.3, -# cat_idxs=[], -# cat_dims=[], -# cat_emb_dim=1, -# n_independent=2, -# n_shared=2, -# epsilon=1e-15, -# virtual_batch_size=128, -# momentum=0.02, -# mask_type="sparsemax", -# n_shared_decoder=1, -# n_indep_decoder=1, -# group_attention_matrix=None, -# ): -# super(TabNetPretraining, self).__init__() - -# self.cat_idxs = cat_idxs or [] -# self.cat_dims = cat_dims or [] -# self.cat_emb_dim = cat_emb_dim - -# self.input_dim = input_dim -# self.n_d = n_d -# self.n_a = n_a -# self.n_steps = n_steps -# self.gamma = gamma -# self.epsilon = epsilon -# self.n_independent = n_independent -# self.n_shared = n_shared -# self.mask_type = mask_type -# self.pretraining_ratio = pretraining_ratio -# self.n_shared_decoder = n_shared_decoder -# self.n_indep_decoder = n_indep_decoder - -# if self.n_steps <= 0: -# raise ValueError("n_steps should be a positive integer.") -# if self.n_independent == 0 and self.n_shared == 0: -# raise ValueError("n_shared and n_independent can't be both zero.") - -# self.virtual_batch_size = virtual_batch_size -# self.embedder = EmbeddingGenerator( -# input_dim, cat_dims, cat_idxs, cat_emb_dim, group_attention_matrix -# ) -# self.post_embed_dim = self.embedder.post_embed_dim - -# self.masker = RandomObfuscator( -# self.pretraining_ratio, group_matrix=self.embedder.embedding_group_matrix -# ) -# self.encoder = TabNetEncoder( -# input_dim=self.post_embed_dim, -# output_dim=self.post_embed_dim, -# n_d=n_d, -# n_a=n_a, -# n_steps=n_steps, -# gamma=gamma, -# n_independent=n_independent, -# n_shared=n_shared, -# epsilon=epsilon, -# virtual_batch_size=virtual_batch_size, -# momentum=momentum, -# mask_type=mask_type, -# group_attention_matrix=self.embedder.embedding_group_matrix, -# ) -# self.decoder = TabNetDecoder( -# self.post_embed_dim, -# n_d=n_d, -# n_steps=n_steps, -# n_independent=self.n_indep_decoder, -# n_shared=self.n_shared_decoder, -# virtual_batch_size=virtual_batch_size, -# momentum=momentum, -# ) - -# def forward(self, x): -# """ -# Returns: res, embedded_x, obf_vars -# res : output of reconstruction -# embedded_x : embedded input -# obf_vars : which variable where obfuscated -# """ -# embedded_x = self.embedder(x) -# if self.training: -# masked_x, obfuscated_groups, obfuscated_vars = self.masker(embedded_x) -# # set prior of encoder with obfuscated groups -# prior = 1 - obfuscated_groups -# steps_out, _ = self.encoder(masked_x, prior=prior) -# res = self.decoder(steps_out) -# return res, embedded_x, obfuscated_vars -# else: -# steps_out, _ = self.encoder(embedded_x) -# res = self.decoder(steps_out) -# return res, embedded_x, torch.ones(embedded_x.shape).to(x.device) - -# def forward_masks(self, x): -# embedded_x = self.embedder(x) -# return self.encoder.forward_masks(embedded_x) - - class AttentiveTransformer(torch.nn.Module): def __init__( self, @@ -789,9 +511,194 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: return out +# class TabNetDecoder(torch.nn.Module): +# def __init__( +# self, +# input_dim, +# n_d=8, +# n_steps=3, +# n_independent=1, +# n_shared=1, +# virtual_batch_size=128, +# momentum=0.02, +# ): +# """ +# Defines main part of the TabNet network without the embedding layers. 
+ +# Parameters +# ---------- +# input_dim : int +# Number of features +# output_dim : int or list of int for multi task classification +# Dimension of network output +# examples : one for regression, 2 for binary classification etc... +# n_d : int +# Dimension of the prediction layer (usually between 4 and 64) +# n_steps : int +# Number of successive steps in the network (usually between 3 and 10) +# gamma : float +# Float above 1, scaling factor for attention updates (usually between 1.0 to 2.0) +# n_independent : int +# Number of independent GLU layer in each GLU block (default 1) +# n_shared : int +# Number of independent GLU layer in each GLU block (default 1) +# virtual_batch_size : int +# Batch size for Ghost Batch Normalization +# momentum : float +# Float value between 0 and 1 which will be used for momentum in all batch norm +# """ +# super(TabNetDecoder, self).__init__() +# self.input_dim = input_dim +# self.n_d = n_d +# self.n_steps = n_steps +# self.n_independent = n_independent +# self.n_shared = n_shared +# self.virtual_batch_size = virtual_batch_size + +# self.feat_transformers = torch.nn.ModuleList() + +# if self.n_shared > 0: +# shared_feat_transform = torch.nn.ModuleList() +# for i in range(self.n_shared): +# if i == 0: +# shared_feat_transform.append(Linear(n_d, 2 * n_d, bias=False)) +# else: +# shared_feat_transform.append(Linear(n_d, 2 * n_d, bias=False)) + +# else: +# shared_feat_transform = None + +# for step in range(n_steps): +# transformer = FeatTransformer( +# n_d, +# n_d, +# shared_feat_transform, +# n_glu_independent=self.n_independent, +# virtual_batch_size=self.virtual_batch_size, +# momentum=momentum, +# ) +# self.feat_transformers.append(transformer) + +# self.reconstruction_layer = Linear(n_d, self.input_dim, bias=False) +# initialize_non_glu(self.reconstruction_layer, n_d, self.input_dim) + +# def forward(self, steps_output): +# res = 0 +# for step_nb, step_output in enumerate(steps_output): +# x = self.feat_transformers[step_nb](step_output) +# res = torch.add(res, x) +# res = self.reconstruction_layer(res) +# return res + + +# class TabNetPretraining(torch.nn.Module): +# def __init__( +# self, +# input_dim, +# pretraining_ratio=0.2, +# n_d=8, +# n_a=8, +# n_steps=3, +# gamma=1.3, +# cat_idxs=[], +# cat_dims=[], +# cat_emb_dim=1, +# n_independent=2, +# n_shared=2, +# epsilon=1e-15, +# virtual_batch_size=128, +# momentum=0.02, +# mask_type="sparsemax", +# n_shared_decoder=1, +# n_indep_decoder=1, +# group_attention_matrix=None, +# ): +# super(TabNetPretraining, self).__init__() + +# self.cat_idxs = cat_idxs or [] +# self.cat_dims = cat_dims or [] +# self.cat_emb_dim = cat_emb_dim + +# self.input_dim = input_dim +# self.n_d = n_d +# self.n_a = n_a +# self.n_steps = n_steps +# self.gamma = gamma +# self.epsilon = epsilon +# self.n_independent = n_independent +# self.n_shared = n_shared +# self.mask_type = mask_type +# self.pretraining_ratio = pretraining_ratio +# self.n_shared_decoder = n_shared_decoder +# self.n_indep_decoder = n_indep_decoder + +# if self.n_steps <= 0: +# raise ValueError("n_steps should be a positive integer.") +# if self.n_independent == 0 and self.n_shared == 0: +# raise ValueError("n_shared and n_independent can't be both zero.") + +# self.virtual_batch_size = virtual_batch_size +# self.embedder = EmbeddingGenerator( +# input_dim, cat_dims, cat_idxs, cat_emb_dim, group_attention_matrix +# ) +# self.post_embed_dim = self.embedder.post_embed_dim + +# self.masker = RandomObfuscator( +# self.pretraining_ratio, 
group_matrix=self.embedder.embedding_group_matrix +# ) +# self.encoder = TabNetEncoder( +# input_dim=self.post_embed_dim, +# output_dim=self.post_embed_dim, +# n_d=n_d, +# n_a=n_a, +# n_steps=n_steps, +# gamma=gamma, +# n_independent=n_independent, +# n_shared=n_shared, +# epsilon=epsilon, +# virtual_batch_size=virtual_batch_size, +# momentum=momentum, +# mask_type=mask_type, +# group_attention_matrix=self.embedder.embedding_group_matrix, +# ) +# self.decoder = TabNetDecoder( +# self.post_embed_dim, +# n_d=n_d, +# n_steps=n_steps, +# n_independent=self.n_indep_decoder, +# n_shared=self.n_shared_decoder, +# virtual_batch_size=virtual_batch_size, +# momentum=momentum, +# ) + +# def forward(self, x): +# """ +# Returns: res, embedded_x, obf_vars +# res : output of reconstruction +# embedded_x : embedded input +# obf_vars : which variable where obfuscated +# """ +# embedded_x = self.embedder(x) +# if self.training: +# masked_x, obfuscated_groups, obfuscated_vars = self.masker(embedded_x) +# # set prior of encoder with obfuscated groups +# prior = 1 - obfuscated_groups +# steps_out, _ = self.encoder(masked_x, prior=prior) +# res = self.decoder(steps_out) +# return res, embedded_x, obfuscated_vars +# else: +# steps_out, _ = self.encoder(embedded_x) +# res = self.decoder(steps_out) +# return res, embedded_x, torch.ones(embedded_x.shape).to(x.device) + +# def forward_masks(self, x): +# embedded_x = self.embedder(x) +# return self.encoder.forward_masks(embedded_x) + + # class EmbeddingGenerator(torch.nn.Module): # """ -# Classical embeddings generator +# Categorical embeddings generator # """ # def __init__(self, input_dim, cat_dims, cat_idxs, cat_emb_dims, group_matrix): diff --git a/src/synthcity/plugins/generic/plugin_ddpm.py b/src/synthcity/plugins/generic/plugin_ddpm.py index 88df67f2..09826a97 100644 --- a/src/synthcity/plugins/generic/plugin_ddpm.py +++ b/src/synthcity/plugins/generic/plugin_ddpm.py @@ -66,12 +66,6 @@ class TabDDPMPlugin(Plugin): Number of iterations between logging. print_interval: int = 500 Number of iterations between printing. - n_layers_hidden: int = 3 - Number of hidden layers in the MLP. - dim_hidden: int = 256 - Number of hidden units per hidden layer in the MLP. - dropout: float = 0.0 - Dropout rate. dim_embed: int = 128 Dimensionality of the embedding space. random_state: int @@ -191,8 +185,8 @@ def hyperparameter_space(**kwargs: Any) -> List[Distribution]: LogIntDistribution(name="batch_size", low=256, high=4096), IntegerDistribution(name="num_timesteps", low=10, high=1000), LogIntDistribution(name="n_iter", low=1000, high=10000), - IntegerDistribution(name="n_layers_hidden", low=2, high=8), - LogIntDistribution(name="dim_hidden", low=128, high=1024), + # IntegerDistribution(name="n_layers_hidden", low=2, high=8), + # LogIntDistribution(name="dim_hidden", low=128, high=1024), ] def _fit(self, X: DataLoader, *args: Any, **kwargs: Any) -> "TabDDPMPlugin": @@ -205,8 +199,6 @@ def _fit(self, X: DataLoader, *args: Any, **kwargs: Any) -> "TabDDPMPlugin": If the task is regression, the target variable is not specially treated. There is no condition by default, but can be given by the user, either as a column name or an array-like. 
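[Editor's note: the `_fit` hunk above tightens the conditional-column handling: when `cond` is given as a string it is resolved to the matching dataframe column, then normalized to a `pd.Series` aligned on `df.index`, and `expecting_conditional` is set in one pass instead of two separate `if cond is not None` blocks. A minimal sketch of the resulting control flow, under the assumption that `df` and `kwargs` are as in the hunk; names are taken from the diff itself.]

```python
# Hedged sketch of the consolidated cond-handling logic from the hunk above.
import pandas as pd

def _resolve_cond(df: pd.DataFrame, cond):
    """Normalize the optional condition to a Series aligned with df, or None."""
    if cond is None:
        return None, False
    if isinstance(cond, str):
        cond = df[cond]              # column name -> column values
    cond = pd.Series(cond, index=df.index)  # array-like -> aligned Series
    return cond, True                # second flag maps to expecting_conditional
```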
""" df = X.dataframe() - self.feature_names = df.columns - cond = kwargs.pop("cond", None) self.loss_history = None @@ -228,10 +220,8 @@ def _fit(self, X: DataLoader, *args: Any, **kwargs: Any) -> "TabDDPMPlugin": if cond is not None: if type(cond) is str: cond = df[cond] - self.expecting_conditional = True - - if cond is not None: cond = pd.Series(cond, index=df.index) + self.expecting_conditional = True # NOTE: cond may also be included in the dataframe self.model.fit(df, cond, **kwargs) @@ -254,7 +244,7 @@ def callback(count): # type: ignore df = self.encoder.inverse_transform(df) if self.is_classification: df = df.join(pd.Series(cond, name=self.target_name)) - return df[self.feature_names] # reorder columns + return df return self._safe_generate(callback, count, syn_schema, **kwargs) diff --git a/tests/plugins/generic/test_ddpm.py b/tests/plugins/generic/test_ddpm.py index d7f93da3..1e8766ac 100644 --- a/tests/plugins/generic/test_ddpm.py +++ b/tests/plugins/generic/test_ddpm.py @@ -21,11 +21,14 @@ n_iter=1000, batch_size=1000, num_timesteps=100, - log_interval=10, - print_interval=100, - model_type="tabnet", - # model_params=dict() + model_type="mlp", ) +# plugin_params = dict( +# n_iter=1000, +# batch_size=1000, +# num_timesteps=30, +# model_type="tabnet", +# ) def extend_fixtures( From 2a6ca6fde60cd1cc8c64746b31815dfa11de1a80 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Sun, 2 Apr 2023 23:44:57 +0200 Subject: [PATCH 39/95] fix type annotation compatibility --- src/synthcity/plugins/core/models/functions.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/synthcity/plugins/core/models/functions.py b/src/synthcity/plugins/core/models/functions.py index 27801034..51839e1a 100644 --- a/src/synthcity/plugins/core/models/functions.py +++ b/src/synthcity/plugins/core/models/functions.py @@ -2,7 +2,7 @@ Custom differentiable tensor functions. 
""" # stdlib -from typing import Any +from typing import Any, Tuple # third party import torch @@ -56,7 +56,7 @@ def forward( return output @staticmethod - def backward(ctx: Any, grad_output: torch.Tensor) -> tuple[torch.Tensor, None]: + def backward(ctx: Any, grad_output: torch.Tensor) -> Tuple[torch.Tensor, None]: supp_size, output = ctx.saved_tensors dim = ctx.dim grad_input = grad_output.clone() @@ -70,7 +70,7 @@ def backward(ctx: Any, grad_output: torch.Tensor) -> tuple[torch.Tensor, None]: @staticmethod def _threshold_and_support( input: torch.Tensor, dim: int = -1 - ) -> tuple[torch.Tensor, torch.Tensor]: + ) -> Tuple[torch.Tensor, torch.Tensor]: """Sparsemax building block: compute the threshold Parameters @@ -132,7 +132,7 @@ def backward(ctx: Any, grad_output: torch.Tensor) -> torch.Tensor: @staticmethod def _threshold_and_support( input: torch.Tensor, dim: int = -1 - ) -> tuple[torch.Tensor, torch.Tensor]: + ) -> Tuple[torch.Tensor, torch.Tensor]: Xsrt, _ = torch.sort(input, descending=True, dim=dim) rho = _make_ix_like(input, dim) From 36acaa04853d6c94cd986809da9adcbff27d3c41 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Mon, 3 Apr 2023 13:45:22 +0200 Subject: [PATCH 40/95] make SkipConnection serializable --- src/synthcity/plugins/core/models/layers.py | 47 +++++++++------------ 1 file changed, 21 insertions(+), 26 deletions(-) diff --git a/src/synthcity/plugins/core/models/layers.py b/src/synthcity/plugins/core/models/layers.py index 9be97abd..54ccf5d0 100644 --- a/src/synthcity/plugins/core/models/layers.py +++ b/src/synthcity/plugins/core/models/layers.py @@ -44,41 +44,36 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: return x.transpose(*self.dims) +@validate_arguments(config=dict(arbitrary_types_allowed=True)) +def _forward_skip_connection( + self: nn.Module, X: torch.Tensor, *args: Any, **kwargs: Any +) -> torch.Tensor: + # if X.shape[-1] == 0: + # return torch.zeros((*X.shape[:-1], self.n_units_out)).to(self.device) + X = X.float().to(self.device) + out = self._forward(X, *args, **kwargs) + return torch.cat([out, X], dim=-1) + + def SkipConnection(cls: Type[nn.Module]) -> Type[nn.Module]: """Wraps a model to add a skip connection from the input to the output. 
Example: >>> ResidualBlock = SkipConnection(MLP) - >>> ResidualBlock(n_units_in=10, n_units_out=3, n_units_hidden=64) - SkipConnection(MLP)( - (model): Sequential( - (0): LinearLayer( - (model): Sequential( - (0): Linear(in_features=10, out_features=64, bias=True) - (1): ReLU() - ) - ) - (1): Linear(in_features=64, out_features=3, bias=True) - ) - (loss): MSELoss() - ) + >>> res_block = ResidualBlock(n_units_in=10, n_units_out=3, n_units_hidden=64) + >>> res_block(torch.ones(10, 10)).shape + (10, 13) """ - class WrappedModule(cls): # type: ignore + class Wrapper(cls): # type: ignore device: torch.device = DEVICE - @validate_arguments(config=dict(arbitrary_types_allowed=True)) - def forward(self, X: torch.Tensor) -> torch.Tensor: - # if X.shape[-1] == 0: - # return torch.zeros((*X.shape[:-1], self.n_units_out)).to(self.device) - X = X.float().to(self.device) - out = super().forward(X) - return torch.cat([out, X], dim=-1) - - WrappedModule.__name__ = f"SkipConnection({cls.__name__})" - WrappedModule.__qualname__ = f"SkipConnection({cls.__qualname__})" - WrappedModule.__doc__ = f"""(With skipped connection) {cls.__doc__}""" - return WrappedModule + Wrapper._forward = cls.forward + Wrapper.forward = _forward_skip_connection + Wrapper.__name__ = f"SkipConnection({cls.__name__})" + Wrapper.__qualname__ = f"SkipConnection({cls.__qualname__})" + Wrapper.__doc__ = f"""(With skipped connection) {cls.__doc__}""" + return Wrapper # class GLU(nn.Module): From de15b9bcd4dbf2d29a4a90280caf695cb4108138 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Mon, 3 Apr 2023 18:29:19 +0200 Subject: [PATCH 41/95] fix TabularEncoder.activation_layout --- .gitignore | 2 +- .../plugins/core/models/feature_encoder.py | 7 +++---- .../plugins/core/models/tabular_encoder.py | 14 ++++++++++---- .../core/models/test_tabular_encoder.py | 18 +++++++----------- tests/plugins/generic/test_ddpm.py | 2 +- 5 files changed, 22 insertions(+), 21 deletions(-) diff --git a/.gitignore b/.gitignore index 5195f6c2..20037c5f 100644 --- a/.gitignore +++ b/.gitignore @@ -67,4 +67,4 @@ lightning_logs generated MNIST cifar-10* -local_test.py +local_test*.py diff --git a/src/synthcity/plugins/core/models/feature_encoder.py b/src/synthcity/plugins/core/models/feature_encoder.py index 995a65a6..70807e31 100644 --- a/src/synthcity/plugins/core/models/feature_encoder.py +++ b/src/synthcity/plugins/core/models/feature_encoder.py @@ -172,6 +172,8 @@ def _inverse_transform(self, data: np.ndarray) -> np.ndarray: class BayesianGMMEncoder(FeatureEncoder): """Bayesian Gaussian Mixture encoder""" + n_dim_in = 2 + def __init__( self, n_components: int = 10, @@ -181,7 +183,6 @@ def __init__( std_multiplier: int = 4, ) -> None: self.n_components = n_components - self.random_state = random_state self.weight_threshold = weight_threshold self.clip_output = clip_output self.std_multiplier = std_multiplier @@ -190,13 +191,12 @@ def __init__( random_state=random_state, weight_concentration_prior=1e-3, ) - self.weights: List[float] def _fit(self, x: np.ndarray, **kwargs: Any) -> "BayesianGaussianMixture": self.min_value = x.min() self.max_value = x.max() - self.model.fit(x.reshape(-1, 1)) + self.model.fit(x) self.weights = self.model.weights_ self.means = self.model.means_.reshape(-1) self.stds = np.sqrt(self.model.covariances_).reshape(-1) @@ -204,7 +204,6 @@ def _fit(self, x: np.ndarray, **kwargs: Any) -> "BayesianGaussianMixture": return self def _transform(self, x: np.ndarray) -> np.ndarray: - x = x.reshape(-1, 1) means = self.means.reshape(1, 
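[Editor's note: the `feature_encoder.py` hunk above introduces `n_dim_in = 2` on `BayesianGMMEncoder` and drops the per-call `reshape(-1, 1)` in `_fit`/`_transform`, i.e. the encoder now declares that it expects 2-D input and the base class is responsible for shaping. A minimal sketch of that contract, assuming a `FeatureEncoder` base that consults `n_dim_in` before dispatching to `_fit`; attribute and method names follow the diff, the base-class plumbing is an assumption.]

```python
# Hedged sketch: an encoder declaring its expected input rank via n_dim_in.
import numpy as np

class FeatureEncoderBase:
    n_dim_in: int = 1  # subclasses override; base reshapes accordingly

    def fit(self, x: np.ndarray) -> "FeatureEncoderBase":
        if self.n_dim_in == 2 and x.ndim == 1:
            x = x.reshape(-1, 1)  # shape once here, not in every subclass
        return self._fit(x)

    def _fit(self, x: np.ndarray) -> "FeatureEncoderBase":
        raise NotImplementedError

class GMMLikeEncoder(FeatureEncoderBase):
    n_dim_in = 2  # sklearn mixture models want (n_samples, n_features)

    def _fit(self, x: np.ndarray) -> "GMMLikeEncoder":
        assert x.ndim == 2  # guaranteed by the base class
        return self
```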
-1) stds = self.stds.reshape(1, -1) diff --git a/src/synthcity/plugins/core/models/tabular_encoder.py b/src/synthcity/plugins/core/models/tabular_encoder.py index ad07a06c..364a6b57 100644 --- a/src/synthcity/plugins/core/models/tabular_encoder.py +++ b/src/synthcity/plugins/core/models/tabular_encoder.py @@ -274,10 +274,18 @@ def activation_layout( - discrete, and with length , the length of the one-hot encoding. """ out = [] + acts = dict(discrete=discrete_activation, continuous=continuous_activation) + # NOTE: be careful with the dim of softmax! for column_transform_info in self._column_transform_info_list: + ct = column_transform_info.trans_feature_types[0] + d = 0 for t in column_transform_info.trans_feature_types: - act = discrete_activation if t == "discrete" else continuous_activation - out.append((act, 1)) + if t != ct: + out.append((acts[ct], d)) + ct = t + d = 0 + d += 1 + out.append((acts[ct], d)) return out @@ -291,10 +299,8 @@ class BinEncoder(TabularEncoder): continuous_encoder = "bayesian_gmm" cont_encoder_params = dict(n_components=2) categorical_encoder = "passthrough" # "onehot" - # ! onehot encoder does not pass the tests cat_encoder_params = dict() # dict(handle_unknown="ignore", sparse=False) - # TODO: check if this is correct def _transform_feature( self, column_transform_info: FeatureInfo, feature: pd.Series ) -> pd.DataFrame: diff --git a/tests/plugins/core/models/test_tabular_encoder.py b/tests/plugins/core/models/test_tabular_encoder.py index 6837190a..9050c826 100644 --- a/tests/plugins/core/models/test_tabular_encoder.py +++ b/tests/plugins/core/models/test_tabular_encoder.py @@ -105,21 +105,17 @@ def check_equal_layouts( layout: list, act_layout: list, disc_act: str, cont_act: str ) -> None: expected_act_layout = [] + for col_info in layout: if col_info.feature_type == "continuous": - expected_act_layout.append(cont_act) - for _ in range(col_info.output_dimensions - 1): - expected_act_layout.append(disc_act) + expected_act_layout.append((cont_act, 1)) + r = col_info.output_dimensions - 1 + if r > 0: + expected_act_layout.append((disc_act, r)) else: - for _ in range(col_info.output_dimensions): - expected_act_layout.append(disc_act) - - expanded_act_layout = [] - for act, num in act_layout: - for _ in range(num): - expanded_act_layout.append(act) + expected_act_layout.append((disc_act, col_info.output_dimensions)) - assert expanded_act_layout == expected_act_layout + assert expected_act_layout == act_layout def test_encoder_activation_layout() -> None: diff --git a/tests/plugins/generic/test_ddpm.py b/tests/plugins/generic/test_ddpm.py index 1e8766ac..8fc4664a 100644 --- a/tests/plugins/generic/test_ddpm.py +++ b/tests/plugins/generic/test_ddpm.py @@ -120,7 +120,7 @@ def test_plugin_generate_constraints(test_plugin: Plugin) -> None: @pytest.mark.parametrize("test_plugin", extend_fixtures()) def test_plugin_hyperparams(test_plugin: Plugin) -> None: - assert len(test_plugin.hyperparameter_space()) == 6 + assert len(test_plugin.hyperparameter_space()) == 4 def test_sample_hyperparams() -> None: From 694cd223c7d6398d2ee3a1ace3998d1f0abce961 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Mon, 3 Apr 2023 22:06:30 +0200 Subject: [PATCH 42/95] remove unnecessary code --- src/synthcity/plugins/core/models/factory.py | 2 +- src/synthcity/plugins/core/models/mlp.py | 2 +- src/synthcity/plugins/core/models/tabnet.py | 320 ------------------ .../core/models/tabular_ddpm/__init__.py | 3 +- src/synthcity/utils/dataframe.py | 12 - 5 files changed, 4 
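[Editor's note: the `activation_layout` fix above changes the return value from one `(activation, 1)` tuple per transformed column to run-length-compressed tuples, grouping consecutive columns of the same feature type, which matters because a softmax over a one-hot block must span the whole block rather than single columns ("be careful with the dim of softmax!"). A small worked example of the intended output, derived from the loop in the hunk; the feature-type sequence is hypothetical.]

```python
# Hedged sketch: run-length compression of per-column activations,
# mirroring the grouping loop in the activation_layout hunk.
from typing import List, Tuple

def compress_layout(types: List[str]) -> List[Tuple[str, int]]:
    acts = dict(discrete="softmax", continuous="tanh")
    out: List[Tuple[str, int]] = []
    current, count = types[0], 0
    for t in types:
        if t != current:
            out.append((acts[current], count))
            current, count = t, 0
        count += 1
    out.append((acts[current], count))
    return out

# One continuous column followed by a 3-way one-hot block:
assert compress_layout(
    ["continuous", "discrete", "discrete", "discrete"]
) == [("tanh", 1), ("softmax", 3)]
```

The updated test (`check_equal_layouts`) encodes exactly this expectation: a continuous feature contributes `(cont_act, 1)` plus one grouped `(disc_act, r)` entry for its remaining one-hot components, and the hyperparameter-space count drops from 6 to 4 because the two MLP-specific distributions were commented out in patch 38.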
insertions(+), 335 deletions(-) diff --git a/src/synthcity/plugins/core/models/factory.py b/src/synthcity/plugins/core/models/factory.py index e2d69525..586186d5 100644 --- a/src/synthcity/plugins/core/models/factory.py +++ b/src/synthcity/plugins/core/models/factory.py @@ -68,7 +68,7 @@ def _factory(type_: Union[str, type], params: dict, registry: dict) -> Any: if isinstance(type_, type): return type_(**params) - type_ = type_.lower().replace("_", "").replace("-", "") + type_ = type_.lower().replace("_", "") if type_ in registry: cls = registry[type_] if isinstance(cls, str): diff --git a/src/synthcity/plugins/core/models/mlp.py b/src/synthcity/plugins/core/models/mlp.py index 5d85c1c8..877dbe9c 100644 --- a/src/synthcity/plugins/core/models/mlp.py +++ b/src/synthcity/plugins/core/models/mlp.py @@ -111,9 +111,9 @@ class MLP(nn.Module): @validate_arguments(config=dict(arbitrary_types_allowed=True)) def __init__( self, - *, n_units_in: int, n_units_out: int, + *, task_type: str = "regression", # classification/regression n_layers_hidden: int = 1, n_units_hidden: int = 100, diff --git a/src/synthcity/plugins/core/models/tabnet.py b/src/synthcity/plugins/core/models/tabnet.py index c7e41c52..c5fc702f 100644 --- a/src/synthcity/plugins/core/models/tabnet.py +++ b/src/synthcity/plugins/core/models/tabnet.py @@ -509,323 +509,3 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.bn(x) out = torch.mul(x[:, : self.output_dim], torch.sigmoid(x[:, self.output_dim :])) return out - - -# class TabNetDecoder(torch.nn.Module): -# def __init__( -# self, -# input_dim, -# n_d=8, -# n_steps=3, -# n_independent=1, -# n_shared=1, -# virtual_batch_size=128, -# momentum=0.02, -# ): -# """ -# Defines main part of the TabNet network without the embedding layers. - -# Parameters -# ---------- -# input_dim : int -# Number of features -# output_dim : int or list of int for multi task classification -# Dimension of network output -# examples : one for regression, 2 for binary classification etc... 
-# n_d : int -# Dimension of the prediction layer (usually between 4 and 64) -# n_steps : int -# Number of successive steps in the network (usually between 3 and 10) -# gamma : float -# Float above 1, scaling factor for attention updates (usually between 1.0 to 2.0) -# n_independent : int -# Number of independent GLU layer in each GLU block (default 1) -# n_shared : int -# Number of independent GLU layer in each GLU block (default 1) -# virtual_batch_size : int -# Batch size for Ghost Batch Normalization -# momentum : float -# Float value between 0 and 1 which will be used for momentum in all batch norm -# """ -# super(TabNetDecoder, self).__init__() -# self.input_dim = input_dim -# self.n_d = n_d -# self.n_steps = n_steps -# self.n_independent = n_independent -# self.n_shared = n_shared -# self.virtual_batch_size = virtual_batch_size - -# self.feat_transformers = torch.nn.ModuleList() - -# if self.n_shared > 0: -# shared_feat_transform = torch.nn.ModuleList() -# for i in range(self.n_shared): -# if i == 0: -# shared_feat_transform.append(Linear(n_d, 2 * n_d, bias=False)) -# else: -# shared_feat_transform.append(Linear(n_d, 2 * n_d, bias=False)) - -# else: -# shared_feat_transform = None - -# for step in range(n_steps): -# transformer = FeatTransformer( -# n_d, -# n_d, -# shared_feat_transform, -# n_glu_independent=self.n_independent, -# virtual_batch_size=self.virtual_batch_size, -# momentum=momentum, -# ) -# self.feat_transformers.append(transformer) - -# self.reconstruction_layer = Linear(n_d, self.input_dim, bias=False) -# initialize_non_glu(self.reconstruction_layer, n_d, self.input_dim) - -# def forward(self, steps_output): -# res = 0 -# for step_nb, step_output in enumerate(steps_output): -# x = self.feat_transformers[step_nb](step_output) -# res = torch.add(res, x) -# res = self.reconstruction_layer(res) -# return res - - -# class TabNetPretraining(torch.nn.Module): -# def __init__( -# self, -# input_dim, -# pretraining_ratio=0.2, -# n_d=8, -# n_a=8, -# n_steps=3, -# gamma=1.3, -# cat_idxs=[], -# cat_dims=[], -# cat_emb_dim=1, -# n_independent=2, -# n_shared=2, -# epsilon=1e-15, -# virtual_batch_size=128, -# momentum=0.02, -# mask_type="sparsemax", -# n_shared_decoder=1, -# n_indep_decoder=1, -# group_attention_matrix=None, -# ): -# super(TabNetPretraining, self).__init__() - -# self.cat_idxs = cat_idxs or [] -# self.cat_dims = cat_dims or [] -# self.cat_emb_dim = cat_emb_dim - -# self.input_dim = input_dim -# self.n_d = n_d -# self.n_a = n_a -# self.n_steps = n_steps -# self.gamma = gamma -# self.epsilon = epsilon -# self.n_independent = n_independent -# self.n_shared = n_shared -# self.mask_type = mask_type -# self.pretraining_ratio = pretraining_ratio -# self.n_shared_decoder = n_shared_decoder -# self.n_indep_decoder = n_indep_decoder - -# if self.n_steps <= 0: -# raise ValueError("n_steps should be a positive integer.") -# if self.n_independent == 0 and self.n_shared == 0: -# raise ValueError("n_shared and n_independent can't be both zero.") - -# self.virtual_batch_size = virtual_batch_size -# self.embedder = EmbeddingGenerator( -# input_dim, cat_dims, cat_idxs, cat_emb_dim, group_attention_matrix -# ) -# self.post_embed_dim = self.embedder.post_embed_dim - -# self.masker = RandomObfuscator( -# self.pretraining_ratio, group_matrix=self.embedder.embedding_group_matrix -# ) -# self.encoder = TabNetEncoder( -# input_dim=self.post_embed_dim, -# output_dim=self.post_embed_dim, -# n_d=n_d, -# n_a=n_a, -# n_steps=n_steps, -# gamma=gamma, -# n_independent=n_independent, -# 
n_shared=n_shared, -# epsilon=epsilon, -# virtual_batch_size=virtual_batch_size, -# momentum=momentum, -# mask_type=mask_type, -# group_attention_matrix=self.embedder.embedding_group_matrix, -# ) -# self.decoder = TabNetDecoder( -# self.post_embed_dim, -# n_d=n_d, -# n_steps=n_steps, -# n_independent=self.n_indep_decoder, -# n_shared=self.n_shared_decoder, -# virtual_batch_size=virtual_batch_size, -# momentum=momentum, -# ) - -# def forward(self, x): -# """ -# Returns: res, embedded_x, obf_vars -# res : output of reconstruction -# embedded_x : embedded input -# obf_vars : which variable where obfuscated -# """ -# embedded_x = self.embedder(x) -# if self.training: -# masked_x, obfuscated_groups, obfuscated_vars = self.masker(embedded_x) -# # set prior of encoder with obfuscated groups -# prior = 1 - obfuscated_groups -# steps_out, _ = self.encoder(masked_x, prior=prior) -# res = self.decoder(steps_out) -# return res, embedded_x, obfuscated_vars -# else: -# steps_out, _ = self.encoder(embedded_x) -# res = self.decoder(steps_out) -# return res, embedded_x, torch.ones(embedded_x.shape).to(x.device) - -# def forward_masks(self, x): -# embedded_x = self.embedder(x) -# return self.encoder.forward_masks(embedded_x) - - -# class EmbeddingGenerator(torch.nn.Module): -# """ -# Categorical embeddings generator -# """ - -# def __init__(self, input_dim, cat_dims, cat_idxs, cat_emb_dims, group_matrix): -# """This is an embedding module for an entire set of features - -# Parameters -# ---------- -# input_dim : int -# Number of features coming as input (number of columns) -# cat_dims : list of int -# Number of modalities for each categorial features -# If the list is empty, no embeddings will be done -# cat_idxs : list of int -# Positional index for each categorical features in inputs -# cat_emb_dim : list of int -# Embedding dimension for each categorical features -# If int, the same embedding dimension will be used for all categorical features -# group_matrix : torch matrix -# Original group matrix before embeddings -# """ -# super(EmbeddingGenerator, self).__init__() - -# if cat_dims == [] and cat_idxs == []: -# self.skip_embedding = True -# self.post_embed_dim = input_dim -# self.embedding_group_matrix = group_matrix.to(group_matrix.device) -# return -# else: -# self.skip_embedding = False - -# self.post_embed_dim = int(input_dim + np.sum(cat_emb_dims) - len(cat_emb_dims)) - -# self.embeddings = torch.nn.ModuleList() - -# for cat_dim, emb_dim in zip(cat_dims, cat_emb_dims): -# self.embeddings.append(torch.nn.Embedding(cat_dim, emb_dim)) - -# # record continuous indices -# self.continuous_idx = torch.ones(input_dim, dtype=torch.bool) -# self.continuous_idx[cat_idxs] = 0 - -# # update group matrix -# n_groups = group_matrix.shape[0] -# self.embedding_group_matrix = torch.empty( -# (n_groups, self.post_embed_dim), device=group_matrix.device -# ) -# for group_idx in range(n_groups): -# post_emb_idx = 0 -# cat_feat_counter = 0 -# for init_feat_idx in range(input_dim): -# if self.continuous_idx[init_feat_idx] == 1: -# # this means that no embedding is applied to this column -# self.embedding_group_matrix[group_idx, post_emb_idx] = group_matrix[ -# group_idx, init_feat_idx -# ] # noqa -# post_emb_idx += 1 -# else: -# # this is a categorical feature which creates multiple embeddings -# n_embeddings = cat_emb_dims[cat_feat_counter] -# self.embedding_group_matrix[ -# group_idx, post_emb_idx : post_emb_idx + n_embeddings -# ] = ( -# group_matrix[group_idx, init_feat_idx] / n_embeddings -# ) # noqa -# post_emb_idx 
+= n_embeddings -# cat_feat_counter += 1 - -# def forward(self, x): -# """ -# Apply embeddings to inputs -# Inputs should be (batch_size, input_dim) -# Outputs will be of size (batch_size, self.post_embed_dim) -# """ -# if self.skip_embedding: -# # no embeddings required -# return x - -# cols = [] -# cat_feat_counter = 0 -# for feat_init_idx, is_continuous in enumerate(self.continuous_idx): -# # Enumerate through continuous idx boolean mask to apply embeddings -# if is_continuous: -# cols.append(x[:, feat_init_idx].float().view(-1, 1)) -# else: -# cols.append( -# self.embeddings[cat_feat_counter](x[:, feat_init_idx].long()) -# ) -# cat_feat_counter += 1 -# # concat -# post_embeddings = torch.cat(cols, dim=1) -# return post_embeddings - - -# class RandomObfuscator(torch.nn.Module): -# """ -# Create and applies obfuscation masks. -# The obfuscation is done at group level to match attention. -# """ - -# def __init__(self, pretraining_ratio, group_matrix): -# """ -# This create random obfuscation for self suppervised pretraining -# Parameters -# ---------- -# pretraining_ratio : float -# Ratio of feature to randomly discard for reconstruction - -# """ -# super(RandomObfuscator, self).__init__() -# self.pretraining_ratio = pretraining_ratio -# # group matrix is set to boolean here to pass all posssible information -# self.group_matrix = (group_matrix > 0) + 0.0 -# self.num_groups = group_matrix.shape[0] - -# def forward(self, x): -# """ -# Generate random obfuscation mask. - -# Returns -# ------- -# masked input and obfuscated variables. -# """ -# bs = x.shape[0] - -# obfuscated_groups = torch.bernoulli( -# self.pretraining_ratio * torch.ones((bs, self.num_groups), device=x.device) -# ) -# obfuscated_vars = torch.matmul(obfuscated_groups, self.group_matrix) -# masked_input = torch.mul(1 - obfuscated_vars, x) -# return masked_input, obfuscated_groups, obfuscated_vars diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py index 20a293b4..05edf404 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py @@ -88,8 +88,9 @@ def fit( if cat_cols: cat_cols, cat_counts = zip(*cat_cols) + num_cols = X.columns.difference(cat_cols) # reorder the columns so that the categorical ones go to the end - X = X[np.hstack([X.columns[~X.keys().isin(cat_cols)], cat_cols])] + X = X[num_cols.append(cat_cols)] self.feature_names_out = X.columns else: cat_counts = [0] diff --git a/src/synthcity/utils/dataframe.py b/src/synthcity/utils/dataframe.py index 80104e23..a313b91e 100644 --- a/src/synthcity/utils/dataframe.py +++ b/src/synthcity/utils/dataframe.py @@ -1,19 +1,7 @@ -# stdlib -import enum - # third party import pandas as pd -class TaskType(enum.Enum): - BINARY = "binary" - MULTICLASS = "multiclass" - REGRESSION = "regression" - - def __str__(self) -> str: - return self.value - - def constant_columns(dataframe: pd.DataFrame) -> list: """ Find constant value columns in a pandas dataframe. 
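[Editor's note: the `tabular_ddpm/__init__.py` hunk above fixes the column-reordering bug: `pd.Index.append` expects an Index, and passing a plain tuple/list of names produced the wrong ordering, so the patch builds the reordered column list with plain Python list concatenation instead. A minimal sketch of the corrected reorder, with a toy dataframe; column names are illustrative.]

```python
# Hedged sketch: moving categorical columns to the end, as fixed in the hunk.
import pandas as pd

X = pd.DataFrame({"age": [30, 41], "city": ["a", "b"], "income": [1.0, 2.0]})
cat_cols = ["city"]

num_cols = X.columns.difference(cat_cols)      # Index of numeric columns
X = X[list(num_cols) + list(cat_cols)]         # plain list concat, not Index.append
assert list(X.columns) == ["age", "income", "city"]
```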
From a45978510f20915fc849b54948c99bcae8dd997a Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Thu, 6 Apr 2023 19:33:54 +0200 Subject: [PATCH 43/95] fix minor bug and add more nn models in factory --- setup.cfg | 3 +- src/synthcity/plugins/core/models/factory.py | 44 +- .../core/models/tabular_ddpm/__init__.py | 2 +- ...al8_tabular_modelling_with_diffusion.ipynb | 1061 ++++++++++------- 4 files changed, 682 insertions(+), 428 deletions(-) diff --git a/setup.cfg b/setup.cfg index 7be378de..7e20f43c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -58,7 +58,8 @@ install_requires = monai tsai; python_version>"3.7" importlib-metadata; python_version<"3.8" - + igraph + pytest-cov [options.packages.find] where = src diff --git a/src/synthcity/plugins/core/models/factory.py b/src/synthcity/plugins/core/models/factory.py index 586186d5..a23ffc06 100644 --- a/src/synthcity/plugins/core/models/factory.py +++ b/src/synthcity/plugins/core/models/factory.py @@ -5,6 +5,14 @@ # third party from pydantic import validate_arguments from torch import nn +from tsai.models.InceptionTime import InceptionTime +from tsai.models.InceptionTimePlus import InceptionTimePlus +from tsai.models.OmniScaleCNN import OmniScaleCNN +from tsai.models.ResCNN import ResCNN +from tsai.models.RNN_FCN import MLSTM_FCN +from tsai.models.TCN import TCN +from tsai.models.XceptionTime import XceptionTime +from tsai.models.XCM import XCM # synthcity relative from .feature_encoder import ( @@ -23,11 +31,22 @@ # should only contain nn modules that can be used as building blocks in larger models MODELS = dict( mlp=".mlp.MLP", + # attention models + transformer=".transformer.TransformerModel", + tabnet=".tabnet.TabNet", + # rnn models rnn=nn.RNN, gru=nn.GRU, lstm=nn.LSTM, - transformer=".transformer.TransformerModel", - tabnet=".tabnet.TabNet", + # time series models + inceptiontime=InceptionTime, + omniscalecnn=OmniScaleCNN, + rescnn=ResCNN, + mlstmfcn=MLSTM_FCN, + tcn=TCN, + xceptiontime=XceptionTime, + xcm=XCM, + inceptiontimeplus=InceptionTimePlus, ) ACTIVATIONS = dict( @@ -74,7 +93,7 @@ def _factory(type_: Union[str, type], params: dict, registry: dict) -> Any: if isinstance(cls, str): cls = registry[type_] = _dynamic_import(cls) return cls(**params) - raise ValueError + raise ValueError(f"Unknown type: {type_}") def _dynamic_import(path: str) -> type: @@ -83,9 +102,9 @@ def _dynamic_import(path: str) -> type: package = __name__.rsplit(".", 1)[0] else: package = None - mod_path, cls = path.rsplit(".", 1) + mod_path, name = path.rsplit(".", 1) module = import_module(mod_path, package) - return getattr(module, cls) + return getattr(module, name) @validate_arguments(config=dict(arbitrary_types_allowed=True)) @@ -99,19 +118,13 @@ def get_model(block: Union[str, type], params: dict) -> Any: - transformer - tabnet """ - try: - return _factory(block, params, MODELS) - except ValueError: - raise ValueError(f"Unknown nn model: {block}") + return _factory(block, params, MODELS) @validate_arguments(config=dict(arbitrary_types_allowed=True)) def get_nonlin(nonlin: Union[str, nn.Module], params: dict = {}) -> Any: """Get a nonlinearity layer from a name or a class.""" - try: - return _factory(nonlin, params, ACTIVATIONS) - except ValueError: - raise ValueError(f"Unknown nonlinearity: {nonlin}") + return _factory(nonlin, params, ACTIVATIONS) @validate_arguments(config=dict(arbitrary_types_allowed=True)) @@ -131,7 +144,4 @@ def get_feature_encoder(encoder: Union[str, type], params: dict = {}) -> Any: """ if isinstance(encoder, type): # 
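[Editor's note on patch 43 above: the factory now registers `tsai` time-series backbones (InceptionTime, TCN, XCM, ...) alongside `mlp`/`transformer`/`tabnet`, normalizes lookup keys with `type_.lower().replace("_", "")`, and resolves string entries lazily via `_dynamic_import`, caching the imported class back into the registry. A minimal sketch of that lazy string-or-class registry pattern; registry contents here are placeholders, not the real MODELS dict.]

```python
# Hedged sketch of the factory's lazy registry resolution.
from importlib import import_module
from typing import Any, Union

REGISTRY: dict = {"mlp": ".mlp.MLP"}  # values: class or dotted import path

def _dynamic_import(path: str, package: str = "mypkg.models") -> type:
    mod_path, name = path.rsplit(".", 1)
    return getattr(import_module(mod_path, package), name)

def factory(type_: Union[str, type], params: dict) -> Any:
    if isinstance(type_, type):
        return type_(**params)
    type_ = type_.lower().replace("_", "")     # normalized lookup key
    if type_ in REGISTRY:
        cls = REGISTRY[type_]
        if isinstance(cls, str):               # lazy: import on first use
            cls = REGISTRY[type_] = _dynamic_import(cls)
        return cls(**params)
    raise ValueError(f"Unknown type: {type_}")
```

Deferring the import keeps heavy optional dependencies (here, `tsai`) out of the import path until a model actually requests them, and caching the resolved class makes repeated lookups cheap.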
custom encoder encoder = FeatureEncoder.wraps(encoder) - try: - return _factory(encoder, params, FEATURE_ENCODERS) - except ValueError: - raise ValueError(f"Unknown feature encoder: {encoder}") + return _factory(encoder, params, FEATURE_ENCODERS) diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py index 05edf404..713f815e 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py @@ -90,7 +90,7 @@ def fit( cat_cols, cat_counts = zip(*cat_cols) num_cols = X.columns.difference(cat_cols) # reorder the columns so that the categorical ones go to the end - X = X[num_cols.append(cat_cols)] + X = X[list(num_cols) + list(cat_cols)] self.feature_names_out = X.columns else: cat_counts = [0] diff --git a/tutorials/tutorial8_tabular_modelling_with_diffusion.ipynb b/tutorials/tutorial8_tabular_modelling_with_diffusion.ipynb index d73d0f60..d07618a1 100644 --- a/tutorials/tutorial8_tabular_modelling_with_diffusion.ipynb +++ b/tutorials/tutorial8_tabular_modelling_with_diffusion.ipynb @@ -217,26 +217,17 @@ "name": "stderr", "output_type": "stream", "text": [ - "[2023-03-31T01:04:28.062034+0200][12004][INFO] Encoding sepal length (cm) 8461685668942494555\n", - "[2023-03-31T01:04:28.068034+0200][12004][INFO] Encoding sepal width (cm) 7372477013158199918\n", - "[2023-03-31T01:04:28.074037+0200][12004][INFO] Encoding petal length (cm) 8795408021141068254\n", - "[2023-03-31T01:04:28.081036+0200][12004][INFO] Encoding petal width (cm) 1839870727438321343\n", - "[2023-03-31T01:04:29.905425+0200][12004][INFO] Step 100: MLoss: 0.0 GLoss: 0.3103 Sum: 0.3103\n", - "[2023-03-31T01:04:31.486761+0200][12004][INFO] Step 200: MLoss: 0.0 GLoss: 0.3111 Sum: 0.3111\n", - "[2023-03-31T01:04:33.076905+0200][12004][INFO] Step 300: MLoss: 0.0 GLoss: 0.317 Sum: 0.317\n", - "[2023-03-31T01:04:34.611746+0200][12004][INFO] Step 400: MLoss: 0.0 GLoss: 0.3009 Sum: 0.3009\n", - "[2023-03-31T01:04:36.176039+0200][12004][INFO] Step 500: MLoss: 0.0 GLoss: 0.3154 Sum: 0.3154\n", - "[2023-03-31T01:04:37.956754+0200][12004][INFO] Step 600: MLoss: 0.0 GLoss: 0.3055 Sum: 0.3055\n", - "[2023-03-31T01:04:39.561269+0200][12004][INFO] Step 700: MLoss: 0.0 GLoss: 0.2917 Sum: 0.2917\n", - "[2023-03-31T01:04:41.195544+0200][12004][INFO] Step 800: MLoss: 0.0 GLoss: 0.2817 Sum: 0.2817\n", - "[2023-03-31T01:04:42.967236+0200][12004][INFO] Step 900: MLoss: 0.0 GLoss: 0.266 Sum: 0.266\n", - "[2023-03-31T01:04:44.913448+0200][12004][INFO] Step 1000: MLoss: 0.0 GLoss: 0.2793 Sum: 0.2793\n" + "[2023-04-06T19:07:53.035827+0200][45392][INFO] Encoding sepal length (cm) 8461685668942494555\n", + "[2023-04-06T19:07:53.045457+0200][45392][INFO] Encoding sepal width (cm) 7372477013158199918\n", + "[2023-04-06T19:07:53.054429+0200][45392][INFO] Encoding petal length (cm) 8795408021141068254\n", + "[2023-04-06T19:07:53.066673+0200][45392][INFO] Encoding petal width (cm) 1839870727438321343\n", + "[2023-04-06T19:07:55.483186+0200][45392][INFO] Step 100: MLoss: 0.0 GLoss: 0.3032 Sum: 0.3032\n" ] }, { "data": { "text/plain": [ - "" + "" ] }, "execution_count": 4, @@ -253,11 +244,13 @@ " weight_decay = 1e-4,\n", " batch_size = 1000,\n", " model_type = \"mlp\", # or \"resnet\"\n", + " model_params = dict(\n", + " n_layers_hidden = 3,\n", + " n_units_hidden = 256,\n", + " dropout = 0.0,\n", + " ),\n", " num_timesteps = 500, # timesteps in diffusion\n", - " n_layers_hidden = 3,\n", - " dim_hidden = 256,\n", " 
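[Editor's note: the tutorial hunk above migrates the notebook to the new `model_params` dict (matching patch 38) and shows the denoiser renamed from `MLPDiffusion` to `DiffusionModel` in the printed module tree. The equivalent plain-script call, for readers following along outside the notebook; mirrors the cell shown in the diff.]

```python
# Hedged sketch duplicating the tutorial cell's plugin configuration.
from synthcity.plugins import Plugins

model = Plugins().get(
    "ddpm",
    n_iter=1000,
    lr=0.002,
    weight_decay=1e-4,
    batch_size=1000,
    model_type="mlp",  # or "resnet"
    model_params=dict(n_layers_hidden=3, n_units_hidden=256, dropout=0.0),
    num_timesteps=500,  # timesteps in diffusion
    dim_embed=128,
    log_interval=10,
    print_interval=100,
)
```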
dim_embed = 128,\n", - " dropout = 0.0,\n", " # performance logging\n", " log_interval = 10,\n", " print_interval = 100,\n", @@ -278,7 +271,7 @@ "text/plain": [ "TabDDPM(\n", " (diffusion): GaussianMultinomialDiffusion(\n", - " (denoise_fn): MLPDiffusion(\n", + " (denoise_fn): DiffusionModel(\n", " (emb_nonlin): SiLU()\n", " (proj): Linear(in_features=4, out_features=128, bias=True)\n", " (time_emb): TimeStepEmbedding(\n", @@ -315,7 +308,7 @@ " )\n", " )\n", " )\n", - " (ema_model): MLPDiffusion(\n", + " (ema_model): DiffusionModel(\n", " (emb_nonlin): SiLU()\n", " (proj): Linear(in_features=4, out_features=128, bias=True)\n", " (time_emb): TimeStepEmbedding(\n", @@ -372,7 +365,7 @@ { "data": { "text/plain": [ - "" + "" ] }, "execution_count": 6, @@ -381,14 +374,12 @@ }, { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXQAAAEGCAYAAAB1iW6ZAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/P9b71AAAACXBIWXMAAAsTAAALEwEAmpwYAAAx7ElEQVR4nO3dd3gU5drH8e+9SSAJTSkqCpioFCEUIYB0qQIqRUSpgooczxHxHI8ovnbsYi9HQQUEhVBVmiCCSBXpAqGFDoKETtgkJLv3+0eWGCAkCynLbu7PdeVid+bZmXt2uH47++zMPKKqGGOM8X8OXxdgjDEmd1igG2NMgLBAN8aYAGGBbowxAcIC3RhjAkSwr1ZcunRpjYiI8NXqjTHGL61cufKQqpbJbJ7PAj0iIoIVK1b4avXGGOOXRGTXheZZl4sxxgQIC3RjjAkQFujGGBMgfNaHbowxWUlJSWHv3r0kJSX5uhSfCA0NpVy5coSEhHj9Ggt0Y8xlae/evRQrVoyIiAhExNfl5CtV5fDhw+zdu5fIyEivX2ddLsaYy1JSUhKlSpUqcGEOICKUKlXqor+dWKAbYy5bBTHMz7iUbfe7QF+1djQfTemKK/W0r0sxxpjLit8F+rq9C/ni5CYSEw/7uhRjjGHUqFEMGDDA12UAfhjoYSFFAHA6LdCNMSYjPwz0ogAkJh31cSXGmEC3c+dOqlSpQt++falUqRI9e/bk559/plGjRlSsWJHff//9vPYtWrSgRo0atGzZkt27dwMwceJEoqKiqFmzJk2bNgVgw4YN1KtXj1q1alGjRg22bt2a43r97rTF8MLFAHAmHfFxJcaY/PLytA3E/nkiV5dZ9drivHhXtWzbxcXFMXHiREaMGEHdunUZO3YsixYtYurUqbz++ut06tQpve1jjz1Gnz596NOnDyNGjGDgwIF8//33DBkyhNmzZ3Pddddx7NgxAD7//HMef/xxevbsyenTp3G5XDneJr87Qg8vXAIAZ9Ix3xZijCkQIiMjqV69Og6Hg2rVqtGyZUtEhOrVq7Nz586z2i5dupQePXoA0Lt3bxYtWgRAo0aN6Nu3L1988UV6cDdo0IDXX3+dt956i127dhEWFpbjWv3wCP0KAJzJx31biDEm33hzJJ1XChcunP7Y4XCkP3c4HKSmpnq1jM8//5xly5YxY8YM6tSpw8qVK+nRowf169dnxowZtG/fnmHDhtGiRYsc1ep3R+hhoWlH6InJufv1yxhjcqphw4bExMQA8O2339KkSRMAtm3bRv369RkyZAhlypRhz549bN++nRtuuIGBAwfSsWNH/vjjjxyv36tAF5G2IrJZROJEZHAm898XkTWevy0icizHlV1AeOiVADhPn8yrVRhjzCX5+OOPGTlyJDVq1GDMmDF8+OGHAAwaNIjq1asTFRVFw4YNqVmzJhMmTCAqKopatWqxfv167r///hyvX1Q16wYiQcAWoDWwF1gOdFfV2Au0fwy4RVUfzGq50dHReikDXBw5EkezaZ155uqm9Gj76UW/3hjjHzZu3MjNN9/s6zJ8KrP3QERWqmp0Zu29OUKvB8Sp6nZVPQ3EAB2zaN8dGOdlvRctPKwUAM6UU3m1CmOM8UveBPp1wJ4Mz/d6pp1HRK4HIoF5OS8tc4ULl0BUSUxNzKtVGGOMX8rtH0W7AZNUNdMTKkWkv4isEJEV8fHxl7QCcTgIV3BaoBtjzFm8CfR9QPkMz8t5pmWmG1l0t6jqcFWNVtXoMmUyHbTaK+EKiakF86b3xhhzId4E+nKgoohEikgh0kJ76rmNRKQKcCWwNHdLPF84gtOVnNerMcYYv5JtoKtqKjAAmA1sBCao6gYRGSIiHTI07QbEaHanzeSCMHGQ6E7J69UYY4xf8epKUVWdCcw8Z9oL5zx/KffKylq4BON02/3QjTH5r2/fvtx5553cc889vi7lPH53pShAmATjzPx3V2OMKbD8MtDDHYUs0I0xee6VV16hcuXKNG7cmO7du/POO++cNX/u3LnccsstVK9enQcffJDk5LTf9gYPHkzVqlWpUaMGTz75JJD5LXRzm9/dnAsgLKgQiSluX5dhjMkvPw6GA+tyd5nXVId2b15w9vLly5k8eTJr164lJSWF2rVrU6dOnfT5SUlJ9O3bl7lz51KpUiXuv/9+PvvsM3r37s13333Hpk2bEJH02+Vmdgvd3OafR+hBhXEW3LFjjTH5YPHixXTs2JHQ0FCKFSvGXXfdddb8zZs3ExkZSaVKlQDo06cPCxYsoESJEoSGhvLQQw8xZcoUwsPDgcxvoZvb/PIIPTw4zALdmIIkiyPpy01wcDC///47c+fOZdKkSXzyySfMmzcv01volipVKlfX7Z9H6MHhpIiQkuL0dSnGmADVqFEjpk2bRlJSEgkJCUyfPv2s+ZUrV2bnzp3ExcUBMGbMGJo1a0ZCQgLHjx+nffv2vP/++6xduxbI/Ba6uc0vj9DDQtJG9kh0HiakRLiPqzHGBKK6devSoUMHatSowdVXX0316tUpUaJE+vzQ0FBGjhxJ165dSU1NpW7dujzyyCMcOXKEjh07kpSUhKry3nvvAWm30N26dSuqSsuWLalZs2au1+yXgR7uGSjamXiY4iXKZ9PaGGMuzZNPPslLL72E0+mkadOm1KlTh4cffjh9fsuWLVm9evVZrylbtux5g0cDTJkyJc/r9c9AL+QZKDrRBoo2xuSd/v37ExsbS1JSEn369KF27
dq+LilL/hnohYsDkJh01MeVGGMC2dixY31dwkXxyx9Fw84codtA0cYYk84vAz28cNq4ookW6MYYk84/Az30CgCcySd8W4gxxlxG/DPQw0oC4Dx90seVGGPM5cMvAz3ME+iJNlC0MSYPFS1a1NclXBS/DPTw8NIAOC3QjTEmnV8GekhIEYJVcdql/8aYfKCqDBo0iKioKKpXr8748eMB2L9/P02bNqVWrVpERUWxcOFCXC4Xffv2TW/7/vvv51udfnkeujgchCk4XTZQtDEFwVu/v8WmI5tydZlVSlbh6XpPe9V2ypQprFmzhrVr13Lo0CHq1q1L06ZNGTt2LLfffjvPPvssLpcLp9PJmjVr2LdvH+vXrwfIs1vlZsarI3QRaSsim0UkTkQGX6DNvSISKyIbRCTPz8YPV0i0QDfG5INFixbRvXt3goKCuPrqq2nWrBnLly+nbt26jBw5kpdeeol169ZRrFgxbrjhBrZv385jjz3GrFmzKF68eL7Vme0RuogEAZ8CrYG9wHIRmaqqsRnaVASeARqp6lERuSqvCj4jDAdOl40rakxB4O2RdH5r2rQpCxYsYMaMGfTt25cnnniC+++/n7Vr1zJ79mw+//xzJkyYwIgRI/KlHm+O0OsBcaq6XVVPAzFAx3PaPAx8qqpHAVT1YO6Web5wcdhA0caYfNGkSRPGjx+Py+UiPj6eBQsWUK9ePXbt2sXVV1/Nww8/TL9+/Vi1ahWHDh3C7XbTpUsXXn31VVatWpVvdXrTh34dkPHGvXuB+ue0qQQgIouBIOAlVZ117oJEpD/QH6BChQqXUm+6cAnG6U7N0TKMMcYbnTt3ZunSpdSsWRMR4e233+aaa67h66+/ZujQoYSEhFC0aFFGjx7Nvn37eOCBB3C704bJfOONN/Ktztz6UTQYqAjcBpQDFohIdVU9lrGRqg4HhgNER0drTlYY7gjhkCsxJ4swxpgsJSQkACAiDB06lKFDh541v0+fPvTp0+e81+XnUXlG3nS57AMy3nS8nGdaRnuBqaqaoqo7gC2kBXyeCXOE4FQbKNoYY87wJtCXAxVFJFJECgHdgKnntPmetKNzRKQ0aV0w23OvzPOFBxXCiQW6McackW2gq2oqMACYDWwEJqjqBhEZIiIdPM1mA4dFJBb4BRikqofzqmiA8KAwEm2gaGMCmmqOemb92qVsu1d96Ko6E5h5zrQXMjxW4AnPX74IDw4lUUDdbsThlxe8GmOyEBoayuHDhylVqhQiBevoTVU5fPgwoaGhF/U6v7xSFCAsOIxUEVJSTlGocDFfl2OMyWXlypVj7969xMfH+7oUnwgNDaVcuXIX9Rq/DfTwkCIAOJ2HLNCNCUAhISFERkb6ugy/4rd9FeEhabe1dCbmaVe9Mcb4Db8N9DPjiiYm2kDRxhgDfhzo4Z5uFmfSMd8WYowxlwm/DfSwwiUAcCbbEboxxoAfB7oNFG2MMWfz40D3jCtqA0UbYwzgx4EeduYI3QLdGGMAPw708LBSADhTEnxciTHGXB78NtDDws8Eug0UbYwx4MeBHhISTiFVElPtnujGGAN+HOgAYQpOC3RjjAH8PNDDFZyuZF+XYYwxlwX/DnQcJFqgG2MM4O+BLkE43Sm+LsMYYy4Lfh3oYRJEoqb6ugxjjLks+HWghzsK4bRAN8YYwMtAF5G2IrJZROJEZHAm8/uKSLyIrPH89cv9Us8X5gjBqTZQtDHGgBcjFolIEPAp0BrYCywXkamqGntO0/GqOiAParyg8KDCJFJwB5E1xpiMvDlCrwfEqep2VT0NxAAd87Ys74QFh+IsWGPHGmPMBXkT6NcBezI83+uZdq4uIvKHiEwSkfKZLUhE+ovIChFZkRsDv4YHh+EUULd1uxhjTG79KDoNiFDVGsAc4OvMGqnqcFWNVtXoMmXK5Hil4cHhqAhJNmqRMcZ4Fej7gIxH3OU809Kp6mFVPXOFz5dAndwpL2vhIUUASEw6kh+rM8aYy5o3gb4cqCgikSJSCOgGTM3YQETKZnjaAdiYeyVe2JXhVwHwV/yG/FidMcZc1rINdFVNBQYAs0kL6gmqukFEhohIB0+zgSKyQUTWAgOBvnlVcEY3X98cgI17FubH6owx5rKW7WmLAKo6E5h5zrQXMjx+Bngmd0vLXvlyDSjqVmIPbeDu/F65McZcZvz6SlFHUDA3SygbEw/4uhRjjPE5vw50gJuLXMdmUkhNSfJ1KcYY41N+H+hVy9Qk2SFs2znX16UYY4xP+X+gR7QAIHb3rz6uxBhjfMvvA/368o0JdysbD9mpi8aYgs3vA90RFEwVKUysc7+vSzHGGJ/y+0AHqFrkOjZz2n4YNcYUaIER6KVrkOQQdu5e4OtSjDHGZwIj0Cs0AyB213zfFmKMMT4UEIEecX0zwtxK7KF1vi7FGGN8JiACPSi4EFWkMBudf/q6FGOM8ZmACHSAm8PLslGTcaWe9nUpxhjjEwET6FVLR5HoELbvnOfrUowxxicCJtDrV+0GwK8bY3xciTHG+EbABPo119Qiyh3MvENrfV2KMcb4RMAEOkDLUjVY50jlr7/+8HUpxhiT7wIq0FtU7wPAvNXDfVyJMcbkv4AK9BsiWxDhEub+tczXpRhjTL7zKtBFpK2IbBaROBEZnEW7LiKiIhKdeyVenJYlKrFCEzl+fLevSjDGGJ/INtBFJAj4FGgHVAW6i0jVTNoVAx4HfHp43LLKfbhEWLBqmC/LMMaYfOfNEXo9IE5Vt6vqaSAG6JhJu1eAtwCf3vKwWpXOXOVS5u6d78syjDEm33kT6NcBezI83+uZlk5EagPlVXVGVgsSkf4iskJEVsTHx190sd5wBAXTokgFFqceZ/uOeaSkOPNkPcYYc7nJ8Y+iIuIA3gP+m11bVR2uqtGqGl2mTJmcrvqC2lS+hySH0HHB49T7th6dR9Rgw8bJebY+Y4y5HAR70WYfUD7D83KeaWcUA6KA+SICcA0wVUQ6qOqK3Cr0YtSt9SAxoVcQ9+fv7Dq2jR+ObWTw0peYcH1zwsJL+qIkY4zJc6KqWTcQCQa2AC1JC/LlQA9VzXQQTxGZDzyZXZhHR0frihX5k/fLVg2n37qP6RUWwdP3TsuXdRpjTF4QkZWqmumZhNl2uahqKjAAmA1sBCao6gYRGSIiHXK31LxRv3Z/uoWW55vEnSxfMyLTNm5Xaj5XZYwxuSvbI/S8kp9H6ABO5yHuiWmOG5hy71zCi16VPu/E8T30nnwHd5auxcMdRudbTcYYc7FydIQeKMLDS/NqnUH86VCG/HAf6nanz3tv5kNsD1JGHl7FqYQDPqzSGGMuXYEJdIDaNe/n0StvYUbqIWJ+GgjA0pWfM/n0fppIEU46hO8XDvFxlcYYc2kKVKADPHznSJpJUd4+MJ+lKz/n5bWfEuGC97r+SA13CN/uX2ijHhlj/FKBC3RHUDCvdRzPNW7hH+s+4U+H8nKdQYSGXUnvGzuxJwgWLP/I12UaY8xFK3CBDlCiRAU+aPwGYQq9itxI7Zr3A9CqwVOUdSljNtuoR8YY/+PNhUUBqXKl
[... base64-encoded PNG data for two matplotlib figure outputs omitted: the hunk swaps the previously committed rendered plot images for regenerated ones ...]
      "text/plain": [
-      "<Figure ...>"
+      "<Figure ...>
" ] }, - "metadata": { - "needs_background": "light" - }, + "metadata": {}, "output_type": "display_data" } ], @@ -445,82 +436,82 @@ " \n", " \n", " 0\n", - " 6.491386\n", - " 2.937301\n", - " 4.396537\n", - " 1.363964\n", + " 4.300000\n", + " 4.400000\n", + " 1.000000\n", + " 0.100000\n", " 1\n", " \n", " \n", " 1\n", - " 6.272807\n", - " 2.878930\n", - " 5.028617\n", - " 1.973149\n", + " 7.900000\n", + " 4.400000\n", + " 6.900000\n", + " 2.500000\n", " 2\n", " \n", " \n", " 2\n", - " 4.912787\n", - " 2.239502\n", - " 2.384605\n", - " 0.845205\n", + " 5.740312\n", + " 2.060491\n", + " 2.659118\n", + " 0.982462\n", " 1\n", " \n", " \n", " 3\n", - " 5.115768\n", - " 2.636920\n", - " 3.933653\n", - " 1.100583\n", + " 4.300000\n", + " 2.000000\n", + " 1.000000\n", + " 0.100002\n", " 1\n", " \n", " \n", " 4\n", - " 5.946947\n", - " 2.976103\n", - " 4.557983\n", - " 1.417799\n", + " 4.300000\n", + " 2.000000\n", + " 1.000000\n", + " 0.100000\n", " 1\n", " \n", " \n", " 5\n", - " 5.528565\n", - " 2.197114\n", - " 4.133016\n", - " 1.296019\n", + " 4.505079\n", + " 2.025100\n", + " 3.619546\n", + " 0.208050\n", " 1\n", " \n", " \n", " 6\n", - " 5.275113\n", - " 2.565652\n", - " 3.698843\n", - " 1.068934\n", + " 6.108867\n", + " 2.511960\n", + " 4.668570\n", + " 1.392304\n", " 1\n", " \n", " \n", " 7\n", - " 7.900000\n", - " 4.400000\n", - " 6.899995\n", - " 2.500000\n", - " 2\n", + " 5.536599\n", + " 2.549499\n", + " 4.443977\n", + " 1.345480\n", + " 1\n", " \n", " \n", " 8\n", - " 6.899334\n", - " 2.847685\n", - " 6.243627\n", - " 1.561012\n", + " 7.900000\n", + " 4.400000\n", + " 6.900000\n", + " 2.500000\n", " 2\n", " \n", " \n", " 9\n", - " 5.267148\n", - " 2.780006\n", - " 3.565531\n", - " 1.128439\n", + " 4.300000\n", + " 2.000000\n", + " 1.000000\n", + " 0.100000\n", " 1\n", " \n", " \n", @@ -529,16 +520,16 @@ ], "text/plain": [ " sepal length (cm) sepal width (cm) petal length (cm) petal width (cm) \\\n", - "0 6.491386 2.937301 4.396537 1.363964 \n", - "1 6.272807 2.878930 5.028617 1.973149 \n", - "2 4.912787 2.239502 2.384605 0.845205 \n", - "3 5.115768 2.636920 3.933653 1.100583 \n", - "4 5.946947 2.976103 4.557983 1.417799 \n", - "5 5.528565 2.197114 4.133016 1.296019 \n", - "6 5.275113 2.565652 3.698843 1.068934 \n", - "7 7.900000 4.400000 6.899995 2.500000 \n", - "8 6.899334 2.847685 6.243627 1.561012 \n", - "9 5.267148 2.780006 3.565531 1.128439 \n", + "0 4.300000 4.400000 1.000000 0.100000 \n", + "1 7.900000 4.400000 6.900000 2.500000 \n", + "2 5.740312 2.060491 2.659118 0.982462 \n", + "3 4.300000 2.000000 1.000000 0.100002 \n", + "4 4.300000 2.000000 1.000000 0.100000 \n", + "5 4.505079 2.025100 3.619546 0.208050 \n", + "6 6.108867 2.511960 4.668570 1.392304 \n", + "7 5.536599 2.549499 4.443977 1.345480 \n", + "8 7.900000 4.400000 6.900000 2.500000 \n", + "9 4.300000 2.000000 1.000000 0.100000 \n", "\n", " target \n", "0 1 \n", @@ -548,7 +539,7 @@ "4 1 \n", "5 1 \n", "6 1 \n", - "7 2 \n", + "7 1 \n", "8 2 \n", "9 1 " ] @@ -608,75 +599,75 @@ " \n", " \n", " 0\n", - " 5.230361\n", - " 3.371515\n", - " 1.408195\n", - " 0.201252\n", + " 7.900000\n", + " 4.400000\n", + " 1.000000\n", + " 2.459848\n", " 0\n", " \n", " \n", " 1\n", - " 4.705658\n", - " 3.064075\n", - " 1.388975\n", - " 0.386298\n", + " 4.300000\n", + " 4.400000\n", + " 1.000000\n", + " 0.100000\n", " 0\n", " \n", " \n", " 2\n", - " 4.711709\n", - " 3.056369\n", - " 1.451635\n", - " 0.195365\n", + " 7.900000\n", + " 4.400000\n", + " 1.000000\n", + " 0.100000\n", " 0\n", " \n", " \n", " 3\n", - " 6.981074\n", - " 
3.274333\n", - " 4.803886\n", - " 1.623058\n", + " 5.499909\n", + " 2.023536\n", + " 3.788866\n", + " 1.262810\n", " 1\n", " \n", " \n", " 4\n", - " 5.999308\n", - " 2.927207\n", - " 4.040594\n", - " 1.389657\n", + " 4.300000\n", + " 2.000000\n", + " 1.000000\n", + " 0.100000\n", " 1\n", " \n", " \n", " 5\n", - " 5.698102\n", - " 2.521559\n", - " 3.288451\n", - " 0.966808\n", + " 4.300000\n", + " 2.000000\n", + " 1.000000\n", + " 0.197491\n", " 1\n", " \n", " \n", " 6\n", - " 6.776549\n", - " 3.012238\n", - " 6.285867\n", - " 2.134174\n", + " 7.900000\n", + " 4.400000\n", + " 6.900000\n", + " 2.500000\n", " 2\n", " \n", " \n", " 7\n", - " 7.900000\n", + " 4.300000\n", " 4.400000\n", - " 6.896603\n", - " 2.500000\n", - " 2\n", + " 1.000000\n", + " 0.100000\n", + " 0\n", " \n", " \n", " 8\n", - " 7.900000\n", - " 4.400000\n", - " 6.898989\n", - " 2.500000\n", - " 2\n", + " 4.300000\n", + " 2.000000\n", + " 1.000001\n", + " 0.142259\n", + " 1\n", " \n", " \n", "\n", @@ -684,15 +675,15 @@ ], "text/plain": [ " sepal length (cm) sepal width (cm) petal length (cm) petal width (cm) \\\n", - "0 5.230361 3.371515 1.408195 0.201252 \n", - "1 4.705658 3.064075 1.388975 0.386298 \n", - "2 4.711709 3.056369 1.451635 0.195365 \n", - "3 6.981074 3.274333 4.803886 1.623058 \n", - "4 5.999308 2.927207 4.040594 1.389657 \n", - "5 5.698102 2.521559 3.288451 0.966808 \n", - "6 6.776549 3.012238 6.285867 2.134174 \n", - "7 7.900000 4.400000 6.896603 2.500000 \n", - "8 7.900000 4.400000 6.898989 2.500000 \n", + "0 7.900000 4.400000 1.000000 2.459848 \n", + "1 4.300000 4.400000 1.000000 0.100000 \n", + "2 7.900000 4.400000 1.000000 0.100000 \n", + "3 5.499909 2.023536 3.788866 1.262810 \n", + "4 4.300000 2.000000 1.000000 0.100000 \n", + "5 4.300000 2.000000 1.000000 0.197491 \n", + "6 7.900000 4.400000 6.900000 2.500000 \n", + "7 4.300000 4.400000 1.000000 0.100000 \n", + "8 4.300000 2.000000 1.000001 0.142259 \n", "\n", " target \n", "0 0 \n", @@ -702,8 +693,8 @@ "4 1 \n", "5 1 \n", "6 2 \n", - "7 2 \n", - "8 2 " + "7 0 \n", + "8 1 " ] }, "execution_count": 8, @@ -941,7 +932,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 11, "id": "14bca1cd", "metadata": {}, "outputs": [ @@ -949,47 +940,31 @@ "name": "stderr", "output_type": "stream", "text": [ - "[2023-03-31T01:04:50.377220+0200][12004][INFO] Encoding fixed acidity 8821222230854998919\n", - "[2023-03-31T01:04:50.427480+0200][12004][INFO] Encoding volatile acidity 3689048099044143611\n", - "[2023-03-31T01:04:50.442050+0200][12004][INFO] Encoding citric acid 735380040632581265\n", - "[2023-03-31T01:04:50.457233+0200][12004][INFO] Encoding residual sugar 2442409671939919968\n", - "[2023-03-31T01:04:50.473234+0200][12004][INFO] Encoding chlorides 7195838597182208600\n", - "[2023-03-31T01:04:50.488234+0200][12004][INFO] Encoding free sulfur dioxide 3309873879720413309\n", - "[2023-03-31T01:04:50.501098+0200][12004][INFO] Encoding total sulfur dioxide 8059822526963442530\n", - "[2023-03-31T01:04:50.512236+0200][12004][INFO] Encoding density 3625281346475756911\n", - "[2023-03-31T01:04:50.523222+0200][12004][INFO] Encoding pH 4552002723230490789\n", - "[2023-03-31T01:04:50.532220+0200][12004][INFO] Encoding sulphates 4957484118723629481\n", - "[2023-03-31T01:04:50.540983+0200][12004][INFO] Encoding alcohol 3711001505059098944\n", - "[2023-03-31T01:04:50.547987+0200][12004][INFO] Encoding quality 3457201635469827215\n", - "[2023-03-31T01:04:58.399971+0200][12004][INFO] Step 100: MLoss: 1.3342 GLoss: 0.9783 Sum: 2.3125\n", - 
"[2023-03-31T01:05:04.973385+0200][12004][INFO] Step 200: MLoss: 1.2858 GLoss: 0.9031 Sum: 2.1889000000000003\n", - "[2023-03-31T01:05:11.741000+0200][12004][INFO] Step 300: MLoss: 1.186 GLoss: 0.7758 Sum: 1.9618\n", - "[2023-03-31T01:05:18.619270+0200][12004][INFO] Step 400: MLoss: 1.1481 GLoss: 0.6615 Sum: 1.8095999999999999\n", - "[2023-03-31T01:05:24.930108+0200][12004][INFO] Step 500: MLoss: 1.1661 GLoss: 0.6094 Sum: 1.7755\n", - "[2023-03-31T01:05:31.651906+0200][12004][INFO] Step 600: MLoss: 1.1902 GLoss: 0.5381 Sum: 1.7283\n", - "[2023-03-31T01:05:38.246164+0200][12004][INFO] Step 700: MLoss: 1.1305 GLoss: 0.5087 Sum: 1.6392000000000002\n", - "[2023-03-31T01:05:44.776216+0200][12004][INFO] Step 800: MLoss: 1.1131 GLoss: 0.4832 Sum: 1.5963\n", - "[2023-03-31T01:05:51.917105+0200][12004][INFO] Step 900: MLoss: 1.1014 GLoss: 0.4786 Sum: 1.58\n", - "[2023-03-31T01:05:59.098745+0200][12004][INFO] Step 1000: MLoss: 1.1479 GLoss: 0.4707 Sum: 1.6185999999999998\n", - "[2023-03-31T01:06:05.690366+0200][12004][INFO] Step 1100: MLoss: 1.1712 GLoss: 0.4693 Sum: 1.6405\n", - "[2023-03-31T01:06:12.549553+0200][12004][INFO] Step 1200: MLoss: 1.1199 GLoss: 0.4611 Sum: 1.581\n", - "[2023-03-31T01:06:19.575478+0200][12004][INFO] Step 1300: MLoss: 1.1525 GLoss: 0.4614 Sum: 1.6139000000000001\n", - "[2023-03-31T01:06:26.641319+0200][12004][INFO] Step 1400: MLoss: 1.1164 GLoss: 0.4671 Sum: 1.5835000000000001\n", - "[2023-03-31T01:06:33.249503+0200][12004][INFO] Step 1500: MLoss: 1.1356 GLoss: 0.4577 Sum: 1.5933\n", - "[2023-03-31T01:06:40.025759+0200][12004][INFO] Step 1600: MLoss: 1.1367 GLoss: 0.4541 Sum: 1.5908\n", - "[2023-03-31T01:06:46.754777+0200][12004][INFO] Step 1700: MLoss: 1.0896 GLoss: 0.4524 Sum: 1.5419999999999998\n", - "[2023-03-31T01:06:54.036939+0200][12004][INFO] Step 1800: MLoss: 1.075 GLoss: 0.4471 Sum: 1.5221\n", - "[2023-03-31T01:07:00.554405+0200][12004][INFO] Step 1900: MLoss: 1.1154 GLoss: 0.4495 Sum: 1.5649\n", - "[2023-03-31T01:07:07.289610+0200][12004][INFO] Step 2000: MLoss: 1.266 GLoss: 0.454 Sum: 1.72\n" + "[2023-04-06T19:09:16.010623+0200][45392][INFO] Encoding fixed acidity 8821222230854998919\n", + "[2023-04-06T19:09:16.022381+0200][45392][INFO] Encoding volatile acidity 3689048099044143611\n", + "[2023-04-06T19:09:16.035202+0200][45392][INFO] Encoding citric acid 735380040632581265\n", + "[2023-04-06T19:09:16.046041+0200][45392][INFO] Encoding residual sugar 2442409671939919968\n", + "[2023-04-06T19:09:16.057037+0200][45392][INFO] Encoding chlorides 7195838597182208600\n", + "[2023-04-06T19:09:16.069198+0200][45392][INFO] Encoding free sulfur dioxide 3309873879720413309\n", + "[2023-04-06T19:09:16.079198+0200][45392][INFO] Encoding total sulfur dioxide 8059822526963442530\n", + "[2023-04-06T19:09:16.089218+0200][45392][INFO] Encoding density 3625281346475756911\n", + "[2023-04-06T19:09:16.100269+0200][45392][INFO] Encoding pH 4552002723230490789\n", + "[2023-04-06T19:09:16.108269+0200][45392][INFO] Encoding sulphates 4957484118723629481\n", + "[2023-04-06T19:09:16.118284+0200][45392][INFO] Encoding alcohol 3711001505059098944\n", + "[2023-04-06T19:09:16.128449+0200][45392][INFO] Encoding quality 3457201635469827215\n", + "[2023-04-06T19:09:23.491031+0200][45392][INFO] Step 100: MLoss: 1.3299 GLoss: 0.9771 Sum: 2.307\n", + "[2023-04-06T19:09:31.041829+0200][45392][INFO] Step 200: MLoss: 1.2726 GLoss: 0.9268 Sum: 2.1994\n", + "[2023-04-06T19:09:39.672711+0200][45392][INFO] Step 300: MLoss: 1.2003 GLoss: 0.8693 Sum: 2.0696\n", + 
"[2023-04-06T19:09:48.042293+0200][45392][INFO] Step 400: MLoss: 1.1578 GLoss: 0.8343 Sum: 1.9921\n" ] }, { "data": { "text/plain": [ - "" + "" ] }, - "execution_count": 10, + "execution_count": 11, "metadata": {}, "output_type": "execute_result" } @@ -1002,8 +977,11 @@ " lr = 5e-4,\n", " weight_decay = 1e-4,\n", " batch_size = 1250,\n", - " n_layers_hidden = 3,\n", - " dim_hidden = 256,\n", + " model_params = dict(\n", + " n_layers_hidden = 3,\n", + " n_units_hidden = 256,\n", + " dropout = 0.0,\n", + " ),\n", " num_timesteps = 100, # timesteps in diffusion\n", ")\n", "plugin = Plugins().get(\"ddpm\", **plugin_params)\n", @@ -1012,30 +990,28 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 12, "id": "83064f94", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "" + "" ] }, - "execution_count": 11, + "execution_count": 12, "metadata": {}, "output_type": "execute_result" }, { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXQAAAEGCAYAAAB1iW6ZAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/P9b71AAAACXBIWXMAAAsTAAALEwEAmpwYAABjcElEQVR4nO2dd3gUx/nHP3NFvfeOJJAoQhQhwPRmbMAFA3GPDe6OjUvc4sRJXOKfe3DciR1X3G3ADQyYYnoTRVShhnrvvd3t74+9OyRUkECd+TyPHt3tzu6+u3f3nXfeeWdGKIqCRCKRSPo+mp42QCKRSCSdgxR0iUQi6SdIQZdIJJJ+ghR0iUQi6SdIQZdIJJJ+gq6nLuzh4aEEBwf31OUlEomkT3LgwIECRVE8W9rXY4IeHBxMTExMT11eIpFI+iRCiNTW9smQi0QikfQTpKBLJBJJP0EKukQikfQTeiyGLpFIJG1RX19PRkYGNTU1PW1Kj2BjY0NAQAB6vb7dx0hBl0gkvZKMjAwcHR0JDg5GCNHT5nQriqJQWFhIRkYGISEh7T5OhlwkEkmvpKamBnd394tOzAGEELi7u3e4dSIFXSKR9FouRjE3cz73fk5BF0IECiG2CCFOCCGOCyEeaqHMdCFEqRDisOnvnx22pJ3EF8fz5sE3Kakp6apLSCQSSZ+kPR56A/CooijDgEuA+4UQw1oot11RlFGmv+c61cpGpJWl8cHRD8ipyumqS0gkEkm7+eSTT1i6dGlPmwG0Q9AVRclWFOWg6XU5cBLw72rDWsPZ2hmAktqSnjJBIpFIeiUdiqELIYKB0cDeFnZPEELECiF+FUJEtHL83UKIGCFETH5+fsetBVysXQAp6BKJpOtJSUlhyJAhLFmyhPDwcG6++WY2btzIpEmTCAsLY9++fc3Kz5w5kxEjRjBr1izS0tIA+O677xg+fDgjR45k6tSpABw/fpxx48YxatQoRowYQUJCwgXb2+60RSGEA7ASeFhRlLKzdh8EBiiKUiGEmAf8AISdfQ5FUd4H3geIjo4+r7XvzIJeWlN6PodLJJI+yLM/H+dE1tmyc2EM83Pi6ata9D2bkJiYyHfffcdHH33E2LFj+fLLL9mxYwc//fQTL7zwAtdcc42l7AMPPMDixYtZvHgxH330EQ8++CA//PADzz33HOvXr8ff35+SkhIAli9fzkMPPcTNN99MXV0dBoPhgu+pXR66EEKPKuZfKIqy6uz9iqKUKYpSYXq9FtALITwu2LoWkB66RCLpTkJCQoiMjESj0RAREcGsWbMQQhAZGUlKSkqTsrt37+amm24C4JZbbmHHjh0ATJo0iSVLlvDBBx9YhHvChAm88MILvPzyy6SmpmJra3vBtp7TQxdq7syHwElFUZa1UsYHyFUURRFCjEOtKAov2LoW0Gv12OnspKBLJBcR7fGkuwpra2vLa41GY3mv0WhoaGho1zmWL1/O3r17WbNmDWPGjOHAgQPcdNNNjB8/njVr1jBv3jz++9//MnPmzAuytT0e+iTgFmBmo7TEeUKIe4UQ95rK/AE4JoSIBd4EblAU5bxCKu3BxdqFsrrObX5JJBLJhTJx4kS+/vprAL744gumTJkCQFJSEuPHj+e5557D09OT9PR0kpOTCQ0N5cEHH2T+/PkcOXLkgq9/Tg9dUZQdQJsZ7oqivA28fcHWtBNna2fpoUskkl7HW2+9xW233carr76Kp6cnH3/8MQCPP/44CQkJKIrCrFmzGDlyJC+//DIrVqxAr9fj4+PD3/72twu+vuhCR7pNoqOjlfNd4OKuDXdR1VDFF/O+6GSrJBJJb+HkyZMMHTq0p83oUVp6BkKIA4qiRLdUvk8O/XexdqG0Vma5SCQSSWP6pKDLkItEIpE0p08Kuou1C2W1ZRiMF563KZFIJP2FPivoCgrldeU9bYpEIpH0GvqkoMv5XCQSiaQ5fVLQzaNFD+Yd5LLvLyO1LLVnDZJIJJJeQJ8W9JUJK8muzGZt8tqeNUgikVxULFmyhO+//76nzWhGnxb0I/nqyKrN6ZtbLVtZX8mymGXSi5dIJP2ePinozjbOltcOegfiiuLIKM9oVq7WUMtDmx/i4+Mf86/d/6KnBlFJJJK+y7/+9S8GDx7M5MmTufHGG3nttdea7N+0aROjR48mMjKS22+/ndraWgCefPJJhg0bxogRI3jssceAlqfR7UzaPX1ub8JR74hWaDEoBu4acRevH3idzWmbuTXi1ibl3j70Nntz9jI9YDq/Z/zOzqydTPaf3KRMZX0lcUVxjPEe0523IJFIOsKvT0LO0c49p08kzH2pzSL79+9n5cqVxMbGUl9fT1RUFGPGnNGKmpoalixZwqZNmwgPD+fWW2/lvffe45ZbbmH16tXExcUhhLBMmdvSNLqdSZ/00IUQlkyXawZdQ7hrOOtS1jUrty1jG5P8J7Fs+jICHQN58+Cbzcp8FfcVt6+/naKaoi63WyKR9C127tzJ/PnzsbGxwdHRkauuuqrJ/lOnThESEkJ4eDgAixcvZtu2bTg7O2NjY8Mdd9zBqlWrsLOzA1qeRrcz6ZMeOqipi242brjZuLEwbCEv7XuJI/lHcLByoK
[... base64-encoded PNG data for the regenerated matplotlib figure omitted ...]
j3bn1LE0YxShv5zXTOhTcgrZ3yIGxNC3VmxO4W3Nifg4WCFg7WOQ+mtX//zPanotYLHLx9CWU0DKYVVlNXUWz7/+NyWK7AdiflcNsyHf1w5jPSiarYnFrDueA6KAu/eHIVA8NTqY7y2IZ73fk/ijUbef029gYKKOvycVQ+9wai0ObK5sraBeW/u4Invj+DlaM2cCB+MSutjCMwV9v0zBlJVZ2C7qbXRGvtTiiwVypGMEjacyCWnrIa1R1teYawtSqrq+OuqI2225MyVm/k7nJxfYak8YhrZkl5UTXlN/QWNHamqa2hybONn31X0PUG3cwfFCGWZ5y7bjZg97sB2Nqc0GsG/rxvFZ3eM48ZxQXx3zwT+Nm8IQgie+/mE5YtgFrfRger5vRxtiPn7bF5aNAIrXdOPzzxStrLOgJejNVPDPYn0d+Yvc4cwO8IbUOPR9QaF/IpatiXk83NsFseySi3TCO9LKWL98VwyS6qZPtiTzJJqtiXkMy7YnftmDGR/SjGTXtrMf7cmsfZoNkJAeU0Da49mE5NSjI1ew7XRAVTVqbHyP00fiLVOw52fxhD5zAb+t0OtVMK9HbHWaSwhlzBTSunoQBeOZpZS12AkraiKIHc7Alxt0WuFpd/gXKSYwhzjgt04lFbc5EdVUFHLqoMZrDqY0WImxo+HszAq8PcrhlJYWUdlXQOudnpiUor55WgWNfVGS9peSkEl1XUG0oqqCPN24Ln5EbjZW3Eko5SrRvoxMtC5VQ+9rsHIr8dymD3Mm6nhajbVkYwSDqQWY1Qg2N2OhNxyFEVpYn9OaQ3pRdWMDXHj8ggf3O2teHtzAt/GpBPm5cD0wV789shUfn9sOqeen8M1o/w42OgZmCtUPxdbSxpvWx2wOxILOJldxsOXhvHT0skEmL5j2a3E0c2Vw7xIXxxtdE2yllpid1IhVloNLnZ6Np3MI840pqNxB3Njvt2f3myMh5mt8fl8tS+d/25NavV65vNnFKuCPeeN7Xy6KwVQW0Y6jcDVTk96cRXf7E/n4W8OW8Z7dIT88lrGPr/REoaFrs9Bh74o6E5+6v+yjtfgXUnUABfgjKi2h9nDvC2e/chAF+6eOpA/zw5n7+kifj2WQ1VdA2uOZONmb2Xx/IFmA6bMBDW6tpejDdHBbvz8wGQ8HKy5fVIwQW523D9zEKCmAWYUV1PbYOTdLUmW8+47XcSHO5IJdrfj6asiAFV8xgxw5dYJwWx+dBqzhnjx7w3x/BSbxZwIH0I87FluEviRAS6MDHCx2DE5zIM/XjKAAe52jAx04VhmGY42OhysdXg72XC6oJKUwkqLoI8KcqGuwcjJ7DJSC6sIcrNDp9VweYQPX+xNtYhPTb2BPcmFLcZbTxdWotMIrhrpS2FlXZM49qvrTvHIt7E88m0sz/5yvMlxB1KL+WRXCiMDXVg8IZjh/k5cNyaQmUO8OZBaxJY4Ndy0PaGA3UmFTH/td1769SSKAoO9HRnk5cgvD0zhufkRPDAzjNGBrsTllLdo447EfEqr67lqpJ+lcjuSUcre5CL0WsG10YEUV9WzK6mQYf9cb8kwMXc6jwt2w0qn4U/T1Ur2WGYZ80epv40AVzuCPeyx1mkZM8CVvPJai0dqFhVfFxtGB7rgZm/F2mOt55X/fiofB2sd988YhI+zDb7Oarggu1F/S3J+BVe/vYNPd6mhKnsrLW72Vkwe5MG2hHwUReFQWjGPfHOYd7YkNjn/7qRCRgW5MDrQxZLhtGC0P4fSSppVNGU19fx19dFWh82bW3Ar9qRS3IKXXlNvIDm/AjsrLUWVdRxKK6GuwWgR7L3JhYwIcGagpwMZxVWW6x86j/EiG0/mUllnaDIBnTnU2VUpi9AXBd1RjUVT3vrC0D1BVJArIwOcmXSO3PZzcePYQMK9Hbjvi4NMemkze04XsnTGoHb1irvbW2FnpQXA09G6yb4xA9zY9sQMy/w2u5MKMDt+647n4GZvxcSB7vxwKJODaSXcNimEEA97BpjCOGMGqBVPqKcDLy6KxFqvobS6nitH+HHTuCAS8iqw1mt4+NJwBnk5oNUIHKx1hHk58o8rh/HT0sl8ettYAt1sLRWPt5M1G0+qIjltsBdwZv76d7YkWtIyAZ6/Zjju9tb86YsDvLMlkXlvbOeG9/cw9v82Nps1MKWgkiA3OyYO8kAj4Kq3dliyKHYkFjBjsCeLJwzgh0OZlimU1xzJZtF7u6htMPKXOYPRaAQ/3T+ZlxZFMjbYleKqeuJzK3C103MwrZgPTJ2Rn+5WPckwbzUsZGul5dYJwbjZWzE6yAWDUbF0Jjbm59hsnG31TAnzRK/VEOHnxL7TRaw7ls2IABdLH8OzPx+nut7AK+vjUBSF/aeLsLfSMtRXvd6dU0I59I/ZrHt4CvdMG9jsOubnaQ49nS5URS/QVa0o5wz3YdPJXE7llHPvigNNBhopisLvp/KYPMjDkrbrawoXmDtGj2SUsPC9XRzJKOWn2CxTFoctQgimhnuSXVrDC2tPsuDdXaw6lMnrv8VbWgmphZUcyypl4kB3RpicAGdbPf+8chhONjru/DSmiajvSizAYAoPthSbT8qvwNFGR1WdgY93Ng/JJuRWYFTO9MlsPKmumZCQp4ZdjmaWMi7EnQBXW9KLqi1Cfz79MOZWXEzqmewli4cuBb0R1g5g7QxlvUvQ7a11/Lh0MuNC3M5duA10Wg1f3nUJT8wZzKhAFz69bRy3Tw5p17FCCItYep0l6Gb8TB0y200paOZyUUGujAt2o6ymAScbHX8YEwDA7KHeeDpaWzxo9Rgbnr06gnBvB2YM8eT2ySH8tHQSWx6dzoSB7tjoVcEZM6Dp9Asudlas+tMkS+zfy0m15emrIiwVhp+zDZ6O1mw4kYu3k7VlPh4XOyvevmk0Oo2GV9efos5g5JVFIxju78Tzv5xs0omVUlhFsIc9Az0d+OaeCUQNcOWNTQnEpBSZQklePHRpOHZWOv69QRX6b2PSCXSzZevj05k4UK2UNRqBEILoYFfLuR+aFUa9QWFzXJ5l9LCVVkOwe/OW2ahAFzQC/vnjMX45ksWBVDUlc0tcHj/HZnHFCF9L2GxEgBpqSiuq4uFLwwg3VRDxuRV4OVpzJKOUzXF57E8pImqAK7pG4yJc7a0Y4uPUZKyEmSE+jthZaS0d1PE55ThY6wgwNfuvHOFLVZ2Ba5fvYt3xHN7afCbeHp9bQXZpDTOGeFq2OdnosLPSkl2qjoO4Z8UB7K10XBHpy9HMUlILq/A1CdbUcPW4D7afZkqYB78+NAWDovDZ7hRO5ZRz3X9342SjZ/4of0YGqhXYhFB3XO2t+OaeCWgELHxvF6+tP0V1nYGt8ep3tt6gtBibT8qrYFywG3OH+/DxrhTLPDWKorA5Lpctpg79S4ep4ceNpkVwkvIrOJ6ldsyOCnQh0M2O7NJqyxiOjo7orqhtYGdSIU42OtKLqskrq6HeYOTH2EysdRp8nGWnaFOcfHudoHcmHg7W3Dd9EB/fNs7yo2gvFkF3alnQHW30ONnoLFPu3jxe7awdM8DVUhnd
NH4A9qZ55B+fM5h1D02xTJFgZmFUABv+PA07Kx1ajWBEgEuTMu/fEs1r145sdn1PR2tLWOr2ScH865rhLJ4YbNkvhGC0qcP0nqkDsdFrLfuig93Y8th0Dv5jNpsfnc51YwP5z/Wj0WoEL5sybcwpi+aWxdhgN565OgJFgX/8qIZYJg50x83eitsnBbPueA6H00vYnVTI5cN8LPfdmFAPB1zs9AS62XLDuCCLCL+yaARTwjyIDHBuIrBm3B2seeemKMprGlj65SEWvbeb0f/6jXtWHGCorxNPzj0zW2iUqUL769yhTAnzxMvRGicb1ZbXrx9FkJsd931xkLic8maziLaFTqthZICLxcuMyykn3NvB0uIbH+KOh4MVZTUNjA5y4cfDWeSVqx60WQCnhXs1+Xx8nW3IKqnmoa8PUVRZx39vGcOVI3ypazASl1NuyeLwd7FliI8jAa62vHnDaIb6OnH5MB8+3ZXCVW/vwKjAN/dcQoiHPaMCXbHVa7nM1Ncz1NeJH5dOZu5wH97eksiDXx9iW3w+s4Z44Wyrt3jXpVX1fLorhboGI6cLKgn1tOf+GYMor2ngM1Ns/Jcj2dz+SQzLfovHVq9l4kB1gZwsU0uhrsHIL0fUEO6IAGcCXe0wKmBUIHqAK6mFVRRUnOkEzi2r4WHTvTdm3bFsJry4ids/2U9dg5H7ZqjhzZjUYv666ig7Ewt5bn5EixVvZ9H829sXcPSF8t4VQ+8tnPHQW/cC/F3tOJldhoeDNddGB7DmaBazh3kT6mHPCwsiuXKkr6WstU6LtYO21XO1RnvihGMGuDGmBXGaG+lDRnE1N7YymtfNNOcOgI+zDXdNDeXNTQmEesYzNcyDqjoDIaaBS6BmFEX6O3M0sxQPB2sGmVobSyaF8N9tySz98iB1BiOXRfg0uxaonvqjs8NxtNFjo9cyY7AnlbUGwrwd+eDW6DbT/uZG+jJzqBcnssooqqxjR2IBWSXVvLRwBE42Z0YLzhvug/+fJlhCTEIIIvycyS2vYeJAdz5cHM3X+9NJK6qyxMrby5gBrry3NYmqugZO5ZYzd/iZz1erEfzfgkhq6g2MDHBhxr9/57NdqTwyO5xvY9IZGeDczKP0c7FlU1wu9QaFFxZEMtzfuUmIr3EWx8e3jUWn0VjmSbp7Wii/nczlsmHePHt1hKWV5mZvxb6nZlkWpAG18n/jhtEM93Pm/9aeBODe6QNxtNHx+6l8ahsMPLnqCL8ey6HBqFDbYGSgpwPD/Z2ZOcSLD3ecZmFUAC+vi2OIjyPzR/njYqfHx8kGK62GOoOaPXY4vYSfDmfh4WCNr7MNAY36q26ZMICY1GIOpZUw2+TZv7kpgR8OZzHc35k7p4QC6uR7z685SaiHPYfTS/BwsGLJxGBe/y2el36NI62oiodmhXH92HOPUL8Q+qagO/lDUlxPW9ErmRTmwd7TRXg7tSHoLraczC4j1MMePxdbNvx5mmXfTeO79gvXHhaMDmDB6IB2l79v+kDSi6p4c1MCb25SQwYRfs5Nyswf5cfRzFIuCXWzeKdu9lb8YUwAX+xNw83eyhL2aYlbJgRbXr99U5Sl/6FxC6I1rHVaSyx71lDvFsvotJpmlduy60diVFRxD/NW+yLOhwkD3Xl7SyIrD2ZSUlXPYG+HJvsvb1SRzYnw4cMdp3Gx05OcX8kbN4xqdj4fJzVT6pJQN24cFwiAt5MN/i62ZJpi6GZ8z0rRiwpy5fA/Z+No03zoe0vbAO6YHMK2hHy2JxQwLcyTAFdbfjicxexl2ywjXc2ZLebMnQdnhbHovV1MfnkzRgW+uHN8k/4tPxcbUgqruHKEL4fTSyisrGPWEC+EEJZMNXd7Ky6P8EGnEcSkFDF7mDc5pTV8F6OOMv3lSDZ3TgmltKqe13+LZ8ZgT5bfMobSqnpqG4zY6LWMDHRh3+kiZg7x4qFZYS3eX2fSRwXdFypy1QFG2r55C13FjMFezBjs1WYZc5PYvM5qX8dGr+X160exKCqArJJqJgx0b5ZtdNVIP5b9Fm/xsszcMTmEL/elMXOIV6vZQ2fTlU3mxpwthufLuBA3nGx0LP9dFb3BPk6tlv3nVcPYlbSd59ecxN/F1jLquTGDvByw0Wt4YUFkk876qAGuZJZU43uOgTOtCXdraDSCt2+K4nB6CUHudgS527H8j1E8+m0s0QNcCfGw5zvT6kvmKbJHBbrw60NTeH9bMs62+mbJCv6utqQUVjEuxA0fJxtyymosHbO+zjZoNYIIf2ds9FqmhXvyya4ULovw4fM9qRgVhZvGB/Hl3jTSi6r48XAmlXUGnpgzBGudFi+nM5X8nAgfiivr+Pe1I5uFLbuCvqmGTn5qLnpl3pk0Rkm7MefBNg5L9Acmh7WeYeTtZMOBv8/GRt9UjEM9Hfj0tnEtTu/QX9BrNcwa6s1q01wzbd2rr7Mt/7dgOEu/PMRdU0Ja7Bu4fXIIC6L8m4X1xgW78nNsVpP02c7C2VbPtEb9SXOG+zIuxB1bvZb9KUV8dyADVzt9k3BcuLdji/04AAEudmhEIeHejoR5O5gEXW3V6bQarh8byHhTn9LLfxjB1W/tYNF7uwC4e2ooN5sE/T8bE9hyKo8Zgz0tM7I25vbJIe1OaugM+qagO5pz0bOkoJ8H5iZxfxP0c2Fr1XJ4pKMdz32Ry4apgu7paN1E9FriyhF+jPB3aTL2oTF6rabFPprrxwYx2MeJgC6cq6Qx5vuYMNAdFzu9xTtvD0smBRM1wAUbvZZBXg5sTyggMuBMmO6FBWcm5PNwsOaDxdG8sTGBP14ywPJ9GRngzMqDGTha6/jz7PBOuqsLo28KupOpGdiPM126kimDPLlxXBATTL39kv7P1HBPrHQayzQK5yKohTTMc2Gl01xw2u75oNdqeP36UU06VM/FUF8ni0e9ZGIwQ32c8HBoOTMM1D6Z92+NbrLt/VujyS6tYZivU7NR2z1FHxV001qhMtPlvHC20/PiwshzF5T0G+ytdfzfNcO7dNh5T3KufqO2GOBuzwD3jrdWvZ1s2kw+6An6pqDbuYPWqtfN5yKR9GaujQ7saRMkXcw52wlCiI+EEHlCiGOt7BdCiDeFEIlCiCNCiKjON7PZRdVc9OKULr+URCKR9BXaE/j5BJjTxv65QJjp727gvQs3qx0MnAEnfoTjq7vlchKJRNLbOaegK4qyDWhrzbL5wGeKyh7ARQjRPHm1s5nzMgReAqvuhi0vQHXHZ0STSCSS/kRnxND9gfRG7zNM25r1WAoh7kb14gkKusARiXobuPEr+OVh2PoybHsV/EbDlEfByh52vQUeg2HgTHAPBdcQNVQjkUgk/ZRu7RRVFOV94H2A6Ojo81sGpDF2bnDdZ5AdCyd/gRM/wNc3qfscvOH0Ntjzjum9Dwyapf6Fz1FFXyKRSPoRnSHomUDj7vMA07buw3ek+jftCTjwCdSUwoT7wdgA2UegIF4V97g1cPgLsHGBMUsg4hrwHaV67rUVoLcFTccnopJIJJLegGjPenlCiGDgF0VRhrew7wpgKTAPGA+8qSjKuHO
dMzo6WomJiemwwReE0QBpu2HPe3BqrTp9gJM/uIWq2wfOhOu/AF3bI+kkEomkpxBCHFAUJbqlfef00IUQXwHTAQ8hRAbwNKAHUBRlObAWVcwTgSrgts4xuwvQaCF4svpXWQgJ61WvvSgZIhbA0e/gmz9C6DQIuww8un52NIlEIuks2uWhdwU94qGfi11vw2//BMUANs5w649qR6tEIpH0Etry0HvHBAS9hYlL4akcWBqjCvqn89Vcd4lEIukDSEE/G52VGmpZsgbcQuDbW+GnB8HQfFFaiUQi6U1IQW8NlyC4cyNM/jMc/BS+vF4OXpJIJL0aKehtodXDpc/AVW/C6a3w3mRI2dnTVkkkEkmLSEFvD2MWwx0b1HDMZ1fDwRU9bZFEIpE0Qwp6e/EfA3f/DsFT4Kel8PkfIKfFCSglEomkR5CC3hFsnOHm72D2c5AZA/+7FOI39LRVEolEAkhB7zhaPUx6CO7fD57h8PWNcOrXnrZKIpFIpKCfNw6esPgX8BkB398OmQd62iKJRHKRIwX9QrBxgpu+AXsP+GyBHIQkkUh6FCnoF4qDl+qpuw9UByEd/KynLZJIJBcpUtA7A9cBcPt6CJkK6/4GpRk9bZFEIrkIkYLeWeis1AFIikGdsXH//9Q51iUSiaSbkILembiFwJX/gdJMWPMorL4Hemg2S4lEcvEhBb2zGXk9PBYPlz4Lcb/A3v9C3Fooz+lpyyQSST+nW9cUvWgQAiY+oC6ese4v6jYrB5j1Txh3t1ysWiKRdAlS0LsKjRauXwFJW8DJD3a9Cb8+AVoriO69izpJJJK+ixT0rsTRB0bdqL4OngxfXgdrHwfv4RA4tmdtk0gk/Q4ZQ+8uNFpY+IHqrX97K1Tk9bRFEomknyEFvTuxc4MbvlAXyvj6ZkjeKldCkkgknYYU9O7GJxKueQeyY9W51d8eCyd/7mmrJBJJP0AKek8wfBE8kQzXfgI6G3Ug0q63e9oqiUTSx5Gdoj2FtQNELIAhV8HK22HDU1CRA+FzYMAkmdookUg6jPTQexqtTu0sHXqV6qV/cgVs/ldPWyWRSPog0kPvDeis4frPoaYUNvwDtv8b0veBtSNMewL8Rve0hRKJpA8gPfTehI2zOhfMhKVQXQIZ++GjuXD0+562TCKR9AGkh97b0Gjg8v9TX1fkw7e3wMo7IHETTHkE3AZCXYW6uIZEIpE0Qgp6b8bBExb/DFtfge2vQeyXoNGBsQGGXAlXLANH7562UiKR9BKE0kPTu0ZHRysxMTE9cu0+SUk6xK9TF89QDLD3fTUTZtClEH07DJwpM2MkkosAIcQBRVGiW9onPfS+gksgjLvrzPuoxbDvAzjxgzpNr3MQaPUw+o8w+c9S3CWSixDpofd1Gmrh8JdweitUFkDKdjUc4z9G3a/RQtAE8ItSUyQlEkmfRnro/RmdtTodb/Rt6upIW1+BHa+rXntjnAPVMkETwXckWNn1jL0SiaTLkB56f0RRoL4ahAbqqyBpMxz4RPXeQZ2TfcAkGHEdDLtGirtE0odoy0OXgn4xUZYF2UdUYT+1FoqSwSsC7livDmKSSCS9nrYEXQ4suphw8oPBc9Q89wcOwnUrID8OVt8LDXU9bZ1EIrlAZAz9YkUIGHa1Ku7rnoR/D4aBM8DBB8IuhZBpaoeqRCLpM0hBv9gZfy94hMHBFZB5AMpzYc874DkEbvgSHH3BUAu2rj1tqUQiOQdS0C92zIOTBl2qvq+vUTNk1j4O708HQx0oRpj0kDp3e1URRN0CXkN71GyJRNIcKeiSpuhtIPIP4B+lzvzo5A9VBbDtVXW/1kr14N0GQtAlMPs5sPfoWZslEgnQziwXIcQc4A1AC/xPUZSXztq/BHgVyDRteltRlP+1dU6Z5dLHyD8Ftm5qXP3QCnV634TfwM4dRt2kivqI69V1UyUSSZdxQWmLQggtEA/MBjKA/cCNiqKcaFRmCRCtKMrS9holBb0fkH0EVt2tZsqggN5eHbwUNht2v6vmt1/6LLgO6GlLJZJ+w4WOFB0HJCqKkmw62dfAfOBEm0dJ+j++I+D+PerrvJOwfRnseRd2v6167vU1ELcGfEZAyBR1/hm3ELUiiPkIQqepUxIk/gahM8B9YM/ej0TSx2mPh/4HYI6iKHea3t8CjG/sjZs89BeBfFRv/s+KoqS3cK67gbsBgoKCxqSmpnbSbUh6DUXJkLpLXVKvthz2LofMg5C2R50l0sEHKvMAob43o7WGyGsBRV3oQ2sFhYngGqymUGr1auaNk69aviwbco6CZzi4DFA7d+uqoKpQDf/obdVyhgb1Ohpd70zDNDTIOXYkHeJCQy7tEXR3oEJRlFohxD3A9YqizGzrvDLkcpFRmgnHV0PucXWe90kPq4t2lKZD6HTVq0/cCFYO6mpNhlpwDYHiFDDWnzmPRzg01EBJ2pltnkPVnPr9H6oduADuYWpWTt4JVdCFFuw91fNaO8HYO9VQkM4WQqbC1pfUlaFG3axWKNVFcMl96tQJh79SM320OtDo1crFyQ8Cx4ODt1pxZR1UbTfUQ0G8ek9thZqMBlj/FBz8DK54Te2HaC/1Neo9Wdm3/xhJv+FCBX0C8IyiKJeb3v8VQFGUF1sprwWKFEVxbuu8UtAlraIoqoBqtFBTplYCxgZI36t6+1b2atpkQLTaWbv/f2ocP2gijLhWXekp+7A6E6XvSLB2UL33ilzVc889Aak7zlxPZwsN1eATqXr9AAhw9IG6Sqgta91Wna0ptdPQdLvWSu0k9hul2liaoYaX6sqhMEmtqHKPmSqt0xB2GQSMVTuaC06pFcPIG2HwXLVszMdqOQdvtRLU6NW5eMbfq7ZiEjeqz8w5EHyGq+vT5hyFskw1U8lvtHr/jj5NxxTkx8P6v0FlPkRcA4Nmg9cwdeUs82dRW65ODSEEFCSoZa3swTtSLVeWDZkxakvJd0THP2+jQf18ddatl6ktV1t/igK2LuDoBzqr9p2/rhJO/KT27bSUkdVQq/71kVXALlTQdahhlFmoWSz7gZsURTneqIyvoijZptcLgL8oinJJW+eVgi7pNIxGKElRxbG988Dnx6veemmG2nIImQajb1YXEtHqoTxH7fB19Iar3lDFylCvthYM9aooZ8aox+tt1SmKDaaWhHMA7H0Pjv+gLheot1M9+sJEVejdBqqiOvIGtUWw/TV1YFdZhlqpBE1QBezo92daJz6RMGAyVOSo91lVAEe+VVsreju1JWHGvKpVS2it1f6Msiz1r6ZUFTK3gWoro/E5tFZn7tk5SBX1vONnyth5qJVIddGZbR7hauvIawjYe0HsV2prSm+npsS6hkDQeMg8BOVZaqsm+wjUV6r3GHgJBI5Tw2v7/gvxG9SWUWmGei3LfVipi7p4DVM/L43+TAtKo1Mr/8RN4B0BJalqxWbvpbacTv0K4Zerawcc/lJ9b6iFuS/D6FvUe9bbqt+lsizY8gKgwND5qnOgGAGhVq5Ovur1Ejao/UjGBvAdpT7T+PXqX1WBWmELrRpu1Ohg2Hz18z8PLnhyLiHEPOA/qG
mLHymK8n9CiOeAGEVRfhJCvAhcDTQARcCfFEWJa+ucUtAlvR5FubCFQoxGVaQdvFXvs6ZUFTatvuVrVRWBvfuZbWXZqhjauasdxmfbUlWkhmxK0lSBsHNTvdisQ+oxvqPUhVEKEtRWjqOvWgklb1W9epcg1bYxS9QwWGkGnN6mtgjMQq7RqSGqjP1qKCxigTqyuCJPncVTb6OKr18U5BxRha0kXQ07KQbwHq6GphpqVdHOjlVtdA5Uxb+mVBVdOzdI36+OVm6oVu9Po1P7YjR6NXzlE6luqypS7+fUGlVwW6q8bF0h7HI15Ka3g7F3wI7/qNceOBMS1qvHWTtDxHz13pM2nzlea6Wm6daWmVqLerV11RIafdOwYOPtwZPUii9xoxoCdPRWvxejboIJ97X2zWkTOduiRCLpXuqqoDwb3EKbV0SVhaqAt1RZGurViiH7CARPViuPc6EoqjhbWlANqod8dsWpKGoZnZUadss7AeFzVK/baITDX6gtIKFVK5rqIvX1pIfUii9j/5lQoNGgVmplmWq5kOlqXwyK2jqorVBbQl0wi6kUdIlEIuknyOlzJRKJ5CJACrpEIpH0E6SgSyQSST9BCrpEIpH0E6SgSyQSST9BCrpEIpH0E6SgSyQSST9BCrpEIpH0E3psYJEQIh84n/lzPYCCTjanM5B2dZzeapu0q2P0Vrug99p2IXYNUBTFs6UdPSbo54sQIqa1UVI9ibSr4/RW26RdHaO32gW917auskuGXCQSiaSfIAVdIpFI+gl9UdDf72kDWkHa1XF6q23Sro7RW+2C3mtbl9jV52LoEolEImmZvuihSyQSiaQFpKBLJBJJP6HPCLoQYo4Q4pQQIlEI8WQ3XztQCLFFCHFCCHFcCPGQafszQohMIcRh09+8Rsf81WTrKSHE5V1sX4oQ4qjJhhjTNjchxG9CiATTf1fTdiGEeNNk2xEhRFQX2TS40XM5LIQoE0I83BPPTAjxkRAiTwhxrNG2Dj8fIcRiU/kEIcTiLrTtVSFEnOn6q4UQLqbtwUKI6kbPbnmjY8aYvgOJJvsvYO28Vu3q8GfX2b/bVuz6ppFNKUKIw6bt3fm8WtOI7v2eKYrS6/9Q1zJNAkIBKyAWGNaN1/cFokyvHVEXzR4GPAM81kL5YSYbrYEQk+3aLrQvBfA4a9srwJOm108CL5tezwN+BQRwCbC3mz6/HGBATzwzYCoQBRw73+cDuAHJpv+upteuXWTbZYDO9PrlRrYFNy531nn2mewVJvvndoFdHfrsuuJ325JdZ+3/N/DPHnherWlEt37P+oqHPg5IVBQlWVGUOuBrYH53XVxRlGxFUQ6aXpcDJwH/Ng6ZD3ytKEqtoiingUTUe+hO5gOfml5/ClzTaPtnisoewEUI4dvFtswCkhRFaWtkcJc9M0VRtqEuXn729TryfC4HflMUpUhRlGLgN2BOV9imKMoGRVHMKx/vAQLaOofJPidFUfYoqip81uh+Os2uNmjts+v0321bdpm87OuAr9o6Rxc9r9Y0olu/Z31F0P2B9EbvM2hbULsMIUQwMBrYa9q01NRk+sjcnKL77VWADUKIA0KIu03bvBVFyTa9zgG8e8g2gBto+iPrDc+so8+np76Dt6N6cmZChBCHhBBbhRBTTNv8TfZ0h20d+ey6+5lNAXIVRUlotK3bn9dZGtGt37O+Iui9AiGEA7ASeFhRlDLgPWAgMArIRm3u9QSTFUWJAuYC9wshpjbeafJCeiQ/VQhhBVwNfGfa1FuemYWefD5tIYR4CmgAvjBtygaCFEUZDTwCfCmEcOpGk3rdZ3cWN9LUcej259WCRljoju9ZXxH0TCCw0fsA07ZuQwihR/2gvlAUZRWAoii5iqIYFEUxAh9wJkTQrfYqipJp+p8HrDbZkWsOpZj+5/WEbaiVzEFFUXJNNvaKZ0bHn0+32ieEWAJcCdxsEgJMIY1C0+sDqPHpcJMdjcMyXWLbeXx23fbMhBA6YCHwTSN7u/V5taQRdPP3rK8I+n4gTAgRYvL4bgB+6q6Lm2JzHwInFUVZ1mh749jzAsDc8/4TcIMQwloIEQKEoXbCdIVt9kIIR/Nr1A61YyYbzD3ki4EfG9l2q6mX/RKgtFGTsCto4jX1hmfW6HodeT7rgcuEEK6mUMNlpm2djhBiDvAEcLWiKFWNtnsKIbSm16GozyjZZF+ZEOIS03f11kb305l2dfSz687f7aVAnKIollBKdz6v1jSC7v6eXUjPbnf+ofYKx6PWsk9187UnozaVjgCHTX/zgBXAUdP2nwDfRsc8ZbL1FBfYg34O20JRswdigePmZwO4A5uABGAj4GbaLoB3TLYdBaK70DZ7oBBwbrSt258ZaoWSDdSjxiTvOJ/ngxrPTjT93daFtiWixlHN37XlprKLTJ/xYeAgcFWj80SjCmwS8DamUeCdbFeHP7vO/t22ZJdp+yfAvWeV7c7n1ZpGdOv3TA79l0gkkn5CXwm5SCQSieQcSEGXSCSSfoIUdIlEIuknSEGXSCSSfoIUdIlEIuknSEGXXNQIdQZIu562QyLpDGTaouSiRgiRgpoDXNDTtkgkF4r00CUXDaZRtWuEELFCiGNCiKcBP2CLEGKLqcxlQojdQoiDQojvTHNzmOecf0Woc2jvE0IM6sl7kUhaQgq65GJiDpClKMpIRVGGA/8BsoAZiqLMEEJ4AH8HLlXUyc5iUCd1MlOqKEok6sjC/3Sr5RJJO5CCLrmYOArMFkK8LISYoihK6Vn7L0FdlGCnUFe9WYy6KIeZrxr9n9DVxkokHUXX0wZIJN2FoijxQl3qax7wvBBi01lFBOriAje2dopWXkskvQLpoUsuGoQQfkCVoiifA6+iLmVWjrpkGKirA00yx8dNMffwRqe4vtH/3d1jtUTSfqSHLrmYiAReFUIYUWfr+xNq6GSdECLLFEdfAnwlhLA2HfN31NkCAVyFEEeAWtRpgSWSXoVMW5RI2oFMb5T0BWTIRSKRSPoJ0kOXSCSSfoL00CUSiaSfIAVdIpFI+glS0CUSiaSfIAVdIpFI+glS0CUSiaSf8P93IEpF+wNI2gAAAABJRU5ErkJggg==", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAiwAAAGwCAYAAACKOz5MAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAAB+LklEQVR4nO3dZ3hURR+G8XvTeyMdkhAgdEIvAaRIEBCRpiKigCgqoogoKvpKsUUsiBU7iApYIIA0pQUQQqihhRZIg/SE9L573g9LFiIB0ncT/r/Lc+3m1Bk2uA9z5syoFEVREEIIIYQwYEb6LoAQQgghxO1IYBFCCCGEwZPAIoQQQgiDJ4FFCCGEEAZPAosQQgghDJ4EFiGEEEIYPAksQgghhDB4JvouQE3QaDTEx8dja2uLSqXSd3GEEEIIUQGKopCdnY2npydGRrduQ2kQgSU+Ph4vLy99F0MIIYQQVRAXF0eTJk1uuU+DCCy2traAtsJ2dnZ6Lo0QQgghKiIrKwsvLy/d9/itNIjAUnobyM7OTgKLEEIIUc9UpDuHdLoVQgghhMGTwCKEEEIIgyeBRQghhBAGr0H0YRFCCCGqQ61WU1xcrO9iNEimpqYYGxtX+zwSWIQQQtyxFEUhMTGRjIwMfRelQXNwcMDd3b1aY6VJYBFCCHHHKg0rrq6uWFlZyeCjNUxRFPLy8khOTgbAw8OjyueSwCKEEOKOpFardWGlUaNG+i5Og2VpaQlAcnIyrq6uVb49JJ1uhRBC3JFK+6xYWVnpuSQNX+mfcXX6CUlgEUIIcUeT20C1ryb+jCWwCCGEEMLgSWARQgghhMGTwCKEEEI0cCEhIahUqnr9+LYEltvIK87jZOpJfRdDCCGEuKNVKrAEBQXRvXt3bG1tcXV1ZdSoUZw9e/aWxyxbtgyVSlVmsbCwKLOPoijMnTsXDw8PLC0tCQwM5Pz585WvTQ2Ly46jz6o+TPl7CkXqIn0XRwghhLhjVSqw7Nq1i+nTp7N//362bt1KcXEx99xzD7m5ubc8zs7OjoSEBN0SExNTZvsHH3zAZ599xtdff01YWBjW1tYMGTKEgoKCyteoBjWxaYK9mT35JfkcSzmm17IIIYSofYqikFdUopdFUZQKl3PAgAE8//zzzJw5E0dHR9zc3Pjuu+/Izc3l8ccfx9bWlhYtWrB58+abnmP16tW0a9cOc3NzmjZtyscff1xm+1dffYWfnx8WFha4ubnxwAMP6Lb9+eefdOjQAUtLSxo1akRgYOBts0B1VWrguC1btpT5edmyZbi6unL48GH69et30+NUKhXu7u7lblMUhcWLF/O///2PkSNHArB8+XLc3NxYu3YtDz/8cGWKWKNUKhW9PHux8eJG9ifsp7t7d72VRQghRO3LL1bTdu7ferl2xFtDsDKr+NfyTz/9xCuvvMKBAwf47bffmDZtGsHBwYwePZrXX3+dTz75hMcee4zY2Ngbjj18+DAPPfQQ8+fPZ9y4cezbt49nn32WRo0aMXnyZA4dOsSMGTP4+eef6d27N+np6ezZsweAhIQExo8fzwcffMDo0aPJzs5mz549lQpcVVGtPiyZmZkAODk53XK/nJwcfHx88PLyYuTIkZw6dUq3LSoqisTERAIDA3Xr7O3t6dmzJ6GhoeWer7CwkKysrDJLbenl0QuA/fH7a+0aQgghRGV17NiR//3vf/j5+TFnzhwsLCxwdnZm6tSp+Pn5MXfuXNLS0jh+/PgNxy5atIhBgwbx5ptv0rJlSyZPnsxzzz3Hhx9+CEBsbCzW1tbcd999+Pj40LlzZ2bMmAFoA0tJSQljxoyhadOmdOjQgWeffRYbG5tarW+Vh+bXaDTMnDmTPn360L59+5vu16pVK3788Uf8/f3JzMzko48+onfv3pw6dYomTZqQmJgIgJubW5nj3NzcdNv+KygoiAULFlS16JVSGlhOpp0kqygLOzO7OrmuEEKIumdpakzEW0P0du3K8Pf31703NjamUaNGdOjQQbeu9Hs1OTkZO7uy312nT5/W3dUo1adPHxYvXoxarWbw4MH4+PjQrFkzhg4dytChQxk9ejRWVlZ07NiRQYMG0aFDB4YMGcI999zDAw88gKOjY2WrXClVbmGZPn06J0+eZNWqVbfcLyAggIkTJ9KpUyf69+/PmjVrcHFx4ZtvvqnqpZkzZw6ZmZm6JS4ursrnuh13a3ea2jVFo2g4mHiw1q4jhBBC/1QqFVZmJnpZKjsarKmp6Q1lv35d6fk0Gk2l/xxsbW05cuQIK1euxMPDg7lz59KxY0cyMjIwNjZm69atbN68mbZt2/L555/TqlUroqKiKn2dyqhSYHnuuefYsGEDO3fupEmTJpU61tTUlM6dOxMZGQmg69uSlJRUZr+kpKSb9nsxNzfHzs6uzFKbAjwDAAiNL/8WlRBCCFGftGnThr1795ZZt3fvXlq2bKmbnNDExITAwEA++OADjh8/TnR0NDt27AC0YahPnz4sWLCAo0ePYmZmRnBwcK2WuVKBRVEUnnvuOYKDg9mxYwe+vr6VvqBarebEiRO6KaZ9fX1xd3dn+/btun2ysrIICwsjICCg0uevDaW3hcISwvRcEiGEEKL6XnrpJbZv387bb7/NuXPn+Omnn/jiiy94+eWXAdiwYQOfffYZ4eHhxMTEsHz5cjQaDa1atSIsLIz33nuPQ4cOERsby5o1a0hJSaFNmza1WuZK9WGZPn06K1asYN26ddja2ur6mNjb2+umj544cSKNGzcmKCgIgLfeeotevXrRokULMjIy+PDDD4mJieHJJ58EtClt5syZvPPOO/j5+eHr68ubb76Jp6cno0aNqsGqVl139+4Yq4yJzoomIScBDxsPfRdJCCGEqLIuXbrw+++/M3fuXN5++208PDx46623mDx5MgAODg6sWbOG+fPnU1BQgJ+fHytXrqRdu3acPn2a3bt3s3jxYrKysvDx8eHjjz9m2LBhtVrmSgWWJUuWANrnv6+3dOlSXSVjY2MxMrrWcHPlyhWmTp1KYmIijo6OdO3alX379tG2bVvdPq+88gq5ubk89dRTZGRk0LdvX7Zs2XLDAHP6YmtmS3vn9hxLOcb+hP2M9hut7yIJIYS4g4WEhNywLjo6+oZ11z9q/N/HjseOHcvYsWPLPX/fvn3LvQZobyf9d5iTuqBSavvB6TqQlZWFvb09mZmZtdaf5YujX/DN8W8Y5juMD/p9UCvXEEIIUXcKCgqIiorC19fXYP6B3FDd7M+6Mt/fMpdQBV3fj0WjVL7HtRBCCCGqTgJLBXV06YiliSXpBemcv6L/eY6EEEKIO4kElgoyNTalm1s3APYnyKi3QgghRF2SwFIJpbeFQhNkPBYhhBCiLklgqYRentrAciTpCEXqIj2XRgghhLhzSGCpBD8HPxpZNCK/JJ9jKcf0XRwhhBDijiGBpRJUKpWulUWG6RdCCCHqjgSWSpJh+oUQQtQHTZs2ZfHixfouRo2RwFJJpYHlZNpJMgsz9VwaIYQQ4s4ggaWS3K3d8bX3RaNoOJR4SN/FEUIIIe4IEliqQB
5vFkIIoW/Z2dlMmDABa2trPDw8+OSTTxgwYAAzZ84sd//Y2FhGjhyJjY0NdnZ2PPTQQyQlJem2Hzt2jIEDB2Jra4udnR1du3bl0CHtP8xjYmIYMWIEjo6OWFtb065dOzZt2lQX1dSp1OSHQquXRy9WnlkpA8gJIURDoyhQnKefa5tagUpV4d1nzZrF3r17Wb9+PW5ubsydO5cjR47QqVOnG/bVaDS6sLJr1y5KSkqYPn0648aN001yOGHCBDp37sySJUswNjYmPDwcU1NTAKZPn05RURG7d+/G2tqaiIgIbGxsaqLWFSaBpQq6u3fHWGVMTFYM8TnxeNp46rtIQgghakJxHrynp/+nvx4PZtYV2jU7O5uffvqJFStWMGjQIACWLl2Kp2f5Zd++fTsnTpwgKioKLy8vAJYvX067du04ePAg3bt3JzY2ltmzZ9O6dWsA/Pz8dMfHxsYyduxYOnToAECzZs2qXM2qkltCVWBrZkt75/aAPC0khBCi7l28eJHi4mJ69OihW2dvb0+rVq3K3f/06dN4eXnpwgpA27ZtcXBw4PTp04C2xebJJ58kMDCQ999/nwsXLuj2nTFjBu+88w59+vRh3rx5HD9+vJZqdnPSwlJFvTx6cSzlGKHxoYz2G63v4gghhKgJplbalg59XVuP5s+fzyOPPMLGjRvZvHkz8+bNY9WqVYwePZonn3ySIUOGsHHjRv755x+CgoL4+OOPef755+usfNLCUkUBngEAhCWGoVE0ei6NEEKIGqFSaW/L6GOpRP+VZs2aYWpqysGDB3XrMjMzOXfuXLn7t2nThri4OOLi4nTrIiIiyMjIoG3btrp1LVu25MUXX+Sff/5hzJgxLF26VLfNy8uLZ555hjVr1vDSSy/x3XffVeZPttoksFSRv7M/liaWpBekc/7KeX0XRwghxB3E1taWSZMmMXv2bHbu3MmpU6d44oknMDIyQlVO8AkMDKRDhw5MmDCBI0eOcODAASZOnEj//v3p1q0b+fn5PPfcc4SEhBATE8PevXs5ePAgbdq0AWDmzJn8/fffREVFceTIEXbu3KnbVlcksFSRqbEp3dy6AcjTQkIIIercokWLCAgI4L777iMwMJA+ffrQpk0bLCwsbthXpVKxbt06HB0d6devH4GBgTRr1ozffvsNAGNjY9LS0pg4cSItW7bkoYceYtiwYSxYsAAAtVrN9OnTadOmDUOHDqVly5Z89dVXdVpflaIoSp1esRZkZWVhb29PZmYmdnZ2dXbdnyN+5oODH9DHsw9fD/66zq4rhBCi+goKCoiKisLX17fcL/n6Jjc3l8aNG/Pxxx/zxBNP6Ls4Zdzsz7oy39/SwlINpQPIHU46TJG6qFLHqjVq3g59m8lbJhObFVsbxRNCCNGAHT16lJUrV3LhwgWOHDnChAkTABg5cqSeS1Y7JLBUQwuHFjSyaESBuoBjKccqfJyiKLx/4H1+P/c7h5MO89jmxziZerIWSyqEEKIh+uijj+jYsSOBgYHk5uayZ88enJ2d9V2sWiGBpRpUKhW9PK8O0x9f8WH6l51axqqzq1ChwtvWm/SCdKb8PYU9l/bUVlGFEEI0MJ07d+bw4cPk5OSQnp7O1q1bdQO7NUQSWKopwEP7eHNFO95ujtrMosOLAHi528v8PuJ3env2Jr8kn+d3PE/w+eBaK6sQQghRX0lgqaaeHj0BOJV2iszCzFvuezDxIG/8+wYAj7Z5lIntJmJtas0Xd3/BiGYjUCtq5u6byzfHvqEB9IUWQgghaowElmpyt3bH194XjaLhYOLBm+53IeMCL+x8gWJNMYHegbzc7WXdNlNjU97t+y5PdngSgC/Cv+Cd/e+g1qhrvfyGRKNoOH/lPFtjtpJRkKHv4gghhDAgMjR/DQjwCCAqM4r9CfsJ9Am8YXtyXjLTtk0juyibTi6dCLorCGMj4zL7qFQqXujyAi6WLroOuSn5KSzstxBLE8u6qkqdUhSFCxkXOJB4gENJhziUeIgrhVcAsDKxYkKbCUxqNwl7c3s9l1QIIYS+SWCpAb08erHizIpy+7HkFucyfft0EnITaGrXlM/v/hwLk5s/7/9Im0dwsXLhtd2vsTNuJ1P/mcoXd3+Bg4VDLdagbpQGlINJBzmYeJDDSYdJL0gvs4+liSVOFk5czrnMdye+Y+WZlUxoM4HH2j4mwUUIIe5gElhqQDf3bhirjInJiiE+Jx5PG+303sWaYl4KeYkz6WdwsnDiq8CvKhQ8BvsMxukeJ57f8TzHUo7x2ObH+Hrw1zS2aVzLNakd22K2sTlqM4eSDt0QUCyMLejs2pnu7t3p7t6ddo3aYWJkwo7YHXx17CvOXTnHN8e/YcXpFTzW9jEebfsotma2eqqJEEIIfZGRbmvIo5se5VjKMRb0XsAYvzEoisK8ffMIjgzG0sSSH4f8SHvn9pU6Z+SVSJ7Z9gxJeUk4WzqzJHAJrZ1a11INascf5/7grdC3dD9bGFvQybWTLqC0b9QeU2PTco/VKBq2x27nq/CviMyIBMDWzJaJbSfyaJtHsTGzqZM61AZFUdAomhtuDQoh6k59Hul2wIABdOrUicWLF+u7KBUiI90akNLZm/fHa28LfX3sa4IjgzFSGfFhvw8rHVYAWji24Jd7f6GFQwtS81OZvGUyO2N31mi5a9OeS3t4d/+7AIz1G8tPQ39i3/h9fHfPdzzl/xSdXTvfNKwAGKmMGOwzmNX3r+bD/h/S3L452UXZfBn+JUPXDOW749+RW5xbV9WpUR8e+pBeK3pxNPmovosihBD1ggSWGlI6TH9YYhjB54P56ph2Uqg3er5Bf6/+VT6vu7U7Pw37ie7u3cktzmXGzhk8v+N5LmVfqpFy15bTaad5addLqBU19ze/n3kB8+ji1uWWAeVmjFRGDG06lNX3r+aDfh/ga+9LZmEmnx39jKGrh7LyzMpaqEHtOZFygp8jfqZAXUBQWBAaRaPvIgkhhMGTwFJD/J39sTSxJL0gnXn75gHwZIcneajVQ9U+t52ZHV8Hfs3j7R7HRGVCSFwIo9aN4utjX1OoLqz2+WtaQk4C07dPJ78kn57uPZkfML/c6c4ry9jImGG+wwi+P5igu4JoateUjMIM3gt7j33x+2qg5LVPo2h4/8D7up9Pp59mw8UNeiyREKK+u3LlChMnTsTR0RErKyuGDRvG+fPnddtjYmIYMWIEjo6OWFtb065dOzZt2qQ7dsKECbi4uGBpaYmfnx9Lly7VV1VuSQJLDTE1NqW7e3cAFBTua3YfMzrPqLHzmxmbMavbLP68/096uvekUF3Il+FfMnrdaHZf2l2tcyuKQmJuYo2En+yibJ7d/iwp+Sm0cGjBJwM/qVKryq0YGxlzX7P7CB4ZzFi/sQAsPry4XrRU/HXhL46nHsfKxIpH2zwKwGdHPiO/JF/PJRNCgPb/h3nFeXpZqtqldPLkyRw6dIj169cTGhqKoijce++9FBcXAzB9+nQKCwvZvXs3J06cYOHChdjYaPsAvvnmm0RERLB582ZOnz7NkiVLDHYuInlKqAbd7XU3uy/tp
qd7T97q/VaNtCr8V3OH5nx3z3f8Hf03Hx78kLjsOKZvn85Ar4G82uPVCj9JVKIpITw5nJ1xO9kZt5O47DicLZ1Z0HsB/Zr0q1LZitXFvBjyIpEZkbhYurAkcEmtPtFjYmTCjC4z2BK9hdPpp/kn+h+G+g6ttetVV05RDouPLAbgKf+neLTto+yI3UF8bjw/R/zMU/5P6beAQghty/CKnnq5dtgjYViZWlXqmPPnz7N+/Xr27t1L7969Afj111/x8vJi7dq1PPjgg8TGxjJ27FjdPEPNmjXTHR8bG0vnzp3p1q0bAE2bNq2ZytQCaWGpQaP9RvPLvb+wJHBJjbcqXE+lUjHUdyjrR6/X3SbaGbeTkWtH3vI2UV5xHttitvHGv28w8PeBPP734yyPWE5cdhwAqfmpTN8+nQWhC8grzqtUmRRFYX7ofMISwrAyseLLQV/ibu1e7brejpOFE5PbTQbgs6OfUawurvVrVtW3x78lNT8Vb1tvHmv7GObG5rzQ5QUAfjjxA6n5qXouoRCivjl9+jQmJib07HktZDVq1IhWrVpx+vRpAGbMmME777xDnz59mDdvHsePH9ftO23aNFatWkWnTp145ZVX2LfPcG+vSwtLDTJSGdHRpWOdXc/a1JpZ3WYxssVIgsKCCEsM48vwL1l/YT2v9XiNfk36kZqfSkhcCDvjdrI/fj9FmiLd8fbm9vRv0p8BXgPo5taN7058x88RP/PnuT8JSwjjvb7v0cm1U4XKsuTYEtZfWI+xypiP+n9Em0ZtaqfS5ZjYdiKrzqwiLjuOP8//yfjW4+vs2hUVnRnNz6d/BuDVHq9iZmwGwFDfofwc8TMn007yVfhXzA2Yq89iCnHHszSxJOyRML1duzY8+eSTDBkyhI0bN/LPP/8QFBTExx9/zPPPP8+wYcOIiYlh06ZNbN26lUGDBjF9+nQ++uijWilLdcg4LA2Eoii620TJ+ckA+Nj5EJMVU2a/JjZNGOg9kIFeA+ns2hkTo7KZ9UDCAf63938k5CZgpDLi8XaPM73T9Fu2GK2NXMube98EYG7AXB5s+WAN1+72fjvzG++EvYOThRObxmzC2tS6zstwK89ue5Y9l/dwV+O7+CrwqzLbjiQdYdKWSRipjFg9YjUtHFvoqZRC3Fkawjgs06dPp2XLlmVuCaWlpeHl5cXy5ct54IEHbjh2zpw5bNy4sUxLS6lvvvmG2bNnk5WVVaPllXFYhE55t4lKw0oH5w7M6DyD4PuD2TRmE690f4Xu7t1vCCsAPTx6sPr+1dzf/H40ioYfTv7A+I3jOXflXLnXDY0PZcG+BYD2qSh9hBWAMS3H4G3rTXpBOstPLddLGW5m96Xd7Lm8BxMjE17p/soN27u4dSHQOxCNomHR4UV6KKEQor7y8/Nj5MiRTJ06lX///Zdjx47x6KOP0rhxY0aOHAnAzJkz+fvvv4mKiuLIkSPs3LmTNm20reBz585l3bp1REZGcurUKTZs2KDbZmgksDQwpbeJ1o5ay0f9P2L7g9tZMXwFU/2n0sKxRYU6Atua2fJu33dZPGAxjuaOnL1yloc3PMzSk0vLzCB97so5ZoXMokQpYZjvMJ7v/HxtVu2WTI1MmdFF+1TWslPLDKY/SJG6iIUHFgLwWJvHaGrftNz9ZnadiYnKhD2X9xAaH1qHJRRC1HdLly6la9eu3HfffQQEBKAoCps2bcLUVNsyrlarmT59Om3atGHo0KG0bNmSr77StvSamZkxZ84c/P396devH8bGxqxatUqf1bmpSt0SCgoKYs2aNZw5cwZLS0t69+7NwoULadWq1U2P+e6771i+fDknT54EoGvXrrz33nv06NFDt8/kyZP56aefyhw3ZMgQtmzZUqFyyS2h2pOan8qCfQsIuRQCQBfXLrzb911MjUyZsGkCSXlJdHPrxjeDv9H1y9AXRVF4ZOMjnEw7yfjW43m95+t6LQ9oO9MuPrIYZ0tn/hr11y2nE1h4YCG/nP6Flo4t+f2+32XYfiFqWX2+JVTf1PktoV27djF9+nT279/P1q1bKS4u5p577iE39+bDo4eEhDB+/Hh27txJaGgoXl5e3HPPPVy+fLnMfkOHDiUhIUG3rFxZv0YvbaicLZ357O7PeKv3W1iZWHEk+Qhj14/liX+eICkvCV97XxYPXKz3sALa22Ivdn0RgD/O/kFcVpxey5Ocl8y3x78F4MWuL9527qOn/Z/G1syWc1fOsf7C+rooohBC1BuVCixbtmxh8uTJtGvXjo4dO7Js2TJiY2M5fPjwTY/59ddfefbZZ+nUqROtW7fm+++/R6PRsH379jL7mZub4+7urlscHR2rViNR41QqFaP9RrP6/tV0ce1CXkkeMVkx2hmoB32Fvbm9vouo08OjB30a96FEKeHzo5/rtSyLDy8mryQPfxd/7mt23233d7Bw4Gn/pwH4/OjnlX60XAghGrJq9WHJzMwEwMnJqcLH5OXlUVxcfMMxISEhuLq60qpVK6ZNm0ZaWtpNz1FYWEhWVlaZRdS+JrZN+HHIj7zc7WV6uPfgq8CvaGLbRN/FusGLXV5EhYrN0Zs5lXZKL2UITw7nr4t/ATCnxxyMVBX7qza+9Xga2zQmJT+FnyJ+uv0BQghxh6hyYNFoNMycOZM+ffrQvn3FZyJ+9dVX8fT0JDAwULdu6NChLF++nO3bt7Nw4UJ27drFsGHDUKvV5Z4jKCgIe3t73eLl5VXVaohKMjYyZlK7Sfww5AfaNWqn7+KUq5VTK4Y3Gw5oWznqmkbREHQgCIDRLUZXaqZuM2MzZnadCcDSk0tJyUupjSIKIUS9U+XAMn36dE6ePFmp3sTvv/8+q1atIjg4uEynm4cffpj777+fDh06MGrUKDZs2MDBgwcJCQkp9zxz5swhMzNTt8TF6bevgjA8z3V+DlMjU/Yn7K/ziRHXRq4lIi0CG1Mb3ZNLlTHEZwj+Lv7kl+TzRfgXtVBCIcT1GsBwZAavJv6MqxRYnnvuOTZs2MDOnTtp0qRitwQ++ugj3n//ff755x/8/f1vuW+zZs1wdnYmMjKy3O3m5ubY2dmVWYS4XmObxoxrNQ6o24kRs4qy+PTIpwBM6zgNZ8vKTyKmUqmY3W02AMHngzmbfrZGyyiE0Cp97DcvT/qL1bbSP+PSP/OqqNTQ/Iqi8PzzzxMcHExISAi+vr4VOu6DDz7g3Xff5e+//9ZNsHQrly5dIi0tDQ8Pj8oUT4gynvJ/iuDIYE6nn2ZL1BbubXZvrV9zSfgS0gvS8bX3ZXybqk8R0Mm1E/f43MM/Mf+w6PAivhn8TQ2WUggBYGxsjIODA8nJ2tHBraysamXS2juZoijk5eWRnJyMg4MDxsZVH66hUoFl+vTprFixgnXr1mFra0tiYiIA9vb2WFpq50CYOHEijRs3JihIew9/4cKFzJ07lxUrVtC0aVPdMTY2NtjY2JCTk8OCBQsYO3Ys7u7uXLhwgVdeeYUWLVowZMiQKldMCEcLR6a0n8LnRz/ns6OfMdhncK1OSnkh4wKrzmhvkb7W/TVMjap3rZldZ7Ijbgf7
4vfx7+V/6du4b00UUwhxHXd37SStpaFF1A4HBwfdn3VVVWrguJslz6VLlzJ58mRAO79B06ZNWbZsGaCdqjomJuaGY+bNm8f8+fPJz89n1KhRHD16lIyMDDw9Pbnnnnt4++23cXNzq1C5ZOA4cTN5xXkMDx5Oan4qr/V4jQltJtTKddQaNdO2TSM0IZSBXgP57O7PauS8Hx78kOURy2nh0II/RvxR7nQKQojqU6vVFBcb7mzv9ZmpqelNW1Yq8/0tkx+KBu/3s7/z9v63a2VixEvZlwiODGZd5DqS8pIwMzJj7ai1eNnWzJNrmYWZDA8eTmZhJlPaT6Fto7YoXP0rq4CCgqIounWlP1uYWNDbs7fBTQIphBDXq8z3t/xzTTR4o/1G83PEz0RnRfPTqZ94ttOz1TpfQUkB22O3E3w+mLDEa9PQ25nZMbv77BoLKwD25vY84/8MCw8u5MeTP1bqWFszW8a1GseENhOq1PlXCCEMibSwiDvC1pitzAqZhaWJJRtHb8TFyqXS54hIi2DN+TVsitpEdlE2ACpU9PLoxRi/MQz0Hoi5sXlNF51idTHvhL1DdGa09poqFSpU115Rof3v6nvgUs4l4rK1j/ubGpkyovkIJrWbRDP7ZjVevpqkKIp0ehTiDiK3hIT4D0VRmLBpAidSTwDQyKIRbtZuuFldXf7z3tXKFUsTSzILM9l4cSPBkcGcST+jO5+ntSejWoxiZIuReNp46qtaN6VRNOyM28myk8sITwnXrR/gNYDH2z1OZ9fOBhcMQuNDeWHnCziaO9LdvTs9PHrQw70H7tbV66gnhDBcEliEKMexlGNM2zqN7OLsCu1vb25PfnE+RZoiQNtSMch7EKP9RtPLo1eFh9vXt/DkcJaeXMrOuJ26vi7+Lv5MaTeFAV4DbjsrtEbRkJqfSnxOPAm5CaQXpDOk6ZAavc0UlRnFhI0Tyv1svG29tQHGvQc9PHrI7S0hGhAJLELchKIoXCm8QlJuEsl5ySTlJZGYm0hSXpJ2ydW+5pfk645p5diK0X6jua/ZfQY10WNlRWVG8dOpn/jrwl+6EOZj58PEthPp5dGLxNxE4nPjSchJKPOamJtIsabs0xO+9r4sG7oMJ4uKzyN2M5mFmUzYNIGYrBg6uXTiKf+nOJh0kIMJB4lIj7hh0D9fe196uPfQhRhHC5koVYj6SgKLENWgKAo5xTkk5SZhZGSEr52vwd0+qY7U/FRWnF7BqrOrdH1xbsdYZYyrlSse1h7EZceRkp9CG6c2/DDkB2zNbKtclmJNMdO2TSMsIQwPaw9WDl9JI8tGuu3ZRdkcSTrCgcQDHEw8yJn0M9eekgJMVCbc2+xeprSfQnOH5lUuhxBCPySwCCFuK7c4lzXn1/Dr6V9JzU/Fw9oDD2sPPG08b3h1tXLVjQETlRnF5C2TSS9Ip4trF74e/DWWJpZVKsM7+9/ht7O/YWliyc/DfqaVU6tb7p9ZmMmhpEMcTDxIWEIYkRnXpu8Y6DWQJzs8ib/Lraf+EEIYDgksQohKqezTOWfSzzBlyxSyi7Pp07gPnw/8vNKjCK86s4p3w95FhYrFAxdzt/fdlS02J1NP8sOJH9geu13X8tLdvTtPtn+SAM+ABtUyJkRDJIFFCFHrjiYf5emtT5Nfks9gn8F82O/D23bgLRUaH8q0bdNQK2pe6PICT3Z4slpluZh5kWUnl/HXxb8o0ZQA0MapDU90eIJA78AKl0sIUbcksAgh6sS+y/t4bsdzFGuKGd1iNAt6L7htq0Z0ZjSPbHqE7KJsRjQbwbt9362xlpDE3ESWRyznz3N/6jpO+9j58Hi7xxnRfARmxmY1ch0hRM2QwCKEqDPbYrbx0q6X0CgaHmv7GLO7zb5pAMkszOTRTY8SnRWNv4s/Pw75sVYG28soyGDlmZX8euZXMgszAXCxdGFSu0k81OqhKve5uZnc4lyMVcZYmFjU6HmFaOgksAgh6tTayLW8ufdNAJ7t9CzTOk67YZ8STQnTtk1jf8J+3K3dWTl8Za2PqZJXnMfq86v56dRPJOUlAdpBA6e0n8KDrR6sdnCJzozmp4ifWB+5Hntze4LuCqKnR8+aKHqVaBQN++L30dm1s8wjJeoFCSxCiDr36+lfef/A+wC82v1VHm37aJnt74W9x8ozK7E0sWT5sOW0dmpdZ2UrVhfz18W/+Pb4t1zOuQxcCy4PtXqo0i0j5Q3GB9rpEab6T2Vax2l6mVl7xekVBB0IooNzB5YOXVorrVdC1CQJLEIIvfj62Nd8Gf4lAG/3eZtRLUYB12bMBlg8YDGDfAbppXzFmmL+ulA2uDhbOmtbXFo+eMvgctPpDpoMYELbCWyJ2sLq86sB6OLahYX9FtbptAJqjZrhwcN19aponyIh9EkCixBCLxRF4aNDH7E8YjlGKiM+6v8R9mb2PL31aUqUEmZ0nsFU/6n6LibFmmLWR67n2+PfEp8bD2iDyxPtn+CBlg+UCS4FJQWsv7Ce5RHLicmKAa6bULLtJJo5XJtQctPFTby1/y1yi3NxMHfgnT7v0N+rf53UaXvMdmaGzMTKxIoCdQEaRcP/ev6Pca3H1cn1hagKCSxCCL1RFIX5ofNZc34NJkYmWJpYkl2UzfBmwwnqG2RQ/+IvVhez/kL5wSXQJ5C1kWtZeWYl6QXpANia2TKu1Tgeaf3ITWf8js2KZfbu2USkRQDwaJtHmdV1VqXHqamsSZsncST5CFM7TMXWzJZFhxdhojLhhyE/0MWtS61eW4iqksAihNArtUbNK7tf4Z+YfwDwd/bnx6G180RQTShWF7Puwjq+Pf4tCbkJN2z3tPbksbaPMcZvDFamVrc9X5G6iE8Of8Ivp38BoF2jdnzY70O87LxqvOwAp9JO8fCGhzExMuHvsX/jYunCK7tfYUv0FhpZNOK3+37DzdqtVq5dUyLSInSzqIs7hwQWIYTeFauLWRC6gLjsOD7q/9FNWyQMSbG6mLUX1vLd8e9IyE2gjVMbJrebzD1N76lSJ9qQuBD+t/d/ZBZmYm1qzfyA+Qz1HVrj5Z6zZw4bLm7gvmb3EXRXEKB9QurRzY9y/sp5/J39WTp0qcGOQ/PTqZ/46NBH2JnZ8ceIP/C08dR3kUQdkcAihBDVUKwuJq0gDTcrt2rfwkrMTeTV3a9yJPkIAGP9xvJqj1drbCyY5Lxkhvw5hBKlhFX3raJdo3a6bXHZcTy84WGyirIY4zeG+QHzDeqWHMD3J77n0yOf6n72d/Zn2dBltX4LTRiGynx/G9VRmYQQot4wNTbF3dq9Rr7c3a3d+WHIDzzl/xQqVKw+v5pHNj7CpexLNVBS7ZxMJUoJXVy7lAkrAF62XnzY70OMVEasOb+GP879USPXrAmKorAkfIkurExoMwFbM1uOpx5n0eFFei6dMEQSWIQQopaZGJnwfOfn+faeb3G2dCYyI5I3/n0DjaKp1nkLSgp0IeSxto+Vu0/vxr2Z0XkGAEEHgjiafLRa16wJiqLw+dHP+erYVwC80OUFXuvxGu/2eReAX07/wraYbfosojBAEliEEKKO9PL
oxa/3/oqliSVHko8QfD64WufbcHEDGYUZNLZpzECvgTfdb0r7Kdzjcw8lmhJmhcwiOS+5WtetDkVRWHR4Ed+d+A6A2d1m6ya/HOg9kMntJgPw5t43icuK01cxhQGSwCKEEHXI08aT5zs/D8Ciw4tIy0+r0nkUReGXCO1TSI+0fuSWM1KrVCre7vM2fo5+pOan8mLIixSpi6p03epQFIWFBxey7NQyAF7v+ToT200ss8+MLjPo5NKJnOIcXtr1EoXqwjovpzBMEliEEKKOjW89njZObcgqyuKjQx9V6Rz74vdxIfMC1qbWjPEbc9v9rUyt+HTAp9p+IinHeS/svSpdt6o0ioZ39r/Dr6d/RYWKuQFzGd96/A37mRqZ8mH/D3Ewd+B0+mk+PPhhta5bpC7i+xPfs+zksmrfghP6JYFFCCHqmImRCfMC5mGkMmLDxQ2ExodW+hw/n/4Z0A7Bb2NmU6FjvOy8+KDfB7rOv3XVCVetUTN/33x+P/c7KlS81ectHmz54E33d7d21z2e/dvZ39h0cVOVrhuXFcdjmx/j0yOf8vHhj3kr9C0JLfWYBBYhhNCDds7tdC0M7+x/h4KSggofezHjInsv70WFikfaPFKp6/Zt3JcZXbSdcN8Le4/w5PBKHV9ZJZoS/rf3fwRHBmOkMuK9u97TzTF1u3JO7aCdxmFB6AKiMqMqdd0t0Vt4cMODRKRFYGtqi5HKiNXnVzN/33wJLfWUBBYhhNCT5zo9h6ulK7HZsbpOqBVR2rpyt/fdeNlWfvTcJ9o/wWCfwZRoSnh2+7O8vOtlvj/xPXsv761yn5ryFGuKdYPamahM+KDfB9zX7L4KH/9sp2fp7t6dvJI8Xtr1Evkl+bc9pqCkgLdC32L2rtnkFufS2bUza0au4b2+72GkMiI4Mpg3976JWqOuTtWEHsjAcUIIoUfbYrbxYsiLmBiZsHrE6jKTKZYnoyCDwD8DKVQXsnTIUrq5d6vSdfOK85i0ZRJn0s/csM3V0pXWjVrTxqkNbZza0LpRazytPSs0Lo2iKBRriskvyWf+vvlsi92GiZEJH/X/iEHelZ+lOyUvhQf/epC0gjRGtxjNW33euum+FzMu8vLulzl/5TwqVDzZ4Ume7fSsbpTiLVFbeG3Pa6gVNSOajeDtPm/fsrOyqH0y0q0QQtQTiqIwY8cMQi6F0MW1C0uHLsVIdfPG7++Of8dnRz+jjVMbfrvvt2oNbleoLuRw4mFOp5/mTPoZTqef1s1I/V92Zna0cmqFpYklhSWF5KvzKSwppFBdSIG6gIKSAu37kgIUrn2tmBmZ8cnAT+jXpF+VyxmWEMZTW59Co2h4u8/b5d5SWhe5jnfD3iW/JB8nCyeC7gqit2fvG/b7O/pvXt39KmpFzb2+9/Ju33erNO2CqBkSWIQQoh6Jz4ln1LpR5Jfk81bvtxjtN7rc/YrVxQxdPZTk/GTe6/seI5qPqPGy5Bbncjb9rC7EnEk/Q2RGJCWakkqfy9nSmXf7vEvvxjcGh8r6+tjXfBn+JRbGFqwYvgI/Rz9A21L0bti7rL+wHoCe7j15v9/7OFs63/Rc22K2MXvXbEqUEoY2HUrQXUESWvREAosQQtQzpRMA2pvbs37UepwsnG7YZ8PFDczZMwdnS2f+GftPnc23U6Qu4kLGBc5dOYdG0WBubI65iTmWxpaYm5hjYWyBubE5FiYWWJhcfW9sgYmRSY3NXaRRNEzbNo198fvwtfdl1fBVxGXH8fKul4nOisZIZcSzHZ/lyQ5PVug2z47YHby06yVKNCUM9hnMwn4LMTWS+YvqmgQWIYSoZ0o0JTy84WHOXjnL/c3v592+75bZrigKD298mIi0CJ7r9BxPd3xaTyXVn/SCdB7860GS85Lxd/HnTNoZijRFuFq6srDfwkr35wmJC2FWyCyKNcUEegfyQb8PZNLFOiaTHwohRD1TOjaLChXrL6wnLCGszPajyUeJSIvA3Nich1o9pKdS6peThRMf9vsQY5Uxx1OOU6Qp4q7Gd/HH/X9UqfPxAK8BLB64GDMjM7bFbuOlXS9RrC6uhZKLmiAtLEIIYUDe3f8uq86uwsfOh9X3r8bc2ByAF3e+yLbYbYz1G8v83vP1W0g9+/3s73xz7Bsea/sYE9tNvGUn5YrYe3kvM3bMoEhTxIAmA/h4wMeYGZuV2adEU0J8TjzRWdHEZsUSnRVNTFYMsVmxmBqbMrTpUEY2H4mXXeUfM7+TyS0hIYSop7KLshm5diQp+SlM6ziNZzs9y6XsSwwPHo5G0RB8fzAtHFvou5gNzr74fczYMYNCdSF3Nb6LAV4DiMmK0S2Xsi9Roty+43FXt66MbD6Se5reg7WpdR2UvH6TwCKEEPXY39F/8/KulzE1MuXP+//kz3N/8nPEz/T27M03g7/Rd/EarP0J+3l++/MUqMsfddjc2BxvO2+a2jXF29YbHzsffOx8SMxNZP2F9eyL36d7pNvSxJLBPoMZ2Xwk3dy7VbsVqKGSwCKEEPWYoihM3z6dPZf30MmlE+czzpNbnMtXg77iriZ36bt4DdqhxEN8c/wbzIzNtIHE1gcfex+a2jXF1cr1lsEjMTeRDRc3sC5yHdFZ0br1ntae3N/ifu5vfn+VRiZuyCSwCCFEPXc55zKj1o7S/Wvf196XtSPXyr/U6wFFUTiWcox1F9axJWoLOcU5um1dXLvQ0rElVqZWWJlYYWVqhaWJpe79f18dzB2wMLHQY21qlwQWIYRoAJaeXMqiw4sAeLPXm3fs00H1WUFJATtid7DuwjpC40PLjAJcEWZGZjzU6iGe6PDELQfDq69qLbAEBQWxZs0azpw5g6WlJb1792bhwoW0atXqlsf98ccfvPnmm0RHR+Pn58fChQu59957ddsVRWHevHl89913ZGRk0KdPH5YsWYKfn1+FyiWBRQjREBVripm+bTqZRZksG7oMSxNLfRdJVENibiLbY7dzpeAKeSV55BXnkVeSR35xPvkl+WXW5ZXkkVucqxth2MLYgvGtx/N4+8dxtHDUc01qTq0FlqFDh/Lwww/TvXt3SkpKeP311zl58iQRERFYW5ffG3rfvn3069ePoKAg7rvvPlasWMHChQs5cuQI7du3B2DhwoUEBQXx008/4evry5tvvsmJEyeIiIjAwuL2TWESWIQQQjQ0iqIQmhDKl0e/5HjqcQCsTKyY0GYCk9pNwt7cXs8lrL46uyWUkpKCq6sru3btol+/8ie2GjduHLm5uWzYsEG3rlevXnTq1Imvv/4aRVHw9PTkpZde4uWXXwYgMzMTNzc3li1bxsMPP3zbckhgEUII0VApisKey3v44ugXnE4/DYCtqS2PtXuMx9o8ho2ZTYXPlV2Uzam0U5xIOcHlnMv42Png5+hHS8eWuFi61NhUChVVme/vas32lJmZCYCT041zXpQKDQ1l1qxZZdYNGTKEtWvXAhAVFUViYiKBgYG67fb29vTs2ZPQ0NByA0thYSGFhYW6n7OysqpTDSGEEMJgqVQq+jXpx12N72JH7A6+CP+CyIxIvgr/il9P/8rkdp
N5pPUjWJlalTmuRFPC+SvnOZF6guMpxzmReoKozKib9qOxN7fHz8FPF2D8HP3wc/C74bz6UuXAotFomDlzJn369NHd2ilPYmIibm5uZda5ubmRmJio21667mb7/FdQUBALFiyoatGFEEKIekelUjHIZxADvQfyd/TffBX+FdFZ0Xx65FN+jviZKe2n4G7tzomUE5xIPUFEWkS5Y8o0tmmMv7M/TWybEJsdy7kr54jJiiGzMJNDSYc4lHTohv1LQ8wT7Z/QW4CpcmCZPn06J0+e5N9//63J8lTInDlzyrTaZGVl4eUlz7YLIYRo+IxURgzzHcZgn8FsitrEkvAlXMq5xEeHPrphX1tTW9o7t6eDSwf8nf1p79yeRpaNbtivUF3IxYyLnM84z/kr5zl35Rznr5wnJT+FyzmXuZxzmdD4UJ7t+GxdVLFcVQoszz33HBs2bGD37t00adLklvu6u7uTlJRUZl1SUhLu7u667aXrPDw8yuzTqVOncs9pbm6Oubl5VYouhBBCNAgmRibc3/x+hvkOY33kelacWYGxyhh/F386OHegg0sHmto1rdDYPebG5rRp1IY2jdqUWZ9RkMH5DG2AySrKwtjIuLaqc1uVCiyKovD8888THBxMSEgIvr6+tz0mICCA7du3M3PmTN26rVu3EhAQAICvry/u7u5s375dF1CysrIICwtj2rRplSmeEEIIcccxNTJlbMuxjG05tsbP7WDhQHf37nR3717j566sSgWW6dOns2LFCtatW4etra2uj4m9vT2WltrxASZOnEjjxo0JCgoC4IUXXqB///58/PHHDB8+nFWrVnHo0CG+/fZbQHtPbubMmbzzzjv4+fnpHmv29PRk1KhRNVhVIYQQQtRXlQosS5YsAWDAgAFl1i9dupTJkycDEBsbi5HRtean3r17s2LFCv73v//x+uuv4+fnx9q1a8t01H3llVfIzc3lqaeeIiMjg759+7Jly5YKjcEihBBCiIZPhuYXQgghhF5U5vtbZtESQgghhMGTwCKEEEIIgyeBRQghhBAGTwKLEEIIIQyeBBYhhBBCGDwJLEIIIYQweBJYhBBCCGHwJLAIIYQQwuBJYBFCCCGEwZPAIoQQQgiDJ4FFCCGEEAZPAosQQgghDJ4EFiGEEEIYPAksQgghhDB4EliEEEIIYfAksAghhBDC4ElgEUIIIYTBk8AihBBCCIMngUUIIYQQBk8CixBCCCEMngQWIYQQQhg8CSxCCCGEMHgSWIQQQghh8CSwCCGEEMLgSWARQgghhMGTwCKEEEIIgyeBRQghhBAGTwKLEEIIIQyeBBYhhBBCGDwJLEIIIYQweBJYhBBCCGHwJLAIIYQQwuBJYBFCCCGEwZPAIoQQQgiDJ4FFCCGEEAZPAosQQgghDJ4EFiGEEEIYPAksQgghhDB4EliEEEIIYfAqHVh2797NiBEj8PT0RKVSsXbt2lvuP3nyZFQq1Q1Lu3btdPvMnz//hu2tW7eudGWEEEII0TBVOrDk5ubSsWNHvvzyywrt/+mnn5KQkKBb4uLicHJy4sEHHyyzX7t27crs9++//1a2aEIIIYRooEwqe8CwYcMYNmxYhfe3t7fH3t5e9/PatWu5cuUKjz/+eNmCmJjg7u5e2eIIIYQQ4g5Q531YfvjhBwIDA/Hx8Smz/vz583h6etKsWTMmTJhAbGzsTc9RWFhIVlZWmUUIIYQQDVedBpb4+Hg2b97Mk08+WWZ9z549WbZsGVu2bGHJkiVERUVx1113kZ2dXe55goKCdC039vb2eHl51UXxhRBCCKEnKkVRlCofrFIRHBzMqFGjKrR/UFAQH3/8MfHx8ZiZmd10v4yMDHx8fFi0aBFPPPHEDdsLCwspLCzU/ZyVlYWXlxeZmZnY2dlVuh5CCCGEqHtZWVnY29tX6Pu70n1YqkpRFH788Ucee+yxW4YVAAcHB1q2bElkZGS5283NzTE3N6+NYgohhBDCANXZLaFdu3YRGRlZbovJf+Xk5HDhwgU8PDzqoGRCCCGEMHSVDiw5OTmEh4cTHh4OQFRUFOHh4bpOsnPmzGHixIk3HPfDDz/Qs2dP2rdvf8O2l19+mV27dhEdHc2+ffsYPXo0xsbGjB8/vrLFE0IIIUQDVOlbQocOHWLgwIG6n2fNmgXApEmTWLZsGQkJCTc84ZOZmcnq1av59NNPyz3npUuXGD9+PGlpabi4uNC3b1/279+Pi4tLZYsnhBBCiAaoWp1uDUVlOu0IIYQQwjBU5vtb5hISQgghhMGTwCKEEEIIgyeBRQghhBAGTwKLEEIIIQyeBBYhhBBCGDwJLEIIIYQweBJYhBBCCGHwJLAIIYQQwuBJYBFCCCGEwZPAIoQQQgiDJ4FFCCGEEAZPAosQQgghDJ4EFiGEEEIYPAksQgghhDB4EliEEEIIYfAksAghhBDC4ElgEUIIIYTBk8AihBBCCIMngUUIIYQQBk8CixBCCCEMngQWIYQQQhg8CSxCCCGEMHgSWIQQQghh8CSwCCGEEMLgSWARQgghhMGTwCKEEEIIgyeBRQghhBAGTwKLEEIIIQyeBBYhhBBCGDwJLEIIIYQweBJYhBBCCGHwJLAIIYQQwuBJYBFCCCGEwZPAIoQQQgiDJ4FFCCGEEAZPAosQQgghDJ4EFiGEEEIYPAksQgghhDB4EliEEEIIYfAqHVh2797NiBEj8PT0RKVSsXbt2lvuHxISgkqlumFJTEwss9+XX35J06ZNsbCwoGfPnhw4cKCyRRNCCCFEA1XpwJKbm0vHjh358ssvK3Xc2bNnSUhI0C2urq66bb/99huzZs1i3rx5HDlyhI4dOzJkyBCSk5MrWzwhhBBCNEAmlT1g2LBhDBs2rNIXcnV1xcHBodxtixYtYurUqTz++OMAfP3112zcuJEff/yR1157rdLXEkIIIUTDUmd9WDp16oSHhweDBw9m7969uvVFRUUcPnyYwMDAa4UyMiIwMJDQ0NByz1VYWEhWVlaZRQghhBANV60HFg8PD77++mtWr17N6tWr8fLyYsCAARw5cgSA1NRU1Go1bm5uZY5zc3O7oZ9LqaCgIOzt7XWLl5dXbVdDCCGEEHpU6VtCldWqVStatWql+7l3795cuHCBTz75hJ9//rlK55wzZw6zZs3S/ZyVlSWhRQghhGjAaj2wlKdHjx78+++/ADg7O2NsbExSUlKZfZKSknB3dy/3eHNzc8zNzWu9nEIIIYQwDHoZhyU8PBwPDw8AzMzM6Nq1K9u3b9dt12g0bN++nYCAAH0UTwghhBAGptItLDk5OURGRup+joqKIjw8HCcnJ7y9vZkzZw6XL19m+fLlACxevBhfX1/atWtHQUEB33//PTt27OCff/7RnWPWrFlMmjSJbt260aNHDxYvXkxubq7uqSEhhBBC3NkqHVgOHTrEwIEDdT+X9iWZNGkSy5YtIyEhgdjYWN32oqIiXnrpJS5fvoyVlRX+/v5s27atzDnGjRtHSkoKc+fOJTExkU6dOrFly5YbOuIKIYQQ4s6kUhRF0XchqisrKwt7e3syMzOxs7PTd
3GEEEIIUQGV+f6WuYSEEEIIYfAksAghhBDC4ElgEUIIIYTBk8AihBBCCIMngUUIIYQQBk8CixBCCCEMngQWIYQQQhg8CSxCCCGEMHgSWIQQQghh8CSwCCGEEMLgSWARQgghhMGTwCKEEEIIgyeBRQghhBAGTwKLEEIIIQyeBBYhhBBCGDwJLEIIIYQweBJYhBBCCGHwJLAIIYQQwuBJYBFCCCGEwZPAokcbjyew6J+zZBcU67soQgghhEEz0XcB7lSHY67w/MojaBRYdyyez8d3xr+Jg76LJeqIoihoFDA2Uum7KEIIUS9IC4se5BepefmPY2gUMDFSEZOWx9gl+/jh3ygURdF38fQiMbOA3w/GMf3XIwz6OIQ3157kdEKWvotVa97ZeJq2c7dwOCZd30URQoh6QaU0gG/IrKws7O3tyczMxM7OrkbPnZJdSGZ+ES1cbWvsnPPXn2LZvmjc7Sz445kA3tkYwd+nkgAIbOPKhw90xNHarMauZ4gKitUcjE5n19kUdp9P4VxSTrn7dfVxZEJPb+7t4IGFqXEdl7J2nIrP5L7P/0VRoLO3A2um9UalkpYWIcSdpzLf3xJYbiE6NZdHfwhDo1EInt4HNzuLap9zb2QqE74PA2D5lB70a+mCoij8vD+GdzacpkitwcPegk8f7kwPX6dqX89QKIrChZRcdp1LYfe5FMKi0igo1ui2G6nAv4kD/Vu60Mrdlo3HE/j7VCIlGu2vp72lKQ90bcIjPb1p7mKjr2pUm6IoTPg+jH0X0nTrvp/YjcC2bnoslRBC6IcElhqSkVfEmCX7uJiSSztPO35/OgBr86p3+8kqKGboJ7uJzyzg0V7evDOqQ5ntp+IzeX7FUS6m5mKkghcDW/LswBb1up+DRqOweNs5Vh+5zOWM/DLb3OzM6d/ShX4tXejbwhkHq7KtSsnZBfxx6BIrwmLLHNu7eSMm9PRhcFs3zEzq113NbRFJPLn8EGYmRgzv4EHw0cu0drdl04y7MKrHn7MQQlSFBJYaFJuWx+iv9pKWW8TdrV359rGumBhX7Uvy5T+O8efhS3g7WbH5hbvKDT+5hSW8ufYka45eBqBPi0Z88lAnXGugdUcfPtl6jk+3nwfAzMSInr5O9PPThpSWbjYVuhWi1ijsPpfCr2Ex7DiTzNVGF5xtzHiomxcTevnQ2MGyNqtRI4rVGoYs3s3FlFymDWjOM/2a0/eDHWQXlPDpw50Y2amxvosohBB1SgJLDTsSe4Xx3+6nsETDo728eXtk+0r3OdgakcTU5YdQqeD3pwPo3vTWt3v+PHyJN9eeJL9YjbONGR8/1In+LV2qU40698+pRJ76+TAAc+9ry/ge3liaVa8fyuWMfH47EMuqg3EkZxcCYGdhwuaZ/Qw+tPy0L5p560/RyNqMkNkDsLUw5cudkXz491l8GlmxbVZ/TKsYhoUQoj6qzPe3/N+xArp4O7J4XCdUKvhlfyzf74mq1PHpuUXMWXMCgKfuanbbsALwQNcm/PV8X1q725KaU8SkHw+wcMsZitWa2x5rCCKTc5j1+zEAJvduypS+vtUOKwCNHSyZdU8r9r52N18/2gU/VxuyCkr4cMuZap+7NmXmF7N42zkAXhzcElsLU0D7Z+NsY0ZMWh5/HLqkzyIKIYRBk8BSQcM6ePDGvW0AeHfTaTafSKjQcYqi8L+1J0jNKaSlmw0vDm5Z4Wu2cLVh7fQ+PNrLG4AlIRe499M9/HMqscYff07OLqCgWF0j58oqKOapnw+RU1hCD18n3hjepkbOez1TYyOGtvdg0UOdAFgbHs+xuIwav05N+XJnJFfyivFzteHh7l669dbmJkwf2AKAz7afr7HPQAghGhoJLJXwRF9fJgb4ADDzt3COxF657THrj8Wz6UQiJkYqFj3UqdKP5lqYGvPOqA4smdAFBytTzifn8NTPhxm7ZB9hF9Nuf4JbUBSFsItpPPnTIXq8u50hi3dzIaX8x4srSqNRmPVbOBdTcvGwt+CrCV1q9TZHhyb2jOms7fvx7sbTBjmOTUxaLsv2RgPw+vA2N/SBeqSnN40dLEnMKuDn0Bg9lFAIIQyfBJZKUKlUzL2vLYNau1JYomHqT4eIScu96f6JmQW8ufYkAM/f7Uf7xvZVvvawDh7smj2Q6QObY2FqxJHYDMZ9u5/JSw8QEV+5AdZK1Bo2HI9n1Jd7Gfftfrad1o4BE5OWx5iv9rG/GkHo0+3n2XY6GTMTI755rCvONuZVPldFvTykFeYmRhyITteNZ2NIFm45Q5Faw11+zgwopx+SuYkxLwT6AfBVSKRM1SCEEOWQwFJJJsZGfDa+M+0b25GWW8TjSw+SkVd0w36KovDq6uNkFZTg38SeZwc2r/a17S1NmT2kNbtnD+TRXt6YGKkIOZvCvZ/t4YVVR4lNy7vl8TmFJfzwbxT9PwzhuRVHOXYpE3MTIx7p6c3qaQF09nYgM7+Yx34II/ho5ftT/HMqUfdE0HujO9TZVAOeDpZMvasZAO9vPk1RieH08zkUnc6mE4kYqeCN4W1u2ll7TOfGNHOx5kpeMT/8W7k+UkIIcSeQwFIF1uYm/DipO572FlxMzeWp5YcpLCnb92DlgTh2nUvBzMSIjx/sWKO3RVztLHhnVAe2zerPiI6eAKwLj+fuj0OYu+4kydkFZfZPzCwgaPNpAoK28/aGCC5n5ONkbcbMQD/2vXY3743uQFcfJ1ZO7cW9HdwpViu8+NsxFm87V+FbLP/tZPtA1yY1Vt+KeGZAc5xtzIlOy+OX/YZxW0WjUXh742kAxnX3orX7zXvAmxgb8dLgVgB8vyeK9NwbQ7AQQtzJ5LHmajibmM0DS/aRXVjC/R09WTyuE0ZGKmLT8hj66W7yitT8b3gbnrz6r//acvJyJh/+fZZd51IAsDQ15om+vgxs7cqvYTGsD4/XjRjbzNmaJ+9qxpgujcvtT6PRKCz8+wzf7LoIaP/lHzS2A+YmN+97k1VQzKgv93IxJZcevk78+mRPvTyeuyIslteDT+BgZcqulwdib2Va52W43rrwy7ywKhxrM2N2zh6Aq+2tx9LRaBRGfPEvp+KzmHqXL28Mb1tHJRVCCP2Qx5rrSCt3W5Y82hUTIxXrj8WzaOs51BqFl/84Rl6Rmh6+Tkzp41vr5Wjf2J6fpvRg5dRedPJyIL9YzRc7Ixm7ZB9rjlymRKPQw9eJ7yd2Y9us/jzS0/umnX+NjFTMGdaG90Z3wNhIxZqjl5n4w4Fyb3tB3XeyvZWHujWhpZsNGXnFfL7jvF7KUKqgWM0HW84CMG1A89uGFdD+2c8eom1l+Sk0hoTM/NscIYQQdw4JLNXU18+Z98Zoh9j/YmckU5Yd5EB0OlZmxnz0QMc6HW49oHkjgp/tzTePdaWFqw1GKrjP34N10/vw+9MBBLZ1q3B5HunpzY+Tu2NjbkJYVDpjluwrt4PxZzuudbL9+tG66WR7MybGRrx+9dHzn0Kjb9khurb98G8UlzPy8bS3qFQLW/+WLvRo6kRRiYbPd0TWYgmFEKJ+kcBSAx7q5sWMu7VjaZTelvnf
8LZ4N7Kq87KoVCqGtHNn26z+nH/3Xr54pAsdvRyqdK7+LV34c1qAtq9OSi6jv9rH4Zhrj3JvjUhi8TZtS8a7o9pX+To1aUArV+7yc6ZYrbBQT4PJpWQXsiTkAgCzh7aq1KPsKpWKl6+2svx+MI7oVP2FLiGEMCQSWGrIi4NbMvrqeCADW7kwvofXbY6ofTUxaWJrdzuCp/ehfWM70nOLGP/dfjYeTyAyOYcXfwsHYFKADw920399S70xvA1GKth0IpHDMel1fv1Ptp0jp1D7dNjIjpWfH6iHrxMDWrlQolH45OrouHeCpKwCnv31MC+sOsqn286z/lg8Jy9nklNYou+i1Rv5RWq+3nVBgq5okKTTbQ1SaxSOxF6hYxOHejeL8O3kFZUwY+VRtp1OBsDZxpzUnEK9drK9lddWH2fVwTg6ezuwZlrvSs/9VFVnE7MZ9uluNIp2zqgevrefhqE8Jy9nct/n/6JSwaYZd9HGQ3+/13WhWK3h4W/3l2nBu56rrTm+ztY0c7GhmbM1vs7W+LpY4+1kZXC/e/q06J+zfLYjEg97C9Y916dCfaeE0Kda7XS7e/duRowYgaenJyqVirVr195y/zVr1jB48GBcXFyws7MjICCAv//+u8w+8+fPR6VSlVlat25d2aLpnbGRiu5NnRpcWAGwMjPhm8e68XifpgCk5hTiYW/Bl4/or5Ptrcy6pyVWZsYcjc1gw/GKTaNQE97bdBqNAkPbuVc5rIC2I/Vwfw8UBT7+p+G3sgRtOsPhmCvYmpvw0uCWPNStCd2bOuJsYwZAcnYhYVHprDwQy7ubTvPk8kMM+ngX/vP/4cXfwvn3fCoaTb3/t1e1FJaoWXEgFoCEzAKe+fnG4RYMVUGxmqBNp1l5tfxClMeksgfk5ubSsWNHpkyZwpgxY267/+7duxk8eDDvvfceDg4OLF26lBEjRhAWFkbnzp11+7Vr145t27ZdK5hJpYsmapmxkYp5I9rRwtWGDccSeGN4G1xs9dfJ9lZcbS14pn9zFm09x8ItZxjc1q3S0yJUVsjZZHadS8HUWMVrw6ofuGcNbsnmEwlsO53EkdgrdPF2vOm+Go3CmcRsDkSlERaVTnRaHn1bNOKBrl60cretdllK5RWVEBGfhX8NtiJuPJ7Aj3u1g+V9/FBH7mnnXmZ7Zn4xUam5RKXmEJWSy8XU3Ks/55JXpCb46GWCj17G096CMV2aMLZrE3ydrWukbPXJlpOJpOYU4WxjTlGJmiOxGbwRfJIPH/CvsxbGqihWa3huxRFd6212QTFP9av+QJui4anWLSGVSkVwcDCjRo2q1HHt2rVj3LhxzJ07F9C2sKxdu5bw8PAqlcNQbgkJw5JfpGbgRyEkZhUwZ1hrnu5fe/8TzCoo5oEl+ziXlMMTfX15876aGUPllT+P8fuhS/Ru3ogVU3vp1qs1ChHxWYRFpbH/YjoHo9PJzC9/SP8Oje15oGsT7u/oiaO1WaXLkF1QzI4zyWw+kUjIuWQKijX09HVi6ePdsTKr3j8sIpNzGPnFv+QWqXm6fzPmDKv4RJmKohAel8Gfhy+x/lg82QXX+rp09XHkga5NGO7vgZ1F7Y/HU6LWYKRS1elTgf815qu9HInNYNbglnTycmDy0gNoFOpkLKiq0mgUXvrjGMFHL2NspEJ9tZXs/TEdeLiHt55LJ+pCZb6/67wZQ6PRkJ2djZNT2eby8+fP4+npiYWFBQEBAQQFBeHtXf4vbGFhIYWFhbqfs7IqN5eOuDNYmhnz8pBWvPzHMb7YGcmD3bxwqsIX9q3EZ+SzdG8UKw/EkVNYgoOVKTPu9qux878Q2JK1R+PZdyGNn/fHkFNQwoGoNA5FXyH7P51RrcyM6erjSK9mjWjiaMmmEwlsP53MicuZnLicyTsbIwhs48YDXZvQv6XLDZMwXi8jr4itEUlsOZnInvOpFKnLTncQFpXO40sPViu05BWV8Oyvh8ktUtPT14nZ97Sq1PEqlYrO3o509nbkzfvasu10En8evsTucykcjrnC4ZgrzF9/iiHt3HmgaxP6tHCukY7o11NrFH4/FMdHf5/FytyYN4e3ZXBbtzpv0Th5OZMjsRmYGqt4uIcXrrYWvDG8LW9viOC9Tafxc7OlfznzWOmToigs+OsUwUcvY2Kk4tuJXQmLSuebXReZE3wCWwtThvt71Mi14jPyUanAw96yRs4n9KPOW1g++OAD3n//fc6cOYOrqysAmzdvJicnh1atWpGQkMCCBQu4fPkyJ0+exNb2xubs+fPns2DBghvWSwuL+K/rR4+dFODDgpHta+S8p+Iz+W73RTYcT9CNIuznasOCke3o3dy5Rq5RasFfp1h6dbbn69mam9Dd14mevk708HWifWP7G/oTpeUUsv5YPH8evsSp6ybJdLYxZ0yXxozt0kR3yyg1p5B/TiWx+WQCoRfSdPUCaOZizbD27gxr70GRWsOkHw6QXVhCD18nllUhtCiKwszfwlkXHo+LrTkbZ/StsQ6iSVkFrD16mT8PX+J88rXZx93tLHi4hxeTAppWqaXpv47GXmHe+lMcv5RZZv3AVi7Mv78dPo3q7rZUaUvc/R09+Wy89lZ76Xxmvx+6hK2FCeum96GZi02dlel2Fm09x2fbz6NSweJxnRjZqTGKovB68ElWHojF1FjF95O6VytoKYrCL2GxvPXXKYyNVCx7vAe9mjWqwVqI6qpMC0udBpYVK1YwdepU1q1bR2Bg4E33y8jIwMfHh0WLFvHEE0/csL28FhYvLy8JLKJc+y6k8sh3YZgYqfj7xX40r+L/tBVFYff5VL7bfZF/I1N16wOaNeKp/s0Y0NKlVv5lnZpTyMgv9pJzNSD09HWiV7NGtPGwq1SLQUR8FquPXGLt0cukXTdXkX8TeyxNjTkYnc71/VZbu9sytL0793bwwM/VpkzdjsZeYeJ1oWXp5O5Ym1c8tPwcGs2b67RfIiun9qpWB+WbURSF45cyWX3kEuvC43W3zKzMjHmkhzdP3tUMd/vKh6SU7EI+2HKGPw5rJwi1NTfhhUA/0nOL+G7PRYrVCmYmRjzTvznPDmhe632nMvKK6PnedgpLNKyeFkBXn2t/loUlah75LozDMVdo5mxN8PQ+2Fvqd8oK0A6s+PaGCADeHtWex3r56LapNQovrDrKhuMJWJoa88uTPcrUqaLyi9S8EXyCNUcv69ZZmRnz8xNVO5+oHQYZWFatWsWUKVP4448/GD58+G337969O4GBgQQFBd12X+nDIm7nyZ8Osu10MoFt3Ph+UrdKHVtUouGvY/F8t+ciZxKzAW0H5Hs7ePDUXc3o0MS+NopcRulf05oIRMVqDSFnU/jzcBzbTyeXaUnxb2LP0KstKbfruFrV0BIel8GDX++jWK3wxr1tmNqv9vtXFJao+ftUEl+HXCAiQdvSZGZsxNiujXm6X3OaVqCTbrFaw8+hMXyy9ZzudtwDXZvw6tDWus7nF1JymLfulC7QejlZMn9EOwa1caulmsG3uy/w3qYztPWwY+OMvjf8jqRkF3L/F/+SkFlAv5YuLJ3cvcZ
vjVXGH4fimP3ncQBevqclz5VzC7WoRMPU5YfYdS4FWwsTfnsqgLaeFf9/e1RqLtN+OcyZxGyMr055sTcylT3nU7E1N+GXJ3saxECXwgADy8qVK5kyZQqrVq1i5MiRtz1vTk4O3t7ezJ8/nxkzZtx2fwks4nYik3MYsng3ao3Ck319cbIxw8zYCHMTI0yNjTAz0S6691dfj8RcYeneaBKztDNgW5kZ83B3bx7v0xQvp7ofybimpeUUsulkImq1hkFt3CpdpzKhpam2I+6tQkt6bhEjPv+Xyxn5DG3nzpJHu9Rpfw9FUdh1LoWvdl7gQLR2UEEjFdzbwYNpA5rTzrP88LnvQirz15/iXJL2FlOHxvYsGNmu3Ce3FEVh88lE3t4QQUKm9vcmsI0rc+9rV+OjX6s1CgM+2klcej4Lx3ZgXPfy+/2dvJzJA1/vo6BYo9eJNf8+lci0Xw6jUWDqXb68fm+bm37++UVqHvshjEMxV3C2MeOPZ3pX6Omvf04l8tLvx8guLMHZxpwvHulMr2aNyC9SM3npAcKi0rGzMGHlU71u+nmLulOrgSUnJ4fISO0cJ507d2bRokUMHDgQJycnvL29mTNnDpcvX2b58uWA9jbQpEmT+PTTT8s8Bm1paYm9vfaX5eWXX2bEiBH4+PgQHx/PvHnzCA8PJyIiAheX29+/lMAiKmLuupMsD42p0rEutuY83qcpE3r46H0WaEMTHpfBY9+H3Ta0qDUKjy87yO5zKfg6W7PuuT518gTPzRyKTuerkAvsOJOsWzewlQvPDmxB96baWwbxGfm8u+k0G6+O5eNoZcrsIa0Z193rtq0UuYUlfL4jku/3XKREo2BuYsSzA1rwdP9mNXabaMeZJKYsO4S9pSn75wzC0uzm5914PIHpK44A8NGDHXmga5MaKUNF7Y1M5fGlBylSa3ioWxMWjr3949aZ+cWM/3Y/EQlZNHaw5M9pATftOFui1vDRP+f4epd2WoxuPo58OaELbnbXbvvlFpYw8ccDHI65gqOVKaueCqjRx/5F5dVqYAkJCWHgwIE3rJ80aRLLli1j8uTJREdHExISAsCAAQPYtWvXTfcHePjhh9m9ezdpaWm4uLjQt29f3n33XZo3r9hjqBJYREXkFJbwdcgF0vOKKC7RUKTWUFSioVitobDk2vtr6xXsLE2Z0NObkZ08MTep3b4I9Vl4XAaP/RBGdsHNQ8vibedYvO08FqZGrJ3eh9buhvF3NSI+iyW7LrDxeLyuD0/3po509XHip33R5BerMVLBo718mDW4JQ5WleuwG5mczdx1p9h3IQ0Abycr5t/flrtbV/820aQfD7DrXEqFW01KR8I1MzZi5VO96Opz87F9alJ4XAaPfLefvCI1Q9u588UjnW/5lNr1UrILeeibUKJSc2nuYs0fz/S+4Wm/1JxCnl9xlNCL2j/jKX18mXNv63IHtcwqKOax78M4dikTZxszVj0VQAtXw+mMfKeps1tChkICixD6d31o6d7UkaWP98DmamgJOZvM48sOakfufbAjY+v4X/cVEZ2ayze7L7L68KUyj3F3b+rIgvvbV6oPxX8pisLGEwm8vSGCpCztAwOvDWvNM9UYGygqNZeBH4WgUkHIywMq9FSSRqPwzC+H+SciCWcbc/56vk+tP+p7Limbh74JJSOvmL4tnPlhcrdKh//LGfk8sGQfCZkFdGhsz4qpPbG92jp3OOYK0389QmJWAVZmxnzwgD/3+Xve8nyZecWM/07bcuNqa87vTwdUqB+TqHkSWIQQenEsLoNH/xNaMvOLGf7ZHjLyinmkpzfvje6g72LeUlJWAd/vuciB6CtM6dOU+zt61lg/m5zCEj7+5yxL90ZjbKTiz2cC6HyLEYxv5e0NEfzwbxQDW7mw9PEeFT4ut7CEsUv2cSYxmw6N7fn96YBb3koqUWvILighq6CYrPwSLM2McLQyw8HK7La3xeLS83jg630kZRXS2duBX57oWamnya4XmZzDQ9+Ekp5bRE9fJ36a0oNVB2J5Z+NpSjQKzV2s+eaxrrRwrdgtnvTcIsZ/u5+zSdl42lvw29MBDaJfWn0jgUUIoTfXh5ZuPo4UaxSOxWXQobE9fzwTUOuP+Ro6RVGYsSqcv47F4+1kxcYZfXWtBRWVV1RCz/e2k11QwtLHuzOwlWuljo9Lz2Pkl3tJzy2ibwtn/NxsyMrXhpLM/GKySpeCkpvOlq1SgYOlKU7WZmUWRyvtq4OVGZ/vOE9MWh6t3Gz57elelb6d9l8nLmUy/rv95BSW4GFvoevUPLyDBwsf8Ne16FVUSnYh474N5WJKLl5Olvz2VACeDpVrccopLKFEral23e5UEliEEHp1fWgBsLc0ZcPzfeVfsFdl5hdz76d7uJyRz5jOjVk0rlOljl95IJY5a07g08iKnS8NqNKUAPsvpvHo92FlHmu/FWszY2wsTCgo1tx0GojyeDtZ8eczAbja1czAgGEX05j44wEKSzSYGKmYc28bpvRpWuVWsMTMAsZ9G0pMWh6+ztb89lSvW5Y1NaeQg1HpHIjWTokREZ+FRoGOTewZ1MaNu1u70s7TzqDnbzIkEliEEHp3/FIGE74PI6ewhB8ndWdg68q1AjR0h6LTeeibUDSKdqTXUZ0bV+g4RVEY9ukeziRmV3ueoO2nk9h5NhlbC1PsLEyxtzTFztJE+6r72RRbC5MyHViL1Roy8oq5kldEWk4RV/KKSM8tu1zJK8LWwoTXhrap8ce590am8nNoDE/c5at7oqs6LmfkM+6bUC5dyaeFqw2rnuqFs405iqJw6Uo+B6LSORClDSgXU3Nvez4Pewvubu3KoDau9G7ufMe3Kt6KBBYhhEFIzSkkI6+owv0K7jSlT07ZmJuwacZdFfpiPxClDToWpkaEzQmUx+xrSFx6Hg99E0pCZgEt3Wxo5W7Hwah03RhMpVQqaOVmS/emTnT3daJHUyeMVLDzbDLbTifz7/lU8ovVuv0tTI3o28JZ1/riVkMtTQ2FBBYhhKgHStQaHv52P4dirtDZ24E/ng647eO+z604wobjCTzc3Yv3x/rXUUnvDFGpuTz0TSgp2demfjExUuHfxF4XTrr5ON0yJBYUqwm9mMb200nsOJ1MfGbZwNOhsT3tG9vRxNGKJo6WNHG0wsvREhdb8zvyNpIEFiGEqCcuXclj2Kd7yC4oYcbdLZh1i1mrk7MK6P3+Dko0Chtn9JWRWmvBhZQcvtwZiY+TNd19Hens5XjLp6huRVEUTidks+NMEttOJ3PsUgY3+8Y1NzGi8XUBpjTQNHexoY2HbYMNMxJYaopGAzH/QuJJCHi25s4rhBDXWX8snhkrj2KkgpVTe9HzJjMKl95C6ubjyJ/TetdxKUV1pWQXsjcylei0XOLS87l0JY9LV/JJyMznVn2fvZ2suM/fg/v8PRtceJHAUlNSzsKXPcDIBGadBhvpNCiEqB0v/3GMPw9fwtPegs0v9LvhtkNRiYY+C3eQkl3IZ+M7c3/HWw+OJuqPYrWGxMwC4tK1ASbuapC5dCWPk5ezyvSJae5izYiOnt
zn71ntEXoVRdF7+JHAUpO+GwSXD8Hgt6HP7SdiFEKIqsgpLOG+z/YQnZbHvR3c+fKRshND/nUsnudXHsXZxpx9r92NmUnFhrYX9VteUQnbTyfz17F4Qs6lUFRybRTmNh52jOjowQh/z5sOGVCs1hCXnsfFlFyiUnO5mJqje5+SU0gja3Pc7c1xt7PAzc5C+2qvfXW3166zszCptWAjgaUmHV4Gf70Azi1h+gFtF3EhhKgFx+IyGLtkHyUahQ/G+vNQdy/dtoe+DuVAdDozBvkxa3BLPZZS6EtWQTFbTyWx4Xg8e86nlhlDp6OXAyP8PbAyMyHqulASm55X4bF2bsbKzFgXaH6a0qNGw7IElppUkAUft4LiPHhiK3hVfAhsIYSorCUhF1i45QyWpsZsmNGX5i42nE7IYtinezAxUrH3tbvl0VjBldwitpxKZMPxeEIvpN2yD4ylqTG+ztb4uljT/OprM2cb3OwsSM0pJCmrgMSsApIyta+JWYW699cPEmhjbsLJBUNqtB6V+f6u2qQOdxILO2g7Co6tgCPLJbAIIWrV0/2ased8CvsupPHCqqOsmdaH5aExAAxp5y5hRQDgaG3G+B7ejO/hTXJ2AVtOJrI1IgkjlYpmLtY0c7ammYsNzVyscbO1uOloyO72FrRvfPOnzfKL1LpAk1NQ/jQNdUVaWCoiZh8sHQZmNvDSWTCXqciFELUnMbOAoZ/uJiOvmPE9vFl79DL5xWp+e+rmTxAJUR9V5vtbem1VhHcAODWHohyIWKvv0gghGjh3ews+uDoo3MoDseQXq2nlZksP3+oPQy9EfSWBpSJUKuj8qPb9kZ/1WxYhxB3hnnbuPNrLW/fzxN4+en8EVQh9ksBSUZ0eAZUxxO2H1PP6Lo0Q4g7wxr1t6eLtgJ+rDaM6VWxyRCEaKgksFWXrDn6Dte+PSiuLEKL2WZoZs3pab7bO6o+1uTwjIe5sElgqo/Nj2tfwlaAuvvW+QghRA+Q2kBBaElgqo+UQsHaB3GQ4v1XfpRFCCCHuGBJYKsPYFDo+rH0vt4WEEEKIOiOBpbI6T9S+nvsbshP1WxYhhBDiDiGBpbJcWoJXT1DUcGyVvksjhBBC3BEksFRF6ZgsR3+G+j9QsBBCCGHwJLBURbvRYGoNaZEQu1/fpRFCCCEaPAksVWFuqw0tAEd/0W9ZhBBCiDuABJaq6nJ1TJZTwVCYrd+yCCGEEA2cBJaq8uoJjfygOBdOrtF3aYQQQogGTQJLVV0/IaLcFhJCCCFqlQSW6ug4Xjsh4qUDkHJW36URQgghGiwJLNVh6wYth2rfH1mu37IIIYQQDZgEluoqvS10bJVMiCiEEELUEgks1eV3D9i4QV4qnNui79IIIYQQDZIEluoyNtH2ZQE4IhMiCiGEELVBAktNKL0tFLkVshL0WxYhhBCiAZLAUhOc/cA7ABQNHFuh79IIIYQQDY4ElprS+erIt0d/kQkRhRBCiBomgaWmtB0JZjaQfhEit+u7NEIIIUSDIoGlppjbQPsx2ve/joVvB8C/n2gDjBBCCCGqpdKBZffu3YwYMQJPT09UKhVr16697TEhISF06dIFc3NzWrRowbJly27Y58svv6Rp06ZYWFjQs2dPDhw4UNmi6d/A/0GzgaAygvijsG0+fNYZvr4Ldn8EqZH6LqEQQghRL1U6sOTm5tKxY0e+/PLLCu0fFRXF8OHDGThwIOHh4cycOZMnn3ySv//+W7fPb7/9xqxZs5g3bx5HjhyhY8eODBkyhOTk5MoWT79s3WDiWnjpLNz3CTQboB26P/E47HgbvugKX/WGkIWQfEbfpRVCCCHqDZWiVL2HqEqlIjg4mFGjRt10n1dffZWNGzdy8uRJ3bqHH36YjIwMtmzRDrTWs2dPunfvzhdffAGARqPBy8uL559/ntdee+225cjKysLe3p7MzEzs7OyqWp3akZsGZzdCxDq4GAKakmvbnFtByyHg0hqcW4JzC7B01FtRhRBCiLpUme9vk9ouTGhoKIGBgWXWDRkyhJkzZwJQVFTE4cOHmTNnjm67kZERgYGBhIaGlnvOwsJCCgsLdT9nZWXVfMFrinUj6DJRu+RfgTObtOHlwg5IPatdyuzvAo38tI9KO/tde+/gox2kTgghhLgD1fo3YGJiIm5ubmXWubm5kZWVRX5+PleuXEGtVpe7z5kz5d82CQoKYsGCBbVW5lpj6QidJ2iX/Aw497d2pufUc9r+LdnxkJuiXWL3lT3WyBScmmlvO1k4gKWD9nyl78tbZ24PRjXcr7o4XzszdVY8WDuDrTvYuIOJWc1eRwghhLhOvfwn+5w5c5g1a5bu56ysLLy8vPRYoiqwdICO47RLqcJsSIvUhpfUc5B2Xvs+7TyUFJTfInMrRqbg2FQbdBo1176Wvrf3AiPjmx+rLtaWJTlC298mOQKST8OVKO0Aef9l7aINL7ae2lc7z7I/WzqAiQUYm4GJORib13yYEkII0WDVemBxd3cnKSmpzLqkpCTs7OywtLTE2NgYY2Pjcvdxd3cv95zm5uaYm5vXWpn1xtwWPDtrl+tpNJB1SRsgctOgIEPbQlOQob3NVN774jzQFGvDTtp5OP+faxmbXQszTs3ByVd7bMppbTBJPa89vjyWjuDgDXlXIDtBu19py1DiiYrX18hEG1xMzMq+mlpcDUAe2oklbd2vteTYumlfTS0qfh0hhBD1Xq0HloCAADZt2lRm3datWwkICADAzMyMrl27sn37dl3nXY1Gw/bt23nuuedqu3j1g5GRNiA4eFf8mJJCyEnSjgOTdkH7Wvr+ShSoi67eijp383OY2YBrm6tLW23nYNe2YOMKKpV2H40G8tO1t4iyE7W3tbIStEGmdMlK0LYeqQvLnl9Tol2Kcyv/Z2LhcDXEuGnLY+EAFvZlb4/9d5257bVyCyGEqFcqHVhycnKIjLw2nkhUVBTh4eE4OTnh7e3NnDlzuHz5MsuXLwfgmWee4YsvvuCVV15hypQp7Nixg99//52NGzfqzjFr1iwmTZpEt27d6NGjB4sXLyY3N5fHH3+8Bqp4hzIxvxZymg0ou02jhsxLV0PMBUi7qA0x5rbaQOLaVhtS7Jvc/gveyEjbl8XaGTz8b72vomiDUknhda+FUFJU9rU4H3KStWEnJ0kbhHKSrgagJO0+BRnaJaUSj4erjLUBxqqRtrxWjbQtOdbOYOV8rR6l760agbFpxc8vhBCi1lQ6sBw6dIiBAwfqfi7tSzJp0iSWLVtGQkICsbGxuu2+vr5s3LiRF198kU8//ZQmTZrw/fffM2TIEN0+48aNIyUlhblz55KYmEinTp3YsmXLDR1xRQ0xMgZHH+3SfODt968pKpU2SJlU43aeomiDSnYS5CRqw0xuKhRk/udW2X9e1UWgqLWtQfnp2ttkFWFhr70FpuvMfPW9pcNN1l9dqnPLqqToasvUZci8rL0dmJWgDVA+vaFJNzC1rPr5hRCiHqrWOCyGwqDHYRGGoThfG17yr0BeGuSlaoNOXtrV/jel71O12/LSyu9cXFGmVlfDixNYOV57b
+kIVldfjc2vCyaXtLfVsi5rW5e4xV9LI1No3FUbXnz6gFcPsJDfeyFE/VOZ728JLEKUR6O5Fm50HZqvXAs9+Vf+s/667Yq6+tc3Ntc+aWXf5OoTVx6QGQfRe7UtS9dTGYG7vza8+PQG7wDt+D9CCGHgJLAIoS8aDRRmXQ0v6VdDz/Xv069tKym89gi4XROwbwx2Vxdr5/L7DymKtr9RzL6ry164En3jfo38tOezcr7aT6fRdf10XK6+b6S9nSUdkYUQeiKBRYg7SeZliA3VhpeYUO2j6RVlZKINLw7e1x6p9+ysHV35VuP0CCFEDZDAIsSdLDcNEsKv9c3JTbnaZ+e6vju5qVCUffNzmNmAR8eyIcapmbTGCCFqlEHNJSSEqGPWjaDFoNvvV1xwrdNx6nmIPwrxRyDhGBTlXG2x2Xttfwt78OgEnp20fWrKTAvheO29DOonhKgF0sIihChLo9YOKHj5yNUQc1Q7gvF/B/67GROLsiHGqpF2cL/SQf5srhvwz8ZN5qES4g4mt4SEEDVLXaydTyr+KCSe/M/TUxlXB/LLrNqj4JaOVwOM27WpGEqfjCp9tXWXQfyEaIDklpAQomYZm2r7tHh0vPk+pU9I/XfgvrxU7dgyOUlXB/xLuvazpvjaI+G3HLVYpW2RuT7E2HloJ9e0cbs2x5RVI5lUU4gGSgKLEKJmGBldHQHYARwrsL9Gc3XU4sTrQkzi1bmo/jMnlabk6j5J2g7FN6MyvnarSTfX1HWBxtzmxlnD/zv5pom5PCElhAGSwCKE0A8jI+2ov1ZO4Nb25vtpNNpWmqz4qyMDx5edWLM06OSmagftK92WUJ2ymYCpNTh4aWc1d2yqndHcsSk4+oK9l/S9EaKOSWARQhg2I6OrrSauQKeb76cu1j7xpGuxSbpuzqmrPxflXptos6Sg7CSc19OUQGEmJGVC0skbr6Uy0o5CXBpgdIHGV/tqYV9z9RdCABJYhBANhbHp1VGDPSt/rKJoA8/1s4YXZEFGrHZk4SvRkH719Uo0lORrt2XEQtTuG89n6VQ2wFz/ausu49kIUQUSWIQQQqXS3uIxMYPSycTtPMG19Y37Koq2tSY96j9hJkr7mpeqnXrhcjpcPnzj8SaW2vDi0VE7iWXjruDWXm4xCXEb8lizEELUpMLs61pjosqGmcy48h/9NjYHD/+rAaYbNO4iIwuLO4I81iyEEPpibqsNHx7+N25TF2tvI6We144qfPmwdsm/ApcOapdSlo7XAoxbu2u3u6xdwVj+1y3uPNLCIoQQ+qQokH7xWni5fBgSjt98ZGGV0dVHtcsZk8bOQ/uzmQ2YWmof4TaxkLFphMGSkW6FEKI+KynSPp10+TBcOgRpkVcf107UPrpdWcbm2uBiejXAlIYZU0uwdgbv3uDbD1zbSrgRdUoCixBCNEQatfbR7f+OSXP9YHs5iVCUpx1FuLKsGkHTvtD0LvDtD85+Ve9HU5yvna7BqpFMqyBuSvqwCCFEQ2RkrH0s2tb99vuqS7RjzZQUaMPD9a8lBdrZukvytbejovZAbKh2jqiIddoFtKMD+951NcD00443o1Jpg1NOEmReKrtkXdZ2LM68pD0XACptK46Nu3bEYVv3q+/dr93asr06IrGJ+c1qI4S0sAghhEB7Gyr+iDa8RO2CuAM39qOxa6LtQ5Mdrx1cr6Y5NYemfbQByacP2Deu+WsIgyK3hIQQQlRPcQFcOnA1wOyGy4fKhhSVsbbTr30T7WLX+Op7L23QsG8C5nbalpbS0YezE6++Tyy7LidJO+rwfzn6XrtF1bSP9pyiQZHAIoQQomYV5mhbYIzNtcHB1r3mJolUFG2wuXwYovdA9L+QcOzGMWscm14LMF49tI9+m1hqbyVVZ8wajVo7bUNxPhTnaqdrsHAAaxd5hLyWSWARQghRvxVkQmzYdQEmvPxB9wBQlX0K6r9PQplYAIq2M3Lx1eX69+W17pSe18pJ27/G2uXq7N+u12YEL11n7aLdryY7F5cUakOcpkR7K66BPr0lnW6FEELUbxb20PIe7QLauZ3iSgPMXm2A0d2iUrQdiEvyq3lRFZhZg7EZFGRoA1Je2nUdiG/D3E7b6mPV6OpM5I2080qVzkpu6aTdXpynnV289Nyly/XrinKundfUClxagUsbcL1usWt8R42GLC0sQggh6p/SCStL8rWtEf99Euq/ryojbWuLqTWYWf3n/dXl+ltLGjXkpWv71+QmQ07Kde+vW3KTtUGDWvgqNTIBVDd/RN3cThtkXNtcDTOttWErP0PbQlVQ+pp547r8DO2fnYW9NkSVWZzKWed4LYTVYEiSFhYhhBAN2/UTVtYGI2OwcdEut6PRaINAXrp24su8NO37vLSrP5e+v6JdTK20j3pbNSq7/Hedhb02OF2JhuQISDmjfU0+A2nnoTDrxikdKiv/ivb8FaKCuWnaDtd6IIFFCCGEqA4jo2u3fWqasQk4t9Au3H9tfUkRpF+4FmCSIyD1nHabhf3VxeHae0uHG9ebWmpvtZUGqfz0696Xs6iMa66jdRVIYBFCCCHqGxOza31Z6oqmCtNC1KCG2e1YCCGEEDVLj60rIIFFCCGEEPWABBYhhBBCGDwJLEIIIYQweBJYhBBCCGHwJLAIIYQQwuBJYBFCCCGEwZPAIoQQQgiDJ4FFCCGEEAZPAosQQgghDJ4EFiGEEEIYPAksQgghhDB4EliEEEIIYfAksAghhBDC4JnouwA1QVEUALKysvRcEiGEEEJUVOn3dun3+K00iMCSnZ0NgJeXl55LIoQQQojKys7Oxt7e/pb7qJSKxBoDp9FoiI+Px9bWFpVKdct9s7Ky8PLyIi4uDjs7uzoqYd2TejYsUs+G5U6o551QR5B6VpeiKGRnZ+Pp6YmR0a17qTSIFhYjIyOaNGlSqWPs7Owa9C9XKalnwyL1bFjuhHreCXUEqWd13K5lpZR0uhVCCCGEwZPAIoQQQgiDd8cFFnNzc+bNm4e5ubm+i1KrpJ4Ni9SzYbkT6nkn1BGknnWpQXS6FUIIIUTDdse1sAghhBCi/pHAIoQQQgiDJ4FFCCGEEAZPAosQQgghDN4dF1i+/PJLmjZtioWFBT179uTAgQP6LlK1zJ8/H5VKVWZp3bq1bntBQQHTp0+nUaNG2NjYMHbsWJKSkvRY4tvbvXs3I0aMwNPTE5VKxdq1a8tsVxSFuXPn4uHhgaWlJYGBgZw/f77MPunp6UyYMAE7OzscHBx44oknyMnJqcNa3N7t6jl58uQbPtuhQ4eW2ac+1DMoKIju3btja2uLq6sro0aN4uzZs2X2qcjvaWxsLMOHD8fKygpXV1dmz55NSUlJXVblpipSxwEDBtzweT7zzDNl9jHkOgIsWbIEf39/3eBhAQEBbN68Wbe9vn+OpW5Xz4bwWf7X+++/j0qlYubMmbp1Bvd5KneQVatWKWZmZsqPP/6onDp1Spk6dari4OCgJCUl6btoVTZv3jylXbt2SkJCgm5JSUnRbX/mmWcULy8vZfv27cqhQ4eUXr16Kb1799ZjiW9v06ZNyhtv
[... base64-encoded PNG image data truncated ...]",
      "text/plain": [
-       "<Figure size 432x288 with 1 Axes>"
+       "<Figure size 640x480 with 1 Axes>"
" ] }, - "metadata": { - "needs_background": "light" - }, + "metadata": {}, "output_type": "display_data" } ], @@ -1045,7 +1021,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 13, "id": "af9d6df1", "metadata": {}, "outputs": [ @@ -1087,123 +1063,123 @@ " \n", " \n", " 0\n", - " 7.400000\n", - " 0.080079\n", - " 0.000000\n", - " 65.800000\n", - " 0.025529\n", - " 67.119106\n", - " 440.000000\n", - " 1.038980\n", + " 14.2\n", + " 0.08\n", + " 1.66\n", + " 65.8\n", + " 0.346\n", + " 289.0\n", + " 9.0\n", + " 1.03898\n", " 2.720000\n", - " 1.080000\n", - " 8.0\n", - " 7\n", + " 0.22\n", + " 14.2\n", + " 6\n", " \n", " \n", " 1\n", - " 5.088797\n", - " 0.112499\n", - " 0.370000\n", - " 0.763677\n", - " 0.009000\n", - " 288.824821\n", - " 98.000000\n", - " 0.987110\n", - " 3.240000\n", - " 0.220000\n", " 14.2\n", - " 8\n", + " 1.10\n", + " 0.00\n", + " 0.6\n", + " 0.346\n", + " 289.0\n", + " 440.0\n", + " 0.98711\n", + " 2.720000\n", + " 1.08\n", + " 14.2\n", + " 7\n", " \n", " \n", " 2\n", - " 3.800000\n", - " 1.100000\n", - " 0.000000\n", - " 0.600000\n", - " 0.009000\n", - " 2.000000\n", - " 9.000000\n", - " 1.038980\n", + " 3.8\n", + " 0.08\n", + " 1.66\n", + " 65.8\n", + " 0.346\n", + " 289.0\n", + " 9.0\n", + " 0.98711\n", " 3.820000\n", - " 0.220000\n", + " 0.22\n", " 8.0\n", - " 4\n", + " 5\n", " \n", " \n", " 3\n", - " 3.800000\n", - " 0.080000\n", - " 1.659603\n", - " 0.600000\n", - " 0.034734\n", - " 2.000000\n", - " 9.000000\n", - " 0.987110\n", - " 3.775879\n", - " 1.080000\n", - " 9.5\n", + " 3.8\n", + " 0.08\n", + " 0.00\n", + " 0.6\n", + " 0.346\n", + " 289.0\n", + " 440.0\n", + " 0.98711\n", + " 2.720000\n", + " 0.22\n", + " 14.2\n", " 7\n", " \n", " \n", " 4\n", - " 5.700000\n", - " 0.330000\n", - " 0.213874\n", - " 10.937306\n", - " 0.050000\n", - " 39.064968\n", - " 147.790987\n", - " 0.997247\n", - " 3.330984\n", - " 0.380000\n", - " 8.7\n", - " 6\n", + " 14.2\n", + " 1.10\n", + " 0.00\n", + " 0.6\n", + " 0.346\n", + " 289.0\n", + " 9.0\n", + " 0.98711\n", + " 3.771223\n", + " 1.08\n", + " 14.2\n", + " 7\n", " \n", " \n", " 5\n", - " 14.200000\n", - " 0.080000\n", - " 0.000000\n", - " 0.600000\n", - " 0.009000\n", - " 2.000000\n", - " 9.000000\n", - " 0.987110\n", - " 2.916428\n", - " 0.220055\n", - " 9.5\n", - " 5\n", + " 14.2\n", + " 1.10\n", + " 1.66\n", + " 65.8\n", + " 0.009\n", + " 2.0\n", + " 440.0\n", + " 0.98711\n", + " 2.720000\n", + " 1.08\n", + " 14.2\n", + " 7\n", " \n", " \n", " 6\n", - " 14.200000\n", - " 0.087887\n", - " 1.660000\n", - " 0.600000\n", - " 0.108297\n", - " 49.000000\n", - " 65.466909\n", - " 0.987117\n", - " 2.720090\n", - " 0.220006\n", + " 3.8\n", + " 0.08\n", + " 0.00\n", + " 65.8\n", + " 0.346\n", + " 2.0\n", " 9.0\n", - " 6\n", + " 1.03898\n", + " 2.720000\n", + " 1.08\n", + " 8.0\n", + " 7\n", " \n", " \n", " 7\n", - " 8.870765\n", - " 1.099817\n", - " 1.657142\n", - " 12.921528\n", - " 0.025276\n", - " 288.846488\n", - " 438.337342\n", - " 0.996196\n", - " 2.724725\n", - " 0.220049\n", - " 10.2\n", - " 5\n", + " 14.2\n", + " 0.08\n", + " 1.66\n", + " 0.6\n", + " 0.346\n", + " 289.0\n", + " 9.0\n", + " 0.98711\n", + " 3.820000\n", + " 0.22\n", + " 14.2\n", + " 7\n", " \n", " \n", "\n", @@ -1211,37 +1187,37 @@ ], "text/plain": [ " fixed acidity volatile acidity citric acid residual sugar chlorides \\\n", - "0 7.400000 0.080079 0.000000 65.800000 0.025529 \n", - "1 5.088797 0.112499 0.370000 0.763677 0.009000 \n", - "2 3.800000 1.100000 0.000000 0.600000 0.009000 \n", - "3 3.800000 0.080000 1.659603 0.600000 
0.034734 \n", - "4 5.700000 0.330000 0.213874 10.937306 0.050000 \n", - "5 14.200000 0.080000 0.000000 0.600000 0.009000 \n", - "6 14.200000 0.087887 1.660000 0.600000 0.108297 \n", - "7 8.870765 1.099817 1.657142 12.921528 0.025276 \n", + "0 14.2 0.08 1.66 65.8 0.346 \n", + "1 14.2 1.10 0.00 0.6 0.346 \n", + "2 3.8 0.08 1.66 65.8 0.346 \n", + "3 3.8 0.08 0.00 0.6 0.346 \n", + "4 14.2 1.10 0.00 0.6 0.346 \n", + "5 14.2 1.10 1.66 65.8 0.009 \n", + "6 3.8 0.08 0.00 65.8 0.346 \n", + "7 14.2 0.08 1.66 0.6 0.346 \n", "\n", - " free sulfur dioxide total sulfur dioxide density pH sulphates \\\n", - "0 67.119106 440.000000 1.038980 2.720000 1.080000 \n", - "1 288.824821 98.000000 0.987110 3.240000 0.220000 \n", - "2 2.000000 9.000000 1.038980 3.820000 0.220000 \n", - "3 2.000000 9.000000 0.987110 3.775879 1.080000 \n", - "4 39.064968 147.790987 0.997247 3.330984 0.380000 \n", - "5 2.000000 9.000000 0.987110 2.916428 0.220055 \n", - "6 49.000000 65.466909 0.987117 2.720090 0.220006 \n", - "7 288.846488 438.337342 0.996196 2.724725 0.220049 \n", + " free sulfur dioxide total sulfur dioxide density pH sulphates \\\n", + "0 289.0 9.0 1.03898 2.720000 0.22 \n", + "1 289.0 440.0 0.98711 2.720000 1.08 \n", + "2 289.0 9.0 0.98711 3.820000 0.22 \n", + "3 289.0 440.0 0.98711 2.720000 0.22 \n", + "4 289.0 9.0 0.98711 3.771223 1.08 \n", + "5 2.0 440.0 0.98711 2.720000 1.08 \n", + "6 2.0 9.0 1.03898 2.720000 1.08 \n", + "7 289.0 9.0 0.98711 3.820000 0.22 \n", "\n", " alcohol quality \n", - "0 8.0 7 \n", - "1 14.2 8 \n", - "2 8.0 4 \n", - "3 9.5 7 \n", - "4 8.7 6 \n", - "5 9.5 5 \n", - "6 9.0 6 \n", - "7 10.2 5 " + "0 14.2 6 \n", + "1 14.2 7 \n", + "2 8.0 5 \n", + "3 14.2 7 \n", + "4 14.2 7 \n", + "5 14.2 7 \n", + "6 8.0 7 \n", + "7 14.2 7 " ] }, - "execution_count": 12, + "execution_count": 13, "metadata": {}, "output_type": "execute_result" } @@ -1261,9 +1237,18 @@ "A conditional variable `cond` can be provided to the `fit` method. It can be either a column name in the dataset or a custom array. The model will then learn the conditional distribution of the dataset given `cond`. In this case, an array must be provided as the `cond` argument of the `generate` method." 
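For reference, the conditional workflow described above amounts to the minimal sketch below. It is an illustration only, not part of the diff: it assumes the plugin introduced by this patch series is registered under the name `"ddpm"`, that the tutorial's `X` is the UCI wine-quality DataFrame (the exact loading step is an assumption), and that the `n_iter` value and the chosen quality level are arbitrary.

```python
import numpy as np
import pandas as pd
from synthcity.plugins import Plugins

# Wine-quality data as used in this tutorial; the exact source file is
# an assumption, and any DataFrame with a "quality" column would do.
X = pd.read_csv(
    "https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-white.csv",
    sep=";",
)

# The TabDDPM-based plugin added by this patch series; "ddpm" is assumed
# to be the name it registers under in the generic plugin catalogue.
model = Plugins().get("ddpm", n_iter=1000)

# Conditioning on an existing column: pass the column name to fit().
model.fit(X, cond="quality")

# Because the model was fitted with a condition, generate() expects an
# explicit array of conditioning values, one per requested sample.
cond = np.full(8, 6)  # e.g. ask for 8 samples conditioned on quality == 6
synthetic = model.generate(count=8, cond=cond).dataframe()
print(synthetic)
```

Passing a custom array to `fit` instead of a column name follows the same pattern, with the array length matching `len(X)`.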
] }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "191d1b6f", + "metadata": {}, + "source": [ + "Use a column name as the `cond` argument in the `fit` method:" + ] + }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 14, "id": "56a1fc7e", "metadata": {}, "outputs": [ @@ -1271,47 +1256,31 @@ "name": "stderr", "output_type": "stream", "text": [ - "[2023-03-31T01:07:08.859587+0200][12004][INFO] Encoding fixed acidity 8821222230854998919\n", - "[2023-03-31T01:07:08.873767+0200][12004][INFO] Encoding volatile acidity 3689048099044143611\n", - "[2023-03-31T01:07:08.885765+0200][12004][INFO] Encoding citric acid 735380040632581265\n", - "[2023-03-31T01:07:08.896357+0200][12004][INFO] Encoding residual sugar 2442409671939919968\n", - "[2023-03-31T01:07:08.904579+0200][12004][INFO] Encoding chlorides 7195838597182208600\n", - "[2023-03-31T01:07:08.914577+0200][12004][INFO] Encoding free sulfur dioxide 3309873879720413309\n", - "[2023-03-31T01:07:08.922581+0200][12004][INFO] Encoding total sulfur dioxide 8059822526963442530\n", - "[2023-03-31T01:07:08.930580+0200][12004][INFO] Encoding density 3625281346475756911\n", - "[2023-03-31T01:07:08.939216+0200][12004][INFO] Encoding pH 4552002723230490789\n", - "[2023-03-31T01:07:08.947216+0200][12004][INFO] Encoding sulphates 4957484118723629481\n", - "[2023-03-31T01:07:08.956217+0200][12004][INFO] Encoding alcohol 3711001505059098944\n", - "[2023-03-31T01:07:08.964215+0200][12004][INFO] Encoding quality 3457201635469827215\n", - "[2023-03-31T01:07:17.078379+0200][12004][INFO] Step 100: MLoss: 0.9932 GLoss: 0.9775 Sum: 1.9707\n", - "[2023-03-31T01:07:24.055012+0200][12004][INFO] Step 200: MLoss: 0.2957 GLoss: 0.9254 Sum: 1.2211\n", - "[2023-03-31T01:07:32.461826+0200][12004][INFO] Step 300: MLoss: 0.0748 GLoss: 0.8407 Sum: 0.9155\n", - "[2023-03-31T01:07:39.522162+0200][12004][INFO] Step 400: MLoss: 0.0289 GLoss: 0.7444 Sum: 0.7733\n", - "[2023-03-31T01:07:47.110402+0200][12004][INFO] Step 500: MLoss: 0.0292 GLoss: 0.6655 Sum: 0.6947\n", - "[2023-03-31T01:07:54.622795+0200][12004][INFO] Step 600: MLoss: 0.0229 GLoss: 0.5844 Sum: 0.6073000000000001\n", - "[2023-03-31T01:08:01.951234+0200][12004][INFO] Step 700: MLoss: 0.0218 GLoss: 0.5572 Sum: 0.5790000000000001\n", - "[2023-03-31T01:08:09.957993+0200][12004][INFO] Step 800: MLoss: 0.0091 GLoss: 0.531 Sum: 0.5401\n", - "[2023-03-31T01:08:18.931373+0200][12004][INFO] Step 900: MLoss: 0.0114 GLoss: 0.5286 Sum: 0.5399999999999999\n", - "[2023-03-31T01:08:26.898063+0200][12004][INFO] Step 1000: MLoss: 0.0099 GLoss: 0.5259 Sum: 0.5358\n", - "[2023-03-31T01:08:34.593930+0200][12004][INFO] Step 1100: MLoss: 0.0106 GLoss: 0.5196 Sum: 0.5302\n", - "[2023-03-31T01:08:41.818482+0200][12004][INFO] Step 1200: MLoss: 0.0105 GLoss: 0.5072 Sum: 0.5176999999999999\n", - "[2023-03-31T01:08:49.426481+0200][12004][INFO] Step 1300: MLoss: 0.0086 GLoss: 0.5112 Sum: 0.5198\n", - "[2023-03-31T01:08:56.953344+0200][12004][INFO] Step 1400: MLoss: 0.0106 GLoss: 0.516 Sum: 0.5266000000000001\n", - "[2023-03-31T01:09:04.509760+0200][12004][INFO] Step 1500: MLoss: 0.0075 GLoss: 0.5062 Sum: 0.5136999999999999\n", - "[2023-03-31T01:09:11.742216+0200][12004][INFO] Step 1600: MLoss: 0.0098 GLoss: 0.5012 Sum: 0.511\n", - "[2023-03-31T01:09:19.870988+0200][12004][INFO] Step 1700: MLoss: 0.0088 GLoss: 0.499 Sum: 0.5078\n", - "[2023-03-31T01:09:27.578035+0200][12004][INFO] Step 1800: MLoss: 0.0163 GLoss: 0.4956 Sum: 0.5119\n", - "[2023-03-31T01:09:34.406045+0200][12004][INFO] Step 1900: MLoss: 0.0046 GLoss: 
0.4955 Sum: 0.5001\n", - "[2023-03-31T01:09:41.645411+0200][12004][INFO] Step 2000: MLoss: 0.017 GLoss: 0.5008 Sum: 0.5178\n" + "[2023-04-06T19:10:28.307332+0200][45392][INFO] Encoding fixed acidity 8821222230854998919\n", + "[2023-04-06T19:10:28.316302+0200][45392][INFO] Encoding volatile acidity 3689048099044143611\n", + "[2023-04-06T19:10:28.328835+0200][45392][INFO] Encoding citric acid 735380040632581265\n", + "[2023-04-06T19:10:28.337818+0200][45392][INFO] Encoding residual sugar 2442409671939919968\n", + "[2023-04-06T19:10:28.346502+0200][45392][INFO] Encoding chlorides 7195838597182208600\n", + "[2023-04-06T19:10:28.355523+0200][45392][INFO] Encoding free sulfur dioxide 3309873879720413309\n", + "[2023-04-06T19:10:28.367907+0200][45392][INFO] Encoding total sulfur dioxide 8059822526963442530\n", + "[2023-04-06T19:10:28.379128+0200][45392][INFO] Encoding density 3625281346475756911\n", + "[2023-04-06T19:10:28.388190+0200][45392][INFO] Encoding pH 4552002723230490789\n", + "[2023-04-06T19:10:28.396086+0200][45392][INFO] Encoding sulphates 4957484118723629481\n", + "[2023-04-06T19:10:28.404089+0200][45392][INFO] Encoding alcohol 3711001505059098944\n", + "[2023-04-06T19:10:28.412665+0200][45392][INFO] Encoding quality 3457201635469827215\n", + "[2023-04-06T19:10:35.956508+0200][45392][INFO] Step 100: MLoss: 1.0404 GLoss: 0.9809 Sum: 2.0213\n", + "[2023-04-06T19:10:42.296829+0200][45392][INFO] Step 200: MLoss: 0.4041 GLoss: 0.9524 Sum: 1.3565\n", + "[2023-04-06T19:10:50.537642+0200][45392][INFO] Step 300: MLoss: 0.1456 GLoss: 0.9186 Sum: 1.0642\n", + "[2023-04-06T19:10:59.461444+0200][45392][INFO] Step 400: MLoss: 0.0757 GLoss: 0.898 Sum: 0.9737\n" ] }, { "data": { "text/plain": [ - "" + "" ] }, - "execution_count": 13, + "execution_count": 14, "metadata": {}, "output_type": "execute_result" } @@ -1322,30 +1291,28 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 15, "id": "3fcb9493", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "" + "" ] }, - "execution_count": 14, + "execution_count": 15, "metadata": {}, "output_type": "execute_result" }, { "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAXQAAAEGCAYAAAB1iW6ZAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjYuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/P9b71AAAACXBIWXMAAAsTAAALEwEAmpwYAABHmElEQVR4nO3dd3ib1dn48e/RtizvHY84e5G9EwiEsCmrQNkkjPLCWyhddMFbeFt+BQodb0vL3quUPQIEwkoCZJO9nG073tuSrXl+f0h2nOCZ2JZl35/r0mVZOtJz67F86+h+znOO0lojhBAi8hnCHYAQQojuIQldCCH6CUnoQgjRT0hCF0KIfkISuhBC9BOmcG04OTlZ5+bmhmvzQggRkdatW1eutU5p7b6wJfTc3FzWrl0brs0LIUREUkodaOs+KbkIIUQ/IQldCCH6CUnoQgjRT4Sthi6EEO3xer0UFBTQ2NgY7lDCwmazkZWVhdls7vRjJKELIfqkgoICYmJiyM3NRSkV7nB6ldaaiooKCgoKGDJkSKcfJyUXIUSf1NjYSFJS0oBL5gBKKZKSkrr87aTDhK6UylZKfa6U2qaU2qqUur2VNqcopWqUUhtCl991KQohhGjFQEzmTY7ltXem5OIDfq61Xq+UigHWKaU+0VpvO6rdcq3197ocQRflVeXx4b4PuXbstcTb4nt6c0IIETE67KFrrYu01utD1+uA7UBmTwfWloO1B3li8xMUOYvCFYIQQjR79tlnufXWW8MdBtDFGrpSKheYDKxq5e7ZSqmNSqkPlVLj2nj8TUqptUqptWVlZV2PFkiMSgSgsrHymB4vhBD9VacTulLKAbwB/ERrXXvU3euBwVrricA/gLdbew6t9eNa62la62kpKa1ORdChRJskdCFE79i/fz+jR49m0aJFjBw5kquuuoqlS5cyd+5cRowYwerVq7/T/tRTT2XChAksWLCAgwcPAvDaa69xwgknMHHiRObNmwfA1q1bmTFjBpMmTWLChAnk5eUdd7ydGraolDITTOYvaa3fPPr+lglea/2BUupfSqlkrXX5cUd4FEnoQgw8//veVrYdOrofeXzGDorl7vNaLSYcYffu3bz22ms8/fTTTJ8+nZdffpkVK1bw7rvv8sc//pELL7ywue1tt93GwoULWbhwIU8//TQ//vGPefvtt/n973/PkiVLyMzMpLq6GoBHH32U22+/nauuugqPx4Pf7z/u19SZUS4KeArYrrX+Sxtt0kPtUErNCD1vxXFH1wqH2YHZYKaisUeeXgghjjBkyBDGjx+PwWBg3LhxLFiwAKUU48ePZ//+/Ue0/eabb7jyyisBuOaaa1ixYgUAc+fOZdGiRTzxxBPNiXv27Nn88Y9/5IEHHuDAgQNERUUdd6yd6aHPBa4BNiulNoRu+y2QA6C1fhS4BLhFKeUDGoDLdQ+tPq2UItGWSGWD9NCFGCg605PuKVartfm6wWBo/t1gMODz+Tr1HI8++iirVq1i8eLFTJ06lXXr1nHllVcyc+ZMFi9ezDnnnMNjjz3GqaeeelyxdpjQtdYrgHYHRGqtHwYePq5IuiDRliglFyFEnzNnzhz+/e9/c8011/DSSy9x0kknAbBnzx5mzpzJzJkz+fDDD8nPz6empoahQ4fy4x//mIMHD7Jp06aeT+h9UVJUkpRchBB9zj/+8Q+uu+46HnzwQVJSUnjmmWcAuOOOO8jLy0NrzYIFC5g4cSIPPPAAL7zwAmazmfT0dH77298e9/ZVD1VGOjRt2jR9rAtc3LniTlYXr+aTSz7p5qiEEH3F9u3bGTNmTLjDCKvW9oFSap3Welpr7SNyLpckWxKVDZWE68NICCH6oohM6Im2RDwBD06vM9yhCCFEnxGZCV3OFhVCiO+IzITe4uSiQ/WHwhyNEEL0DRGd0N/e/TZnvnEm60rWhTkiIYQIv4hO6O/teQ+Aj/Z9FM5whBCiT4johO4JeAD49OCnBHQgnCEJIQaQRYsW8frrr4c7jO+IyIRuMVqIMccAcPGIiylrKGNT2aYwRyWEEOEVkQkdgiNdUqJS+OnUn2IymFh6YGm4QxJC9EN/+MMfGDVqFCeeeCJXXHEFDz300BH3f/rpp0yePJnx48dz/fXX43a7Afj1r3/N2LFjmTBhAr/4xS+A1qfR7U4Reeo/wFVjrsJhdhBnjWPOoDksObCEn037GQYVsZ9RQoi2fPhrKN7cvc+ZPh7Ovr/dJmvWrOGNN95g48aNeL1epkyZwtSpU5vvb2xsZNGiRXz66aeMHDmSa6+9lkceeYRrrrmGt956ix07dqCUap4yt7VpdLtTxGa/K0ZfwXnDzgPg3CHnUuwsZn3J+jBHJYToT7766isuuOACbDYbMTExnHfeeUfcv3PnToYMGcLIkSMBWLhwIcuWLSMuLg6bzcYNN9zAm2++id1uB1qfRrc7RWwPvaX5OfOxm+y8v/d9pqW3OsWBECKSddCT7mtMJhOrV6/m008/5fXXX+fhhx/ms88+a3Ua3aSkpG7bbsT20FuKMkVx2uDT+Hj/x+yt2YvH7wl3SEKIfmDu3Lm89957NDY2Ul9fz/vvv3/E/aNGjWL//v3s3r0bgBdeeIGTTz6Z+vp6ampqOOecc/jrX//Kxo0bgcPT6P7+978nJSWF/Pz8bo23X/TQAc4bdh7v7nmXC96+gCmpU3ju7OfCHZIQIsJNnz6d888/nwkTJpCWlsb48eOJi4trvt9ms/HMM89w6aWX4vP5mD59OjfffDOVlZVccMEFNDY2orXmL38JLvbW2jS63Skip89tjdaatSVreTPvTRbvXczyy5cTZ43r+IFCiD6pr0yfW19fj8PhwOVyMW/ePB5//HGmTJnSK9seENPntkYpxfT06Vw68lI0mjXFa8IdkhCiH7jpppuYNGkSU6ZM4eKLL+61ZH4s+k3Jpcn45PFEmaJYWbSS0wafFu5whBAR7uWXXw53CJ3Wb3roTcxGM1PTprK6eDVLDyzl1R2vhjskIYToFf0uoQPMypjFvpp9/PSLn3L/6vupcdeEOyQhhOhx/TKhzxk0B4ViXNI4fNrHsoJl4Q5JCCF6XL9M6CMSRvD+Re/zwjkvkBqVyqcHPw13SEII0eP6ZUIHyInNwWwwMz9nPl8VfkWDryHcIQkhIozD4Qh3CF3SbxN6kwU5C2j0N7Ly0MpwhyKEED2q3yf0yamTAdhRtSPMkQghIpXWmjvuuIMTTjiB8ePH8+qrwdFzRUVFzJs3j0mTJnHCCSewfPly/H4/ixYtam7717/+tdfi7Hfj0I9mM9lItadSUFcQ7lCEEMfogdUPsKOyeztloxNH86sZv+pU2zfffJMNGzawceNGysvLmT59OvPmzePll1/mzDPP5M4778Tv9+NyudiwYQOFhYVs2bIFoEemyW1Lv++hA2THZJNf172T4AghBo4VK1ZwxRVXYDQaSUtL4+STT2bNmjVMnz6dZ555hnvuuYfNmzcTExPD0KFD2bt3L7fddhsfffQRsbGxvRZnv++hQzChryhcEe4whBDHqLM96d42b948li
1bxuLFi1m0aBE/+9nPuPbaa9m4cSNLlizh0Ucf5T//+Q9PP/10r8QzYHro5Q3luLyucIcihIhAJ510Eq+++ip+v5+ysjKWLVvGjBkzOHDgAGlpafzwhz/kxhtvZP369ZSXlxMIBLj44ou59957Wb++9xbeGTA9dICC+gJGJowMczRCiEhz0UUX8c033zBx4kSUUvzpT38iPT2d5557jgcffBCz2YzD4eD555+nsLCQ6667jkAgAMB9993Xa3F2mNCVUtnA80AaoIHHtdb/d1QbBfwfcA7gAhZprfvMenBNCT2/Ll8SuhCi0+rr64HgbK4PPvggDz744BH3L1y4kIULF37ncb3ZK2+pMz10H/BzrfV6pVQMsE4p9YnWeluLNmcDI0KXmcAjoZ99QnMPXUa6CCH6sQ5r6Frroqbetta6DtgOZB7V7ALgeR20EohXSmV0e7THKM4aR4wlRka6CCH6tS4dFFVK5QKTgVVH3ZUJtMyWBXw36aOUukkptVYptbasrKyLoR4fGbooROQJ14pqfcGxvPZOJ3SllAN4A/iJ1rq2y1sCtNaPa62naa2npaSkHMtTHDNJ6EJEFpvNRkVFxYBM6lprKioqsNlsXXpcp0a5KKXMBJP5S1rrN1tpUghkt/g9K3Rbn5HpyOTTg58S0AEMakCM1hQiomVlZVFQUEBvf5vvK2w2G1lZWV16TGdGuSjgKWC71vovbTR7F7hVKfVvggdDa7TWRV2KpIel2lPxBXxUNlaSHJUc7nCEEB0wm80MGTIk3GFElM700OcC1wCblVIbQrf9FsgB0Fo/CnxAcMjiboLDFq/r9kiPU7o9HYBSV6kkdCFEv9RhQtdarwBUB2008KPuCqonpNpTgWBCH5s0NszRCCFE9xswxeSmhF7iLAlzJEII0TMGTEJPjkrGqIyUuCShCyH6pwGT0I0GI0lRSZLQhRD91oBJ6BA8MFrqKg13GEII0SMGVEJPtadKD10I0W8NqISeFp0mPXQhRL81oBJ6qj0Vp9dJvac+3KEIIUS3G1AJPc2eBiC9dCFEvzSgEnrzWHSpowsh+qEBldCbTv+XhC6E6I8GVEJPjU5FoSis71MTQQohRLcYUAndarSSE5vD7qrd4Q5FCCG63YBK6AAj4keQV50X7jCEEKLbDbyEnjCCg7UHafA1hDsUIYToVgMyoWs0e6v3hjsUIYToVgMvocePAGBX1a4wRyKEEN1rwCX07JhsbEab1NGFEP3OgEvoRoORYfHDyKvKw+P3DMgVxYUQ/dOAS+gQrKOvKV7DtBen8cK2F8IdjhBCdIsBmdC/N/R7zBo0iwRbAquLV4c7HCGE6BYDMqHPzJjJo6c9ypxBc9heuT3c4QghRLcYkAm9yZjEMZS6SqloqAh3KEIIcdwGdkJPGgPAjsodYY5ECCGO34BO6KMSRwFI2UUI0S8M6IQea4kly5HFtopt4Q5FCCGO24BO6BAsu0jJRQjRH0hCTxxDfl0+Tq+zS4+rbKyk2FncQ1EJIUTXDfiEPjh2MAD5dfldetwDqx/g51/+vCdCEkKIYzLgE3p2TDYAB2sPdulx5Q3lFNdLD10I0XdIQg8l9K720J1eJ1XuKpkLRgjRZ3SY0JVSTyulSpVSW9q4/xSlVI1SakPo8rvuD7PnOCwOEm2Jx5TQvQGvLJQhhOgzOtNDfxY4q4M2y7XWk0KX3x9/WL0rKyaLgrqCLj3G5XUBUOWu6omQhBCiyzpM6FrrZUBlL8QSNtkx2Rys61oN3ekLjoqpdlf3QERCCNF13VVDn62U2qiU+lApNa6tRkqpm5RSa5VSa8vKyrpp08cvOyabYmcxHr+nU+211s099JrGmp4MTQghOq07Evp6YLDWeiLwD+DtthpqrR/XWk/TWk9LSUnphk13j+yYbDSawvrCTrVv8DWgCR4MlZKLEKKvOO6ErrWu1VrXh65/AJiVUsnHHVkv6upIl5YnIUnJRQjRVxx3QldKpSulVOj6jNBzRtR8tMeT0GvcUnIRQvQNnRm2+ArwDTBKKVWglLpBKXWzUurmUJNLgC1KqY3A34HLdYQNzk6yJeEwO/hg7wedStBNB0QBqhql5CKE6BtMHTXQWl/Rwf0PAw93W0RhoJTirll38T9f/Q9Xf3A1r533GjaTrc32TQdEQXroQoi+Y8CfKdrk3KHncu/ce9lfu5/N5ZvbbduU0E0GkxwUFUL0GZLQW5gzaA4AG8s2ttuuqYY+KHqQ9NCFEH2GJPQW4m3x5MbmsqlsU7vtmmrogxyDZJSLEKLPkIR+lAkpE9hUtqndSbeaSi6ZjkxJ6EKIPkMS+lEmJE+gorGCQ85DbbZpKrlkRGfQ4GvA7Xf3VnhCCNGmiEvoNQ1elmwtxh/QeHwB3lhXgM8f6Lbnn5AyAaDdsovT6yTKFEWCLQGA6sbqbtu+EEIcq4hL6J/vKOW/XljH9qJaPtxSxM9f28j7m4q67flHJIzAZrS1e2DU6XViN9kPJ3Qpuwgh+oCIS+hzhiUB8PWecr7ZEzwh9a1vOzcHS2eYDCbGJI1hW8W2Ntu4vC6izdHEW+MBSehCiL4h4hJ6aqyN4akOvt5Twcq9wYS+PK+Msrruq2OPTRrLjsod+AP+Vu93+pxHJHQ5W1QI0RdEXEKHYC/9690V7K9wcfn0bAIa3tvY9kHMrhqTOIYGXwMHag+0er/T68RutpMSFZwxstRV2m3bFkKIYxWxCd0TOhB69azBnJAZyxvru7biUHvGJI0BYFtl62WXppJLnDUOm9FGiauk27YthBDHKiIT+qyhSSgFsTYTYzJiuWxaNlsP1bK5oHvO2hwaNxSr0cr2iu2t3u/0Ook2RaOUIi06TRK6EKJPiMiEHm+3MCM3kfmjUzEaFBdMzsRmNvDy6q4tI9cWk8HEqIRRbR4YbSq5AKTZ0yhxSkIXQoRfh7Mt9lXPXT8DQ3AadmJtZr43YRDvbCjk6z3lDE9x8NSi6cf1/GOSxrB472ICOoBBHfm55/IFSy4A6dHprC5efVzbEkKI7hCRPXQAm9mIxXQ4/GtnD8btC1DT4OXLXWU0elsfodJZY5PGUu+tZ0/1niNu9wf8NPgamhN6mj2NMldZmyNiROS6b9V9/Gfnf8IdhhCdFrEJ/WgTsuLZfM8ZPHTJRHwBzabjrKfPy5qHSZl4e/fblLnKeGjNQ1Q2VuLyBedxaZnQ/dpPRWNELdIkOuHz/M/55tA34Q5DiE6L2JJLa+wWE1MGB8/eXHegihlDEo/5uZKjkpmfM5939rzD/tr9LCtYxiHnIX45/ZfBbTXV0KPTAChxlpBqTz3OVyD6ErffTYOvIdxhCNFp/aaH3iQx2sKQ5GjWH6xCa31cpZdLR15KjbuGZQXLGJM4hk8OfMJzW58DINp0uIYOyEiXfsjr9zZ/IxMiEvS7hA4wJSeB9QeqWPTMGi7851ftToXbnpkZM8mNzWVUwiheOOcFJqdO5sXtLwIQY4kBgiUXgGJncfcEL/oM6aGLSBN5JZedH8Hin8F1H0LC4FabTB2cwBvrC/hyVxkAGwtqmJQd3+VNGZSBZ
856BrPBjNVo5akznmJT+Sb21+xnVsYsAOKt8ViNVumh9zNaazwBzxHrxwrR10VeD91sg9pCqG79tHyAmUODtfPLp2djNioWbzr2aQGSo5KJs8YFN200MzVtKhePvBiz0QwEF5iWsej9jyfgAZCSi4gokZfQ40O98qq2E/qwFAdf3nEK931/PPNGpPDB5uJjLrt0hpwt2v94/MGELiUXEUkiL6HHZYEytNtDBxicFDw1/9wJGRRWN/BtfnWPhZRmT6PI2X1zsovwa1qFyuV19WhnQIjuFHkJ3WiG2Kx2e+gtnTo6OJRw9b7KHgspKyaLYmdxc69ORL6mv6VGyxKDImJEXkKH4MHQDnroTeLtFhKjLRyo6LlaaE5MDhpNYX33LbQhwqvlh7PU0UWkiMyEHj+40z10gMFJdg5UOHssnOyYbADy6/J7bBuid7XslUsdXUSKyEzoCYOhvhi8nftHy02K7tEeuiT0/ueIHroMXRQRIjITetNIl+rOJdDcpGgO1TQc94RdbUm0JWI32TlY2z3T94rwa9lDl5KLiBSRmdCbTijqZB09N9mO1pBf2TP/mEopcmJzpIfejzSNQwcpuYjIEZkJvXks+v5ONR+cFJx3ZX8Pl10kofcfUnIRkajDhK6UelopVaqU2tLG/Uop9Xel1G6l1Cal1JTuD/MojjQwWjud0HOTgjMj9vSB0YL6ApkXvZ+Qg6IiEnWmh/4scFY7958NjAhdbgIeOf6wOmAwQEIu7F8Bvo7HCMfbLcRFmdnfwwndF/BR7JJJuvoDGbYoIlGHCV1rvQxo76ycC4DnddBKIF4pldFdAbbp5F9C0QZ4/Xqo6Xj8d26SvcfHooOMdOkvWib0hk6OphIi3LpjtsVMoGUWKwjd9p1z4ZVSNxHsxZOTk3N8Wx1/CTjL4aNfwY73IW08jDoLpl0PWsO6ZyFxCAyZB7GZDE4KzpHeUwbHBuv6G0o3NM/EKCKXjHIRkahXp8/VWj8OPA4wbdq0458gY9bNMOxU2PUh7FoCy/8CX/8jONdLywNZjnRuM4/knboM/LsNGHPngMly3JtvKS06jZMyT+KFbS9w5ZgribXEduvzi94lJRcRibojoRcC2S1+zwrd1jtSRgYvc28PHiRd/mfweeCUX4O7Dg5+A4XrSNu9kl+YlsGLr0LMIJhybfBx2TMhOgX2LYfk4cHa/DG6bfJt/OD9H/D81ue5dfKt3fYSRe9r6qFHm6Ol5CIiRnck9HeBW5VS/wZmAjVa6/BMPZiQC+f/48jbMiYAsHpbCT9//gvePheG7HkBvrz/cBuLAzz1YIqCk35+ONHHpHdp82OSxnDG4DN4avNT2M12Fo1bhEEdeZjiy/wvGRw7mNy43GN4gaK3eAIeDMpAjCVGeugiYnSY0JVSrwCnAMlKqQLgbsAMoLV+FPgAOAfYDbiA63oq2OORHmejBgc7E6YwZNFlwd575T7YvRSq9sGIM2D9C/D5vcEHmKPhpJ/C6PMgeWRwZE0n3D3nbjSav677K/l1+fxu1u9QSgFQ2VjJTz7/CfNz5vOXU/7SUy9VdAOP34PVaMVussuwRRExOkzoWusrOrhfAz/qtoh6SEacDYCimsbgDdaYYO891IMHYPT3oCYf6stgxV/gs3uDl/gcmHodTL4GHCntbifWEsufT/4zf1v/N57e8jRJtiRumXgLRoORJfuX4NM+1havRWvdnOhF3+P2uzEbzNhNdumhi4gReWuKHqPEaAsWo4HipoTeGqWCyTs+By5/CSr2wIGvYdOr8On/whf3wdgLYNxFkL8KbHEw58fBOdqPeBrFT6b8hDJXGY9teowl+5dw16y7eH/v+ygUVe4qdlfvZkTCiB5+1eJYNffQzXapoYuIMWASulKK9Djb4R56ZyQNC16mXAOlO2Dt07DxFdj8GhhMEPDB7k+DB2CzZx0xckYpxb0n3sv8nPn849t/cPMnN+PTPi4bdRmv7nyVNcVrJKH3YR6/B4vRQpQpilJXabjDEaJTInMul2OUEWdrv4fentTRcM6f4Oc74Oo34Rd58P0n4NAGeO48+NfM78zRblAGTh98Oi+e8yITUydiMVi4cfyNZDoyWVO85vhfkOgxbr+7uYYuJRcRKQZcQi+qPc6vz5ZoGL4A7Ikw4Qfwi11w6bPgqoBnzoFdH8NR87nEWmJ54ownWPz9xaRHpzMtbRprS9bK0mZ9WHMP3RwlJRcRMQZUQk+Pi6Kkxk0g0I2L/lodwZr6wvdB++HlS+EvY+Dt/4bKvc3NzAYz6dHBYZALchZQ7a7m/LfOZ2XRyu6LRXQbTyCY0KWHLiLJgEroGXE2PP4Ala4eWMw5YwLcvgkufQ4Gz4Vt78Kz57W6CMf8nPk8ecaTWIwWfvb5z2Qt0j6oqeQSZYqiwddAcDCXEH3bgEro6aGhi8dcR++IyQLjLoRLn4HrPwyOdX/0RHji1OC0BJ7DPb2ZGTP512n/QqP51bJf4Q14m+97ZccrvLLjlZ6JUXSKx+/BYrBgN9vxa/8RC14I0VcNqIT+nbHoPSl9PFz7Now6Gwzm4LDHh6dDwdrmJtkx2dw9+242lm3kkQ3BWYc3lG7gvlX38cDqB9hXs6/n4xStcvvdzaNcQBa5EJFhQCX09OaE3ksHuTKnwEWPwg1LYNEHYDDCM2fDF/cHZ4oEzhpyFt8f8X2e3PwkT21+iru/vptUeypWo5W/r/9778QpvqPlmaIgi1yIyDBgxqEDJEdbcVhN5JXU9/7Gc+fCTV/Aez8OnqD05Z+Cvfic2fwqewZbE7byt/V/w2Qw8ff5f2dLxRb+teFfrC9Zz5S0nl8EShyp5SgXAKe35xZHEaK7DKiEbjAoxmbEsvVQTXgCsCfCZS9C2U7Y9J/g2abrnsW+6hFeyZ5BxZx7sWXNID4mg6lpU3kz703uXXUvL579Ih/s+4A91Xuo9dQSY4lh0bhFzaNmRPdrKrkk2ZIAqGrsubn0heguAyqhA4zLjOXfq/PxBzRGQ5jmUkkZBQv+J3jd74XNr2Ne8hvSX70WTDY45TfY59zGb2b8hts/v53TXz+dWk8tUaYoYi2xlDeU4/K6+P3c34cn/gGgqeSSFBVM6OUN5WGOSIiODagaOsC4QXE0eP3sKw9D2aU1RjNMugJu3whXvArDT4Old8P9OZz6xu2cnTyFeGs8j532GKuuXMXSS5dyychLWLx3MRUNFXj93o63IbqsaRx6Uw9dErqIBAMwoQdXEtp6qDbMkRzFFhdcQu+yF+GSZ2Dy1WC28cDGT3l/1v9jTuac5tkZrxpzFZ6Ah18u+yVzXpnDg2sexOv38uTmJ9lVtSvMLyTyaa2bx6HHWmIxG8yUN0pCF33fgCu5DE91YDEZ2FJYwwWTMsMdzncpBSd8P3ipKUQ9eRo8fgpkTgVHKsRlM2Ty1ZyUeRLLC5eTHZPN89ueZ1nBMvbX7md5wXKeO/u5cL+KiNZ0ToDFYEEpRXJUMhUNFWGOSoiODbgeutloYFRaDN8erOajLUWU1/fh+VTiMuHGpXDKb4JDHqvzYf1z8NhJ/G91A4+d/Ffe
u/A9Tsk6hYL6Ak7JOoX1pevZXLaZRl8jAR0I9yuISE3riVqMwdkzk6OSpeQiIsKA66FDsOzy7zX5rD1QxaI5udxz/rhwh9S2uEw45VfBC0BDFXzzL1KWP0TKgW9g+o38bcadVJlMRJmiOP210/nd17/jUP0hZg+azV9O+ct3lsET7WuaNM1qtAKQFJXEofpD4QxJiE4ZkP/p187O5cYThzAqLYYN+dXhDqdrohLg1Dvhuo+Co2U++wPGv40n+bUbiX7lSi5NmsTu6t0Mjh3Mpwc/5cE1D7KjcscRq9iL9jXtq2qnprS2UUouImIMyB762EGxjB00lnvf38bzKw/g8QWwmCLssy1nJlz7TnBM+5on4eBKcNdy675lnHvKzxmRdSJ/KFrKi9tf5MXtL5Ibm8sf5v4BgCFxQ4izxoX5BfRdTT30l1YWsmfvTnJHJFPlrsIf8GM0GMMcnRBtG5AJvcmE7Hg8K/axq6SOEzIjNMGljIJzHgxe97gwv3I5Iz9/EHiQ/0kYzAVn/5GDRvjbur9xzYfXAJARncHL575MclRy+OLuw5om4nK5FVUuD9NsyQR0gCp3lewz0acN6IQ+MSuYxDcV1HQ5ode7ffj8AeLtlo4b9xaLHa56HQrXgbMUtfjnTHztJiaOPJO5Y25iqSmA2WTjvtX3ccvSW5iUMokRCSO4ZOQlUmdvoank4vMbcXn8R5xcJAld9GUDOqHnJNqJt5vZVFDNlTNzuvTYu9/ZSn6li//cPLuHojtGJgsMDsU0aDIsewh2fkDi1rf4QWwWnP9/xJ10P3etuIuCugLqvfV8sO8DHjr5IUlWIU0lF6/PgMvjb94vMtJF9HUDulumlGJ8ZhwbC7o+t0tRTQMHKvv4hE3xOXD+3+FnO+Dyl8EaAy9ezKnv38lXlV6+HnQR9077NdsqtnHdR9fJYsghhxO6EZfHJ6f/i4gxoBM6wMSseHaV1NHo9XfcuIV6t49KpycyVrIxmmD0uXDT5zD3JxCfg3Kkob74f1zw+m086kug1FnMhW9fyD1f38PH+z+msrESgC3lW3gr760BNaa9aToFv98ULLnI6f8iQgzokgvAsNRo/AFNQVUDw1MdnX5cvduH16+pc/uItZl7MMJuZI6C0//38O8l22Dza0xZ/xzPmww8O3IOH+x9nzfy3sBisDAvax6f53+OX/tZVrCMG8ffSE5sDjGWmPC9hl7QvHi3NtHg8WM324k2R8vQRdHnDfiEnpMYXMAgv9LVpYTudPsAqHJ6IiehHy1tLKTdDZOvZtTzF3Lf2nfwAtuyJ/JG9jjezf+c05InMsbo4O/5n7H04FJsRhs3jL+BWRmziLHEMCx+WLhfRbdrSug6EOyhQ/Bs0WJncTjDEqJDAz6hZyeEEnpV15YYc7qD/+gVTg+Dk6K7Pa5elTQMblkBhzZgrshj4hcPMDF/I79JHknU3rcBOGPWD8nLmcri/M/454Z/8s8N/wRgSuoUxiaNxWQwcd6w8xiZMBKAioYKYiwxzafPR5KmchMBKw1+P4GAZnr6dN7Me5PVRauZkTEjvAEK0YYBn9BTYqxYTQYOVnQ+oWutcXoO99D7BVscDD05eJlwGax7jqgtr8O8O8BZRvbKJ8he+QSnAtuTcqiIz2Kfzc7LzmJ2Ve3C7Xfz7NZnSYlKwaAMlLhKyIjO4M8n/5nxKeOP2FRRfRF/Xvdnrhh9BVPTprKrahe5sbl9JvkvPbCU3JjhbPYHS0sNXj93TLuDdSXr+OWyX/L6+a9H/IigB1Y/QJw1jpsn3hzuUEQ3GvAJXSlFdqK9Sz10l8dP07HQiv6S0FuyxsCcW4MXgEAAsqZDIPitZMzOD6G+mBP3r+MaeyIMmkyNq4C3E1LYG5OI257IcJ/mtbLVXP3h1QyPH06WIwu72c6YxDG8vONlCusL+SL/CyalTGJV8SpyY3O5ZOQlFNQVcP6w87/zIdBbDtUfYkPZBq4ccTObAdA0VBeTnJbFn0/+M1cuvpJfLvslj5/+OCZDZP771Hvq+ffOf2M1Wlk4bmHzQtgi8kXmO7Kb5STaOVjZ+UWAm+rnAJX9MaEfzWAIzs/eZOrC4M+iTfDurVCeR1xMOgv3rAF3bXDVJV8jlxoMvDh4HJsb68l3b6dG+3h/7/vEWGJ4ZPIdPLz/PTaVb+L69BNZUrubh9Y+hFEZeXfPu9w16y4O1h1kR8UOSlwl2M12cmNzGZs0lihTFCn2FJJtyWyp2MLQuKFMSJlAjbuGElcJcZY44m3x7Knew5f5X2IymBgWP4x5WfPaTMKVjZWsLV7Lnuo9AExPmc/rbOB+85MkP7IO5tzGiAV3c9esu7jrq7v4f6v+H3dMuwO72d7Te/87dlbupLKxktmDju0ciBWHVuAL+PAFfHyZ/yVnDTmrmyMU4SIJHchOiGLNvkq01s2LSLSnrkVC7zcll2ORMQH+a9nh330e2PoW7F8Go84lrnIvP1r1GNQE+7oYLRQk5BDjLCRu523MVEbcRjOOvJ38yBpLxagzMJbt5IaoRn674rcYNAwNKNKUmQZ7Ah+Xb+WNvDe+E4YBAxcMv4Cl+z+mznfkuQEKaBpYmmxNCJ0Rqzgpax613jpcPhenZp/KkxsfpaihDIATksaRU7yVj6y/IhYXdTkLiPn6H5C3lAtm/4hdg07h+V2vs6xgGdeNuw6bycZ7e95jRMIIJqVOAoLHFgY5BqG1ZlvlNvKq8kiJSiHVnkpKVApR5ig2l23mi/wvWFa4jCmpU/jd7N+1ecZuo6+RvKo8vin6hkc2PEKAAI+d/hizMmZ1+c/2Rf4XJFgTsBgtvL/3fc7IPQOF6tR7vzO+Lf0Wp9fJzPSZmI0dDxgoc5VR0VjB6MTR3bL99ri8Lh5Y8wAXDb+o+W/Vn6jOjKNWSp0F/B9gBJ7UWt9/1P2LgAeBwtBND2utn2zvOadNm6bXrl17LDF3uyeX7+Xexdv59n9OJyG64zrupoJqzn/4KwAumZrFQ5dO7OkQI5u7HvJXwr5lULUfLA4YcjKU7QCfG4afCl8/DAVrYdAkasp3sslfx4SEUcTF5UBtIRSuIwCUGo24laLIZKTUZGKkx8OT8XEsibYzs8HNxXV11BkM1JosxPt9nF5fh01rvrFF8Z7Djk1rGgwGVkRFkagV2mDgkAqQ6vPz08oq1kbZON3pYm5DI9sDOdzu/RH33XwZU53L4fP/F4wZ+NZq4W9pmaw3BstQuUYHxYEGGvXh8xly7GnU+9xUeqrb3DUmFKNih7C1di8Lchbg8zaQV7mDOp+TS0ZczOzsU1hTvIbXdrxCtTe4bOKC+NEc8NRS7nfxwLwHyHZk81n+ZygUGY4MRsSPwOVzcaD2AAdqDwDBKYDHJo4lNy6X018/nVOzTyUxKpHntz5PtMmO1Wjm+yMvZV7WPHLjcjEqI9HmaEqcJby0/SXm58xndOJo3t39LhajhfHJ4xkWPwylFHWeOsoaytBa88mBT5oPmMdaYrlh/A1cPury73yT2VC6gUP1hxieMJxbPrmFysZKHjr5IaalT8Ov/STaEqnz1LG5bDM
Zjgw8fg9Or5NMRyYBHcAX8JEdm93u267eU4/DEhy5dqj+EOnR6fxpzZ94aftLxFvjeeXcV8iKyerw7VvqKsVmshFrie2wbUc8fg8rClcwyDHomD/AlFLrtNbTWr2vo4SulDICu4DTgQJgDXCF1npbizaLgGla61s7G1RfSuhLthbzXy+s491b5zIhK77D9l/vKefKJ1YBcOroVJ5eNL2HIxwgtA6u2BQIgK8BLC1GD1Xng6sClCHYRhmCl4AfveFlDux4h8Ejz0WNPgcaquHgN8H1WqfdAEYLVB+A6oOgA+CuQ5fnoTz1+GvyWVW+mZEZM0iefxcUrIGGKnbUWTl/xWA8mHnxhpmcOCI5GFfxJvC64NC3sGMxW0o30Kh9TPUbadB+ii02vI5UvvCUsMtsJlprJjW6mex2U2UwUmoyUm404jQocj1e5noh2ufmH7njeIIasrw+JrrdeIFPou3oUK95vtPFBfVOcr1ehnp9HDCZWJg5iMpjPDXwb0MvY2TAwB0H3mZEXQUVBsWKqCh0i076IJODan8jLu1DAQkmB5W+w2vxDrVn4Pa7KXRXHvHc30uaxJmDz+Q/xctZfuhrAJIMFuwGCzn2dLJSTuA/e95Bh747JZiiGWSOZXtDMRowGUzcOP5G3t35GoXtLP03Nm44Jw4+ldSoVJRSaK0xGoyMSxrHe7te54Vd/+G8nNNxGMy8sv8DcqPSONBQymmpU1lZtROrycr0pBOorQm+L2498Q8oo5m86jxmZcwiPTqd9SXr+e+lN2MxmPnJtJ8zJG4ISbYkMqIzKHYWE2WOIjkqGafXyZ7qPVQ0VJAancqG0g08vulx7FoxxpbElNzT2V69h88Ll1Hnc3HZqMu4a9Zdx/S3O96EPhu4R2t9Zuj33wBore9r0WYREZzQtx2q5Zy/L+fhKyfzvQmDOmz/ybYSfvj8WhLsZgYnRfP2j+b2QpSixzR9kLTw4eYibnlpPQCPXzOVM8alt/5Ynxu8DcFRQi2fo/pgcJI0dz001e3ddWCyBue0j4qHlDHBlai+/jvs+YyK2kISR5+HGns+GC3s+fqvlFTvYYzBTsLEq2DsBdBYG/wgK95E4/pn+aJsI6VGxWmOIURbHBRU7yfPV4PDYGFw4kiyTbEYawsoqS9ia3w6+701uH0ubqmqwQwQkwEnXAzRyZTvWcp6ZwFFfhde7WebSWHRmutcAd60wnarmdtqG0nRipVGP0uj7cQEAox3e0i1p2JqrCG6sY4TGxpRAMrAOouZtVFWiixRuLSPzVYLBWYz59U5Odfp5BO7natq60j3+XgkIY6YQICtjni+NEOqz8evK6poNCgsBivRcTkU+l0YPU4aPfW87bCTZzETaKNUNNfVwMooG36lOL+uns1WKwGl+E9hEbuiE3g6KZEdARfx/gBlRiPlpiOnRk4z2KgOeMjwuIkNBNhks7a6nVxjNAU+J76jwpjZ6CXe52GjzUqxyYQjEGCB08VZThczJ92A+cw/tv6e6kB7Cb0zNfRMIL/F7wXAzFbaXayUmkewN/9TrXX+0Q2UUjcBNwHk5HRtMqyelJ0YPMq/4WA1p4xKxWFtf7c0HRTNTrQPjIOi/V0rCcHtOzzVQdPJRa0yWYOXo8XnBC+dcdo9cNo9JB1187DcE/nOaVtNk4KmjsY24Qec1VAV/KAIbSsOGOcsB2tscKK2kKzQBa2hJj/4IRSVCI6U5jbJJ/6UM1puy10X/EZji+M35buhvgSypoHJSm59GZcXrAl+OGVMCH6bCgSg5mDwG1LxZqjYzdRh85maMycYi9+HPvA11Xs+IX7wPFRsBnML1kBCLsTn8Mv6UshfiT60geXOg4wdehrJV98ejMOedMTrofYQV69/Hl99KVUKqNqH8rpoMJjYYPSRHjuY6XN+wI68d6nze5l+/q/Ru5bgr9yDacZUJu36iL9X7oPRl8OIM6kr3cyry+4hzuNkvN/INzHx7NbVoAP8NPd8EmIy2bRvCQ0eJyV+F0X4SDfaqQh4+FZXsMCSyMS4YSRrA8VlW3H43MzKPQs1+ntoexJF298k2ZaEJSYd6kth0KTOvTe6qDM99EuAs7TWN4Z+vwaY2bI3rpRKAuq11m6l1H8Bl2mtT23veftSDx1g7v2fUVjdQLTFyPJfnUpiO7X0F1ce4K63t3DuhAyW7Sxj8/+e2YuRit7w6pqD/OqN4MHcP140vsuzcYoI5HMHP8DMoWGcgQD43Yd/b0sr3/B6Uns99M5U4AqBlkcfsjh88BMArXWF1rppteUnganHEmg4vf2judxz3licHj9r91e227a5h55gp87tw+3r2sReou87sofua6el6DdM1iOTt8HQcTKHXk3mHelMQl8DjFBKDVFKWYDLgXdbNlBKZbT49Xxge/eF2DtSYqxcPiMHs1Gx/mB1u23r3T6UgsyE4B+7yunthQhFb3J7Dyf0hvZKLkL0IR3W0LXWPqXUrcASgsMWn9Zab1VK/R5Yq7V+F/ixUup8wAdUAot6MOYeYzMbGTsojvUHq9ptV+/24bCYSA6VZSqdHtLjbL0RouglTd+6DApcXZxaWYhw6dSJRVrrD4APjrrtdy2u/wb4TfeGFh5TcuJ5ZfVBvP4AZmPrX2Ccbh/RVlPzmHU5MNr/uH0BDAocVhMut5RcRGQY8AtcHG1yTgKN3gA7iurabON0+4m2Gkl2BBN6hdPdZlsRmdy+AFaTEbvF1P4oFyH6EEnoR5mSEw/Qbtml3u3DYTWRFhsssxTVNPZGaKIXub1+rGYDdqtRSi4iYkhCP0pmfBSpMdZ2E3pTySXGZibGaqKouvMTe4nIEOyhG7BbjHJQVEQMSehHUUoxJSehUz10gIx4G4ekh97veHwBLCYDdrPpiNk1hejLJKG3YsrgePIrGyira7023jKhD4qPoqhGeuj9TXMN3WqkQUouIkJIQm/FlJwEAL5to5feVHIByIiLoqhaeuj9jdvnby65yEFRESkkobfihMy4dk8wCo5yCfXQ42xUOD00Si+uX2mqoUeZTVJDFxFDEnorbGYjYzNiW62je3wBPP4ADmtwZraM+ODZojLSpX9xe5uGLRqb148Voq+ThN6GyTkJbCqoxusPHHF70wGylj10QEa69DNuX4thi9JDFxFCEnobpucm0ugNsGpvJXkldVzyyNesP1hFfSihtzwoCshIl36medii2YTHF8Af6HhlLyHCTdYUbcOCMakkRVt4+qt9mI2KtQequO6ZNVw/dwhwOKGnSw+9Xzp8pmiwtOby+Iixdbw+phDhJD30NtjMRq6eNZjPdpSyZGsJV8zIJsps5K9Ld2E1GRiSEt3cLinaIj30fsbtDY1ysTYldCm7iL5PeujtuHrWYB75Yg82s4Ffnz2G354zhkPVjeQm27G2WK4qI94mY9H7GbcvEKyhWyShi8ghCb0dKTFW7r3wBGJsJuKigl+3R6V/92t3RlwUByqcvR2e6EFuXwCL0UiMNfj3rmmQOe9F3ycJvQM/mJ7dYZvBiXaW55URCGgMhr6zeok4dk2jXJomYCutlZKa6Pukht
4Nhqc6aPQGKJQDo/1CIKDx+jVWk4G02OAC0CVtTAMhRF8iCb0bDE91ALC7tD7MkYju4Amde2A1GUlyWDEoKJMeuogAktC7QVNCzytte1EMETma1hO1mgwYDYpkh5WSWumhi75PEno3iLdbSHZYpIfeTzStJ2o1B/890mJtlNRJD130fZLQu8mwFIck9H7C7TtccgFIjbFSKj10EQEkoXeT4anBhK61nCIe6Zp76Kbgv0dqrI1S6aGLCCAJvZsMT3VQ2+ijrL79nty6A5UUy1mlfVpjixo6QFqslfJ6z3cmahOir5GE3k06M9Kl0evnqidXce/ibb0VljgGzSUXc1PJJTgWvbyDD2shwk0SejcZlR4DwMdbS9pss2pfJY3eAMvzyvFJb6/Paiq5WIyHe+iAjHQRfZ4k9G6SGmPj6lk5PPfN/jYXmP5yZxkQPI18Y0F1L0YnuuJwD/3wKBeAEhmLLvo4Sejd6FdnjSYj1saPXlrPy6sOsqO4lhrX4TlAluWVMTE7HoM6nNxF3+M+qoaeGhPsoZfK2aKij5O5XLpRjM3MP6+awv+8s4XfvrUZAKNBMX9UKqeNSWV3aT13nTsGk0Hxxa4yfnbGqFafZ+uhGrQOrm0qel/LM0WB5rNF+9N8LnWNXowGhd0iKaA/kb9mN5uck8B7t57IxoIaCqsa2FRYzZvrC1m6PVhbP3lkCg0eP3/+ZBc/emk9/z1/GOMGHU7cXn+AHz63lkZfgC/vOKXVRRVqGrzE2kwoJROB9YRGz5HDFo0GRUqMtV+VXK55ajXJDgtPLpwe7lBEN5KE3gOUUkzKjmdSdjznTsjgjjNG8eWuMopqGhme6uC6E4dQ4fTwzoZCvtpTzpu3zGFoSnCUzIdbipsXy3h82V5+fsYotNYs2VrMtkO1fL2ngrUHqvjtOaP54UlD+XhbCROy4kiLsfHOxkKm5iSSk2RvM7ZGrx+DUlhMUm1ryzd7K4i1mZpr5wBDkx18uKWYCydlMmd4chijO34FVS425FdjNCgq6t0kOazhDkl0E0novcBkNLBgTFrz7w6riXvOH8f1c4dw0b++YtEza3hq4TSGpzp4cvlehiZHM2ZQLE8s38uMIYks3lTEv9fkY1AwMi2GiVlx/PnjXRRWNfDcNwdIjLZwQmYcy3aVkZ0YxVv/PZeVeyuItpqYNSSJnSV1JEVbqHf7WPTMatJibfznv2ZjNRnQmrBM+Vvj8hIb1fe+ZTR6/Xy8tZjvTRh0xIfeg5dO4Ppn17DwmdU8d92MiE7qn24vBcAf0CzZWsKVM3PCHFHvavT6sZmNHTeMQKozZzYqpc4C/g8wAk9qre8/6n4r8DwwFagALtNa72/vOadNm6bXrl17jGH3H98erOL6Z9fg9PgZmhzNjuI67r3wBE4emcIPHvuGolBv/Ufzh/HjBSOwmoyU1jay4C9fUtfo44yxaewrd7KnrJ5Fc4bw4soDGAyHT45pyWxUxNrMVDg9nDg8mX3lTpweH2efkE5Wgp3cpGhmD0ti9b5KdpfWYTEZqHB62FPqZNuhGrIT7cwbmcKJw5OJjTJzsNLF17vLMRkVo9JjGZHqYHtRLWv2VzIsxcGk7HjGDYojymKkrM7N1kM1JEZbePvbQzzz9T5OHZXKtXNy2VdWj8cfaP7mcMbY9Oa1Wpt4/QGMSvX4h8+Hm4u45aX1vHjDTE4ccWTSrmnwcvEjX1Pl9PD+j08kIy7quLfX6PXz5493kpVg59rZgzv8gPMHNM98tY/C6gZ+ddboY0pM1zy1isKq4FTP6XE2Xv7hrGOKvTWNXj9//GA7e8rqyYq384szR5ES0/43gM93llJQ6eLqWR2//uP17cEqrnxiFaeNTeP+748n2tr7fdqmRcejLMf2oaKUWqe1ntbqfR0ldKWUEdgFnA4UAGuAK7TW21q0+W9ggtb6ZqXU5cBFWuvL2nteSeiHldW5uf/DHZTWNTJ7WBI/PGkoZqOBRq+fN9cXYjKo7yy08en2EpbtKuPOc8cS0JqS2kYGJ0XzzoZCnly+j5tPHobZqNhYUM3YjDgKq13sK3dy66kjeH1tAX9duotxg2IZmuLgs+0lONtYYs1iNJCdGMXYQXHsLatn66Ha79zv1xp/4PD7yGE1Ue/2Nf9uMxu+8wFz+tg0lueVtfrBE2szcftpI4mxmThU3cCmghq+2l2O1WRg3KA4HDYTmfFRZMTZ2FhQjclgYEJWHNUuL06PD4Vid1k9TrePBLuFBq+PaIuJU0enolRwWGJOop195U6KQ2Uwl8dPeb2b5XnlFFQ1sOq3CzC28uGxu7SeCx5egVKKyTnxTBuciNPj4/MdpQxPdTBjSCK5SdFYTQYavH4KqhpYnlfOrpI6oq0mBsXZyE2OJjfJjlKKN9cXsP5gNQAXTBrEGWPTSXJYsFuCC1RXubzsK3NiNCiqXB4+2FzU3H7q4AQunZpFlMXYfADXH9AEtCbJYSHBbuFQdQP5lS5cXj9njE0j3m5h9n2fcv3cIVhMBv75+W5uOHEIWQl2MuOjyEyIIjbKjN+vSYmxsq/cyTsbCxmVFsOwFAdf76nAbFQMCu1/szH4LS8lxsqhmgb+9NEOVu6tZGJ2PDuLa0mJsfJ/l09mYlY8RoNCa01JrZv/rM1vXrbxldX5AHx/SiZ1jT6Kaxr56ekjiLaY2FPmxGIykBFnIyfRTrzdTFmdm4OVLty+AMkOC5nxdvJK63C6/SQ7gt9W690+Vu6tYEJmPKmxVvJK6nF5fPz439/i82uqXB5yk6K589wxnDIqNThFcr2b9QeqKalt5LSxaWTGR3GouoG3NxRiNRk5cXgyFpOBaIuRxGgLDV4/ZqPhiA/VRq+fktpGNhXUsHpfJXOGJXHmuHQCWrO/wsXS7SU889U+rp2dy4/mD2/1f64jx5vQZwP3aK3PDP3+GwCt9X0t2iwJtflGKWUCioEU3c6TS0IPH601u0rqGZHqaO7xNnr9bC6s4Zs9FUzMjmfmkEQ8/gAOi+mIXnF5vZuVeyvw+gOkOGxMHZyAwQB7y5zsKqljUHwU0wYnUFbvZlN+DduLaqkLJdbJOfFUOT1kxEcxKTuewuoGdpfWMyYjhmiLCV9AU1zTyC/f2MTG/GoAlAquCHXyyBQ8fs2O4loaPH4OVrpwefxkxkfh8Qcoq3NjNCiizEb8Ac2Q5GjiosxUuTzYLUZKat2tLkBiNiq8/iPfpv918lB+c/aYNvffhvxqXlubz7oDVewsqcNkUMwYksjeMmfzN6qWMuOjmJwTT2Mowe+vcDZ/kDmsJv50yQR2Ftfxj8/yCHTwhTkrIYrbF4zAbjHx89c2tPqB2Bmv3zyb1BgbP3x+LfsqnHh8bT+PUtDZKYrMRsWDl0zkwsmZbMiv5vpn11Dp9DSXr1puJy7KTE2DlytmZBMXZeHRL/cQazOREG3hQIXrmF4XgMVkIBDQ+EI702hQzR2OKLORN/97DlVOD3e+vYV95U6UArPB0Dy6qek120xGGrwdryWb7LBiNRlwenxUtxim3PTeclhNuDy+5r/t3OFJ3Dp/BLOHJR3T6
zvehH4JcJbW+sbQ79cAM7XWt7ZosyXUpiD0+55Qm/Kjnusm4CaAnJycqQcOHDimFyT6N39Ak1/pah5d0lpZwR/Q1DR4SYy2oLWmvN5Dgt2Mydj6wV6tNXvK6rGZjZiNBvaVO8lOtJMWY+VApQuH1USKw0q9x0eMtfO1/aa1RuOizGitKatzk1/lwuvX2MxG0mKtpMfajni+QEBTWufGoIJTLzclu9pGLwWVDVS7PLg8fpweHzE2E8NTYtBo7BbTEeULVyiBNHr9NHj9KBRGgwoOsaxzU+XykBEXRXZisDT0waYiGrwBRqU7mD8qtTmmQEBT7nRTWNVAYXUDTrcPg1KU1DYSZTFx8ZRM8krrOVTdwNzhyZgMikPVjRTVNBDQENDB15PisDBlcELzVAkAFaFvPduLalFKYTUZiLYaOWNsOoOT7DR6A82lh3UHKhmeGoPNbODdDYeIizIzLjMOnz9AYVUDBytdzX/zIcnR2MxGimsaKaxuYHiqg7goM8U1jazcW4HZZODU0alszK+m2uVl3KBYbGYjQ1OiGZwUDQTLeO9vOsS+Miduf4CMWBsnZMaRGG3hwy3F1DR4SbBb+N6EjFB8VWg09W4/lfXBjkKj109hdQNevybKYiA91kZarI2hKQ7GZ8bx4ZYi1u6vIt5uJjcpmonZcQxPjenUe6stfSahtyQ9dCGE6Lr2Enpnxq4VAi0LuFmh21ptEyq5xBE8OCqEEKKXdCahrwFGKKWGKKUswOXAu0e1eRdYGLp+CfBZe/VzIYQQ3a/DMTtaa59S6lZgCcFhi09rrbcqpX4PrNVavws8BbyglNoNVBJM+kIIIXpRpwZhaq0/AD446rbftbjeCFzavaEJIYToCjn/Wwgh+glJ6EII0U9IQhdCiH5CEroQQvQTnZqcq0c2rFQZcCyniiYDbZ6wFEYSV9f11dgkrq7pq3FB343teOIarLVOae2OsCX0Y6WUWtvWWVLhJHF1XV+NTeLqmr4aF/Td2HoqLim5CCFEPyEJXQgh+olITOiPhzuANkhcXddXY5O4uqavxgV9N7YeiSviauhCCCFaF4k9dCGEEK2QhC6EEP1ExCR0pdRZSqmdSqndSqlf9/K2s5VSnyultimltiqlbg/dfo9SqlAptSF0OafFY34TinWnUurMHo5vv1JqcyiGtaHbEpVSnyil8kI/E0K3K6XU30OxbVJKTemhmEa12C8blFK1SqmfhGOfKaWeVkqVhhZiabqty/tHKbUw1D5PKbWwtW11U2wPKqV2hLb/llIqPnR7rlKqocW+e7TFY6aG3gO7Q/Ef12rLbcTV5b9dd//fthHXqy1i2q+U2hC6vTf3V1s5onffZ1rrPn8hOG3vHmAoYAE2AmN7cfsZwJTQ9RiCi2aPBe4BftFK+7GhGK3AkFDsxh6Mbz+QfNRtfwJ+Hbr+a+CB0PVzgA8BBcwCVvXS368YGByOfQbMA6YAW451/wCJwN7Qz4TQ9YQeiu0MwBS6/kCL2HJbtjvqeVaH4lWh+M/ugbi69Lfrif/b1uI66v4/A78Lw/5qK0f06vssUnroM4DdWuu9WmsP8G/ggt7auNa6SGu9PnS9DtgOZLbzkAuAf2ut3VrrfcBugq+hN10APBe6/hxwYYvbn9dBK4F4pVRGD8eyANijtW7vzOAe22da62UE5+k/entd2T9nAp9orSu11lXAJ8BZPRGb1vpjrbUv9OtKgquEtSkUX6zWeqUOZoXnW7yebourHW397br9/7a9uEK97B8Ar7T3HD20v9rKEb36PouUhJ4J5Lf4vYD2E2qPUUrlApOBVaGbbg19ZXq66esUvR+vBj5WSq1TwYW4AdK01kWh68VAWphig+CCJy3/yfrCPuvq/gnXe/B6gj25JkOUUt8qpb5USp0Uui0zFE9vxNaVv11v77OTgBKtdV6L23p9fx2VI3r1fRYpCb1PUEo5gDeAn2ita4FHgGHAJKCI4Ne9cDhRaz0FOBv4kVJqXss7Q72QsIxPVcFlC88HXgvd1Ff2WbNw7p/2KKXuBHzAS6GbioAcrfVk4GfAy0qp2F4Mqc/97Y5yBUd2HHp9f7WSI5r1xvssUhJ6Zxaq7lFKKTPBP9RLWus3AbTWJVprv9Y6ADzB4RJBr8artS4M/SwF3grFUdJUSgn9LA1HbAQ/ZNZrrUtCMfaJfUbX90+vxqeUWgR8D7gqlAgIlTQqQtfXEaxPjwzF0bIs0yOxHcPfrtf2mQouTv994NUW8fbq/motR9DL77NISeidWai6x4Rqc08B27XWf2lxe8va80VA05H3d4HLlVJWpdQQYATBgzA9EVu0Uiqm6TrBA2pbOHLh7oXAOy1iuzZ0lH0WUNPiK2FPOKLX1Bf2WYvtdWX/LAHOUEolhEoNZ4Ru63ZKqbOAXwLna61dLW5PUUoZQ9eHEtxHe0Px1SqlZoXeq9e2eD3dGVdX/3a9+X97GrBDa91cSunN/dVWjqC332fHc2S3Ny8EjwrvIvgpe2cvb/tEgl+VNgEbQpdzgBeAzaHb3wUyWjzmzlCsOznOI+gdxDaU4OiBjcDWpn0DJAGfAnnAUiAxdLsC/hmKbTMwrQdjiwYqgLgWt/X6PiP4gVIEeAnWJG84lv1DsJ69O3S5rgdj202wjtr0Xns01Pbi0N94A7AeOK/F80wjmGD3AA8TOgu8m+Pq8t+uu/9vW4srdPuzwM1Hte3N/dVWjujV95mc+i+EEP1EpJRchBBCdEASuhBC9BOS0IUQop+QhC6EEP2EJHQhhOgnJKGLAU0FZ4C0hzsOIbqDDFsUA5pSaj/BMcDl4Y5FiOMlPXQxYITOql2slNqolNqilLobGAR8rpT6PNTmDKXUN0qp9Uqp10JzczTNOf8nFZxDe7VSang4X4sQrZGELgaSs4BDWuuJWusTgL8Bh4D5Wuv5Sqlk4C7gNB2c7GwtwUmdmtRorccTPLPwb70auRCdIAldDCSbgdOVUg8opU7SWtccdf8sgosSfKWCq94sJLgoR5NXWvyc3dPBCtFVpnAHIERv0VrvUsGlvs4B7lVKfXpUE0VwcYEr2nqKNq4L0SdID10MGEqpQYBLa/0i8CDBpczqCC4ZBsHVgeY21cdDNfeRLZ7ishY/v+mdqIXoPOmhi4FkPPCgUipAcLa+WwiWTj5SSh0K1dEXAa8opayhx9xFcLZAgASl1CbATXBaYCH6FBm2KEQnyPBGEQmk5CKEEP2E9NCFEKKfkB66EEL0E5LQhRCin5CELoQQ/YQkdCGE6CckoQshRD/x/wF1R+oUEOINSAAAAABJRU5ErkJggg==", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAiMAAAGwCAYAAAB7MGXBAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAABvCklEQVR4nO3dd3wUdf7H8ddudtMbIaQASei9VwMKKJFiAyuWO4SznAr2s2DDdocN9eztJ3ZRkaIUD6QLSJHeWyABUoD0nt2d3x8bFkJNIMmmvJ+Pxzx2d3Zm5/NlwX37ne98x2QYhoGIiIiIm5jdXYCIiIjUbQojIiIi4lYKIyIiIuJWCiMiIiLiVgojIiIi4lYKIyIiIuJWCiMiIiLiVhZ3F1AWDoeDQ4cOERAQgMlkcnc5IiIiUgaGYZCdnU3Dhg0xm8/c/1EjwsihQ4eIiopydxkiIiJyHhITE2ncuPEZ368RYSQgIABwNiYwMNDN1YiIiEhZZGVlERUV5fodP5MaEUaOnZoJDAxUGBEREalhzjXEQgNYRURExK0URkRERMStFEZERETErWrEmBEREZHz5XA4KCoqcncZtZLVasXDw+OCP0dhREREaq2ioiLi4+NxOBzuLqXWCg4OJiIi4oLmAVMYERGRWskwDJKSkvDw8CAqKuqsk25J+RmGQV5eHqmpqQBERkae92cpjIiISK1ks9nIy8ujYcOG+Pr6urucWsnHxweA1NRUwsLCzvuUjWKiiIjUSna7HQBPT083V1K7HQt6xcXF5/0ZCiMiIlKr6Z5mlasi/nwVRkRERMStFEZERETErRRGREREarBFixZhMpnIyMhwdynnrVxhZMKECfTs2ZOAgADCwsIYPnw4O3bsOOs+X3zxBSaTqdTi7e19QUVXFLvDzsqklRiG4e5SRERE6qxyhZHFixczZswY/vzzT+bNm0dxcTGDBg0iNzf3rPsFBgaSlJTkWvbv339BRVcEu8POsBnDuHPunWw8stHd5YiIiNRZ5Qojv/32G6NGjaJ9+/Z07tyZL774goSEBP7666+z7mcymYiIiHAt4eHhF1R0RfAwe9AptBMAM3bPcHM1IiJS2QzDIK/I5palPD3wAwYM4P777+ehhx6iXr16hIeH8+mnn5Kbm8vo0aMJCAigRYsWzJkz54yf8fPPP9O+fXu8vLxo0qQJEydOLPX+Bx98QMuWLfH29iY8PJwbbrjB9d6UKVPo2LEjPj4+1K9fn7i4uHN2OlyoC5r0LDMzE4CQkJCzbpeTk0NMTAwOh4Nu3brxn//8h/bt259x+8LCQgoLC12vs7KyLqTMM7qmxTX8uvdXfov/jcd7Po63pXqcPhIRkYqXX2yn3XP/c8uxt744GF/Psv/kfvnllzz++OOsWrWKH374gXvvvZdp06Zx7bXX8tRTT/HWW2/x97//nYSEhFP2/euvv7jpppt4/vnnGTFiBMuXL+e+++6jfv36jBo1ijVr1vDAAw/w9ddf06dPH9LS0li6dCkASUlJ3HLLLbz22mtce+21ZGdns3Tp0kofzmAyzvMIDoeDa665hoyMDP74448zbrdixQp27dpFp06dyMzM5I033mDJkiVs2bKFxo0bn3af559/nhdeeOGU9ZmZmQQGBp5Puadvg+Fg8M+DSc5N5vV+rzOk6ZAK+2wREXGvgoIC4uPjadq0Kd7e3uQV2WpEGBkwYAB2u90VEOx2O0FBQVx33XV89dVXACQnJxMZGcmKFSsoKCjg0ksvJT09neDgYG677TYOHz7M3LlzXZ/5+OOPM2vWLLZs2cLUqVMZPXo0Bw4cICAgoNSx165dS/fu3dm3bx8xMTFlqvfkP+cTZWVlERQUdM7f7/PuGRkzZgybN28+axABiI2NJTY21vW6T58+tG3blo8//piXXnrptPuMGzeORx55xPU6KyuLqKio8y31jMwmM9c0v4ZPNn7C9D3TFUZERGoxH6sHW18c7LZjl0enTp1czz08PKhfvz4dO3Z0rTs23CE1NfWUH/lt27YxbNiwUuv69u3L22+/jd1u5/LLLycmJoZmzZoxZMgQhgwZwrXXXouvry+dO3dm4MCBdOzYkcGDBzNo0CBuuOEG6tWrV94ml8t5Xdo7duxYZs6cycKFC8/Yu3EmVquVrl27snv37jNu4+XlRWBgYKmlslzT/BoAVhxaQWpeaqUdR0RE3MtkMuHraXHLUt5ZSq1W6ym1n7ju2Oedz92IAwICWLt2Ld9//z2RkZE899xzdO7cmYyMDDw8PJg3bx5z5syhXbt2vPvuu7Ru3Zr4+PhyH6c8yhVGDMNg7NixTJs2jQULFtC0adNyH9But7Np06YLurtfRYoJjKFrWFcchoOZe2e6uxwREZEL0rZtW5YtW1Zq3bJly2jVqpXrRnYWi4W4uDhee+01Nm7cyL59+1iwYAHgDDp9+/blhRdeYN26dXh6ejJt2rRKrblcp2nGjBnDd999x4wZMwgICCA5ORmAoKAg1537Ro4cSaNGjZgwYQIAL774IhdddBEtWrQgIyOD119/nf3793PnnXdWcFPO37Dmw1iXuo4Zu2cwuv1o3cdARERqrEcffZSePXvy0ksvMWLECFasWMF7773HBx98AMDMmTPZu3cv/fr1o169esyePRuHw0Hr1q1ZuXIl8+fPZ9CgQYSFhbFy5UoOHz5M27ZtK7XmcvWMfPjhh2RmZjJgwAAiIyNdyw8//ODaJiEhgaSkJNfr9PR07rrrLtq2bcsVV1xBVlYWy5cvp127dhXXigs0qMkgvDy82Ju5ly1Ht7i7HBERkfPWrVs3fvzxRyZPnkyHDh147rnnePHFFxk1ahQAwcHBTJ06lcsuu4y2bdvy0Ucf8f3339O+fXsCAwNZsmQJV1xxBa1ateKZZ55h4sSJDB06tFJrPu+raapSWUfjXognljzB7PjZjGg9gmcueqZSjiEiIlXnbFd5SMWpiKtpdG+aEsOaO0cez4mfQ5G9yM3ViIiI1B0KIyV6R/YmzDeMrKIsFiUucnc5IiIidYbCSAkPs4frMt8ZezQ9vIiISFVRGDnBsTCy7OAyjuQfcXM1IiIidYPCyAmaBjWlU4NO2A07s/bOcnc5IiIidYLCyEmODWSdvnt6pd8YSERERBRGTjGk6RA8zZ7sztjNtrRt7i5HRESk1lMYOUmgZyCXRV8GwIzdGsgqIiJS2RRGTuPYQNbZ8bMpthe7uRoREZHSmjRpwttvv+3uMiqMwshpxDaMpYFPAzIKM1hyYIm7yxEREanVFEZOw2K2cFXzqwDNOSIiIlLZFEbO4NhVNUsPLCWtIM3N1YiISF2SnZ3Nbbfdhp+fH5GRkbz11lsMGDCAhx566LTbJyQkMGzYMPz9/QkMDOSmm24iJSXF9f6GDRu49NJLCQgIIDAwkO7du7NmzRoA9u/fz9VXX029evXw8/Ojffv2zJ49uyqa6WKp0qPVIM2Dm9Ohfgc2H93M7L2z+Vu7v7m7JBERuRCGAcV57jm21RdMpjJv/sgjj7Bs2TJ++eUXwsPDee6551i7di1dunQ5ZVuHw+
EKIosXL8ZmszFmzBhGjBjBokWLALjtttvo2rUrH374IR4eHqxfvx6r1QrAmDFjKCoqYsmSJfj5+bF161b8/f0rotVlpjByFsNaDGPz0c3M2DOjXGGkyF7Exxs/ZmHiQu7pdA+DmgyqxCpFRKRMivPgPw3dc+ynDoGnX5k2zc7O5ssvv+S7775j4MCBAEyaNImGDU9f+/z589m0aRPx8fFERUUB8NVXX9G+fXtWr15Nz549SUhI4LHHHqNNmzYAtGzZ0rV/QkIC119/PR07dgSgWbNm593M86XTNGcxtOlQrGYr29O2syNtR5n22XJ0CyNmjuCTjZ+wK30Xjy5+lFdXvUqxQ1fliIjIue3du5fi4mJ69erlWhcUFETr1q1Pu/22bduIiopyBRGAdu3aERwczLZtzvmyHnnkEe68807i4uJ45ZVX2LNnj2vbBx54gJdffpm+ffsyfvx4Nm7cWEktOzP1jJxFkFcQA6IGMG//PGbsmcHjIY+fcdtiezEfbfyI/9v0f9gNOyHeIVzS6BJm7JnBN9u+YfORzbze/3Ui/CKqsAUiIuJi9XX2ULjr2G70/PPPc+uttzJr1izmzJnD+PHjmTx5Mtdeey133nkngwcPZtasWcydO5cJEyYwceJE7r///iqrTz0j53BsIOusvbPO2Lux9ehWRsxy9obYDTtDmgxh+rDpvHzxy7xz6TsEWANYf3g9N/16EysOrajK8kVE5BiTyXmqxB1LOcaLNGvWDKvVyurVq13rMjMz2blz52m3b9u2LYmJiSQmJrrWbd26lYyMDNq1a+da16pVKx5++GHmzp3Lddddx6RJk1zvRUVFcc899zB16lQeffRRPv300/L8yV4whZFz6NOoD/W965NWkMayg8tKvVdsL+b99e9z26zb2JW+i3pe9ZjYfyKv93+det71ALg0+lJ+uOoH2oS0Ib0wnX/O+ycfbfgIh+FwR3NERKSaCwgI4Pbbb+exxx5j4cKFbNmyhTvuuAOz2YzpNKEmLi6Ojh07ctttt7F27VpWrVrFyJEj6d+/Pz169CA/P5+xY8eyaNEi9u/fz7Jly1i9ejVt27YF4KGHHuJ///sf8fHxrF27loULF7reqyoKI+dgNVu5qlnJnCMnTA+/I20Ht8y6hY82fITNsHF5zOVMGzbttINVowKj+Hro11zf8noMDN5f/z73zb+PjIKMqmqGiIjUIG+++SaxsbFcddVVxMXF0bdvX9q2bYu3t/cp25pMJmbMmEG9evXo168fcXFxNGvWjB9++AEADw8Pjh49ysiRI2nVqhU33XQTQ4cO5YUXXgDAbrczZswY2rZty5AhQ2jVqhUffPBBlbbXZNSAW9NmZWURFBREZmYmgYGBVX78nek7uf6X67GYLcy7YR4/7fiJTzZ+gs2wEewVzNMXPc2QJkPK9Fkzds/gpT9fotBeSKRfJBP7T6Rjg46V3AIRkbqnoKCA+Ph4mjZtetof8ZokNzeXRo0aMXHiRO644w53l1PK2f6cy/r7rZ6RMmhVrxVtQ9pic9gYPmM4H2z4AJthY2D0QKYNm1bmIALOy4W/veJbogOiScpNYuRvI5m8fTI1IBOKiEgVWbduHd9//z179uxh7dq13HbbbQAMGzbMzZVVDoWRMhrWwvkXILMwkyCvIF695FXeGvAWoT6h5f6s1iGtmXzVZOKi47A5bPx75b95YukT5LlrMh4REal23njjDTp37kxcXBy5ubksXbqU0NDy/+bUBDpNU0bZRdk8vOhh6nvX57Gej51XCDmZYRh8tfUr3vrrLeyGnZjAGJ696Fl6R/augIpFROq22nSapjqriNM0mmekjAI8A/hs0GcV+pkmk4nb299Ox9COPLb4MfZn7efOuXdyVbOr+FePf1Hfp36FHk9ERKQ60mmaaqBbeDemDZ/GiNYjMGFi5t6ZXDP9GqbsnKJLgEVEpNZTGKkmAj0DeeaiZ/jmim9oE9KGrKIsXljxAqN+G8Wu9F3uLk9ERKTSKIxUM50adOL7K7/nsR6P4WPxYV3qOm769Sbe/OtNDXAVEZFaSWGkGrKYLYxsP5Jfhv/CZVGXYTNsTNo8iWtnXMuSA0vcXZ6IiEiFUhipxiL8IvjvZf/lnUvfIdIvkkO5hxgzfwyPLHqElNwUd5cnIiJSIXQ1TQ1wafSl9I7szYcbPuTrrV8zb/88/jj4B1EBUfhYfFyLr9W39GvL8ddtQtrQtn7V3mtARETKb8CAAXTp0oW3337b3aVUGYWRGsLX6sujPR7lqmZX8eKfL7Lx8EZ2pp/+Do5n8kDXB7iz452nvdGSiIiIuyiM1DCtQ1rz9dCv2Za2jczCTPKL88mz5ZFvyyffdsLz4uOv0wrS+CvlL95Z9w57MvfwQp8X8PLwcndTREREAIWRGslsMtO+fvty7fPjjh/5z8r/MGvvLBKzE/nvpf+tkFlkRUSk8qSnp/Pggw/y66+/UlhYSP/+/XnnnXdo2bIlAPv372fs2LH88ccfFBUV0aRJE15//XWuuOIK0tPTGTt2LHPnziUnJ4fGjRvz1FNPMXr0aDe36lQKI3XETa1vIjowmkcXPcrGwxu5ZdYtvHfZe7QOae3u0kREqoRhGOTb8t1ybB+Lz3mdIh81ahS7du3il19+ITAwkCeeeIIrrriCrVu3YrVaGTNmDEVFRSxZsgQ/Pz+2bt2Kv78/AM8++yxbt25lzpw5hIaGsnv3bvLz3dP+c1EYqUMuiryI7678jrHzx7Ivax9/n/N3JlwygYHRA91dmohIpcu35dP7O/fc+2vlrSvxtfqWa59jIWTZsmX06dMHgG+//ZaoqCimT5/OjTfeSEJCAtdffz0dO3YEoFmzZq79ExIS6Nq1Kz169ACgSZMmFdOYSqBLe+uYmMAYvrniGy6KvIh8Wz4PL3yYzzZ9xvncL9HmsLE6eTXLDi7D7rBXQrUiInXXtm3bsFgs9O59PEDVr1+f1q1bs23bNgAeeOABXn75Zfr27cv48ePZuHGja9t7772XyZMn06VLFx5//HGWL19e5W0oK/WM1EFBXkF8GPchr656lck7JvPftf9lb8ZexvcZf86BrUX2IlYmreT3hN9ZmLCQ9MJ0ADo36MyLfV6kWXCzs+4vIuIuPhYfVt660m3Hrgx33nkngwcPZtasWcydO5cJEyYwceJE7r//foYOHcr+/fuZPXs28+bNY+DAgYwZM4Y33nijUmq5ECbjfP6XuIqV9RbEUn6Tt0/mlVWvYDfsdG7QmbcvffuUga35tnyWHVzG7wm/szhxMTnFOa73gryCKLYXk2fLw2q28s9O/+QfHf+B1Wyt6qaIiJRytlvbV2fH5hkZM2YMrVq1KnWa5ujRo0RFRfHVV19xww03nLLvuHHjmDVrVqkekmM+/vhjHnvsMbKysiq03rP9OZf191s9I3XczW1uJiYwhkcXP8qGwxu4ddatvHvZuzT0b8jiA4uZv38+fxz8gwJ7gWufUJ9QBkYPJC4mju7h3Tmaf5QXV7zI0oNLeW/9e8zdP5cX+7xI+9DyXfEjIiLHtWzZkmHDhnHXX
Xfx8ccfExAQwJNPPkmjRo0YNmwYAA899BBDhw6lVatWpKens3DhQtq2dU5w+dxzz9G9e3fat29PYWEhM2fOdL1X3SiMCLENY/n2im+5f8H97M/az22zb8Nu2LE5bK5tGvk3cgWQzg06YzYdH24U4RfB+wPfZ3b8bF5Z9Qo703dy6+xbGdluJPd1ua/SuidFRGq7SZMm8eCDD3LVVVdRVFREv379mD17Nlars/fZbrczZswYDhw4QGBgIEOGDOGtt94CwNPTk3HjxrFv3z58fHy45JJLmDx5sjubc0Y6TSMumYWZPLr4UVYmOc+pNg1qSlx0HHExcbQNaVumy9LSCtJ4ddWrzI6fDUBUQBQv9HmBnhE9K7V2EZGT1dTTNDVNRZymURiRUoodxSw5sISmgU0vaDDq4sTFvPjni6TmpQJwQ6sbeKT7IwR4BlRUqSIiZ6UwUjUqIozo0l4pxWq2MjB64AVfFdM/qj/Th03nplY3ATBl5xSGTx/OwoSFFVGmiIjUIgojUmkCPAN4NvZZPh/8OdEB0aTmp/LAwgd4ZdUr5zWviYiI1E4KI1Lpekb05OdrfuYfHf6BCRPfbvuWn3b+5O6yRESkmlAYkSrhbfHm4e4P80C3BwCYsGoCGw5vcHNVIlIXqCe2clXEn6/CiFSpOzrcQVx0HDaHjUcWPsKR/CPuLklEaikPDw8AioqK3FxJ7ZaXlwfgutz4fGieEalSJpOJly9+mb2z9rI3cy+PLnqUzwZ/phlbRaTCWSwWfH19OXz4MFarFbNZ//9dkQzDIC8vj9TUVIKDg13h73zo0l5xi/jMeG6ddSs5xTnc2uZWxvUe5+6SRKQWKioqIj4+HofD4e5Saq3g4GAiIiJOOxeV5hmRam9BwgIeXPggAP+5+D9c3fxqN1ckIrWRw+HQqZpKYrVaz9ojonvTSLV3WfRl3N3pbj7Z+AkvrHiBFsEtaFu/et43QURqLrPZrEnPqjmdQBO3uq/zfVzc6GIK7YU8vOhhMgoy3F2SiIhUMYURcSsPswevXPIKUQFRHMw5yONLHsfusLu7LBERqUIKI+J2QV5BvDXgLXwsPqxIWsE7695xd0kiIlKFFEakWmgd0poX+rwAwOebP2fe/nlurkhERKqKwohUG0ObDmVku5EAPPPHM+zJ2OPmikREpCoojEi18nD3h+kV0Ys8Wx4PLnyQ7KJsd5ckIiKVTGFEqhWL2cLr/V8nwi+C/Vn7eWrpUzgMTVYkIlKblSuMTJgwgZ49exIQEEBYWBjDhw9nx44d59zvp59+ok2bNnh7e9OxY0dmz5593gVL7RfiHcJbA97C0+zJogOL+HXPr+4uSUREKlG5wsjixYsZM2YMf/75J/PmzaO4uJhBgwaRm5t7xn2WL1/OLbfcwh133MG6desYPnw4w4cPZ/PmzRdcvNReHUI7cG+XewGYtHmSekdERGqxC5oO/vDhw4SFhbF48WL69et32m1GjBhBbm4uM2fOdK276KKL6NKlCx999FGZjqPp4Oum7KJsBk0ZRE5xDu9e9i4Doga4uyQRESmHsv5+X9CYkczMTABCQkLOuM2KFSuIi4srtW7w4MGsWLHijPsUFhaSlZVVapG6J8AzgBtb3wg4e0dERKR2Ou8w4nA4eOihh+jbty8dOnQ443bJycmEh4eXWhceHk5ycvIZ95kwYQJBQUGuJSoq6nzLlBrub23/htVsZW3qWtanrnd3OSIiUgnOO4yMGTOGzZs3M3ny5IqsB4Bx48aRmZnpWhITEyv8GFIzhPmGue7m+/nmz91cjYiIVIbzCiNjx45l5syZLFy4kMaNG59124iICFJSUkqtS0lJISIi4oz7eHl5ERgYWGqRuuv29rdjwsTCxIXszdjr7nJERKSClSuMGIbB2LFjmTZtGgsWLKBp06bn3Cc2Npb58+eXWjdv3jxiY2PLV6nUWc2CmnFp1KUAfLHlC/cWIyIiFa5cYWTMmDF88803fPfddwQEBJCcnExycjL5+fmubUaOHMm4ceNcrx988EF+++03Jk6cyPbt23n++edZs2YNY8eOrbhWSK03usNoAH7d+yupealurkZERCpSucLIhx9+SGZmJgMGDCAyMtK1/PDDD65tEhISSEpKcr3u06cP3333HZ988gmdO3dmypQpTJ8+/ayDXkVO1iWsC93CumFz2Phm6zfuLkdERCrQBc0zUlU0z4gALE5czNgFY/Gz+jHvhnkEeAa4uyQRETmLKplnRKQqXdL4EpoHNSe3OJcfd/zo7nJERKSCKIxIjWE2mV1jR77Z9g1F9iI3VyQiIhVBYURqlCuaXkG4bzhH8o/oBnoiIrWEwojUKFYPK39v93fAeZmvbqAnIlLzKYxIjXNDqxsI8AxgX9Y+FiYudHc5IiJygRRGpMbxs/pxc+ubAecU8TXggjARETkLhRGpkW5teyueZk82Ht7I2tS17i5HREQugMKI1EihPqEMazEM0A30RERqOoURqbGO3UBvyYEl7Erf5e5yRETkPCmMSI0VExhDXEwcoBvoiYjUZAojUqP9o8M/AJi9dzbJuclurkZERM6HwojUaB1CO9Azoic2w8bXW792dzkiInIeFEakxjvWOzJl5xQyCzPdXI2IiJSXwojUeH0b9qVVvVbk2fJ0Az0RkRpIYURqPJPJ5LqB3mebPuOvlL/cXJGIiJSHwojUCkOaDKF3ZG/ybHnc+/u9/Jn0p7tLEhGRMlIYkVrBYrbw3mXv0bdRX/Jt+Yz5fQxLDixxd1kiIlIGCiNSa3hbvHnn0ne4NOpSihxFPLjwQebvn+/uskRE5BwURqRW8fTwZOKAiQxpMgSbw8ajix9l9t7Z7i5LRETOQmFEah2r2corl7zCNc2vwW7YeXLpk0zbNa3Kjp9VlMWM3TPILsqusmOKiNRkFncXIFIZPMwevNT3Jbw9vPlx5488t/w5Cu2F3Nzm5ko9bnxmPA8seIB9Wfvot78f7w98v1KPJyJSG6hnRGots8nMMxc9w9/a/g2Af6/8N19u+bLSjvfHwT+4bdZt7MvaB8CSA0vYdHhTpR1PRKS2UBiRWs1kMvF4z8e5s+OdALyx5g0+3vBxhR7DMAy+3PIlY+aPIbs4m65hXYmLdt7A74MNH1TosUREaiOFEan1TCYTD3Z7kLFdxgLw3vr3eGftOxiGccGfXWgv5Jllz/DGmjdwGA6ua3kdnw36jEe6P4KHyYM/Dv7BhsMbLvg4IiK1mcKI1Bn/7PxP/tXjXwB8uulTXl/zOg7Dcd6fdzjvMP/43z/4Zc8veJg8eLLXkzwf+zyeHp5EBUZxdfOrAfhw/YcVUr+ISG2lMCJ1yu3tb+ep3k8B8PXWrxny8xD+u/a/7M3YW67P2XJkCzfPupmNhzcS4BnAh3Efclvb2zCZTK5t7u50NxaThWWHlrE+dX1FNkNEpFZRGJE655Y2t/By35cJsAaQlJvEZ5s+Y9iMYdw882a+3fYtaQVpZ91/Tvwcbv/tdlLzUmka1JTvr/ye2Iaxp2wXFRDFNS2uAeDDDeodERE5E5NRESfOK1lWVhZB
QUFkZmYSGBjo7nKklii0F7IocREz98zkj4N/YDNsAFhMFvo26svVza9mQNQAvDy8AHAYDt5b9x6fbvoUgEsaXcKr/V4lwDPgjMc4kH2Aq6ddjc2w8fXQr+kS1qWymyUiUm2U9fdbYUQESCtIY078HGbumcnmo5td6wOsAQxqMoghTYfw7bZvWZS4CIDRHUbzYNcH8TB7nPOzn1/+PD/v+pmLIi/i00GfVlILRESqH4URkfO0N3MvM/fM5Ne9v5Kcm1zqPU+zJ8/3ed41OLUsDuYc5KqpV2EzbHw55Eu6hXer6JJFRKqlsv5+a8yIyEmaBTXjgW4P8L/r/8fngz/n2hbX4mf1I8w3jElDJpUriAA08m/E8JbDAc07IiJyOuoZESmDYkcxGGD1sJ7X/odyDnHltCuxOWx8MeQLuod3r+AKRUSqH/WMiFQgq9l63kEEoKF/Q65tcS2geUdERE6mMCJSRe7qeBcWs4WVyStZk7zG3eWIiFQbCiMiVSTSP5LrWlwHaOyIiMiJFEZEqtBdne7CarayOnk1q5NXu7scEZFqQWFEpApF+EVwXcuS3pH16h0REQGFEZEqd2fHO7GaraxJWcOqpFXuLkdExO0URkSqWIRfBNe3vB6A99e/Tw24ul5EpFIpjIi4wZ0d78TT7Mna1LWsTF7p7nJERNxKYUTEDcL9wrmh1Q2Ac94R9Y6ISF2mMCLiJnd0vMPVO/Jn0p/uLkdExG0s7i5ApK4K8w3jxtY38u22b3ljzRtc0fQKLGYLFrPFOeOr2ep6fWydxWzBy8OLDqEd8LH4uLsJIiIVQmFExI3u6HAHU3ZOYWf6Tnam7yzzfr0jevPJoE8wm9S5KSI1n8KIiBs18G3AxP4TWZi4EJvDhs2wUWwvdj23OWwUO0pelzzfm7GXlckr+XHHj9zc5mZ3N0FE5ILprr0iNcy3277llVWv4GPxYdqwaTTyb+TukkRETkt37RWppW5pcwvdwrqRb8tn/PLxuhJHRGo8hRGRGsZsMvNS35fw9vBmZdJKpuya4u6SREQuiMKISA0UHRjNA90eAGDimokk5SS5uSIRkfOnMCJSQ93a5la6NOhCbnEuz694XqdrRKTGUhgRqaE8zB682PdFvDy8WH5oOdN3T3d3SSIi50VhRKQGaxrUlLFdxgLw2urXSM5NdnNFIiLlpzAiUsP9vd3f6dSgEznFOby44kWdrhGRGkdhRKSG8zB78FKfl/A0e7L04FJ+2fOLu0sSESkXhRGRWqBZcDPu7XIvAK+ufpXUvFQ3VyQiUnYKIyK1xKj2o2hfvz3ZRdm8tOIlna4RkRpDYUSklrCYLbzU9yUsZguLDixiVvwsd5ckIlImCiMitUjLei25t7PzdM2ElRM4kn/EzRWJiJxbucPIkiVLuPrqq2nYsCEmk4np06efdftFixZhMplOWZKTdQmiSGUY3WE0bUPaklWUpdM1IlIjlDuM5Obm0rlzZ95///1y7bdjxw6SkpJcS1hYWHkPLSJlYDVbnadrTBYWJC7gt32/ubskEZGzspR3h6FDhzJ06NByHygsLIzg4OBy7yci5dc6pDV3d7qbDzZ8wH9W/oc2IW1oGtTU3WWJiJxWlY0Z6dKlC5GRkVx++eUsW7bsrNsWFhaSlZVVahGR8rmz4520CWlDRmEGt866lQUJC9xdkojIaVV6GImMjOSjjz7i559/5ueffyYqKooBAwawdu3aM+4zYcIEgoKCXEtUVFRllylS61g9rHwY9yHdwrqRU5zDgwsf5N1172J32N1dmohIKSbjAka3mUwmpk2bxvDhw8u1X//+/YmOjubrr78+7fuFhYUUFha6XmdlZREVFUVmZiaBgYHnW65InVTsKGbimol8u+1bAC5udDGvXPIKQV5Bbq5MRGq7rKwsgoKCzvn77ZZLe3v16sXu3bvP+L6XlxeBgYGlFhE5P1azlSd7Pcl/Lv4P3h7e/HHwD26eeTM70na4uzQREcBNYWT9+vVERka649AiddbVza/m6yu+ppF/Iw7kHOBvs//GrL2aGE1E3K/cV9Pk5OSU6tWIj49n/fr1hISEEB0dzbhx4zh48CBfffUVAG+//TZNmzalffv2FBQU8Nlnn7FgwQLmzp1bca0QkTJpE9KGH676gceXPM7yQ8t5cumTbD6ymUd6PILVbHV3eSJSR5W7Z2TNmjV07dqVrl27AvDII4/QtWtXnnvuOQCSkpJISEhwbV9UVMSjjz5Kx44d6d+/Pxs2bOD3339n4MCBFdQEESmPIK8gPhj4AXd1vAuAb7Z9w91z79ZsrSLiNhc0gLWqlHUAjIiUz/z983l62dPkFucS5hvGWwPeolODTu4uS0RqiWo9gFVEqoeBMQP57srvaBLYhNS8VEb9NoopO6e4uywRqWMURkTquGZBzfj+yu+5LOoyih3FvLDiBV7+82WK7cXuLk1E6giFERHB39Ofty59i/u73o8JEz/s+IE7596pcSQiUiUURkQEALPJzN2d7ua9ge/hb/VnbepaRswcweYjm91dmojUcgojIlJKv8b9+O7K72ga1JTUvFRun3M7v+z5xd1liUgtpjAiIqdoGtSU7674jgFRAyhyFPH0H0/z6qpXKXZoHImIVDyFERE5LX9Pf/576X+5t/O9gHM+knvm3UN6QbqbKxOR2kZhRETOyGwyc1+X+3j70rfxtfiyKnkVN8+8me1p291dmojUIgojInJOA6MH8u0V3xIdEM2h3EP8ffbfmRM/x91liUgtoTAiImXSol4LvrvyOy5udDEF9gIeX/I4r69+nW1Ht5FbnOvu8kSkBtN08CJSLnaHnffWv8dnmz4rtb6+d32iA6OJDogmJjCGqMAoYgJiiA6Mxs/q56ZqRcSdyvr7rTAiIufl9/2/8/XWr9mXtY+0grSzblvfuz4xgTF0CevCiNYjaOjfsIqqFBF3UhgRkSqTXZRNQnYCCVklS3YC+7P2k5ideEpQMZvMXBZ1Gbe2vZUe4T0wmUxuqlpEKpvCSBmsS0jn57UHGH91e6weFTt8Zt7WFKauPcA/Lm5KzyYhFfrZIjVJVlEWidmJ7M3Yyy97fuHPpD9d77Wu15rb2t7GFc2uwMvDy41VikhlUBg5h7wiG/1eW8SRnEIGtG7A+7d2w8/LUiGf/c2f+3l2xmYMA0wmuPuSZjx8eSu8rR4V8vkiNdnu9N18t/07ft3zKwX2AgDqedXjhlY3MKL1CML9wt1coYhUFIWRMpi3NYX7v19LQbGDTo2D+HxUT0L9z///zgzD4N0Fu3lz3k4A2kUGsjUpC4BW4f68eVMXOjQKqpDaRWq6zMJMpu6ayvfbvycpNwkAi8nC5TGXc2vbW+ncoLNO4YjUcAojZbQ2IZ07vlhNel4xMfV9+XJ0L5qEln/kv8Nh8MKvW/hyxX4AHhjYkofjWvL7tlTGTd3EkZxCLGYT91/WkvsubV7hp4VEaiqbw8aixEV8u+1b1qSsca3vUL8DN7e5mcFNBuNt8XZfgSJy3hRGymHv4Rxun7SKxLR8Qvw8+XxUT7pEBZd5/yKbg0d/2sC
vGw5hMsHzV7fn9j5NXO+n5RbxzPRNzN6UDECnxkFMvLEzLcMDKrglIjXb9rTtfLftO2btnUWRowiAIK8ghjUfxk2tbyImMMbNFYpIeSiMlNPh7EJGf7GKzQez8LF68P5tXbmszbnPXecW2rjnm79YuusIVg8TE2/qwjWdT71s0TAMftlwiOdmbCEzvxhPi5nHBrXmHxc3xcOsrmiRE6UVpDF111R+2vETh3IPudbHRsYyovUI+kf1x2KumDFeIlJ5FEbOQ06hjfu+XcuSnYfxMJv49/AO3Nwr+ozbp+UWMfqL1WxIzMDX04OP/tadfq0anPUYKVkFPPHzRhbtOAxAryYhvHFjZ6Lr+1ZoW0RqA7vDzrJDy/hhxw8sPbAUA+d/rsJ8w7ih5Q1c3+p6wnzD3FyliJyJwsh5KrY7ePLnTfy89gAADw5syUNxLU8ZSHcwI5+R/7eSPYdzqedr5fNRPekaXa9MxzAMg8mrE3l55lZyi+z4enrw9JVtubVXtAbsiZzBgewDTNk5hWm7p7nmLvEweXBZ9GXc1Pomekf01r8fkWpGYeQCGIbBm/N28u6C3QCM6BHFv6/tgKVk0Onu1Gz+/n+rSMosoGGQN1/d0YsWYeUf/5GYlse/ftrAynjnf1gvbhHKs1e1o3WExpKInEmRvYjf9//ODzt+YG3qWtf6tiFtefeyd3VpsEg1ojBSAb5duZ9np2/GYcBlbcJ479au7EjOZvQXq8nIK6ZFmD9f/aMXDYN9zvsYDofB58viee1/OyiyOTCb4JZe0Tx8easLusxYpC7Ymb6TH3f8yMy9M8ktzqWRfyM+G/QZjQMau7s0EUFhpMLM3ZLM/d+vo9DmoE1EAPuP5pFfbKdLVDCTRvWknp9nhRxn/9FcXpmznTmbnVfc+HtZGHNpC0b3baLJ0kTO4VDOIe6ceyeJ2YmE+Ybx2aDPaBrU1N1lidR5CiMV6K/96dz5pXMuEoB+rRrw0d+64etZ8aP5V+49ysuztrHpYCYAjev58OTQNlzZMVLnw0XO4nDeYe6aexd7MvcQ4h3CJ5d/QuuQ1u4uS6ROUxipYHsO5zDu5020jgjg2ava4WmpvEnLHA6DaesO8tr/tpOSVQhA95h6PHtVu3LNfyJS16QXpPPPef9kW9o2Aj0D+SjuIzo26OjuskTqLIWRWiCvyMYnS/by8eK95BfbARjWpSGPD2lDozKMUymyOcjIKyIjv5iIIG8Cva2VXbKI22UVZXHf7/ex4fAGfC2+vD/wfXpE9HB3WSJ1ksJILZKcWcAbc3fw89oDGAZ4WcyM6tuEiEBv0vOKycgrOuGxiPRc5/PcIrvrM3ysHnz09+70P8c8KCK1QV5xHvcvuJ9Vyavw9vDm7Uvfpm+jvu4uS6TOURiphTYfzOSlmVtdlwKXhcnkDCJ5RXY8Pcy8d2tXBrWPqMQqRaqHAlsBjyx6hKUHl2I1W3m9/+sMjB7o7rJE6hSFkVrKMAzmbk1hyl8H8PQwE+xrpZ6vp+uxnp+VYF9P53NfK4HeVmwOgwcnr2PO5mQ8zCbeHtGFq08zZb1IbVNsL+aJpU8wb/88PEwe/Pvif3NlsyvdXZZInaEwIqXY7A4em7KRaesOYjbBq9d34sYeUe4uS6TS2Rw2xi8fzy97fsGEifGx47m+1fXuLkukTijr77fuY19HWDzMTLyxM7f0isJhwGNTNvL1in3uLkuk0lnMFl7q+xI3tboJA4PnVzzP11u/dndZInIChZE6xGw28Z9rOzK6bxMAnp2xhU+W7HFvUSJVwGwy88xFzzCq/SgAXlv9Gv9a/C9WJa2iBnQOi9R6Ok1TBxmGwRtzd/D+QmcQeTiuFQ8MbHFek6pl5hVTZHfQIEBT10v1ZxgGH234iA82fOBaFx0QzXUtr2NYi2GE+oS6sTqR2kdjRuSc3luwizfm7gTgn/2b8eSQNmUKJPlFdn7flsKM9QdZvPMwxXaDm3tGMW5oW4J8NZeJVH9bj25lys4pzI6fTW5xLgAWk4UBUQO4vtX1xEbG4mHWbRhELpTCiJTJ//0Rz0sztwJwe2wM469uj9l8aiCx2R0s33OU6esP8r/NyaXmMDkm1N+TZ69qxzWdG2rqeqkR8orz+N++/zFl1xQ2Ht7oWh/pF8m1La/l2hbXEuGnS+FFzpfCSJk++BCs+wb6PeackKOO+nblfp6ZvhnDgJt6NGbCdZ3wMJswDIONBzKZvv4gv25I4khOoWufxvV8GNalIcO7NCI9r5inpm1id2oO4Lx3z7+HdyAqxNddTRIpt53pO5m6ayq/7vmVrKIswDnW5OJGF3NVs6toEdyCqIAovC3ebq5UpOZQGDkXWyG81QFyU+H6/4OON1TM59ZQU9ce4F8/bcBhwFWdImkR5s+M9YeIP5Lr2qaer5UrO0UyvEsjusfUK9X7UWiz8/Hivby3cDdFNgfeVjMPxbXijoubYvXQOGmpOQpsBfye8Ds/7/yZNSlrTnk/3DecmMAYogOjiQkoeQyMoXFAY7w8NHZK5EQKI2Wx+HVY+DIERcPY1WCt2//HM2tjEg9OXofNcfyvhLfVzOXtIhjepSGXtGxwzhsE7j2cw9PTNrNi71EA2kQEMOG6jnSNrleptYtUhn2Z+5i6ayqrk1ezP3s/2UXZZ9zWhIlIv0iiA50DYoc2HVqFlYpUTwojZVGUB+92h+xDEPc8XPxwxX12DTV/WwrPTt9Mi/AAhndpyKD2Efh7Wcr1GYZhMOWvA/x79jYy8ooxmeDvF8Xw2ODWBOhmfVJDGYZBRmEG+7P2k5Cd4HzMSiAhO4GErARyinNKbT+6/Wge7PagBsJKnaYwUlbrv4fp94BXIDywDvx0aV9FOZpTyL9nb2Pq2oMAhAd68cI17RncPkIDXKVWMQyDtII0ErITWJiwkElbJgHQv3F/Xu33Kn5WPzdXKOIeCiNl5XDApwMgaQP0vBOunFixny8s232Ep6dtYt/RPABu6RXFf67tqEAitdbsvbN5dtmzFDmKaBHcgvcGvkcj/0buLkukymk6+LIym2Hwf5zP10yCwzvcW08t1LdFKL891I+xl7bAw2zi+1WJfLBIM79K7XVFsyv4YsgXhPqEsjtjN7fOupW1KWvdXZZItaUwAtDkYmh9JRh2mPecu6uplbytHvxrcGuev6Y9AK//bwe/bU52c1Uiladjg458f+X3tA1pS1pBGnfMvYPpu6e7uyyRaklh5JjLXwSzBXb+BnsXu7uaWuvvF8UwMjYGgId/WM+WQ5lurkik8kT4RfDFkC+4POZybA4bzy57ljfXvIndceqkgSJ1mcLIMaEtoMcdzudznwb9x6LSPHdVOy5uEUp+sZ27vlxDanaBu0sSqTS+Vl/e6P8Gd3e6G4BJWybx4MIHXdPQi4jCSGn9nwCvIEjeBBsmu7uaWsviYeb9W7vRrIEfhzILuPurvygoVviT2stsMnN/1/t59ZJX8TR7svjAYv42+28czDl4zn0dhoPDeYfZdHgT8/fPZ2XSSjIL1aMotYuupjnZsndg3rMQEAn3/wWeuiSvssQfyWX4+8
vIzC9mWJeGvD2ii66wkVpv0+FNPLDwAY7kHyHEO4QJl0ygnlc9knOTSc5Ldj6WLCl5KaTkpWBz2E75nEi/SFqHtKZNSBva1GtD65DWNPJvpH9DUq3o0t7zZSuE93pCxn4YMA4GPFm5x6vjlu8+wsjPV2FzGDw2uDVjLm3h7pJEKl1ybjIPLHiAbWnbyrS92WQm1CeUCN8I0grSOJBz4LTbBVgDXAGldUhrWtVrRaRfJMFewTUipBzIPsDSg0vZfGQzF0VexFXNrqoRdcuZKYxciM1TYcposPrC/WshMLLyj1mHfbtyP09P2wzAR3/rxpAO+vOW2i+vOI8XVrzA3H1zCfQKJMIvggjfCOfjiYtvBA18G2AxH58JObsom53pO9metp3tadvZkbaDXRm7TtuDAmA1W2ng04AGvg0I8w075XmYbxgNfBsQYA2o0h//InsRa1PXsvTAUpYeXEp8Znyp9/s37s/42PE08G1QZTVJxVIYuRCGAf83CA6sgq5/g2HvV/4x67jnf9nCF8v34WP14Kd7YunQKMjdJYlUCYfhwGy68OF7xfZi9mbudQWU7Wnb2ZOxh/TC9DJ/hp/Vj+iAaBoHNCY6IJrowGiiAqKICogizDesQupMzk1m6cGlLD2wlD+T/iTflu96z8PkQZewLrQIbsHUXVMpdhQT6BnI072fZmjToeolqYEURi5U4mr4vzjABPcshYiOVXPcOspmdzD6i9Us3XWEyCBvZozpS1hg3b5xoUhFKLIXcST/CKl5qRzOP0xqXurx13mHXeuyirLO+jleHl5EBUS5gkoj/0an3KX45LBg4vjr+Kx4/jj4B7vSd5XaJtQnlIsbXcwljS7hooYXEejp/G/8rvRdPP3H065TWZfHXM4zFz1DiHfIef9ZSNVTGKkIP42GLVOh2QD4+3RQKq9UmfnFXPfBMvYczqVzVDA/3H0R3lbdZEykKhTYCjiUe4jErETXzf8ScxJJzErkYM5B7EbFXPFmNpnpFNrJGUAaX0KbkDZn7HEpdhTz2abP+GTDJ9gMGyHeITx30XMMjBlYIbVI5VMYqQjp+5yDWe1FcOtP0GpQ1R27jtp3JJfhHywjI6+Yazo35L836wobEXcrdhSTnJNMYnZJUMlOICknCZtRMkblhF8R44QXJz4P9gqmb8O+9GnYh2Dv4HIdf+vRrTz9x9PsztgNwFXNruLJXk8S5KXTudWdwkhFmfssLH8HQlvDvcvBw3LufeSCLN9zhJH/57zC5tHLW3H/wJbuLklE3KzIXsQH6z9g0pZJOAwHYT5hPN/neS5pfIm7S5Oz0I3yKsolj4JPCBzZAWu/dHc1dUKf5qG8NLwDABPn7WTFnqNurkhE3M3Tw5OHuj/EV0O/oklgE1LzU7lv/n08v/x5copy3F1epbI5bEzdNZVvt31Lga12zlitnpGyWPkJzHkMfEPhgXXg7YYa6qAnpmzkhzWJxDarz/d3X+TuckSkmsi35fPO2nf4dtu3GBgEWAOo510PTw9P52J2Plo9rK7nrnVmKy3rteSy6MsI9Ql1d1POadnBZby++nX2ZDrvdB7hF8GD3R7kiqZXVMjVTZVNp2kqkr0YPoiFo7ug7dXQbRRE9VIoqWQHM/Lp/9pCbA6Daff1oWt0PXeXJCLVyJrkNTyz7JkyTat/MhMmuoR1YWD0QC6LvoyogKhKqPD87c3cyxur32DpwaWAc8yNt8Wb5Fzn3c471O/AYz0fo1t4N3eWeU6VFkaWLFnC66+/zl9//UVSUhLTpk1j+PDhZ91n0aJFPPLII2zZsoWoqCieeeYZRo0aVeZjuj2MAOyYA9/ffPy1yQyRnSGmb8kSCz76saxo//ppA1P+OsCgduF8MrKHu8sRkWqmyF7ErvRdFNoLKXIUUWQvothe7Hp+8rq84jxWJa9i05FNpT6nVb1WxEXHcVn0ZbSq18ptA+czCzP5cMOHTN4+Gbthx2KycGvbW/ln53/iafbkm23f8OnGT8mz5QHOS54f7vYwUYHVK0wdU2lhZM6cOSxbtozu3btz3XXXnTOMxMfH06FDB+655x7uvPNO5s+fz0MPPcSsWbMYPHhwhTam0u38H2ydAfv+cE4XX4oJwts7g0mTvhDdB/w1a+CF2p2azeVvLcEw4PdH+tEiLMDdJYlILZCcm8yChAUsSFjAmpQ1pS5dbuzfmIHRA4mLiaNTg05Vcjqk2FHMjzt+5IP1H7jmfBkQNYBHuz9Kk6AmpbY9kn+E99e/z9RdU3EYDixmC7e1uY27O9/tmqeluqiS0zQmk+mcYeSJJ55g1qxZbN682bXu5ptvJiMjg99++61Mx6k2YeREmQdg/3LYvwz2LXOewjlZSHMIbAh+DU5YQk997hWgOUzO4p9fr+F/W1K4vltjJt7U2d3liEgtk1GQwaIDi5ifMJ8Vh1ZQaC90vVffuz69InvRK6IXPSN6Eh0QXaG9JoZhsPTgUl5f/Tr7svYB0CK4BY/3fJzYhrFn3Xdn+k7eWP0GK5JWAM5TOfd2vpcbW9+I1WytsBovRLUJI/369aNbt268/fbbrnWTJk3ioYceIjPz9LfBLiwspLDw+F+GrKwsoqKiqlcYOVlO6vFgsn85pG4p+74eXs5Q4lPPGUy8A52PXgHgFXj88cT13kEQ0NAZaCrqH4a9GLIOQtYhwARWb7CULFYfsHiBpeSxCsPT+sQMhr+/DIvZxOLHL6VRsE+VHVtE6pa84jyWHVrG7/t/Z8mBJeQUl75SJ8w3jF4RznDSI6IHjf0bn3c42Z2+mzfWvMGyQ8sAqOdVj7Fdx3Jdy+tK3YvobAzD4I+DfzBxzUTXINcmgU14tMej9G/c3+3zNJU1jFT6pBnJycmEh4eXWhceHk5WVhb5+fn4+Jz6wzJhwgReeOGFyi6tYvmHQftrnQtAXhokb4Scw5B74nLk+PO8o1CUA/ZCyDrgXMrL4g1BjU9Yoo8/D46CwEbO8ABgK3IeIyMRMhKOL5klr7MOguEo+3Fd4cQTzFbwsJY8WpyPZsvx5x7HXlvB098ZpnyCwfvYcux10PHXVud08F2igunTvD7L9xzl0yV7ef6a9uX/cxIRKQNfqy+Xx1zO5TGXU2wvZv3h9axKXsXq5NVsPLyR1LxUZu6dycy9MwGI9IukZ0RPekb0pFdEL0K8Q0grSHMtR/OPcrTgqOv5ievTCtIwMLCYLfyt7d+4u9PdBHiW71S0yWTiksaXENswlqm7pvL++vfZl7WP+xfcT4BngPMOzvVau+7m3DyoOVaP6tFrcqJK7xlp1aoVo0ePZty4ca51s2fP5sorryQvL++0YaRG9oycr6I8yDviDC0F6VCY7VwKso4/L8w8dV1+OuSmlu0Y/uHOIJB1iFJTJZ6Oh5fz1JLJBLZCKM53Ptryyx5UKorF29kjZPUh17CyO91BkcmTzk0j8fT2O95jc2yx+Dh7j3zqOeeG8Q05/twnGMzVcGp5h8P5/RqGs1adrhOptvJt+Ww4vIFVSatYk7KGTYc3HZ+F9jxdGnUp/+rxL6IDoyukxuyibD7b9BnfbfuOA
vupc5JYzBaaBzV3hZM2IW1oVa9Vpc1mW216RiIiIkhJSSm1LiUlhcDAwNMGEQAvLy+8vLxO+16t4+kLntEQfB5/EW2FzoCRecDZu1HqsaQHxJYPOSf8+Vu8nccKjoagqOPPg2OcPSl+YWA+zWAtwwCHrXQ4KS4AW4HztcMGjmLnqR6HreSxGBz248/tJUtRNhRkQn6G87Eg46TXmYBR8tnOf0x+QOdjZe3bXv4/KyjpfTkxpNQDTz+w+joXT98TnvuVhBzf49uYLc5AVpbFYXO2Jz/N2UuWn17ymFb6sSDjeMjz9D/h+4g+6buJVlgRcTMfiw8XRV7ERZHOeY/yivNYn7qe1SmrWZW8ii1HtmA37FjNVur71CfEO8S11Peuf8q6MN8w6vvUr9AaAzwDeLj7w4ztMrbUXZx3pO9ge9p2souy2ZG+gx3pO/hlzy+u/Rr6NeTli1+mZ0TPCq2nrCo9jMTGxjJ79uxS6+bNm0ds7NkH5kgZWLwgpKlzOR3DcP7gZSY4/w88OPr8x5iYTM5TLFXRvedwQGGWM5QUZjlDT3Eeq3cfYtKirdTzdPDs4CZ4U+QMR8X5ztBSnOfsPToxAOSnOz8Djged9PjKb8P5KMqB1K3O5XQ8A0pCZGPn92AvKlmKnYHw2PNSj0XOHqHAhs7wGRRV+jReUJRzvJJCjki5+Vp96dOoD30a9QGcPSfFjmICrAFuH6th9bDSOsR5emYYwwDn+JKk3CRnOEnb4QopB3MOcij3kFvviFzuMJKTk8Pu3btdr+Pj41m/fj0hISFER0czbtw4Dh48yFdffQXAPffcw3vvvcfjjz/OP/7xDxYsWMCPP/7IrFmzKq4VcnomE/jVdy41idnsPK3iE1xqdbcmBk9sXMzeI7k0KW7LXf2ale3z7MXHeylcPRQlQaU4z7kU5UFxrjPYHHtelFcSdkqeG3bn/DKnLCYweZReZ/Y4oSem3kmnjk5+rOfsHck84Lxk/MTxPMeWnBRnj1LqlvINjj4m9zAkbTj9ex5eENTo+Jgjr4CSsT6nGftzbNyP2aNkLJBnydgh79IDnk9+bfVxbqvQI7WYj8UHH6rvAHuTyURD/4Y09G/IZdGXudZnFWWxM20nMYEx7qutvGNGFi1axKWXXnrK+ttvv50vvviCUaNGsW/fPhYtWlRqn4cffpitW7fSuHFjnn322Zo36ZlUCz+sTuCJnzcRHujFkscvxctSDceBVIbi/BPCSiJgOH/cPTxLeqy8TnjuWfq5vcg5OPnE03gZJY/ZSZxzHFGFMZ0QUnzO8OhdegyQl7/z9JVXQMnjsecBJ7xX8njymCDDKFkczjYajhNe4zyGwpFIpdJ08FIrFdrs9HttISlZhbxyXUdu7lUxg77qLHvxSeOOEp29QKXG+xwbD2Q7/vzYe/aikjFEBceX4hOf51NlYcdsLR08zsXiAwEREBB5hseSxUsT7Ymcr2ozgFWkInlZPLjrkma8PGsbHy/Zy409ovAw6/9uz5uHFerFOJfKYBglY1ryTwopeSWv88/ymA+FOc6xNIXZJY85JzyWXFnmKLmawVFcvtps+c7xQ+caQ+Tp7zylZvYoOXVV8njsdJzZ4jxNZ7Y4TzGaLeBb33lZfVBj53idY89966s3RuQ0FEakxrm5VzTvLthN/JFcftuczJWdIt1dkpyJyeSch8bi6RxDU9EMw9kzU5TjfCw1jscMmEqen/ja7Ow5yTsK2cnOU1WnfUx2Bp6ikgBUEY5dOn9iSPEPd4aaYzWeuHDyOtPxdjuflDw/w6PZ4hz0XL+58zgKQlJNKYxIjePvZeH2Pk14Z/4uPly8mys6Rrh95Lq4ialkpuCSCfLKxTvwzFeiHVOY4xw8nJ/uPDXlsDkHMjtszqu+XM/tJc9LTl/lHj4+TufYrMY5Kc4JDsvSG1MZrH4Q0gzqN3M+hjQveX2OoGK3lfRCndQrVZR3wkBm6/ExSmbLCWOWrMcHOnsFnN/3JHWCwojUSKP6NOHTJXvZfDCLpbuO0K+VbkoolcCrZIBsRbAVQfYhyCwJJ1kHnM/zjpwwR80Jg26PLZzw2mE/ITSU9Pic7hGOT1yYvs85Fqg4F1I2OZeTHQsqXgGnBg9bfsW0HxPUawIN2kBYG+djg9YQ2to5x4/UaQojUiOF+Hlyc68oJi3bx4eL9iiMSPVn8XT+GNdrUvXHthU5r8Q6ugfS9kJayePRPaWDytl4eJ5w9VKAM0AYjuOTGTqOzW1jcz6eONGhoxgwjvcK7ZxzwgebnKeSwto6w0mDttCglXOiwZN7nUo9tx0PaI6SuXZshc7eJ1uRc2zSiQOsXc8Lj1+Of2yMj8njzGOBPKzOy+9PnNH52KX5ZQlRhlEyqWPJ/Ed5R48/FuU4534KKrl1R1Aj5+0w6mBPr66mkRrrYEY+/V9biM1hMO2+PnSNrufukkRqHluhcy6bo3ucvSCnXDZd8vrYPa7Oh2E4byZ6ZAekbofDJUvqNuePdE1l8S49X5BviDMc5aeXhI6jJaf4yjFlvKf/8WAS2KhkosKS51Zf5ySO+RnHZ64+7ZLhHNxttpa+ZcaxWaWPPboupS9Z1+km53imCqRLe6VO+NdPG5jy1wEGtQvnk5E93F2OiJRX7hFnKDm8HQ7vcD4e2eXs7TCdcAWTyXzC85OuXjo2TuXYDTxdzz2dg4YtJywenoDphPE/jhOen9jjUtIrYy90/viffDuH8gQMcJ4K863vnATRt35Jz4qfc3zRsVt4uDuY3TEPonpV6Efq0l6pE+7p34yf1x5g7tYUdqVk0zJcc0KI1Ch+odD0EudSUxhGyQ1LT7r1RF6a8xSLb/0TlpJTOmUZvFuUVzLvT2LJAOiDpZ/bCk64u3nQCXc5P3ldkHP8z7H7iRXnlb5txonrTlz8wyv5D+7MFEakRmsRFsCgduH8b0sKHy3ey8SbOru7JBGp7Uwm59VY3oEVOwbI0xdCWziXOuY0t2cVqVnuHeD8hztj/UEOZlTUyH8REakqCiNS43WJCqZP8/rYHAYfL97j7nJERKScFEakVrh3QHMAvlqxn48W76EGjMsWEZESCiNSK1zcIpS7+zUD4JU523nh163YHQokIiI1gcKI1Aomk4mnrmjLM1e2BeCL5fsY+91aCortbq5MRETORWFEapU7L2nGu7d0xdPDzJzNyYz8v1Vk5pXzbq4iIlKlFEak1rm6c0O++EdPArwsrNqXxg0fLa/Sq2x+2XCIq95dyoLtKVV2TBGRmkxhRGqlPs1D+eneWCICvdmVmsN1HyxjW1JWpR7T7jB4Zc52Hvh+HZsPZvH0tM0U2RyVekwRkdpAYURqrTYRgUy9rw8tw/xJySrkpo9WsHzPkUo5VmZ+MXd8uZqPSi4t9rF6kJRZwNS1ByrleCIitYnCiNRqDYN9mHJPH3o1CSG70Maoz1fzy4ZDFXqM3ak5XPv+MhbtOIy31cw7t3TlX4NbA/DBoj3Y7OodERE5G4URqfWCfK18dUcvrugYQZHdwQPfr+OzpXsr5LMXbE/h2veXsfdILg2DvJlyTx+u6dyQ
W3pFUd/Pk4S0PH7dWLHhR0SktlEYkTrB2+rBu7d0Y1SfJgC8PGsbL83cet5jOgzD4INFu7njyzVkF9ro2aQev9x/MR0aBQHg62nhjkuaAvDegt04NOeJiMgZKYxIneFhNjH+6naMG9oGgP/7I57uL83jocnr+G1zMvlFZZuTJL/IzgOT1/PabzswDLi1dzTf3nkRof5epbb7+0UxBHpb2HM4l9+2JFd4e0REaguTUQPmzc7KyiIoKIjMzEwCAwPdXY7UAjPWH+Tfs7aRml3oWudj9eDSNg0Y0iGSS1s3IMDbesp+BzPyufurNWw5lIXFbOL5a9rzt4tiznict+bt5L/zd9E2MpDZD1yMyWSqlPaIiFRHZf39VhiROsvhMFibkM6czcn8tjm51Fwknh5mLmkZypAOEVzeLpxgX09Wxadx7zd/cTS3iBA/Tz68rRu9m9U/6zEy8oro+8oCcovsfDayB3Htwiu7WSIi1YbCiEg5GIbB5oNZzNmcxG+bk9l7JNf1nofZRPeYeqzdn47NYdAuMpBPRnancT3fMn32K3O289HiPXSOCmb6fX3UOyIidYbCiMh5MgyDnSk5/LY5mTmbk9ienO1678pOkbx+Qyd8PS1l/rwjOYVc/OoCCoodfH1HLy5p2aAyyhYRqXbK+vtd9v+iitQRJpOJ1hEBtI4I4MG4luw7ksvcrckE+Vi5qUdUuXs2Qv29uKVXNJOW7ePdBbsVRkRETqKraUTOoUmoH3f3a86IntHnfYrln/2a4+lhZlV8Giv3Hq3gCkVEajaFEZEqEBHkzY09GgPw3sLdbq5GRKR6URgRqSL39G+Oh9nE0l1HWJ+Y4e5yRESqDYURkSoSFeLLtV0bAc5ZWUVExElhRKQK3TegOSYT/L4tha2HstxdjohItaAwIlKFmjXw56pODQF4f5F6R0REQGFEpMqNubQ5ALM3JbE7NcfN1YiIuJ/CiEgVaxMRyKB24RgGfKAra0REFEZE3GHsZS0AmLHhEPuP5p5jaxGR2k1hRMQNOjUOpn+rBtgdBh8t3uPuckRE3EphRMRN7i/pHZny1wEOnXDH4DPJLihmd2o2K/ceJbfQVtnliYhUGd2bRsRNejQJIbZZfVbsPco783cxomcUKVkFJGcWkJxVSEpWgfN1VgEpmQXkFtld+/ZuGsL3d12E2aw7AItIzacwIuJG91/WghV7jzJ5dSKTVyeec/sAbwsFxXZWxqfxzcr9jIxtUvlFiohUMoURETeKbV6fwe3Dmb8tlbAAL8KDvIkI9CY80JuIIG/CA72cz0te+3pa+HL5Psb/soVX52znsjZhNK7n6+5miIhcEJNhGIa7iziXrKwsgoKCyMzMJDAw0N3liFQ4wzDKfEdgh8Pgpo9XsGZ/Ov1aNeDL0T3P+27CIiKVqay/3xrAKlINlCdMmM0mXr2hE54WM0t2HubntQcrsTIRkcqnMCJSAzVv4M9DcS0BeGnmVlKzC9xckYjI+VMYEamh7r6kGR0aBZKZX8z4GVvcXY6IyHlTGBGpoSweZl67vjMWs4k5m5OZsynJ3SWJiJwXhRGRGqxdw0DuHeC88d6zM7aQkVfk5opERMpPYUSkhht7WQtahPlzJKeQF2dudXc5IiLlpjAiUsN5WTx49fpOmEwwde1BFu5IdXdJIiLlojAiUgt0j6nH6D5NAXh66iZydO8aEalBFEZEaol/DW5FdIgvhzILeHXOdneXIyJSZgojIrWEr6eFV67rCMDXf+5n5d6jbq5IRKRsFEZEapE+LUK5uWcUAE9O3URBsf0ce4iIuJ/CiEgt89SVbQkP9CL+SC5vzdvp7nJERM5JYUSklgn0tvLv4c7TNZ8u3cvGAxnuLUhE5BwURkRqobh24VzTuSEOAx6fspFcXV0jItWYwohILTX+6naE+HmyPTmbIf9dwp8a0Coi1ZTCiEgtVd/fi09HdqdRsA+Jafnc/MmfPP/LFvKLNKhVRKqX8woj77//Pk2aNMHb25vevXuzatWqM277xRdfYDKZSi3e3t7nXbCIlF33mBB+e+gSbukVDcAXy/cx9L9LWLMvzc2ViYgcV+4w8sMPP/DII48wfvx41q5dS+fOnRk8eDCpqWeegjowMJCkpCTXsn///gsqWkTKLsDbyoTrOvLlP3oRGeTNvqN53PjxCv49a6su/RWRaqHcYeTNN9/krrvuYvTo0bRr146PPvoIX19fPv/88zPuYzKZiIiIcC3h4eFnPUZhYSFZWVmlFhG5MP1bNeB/D/fjph6NMQz4dGk8V7yzlHUJ6e4uTUTquHKFkaKiIv766y/i4uKOf4DZTFxcHCtWrDjjfjk5OcTExBAVFcWwYcPYsmXLWY8zYcIEgoKCXEtUVFR5yhSRMwj0tvLaDZ35fFQPwgK82Hs4l+s/XM4rc7arl0RE3KZcYeTIkSPY7fZTejbCw8NJTk4+7T6tW7fm888/Z8aMGXzzzTc4HA769OnDgQMHzniccePGkZmZ6VoSExPLU6aInMNlbcKZ93B/ruvaCIcBHy3ew9Xv/qE5SUTELSr9aprY2FhGjhxJly5d6N+/P1OnTqVBgwZ8/PHHZ9zHy8uLwMDAUouIVKwgXytvjujCJ3/vTqi/J7tSc7j2g+W8/ftOHA7D3eWJSB1SrjASGhqKh4cHKSkppdanpKQQERFRps+wWq107dqV3bt3l+fQIlJJBrWPYO7D/bm6c0PsDoO3f9/FP75cTUZekbtLE5E6olxhxNPTk+7duzN//nzXOofDwfz584mNjS3TZ9jtdjZt2kRkZGT5KhWRShPi58m7t3TlzZs64201s2jHYa5+7w+2HtLgcRGpfOU+TfPII4/w6aef8uWXX7Jt2zbuvfdecnNzGT16NAAjR45k3Lhxru1ffPFF5s6dy969e1m7di1/+9vf2L9/P3feeWfFtUJEKsR13Rrz8719iApxTpR23YfLmLbuzOO7REQqgqW8O4wYMYLDhw/z3HPPkZycTJcuXfjtt99cg1oTEhIwm49nnPT0dO666y6Sk5OpV68e3bt3Z/ny5bRr167iWiEiFaZ9wyB+HXsxD05ez+Kdh3n4hw1sSMzkqSva4mnRpM0iUvFMhmFU+5FqWVlZBAUFkZmZqcGsIlXE7jD47+87eWeBc3xXj5h6fHBbN8ICNYOyiJRNWX+/9b85InJaHmYTjwxqzWcjexDgZWHN/nSufPcPTSUvIhVOYUREziquXTi/3H8xrcMDOJxdyM2f/MkXy+KpAZ2qIlJDKIyIyDk1DfVj2pg+XN25ITaHwfO/buWRHzfoDsAiUiHKPYBVROomX08L79zchc6Ng5gwZzvT1h1ky6FM+rdqQESQDw2DvIkI8iYyyIcGAV54mE3uLllEagiFEREpM5PJxJ2XNKN9wyDu/34tO1Ny2JmSc8p2HmYTYQFeJeHEm4hAHyKDvOnYOIjeTUMwmRRUROQ4XU0jIuclNbuAmRuSOJSRT1JWAcmZBSRl5JOSXYj9LNPJNw31Y0TPKG7o3phQf68qrFhEqlpZf78VRkSkQtkdBkdyCknKLCA5M7/
ksYAD6fks2pFKbsk4E4vZxKD24dzcM5qLW4Ri1mkdkVpHYUREqp3cQhu/bjjE96sT2ZCY4VofFeLDiB5R3NgjinDNYyJSayiMiEi1tvVQFpNXJzBt3UGyC2yAc6zJZW3CuLVXNP1aNdAgWJEaTmFERGqE/CI7szcl8f2qBNbsT3etbxjkzfCujbiuWyNahAW4sUIROV8KIyJS4+xKyWby6kR+XnuAjLxi1/oOjQIZ3qUR13RuqOnoRWoQhRERqbEKiu38vi2F6esOsWhHKraSq3PMJujbIpRruzZicPsI/Lw0O4FIdaYwIiK1QlpuEbM2HmLauoOsTchwrfexejC4fTjDuzbi4hahWDw0obRIdaMwIiK1zv6juUxfd4hp6w6w72iea32ovye39IrmwYEtFUpEqhGFERGptQzDYH1iBtPXHeTXjUmk5RYBcGXHSN4a0QVPiwKJSHWgMCIidUKx3cGM9YcYN3UjxXaDgW3CeP+2bnhbPdxdmkidV9bfb/3vg4jUaFYPMzd0b8ynI3vgZTEzf3sqd365hrwim7tLE5EyUhgRkVphQOswvhjdC19PD/7YfYRRn68mu6D43DuKiNspjIhIrRHbvD5f39GbAC8Lq/al8bf/W0VGXpG7yxKRc1AYEZFapXtMPb676yKCfa1sSMzglk9XciSn0N1lichZKIyISK3TsXEQP9wdS6i/F9uSsrj5kz9JySpwd1kicgYKIyJSK7WOCODHf15EZJA3u1NzuOnjFRxIzzv3jiJS5RRGRKTWatbAnx//GUtUiA/7j+Zx00cr2Hck191lichJFEZEpFaLCvHlp3/2oVkDPw5lFnDTxyvYlZLt7rJE5AQKIyJS60UEefPD3bG0iQggNbuQEZ/8yfR1B3WljUg1oRlYRaTOSM8t4vZJq9h4IBNw3gW4R0wIl7YJ47I2YbQK98dkMrm5SpHaQ9PBi4icRnZBMR8u2sO8rSnsSs0p9V6jYB8ubdOAy9qE0ad5qKaUF7lACiMiIueQmJbHwh2pLNieyvI9RymyOVzveVnM9Glen8vahNG3RShRIb5YdUdgkXJRGBERKYf8IjvL9xxhwfZUFm5P5VBm6XlJPMwmIoO8iQ7xJaqeL9H1fWlcz4foEF+iQ3wJ8fPUKR6RkyiMiIicJ8Mw2JGS7QomGw9kUnhCr8np+Hp6OINKiC/Xdm3EFR0jq6hakepLYUREpII4HAZHcgpJSMsjIS2PxLR852N6HolpeSRnFXDyf0mv69aIF4d1wN/L4p6iRaoBhRERkSpSaLNzMN0ZUFbsOcqnS/fiMCCmvi/v3NyVzlHB7i5RxC0URkRE3GRVfBoPTV7HocwCLGYTjw5qzT/7NcNs1pgSqVvK+vutoeEiIhWsV9MQ5jzYjys7RmJzGLz623b+/vlK3axP5AwURkREKkGQr5X3bu3Kq9d3xMfqwbLdRxny9hJ+35ri7tKqNcMw2JGcTUGx3d2lSBXSaRoRkUq253AOD3y/ji2HsgAYGRvDU1e01aRqJyi2O5i1MYlPluxla1IWMfV9efOmznSPCXF3aXIBNGZERKQaKbTZef23HXz2RzwArcMDeOeWrrSOCDjt9pn5xRxMz+dgRj4H0/M4lFnAwYx8Ar0ttGsYRIeGgbSJCMTHs2YHmuyCYiavSuTzZfEknTS3i9kE9/RvzkNxrfC0qCO/JlIYERGphhbtSOVfP23gSE4RXhYzYy9tgdViPiF45HMoI5/sQts5P8tsguYN/GnfMJD2DYNcj0G+1ipoyYVJyszni2X7+G5lgqutof5ejOoTw7AujXjr951MXXsQgLaRgbw9ossZg5tUXwojIiLV1OHsQh6bsoFFOw6fdbsQP08aBnvTKNiHRsG+NAz25mhuEVsOZbH1UCZHck5/1+FGwT60bxhIy3B/HIZzdtm8Ihv5xQ7yi2zkFdnJL7aXrD/+3NnrEki7kmDTLjKQxvV8KnRm2W1JWXy6dC+/rD+EzeH8+WnewI+7+zVjWJdGpU5dzdmUxFPTNpGeV4ynh5l/DW7FHRc3w0NXJdUYCiMiItWYw2Hwzcr9zNuaQn0/TxrV86FhsA+Ngn1oXPLc1/PME6YZhkFqdiFbDmWy5WAWWw5lsSUpk8S0/AqtM8jHSrvIQGevS6NA2kUG0byBH5Zy3KfHMAyW7T7KJ0v3smTn8QDWu2kId/drxqWtw8542XNqdgHjft7E/O2pgPNKpYk3diYqxPfCGiZVQmFERKQOyswrZktSJlsPZRF/JBdPixlfTw98PS14Wz1KnnvgY/XAx/Xcgo+nB0dyCtlyMNMZbA5lsSs1m2L7qT8RXhYzLcP9sXqYsdkNiu0Oiu0ObA7D9drmMCi2OSh2OLDZDVcviNkEV3SM5K5LmpV5MjjDMPhhdSIvzdxKbpEdfy8Lz13djhu7N64z9wNyOAwMqHG9QgojIiJyQYpsDnamZLM1KYuth7LYcsgZcnKLyn/ZrY/VgxE9o7jj4qbn3auRcDSPR39az+p96QBc3i6cCdd1JNTf67w+r6ZYFZ/GEz9vJKfQxjNXtuWazg1rTAhTGBERkQrncBjsT8tjd2oOhmFg9TBj8TBhMZvxtDgfLR4m53qz89HqYSbY11ohlzLbHQafLt3Lm3N3UmR3UN/Pk6evbEur8AACvC34e1nw97bgZanZVxkBFBTbeXPeTj5durfUvY8uaRnKy8M7EFPfz33FlZHCiIiI1FrbkrJ4+If1bE/OPu37nhYzAV4WZ0ApCSkB3lYCvCy0jQwkrl04TUOr74/55oOZPPLjenam5ABwU4/GNK7ny3sLd1Nkc+BlMfPAwJbcdUmzan3Zs8KIiIjUaoU2O+8t2M1vm5PJKigmp8BWrlNIzRr4cXnbcOLahdMtul61GI9hszv4YNEe3pm/C5vDINTfi1eu60hcu3AA9h3J5enpm1i2+ygALcP8+c91HenZpHpODqcwIiIidY7dYZBTaHMuBTayC4rJdj23kZ5XxJ97j/Ln3qOlBueG+HlyaeswLm8XxiUtG+DndeYrmSrL7tQcHv1xPRsOZAJwRccIXh7ekRA/z1LbGYbBjPWHeGnmVo7mOi/vvqVXFE8MaUOwr+cpn+tOCiMiIiJnkFVQzJKdh/l9awoLdxwmM7/Y9Z6nh5nY5vWJaxfOZW3CaBjkXakDRh0Og0nL9/Hab9sptDkI9Lbw0vAO5xyompFXxCtztjN5dSIAof6ePHtVu2o1wFVhREREpAxsdgdr9qfz+9YU5m1LYf/RvFLv1/fzpE1kAG0iAmkdEUDbCOeEchUxIDcxLY/Hpmzgz71pAPRr1YDXru9ERJB3mT9jVXwaT03bxO5U5/iSi1s4B7g2qQZjYhRGREREyskwDPYczmHe1lR+35bCuoR0HKf5lTSboGmoH20iAmkTEUCbSOdjqL8XdsPA7jAwSh7thoFhOE8hOdeD3TBYufcoL8/aRk6hDV9PD56+si239oo+r16NIpuDT5bs4Z0FzgGunhYzvZuG0DTUj2ahfjRt4E+zUD8aBvtU6dgYhREREZELlF9kZ1dqNtuTst
mWnMX2pGy2J2eRnld87p3LqEdMPSbe1LlCLtXddySXZ6Zv5o/dR077vqeHmZj6vjRr4EfTUP+SoOIMLCF+nhV+ekdhREREpBIYhsHh7EK2JWezPSmL7cnZbEvKYs/hnNPOWAvOmVM9TCbMZpyPJhO+Xh78o29T7rykYu+3YxgG6xMz2JWSw94jucQfyWHv4Vz2H82jyO44436TRvfk0tZhFVYHlP33u+qHC4uIiNRgJpOJsEBvwgK96d+qgWt9sd1Bkc2B+YTQ4WE2VflgUpPJRNfoenSNrldqvd1hcCgj3xlQDucQfyS3JKzkcjAjn6ZunERNYURERKQCHJtttrryMJuICvElKsS3VIgC52yvnm6sXWFERESkjquIK4MuRPWNcCIiIlInKIyIiIiIWymMiIiIiFspjIiIiIhbnVcYef/992nSpAne3t707t2bVatWnXX7n376iTZt2uDt7U3Hjh2ZPXv2eRUrIiIitU+5w8gPP/zAI488wvjx41m7di2dO3dm8ODBpKamnnb75cuXc8stt3DHHXewbt06hg8fzvDhw9m8efMFFy8iIiI1X7lnYO3duzc9e/bkvffeA8DhcBAVFcX999/Pk08+ecr2I0aMIDc3l5kzZ7rWXXTRRXTp0oWPPvrotMcoLCyksLDQ9TorK4uoqCjNwCoiIlKDlHUG1nL1jBQVFfHXX38RFxd3/APMZuLi4lixYsVp91mxYkWp7QEGDx58xu0BJkyYQFBQkGuJiooqT5kiIiJSg5QrjBw5cgS73U54eHip9eHh4SQnJ592n+Tk5HJtDzBu3DgyMzNdS2JiYnnKFBERkRqkWs7A6uXlhZeXl7vLEBERkSpQrp6R0NBQPDw8SElJKbU+JSWFiIiI0+4TERFRru1FRESkbilXGPH09KR79+7Mnz/ftc7hcDB//nxiY2NPu09sbGyp7QHmzZt3xu1FRESkbin3aZpHHnmE22+/nR49etCrVy/efvttcnNzGT16NAAjR46kUaNGTJgwAYAHH3yQ/v37M3HiRK688komT57MmjVr+OSTTyq2JSIiIlIjlTuMjBgxgsOHD/Pcc8+RnJxMly5d+O2331yDVBMSEjCbj3e49OnTh++++45nnnmGp556ipYtWzJ9+nQ6dOhQ5mMeu/o4KyurvOWKiIiImxz73T7XLCLlnmfEHQ4cOKDLe0VERGqoxMREGjdufMb3a0QYcTgcHDp0iICAAEwm01m3PTZBWmJiYq2eIK0utLMutBHUztpG7aw96kIboXLbaRgG2dnZNGzYsNRZk5NVy0t7T2Y2m8+aqE4nMDCwVv/lOaYutLMutBHUztpG7aw96kIbofLaGRQUdM5tdNdeERERcSuFEREREXGrWhdGvLy8GD9+fK2fwbUutLMutBHUztpG7aw96kIboXq0s0YMYBUREZHaq9b1jIiIiEjNojAiIiIibqUwIiIiIm6lMCIiIiJuVavCyPvvv0+TJk3w9vamd+/erFq1yt0lXZDnn38ek8lUamnTpo3r/YKCAsaMGUP9+vXx9/fn+uuvJyUlxY0Vl82SJUu4+uqradiwISaTienTp5d63zAMnnvuOSIjI/Hx8SEuLo5du3aV2iYtLY3bbruNwMBAgoODueOOO8jJyanCVpzbudo5atSoU77fIUOGlNqmurdzwoQJ9OzZk4CAAMLCwhg+fDg7duwotU1Z/p4mJCRw5ZVX4uvrS1hYGI899hg2m60qm3JWZWnngAEDTvk+77nnnlLbVOd2fvjhh3Tq1Mk18VVsbCxz5sxxvV8bvkc4dztr+vd4Jq+88gomk4mHHnrIta5afadGLTF58mTD09PT+Pzzz40tW7YYd911lxEcHGykpKS4u7TzNn78eKN9+/ZGUlKSazl8+LDr/XvuuceIiooy5s+fb6xZs8a46KKLjD59+rix4rKZPXu28fTTTxtTp041AGPatGml3n/llVeMoKAgY/r06caGDRuMa665xmjatKmRn5/v2mbIkCFG586djT///NNYunSp0aJFC+OWW26p4pac3bnaefvttxtDhgwp9f2mpaWV2qa6t3Pw4MHGpEmTjM2bNxvr1683rrjiCiM6OtrIyclxbXOuv6c2m83o0KGDERcXZ6xbt86YPXu2ERoaaowbN84dTTqtsrSzf//+xl133VXq+8zMzHS9X93b+csvvxizZs0ydu7caezYscN46qmnDKvVamzevNkwjNrxPRrGudtZ07/H01m1apXRpEkTo1OnTsaDDz7oWl+dvtNaE0Z69epljBkzxvXabrcbDRs2NCZMmODGqi7M+PHjjc6dO5/2vYyMDMNqtRo//fSTa922bdsMwFixYkUVVXjhTv6RdjgcRkREhPH666+71mVkZBheXl7G999/bxiGYWzdutUAjNWrV7u2mTNnjmEymYyDBw9WWe3lcaYwMmzYsDPuUxPbmZqaagDG4sWLDcMo29/T2bNnG2az2UhOTnZt8+GHHxqBgYFGYWFh1TagjE5up2E4f8RO/A/9yWpiO+vVq2d89tlntfZ7POZYOw2j9n2P2dnZRsuWLY158+aValt1+05rxWmaoqIi/vrrL+Li4lzrzGYzcXFxrFixwo2VXbhdu3bRsGFDmjVrxm233UZCQgIAf/31F8XFxaXa3KZNG6Kjo2t0m+Pj40lOTi7VrqCgIHr37u1q14oVKwgODqZHjx6ubeLi4jCbzaxcubLKa74QixYtIiwsjNatW3Pvvfdy9OhR13s1sZ2ZmZkAhISEAGX7e7pixQo6duxIeHi4a5vBgweTlZXFli1bqrD6sju5ncd8++23hIaG0qFDB8aNG0deXp7rvZrUTrvdzuTJk8nNzSU2NrbWfo8nt/OY2vI9AowZM4Yrr7yy1HcH1e/fZo24Ud65HDlyBLvdXuoPDCA8PJzt27e7qaoL17t3b7744gtat25NUlISL7zwApdccgmbN28mOTkZT09PgoODS+0THh5OcnKyewquAMdqP913eey95ORkwsLCSr1vsVgICQmpUW0fMmQI1113HU2bNmXPnj089dRTDB06lBUrVuDh4VHj2ulwOHjooYfo27cvHTp0ACjT39Pk5OTTft/H3qtuTtdOgFtvvZWYmBgaNmzIxo0beeKJJ9ixYwdTp04FakY7N23aRGxsLAUFBfj7+zNt2jTatWvH+vXra9X3eKZ2Qu34Ho+ZPHkya9euZfXq1ae8V93+bdaKMFJbDR061PW8U6dO9O7dm5iYGH788Ud8fHzcWJlUhJtvvtn1vGPHjnTq1InmzZuzaNEiBg4c6MbKzs+YMWPYvHkzf/zxh7tLqVRnaufdd9/tet6xY0ciIyMZOHAge/bsoXnz5lVd5nlp3bo169evJzMzkylTpnD77bezePFid5dV4c7Uznbt2tWK7xEgMTGRBx98kHnz5uHt7e3ucs6pVpymCQ0NxcPD45RRwCkpKURERLipqooXHBxMq1at2L17NxERERQVFZGRkVFqm5re5mO1n+27jIiIIDU1tdT7NpuNtLS0Gt32Zs2aERoayu7du4Ga1c6xY
8cyc+ZMFi5cSOPGjV3ry/L3NCIi4rTf97H3qpMztfN0evfuDVDq+6zu7fT09KRFixZ0796dCRMm0LlzZ/773//Wuu/xTO08nZr4PYLzNExqairdunXDYrFgsVhYvHgx77zzDhaLhfDw8Gr1ndaKMOLp6Un37t2ZP3++a53D4WD+/PmlzgPWdDk5OezZs4fIyEi6d++O1Wot1eYdO3aQkJBQo9vctGlTIiIiSrUrKyuLlStXutoVGxtLRkYGf/31l2ubBQsW4HA4XP/hqIkOHDjA0aNHiYyMBGpGOw3DYOzYsUybNo0FCxbQtGnTUu+X5e9pbGwsmzZtKhW85s2bR2BgoKvr3N3O1c7TWb9+PUCp77O6t/NkDoeDwsLCWvM9nsmxdp5OTf0eBw4cyKZNm1i/fr1r6dGjB7fddpvrebX6Tit0OKwbTZ482fDy8jK++OILY+vWrcbdd99tBAcHlxoFXNM8+uijxqJFi4z4+Hhj2bJlRlxcnBEaGmqkpqYahuG8LCs6OtpYsGCBsWbNGiM2NtaIjY11c9Xnlp2dbaxbt85Yt26dARhvvvmmsW7dOmP//v2GYTgv7Q0ODjZmzJhhbNy40Rg2bNhpL+3t2rWrsXLlSuOPP/4wWrZsWa0ueTWMs7czOzvb+Ne//mWsWLHCiI+PN37//XejW7duRsuWLY2CggLXZ1T3dt57771GUFCQsWjRolKXQubl5bm2Odff02OXDw4aNMhYv3698dtvvxkNGjSoVpdKnqudu3fvNl588UVjzZo1Rnx8vDFjxgyjWbNmRr9+/VyfUd3b+eSTTxqLFy824uPjjY0bNxpPPvmkYTKZjLlz5xqGUTu+R8M4eztrw/d4NidfKVSdvtNaE0YMwzDeffddIzo62vD09DR69epl/Pnnn+4u6YKMGDHCiIyMNDw9PY1GjRoZI0aMMHbv3u16Pz8/37jvvvuMevXqGb6+vsa1115rJCUlubHislm4cKEBnLLcfvvthmE4L+999tlnjfDwcMPLy8sYOHCgsWPHjlKfcfToUeOWW24x/P39jcDAQGP06NFGdna2G1pzZmdrZ15enjFo0CCjQYMGhtVqNWJiYoy77rrrlPBc3dt5uvYBxqRJk1zblOXv6b59+4yhQ4caPj4+RmhoqPHoo48axcXFVdyaMztXOxMSEox+/foZISEhhpeXl9GiRQvjscceKzU/hWFU73b+4x//MGJiYgxPT0+jQYMGxsCBA11BxDBqx/doGGdvZ234Hs/m5DBSnb5Tk2EYRsX2tYiIiIiUXa0YMyIiIiI1l8KIiIiIuJXCiIiIiLiVwoiIiIi4lcKIiIiIuJXCiIiIiLiVwoiIiIi4lcKIiIiIuJXCiIiIiLiVwoiIVLpRo0YxfPhwd5chItWUwoiIiIi4lcKIiFSYKVOm0LFjR3x8fKhfvz5xcXE89thjfPnll8yYMQOTyYTJZGLRokUAJCYmctNNNxEcHExISAjDhg1j3759rs871qPywgsv0KBBAwIDA7nnnnsoKipyTwNFpFJY3F2AiNQOSUlJ3HLLLbz22mtce+21ZGdns3TpUkaOHElCQgJZWVlMmjQJgJCQEIqLixk8eDCxsbEsXboUi8XCyy+/zJAhQ9i4cSOenp4AzJ8/H29vbxYtWsS+ffsYPXo09evX59///rc7mysiFUhhREQqRFJSEjabjeuuu46YmBgAOnbsCICPjw+FhYVERES4tv/mm29wOBx89tlnmEwmACZNmkRwcDCLFi1i0KBBAHh6evL555/j6+tL+/btefHFF3nsscd46aWXMJvVuStSG+hfsohUiM6dOzNw4EA6duzIjTfeyKeffkp6evoZt9+wYQO7d+8mICAAf39//P39CQkJoaCggD179pT6XF9fX9fr2NhYcnJySExMrNT2iEjVUc+IiFQIDw8P5s2bx/Lly5k7dy7vvvsuTz/9NCtXrjzt9jk5OXTv3p1vv/32lPcaNGhQ2eWKSDWiMCIiFcZkMtG3b1/69u3Lc889R0xMDNOmTcPT0xO73V5q227duvHDDz8QFhZGYGDgGT9zw4YN5Ofn4+PjA8Cff/6Jv78/UVFRldoWEak6Ok0jIhVi5cqV/Oc//2HNmjUkJCQwdepUDh8+TNu2bWnSpAkbN25kx44dHDlyhOLiYm677TZCQ0MZNmwYS5cuJT4+nkWLFvHAAw9w4MAB1+cWFRVxxx13sHXrVmbPns348eMZO3asxouI1CLqGRGRChEYGMiSJUt4++23ycrKIiYmhokTJzJ06FB69OjBokWL6NGjBzk5OSxcuJABAwawZMkSnnjiCa677jqys7Np1KgRAwcOLNVTMnDgQFq2bEm/fv0oLCzklltu4fnnn3dfQ0WkwpkMwzDcXYSIyOmMGjWKjIwMpk+f7u5SRKQSqZ9TRERE3EphRERERNxKp2lERETErdQzIiIiIm6lMCIiIiJupTAiIiIibqUwIiIiIm6lMCIiIiJupTAiIiIibqUwIiIiIm6lMCIiIiJu9f9f8cdHEx2pbgAAAABJRU5ErkJggg==", "text/plain": [ - "
" + "
" ] }, - "metadata": { - "needs_background": "light" - }, + "metadata": {}, "output_type": "display_data" } ], @@ -1355,7 +1322,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 16, "id": "2ea981cd", "metadata": {}, "outputs": [ @@ -1365,7 +1332,7 @@ "array([3, 4, 5, 6, 7, 8, 9])" ] }, - "execution_count": 15, + "execution_count": 16, "metadata": {}, "output_type": "execute_result" } @@ -1377,7 +1344,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 17, "id": "bbd33233", "metadata": {}, "outputs": [ @@ -1419,143 +1386,419 @@ " \n", " \n", " 0\n", - " 3.8\n", - " 1.10\n", + " 14.2\n", + " 0.08\n", + " 1.66\n", + " 0.6\n", + " 0.346\n", + " 289.0\n", + " 9.0\n", + " 0.98711\n", + " 2.72\n", + " 0.22\n", + " 14.2\n", + " 6\n", + " \n", + " \n", + " 1\n", + " 14.2\n", + " 0.08\n", " 0.00\n", + " 0.6\n", + " 0.346\n", + " 289.0\n", + " 440.0\n", + " 0.98711\n", + " 2.72\n", + " 1.08\n", + " 14.2\n", + " 8\n", + " \n", + " \n", + " 2\n", + " 3.8\n", + " 0.08\n", + " 1.66\n", " 65.8\n", - " 0.009000\n", + " 0.346\n", " 289.0\n", - " 50.104997\n", - " 1.038893\n", + " 9.0\n", + " 0.98711\n", " 3.82\n", - " 0.220000\n", + " 0.22\n", " 8.0\n", + " 7\n", + " \n", + " \n", + " 3\n", + " 3.8\n", + " 0.08\n", + " 0.00\n", + " 0.6\n", + " 0.346\n", + " 289.0\n", + " 9.0\n", + " 0.98711\n", + " 2.72\n", + " 0.22\n", + " 14.2\n", " 5\n", " \n", " \n", - " 1\n", + " 4\n", " 14.2\n", - " 0.08\n", + " 1.10\n", " 1.66\n", " 0.6\n", - " 0.251377\n", + " 0.346\n", " 289.0\n", - " 9.000000\n", - " 0.987291\n", - " 3.82\n", - " 1.080000\n", + " 440.0\n", + " 1.03898\n", + " 2.72\n", + " 1.08\n", + " 14.2\n", + " 6\n", + " \n", + " \n", + " 5\n", + " 14.2\n", + " 0.08\n", + " 1.66\n", + " 0.6\n", + " 0.009\n", + " 2.0\n", + " 9.0\n", + " 0.98711\n", + " 2.72\n", + " 1.08\n", " 8.0\n", " 6\n", " \n", " \n", - " 2\n", + " 6\n", " 3.8\n", " 1.10\n", + " 1.66\n", + " 65.8\n", + " 0.009\n", + " 289.0\n", + " 440.0\n", + " 1.03898\n", + " 2.72\n", + " 1.08\n", + " 8.0\n", + " 5\n", + " \n", + " \n", + "\n", + "" + ], + "text/plain": [ + " fixed acidity volatile acidity citric acid residual sugar chlorides \\\n", + "0 14.2 0.08 1.66 0.6 0.346 \n", + "1 14.2 0.08 0.00 0.6 0.346 \n", + "2 3.8 0.08 1.66 65.8 0.346 \n", + "3 3.8 0.08 0.00 0.6 0.346 \n", + "4 14.2 1.10 1.66 0.6 0.346 \n", + "5 14.2 0.08 1.66 0.6 0.009 \n", + "6 3.8 1.10 1.66 65.8 0.009 \n", + "\n", + " free sulfur dioxide total sulfur dioxide density pH sulphates \\\n", + "0 289.0 9.0 0.98711 2.72 0.22 \n", + "1 289.0 440.0 0.98711 2.72 1.08 \n", + "2 289.0 9.0 0.98711 3.82 0.22 \n", + "3 289.0 9.0 0.98711 2.72 0.22 \n", + "4 289.0 440.0 1.03898 2.72 1.08 \n", + "5 2.0 9.0 0.98711 2.72 1.08 \n", + "6 289.0 440.0 1.03898 2.72 1.08 \n", + "\n", + " alcohol quality \n", + "0 14.2 6 \n", + "1 14.2 8 \n", + "2 8.0 7 \n", + "3 14.2 5 \n", + "4 14.2 6 \n", + "5 8.0 6 \n", + "6 8.0 5 " + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "plugin.generate(len(outcome), cond=outcome)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "ed7f9903", + "metadata": {}, + "source": [ + "Use an array as the `cond` argument of the `fit` method:" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "8d90f2fa", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[2023-04-06T19:23:00.914877+0200][45392][INFO] Encoding fixed acidity 8821222230854998919\n", + 
"[2023-04-06T19:23:00.923931+0200][45392][INFO] Encoding volatile acidity 3689048099044143611\n", + "[2023-04-06T19:23:00.934174+0200][45392][INFO] Encoding citric acid 735380040632581265\n", + "[2023-04-06T19:23:00.954557+0200][45392][INFO] Encoding residual sugar 2442409671939919968\n", + "[2023-04-06T19:23:00.965758+0200][45392][INFO] Encoding chlorides 7195838597182208600\n", + "[2023-04-06T19:23:00.976757+0200][45392][INFO] Encoding free sulfur dioxide 3309873879720413309\n", + "[2023-04-06T19:23:00.996365+0200][45392][INFO] Encoding total sulfur dioxide 8059822526963442530\n", + "[2023-04-06T19:23:01.005686+0200][45392][INFO] Encoding density 3625281346475756911\n", + "[2023-04-06T19:23:01.014352+0200][45392][INFO] Encoding pH 4552002723230490789\n", + "[2023-04-06T19:23:01.021350+0200][45392][INFO] Encoding sulphates 4957484118723629481\n", + "[2023-04-06T19:23:01.029350+0200][45392][INFO] Encoding alcohol 3711001505059098944\n", + "[2023-04-06T19:23:01.036351+0200][45392][INFO] Encoding quality 3457201635469827215\n", + "[2023-04-06T19:23:07.229567+0200][45392][INFO] Step 100: MLoss: 1.3287 GLoss: 0.9813 Sum: 2.31\n", + "[2023-04-06T19:23:13.660368+0200][45392][INFO] Step 200: MLoss: 1.2782 GLoss: 0.9404 Sum: 2.2186\n", + "[2023-04-06T19:23:21.260768+0200][45392][INFO] Step 300: MLoss: 1.2039 GLoss: 0.8899 Sum: 2.0938\n", + "[2023-04-06T19:23:29.299141+0200][45392][INFO] Step 400: MLoss: 1.1596 GLoss: 0.8612 Sum: 2.0208\n" + ] + }, + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import random\n", + "from sklearn.preprocessing import LabelEncoder\n", + "cond = random.choices(['red', 'white', 'rose'], k=len(loader))\n", + "cond = LabelEncoder().fit_transform(cond)\n", + "plugin.fit(loader, cond=cond)" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "8c07b5a8", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAiwAAAGwCAYAAACKOz5MAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/bCgiHAAAACXBIWXMAAA9hAAAPYQGoP6dpAAB+d0lEQVR4nO3dd3hT5d/H8XfSvfeEUsoqe0MpS5DKUJHhAEQZAiqCigwVB7gRXODjwJ+DoQKiMpShzLJnS1mFQqHQUrpo6Z5JzvNHaKBSoDuhfF/Xda6kJ+ec3HcD5MN97qFSFEVBCCGEEMKEqY1dACGEEEKIO5HAIoQQQgiTJ4FFCCGEECZPAosQQgghTJ4EFiGEEEKYPAksQgghhDB5EliEEEIIYfLMjV2AqqDT6bh8+TIODg6oVCpjF0cIIYQQZaAoCllZWfj6+qJW374NpVYElsuXL+Pn52fsYgghhBCiAuLi4qhbt+5tj6kVgcXBwQHQV9jR0dHIpRFCCCFEWWRmZuLn52f4Hr+dWhFYim8DOTo6SmARQggh7jJl6c4hnW6FEEIIYfIksAghhBDC5ElgEUIIIYTJqxV9WIQQQojK0Gq1FBUVGbsYtZKFhQVmZmaVvo4EFiGEEPcsRVFITEwkPT3d2EWp1ZydnfH29q7UXGkSWIQQQtyzisOKp6cntra2MvloFVMUhdzcXJKTkwHw8fGp8LUksAghhLgnabVaQ1hxc3MzdnFqLRsbGwCSk5Px9PSs8O0h6XQrhBDinlTcZ8XW1tbIJan9in/HleknJIFFCCHEPU1uA1W/qvgdS2ARQgghhMmTwCKEEEIIkyeBRQghhKjlQkNDUalUd/XwbQksd5BblMuJKyeMXQwhhBDinibDmm8jLiuOR1Y/goWZBbuH78bSzNLYRRJCCCHuSeVqYZkzZw6dOnXCwcEBT09PBg8eTFRU1G3PWbx4MSqVqsRmbW1d4hhFUZg1axY+Pj7Y2NgQEhLC2bNny1+bKlbXvi5OVk7kafI4mnLU2MURQghRzRRFIbdQY5RNUZQyl7NXr168+OKLTJkyBRcXF7y8vPj+++/Jyclh7NixODg40KhRIzZu3HjLa/z555+0aNECKysr6tevz2effVbi9W+++YbGjRtjbW2Nl5cXjz32mOG1P/74g1atWmFjY4ObmxshISHk5OSU/xdeDuVqYdmxYweTJk2iU6dOaDQa3njjDfr27UtkZCR2dna3PM/R0bFEsPnv8KZ58+bx5ZdfsmTJEgICAnj77bfp168fkZGRN4WbmqRSqQjyCWJDzAYOJBygk3cno5VFCCFE9csr0tJ81r9Gee/I9/pha1n2r+UlS5bw6quvcvDgQX777TcmTpzI6tWrGTJkCG+88QZffPEFTz/9NLGxsTedGxYWxhNPPME777zDsGHD2Lt3Ly+88AJubm6MGTOGw4cP89JLL/Hzzz/TtWtX0tLS2LVrFwAJCQmMGDGCefPmMWTIELKysti1a1e5AldFqJRKvENKSgqenp7s2LGDnj17lnrM4sWLmTJlyi07+iiKgq+vL9OmTWP69OkAZGRk4OXlxeLFixk+fPhN5xQUFFBQUGD4OTMzEz8/PzIyMnB0dKxodUq1+uxqZu2dRRuPNvzy4C9Vem0hhBDGk5+fT0xMDAEBAYb/HOcWau6KwNKrVy+0Wq0hRGi1WpycnBg6dChLly4F9MsO+Pj4sG/fPvLz8+nduzdXr17F2dmZkSNHkpKSwqZNmwzXfPXVV1m/fj0nT55k1apVjB07lkuXLuHg4FDivcPDw+nQoQMXLlzA39+/TOUt7XcN+u9vJyenMn1/V6oPS0ZGBgCurq63PS47Oxt/f390Oh3t27fno48+okWLFgDExMSQmJhISEiI4XgnJyeCgoLYt29fqYFlzpw5vPvuu5Upepl18ekCwIkrJ8guzMbe0r5G3lcIIUTNs7EwI/K9fkZ77/Jo3bq14bmZmRlubm60atXKsM/LywvQT4n/3zBw6tQpBg0aVGJft27dmD9/PlqtlgceeAB/f38aNGhA//796d+/P0OGDMHW1pY2bdrQp08fWrVqRb9+/ejbty+PPfYYLi4u5a1yuVR4lJBOp2PKlCl069aNli1b3vK4wMBAfvrpJ9auXcsvv/yCTqeja9euXLp0CdAnQLj+iy3m5eVleO2/Zs6cSUZGhmGLi4uraDXuyMfeh3oO9dAqWg4nHa629xFCCGF8KpUKW0tzo2zlnQ3WwsLiprLfuK/4ejqdrty/BwcHB8LDw1m+fDk+Pj7MmjWLNm3akJ6ejpmZGZs3b2bjxo00b96c//u//yMwMJCYmJhyv095VDiwTJo0iRMnTrBixYrbHhccHMyoUaNo27Yt9913H6tWrcLDw4Pvvvuuom+NlZUVjo6OJbbqVNzKciDhQLW+jxBCCFETmjVrxp49e0rs27NnD02aNDEsTmhubk5ISAjz5s3j2LFjXLhwgW3btgH6MNStWzfeffddjhw5gqWlJatXr67WMlfoltDkyZNZt24dO3fupG7duuU618LCgnbt2hEdHQ2At7c3AElJSSWWnU5KSqJt27YVKV6VC/IJYuWZlexP2G/sogghhBCVNm3aNDp16sT777/PsGHD2LdvH1999RXffPMNAOvWreP8+fP07NkTFxcXNmzYgE6nIzAwkAMHDrB161b69u2Lp6cnBw4cICUlhWbNmlVrmcvVwqIoCpMnT2b16tVs27aNgICAcr+hVqvl+PHjhnASEBCAt7c3W7duNRyTmZnJgQMHCA4OLvf1q0Nn786oUBGdHs2VvCvGLo4QQghRKe3bt2flypWsWLGCli1bMmvWLN577z3GjBkDgLOzM6tWreL++++nWbNmLFy4kOXLl9OiRQscHR3ZuXMnDz74IE2aNOGtt97is88+Y8CAAdVa5nKNEnrhhRdYtmwZa9euJTAw0LDfyckJGxsbAEaNGkWdOnWYM2cOAO+99x5dunShUaNGpKen88knn7BmzRrCwsJo3rw5AHPnzuXjjz8uMaz52LFjZR7WXJ5exhX1xN9PcCrtFHN6zOHhBg9Xy3sIIYSoObcauSKqXo2PEvr2228B/XCqGy1atMiQymJjY1GrrzfcXL16lQkTJpCYmIiLiwsdOnRg7969hrAC+qFUOTk5PPvss6Snp9O9e3f++ecfk/oD1MW3C6fSTnEg4YAEFiGEEKKGVWoeFlNREy0se+P38tyW5/C282bTo5vK3ZtbCCGEaZEWlppTFS0ssvhhGbXzaoeF2oLEnERis26eNVAIIYQQ1UcCSxnZmNvQ1rMtAPsvy2ghIYQQoiZJYCkHw3wsiTIfixBCCFGTJLCUQ5BPEKCfQE6r0xq5NEIIIcS9QwJLObRwa4G9hT2ZhZmcTjtt7OIIIYQQ9wwJLOVgrjano3dHAJn1VgghhKhBEljKqbgfiwQWIYQQpqx+/frMnz/f2MWoMhJYyqk4sBxJPkKBtsDIpRFCCCHuDRJYyqmBUwM8bDwo0BYQkRxh7OIIIYQQ9wQJLOWkUqlKjBYSQgghjCErK4uRI0
diZ2eHj48PX3zxBb169WLKlCmlHh8bG8ugQYOwt7fH0dGRJ554gqSkJMPrR48epXfv3jg4OODo6EiHDh04fPgwABcvXmTgwIG4uLhgZ2dHixYt2LBhQ01U06BcawkJvS4+XVh3fh37E/bzEi8ZuzhCCCGqiqJAUa5x3tvCFsqx7MvUqVPZs2cPf/31F15eXsyaNYvw8HDatm1707E6nc4QVnbs2IFGo2HSpEkMGzaM0NBQAEaOHEm7du349ttvMTMzIyIiAgsLCwAmTZpEYWEhO3fuxM7OjsjISOzt7aui1mUmgaUCiltYTqaeJLMwE0fL6lm/SAghRA0ryoWPfI3z3m9cBku7Mh2alZXFkiVLWLZsGX369AH0CxH7+pZe9q1bt3L8+HFiYmLw8/MDYOnSpbRo0YJDhw7RqVMnYmNjmTFjBk2bNgWgcePGhvNjY2N59NFHadWqFQANGjSocDUrSm4JVYC3nTf1HeujU3QcSjxk7OIIIYS4x5w/f56ioiI6d+5s2Ofk5ERgYGCpx586dQo/Pz9DWAFo3rw5zs7OnDp1CtC32IwfP56QkBA+/vhjzp07Zzj2pZde4oMPPqBbt27Mnj2bY8eOVVPNbk1aWCooyCeIC5kXOJBwgD71+hi7OEIIIaqCha2+pcNY721E77zzDk8++STr169n48aNzJ49mxUrVjBkyBDGjx9Pv379WL9+PZs2bWLOnDl89tlnvPjiizVWPmlhqaBgn2BA5mMRQohaRaXS35YxxlaO/isNGjTAwsKCQ4eut/JnZGRw5syZUo9v1qwZcXFxxMXFGfZFRkaSnp5O8+bNDfuaNGnCK6+8wqZNmxg6dCiLFi0yvObn58fzzz/PqlWrmDZtGt9//315frOVJoGlgjp6d0StUhOTEUNSTtKdTxBCCCGqiIODA6NHj2bGjBls376dkydPMm7cONRqNapSgk9ISAitWrVi5MiRhIeHc/DgQUaNGsV9991Hx44dycvLY/LkyYSGhnLx4kX27NnDoUOHaNasGQBTpkzh33//JSYmhvDwcLZv3254raZIYKkgJysnmrvqU6ms3iyEEKKmff755wQHB/Pwww8TEhJCt27daNasGdbW1jcdq1KpWLt2LS4uLvTs2ZOQkBAaNGjAb7/9BoCZmRmpqamMGjWKJk2a8MQTTzBgwADeffddALRaLZMmTaJZs2b079+fJk2a8M0339RofVWKoig1+o7VIDMzEycnJzIyMnB0rLkRO/PD5vPjiR95pOEjfNj9wxp7XyGEEJWXn59PTEwMAQEBpX7J321ycnKoU6cOn332GePGjTN2cUq41e+6PN/f0sJSCV18r60rdHk/Fc19tSAvCiGEMIIjR46wfPlyzp07R3h4OCNHjgRg0KBBRi5Z9ZDAUgltPdpiqbYkOS+ZmMyYcp2bp8nj+c3P0/fPvhxJPlJNJRRCCFGbffrpp7Rp04aQkBBycnLYtWsX7u7uxi5WtZDAUgnW5ta082oH6FtZyqpIV8T0HdPZc3kPiTmJjP93PJsubKquYgohhKiF2rVrR1hYGNnZ2aSlpbF582bDxG61kQSWSipevbms6wopisK7e99l56WdWJlZ0dm7M4W6QqbvmM6Sk0vkFpEQQghRCgkslRTkrZ+m/1DiITQ6zR2PXxC+gLXn1mKmMuPT+z7lfw/8j+GBw1FQ+PTwp8w5OAetTlvdxRZCCCHuKhJYKqm5W3McLBzIKsriVOqp2x77c+TP/HjiRwBmB8+ml18vzNRmvBH0BtM7Tgdg+enlvBL6CnmavGovuylKzk1m3+V95Bpr8TEhhBAmSQJLJZmpzejk3Qm4/ay3G85vYN6heQC83P5lhjQeYnhNpVIxusVoPr3vUyzVlmyP2864f8eRmpdavYU3ARqdhrCkMOaHzeexvx6jz+99eHbzs/T/sz8/nfhJgosQQghAAkuVKB7efKt+LHvj9/LmnjcBGNlsJONalj4+vl/9fnzf93ucrJw4fuU4T214ipiM8o0+uhsk5yaz+uxqpoZOpeeKnoz5Zww/nviRqKtRqFDhYuXC1YKrfBH2BQNWDWDxicUSXIQQ4h4nix9WgSAffT+WI8lHyNfkY21+fVKcE1dOMCV0Chqdhv71+/Nqp1dLnTa5WHuv9vw84GcmbpnIpexLPL3xab7s/SXtvdpXez2qi0an4WjKUXbH72Z3/G5Op50u8bqTlRPdfLvRvU53utXphqOlIxtiNrDw6ELisuL4LOwzFp1cxDMtn+GJwCewMbcxUk2qTk5RDufSz9Hao7WxiyKEEHcFmem2CiiKQsgfISTnJvO/B/5HsK9+YcSLmRd5esPTXC24ShefLnzd52sszSzLdM3UvFRe3PYix68cx1JtyUc9PqJf/X7VWY1qkZSTxLhN47iYedGwT4WKFm4t6F63O93rdKelW0vM1GY3navRafj73N98d+w74rPjAXC3ceeZls/weJPHSwTDu4miKIzfNJ6DiQd5r+t7JW4PCiFqzt08022vXr1o27Yt8+fPN3ZRykRmujURKpXKMLy5uB9LSm4Kz21+jqsFV2nu1pz5veeXOawAuNm48WO/H+nt19sw7HnxicV31bDn3KJcXtz2IhczL+Jg6cCAgAF81P0jQoeFsvzh5UxqO4k2Hm1KDSsA5mpzhjQewt9D/ubdru/ia+fLlbwrzDs0jwdXPcivp36lQFtQw7WqvM0XN3Mw8SAA88Pnk1WYZeQSCSGE6ZPAUkVunI8lqzCLiVsmEp8dTz2HenzT5xvsLOzKfU0bcxu+6PUFTzZ9EoDPwj7jzd1vkp6fXpVFrxZanZbXdr7GqbRTuFq7svLhlczrOY+BDQfiau1armtZqC0Y2ngo64asY1bwLHzsfEjJS+Hjgx/z4KoH+evcX9VUi6pXoC3g87DPATBXmZOWn8b/jv3PyKUSQgjTJ4GlihT3Y4lMjeSFLS8QdTUKN2s3Fj6wEDcbtwpf10xtxuudX2dGxxmoUPH3+b95ZM0j/HXuL5Nubfn08KeEXgrFUm3Jgt4LqOtQt9LXtDCz4PEmj7NuyDre7vI2XrZeJOcm8+buNwlLCquCUle/nyN/Jj47Hk9bT+bdpx819supX0rcMhNCiPK4evUqo0aNwsXFBVtbWwYMGMDZs2cNr1+8eJGBAwfi4uKCnZ0dLVq0YMOGDYZzR44ciYeHBzY2NjRu3JhFixYZqyq3JYGlinjaetLAqQEKChEpEdhZ2LHwgYX4OfhV+toqlYpRLUaxdMBSGjk34mrBVd7c/SYTNk8wyS+6FadX8MupXwD4sMeHtPVsW6XXtzSz5InAJ9gwdAMPBjwI6FfONuUAB/rbhMWtKa90eIUH/B+gR50eaHQaPj30qZFLJ4QAfR+z3KJco2wV/TdszJgxHD58mL/++ot9+/ahKAoPPvggRUVFAEyaNImCggJ27tzJ8ePHmTt3Lvb29gC8/fbbREZGsnHjRk6dOsW3335rsmsRySihKhTkE8T5jPNYqC34sveXNHVtWqXXb+vZlpUPr2RJ5BIWHl3IgYQDDF07lGdbP8szL
Z/BwsyiQtct0BZwPOU49Rzr4WnrWaky7rq0izkH5wDwUruX6F+/f6WudzuWZpZM6ziNbbHbiEiJIDQulN71elfb+1XWgvAF5GnyaO3e2hC0ZnSawb7L+wi9FMrey3vp6tvVyKUU4t6Wp8kjaFmQUd77wJMHsLWwLdc5Z8+e5a+//mLPnj107ar/9+PXX3/Fz8+PNWvW8PjjjxMbG8ujjz5qWGeoQYMGhvNjY2Np164dHTt2BKB+/fpVU5lqIC0sVWhks5F08+3G/N7z6ezTuVrew8LMgvGtxrP6kdUE+wRTqCvkq4iveOzvxwhPCi/zdbILs9kYs5FpodPouaInY/8dy0OrHuLXU7+iU3QVKltUWhTTd0xHp+gY1HAQ41uNr9B1ysPT1pORzfRLqn955EuTXdbgZOpJ1p5bC8BrnV9DrdL/1QtwCmB40+EAzDs4r0zLOwghRLFTp05hbm5OUND1kOXm5kZgYCCnTulnX3/ppZf44IMP6NatG7Nnz+bYsWOGYydOnMiKFSto27Ytr776Knv37q3xOpSVtLBUIX9HfxY+sLBG3svP0Y/vHviODTH6GXTPZ5xn9D+jebTxo7zS4RWcrJxuOic1L5XtcdvZGruVAwkHKNIVGV6zs7AjpyiHjw9+zNbYrbzX9b1y9TtJyU1h8rbJ5Gpy6ezdmdnBs28730xVeqbVM/x+5nei06P5+/zfDG40uEbet6wURWHuwbkAPNzg4ZvmXnm+zfOsO7+OcxnnWBm1kiebPWmMYgoh0A92OPBk2RazrY73rg7jx4+nX79+rF+/nk2bNjFnzhw+++wzXnzxRQYMGMDFixfZsGEDmzdvpk+fPkyaNIlPPzW929QyD0stkFGQwRdhX/Dn2T8BcLV25bVOrzEgYACXcy6z9eJWtsZuJSIlokTrSX3H+vSp14c+9frQ3K05f5z5g8/CPiNPk4etuS3TOk7j8SaP3zF45BblMvbfsUSmRlLfsT6/PPhLqYGpOi06sYjPwz7H286bdUPWYWVmVaPvfzv/xPzDjJ0zsDG34a/Bf+Ft533TMSujVvL+/vdxsnJi/ZD1Nf77E+JeVBvmYZk0aRJNmjQpcUsoNTUVPz8/li5dymOPPXbTuTNnzmT9+vUlWlqKfffdd8yYMYPMzMwqLa/MwyIA/Uyx73R9h0X9FhHgFEBafhqv7XqNvn/2pf+f/fnk8CeEJ4ejU3Q0d2vOi+1eZO2gtfw95G+mdJhCK49WmKnNGNZ0GH8O/JP2nu3J1eTy/v73eX7L8yTmJN7yvbU6LTN3zSQyNRIXKxe+6fONUb5sRzQdgZetF4k5iaw4vaLG3/9W8jX5hmHMY1uOLTWsAAxtPJTGLo3JKMjgm4hvarKIQoi7WOPGjRk0aBATJkxg9+7dHD16lKeeeoo6deowaNAgAKZMmcK///5LTEwM4eHhbN++nWbNmgEwa9Ys1q5dS3R0NCdPnmTdunWG10yNBJZapKN3R/4Y+AeT2k7CUm1JYk4iapWajl4deb3z62x6dBO/Pfwbz7Z+lgbODUq9hp+jHz/1+4kZHWdgZWbF3st7Gbp2KGui15Tag/2LsC/YFrcNC7UFC+5fgJ9j5UdFVYS1uTUvtH0BgO+Pf28yk7EtObmEhJwEvO28GdNizC2PM1eb81qn1wD4Leo3zqWfq6ESCiHudosWLaJDhw48/PDDBAcHoygKGzZswMJCPxBDq9UyadIkmjVrRv/+/WnSpAnffKP/j5GlpSUzZ86kdevW9OzZEzMzM1asMJ3/9N1IbgnVUvHZ8USlRdHWs225J2ordj7jPG/tfovjV44D0KtuL2YFz8LD1gO4fhsDYG6PuTzY4MGqKXwFaXQahv41lJiMGJ5t/SwvtnvRqOVJykli4JqB5Gnyyvz7eXnby2yL20ZX364sDFlYY/2AhLgX3c23hO42NX5LaM6cOXTq1AkHBwc8PT0ZPHgwUVFRtz3n+++/p0ePHri4uODi4kJISAgHDx4sccyYMWNQqVQltv79q2847L2gjn0d7q93f4XDCkADpwYsHbCUl9u/jLnanNBLoQz5awgbYzayJ34PHx34CIDJbScbPayAvpXipXYvAfoJ2q7kXTFqeb488iV5mjzaerRlQMCAMp0zveN0LNQW7L28l52XdlZzCYUQ4u5RrsCyY8cOJk2axP79+9m8eTNFRUX07duXnJycW54TGhrKiBEj2L59O/v27cPPz4++ffsSHx9f4rj+/fuTkJBg2JYvX16xGokqZa42Z3yr8fz28G80c21GRkEGr+58lclbJ6NVtDzS8BGebf2ssYtp0KdeH1q7tyZPk8fCozUzYqs0x1OOG5YMeK3za2VuKfFz9OPp5k8D8MnhTyjSFt3hDCGEuDeUK7D8888/jBkzhhYtWtCmTRsWL15MbGwsYWG3nhb9119/5YUXXqBt27Y0bdqUH374AZ1Ox9atW0scZ2Vlhbe3t2FzcXGpWI1EtWji0oRfH/qViW0mYq4yR6No6OjVkXeC3zGp2xYqlYopHaYA8OeZP4nNjK3xMiiKwtxD+mHMjzR8hJbuLct1/rOtn8XN2o2LmRdZdnpZdRRRCCHuOpXqdJuRkQGAq2vZbzvk5uZSVFR00zmhoaF4enoSGBjIxIkTSU1NveU1CgoKyMzMLLGJ6mehtuCFti+w/OHlTO0wlQX3L6jw7LrVqZN3J7rV6YZG0fDVka9q/P03xmzkaMpRbMxteLn9y+U+387CznDewqMLSc279d8FIYS4V1Q4sOh0OqZMmUK3bt1o2bLs/4N87bXX8PX1JSQkxLCvf//+LF26lK1btzJ37lx27NjBgAED0GpLn7V0zpw5ODk5GTY/P+OMTLlXNXVtytiWY3G0NN0OzlPaTwFg44WNRKZG1tj75mnyDMOYx7caX+GlDgY1GkQz12ZkF2XzVUTNhy4h7iW1YOyJyauK33GFA8ukSZM4ceJEuYY/ffzxx6xYsYLVq1eX6CU8fPhwHnnkEVq1asXgwYNZt24dhw4dIjQ0tNTrzJw5k4yMDMMWFxdX0WqIWqqpa1PDej0LwhfU2PsuPrGYpNwkfO18GdV8VIWvo1apeb3z64D+1tbptNNVVUQhxDXFw35zc3ONXJLar/h3XPw7r4gKTc0/efJk1q1bx86dO6lbt2zTt3/66ad8/PHHbNmyhdatW9/22AYNGuDu7k50dDR9+vS56XUrKyusrExnJlNhmia3m8ymi5vYe3kvBxIOEORTvQuaJeYk8tOJnwB4peMrWJtXbphke6/29K/fn38u/MPcg3P5qd9PJtVfSIi7nZmZGc7OziQnJwNga2srf8eqmKIo5ObmkpycjLOzM2ZmZhW+VrkCi6IovPjii6xevZrQ0FACAgLKdN68efP48MMP+ffffw0rQt7OpUuXSE1NxcfHpzzFE6IEPwc/Hm/yOMtPL2d+2HyWPbSsWv8xmh8+n3xtPu0929PPv1+VXHNqh6lsj9vO4aTDLI1cir+jv34pek0uOUU55GpyySvKI1ejX56+eJ+iKHTx7UK/+v3wc7g7bplmFGTw1ZGv
cLF2obN3Z1p7tMbSzNLYxRK1nLe3fvbp4tAiqoezs7Phd11R5Zo47oUXXmDZsmWsXbuWwMBAw34nJydsbPSLNo0aNYo6deowZ84cAObOncusWbNYtmwZ3bp1M5xjb2+Pvb092dnZvPvuuzz66KN4e3tz7tw5Xn31VbKysjh+/HiZWlJk4jhxK1fyrvDgqgfJ0+Tx2X2f0bd+3yq9vk7RsffyXpafXs7OSztRoWL5w8tp4daiyt7jqyNf8d2x7yp8fgu3FvSv359+9fvhY2+a/wnQ6DQ8v+V5DiRcX3TO2syatp5tCfIJopN3J1q4tcBcLeu1iuqh1WopKpJpBKqDhYXFLVtWyvP9Xa7Acqv/nS5atIgxY8YA+gWZ6tevz+LFiwGoX78+Fy9evOmc2bNn884775CXl8fgwYM5cuQI6enp+Pr60rdvX95//328vLzKVC4JLOJ2vo74moVHF1LfsT6rB62uki+9jIIM1kav5beo34jNuj50ekKrCbzU/qVKX/9GuUW5TN8xncvZl7G1sMXW3BYbCxvsLOywNdf/XLzf1sIWG3Mbcoty2Ry7mUOJh0oseNnGow396/enb/2+ZeoQnFWYxZmrZ4hKi+LM1TOcTjtNfHY8z7d5npHNRlZZHT899ClLIpdgY25Djzo9OJx0mLT8tBLH2FnY0d6zPUE+QXT27kygayBqlawuIsTdrNoCi6mSwCJuJ7swmwdXPcjVgqvMCp7F400er/C1otKiWH56ORtiNpCnyQPAwcKBQY0GMbzpcPwd/auq2FXiSt4Vtlzcwr8X/iUsKQwF/V93FSpDH5kH/B/AxdqF+Kx4oq5G6bdrASU+O/6W1/6o+0cMbDiw0mXccH4Dr+3Sr6P06X2f0q9+PxRF4Vz6OQ4mHuRg4kEOJR4is7Dk9AWOlo4E+wYzruU4mrmZ5mJtQojbk8AixH/8EvkLcw/NxcPGg/VD12NjblPmc4t0RWy9uJXlp5cTnhxu2N/YpTEjmo7goYCHsLWwrY5iV6nk3GQ2X9xsmCemmFqlxsbchpyi0mes9rbzJtAlkCYuTQh0DSQsKYzlp5djrjLny/u/pEfdHhUu0+m00zy94WnytfmMaznOMOnff+kUHVFpUYYAE5YUVqK8A+oPYHK7ydRzrFfhsgghap4EFiH+o1BbyCNrHiE+Ox4fOx9crV2xs7C7abO3sMfWwhZ7C3vsLOw4e/Usv5/5nZS8FADMVeb08e/DiKYjaO/Z/q4dUZCQncCmi5v4J+YfTqSeAMBSbUlD54YEugYS6BJIoKs+pDhZOZU4V6fomLlrJhtiNmBjbsMPfX+gtcftR/6VJj0/neHrhxOfHU+3Ot34+v6vMVOXbQSBRqchMjWSZaeXseH8BhQUzFXmPNrkUZ5r/ZxhgU4hhGmTwCJEKf6J+YcZO2dU6Fx3G3ceb/I4jzV5rMKTwZmqy9mXyS3Kxd/JHwt12eZIKNIWMXnbZPZe3ouzlTNLBiyhgVODMr+nRqdh4paJ7E/YT137uqx4eMVNwaisotKiWBC+gF3xuwCwMbfhqWZPMablGJOe3FAIIYHF2MURJuxi5kVSclPIKcohpyiH7KJscotyyS7KNuy7cbO1sGVwo8GE1AsxyWUIjCm3KJdx/47jROoJfOx8+HnAz3jZla2j/GeHP2PxycXYmNvwy4O/0MSlSaXLczjxMPPD5xtudzlaOjK+1XhGNB1R6TlxhBDVQwKLEKJGpOWnMWrjKC5mXqSRcyOWDFhyx1aNjTEbeXXnq8D1TrZVRVEUQuNC+fLIl0SnRwPgaevJC21eYFCjQTIsWggTI4FFCFFj4rPjeXrD06TkpdDesz3fPfDdLVs0otKieGrDU3fsZFtZWp2WdefX8XXE1yTkJABQ37E+L7V/iZB6IXdt3yMhapvyfH/LJAZCiEqpY1+Hb0O+xd7CnvDkcF7b+Roaneam49Lz03l5+8vka/Pp5tuNF9u9WG1lMlObMajRINYNWcernV7FxcqFC5kXmBo6lac3Ps2R5CNV9l5ZhVn8eupXNsZsNPoieoqicOLKCQq1hUYthxDVQVpYhBBV4lDiIZ7f/DyFukIebfwos4NnG1oyqrKTbUVkF2az+ORilkYuNcyf06deH15u/zIBTmVbYuS/knKS+PXUr6w8s9IwxLqvf19md51ttM6+q8+uZtbeWQR5B7HwgYVyC0yYPLklJIQwii0XtzBtxzR0io7nWj/H5HaTAfj88OcsOrmoSjvZVkRKbgrfHP2GVWdXoVN0mKnMeKzJYzzf5nncbdzLdI1z6edYfHIx686vM7Qk+Tv6E58Vj0bR4Gvny9yec2nr2bYaa3IzRVEY+tdQQ9+dsS3HMrXD1BotgxDlJYFFCGE0K6NW8v7+9wF4M+hNnKycDJ1sP7nvE/rX72/M4gH60DE/fD6hcaGAfij02BZjGd1idKmTACqKwpHkIyw6sYjQS6GG/R29OjK25Vi61+nOySsneXXnq1zKvoSZyoxJbSfxTMtnyjy3TGUdTDjIuE3jMFebG4LU570+5wH/B2rk/YWoCAksQgij+vbot3wT8Q0qVFiaWVKgLeCZls/wSodXjF20Eg4nHubzsM85fuU4AG7WbrzQ9gWGNh6KudocnaJje9x2Fp1YZBgurUJFn3p9GNty7E0T5mUVZvH+/vfZGLMRgCCfID7q/lGNzN0zZfsUtsZuZVjgMKzMrFgauRRbc1uWP7y8XHPkCFGTJLAIIYxKURQ+PPAhv0X9BkA332583afsM9nWJEVR2HRxEwvCFxCXFQdAgFMADzd4mL/P/c2FzAuAfibgQY0GMar5KOo71b/t9dZEr2HOwTnkafJwsXLhg+4f0LNuz2qrw+XsywxYNQCdomPNoDX4O/ozYdMEDicdpoFTA5Y9tAw7C7tqe/+qcCnrEo5WjjLZ3z1GAosQwui0Oi2fHv6UuKw4Puz+YY12sq2IIm0RK8+sZOHRhaQXpBv2O1g6MDxwOE82e7LM/VwAzmec59UdrxJ1NQqAUc1HMaX9lGqZgHB+2Hx+PPEjQd5B/NDvB0C/8OWwv4eRnJfMA/4P8Nl9n5nscO5VZ1fxzt538LD1YOXDK3GzcTN2kUQNkcAihBAVlFWYxaITiziWcoz7/O5jaOOhFW6dKNAW8Pnhz1l2ehkAzd2aM6/nvCpd1Ttfk88DfzxAekE683vPp0+9PobXIpIjGPvvWDQ6DdM7Tmd0i9FV9r5V5ddTv/LxwY8NP3f17cq3Id+iVsmsG/cCmYdFCCEqyMHSgZfav8QP/X7g6eZPV+pWipWZFTODZrKg9wKcrJyITI3kib+f4O9zf1dZef+58A/pBen42PnQq26vEq+19WzLq530HZ6/CPuCQ4mHqux9q8IPx38whJVHGj6CtZk1ey/v5cfjPxq5ZMIUSWARQohqdn+9+/lj4B908OpAriaXN3a/wS+Rv1T6uoqisOyUvvVmWOCwUvsIDQ8czsAGA9EqWqbvmE5iTmKl37eyFEXhy/AvWRC+AIAX2rzAB90+4I2gNwD4KuIrwpLCjFlEYYIksAghRA3
wtvPmx74/Mq7lOAC+PPJlpcPD0ZSjnEo7hZWZFY82frTUY1QqFW8Hv02gSyBp+WlM2zHNqDPhKorCvEPz+P749wBM6zCNiW0nolKpGNxoMA83eBidouPVna9yNf+q0copTI8EFiGEqCFmajNeav8S7TzbkafJY96heZW6XnHfmAEBA3C2dr7lcTbmNnzR6wscLB04lnKs0u9bUVqdlnf3vcsvp/StS28FvcWYlmMMr6tUKt7u8jb1HeuTnJvMG7vfQKfojFJWYXoksAghRA1Sq9S8GfQmZiozNl/czJ74PRW6TkpuCpsvbAbgyaZP3vF4P0c/Pu6h7y/yW9Rv/HXurwq9b0VpdBre2P0Gf579E7VKzQfdPmBY02E3HWdrYcun932KlZkVu+N3s+Tkkkq/965Lu9h7eW+lryOMSwKLEELUsEDXQEY0HQHARwc+okBbUO5r/HHmDzSKhnae7Wjm1qxM5/Ss25OJbSYC8N6+9ziddrrc71sRhdpCpoVOY0PMBsxV5szrOY9BjQbd8vhA10Be6/waAAvCFxCRHFGh980tyuXN3W/ywtYXeG7zc/wc+XOFriNMgwQWIYQwgkltJ+Fh40FsViyLTiwq17nFc8YAhuBTVs+3eZ7udbpToC1gyvYpZBRklOv88srT5PHStpfYFrcNS7Ul83vPp1/9fnc877HGjzGg/gC0ipZXd75a7nKeSz/Hk+ufLNGSNO/QPH44/kO56yBMgwQWIYQwAntLe2Z0mgHoh/cWz7JbFpsvbuZK3hU8bDwI8Q8p1/uqVWo+7vExdezrEJ8dz7TQaey6tIvUvNRyXacscopymLhlInsu78HG3IavQ77mPr/7ynSuSqViVvAs6jnUIyEngbf2vEVZpw1bG72WEetHcC7jHB42HvzU7ydeaPMCoG+x+SbimzJfS5gOmThOCCGMRFEUJmyawIHEA/Ss25Ov7v+qTLPRPr3haSJSInih7QuGWzzldSr1FE9vfLrE7SgvWy+auzWnuVtzWri1oLlb83LPOqvRaUgvSCclN4X397/P8SvHsbew55uQb2jn2a5C5Ry5YSRFuiJmdJzBqBajbnlsniaPjw58xJroNQB08enCnB5zDDMU/3D8B8NQ6mdaPsOU9lNMdvbfe4XMdCuEEHeJ8xnnefSvR9HoNCzovYD7691/2+MjUyMZtm4Y5mpzNj+2uVzLBfxXWFIYK6NWEpkaycXMiyjc/HVwY4hp7tYcgNS8VFLzUw2PaflppObpH6/mXy1xHScrJ7574DtauLWocDlXnF7Bhwc+xFxtztL+S2nl0eqmY86ln2P6julEp0ejVql5oc0LjG81/qa5aX6O/NkwSmpks5G81uk1CS1GJIFFCCHuIsVrAfna+bJm8BpszG1ueezbe95mTfQaHgx4kLk951ZZGXKKcjiVeorI1Egi0yKJTI3kQsaFUkPMnahQ4WLtQn3H+rzV5S0auzSuVNkURWHajmlsvriZOvZ1WDlwZYlFEv8+9zfv73+fPE0e7jbuzO0xl84+nW95vZVRK3l///sAPN7kcd7q8pYsBWAkEliEEOIukluUy+C1g0nISWBCqwm81P6lUo+7mn+VkN9DKNQV8vOAn2nr2bZay5VTlMPptNOcvHKSyLRIotKisFBb4Grjipu1G242brhZu+Fq7Wp47mbjhouVS5WvzJ1VmMUTfz/BpexL9KnXhy96fUG+Np85B+awOno1AEE+QXzc4+MytTqtiV7DrD2zUFB4pOEjvNf1PZNcTby2k8AihBB3ma2xW5myfQrmanNWPbKKAKeAm4758fiPzA+fT3O35qx4aMU9dyvj5JWTPLXxKTQ6DWNbjmXXpV1Ep0ejQsXEthN5ttWz5QodG85v4I3db6BVtAyoP4APe3yIhbrqV9MWtyaLHwohxF3mfr/76VGnBxqdhg8PfHjTKBaNTsNvUb8B+oni7rWwAtDCvQXTOkwDYNGJRUSnR+Nm7cb3fb9nYpuJ5W4hebDBg3x636eYq83ZeGEjM3bMoEhbVB1FF1VAWliEEMJExGXGMXjtYAp1hXzS8xP6B/Q3vLb14lamhE7BxcqFzY9vxsrMyoglNR5FUZgaOpUtsVsI8g7i455luwV0OzvidvBK6CsU6YroWbcnn/f6/Kbfr0anISE7gbisOGKzYonLijNsFmoL+tXvx0MNHsLbzrtSZbnXyC0hIYS4S3179Fu+ifgGTxtP/hryF3YWdgCM/3c8BxIPML7VeF5u/7KRS2lcWp2WM1fP0MSlSZX1O9l7eS8vb3uZfG0+QT5B9KjTg7isOC5lXSI2K5aE7AQ0iua211ChorN3ZwY2HEiIf4jhsxO3JoFFCCHuUgXaAoasHUJcVhyjmo9iRqcZRF+NZshfQ1Cr1Pwz9B987H2MXcxa6VDiISZtnUSeJq/U163MrKhrXxc/Bz/8HP30jw5+JOUk8ff5vwlLCjMca21mzf317ueRho8Q5BOEudq8pqpxV5HAIoQQd7Hd8buZuGUiZiozVg5cycqolfwW9Rsh9UL4ovcXxi5erXYs5Rg/HP8BSzNLQyAp3jxtPW87/PlS1iXWn1/P3+f/5mLmRcN+dxt3Hgx4kEcaPkKga2BNVOOuIYFFCCHucq9sf4UtsVto5d6K6PRo8jR5/NTvJzp5dzJ20cQdKIrC8SvH+fvc3/xz4R/SC9INrzV2aUwj50bYWdhhZ26HnYUdtha2+p+vbbbm1392s3Gr1beWJLAIIcRdLjEnkUfWPGK4PdHIuRGrHll1T44OupsVaYvYFb+LdefXERoXSpGufKOQLNQWPNn0SZ5t82yJyfJqCwksQghRC/x04ie+CNPfAnq7y9s8EfiEkUskKiOjIINd8btIy0sjR5NDblEuOUU55BRde6654XlRDtlF2YbA6mzlzMQ2E3k88PFaNVeMBBYhhKgFirRFjN80nszCTH598FdsLWyNXSRRgxRFYXf8bj49/CnnM84DUN+xPtM7Tqdn3Z61orVNAosQQghRS2h0Gv488ydfR3zN1YKrgH4ZghkdZ1SoE6+iKFzKukRcVhx+jn7Usa9jtLWUJLAIIYQQtUxWYRbfH/+eXyJ/oUhXhAoVQxoPYXLbyXjYetzyPI1OQ9TVKI4kHSE8OZwjyUe4knfF8LqNuQ0BTgE0cm5EQ+eGhkcfO59qDzISWIQQQoha6lLWJeaHz+ffC/8C+sAxruU4RrUYhY25DblFuRy/cpzw5HDCk8I5mnL0prllLNQW1LGvw+XsyxTqCkt9HxtzGxo6NSwRYrr4dMHCrOr60EhgEUIIIWq5iOQIPjn0CceuHAPAy9YLDxsPTqWdQqtoSxzrYOFAW8+2tPdqTzvPdrR0b4mVmRVanZa4rDjOpZ8jOj1a/5gRzYWMCzeNaDJTmXFw5EEszSyrrA7VFljmzJnDqlWrOH36NDY2NnTt2pW5c+cSGHj7e2i///47b7/9NhcuXKBx48bMnTuXBx980PC6oijMnj2b77//nvT0dLp168a3335L48aNy1QuCSxCCCHuRYqi8M+Ff/gi7AsSchIM+71svW
jv1Z4Onh1o59WORs6NynV7R6PTEJsVqw8wV6OJTo+mQFvAV32+qtLyV1tg6d+/P8OHD6dTp05oNBreeOMNTpw4QWRkJHZ2pU9ss3fvXnr27MmcOXN4+OGHWbZsGXPnziU8PJyWLVsCMHfuXObMmcOSJUsICAjg7bff5vjx40RGRmJtbV2lFRZCCCFqm3xNPpsubkKtUtPBs8Nds3xDjd0SSklJwdPTkx07dtCzZ89Sjxk2bBg5OTmsW7fOsK9Lly60bduWhQsXoigKvr6+TJs2jenTpwOQkZGBl5cXixcvZvjw4XcshwQWIYQQ4u5Tnu/vSnX/zcjIAMDV1fWWx+zbt4+QkJAS+/r168e+ffsAiImJITExscQxTk5OBAUFGY75r4KCAjIzM0tsQgghhKi9KhxYdDodU6ZMoVu3boZbO6VJTEzEy8urxD4vLy8SExMNrxfvu9Ux/zVnzhycnJwMm5+fX0WrIYQQQoi7QIUDy6RJkzhx4gQrVqyoyvKUycyZM8nIyDBscXFxNV4GIYQQQtQc84qcNHnyZNatW8fOnTupW7fubY/19vYmKSmpxL6kpCS8vb0Nrxfv8/HxKXFM27ZtS72mlZUVVlZWFSm6EEIIIe5C5WphURSFyZMns3r1arZt20ZAQMAdzwkODmbr1q0l9m3evJng4GAAAgIC8Pb2LnFMZmYmBw4cMBwjhBBCiHtbuVpYJk2axLJly1i7di0ODg6GPiZOTk7Y2NgAMGrUKOrUqcOcOXMAePnll7nvvvv47LPPeOihh1ixYgWHDx/mf//7HwAqlYopU6bwwQcf0LhxY8OwZl9fXwYPHlyFVRVCCCHE3apcgeXbb78FoFevXiX2L1q0iDFjxgAQGxuLWn294aZr164sW7aMt956izfeeIPGjRuzZs2aEh11X331VXJycnj22WdJT0+ne/fu/PPPP2Wag0UIIYQQtZ9MzS+EEEIIo6ixeViEEEIIIWqCBBYhhBBCmDwJLEIIIYQweRJYhBBCCGHyJLAIIYQQwuRJYBFCCCGEyZPAIoQQQgiTJ4FFCCGEECZPAosQQgghTJ4EFiGEEEKYPAksQgghhDB5EliEEEIIYfIksAghhBDC5ElgEUIIIYTJk8AihBBCCJMngUUIIYQQJk8CixBCCCFMngQWIYQQQpg8CSxCCCGEMHkSWIQQQghh8iSwCCGEEMLkSWARQgghhMmTwCKEEEIIkyeBRQghhBAmTwKLEEIIIUyeBBYhhBBCmDwJLEIIIYQweRJYhBBCCGHyJLAIIYQQwuRJYBFCCCGEyZPAIoQQQgiTJ4FFCCGEECZPAosQQgghTJ4EFiGEEEKYPAksQgghhDB5EliEEEIIYfIksAghhBDC5ElgEUIIIYTJk8AihBBCCJMngUUIIYQQJq/cgWXnzp0MHDgQX19fVCoVa9asue3xY8aMQaVS3bS1aNHCcMw777xz0+tNmzYtd2WEEEIIUTuVO7Dk5OTQpk0bvv766zIdv2DBAhISEgxbXFwcrq6uPP744yWOa9GiRYnjdu/eXd6iCSGEEKKWMi/vCQMGDGDAgAFlPt7JyQknJyfDz2vWrOHq1auMHTu2ZEHMzfH29i7TNQsKCigoKDD8nJmZWebyCCGEEOLuU+N9WH788UdCQkLw9/cvsf/s2bP4+vrSoEEDRo4cSWxs7C2vMWfOHEMQcnJyws/Pr7qLLYQQQggjqtHAcvnyZTZu3Mj48eNL7A8KCmLx4sX8888/fPvtt8TExNCjRw+ysrJKvc7MmTPJyMgwbHFxcTVRfCGEEEIYSblvCVXGkiVLcHZ2ZvDgwSX233iLqXXr1gQFBeHv78/KlSsZN27cTdexsrLCysqquosrhBBCCBNRYy0siqLw008/8fTTT2NpaXnbY52dnWnSpAnR0dE1VDohhBBCmLIaCyw7duwgOjq61BaT/8rOzubcuXP4+PjUQMmEEEIIYerKHViys7OJiIggIiICgJiYGCIiIgydZGfOnMmoUaNuOu/HH38kKCiIli1b3vTa9OnT2bFjBxcuXGDv3r0MGTIEMzMzRowYUd7iCSGEEKIWKncflsOHD9O7d2/Dz1OnTgVg9OjRLF68mISEhJtG+GRkZPDnn3+yYMGCUq956dIlRowYQWpqKh4eHnTv3p39+/fj4eFR3uIJIYQQohZSKYqiGLsQlZWZmYmTkxMZGRk4OjoauzhCCCGEKIPyfH/LWkJCCCGEMHkSWIQQQghh8iSwCCGEEMLkSWARQgghhMmTwCKEEEIIkyeBRQghhBAmTwKLEEIIIUyeBBYhhBBCmDwJLEIIIYQweRJYhBBCCGHyJLAIIYQQwuRJYBFCCCGEyZPAIoQQQgiTJ4FFCCGEECZPAosQQgghTJ4EFiGEEEKYPAksQgghhDB5EliEEEIIYfIksAghhBDC5ElgEUIIIYTJk8AihBBCCJMngUUIIYQQJk8CixBCCCFMngQWIYQQQpg8CSxCCCGEMHkSWIQQQghh8iSwCCGEEMLkSWARQgghhMmTwCKEEEIIkyeBRQghhBAmTwKLEEIIIUyeBBYhhBBCmDwJLEIIIYQweRJYhBBCCGHyJLAIIYQQwuRJYBFCCCGEyZPAIoQQQgiTJ4FFCCGEECZPAosQQgghTF65A8vOnTsZOHAgvr6+qFQq1qxZc9vjQ0NDUalUN22JiYkljvv666+pX78+1tbWBAUFcfDgwfIWTQghhBC1VLkDS05ODm3atOHrr78u13lRUVEkJCQYNk9PT8Nrv/32G1OnTmX27NmEh4fTpk0b+vXrR3JycnmLJ4QQQohayLy8JwwYMIABAwaU+408PT1xdnYu9bXPP/+cCRMmMHbsWAAWLlzI+vXr+emnn3j99dfL/V5CCCGEqF1qrA9L27Zt8fHx4YEHHmDPnj2G/YWFhYSFhRESEnK9UGo1ISEh7Nu3r9RrFRQUkJmZWWITQgghRO1V7YHFx8eHhQsX8ueff/Lnn3/i5+dHr169CA8PB+DKlStotVq8vLxKnOfl5XVTP5dic+bMwcnJybD5+flVdzWEEEIIYUTlviVUXoGBgQQGBhp+7tq1K+fOneOLL77g559/rtA1Z86cydSpUw0/Z2ZmSmgRQggharFqDyyl6dy5M7t37wbA3d0dMzMzkpKSShyTlJSEt7d3qedbWVlhZWVV7eUUQgghhGkwyjwsERER+Pj4AGBpaUmHDh3YunWr4XWdTsfWrVsJDg42RvGEEEIIYWLK3cKSnZ1NdHS04eeYmBgiIiJwdXWlXr16zJw5k/j4eJYuXQrA/PnzCQgIoEWLFuTn5/PDDz+wbds2Nm3aZLjG1KlTGT16NB07dqRz587Mnz+fnJwcw6ghIYQQQtzbyh1YDh8+TO/evQ0/F/clGT16NIsXLyYhIYHY2FjD64WFhUybNo34+HhsbW1p3bo1W7ZsKXGNYcOGkZKSwqxZs0hMTKRt27b8888/N3XEFUIIIcS9SaUoimLsQlRWZmYmTk5OZGRk4OjoaOziCCGEEKIMyvP9LWsJCSGEEMLkSWARQgghhMmTwCKEE
EIIkyeBRQghhBAmTwKLEEIIIUyeBBYhhBBCmDwJLEIIIYQweRJYhBBCCGHyJLAIIYQQwuRJYBFCCCGEyZPAIoQQQgiTJ4FFCCGEECZPAosQQgghTJ4EFiGEEEKYPAksQgghhDB5EliEEEIIYfIksAghhBDC5ElgEUIIIYTJk8AihBBCCJMngUUIIYQQJk8CixF9vimK8UsOcS4l29hFEUIIIUyaBBYjWRsRz5fbotlyKpmHv9zNb4diURTF2MUyqtxCDf+eTOSLzWc4fCGtVv8+Pv03ijbvbuJoXLqxiyKEEHcFlVILvhUyMzNxcnIiIyMDR0fHKr22TqdQqNVhbWFWZddMyMij3xc7yczX4OtkzeWMfAAeau3DR0Na4WRjUWXvZeoSM/LZejqJLZFJ7DmXSqFGZ3itjZ8z47sHMKClN+ZmtSdbn03Kot/8negU6ODvwh/PB6NSqYxdLCGEqHHl+f42r6Ey3ZXyi7RM//0oWfkafhzdsUq+NHU6hem/HyUzX0Obuk6sfD6Yn3Zf4LNNUaw/lkBEbDpfjmhLB3/XKqiB6VEUhZOXM9lyKomtp5I5Hp9R4nU/VxuaeTsSeiaFo3HpvLj8CHWcbRjTtT7DOvvhaH33h7mPNpxCd+2/CWEXr7I5Mom+LbyNWyghhDBx0sJyG6cTMxn89R7yi3Q83cWf9wa1qPT/hH/aHcN76yKxtlCz/qUeNPSwByAiLp2Xlh8hNi0XM7WKl/s0ZlLvRpip7/7/eRdotOw7l2oIKQnXWpQAVCpo5+dMn2ZePNDci8ae9qhUKq5kF/DL/ov8vO8iqTmFANhZmjGsUz3GdquPn6utsapTKbvPXuGpHw9grlYxoJUPfx+9TGNPeza+3KNWtSIJIURZlOf7WwLLHfxzIpGJv4ahKDDr4eY80z2gwtc6m5TFQ/+3m0KNjvcHteDp4PolXs/KL2LW2pOsPhIPQOcAV+YPa4uvs01lqmBUSZn5PLZwL3FpeYZ9NhZm9GjsTkgzL3o39cTDweqW5+cXaVkbEc8Pu2I4m6zvnKxWQf+W3ozr3oAO/i7VXoeqotUpPPx/uzmVkMmYrvV55YEm3PfJdtJzi5j3aGue6ORn7CIKIUSNksBSxf638xwfbTiNSgXfP92RkOZe5b5GoUbHkG/2cPJyJvc18WDx2E63bK1ZfeQSb60+QU6hFicbC+Y+2or+LX0qW40aV6jRMfx/+wiPTcfNzpJ+Lb0JaeZJ14bu5e4TpCgKO89e4Ydd59l19ophf1s/Z14OaUzvQM+qLn6V+/1wHDP+OIaDtTk7ZvTG1c6SH3ad54P1p/B2tCZ0Rq8q7SslhBCmrjzf39IGXQYTejRgROd6KAq8tOIIJ/7T76IsFmw9w8nLmTjbWvDJY61ve2tpSLu6bHi5B23qOpGRV8Tzv4Qzc9Vx8gq1lalGjXtv3UnCY9NxsDbnz4ld+WhIK+5v6lWhL2WVSsV9TTz4eVwQ/07pybCOfliaqYmIS+eZxYeIMPHRNnmFWj7dFAXAi/c3wtXOEoCnuvhTx9mGxMx8Fu+9YMQSCiGEaZPAUgYqlYr3BrWgR2N3cgu1jFtyiMQb+mHcyeELaXwbeg6AOUNa4elofcdz/N3s+P35rjx/X0NUKlh+MJaBX+3m+KXyhyVjWHk4jl/2x6JSwYLhbanvbldl1w70dmDuY63Z8/r9hDTzRFHgw/WRJj0M+vtd50nKLKCuiw2jbrgVaG1hxisPNAHgm+3RZOQWGamEQghh2iSwlJGFmZqvR7ansac9SZkFjFtyiJwCzR3Pyy7QMHXlUXQKDG1fhwGtyn5rx9JczesDmvLLuCA8HayITs7mka93M/33oyRllj0w3UmRVseWyKQqm8Du2KV03lpzAoApfZpwf9Py30IrCw8HK94f3BJrCzWHLlzl35OJ1fI+lZWclc/CHfrA+lr/pje1MA1pV4dALwcy8zV8syPaGEUUQgiTJ4GlHBytLfhpTCfc7S05eTmTl1ccQau7/f/qP1gXSWxaLnWcbXjnkRYVet9ujdzZ+HIPBrf1RVHgj7BL9PoklAVbzpJbeOfQdCt5hVqW7L1Ar09CGb/0MA99uYtNlfzST80u4PmfwyjU6OjT1JMX729UqevdiY+TDRN6NADg442nS8zjYiq+2HyW3EItbf2cebj1zYHVTK3itQGBACzac4HL6Xk3HSOEEPc6CSzl5Odqy/9GdcTKXM2WU8l8sD7ylsdujkxixaE4VCr47Ik2lZpDxM3eivnD27H6ha508Hchr0jLF1vOcP+nO/gz7BK6OwSnG2XkFfH19mi6z93G7L9OEp+eh6WZmvwiHc/9EsaPu2MqdHtFo9Xx0oojXM7IJ8Ddjs+HtUVdA8Oyn7uvIe72VlxIzeXn/Rer/f3KIyoxi98OxQLw1kPNbtl3qXegJ50DXCnU6Ji/5UxNFlEIIe4KElgqoH09Fz5/oi2g/x/x0n0XbjrmSnYBr/95DIBnezSgSwO3KnnvdvX0M6N+9WQ76rroO2tO+/0og7/Zw8GYtNuem5yZz5yNp+j28TY++TeK1JxC6rrY8P7gloTPeoAng/Qdi99fF8k7f51Eoy1fa8Unm6LYE52KraUZC5/qUGMz9tpbmTOtr74fyJdbz5KeW1gj71sWczbqJ4kb0NKbjvVvPRmgSqXi9QFNAX0L2pmkrJoqohBC3BUksFTQQ619mNFP34z/zl8n2R6VbHhNURRe//MYqTmFNPV2YOq1L9OqolKpeLi1L1um3sdr/Ztib2XOsUsZPPHdPib+EsbF1JwSx19MzeGN1cfpPm873+04T3aBhkAvB+YPa0vo9F483cUfeytzPhzckjce1H9pLtl3ked+DitTPx2A9ccS+G7HeQDmPdaaQG+HKq3znTzR0Y9ALwcy8or4v22m0Q9k19kUQqNSsDBT8Vr/pnc8vn09F/q38EanwLx/omqghEIIcfeQwFIJL/RqyOMd6qJTYPKv4ZxKyATgt0NxbDmVjKWZmi+GtcXKvHrm1rC2MGNir4aEzujFyKB6qFWw8UQiD3y+k482nCLsYhovLj9C709DWXYglkKNjg7+Lvw4uqO+T0y7OiVmV1WpVDzbsyHfjGyPlbmaraeTeeK7fXfs4HsmKYsZfxwF4NmeDXi4tW+11Pd2zNQq3nioGQBL913gwpWcO5xRvbQ6hQ/XnwLg6S71yzxKanq/QNQq2HIqicMXbt9iJoQQ9xKZOK6SCjU6Rv90kH3nU/F1smbBiHaM/ukguYVa3niwKc/2bFhjZYlKzOKD9ZElJlYr1ivQgxd6NaJzQNnWKAqPvcqEJYdJzSnEx8man8Z0opnPzb/bzPwiBn21h5grOXRt6MbSZzobdYr5UT8dZOeZFAa09ObbpzoYrRwrD8fx6h/HcLw2SZzLtXlXymLmqmMsPxhHR38Xfr+HFkbML9Ly876LWJipaOBhT4C7Hb7ONrVieQohROlkptsalpFbxJBv93A+JQeVChQFggJcWTahS43/Y6soCqFnUvhw/SnOp2TzUGtfJt7XkOa+5f+9xKbmMnbxQc6l
5GBvZc7XI9tzXxMPw+s6ncKzPx9my6lk6jjb8NfkbrjZ33qa/ZoQlZjFgAX6lZB/fz6YTrfpN1Jdcgs19PoklOSsAt58sBkTejYo1/mJGfn0+nQ7+UU6vh/VkQcqMLPy3ag4qN3I0lxNfTdbAtztCHC3p4GHHQ3c7Qhwt8PVzvKeCXNlpdHqWH88ge6N3I3+d1GIspDAYgQXU3MY8s1e0nIKcbAyZ+OUHtR1Md4CfTqdQnahptKrG2fkFvHcL4fZfz4NM7V+Ar2RQf6AvoPr55vPYGmu5o/ng2ld17kKSl55xV98bfycWT2xa42MVLrRgi1n+WLLGfxcbdgy9b4K3RKc989pvgk9d88sjLg2Ip6XV0SgUulHTMWl5XIxNZfC23T8drQ2p6mPI4+1r8vANr7YWMqyBt/vPM+HG07RxMue1S90w87K3NhFEuK2JLAYyZHYq3y++QzPdA+4K9a2KatCjY7XVx1jVbh+UcbnejYgqIEr45YcRlH0nWyf6Gg6C/clZ+XT+5NQcgq1LBjelkFt69Tce2fm0+vTUHILtXz1ZLsK9+fJyCu6ZxZGPJ+SzcD/201OoZaX7m/E1L76zuxancLl9DzOpWQTcyXHsJ1PyeFyRh43/svlaG3O4x39GBlUjwbXVkC/1+h0Cr0+DSU2LRfQDwz4akQ7aYUSJk0Ci6hyiqLw5dZovrg2R0jxra+RQfX4cEgrI5fuZl9tO8unm85Qx9mGrdPuq7FFBV//8xgrDsXRrp4zqyZ2rdSXRfH/ln2crNk+vWwLI15Oz2NP9JVrfYrcCW7oVmW3JaOTs/jtUBy7zl5hQo8GPNqhbqWvmV+kZeg3e4lMyKRzgCvLxgeVqTUpv0jLhdQcQqNS+GX/RS5dvT7ZXo/G7jzVxZ8+TT1rfcvUjUKjkhmz6BB2lmYUanUUaZUa70dXUZfT85iyIoIAdzs+GNISi3voc7vXVevihzt37mTgwIH4+vqiUqlYs2bNbY9ftWoVDzzwAB4eHjg6OhIcHMy///5b4ph33nkHlUpVYmva9M7DQEXNUalUvBzSmC+GtcHCTIWiQLt6zsweWLHZe6vbuO4N8HGyJj49j0V7LtTIe55OzGTlYX0fjNtNEldWTwf74+tkTUJGPktusTDi1ZxCNhxP4M3Vx+n9aShdP97GjD+O8U3oOZ768QDdPt7GnI2niEqs2LwuuYUafj8cx2Pf7iXk8518vyuG04lZTP/jKH+GXapE7fQ+XH+KyIRMXO0s+XJ4uzIHDGsLM5p6O/L8fQ3ZMaM3i8Z04v6mnqhUsOvsFZ77OYwe87bz5dazJFfhMha3cjox86bpBGrarwf0ExQ+3tGPWQ83B/SzP++JvrkTvilJySrgqR8OcPBCGr8djmPG70fLNRGmuHeU+wZnTk4Obdq04ZlnnmHo0KF3PH7nzp088MADfPTRRzg7O7No0SIGDhzIgQMHaNeuneG4Fi1asGXLlusFM5d7r6ZoSLu61HO1Y3NkEuO6B2Bpbpr/E7KxNGNGv0CmrjzKN9ujebxjXdyrsROioih8tOE0OgUebOVNB//Kd/a1tjBjat9Apv9+lK+3RzO8Uz3MzVQcvJDG3ugr7IlO5VRiZolbI2oVtKrrTICbLdujUkjMzOe7Hef5bsd5Wvg6MqRdHR5p64unw60X4FQUhePxGaw4FMdfEZfJvjYXj5laRe9AT+yszFgbcZkZfxzF0lzNwDYVu+214XiCYWbiz59og7fTnRcFLY2ZWkXvpp70bqrv+/LrgVhWHo4jISOfzzef4cutZ+nX0punu/gTFOBapbdIMnKLmLPxFCsOxWGuVjGxV0Mm39+o2qYyuJXL6XlsPZUEwFNd6tHQw56IuAz+DL/E5GXh/P1id6P2qbuV9NxCnv7xAOev5ODpYEVaTiFrIi7jbGvJ7IHN5XaWKKFSt4RUKhWrV69m8ODB5TqvRYsWDBs2jFmzZgH6FpY1a9YQERFRoXLILSFRGp1OYdDXezgen8FTXerxweCqv3Wl1Sn8ezKR73ae52hcOhZmKrZMvQ9/t6pZnVqrU3hwwS6ikrL0Mxtn5KP5z/8+m3jZ07WhO90auRPUwNXQ0bpAo2X76WRWhcezPSqZIq3+PDO1ih6N3RnSrg59m3sbOqtm5BaxJiKeFYfiDHMKAfi72fJERz8e61AXL0drdDqFN9ccZ/nBOMzUKr5+sh39W5Z9UU/Qj0B76MtdZBVomNirYZkm1iuP/CItG08k8Mv+WMIuXjXsb1XHien9AunZ2L1SX4aKorDheCKz/zrJleyCEq818rRn7qOtqiS0ltXnm6L4cls0XRq4suLZYED/O3hs4V5OxGfSqo4Tvz8fXGO3RssiK7+Ip344wNFLGXg6WLHyuWCOXkpnym8RKAq83KexYSXzysou0KAoCg6VHIQgql55vr9rvBlDp9ORlZWFq2vJv8xnz57F19cXa2trgoODmTNnDvXq1Sv1GgUFBRQUXP9HIjMzs9TjxL1NrVbx5kPNGP6//Sw/GMeYrvVp5Fk1M/DmFWr5PSyOH3bFGDo5WpqrefPBZlUWVuD6wojPLD5s6KdR18WGbg3d6drIjeCGbrdsLbEyN6N/Sx/6t/Thak4h645dZtWReI7EphMapZ+F197KnP4tvdHqFDYcT6Dg2uKRluZqBrT0ZlgnP7oEuJUYaaVWq/hwcCsKNQp/hl/ixeVH+HakmpAyDr8u0GiZvDycrAINHfxdmFpFX0o3srYwY0i7ugxpV5eTlzP4ZX8sa47Eczw+g9E/HaRzgCuv9gu87XIJt3I5PY+315xg62n97NYNPeyYM7Q1qdkFvL32JNHJ2Ty2cB+jg+szo19gtY/UKdLqWHFIfyuyeAQf6H8HC5/qwMD/283x+AzeWnOCTx5rbRKtFnmFWsYtPszRSxm42Frw6/gg6rvbUd/djoy8ImatPcmCrWdxtrVgbLeASr3XumOXef3P41iYqfh1fJcKTfEgTEONt7DMmzePjz/+mNOnT+PpqR9Js3HjRrKzswkMDCQhIYF3332X+Ph4Tpw4gYPDzV8w77zzDu++++5N+6WFRZRmwtLDbI5M4v6mnvw0plOlrnUlu4Cl+y7y874LXM0tAsDZ1oJRXfwZ1bV+tdx2UhSFv45eJq9QS9eG7tRzq1zT/vmUbNYciWfVkfgSnVUBmno7MLyTH4Pb1cHZ9vaT3Wl1Cq/8FsFfRy9jaabmf6M60KsMo+Pe/fski/ZcwNnWgg0v9cDX2aZS9Smr1OwCvg09x9L9Fw2revcO9GBa30Ba1nG64/lancLP+y7wyb9R5BRqsTBTMbFXIyb1bmi4BZSeW8gH60/xx7X+PXWcbZgztBU9b5i/qKptPJ7AxF/Dcbe3Yu/r9990m3b32SuM+ukAOgXeH9ySp7v43+JKNaNAo2X8ksPsOnsFB2tzlk/octPvv3jKBNDfLhzavvwdvAs0Wj5cf4ql+64viOpqZ8mKZ7vQxKtmlw4Rt1Zjo4TKG1iWLVvGhAkTWLt2LSEhIbc8Lj09HX9/fz7//HPGjRt30+ultbD4+flJYBG
lOp+STd8vdqLRKfw6PohujdwrdI0fdsfwZ9glQytEPVdbxvcI4LEOdbG1vPv6XOl0CocvXmXdscuogKHt69K6rlO5/geu0ep4cfkRNp5IxMpczU9jOt329/vvyUSe+zkMgB9Hd6RPs5qfFC8hI48vt0az8nAc2mu31x5q7cO0B5rcckj06cRMXv/zOBFx6QB08HdhztBWt/zi23kmhZmrjhOfrg+Ej3Woy1sPNbtjCKyIkT/sZ090KpN6N2RGv9JvrX234xxzNp7GwkzFime71OjtqhsVaXVM+jWcTZFJ2Fqa8fO4zqWWRVEU3l93ip/2xGCmVvHdUx3K3IIH+luOk5aFczw+A4Dn7mvAvnOpHLuUgbu9Fb8914WG9+jwd1NjkoFlxYoVPPPMM/z+++889NBDdzy+U6dOhISEMGfOnDseK31YxJ2889dJFu+9QDMfR9a92L3MQ33DLqbx3Y7zbD6VZOjc2qauE8/2bEj/lt4ybTz6eXpe+DWMLaeSsbEwY/HYTgSVsjr5pau5PLhgF5n5Gib0CODNh5obobTXXbiSwxdbzvDX0csoiv7222Pt6/JSSGPqXGv1yS/S8tW2aBbuOIdGp2BvZc5r/QMZGeR/xwkJcwo0fPJvFEv2XUBRwN3eivcHtWBAq/L197md8ynZ3P/ZDv3oqFd737JjraIoTF52hPXHE/B0sGLdi93xdKxYJ+eK0uoUpq6MYG3EZSzN1Sy6Q7jV6RRm/HGMP8MvYWmuZsnYzgQ3vPOq9/+cSGTGH0fJytfgYmvB58Pa0jvQk/TcQkZ8f4BTCZl4Oer7zFTl7VtRMSYXWJYvX84zzzzDihUrGDRo0B2vm52dTb169XjnnXd46aWX7ni8BBZxJ2k5hdz3yXay8jW0rOOIuVqNRqdDo1Uo1Oofi67NXaHR6SjS6CjSKYZbBwB9mnrybM8GdK7ikSa1QYFGy7NLw9hxJgU7SzOWjguig7+L4fUirY4nvtvHkdh02vg58/tzwSYzwuxUQiafbYpiyyl9nxRLMzUju9Sja0N3PtpwiphrC2k+0NyL9wa1wMepfLewwi6m8eofxziXor9O/xbevDeoRZUEhg/WRfLD7pgy3e7MKdAw+Os9nE3OpqO/C8smdKmxz0BRFN5Yre+oba5W8d3THcrUuqbR6pj4azibI5Owt9LfPmpVt/Tbd4UaHR9vPM1Pe2IAfSvY/41oV+KWY2p2ASO+38+ZpGzqONvw23NdTHL01L2kWgNLdnY20dHRALRr147PP/+c3r174+rqSr169Zg5cybx8fEsXboU0N8GGj16NAsWLCgxDNrGxgYnJ/0fvOnTpzNw4ED8/f25fPkys2fPJiIigsjISDw87nzvVwKLKIsfdp3ng2srKJeVpZmaIe3qMKFnQJV12K2t8ov0fRN2R1/BwcqcX8YH0cbPGYCPNpzifzvP42htzvqXeuDnanpfEmEXr/LJv6fZf77kKtmeDla8N6hFuUdC3ahAo+XrbdF8E6pvqXG0Nud/ozrSpZSWqLLKL9IS9NFWMvKK+GlMR+5veucAcD4lm0Ff7SGrQMPoYH/eHdSywu9fVjfe3lGr4MsR5ZsBOr9Iy9hFh9h3PhVXO0tWPhdMI8+St3MuXc1l8rIjhlt2z/ZswIx+gaVOQJeclc/w7/Zz/koO9Vxt+e25LuUOoaLqVGtgCQ0NpXfv3jftHz16NIsXL2bMmDFcuHCB0NBQAHr16sWOHTtueTzA8OHD2blzJ6mpqXh4eNC9e3c+/PBDGjYs2wyNElhEWeh0CjvOppBfqMXcTI25mQpLMzXmahUW5mos1Pp9FmZqLMxUmJupcbaxkPVYyiGvUMvoRQc5GJOGo7U5y5/tQlJmPs8sPgzAd093oF8LbyOX8tYURWFPdCqf/HuaY/EZjOhcj9f6N8XJpmqGw55KyOTVP45xPF4/lPffKT3LtZL3jf4Iu8T0349Sx9mGna/2LvPtyS2RSYxfqv88Pnu8TZXMWHw7xUOuAT55rDWPV2AZj6z8Ip78/gDH4zPwdbLm94ldDbfttp5KYurKo2TkFeFkY8Fnj7e5Y3+XxIx8hv1vHxdTc2ngbseKZ7vU+C0yoSdT8wshjCa7QMOoHw8QHpuOi60FCpCeW8SYrvV55xHTnBn5vxRFIadQi301hNW8Qi0P/d8uzqfkVGq9n8Ff7yEiLp0Z/QKZ1LtRuc4tnlDPylzNnxO7lhilU6jRkVeoJbdIQ06BVv+8UENukf65pZkaWysz7CzNsbMyw9bSHDtLc2ytzG5q0fg29Bxz/zkNwHuDWjAquH6561ksNbuAJ77bx7mUHBp42LF8Qhd+2h3DdzvPA9DGz5mvRrQrc+tdfHoeTyzcR3x6Ho097VnxbBdZ4doIJLAIIYwqM7+Ip69NCgb6Cdv+mBhc4zPAmqpjl9IZ+s1eNDqlQgt0nojP4OH/242FmYq9r/fBw6F8X7Q6ncK4JYfYfm0uHnsrc30oKdTeNDFheViaq7Gz1IcYG0szopOzAXitf1Mm9qr8mkaX0/N47Nu9XM7Ix8pcbRixN7ZbfWYOaFbuPjmxqbk88d0+EjPzaertwPIJXSrc4iUqRgKLEMLoMnKLmLD0MPHpeSybECQjMv6jeK4RB2tz/p3Ss1zz0cxcdZzlB2N5uLUPXz3ZvkLvn5FXxOCv9xg6Ff+XuVqF7bXwYWtpho2lGTYWZhTpFHIL9OEmp1BDboGWQq2u1GsATO7diOn9AitUxtKcS8nmiYX7SM0pxMHKnHmPta7UyKvzKdkM+99+UrIKaFnHkV/Hd7ntLUBFUbiYmkvYxascvniV8ItXKdLq6N7Ynd5NPQlu4GZSMwqbOgksQgiTodUpMvy7FBqtjscW7iMiLp2uDd34ZVzQHYdKg74/R9BHW8kt1LLi2S6V6ribkVdEVGIWNhZm2FqZ6QOKhb51pDytFcW3kXIKNeQW6m8l5RRqcLaxrJaZZaOTs/gzPJ7hnfyqJAifTcpi+P/2k5pTSFs/Z34e19kwjX9+kZYT8RklAkpqTuEtr2VtoaZbQ3d6NfXk/qaehr42onQSWIQQ4i4QcyWHBxfsIq9Iy9sPN2dc9ztPQ//zvgu8vfYkjTzt2fxKTxliX0VOJWQy4vv9pOcW0cHfhfb1nAm7eJUT8Zk3tSBZmqlpXddJf5y/C2qViu1RyWw/nUxCRsnVwQO9HOh9Lby0r+dc5hXJ7xUSWIQQ4i7xy/6LvLXmBJbmata/2J3Gt5k2XlEU+s/XL4Y5e2DzSq+zI0o6EZ/BiO/3k5WvKbHf3d6KDv7OdPR3pb2/Cy3rOJbaH0tRFE4nZrHttD68hMde5cYuQU42FvRs4kFLX0fqudri52pLPTdbw4Kl9yIJLEIIcZdQFIWxiw8RGpVCC19HVr/Q7Za3Yw5dSOPxhfuwsTBj/xt9qmy4tbjuaFw6n26Kop6rLR38Xejo74qfq02FWrKu5hSy82wK204ns+NMCunX1h/7Lxdbi+sB5obNz9UWX2ebWn1LVQKLEELcRZIz8+k3fydXc4tuuybQyyuOsDbiMsM6+jH3sd
Y1XEpRGVqdwpHYq+w6e4ULqTnEpuUSl5bLlexb94cBsLcy56FWPjzRqS7t67nUuluAEliqSlEenFoHySch5J2qu64QQvxH8arLahX8/nzwTYsCpmYXEDxnG4VaHX9N7kbrus7GKaioUtkFGuLScg0BJvaG7VJaXon+Mw087Hiiox9D29WpNRPdSWCpKmkx8GVbQAVTjoNz+WdoFEKIspq6MoJV4fHUc7Vl48s9SsyyvHDHOT7eeJrWdZ34a3J3I5ZS1JTiFdVXHo5j/bEE8oq0gH6hzt6BHjze0Y/7m3qWugTB7WTmF3HhSg7JmQW42Fni6WCFu70VNpY1PxxbAktVWvwwXNgFvd+E+16t2msLIcQNMvOLGDB/F/HpeYzo7MecofrbPjqdQq9PQ4lNy2Xeo615opP85+lek12gYf2xy6w8fImwi1cN+93sLBnSrg5PdPKjyQ0dtgs0WuLScjmXkkPMlRxirj2ev5LDleyCUt/DwcocdwcrPOyt8HC4vrnbW+qf21vTwtexTMPvy0oCS1U6ugJWPwfO/vBSBKhlSJoQovrsP5/KiO/3oyjww6iOhDT3IjQqmTGLDuFgbc6BN/pgaynrW93LopOz+SPsEn+GXyIl63r4aFPXCWdbS2Ku5HDpai63m7TYw8EKL0cr0nOLSMkqMMwafDtqFZz98MEq7QRcnu9v+VN/J80egfXTIf0iXNwDAT2MXSIhRC3WpYEb47sH8P2uGF5fdYx/6/Xk1wOxADzavq6EFUEjT3teH9CU6X2bsONMCisPx7H1VLJhKYxi9lbmBLjbGbYGHnY0cLenvrutYWI80I9UyyrQcCWrgJSsAlKyrz1mFXAl+/o+rQ6jjliSP/l3YmkLLYdC+BI48osEFiFEtZvWN5CdZ64QlZTFpGXhHIxJA+CpLvWMXDJhSszN1PRp5kWfZl5cyS7g35OJqFUqQzjxsLcq06gilUqFo7UFjtYWNPCwr4GSV4zc3yiLdk/rHyPXQn6mccsihKj1rC3M+GJYWyzMVOw/n4ZOgS4NXGnkeetJ5cS9zd3eipFB/ozoXI8uDdzwdLCudUOgJbCURd2O4N4ENHlwcpWxSyOEuAc093VkWt/riwaODPI3YmmEMD4JLGWhUkG7p/TPj/xi3LIIIe4ZE3o0YEi7OoQ086RfC29jF0cIo5JRQmWVlQSfNwNFC5MOgkfVLZcuhBBC3IvK8/0tLSxl5eAFjfvqn0srixBCCFGjJLCUR/FtoaMrQFv6IlZCCCGEqHoSWMqjST+w84CcZIjeYuzSCCGEEPcMCSzlYWYBrYfpn8ttISGEEKLGSGApr7Yj9Y9n/oHsFOOWRQghhLhHSGApL6/mUKcD6DRw7Ddjl0YIIYS4J0hgqYjiVpYjv8DdPypcCCGEMHkSWCqi5aNgbg0pp+ByuLFLI4QQQtR6ElgqwsZZv4ozSOdbIYQQogZIYKmodtduCx3/E4ryjFsWIYQQopaTwFJR9XuCUz0oyIBT64xdGiGEEKJWk8BSUWr19VaWIz8btyxCCCFELSeBpTLajNA/xuyEqxeNWxYhhBCiFpPAUhku/hBwH6DA0eXGLo0QQghRa0lgqax2T+sfj/wKOp1xyyKEEELUUhJYKqvZw2DlBBmxcGGXsUsjhBBC1EoSWCrLwgZaPap/LnOyCCGEENVCAktVaPuU/vHUX5CfYdyyCCGEELWQBJaqUKc9eDQDTT6c+NPYpRFCCCFqHQksVUGlgnbXWlnktpAQQghR5SSwVJXWw0BtDvFhkHzK2KURQgghahUJLFXF3gOa9Nc/X/IIrJ8GMbtApzVuuYQQQohawNzYBahV7nsN4g5ATjIc+kG/2XlC80eg+WDw7wpqM2OXUgghhLjrlLuFZefOnQwcOBBfX19UKhVr1qy54zmhoaG0b98eKysrGjVqxOLFi2865uuvv6Z+/fpYW1sTFBTEwYMHy1s04/NpDa9Ewsg/9SOHrJ2vh5clD8NnTWHdVP1U/tLyIoQQQpRZuQNLTk4Obdq04euvvy7T8TExMTz00EP07t2biIgIpkyZwvjx4/n3338Nx/z2229MnTqV2bNnEx4eTps2bejXrx/JycnlLZ7xmVtC4xAY/DVMP6sPL+1uCC+Hf4QlA+GzQH14ObNJvw6RzJIrhBBC3JJKURSlwierVKxevZrBgwff8pjXXnuN9evXc+LECcO+4cOHk56ezj///ANAUFAQnTp14quvvgJAp9Ph5+fHiy++yOuvv37TNQsKCigoKDD8nJmZiZ+fHxkZGTg6Ola0OtVLWwTnd0Dkaji1DvLTS75ubg2uDcCtEbg31j+6NQa3hmDrapQiCyGEENUpMzMTJyenMn1/V3sfln379hESElJiX79+/ZgyZQoAhYWFhIWFMXPmTMPrarWakJAQ9u3bV+o158yZw7vvvlttZa4WZhb6lpfGIfDwfIjZASfXQNxBSDuvn8MlOVK//Zet27Xw0gjsPcHa6T+bs/7RxhmsHPWtPNUp7yqkx+nLZe+pr5sQQghRjao9sCQmJuLl5VVin5eXF5mZmeTl5XH16lW0Wm2px5w+fbrUa86cOZOpU6cafi5uYblrmFlAoxD9BqDV6NciuhINqdGQelb/eCUasi5Dbqp+i9tftutb2OoDjK2bvoXGrTG4N9G33Lg3BiuHsl1Hp4P0C5B4AhKPQ9K1x4y4Gw5SXQsuXuDgpX+09wIHb32YsffWP7dy0A/7Vpvr61/8XKUqz29OCCHEPequHCVkZWWFlZWVsYtRdczM9beDXBsAfUu+VpANaefgyll9S0xumn76f8OWfv15Qab+nKJc/ZaVoA8Z/+Xgc+3W0w0hxq2RPhSVCCcnoDCr9DLbuOrfU9FC7hX9lnyy/HUvDi5qC/0IKjML/XMrB7B21LcYWTvqA1jxc6vilqVrr9u46EOTrZv+dymEEKLWqfZ/3b29vUlKSiqxLykpCUdHR2xsbDAzM8PMzKzUY7y9vau7eKbPyh582ui3O9Fp9aGlOMBkJelba66c0bfWXDmj7/iblaDfyrK6tJkVeDYF71bg1eraYwv97SedTh9yspMgO1H/ftk3bFk37C/KuUWZNfqN/JL7b5GT7sjaCWzdwc79eogxPL+238YVbF30j1aOoJbpiIQQwtRVe2AJDg5mw4YNJfZt3ryZ4OBgACwtLenQoQNbt241dN7V6XRs3bqVyZMnV3fxahe1mb61wcZF/7MP3NRik5d+7XbTtSCTevZ6642Voz6QeLcE79bg1VLf+nKrPipqtX7CPHsPoOXty6Yo+kCl04CuSP+o1ZT+s7YACrIgP/NaAMu83oJ042Px63lX9S1PKNfDWtq5sv3OVGb68GXjqu/cbON6rcXm2qO1k35FbnPrsj2aWVbuNldhLmTGQ3osZFzS337LuKTfrJ2gbkeo0wF825X91p4QQtQC5Q4s2dnZREdHG36OiYkhIiICV1dX6tWrx8yZM4mPj2fp0qUAPP/883z11Ve8+uqrPPPMM2zbto2VK1eyfv16wzWmTp3K6NGj6dixI507d2b+/Pnk5OQwd
uzYKqiiKMHGWf+lV7djyf3Fg8Wqq0+JSqW/XWNmDlhX/fV1Wn0Yy70COVeu9fu59piTev3nnCvXA05RzrVbWtdeT62Ccqgt9K1ilg76QGFlr3+0tL/23PHacwdQqfXhJCNO34k545K+jLdzep3+UaUGj6b68FKng/7z9Ggmt8SEELVWuYc1h4aG0rt375v2jx49msWLFzNmzBguXLhAaGhoiXNeeeUVIiMjqVu3Lm+//TZjxowpcf5XX33FJ598QmJiIm3btuXLL78kKCioTGUqz7AoIQw0BfrgkncV8tKuPb/x8SoUZEBRvn4UV1HerR+p8OwAN7O0Byc/cKoLzn7654519LfX4sPgUhhkXrr5PAtb8GkLdTuAdxt9SNQWgbbw2lakb7367z5Ngb41ybedfuVxR9+qq4sQQtxGeb6/KzUPi6mQwCKMSlH0X/5FeVCYo7+dVZitv11VkH3tedb1rfhnbZE+HDjXKxlQrJ3v3NKVlQiXDusDTPxhiD9y6w7S5WXvrQ8uvu2hTjv9o8wFJISoBhJYhLjX6LT6PknxYfogc+WM/raRmeW1zQLMra4/N+y/9nNmvD70pJwCpZRZl13qXwsw7fUtMY6++o7MVo4yNF0IUWESWIQQFVOYAwnH4HI4xIfD5SO378CsNr/WYfnaiCzbG4aYF++3cwc7D/28PHYeMtGgEMLApGa6FULcRSztwD9YvxXLuwqXI66HmMTj+s7LRTn6UV05yfqtrGxcr00q6Klfzfy/zx189C04Ni7SeiOEMJDAIoS4PRsXaNhbv92oKP9a5+TiUVhp1x9v3J+TAtkp+kdFq38tLw1SSp/J2sDc+np4KX4s3hx8wdFH399GRkYJcU+Qv+lCiIqxsAYL37KPKtLp9EEl+1qLTHbxlnQt1Fx7npWgDzqafLgao99uRaXWhxfDiKq61zow+13/WearEaJWkMAihKgZavW1/izuQPPbH6sp0AeXzMvXtxt/Lp6tWafRD/HOvHTrtbasna+PwrJ2uqHTscV/OiBfe66+tt/STj9xokczsLSt6t+GEKKcJLAIIUyPuZV+ZJJL/Vsfo9PpW2oyLt08M3B6nP55fvr1Lel4BQujAtcA8GyuX5ai+NG1gX52aSFEjZBRQkKI2qsg6/rSBumx+lFQ2sJrS0DcOKFe8fMb9udnQPKpW88+bG4NHoHg2QI8m+lDzY3rWFk7yzpVQtyBjBISQgjQ91/xbKbfKio7GZJOQnKkfkuK1HcYLsqFhKP6rTQqs5KLb9q5Xw809p76tbq8W+nXoBJC3JEEFiGEuJ3iYdc3jpLS6fSdgYsDTHKkfvK94nWsCjL1I6LuNORbba6/xVSnw/XZhT2aysgnIUoht4SEEKKqaQquDem+cn3RzRufZ16GhAj96Kj/srAFnzbXZxau0x5cAmROGlEryS0hIYQwJnOr63PG3Iqi6PvWxIfdMLNwhH5NqNh9+s1wPetrE+t56GcLtnPX/2yYQfiGn21dpTOwqJUksAghhDGoVPq5Ypz9oMVg/T6dDlLP6sNLcZBJPK6fkyYjVr/d+cL6Ydlqc/2mUl9/rjbTbyqz6/tsnMEvCPy7Qb0g/dBvIUyQ3BISQghTpim83j8mJ+Vav5gbZg++cctNAyrxT7pKre8I7N/t2ta17Ct1K4r+/a/GQNq1Cf9y0651MvbSbw7XHm3dpZ+OAGTxQ2MXRwghjEOr0c8mXDx0W6fVb4q25M86zfV9GZfg4h64uBfSzt98Tc/m+uDi3xXqddWfczVGf2xxMEmLgasX9J2Ny0RVMsjYe+lvbXk2hwb3gYN3Vf5WhAmTwCKEEKL8Mi/rg8vFvfoQc6f1nkrj4KPvJOwaoB/OnZumX3KheMtJAUV3+2u4B0KDXvrwUr+73KaqxSSwCCGEqLycKyUDTOJxfb8XF/9rMxFfCybFj87+d17GQKfVj6AqDjBZSdfXkIo7eG1emxu+llRq8G2nDzAB9+n721hYV099dTp9p+fCHP3Ef7IkQ7WTwCKEEKLqFebqR0BV5yik3DS4sBvOh0LMDkiNLvm6ufW1TsJd9es9obo25PvasO/i5yUe0Q81z8/Q37bKz7hhu+HngkxKhCULu2sjs67NxWMYleVxw/Nro7SsHKtuZmOdTr+cRG6q/haca0Mwt6yaa5sYCSxCCCFqh4xLcH6HPryc3wHZidX/nir1nW9blXaOjcsNm6v+0da15H5rZyjMLn1+HsPcPan6PkbF1Ob622TeLfXrWHm10M+UbO9V8fl5CnP0S1dYOxl1tmUJLEIIIWofRYGUKH14STh6rfOwAih3eES/EreNs74lxNrp2lb83Fn/WPyauZX+yzwnRb80Q4mRWcnX9qVcfyzMrp76Wl37PrtVZ2ZbN31w8SoOMs31Q9ZzUm4YVXaL55q869exsNUHLFsX/TVtXPVB68bnxY8+bat0jSwJLEIIIURNKcqHvKvXtrTrz3PT/rMvXb9Z2d+8vpStO9i5lVxA09zq+gSDSSeubSch8QSknSt/K9BNVJRrGLxKDW+nGi2wyEB4IYQQojIsrMHCBxx9qv7aN04wGDjg+v7CXP0orqST14NMcqS+hcUwG7JHKc9v+NnSTt96k5umD1W5aTc8T/3P86uAYtQVyCWwCCGEEHcbS9vra01VRvHtMQKqpFjVyXhRSQghhBCijCSwCCGEEMLkSWARQgghhMmTwCKEEEIIkyeBRQghhBAmTwKLEEIIIUyeBBYhhBBCmDwJLEIIIYQweRJYhBBCCGHyJLAIIYQQwuRJYBFCCCGEyZPAIoQQQgiTJ4FFCCGEECZPAosQQgghTJ65sQtQFRRFASAzM9PIJRFCCCFEWRV/bxd/j99OrQgsWVlZAPj5+Rm5JEIIIYQor6ysLJycnG57jEopS6wxcTqdjsuXL+Pg4IBKpbrtsZmZmfj5+REXF4ejo2MNlbDmST1rF6ln7XIv1PNeqCNIPStLURSysrLw9fVFrb59L5Va0cKiVqupW7duuc5xdHSs1X+4ikk9axepZ+1yL9TzXqgjSD0r404tK8Wk060QQgghTJ4EFiGEEEKYvHsusFhZWTF79mysrKyMXZRqJfWsXaSetcu9UM97oY4g9axJtaLTrRBCCCFqt3uuhUUIIYQQdx8JLEIIIYQweRJYhBBCCGHyJLAIIYQQwuTdc4Hl66+/pn79+lhbWxMUFMTBgweNXaRKeeedd1CpVCW2pk2bGl7Pz89n0qRJuLm5YW9vz6OPPkpSUpIRS3xnO3fuZODAgfj6+qJSqVizZk2J1xVFYdasWfj4+GBjY0NISAhnz54tcUxaWhojR47E0dERZ2dnxo0bR3Z2dg3W4s7uVM8xY8bc9Nn279+/xDF3Qz3nzJlDp06dcHBwwNPTk8GDBxMVFVXimLL8OY2NjeWhhx7C1tYWT09PZsyYgUajqcmq3FJZ6tirV6+bPs/nn3++xDGmXEeAb7/9ltatWxsmDwsODmbjxo2G1+/2z7HYnepZGz7L//r4449RqVRMmTLFsM/kPk/lHrJixQrF0tJS+emnn5STJ08q
EyZMUJydnZWkpCRjF63CZs+erbRo0UJJSEgwbCkpKYbXn3/+ecXPz0/ZunWrcvjwYaVLly5K165djVjiO9uwYYPy5ptvKqtWrVIAZfXq1SVe//jjjxUnJydlzZo1ytGjR5VHHnlECQgIUPLy8gzH9O/fX2nTpo2yf/9+ZdeuXUqjRo2UESNG1HBNbu9O9Rw9erTSv3//Ep9tWlpaiWPuhnr269dPWbRokXLixAklIiJCefDBB5V69eop2dnZhmPu9OdUo9EoLVu2VEJCQpQjR44oGzZsUNzd3ZWZM2cao0o3KUsd77vvPmXChAklPs+MjAzD66ZeR0VRlL/++ktZv369cubMGSUqKkp54403FAsLC+XEiROKotz9n2OxO9WzNnyWNzp48KBSv359pXXr1srLL79s2G9qn+c9FVg6d+6sTJo0yfCzVqtVfH19lTlz5hixVJUze/ZspU2bNqW+lp6erlhYWCi///67Yd+pU6cUQNm3b18NlbBy/vtFrtPpFG9vb+WTTz4x7EtPT1esrKyU5cuXK4qiKJGRkQqgHDp0yHDMxo0bFZVKpcTHx9dY2cvjVoFl0KBBtzznbqynoihKcnKyAig7duxQFKVsf043bNigqNVqJTEx0XDMt99+qzg6OioFBQU1W4Ey+G8dFUX/JXfjl8F/3W11LObi4qL88MMPtfJzvFFxPRWldn2WWVlZSuPGjZXNmzeXqJcpfp73zC2hwsJCwsLCCAkJMexTq9WEhISwb98+I5as8s6ePYuvry8NGjRg5MiRxMbGAhAWFkZRUVGJOjdt2pR69erdtXWOiYkhMTGxRJ2cnJwICgoy1Gnfvn04OzvTsWNHwzEhISGo1WoOHDhQ42WujNDQUDw9PQkMDGTixImkpqYaXrtb65mRkQGAq6srULY/p/v27aNVq1Z4eXkZjunXrx+ZmZmcPHmyBktfNv+tY7Fff/0Vd3d3WrZsycyZM8nNzTW8drfVUavVsmLFCnJycggODq6VnyPcXM9iteWznDRpEg899FCJzw1M8+9lrVj8sCyuXLmCVqst8YsF8PLy4vTp00YqVeUFBQWxePFiAgMDSUhI4N1336VHjx6cOHGCxMRELC0tcXZ2LnGOl5cXiYmJxilwJRWXu7TPsfi1xMREPD09S7xubm6Oq6vrXVXv/v37M3ToUAICAjh37hxvvPEGAwYMYN++fZiZmd2V9dTpdEyZMoVu3brRsmVLgDL9OU1MTCz1My9+zZSUVkeAJ598En9/f3x9fTl27BivvfYaUVFRrFq1Crh76nj8+HGCg4PJz8/H3t6e1atX07x5cyIiImrV53irekLt+SxXrFhBeHg4hw4duuk1U/x7ec8EltpqwIABhuetW7cmKCgIf39/Vq5ciY2NjRFLJipr+PDhhuetWrWidevWNGzYkNDQUPr06WPEklXcpEmTOHHiBLt37zZ2UarNrer47LPPGp63atUKHx8f+vTpw7lz52jYsGFNF7PCAgMDiYiIICMjgz/++IPRo0ezY8cOYxeryt2qns2bN68Vn2VcXBwvv/wymzdvxtra2tjFKZN75paQu7s7ZmZmN/VwTkpKwtvb20ilqnrOzs40adKE6OhovL29KSwsJD09vcQxd3Odi8t9u8/R29ub5OTkEq9rNBrS0tLu2noDNGjQAHd3d6Kjo4G7r56TJ09m3bp1bN++nbp16xr2l+XPqbe3d6mfefFrpuJWdSxNUFAQQInP826oo6WlJY0aNaJDhw7MmTOHNm3asGDBglr1OcKt61mau/GzDAsLIzk5mfbt22Nubo65uTk7duzgyy+/xNzcHC8vL5P7PO+ZwGJpaUmHDh3YunWrYZ9Op2Pr1q0l7kve7bKzszl37hw+Pj506NABCwuLEnWOiooiNjb2rq1zQEAA3t7eJeqUmZnJgQMHDHUKDg4mPT2dsLAwwzHbtm1Dp9MZ/mG5G126dInU1FR8fHyAu6eeiqIwefJkVq9ezbZt2wgICCjxeln+nAYHB3P8+PESAW3z5s04OjoamumN6U51LE1ERARAic/TlOt4KzqdjoKCglrxOd5OcT1Lczd+ln369OH48eNEREQYto4dOzJy5EjDc5P7PKu8G68JW7FihWJlZaUsXrxYiYyMVJ599lnF2dm5RA/nu820adOU0NBQJSYmRtmzZ48SEhKiuLu7K8nJyYqi6Iel1atXT9m2bZty+PBhJTg4WAkODjZyqW8vKytLOXLkiHLkyBEFUD7//HPlyJEjysWLFxVF0Q9rdnZ2VtauXascO3ZMGTRoUKnDmtu1a6ccOHBA2b17t9K4cWOTG+57u3pmZWUp06dPV/bt26fExMQoW7ZsUdq3b680btxYyc/PN1zjbqjnxIkTFScnJyU0NLTEMNDc3FzDMXf6c1o8fLJv375KRESE8s8//ygeHh4mM0z0TnWMjo5W3nvvPeXw4cNKTEyMsnbtWqVBgwZKz549Ddcw9ToqiqK8/vrryo4dO5SYmBjl2LFjyuuvv66oVCpl06ZNiqLc/Z9jsdvVs7Z8lqX57+gnU/s876nAoiiK8n//939KvXr1FEtLS6Vz587K/v37jV2kShk2bJji4+OjWFpaKnXq1FGGDRumREdHG17Py8tTXnjhBcXFxUWxtbVVhgwZoiQkJBixxHe2fft2BbhpGz16tKIo+qHNb7/9tuLl5aVYWVkpffr0UaKiokpcIzU1VRkxYoRib2+vODo6KmPHjlWysrKMUJtbu109c3Nzlb59+yoeHh6KhYWF4u/vr0yYMOGmcH031LO0OgLKokWLDMeU5c/phQsXlAEDBig2NjaKu7u7Mm3aNKWoqKiGa1O6O9UxNjZW6dmzp+Lq6qpYWVkpjRo1UmbMmFFi7g5FMe06KoqiPPPMM4q/v79iaWmpeHh4KH369DGEFUW5+z/HYrerZ235LEvz38Biap+nSlEUperbbYQQQgghqs4904dFCCGEEHcvCSxCCCGEMHkSWIQQQghh8iSwCCGEEMLkSWARQgghhMmTwCKEEEIIkyeBRQghhBAmTwKLEEIIIUyeBBYhhBBCmDwJLEIIkzBmzBgGDx5s7GIIIUyUBBYhhBBCmDwJLEKIGvXHH3/QqlUrbGxscHNzIyQkhBkzZrBkyRLWrl2LSqVCpVIRGhoKQFxcHE888QTOzs64uroyaNAgLly4YLheccvMu+++i4eHB46Ojjz//PMUFhYap4JCiGphbuwCCCHuHQkJCYwYMYJ58+YxZMgQsrKy2LVrF6NGjSI2NpbMzEwWLVoEgKurK0VFRfTr14/g4GB27dqFubk5H3zwAf379+fYsWNYWloCsHXrVqytrQkNDeXChQuMHTsWNzc3PvzwQ2NWVwhRhSSwCCFqTEJCAhqNhqFDh+Lv7w9Aq1atALCxsaGgoABvb2/D8b/88gs6nY4ffvgBlUoFwKJFi3B2diY0NJS+ffsCYGlpyU8//YStrS0tWrTgvffeY8aMGbz//vuo1dKQLERtIH+ThRA1pk2bNvTp04dWrVrx+OOP8/3333P16tVbHn/06FGio6N
xcHDA3t4ee3t7XF1dyc/P59y5cyWua2tra/g5ODiY7Oxs4uLiqrU+QoiaIy0sQogaY2ZmxubNm9m7dy+bNm3i//7v/3jzzTc5cOBAqcdnZ2fToUMHfv3115te8/DwqO7iCiFMiAQWIUSNUqlUdOvWjW7dujFr1iz8/f1ZvXo1lpaWaLXaEse2b9+e3377DU9PTxwdHW95zaNHj5KXl4eNjQ0A+/fvx97eHj8/v2qtixCi5sgtISFEjTlw4AAfffQRhw8fJjY2llWrVpGSkkKzZs2oX78+x44dIyoqiitXrlBUVMTIkSNxd3dn0KBB7Nq1i5iYGEJDQ3nppZe4dOmS4bqFhYWMGzeOyMhINmzYwOzZs5k8ebL0XxGiFpEWFiFEjXF0dGTnzp3Mnz+fzMxM/P39+eyzzxgwYAAdO3YkNDSUjh07kp2dzfbt2+nVqxc7d+7ktddeY+jQoWRlZVGnTh369OlTosWlT58+NG7cmJ49e1JQUMCIESN45513jFdRIUSVUymKohi7EEIIUVFjxowhPT2dNWvWGLsoQohqJO2lQgghhDB5EliEEEIIYfLklpAQQgghTJ60sAghhBDC5ElgEUIIIYTJk8AihBBCCJMngUUIIYQQJk8CixBCCCFMngQWIYQQQpg8CSxCCCGEMHkSWIQQQghh8v4f9CmIIxzBROMAAAAASUVORK5CYII=", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "plugin.loss_history.plot()" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "75ecc282", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", + " \n", " \n", - " \n", - " \n", + " \n", + " \n", " \n", - " \n", + " \n", " \n", " \n", " \n", " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", + " \n", " \n", " \n", " \n", - " \n", - " \n", + " \n", + " \n", " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", " \n", " \n", - " \n", + " \n", " \n", " \n", " \n", - " \n", - " \n", + " \n", + " \n", " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", - " \n", + " \n", " \n", - " \n", + " \n", " \n", " \n", - " \n", + " \n", " \n", - " \n", + " \n", " \n", - " \n", - " \n", + " \n", + " \n", " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", "
fixed acidityvolatile aciditycitric acidresidual sugarchloridesfree sulfur dioxidetotal sulfur dioxidedensitypHsulphatesalcoholquality
03.80.080.000.60.0090000.3462.09.0000000.987110440.01.038983.821.0800001.0814.27
33.80.08114.21.101.660.60.0090002.09.0000000.98711065.80.346289.0440.00.987113.821.08000014.260.228.05
423.81.101.660.60.00900065.80.346289.0440.0000000.9871103.821.080000440.00.987112.720.2214.26
533.81.100.000.60.00900065.80.0092.09.0000000.987110440.00.987112.721.088.07
43.81.101.6665.80.009289.09.00.987113.821.0799750.228.067
6514.20.081.101.660.60.00900065.80.346289.09.0000000.9871103.821.0800009.01.038982.720.2214.27
63.81.101.6665.80.0092.0440.00.987113.820.2214.26
\n", "
" ], "text/plain": [ " fixed acidity volatile acidity citric acid residual sugar chlorides \\\n", - "0 3.8 1.10 0.00 65.8 0.009000 \n", - "1 14.2 0.08 1.66 0.6 0.251377 \n", - "2 3.8 1.10 0.00 0.6 0.009000 \n", - "3 3.8 0.08 1.66 0.6 0.009000 \n", - "4 3.8 1.10 1.66 0.6 0.009000 \n", - "5 3.8 1.10 0.00 0.6 0.009000 \n", - "6 14.2 0.08 1.66 0.6 0.009000 \n", + "0 3.8 0.08 0.00 0.6 0.346 \n", + "1 14.2 1.10 1.66 65.8 0.346 \n", + "2 3.8 1.10 1.66 65.8 0.346 \n", + "3 3.8 1.10 0.00 65.8 0.009 \n", + "4 3.8 1.10 1.66 65.8 0.009 \n", + "5 14.2 1.10 1.66 65.8 0.346 \n", + "6 3.8 1.10 1.66 65.8 0.009 \n", "\n", - " free sulfur dioxide total sulfur dioxide density pH sulphates \\\n", - "0 289.0 50.104997 1.038893 3.82 0.220000 \n", - "1 289.0 9.000000 0.987291 3.82 1.080000 \n", - "2 2.0 9.000000 0.987110 3.82 1.080000 \n", - "3 2.0 9.000000 0.987110 3.82 1.080000 \n", - "4 289.0 440.000000 0.987110 3.82 1.080000 \n", - "5 2.0 9.000000 0.987110 3.82 1.079975 \n", - "6 289.0 9.000000 0.987110 3.82 1.080000 \n", + " free sulfur dioxide total sulfur dioxide density pH sulphates \\\n", + "0 2.0 440.0 1.03898 3.82 1.08 \n", + "1 289.0 440.0 0.98711 3.82 0.22 \n", + "2 289.0 440.0 0.98711 2.72 0.22 \n", + "3 2.0 440.0 0.98711 2.72 1.08 \n", + "4 289.0 9.0 0.98711 3.82 0.22 \n", + "5 289.0 9.0 1.03898 2.72 0.22 \n", + "6 2.0 440.0 0.98711 3.82 0.22 \n", "\n", " alcohol quality \n", - "0 8.0 5 \n", - "1 8.0 6 \n", - "2 14.2 7 \n", - "3 14.2 6 \n", - "4 14.2 6 \n", - "5 8.0 6 \n", - "6 14.2 7 " + "0 14.2 7 \n", + "1 8.0 5 \n", + "2 14.2 6 \n", + "3 8.0 7 \n", + "4 8.0 7 \n", + "5 14.2 7 \n", + "6 14.2 6 " ] }, - "execution_count": 16, + "execution_count": 21, "metadata": {}, "output_type": "execute_result" } @@ -1600,7 +1843,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.1" + "version": "3.10.9" } }, "nbformat": 4, From 57816b6c265e2d2949e16e4ff38799d262428b86 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Thu, 6 Apr 2023 19:45:50 +0200 Subject: [PATCH 44/95] update pandas and torch version requirement --- setup.cfg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.cfg b/setup.cfg index 7e20f43c..45c9dd4a 100644 --- a/setup.cfg +++ b/setup.cfg @@ -34,8 +34,8 @@ python_requires = >=3.7 install_requires = scikit-learn>=1.0 nflows>=0.14 - pandas>=1.3 - torch>=1.10.0 + pandas>=2.0 + torch>=2.0 numpy>=1.20 lifelines>=0.27 opacus>=1.3 From cc7e8fb50a5f9a241c4798d7954f4e157e18c1e7 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Thu, 6 Apr 2023 19:46:28 +0200 Subject: [PATCH 45/95] update pandas and torch version requirement --- setup.cfg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.cfg b/setup.cfg index 7be378de..ad8686c2 100644 --- a/setup.cfg +++ b/setup.cfg @@ -34,8 +34,8 @@ python_requires = >=3.7 install_requires = scikit-learn>=1.0 nflows>=0.14 - pandas>=1.3 - torch>=1.10.0 + pandas>=2.0 + torch>=2.0 numpy>=1.20 lifelines>=0.27 opacus>=1.3 From 8a589966297a34e6a37526d96fa192e55423ef85 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Thu, 6 Apr 2023 19:57:34 +0200 Subject: [PATCH 46/95] update ddpm tutorial --- .../models/tabular_ddpm/gaussian_multinomial_diffsuion.py | 8 +++++--- .../tutorial8_tabular_modelling_with_diffusion.ipynb | 4 +--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py 
b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py index 6c11c5be..43ebf091 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/gaussian_multinomial_diffsuion.py @@ -152,7 +152,8 @@ def __init__( self.posterior_variance = ( betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod) - ) + ).to(device) + self.posterior_log_variance_clipped = ( torch.from_numpy( np.log( @@ -162,11 +163,13 @@ def __init__( .float() .to(device) ) + self.posterior_mean_coef1 = ( - (betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)) + ((betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod))) .float() .to(device) ) + self.posterior_mean_coef2 = ( ( (1.0 - alphas_cumprod_prev) @@ -288,7 +291,6 @@ def gaussian_p_mean_variance( ], dim=0, ) - # model_variance = self.posterior_variance.to(x.device) model_log_variance = torch.log(model_variance) model_variance = perm_and_expand(model_variance, t, x.shape) diff --git a/tutorials/tutorial8_tabular_modelling_with_diffusion.ipynb b/tutorials/tutorial8_tabular_modelling_with_diffusion.ipynb index d07618a1..686c67f5 100644 --- a/tutorials/tutorial8_tabular_modelling_with_diffusion.ipynb +++ b/tutorials/tutorial8_tabular_modelling_with_diffusion.ipynb @@ -1581,9 +1581,7 @@ ], "source": [ "import random\n", - "from sklearn.preprocessing import LabelEncoder\n", - "cond = random.choices(['red', 'white', 'rose'], k=len(loader))\n", - "cond = LabelEncoder().fit_transform(cond)\n", + "cond = random.choices(outcome, k=len(loader))\n", "plugin.fit(loader, cond=cond)" ] }, From cef348ef2c0450ba2adb73b691711143caaf0eb1 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Thu, 6 Apr 2023 20:46:21 +0200 Subject: [PATCH 47/95] restore setup.cfg --- setup.cfg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.cfg b/setup.cfg index 17ac704e..06c9fd74 100644 --- a/setup.cfg +++ b/setup.cfg @@ -34,8 +34,8 @@ python_requires = >=3.7 install_requires = scikit-learn>=1.0 nflows>=0.14 - pandas>=2.0 - torch>=2.0 + pandas>=1.3 + torch>=1.10.0 numpy>=1.20 lifelines>=0.27 opacus>=1.3 From 9cb5da17b54afaab6f50745f438f585b0ce2e965 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Thu, 6 Apr 2023 20:47:08 +0200 Subject: [PATCH 48/95] restore setup.cfg --- setup.cfg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.cfg b/setup.cfg index cab8c527..de0ac067 100644 --- a/setup.cfg +++ b/setup.cfg @@ -34,8 +34,8 @@ python_requires = >=3.7 install_requires = scikit-learn>=1.0 nflows>=0.14 - pandas>=2.0 - torch>=2.0 + pandas>=1.3 + torch>=1.10.0 numpy>=1.20 lifelines>=0.27 opacus>=1.3 From fe5ff2552048b2f7a6abd7c03b69ef9674795ce4 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Fri, 7 Apr 2023 08:55:17 +0200 Subject: [PATCH 49/95] replace LabelEncoder with OrdinalEncoder --- src/synthcity/plugins/core/models/factory.py | 4 ++-- src/synthcity/plugins/core/models/feature_encoder.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/synthcity/plugins/core/models/factory.py b/src/synthcity/plugins/core/models/factory.py index e2d69525..40b6d6d6 100644 --- a/src/synthcity/plugins/core/models/factory.py +++ b/src/synthcity/plugins/core/models/factory.py @@ -12,9 +12,9 @@ DatetimeEncoder, FeatureEncoder, GaussianQuantileTransformer, - LabelEncoder, MinMaxScaler, OneHotEncoder, + OrdinalEncoder, RobustScaler, StandardScaler, ) @@ -54,7 +54,7 @@ 
FEATURE_ENCODERS = dict( datetime=DatetimeEncoder, onehot=OneHotEncoder, - label=LabelEncoder, + ordinal=OrdinalEncoder, standard=StandardScaler, minmax=MinMaxScaler, robust=RobustScaler, diff --git a/src/synthcity/plugins/core/models/feature_encoder.py b/src/synthcity/plugins/core/models/feature_encoder.py index 70807e31..93455162 100644 --- a/src/synthcity/plugins/core/models/feature_encoder.py +++ b/src/synthcity/plugins/core/models/feature_encoder.py @@ -8,9 +8,9 @@ from sklearn.base import BaseEstimator, TransformerMixin from sklearn.mixture import BayesianGaussianMixture from sklearn.preprocessing import ( - LabelEncoder, MinMaxScaler, OneHotEncoder, + OrdinalEncoder, QuantileTransformer, RobustScaler, StandardScaler, @@ -151,7 +151,7 @@ def get_feature_names_out(self) -> List[str]: OneHotEncoder = FeatureEncoder.wraps(OneHotEncoder, categorical=True) -LabelEncoder = FeatureEncoder.wraps(LabelEncoder, n_dim_out=1, categorical=True) +OrdinalEncoder = FeatureEncoder.wraps(OrdinalEncoder, categorical=True) StandardScaler = FeatureEncoder.wraps(StandardScaler) MinMaxScaler = FeatureEncoder.wraps(MinMaxScaler) RobustScaler = FeatureEncoder.wraps(RobustScaler) From 2922a1dd77342b0676acbe263b15a33b4780b3d8 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Fri, 7 Apr 2023 08:56:33 +0200 Subject: [PATCH 50/95] update setup.cfg --- setup.cfg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.cfg b/setup.cfg index de0ac067..ef292c71 100644 --- a/setup.cfg +++ b/setup.cfg @@ -34,8 +34,8 @@ python_requires = >=3.7 install_requires = scikit-learn>=1.0 nflows>=0.14 - pandas>=1.3 - torch>=1.10.0 + pandas>=1.3,<2.0 + torch>=1.10.0,<2.0 numpy>=1.20 lifelines>=0.27 opacus>=1.3 From 11fb825f5a35a9ff6238f362234c9d7c8d481195 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Fri, 7 Apr 2023 08:57:14 +0200 Subject: [PATCH 51/95] update setup.cfg --- setup.cfg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.cfg b/setup.cfg index 06c9fd74..5788d984 100644 --- a/setup.cfg +++ b/setup.cfg @@ -34,8 +34,8 @@ python_requires = >=3.7 install_requires = scikit-learn>=1.0 nflows>=0.14 - pandas>=1.3 - torch>=1.10.0 + pandas>=1.3,<2.0 + torch>=1.10.0,<2.0 numpy>=1.20 lifelines>=0.27 opacus>=1.3 From 9222b4e5bb9ec27b84362a902706884a6cb8b525 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Fri, 7 Apr 2023 09:57:13 +0200 Subject: [PATCH 52/95] debug datetimeDistribution --- src/synthcity/plugins/core/distribution.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/src/synthcity/plugins/core/distribution.py b/src/synthcity/plugins/core/distribution.py index 96ce24db..485c9a5a 100644 --- a/src/synthcity/plugins/core/distribution.py +++ b/src/synthcity/plugins/core/distribution.py @@ -383,25 +383,29 @@ class DatetimeDistribution(Distribution): :parts: 1 """ + offset: int = 120 low: datetime = datetime.utcfromtimestamp(0) high: datetime = datetime.now() - offset: int = 120 + + @validator("offset", always=True) + def _validate_offset(cls: Any, v: int) -> int: + if v < 0: + raise ValueError("offset must be greater than 0") + return v @validator("low", always=True) def _validate_low_thresh(cls: Any, v: datetime, values: Dict) -> datetime: mkey = "marginal_distribution" if mkey in values and values[mkey] is not None: v = values[mkey].index.min() - - return v - timedelta(seconds=cls.offset) + return v - timedelta(seconds=values["offset"]) @validator("high", always=True) def _validate_high_thresh(cls: 
Any, v: datetime, values: Dict) -> datetime: mkey = "marginal_distribution" if mkey in values and values[mkey] is not None: v = values[mkey].index.max() - - return v + timedelta(seconds=cls.offset) + return v + timedelta(seconds=values["offset"]) def get(self) -> List[Any]: return [self.name, self.low, self.high] From 95302b9c9c9fe9abcfbf0b7c008dca83fde6dc29 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Fri, 7 Apr 2023 10:00:38 +0200 Subject: [PATCH 53/95] clean --- tests/plugins/generic/test_ddpm.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tests/plugins/generic/test_ddpm.py b/tests/plugins/generic/test_ddpm.py index 8fc4664a..733d367c 100644 --- a/tests/plugins/generic/test_ddpm.py +++ b/tests/plugins/generic/test_ddpm.py @@ -23,12 +23,6 @@ num_timesteps=100, model_type="mlp", ) -# plugin_params = dict( -# n_iter=1000, -# batch_size=1000, -# num_timesteps=30, -# model_type="tabnet", -# ) def extend_fixtures( From 785db826b5f8433ec387814f6c4cdc606f8e9dd2 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Fri, 7 Apr 2023 12:34:20 +0200 Subject: [PATCH 54/95] update setup.cfg and goggle test --- setup.cfg | 2 -- tests/plugins/generic/test_goggle.py | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/setup.cfg b/setup.cfg index 5788d984..caa59d0e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -58,8 +58,6 @@ install_requires = monai tsai; python_version>"3.7" importlib-metadata; python_version<"3.8" - igraph - pytest-cov [options.packages.find] where = src diff --git a/tests/plugins/generic/test_goggle.py b/tests/plugins/generic/test_goggle.py index 9b194ae0..2c9b5f4a 100644 --- a/tests/plugins/generic/test_goggle.py +++ b/tests/plugins/generic/test_goggle.py @@ -17,7 +17,7 @@ plugin_name = "goggle" plugin_args = { - "n_iter": 10, + "n_iter": 500, "device": "cpu", } From 27cc95c2a6547341965ad3f0af492b4a7c179a71 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Fri, 7 Apr 2023 22:17:14 +0200 Subject: [PATCH 55/95] move DDPM tutorial to tutorials/plugins --- tests/plugins/generic/test_ddpm.py | 2 +- .../generic/plugin_ddpm.ipynb} | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) rename tutorials/{tutorial8_tabular_modelling_with_diffusion.ipynb => plugins/generic/plugin_ddpm.ipynb} (99%) diff --git a/tests/plugins/generic/test_ddpm.py b/tests/plugins/generic/test_ddpm.py index 733d367c..c6e3e319 100644 --- a/tests/plugins/generic/test_ddpm.py +++ b/tests/plugins/generic/test_ddpm.py @@ -18,7 +18,7 @@ plugin_name = "ddpm" plugin_params = dict( - n_iter=1000, + n_iter=500, batch_size=1000, num_timesteps=100, model_type="mlp", diff --git a/tutorials/tutorial8_tabular_modelling_with_diffusion.ipynb b/tutorials/plugins/generic/plugin_ddpm.ipynb similarity index 99% rename from tutorials/tutorial8_tabular_modelling_with_diffusion.ipynb rename to tutorials/plugins/generic/plugin_ddpm.ipynb index 686c67f5..14ed6929 100644 --- a/tutorials/tutorial8_tabular_modelling_with_diffusion.ipynb +++ b/tutorials/plugins/generic/plugin_ddpm.ipynb @@ -6,7 +6,7 @@ "id": "97e2d93c", "metadata": {}, "source": [ - "# Tutorial 8: Modelling tabular data with diffusion models\n", + "# Modelling tabular data with diffusion models\n", "\n", "This tutorial demonstrates hot to use a denoising diffusion probabilistic model (DDPM) to synthesize tabular data. The algorithm was proposed in [TabDDPM: Modelling Tabular Data with Diffusion Models](https://arxiv.org/abs/2209.15421)." 
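Note on the `debug datetimeDistribution` fix above: in a pydantic v1 validator,
`cls` is the model class rather than an instance, so `cls.offset` cannot see
the validated value; previously validated fields must be read from the `values`
dict. That is also why `offset` moves above `low` and `high` in the class body:
fields are validated in declaration order, and `values` only contains fields
declared before the current one. A minimal sketch of the pattern, using a
hypothetical `Window` model rather than the patched class:

    # pydantic v1 style: `values` holds only the already-validated fields,
    # so `offset` must be declared before `low` for this to work.
    from datetime import datetime, timedelta
    from pydantic import BaseModel, validator

    class Window(BaseModel):
        offset: int = 120  # seconds of padding around the observed range
        low: datetime = datetime.utcfromtimestamp(0)

        @validator("low", always=True)
        def _pad_low(cls, v: datetime, values: dict) -> datetime:
            return v - timedelta(seconds=values["offset"])

    print(Window().low)  # 1969-12-31 23:58:00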
] From 1d7c77c880e9529bfcfc9a4f5e2f7beb81148b42 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Fri, 7 Apr 2023 22:17:53 +0200 Subject: [PATCH 56/95] update tabnet.py reference --- src/synthcity/plugins/core/models/tabnet.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/synthcity/plugins/core/models/tabnet.py b/src/synthcity/plugins/core/models/tabnet.py index c5fc702f..353dabb8 100644 --- a/src/synthcity/plugins/core/models/tabnet.py +++ b/src/synthcity/plugins/core/models/tabnet.py @@ -1,3 +1,10 @@ +""" +TabNet: Attentive Interpretable Tabular Learning +Reference: +- https://arxiv.org/pdf/1908.07442.pdf +- https://github.com/dreamquark-ai/tabnet +""" + # stdlib from typing import List, Optional, Tuple From 6c25377282b29cf829c2f2c85f5176efabe57194 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Fri, 7 Apr 2023 23:37:19 +0200 Subject: [PATCH 57/95] update tab_ddpm --- .../core/models/tabular_ddpm/__init__.py | 14 +++++------- src/synthcity/plugins/generic/plugin_ddpm.py | 2 -- tests/plugins/generic/test_ddpm.py | 22 +++++++++++-------- tests/plugins/generic/test_goggle.py | 3 +++ 4 files changed, 22 insertions(+), 19 deletions(-) diff --git a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py index b70d0d33..cb4ea67a 100644 --- a/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py +++ b/src/synthcity/plugins/core/models/tabular_ddpm/__init__.py @@ -10,7 +10,7 @@ from pydantic import validate_arguments from torch import nn from torch.utils.data import DataLoader, TensorDataset -from tqdm import tqdm +from tqdm import trange # synthcity absolute from synthcity.logger import info @@ -38,7 +38,6 @@ def __init__( callbacks: Sequence[Callback] = (), device: torch.device = DEVICE, log_interval: int = 10, - print_interval: int = 100, # model params model_type: str = "mlp", model_params: Optional[dict] = None, @@ -141,8 +140,9 @@ def fit( curr_loss_multi = 0.0 curr_loss_gauss = 0.0 curr_count = 0 + pbar = trange(self.n_iter, desc="Epoch", leave=True) - for epoch in tqdm(range(self.n_iter)): + for epoch in pbar: self.epoch = epoch + 1 self.diffusion.train() @@ -166,21 +166,19 @@ def fit( if steps % self.log_interval == 0: mloss = np.around(curr_loss_multi / curr_count, 4) gloss = np.around(curr_loss_gauss / curr_count, 4) - if steps % self.print_interval == 0: - info( - f"Step {steps}: MLoss: {mloss} GLoss: {gloss} Sum: {mloss + gloss}" - ) + loss = mloss + gloss self.loss_history.append( [ steps, mloss, gloss, - mloss + gloss, + loss, ] ) curr_count = 0 curr_loss_gauss = 0.0 curr_loss_multi = 0.0 + pbar.set_postfix(loss=loss) self._update_ema( self.ema_model.parameters(), self.diffusion.parameters() diff --git a/src/synthcity/plugins/generic/plugin_ddpm.py b/src/synthcity/plugins/generic/plugin_ddpm.py index 09826a97..cc851a8e 100644 --- a/src/synthcity/plugins/generic/plugin_ddpm.py +++ b/src/synthcity/plugins/generic/plugin_ddpm.py @@ -103,7 +103,6 @@ def __init__( device: Any = DEVICE, callbacks: Sequence[Callback] = (), log_interval: int = 100, - print_interval: int = 500, model_type: str = "mlp", model_params: dict = {}, dim_embed: int = 128, @@ -139,7 +138,6 @@ def __init__( device=device, callbacks=callbacks, log_interval=log_interval, - print_interval=print_interval, model_type=model_type, model_params=model_params.copy(), dim_embed=dim_embed, diff --git a/tests/plugins/generic/test_ddpm.py b/tests/plugins/generic/test_ddpm.py index c6e3e319..ae0462d8 100644 --- 
a/tests/plugins/generic/test_ddpm.py +++ b/tests/plugins/generic/test_ddpm.py @@ -22,6 +22,7 @@ batch_size=1000, num_timesteps=100, model_type="mlp", + sampling_patience=100, ) @@ -84,19 +85,21 @@ def test_plugin_generate(test_plugin: Plugin) -> None: "test_plugin", extend_fixtures(is_classification=[True, False]) ) def test_plugin_generate_constraints(test_plugin: Plugin) -> None: - X = pd.DataFrame(load_iris()["data"]) + X, y = load_iris(as_frame=True, return_X_y=True) + X["target"] = y test_plugin.fit(GenericDataLoader(X)) constraints = Constraints( rules=[ - ("0", "le", 6), - ("0", "ge", 4.3), - ("1", "le", 4.4), - ("1", "ge", 3), - ("2", "le", 5.5), - ("2", "ge", 1.0), - ("3", "le", 2), - ("3", "ge", 0.1), + ("target", "eq", 1), + ("sepal length (cm)", "le", 6), + ("sepal length (cm)", "ge", 4.3), + ("sepal width (cm)", "le", 4.4), + ("sepal width (cm)", "ge", 3), + ("petal length (cm)", "le", 5.5), + ("petal length (cm)", "ge", 1.0), + ("petal width (cm)", "le", 2), + ("petal width (cm)", "ge", 0.1), ] ) @@ -104,6 +107,7 @@ def test_plugin_generate_constraints(test_plugin: Plugin) -> None: assert len(X_gen) == len(X) assert test_plugin.schema_includes(X_gen) assert constraints.filter(X_gen).sum() == len(X_gen) + assert (X_gen["target"] == 1).all() X_gen = test_plugin.generate(count=50, constraints=constraints).dataframe() assert len(X_gen) == 50 diff --git a/tests/plugins/generic/test_goggle.py b/tests/plugins/generic/test_goggle.py index 2c9b5f4a..9b58ac4e 100644 --- a/tests/plugins/generic/test_goggle.py +++ b/tests/plugins/generic/test_goggle.py @@ -106,6 +106,9 @@ def test_plugin_generate(test_plugin: Plugin, serialize: bool) -> None: assert (X_gen1.numpy() != X_gen3.numpy()).any() +is_missing_goggle_deps = True + + @pytest.mark.skipif(is_missing_goggle_deps, reason="Goggle dependencies not installed") @pytest.mark.parametrize( "test_plugin", From 3623d37c8e075e1b085b640cbd899b3748daf490 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Sat, 8 Apr 2023 17:52:04 +0200 Subject: [PATCH 58/95] update distribution, add optuna utils and tutorial --- src/synthcity/plugins/core/distribution.py | 134 +- src/synthcity/plugins/generic/plugin_ddpm.py | 20 +- src/synthcity/utils/optuna_sample.py | 33 + ...utorial8_hyperparameter_optimization.ipynb | 9862 +++++++++++++++++ 4 files changed, 9980 insertions(+), 69 deletions(-) create mode 100644 src/synthcity/utils/optuna_sample.py create mode 100644 tutorials/tutorial8_hyperparameter_optimization.ipynb diff --git a/src/synthcity/plugins/core/distribution.py b/src/synthcity/plugins/core/distribution.py index fb486e0a..06b3a99b 100644 --- a/src/synthcity/plugins/core/distribution.py +++ b/src/synthcity/plugins/core/distribution.py @@ -111,17 +111,25 @@ def as_constraint(self) -> Constraints: @abstractmethod def min(self) -> Any: - "Get the min value of the distribution" + """Get the min value of the distribution.""" ... @abstractmethod def max(self) -> Any: - "Get the max value of the distribution" + """Get the max value of the distribution.""" ... - @abstractmethod def __eq__(self, other: Any) -> bool: - ... 
+ return type(self) == type(other) and self.get() == other.get() + + def __contains__(self, item: Any) -> bool: + """ + Example: + >>> dist = CategoricalDistribution(name="foo", choices=["a", "b", "c"]) + >>> "a" in dist + True + """ + return self.has(item) @abstractmethod def dtype(self) -> str: @@ -146,7 +154,7 @@ def _validate_choices(cls: Any, v: List, values: Dict) -> List: raise ValueError( "Invalid choices for CategoricalDistribution. Provide data or choices params" ) - return v + return sorted(set(v)) def get(self) -> List[Any]: return [self.name, self.choices] @@ -176,12 +184,6 @@ def min(self) -> Any: def max(self) -> Any: return max(self.choices) - def __eq__(self, other: Any) -> bool: - if not isinstance(other, CategoricalDistribution): - return False - - return self.name == other.name and set(self.choices) == set(other.choices) - def dtype(self) -> str: types = { "object": 0, @@ -259,20 +261,26 @@ def min(self) -> Any: def max(self) -> Any: return self.high - def __eq__(self, other: Any) -> bool: - if not isinstance(other, FloatDistribution): - return False - - return ( - self.name == other.name - and self.low == other.low - and self.high == other.high - ) - def dtype(self) -> str: return "float" +class LogDistribution(FloatDistribution): + low: float = np.finfo(np.float64).tiny + high: float = np.finfo(np.float64).max + + def get(self) -> List[Any]: + return [self.name, self.low, self.high] + + def sample(self, count: int = 1) -> Any: + np.random.seed(self.random_state) + msamples = self.sample_marginal(count) + if msamples is not None: + return msamples + lo, hi = np.log2(self.low), np.log2(self.high) + return 2.0 ** np.random.uniform(lo, hi, count) + + class IntegerDistribution(Distribution): """ .. inheritance-diagram:: synthcity.plugins.core.distribution.IntegerDistribution @@ -298,6 +306,12 @@ def _validate_high_thresh(cls: Any, v: int, values: Dict) -> int: return int(values[mkey].index.max()) return v + @validator("step", always=True) + def _validate_step(cls: Any, v: int, values: Dict) -> int: + if v < 1: + raise ValueError("Step must be greater than 0") + return v + def get(self) -> List[Any]: return [self.name, self.low, self.high, self.step] @@ -307,8 +321,9 @@ def sample(self, count: int = 1) -> Any: if msamples is not None: return msamples - choices = [val for val in range(self.low, self.high + 1, self.step)] - return np.random.choice(choices, count).tolist() + steps = (self.high - self.low) // self.step + samples = np.random.choice(steps + 1, count) + return samples * self.step + self.low def has(self, val: Any) -> bool: return self.low <= val and val <= self.high @@ -331,21 +346,33 @@ def min(self) -> Any: def max(self) -> Any: return self.high - def __eq__(self, other: Any) -> bool: - if not isinstance(other, IntegerDistribution): - return False - - return ( - self.name == other.name - and self.low == other.low - and self.high == other.high - ) - def dtype(self) -> str: return "int" -OFFSET = 120 +class IntLogDistribution(IntegerDistribution): + low: int = 1 + high: int = np.iinfo(np.int64).max + step: int = 2 # the next sample larger than x is step * x + + @validator("step", always=True) + def _validate_step(cls: Any, v: int, values: Dict) -> int: + if v < 2: + raise ValueError("Step must be greater than 1") + return v + + def get(self) -> List[Any]: + return [self.name, self.low, self.high, self.step] + + def sample(self, count: int = 1) -> Any: + np.random.seed(self.random_state) + msamples = self.sample_marginal(count) + if msamples is not None: + return 
msamples + steps = int(np.log2(self.high / self.low) / np.log2(self.step)) + samples = np.random.choice(steps + 1, count) + samples = self.low * self.step**samples + return samples.astype(int) class DatetimeDistribution(Distribution): @@ -356,25 +383,25 @@ class DatetimeDistribution(Distribution): low: datetime = datetime.utcfromtimestamp(0) high: datetime = datetime.now() + step: timedelta = timedelta(microseconds=1) + offset: timedelta = timedelta(seconds=120) @validator("low", always=True) def _validate_low_thresh(cls: Any, v: datetime, values: Dict) -> datetime: mkey = "marginal_distribution" if mkey in values and values[mkey] is not None: v = values[mkey].index.min() - - return v - timedelta(seconds=OFFSET) + return v @validator("high", always=True) def _validate_high_thresh(cls: Any, v: datetime, values: Dict) -> datetime: mkey = "marginal_distribution" if mkey in values and values[mkey] is not None: v = values[mkey].index.max() - - return v + timedelta(seconds=OFFSET) + return v def get(self) -> List[Any]: - return [self.name, self.low, self.high] + return [self.name, self.low, self.high, self.step, self.offset] def sample(self, count: int = 1) -> Any: np.random.seed(self.random_state) @@ -382,23 +409,18 @@ def sample(self, count: int = 1) -> Any: if msamples is not None: return msamples - samples = np.random.uniform( - datetime.timestamp(self.low), datetime.timestamp(self.high), count - ) - - samples_dt = [] - for s in samples: - samples_dt.append(datetime.fromtimestamp(s)) - - return samples_dt + n = (self.high - self.low) // self.step + 1 + samples = np.round(np.random.rand(count) * n - 0.5) + return self.low + samples * self.step def has(self, val: datetime) -> bool: return self.low <= val and val <= self.high def includes(self, other: "Distribution") -> bool: - return self.min() - timedelta( - seconds=OFFSET - ) <= other.min() and other.max() <= self.max() + timedelta(seconds=OFFSET) + return ( + self.min() - self.offset <= other.min() + and other.max() <= self.max() + self.offset + ) def as_constraint(self) -> Constraints: return Constraints( @@ -415,16 +437,6 @@ def min(self) -> Any: def max(self) -> Any: return self.high - def __eq__(self, other: Any) -> bool: - if not isinstance(other, DatetimeDistribution): - return False - - return ( - self.name == other.name - and self.low == other.low - and self.high == other.high - ) - def dtype(self) -> str: return "datetime" diff --git a/src/synthcity/plugins/generic/plugin_ddpm.py b/src/synthcity/plugins/generic/plugin_ddpm.py index 631480fc..0af21cf7 100644 --- a/src/synthcity/plugins/generic/plugin_ddpm.py +++ b/src/synthcity/plugins/generic/plugin_ddpm.py @@ -15,7 +15,12 @@ # synthcity absolute from synthcity.plugins.core.dataloader import DataLoader -from synthcity.plugins.core.distribution import CategoricalDistribution, Distribution +from synthcity.plugins.core.distribution import ( + Distribution, + IntegerDistribution, + IntLogDistribution, + LogDistribution, +) from synthcity.plugins.core.models.tabular_ddpm import TabDDPM from synthcity.plugins.core.plugin import Plugin from synthcity.plugins.core.schema import Schema @@ -174,13 +179,12 @@ def hyperparameter_space(**kwargs: Any) -> List[Distribution]: Gaussian diffusion loss MSE """ return [ - # TODO: change to loguniform distribution - CategoricalDistribution(name="lr", choices=[1e-5, 1e-4, 1e-3, 2e-3, 3e-3]), - CategoricalDistribution(name="batch_size", choices=[256, 4096]), - CategoricalDistribution(name="num_timesteps", choices=[100, 1000]), - 
CategoricalDistribution(name="n_iter", choices=[5000, 10000, 20000]), - CategoricalDistribution(name="n_layers_hidden", choices=[2, 4, 6, 8]), - CategoricalDistribution(name="dim_hidden", choices=[128, 256, 512, 1024]), + LogDistribution(name="lr", low=1e-5, high=1e-1), + IntLogDistribution(name="batch_size", low=256, high=4096), + IntegerDistribution(name="num_timesteps", low=10, high=1000), + IntLogDistribution(name="n_iter", low=1000, high=10000), + IntegerDistribution(name="n_layers_hidden", low=2, high=8), + IntLogDistribution(name="dim_hidden", low=128, high=1024), ] def _fit(self, X: DataLoader, *args: Any, **kwargs: Any) -> "TabDDPMPlugin": diff --git a/src/synthcity/utils/optuna_sample.py b/src/synthcity/utils/optuna_sample.py new file mode 100644 index 00000000..c19dae66 --- /dev/null +++ b/src/synthcity/utils/optuna_sample.py @@ -0,0 +1,33 @@ +# stdlib +from typing import Any, Dict, List + +# third party +import optuna + +# synthcity absolute +import synthcity.plugins.core.distribution as D + + +def suggest(trial: optuna.Trial, dist: D.Distribution) -> Any: + if isinstance(dist, D.FloatDistribution): + return trial.suggest_float(dist.name, dist.low, dist.high) + elif isinstance(dist, D.LogDistribution): + return trial.suggest_float(dist.name, dist.low, dist.high, log=True) + elif isinstance(dist, D.IntegerDistribution): + return trial.suggest_int(dist.name, dist.low, dist.high, dist.step) + elif isinstance(dist, D.IntLogDistribution): + # ! does not handle step yet + return trial.suggest_int(dist.name, dist.low, dist.high, log=True) + elif isinstance(dist, D.CategoricalDistribution): + return trial.suggest_categorical(dist.name, dist.choices) + # ! the modification cannot be reflected in study.best_params + # elif isinstance(dist, D.DatetimeDistribution): + # high = (dist.high - dist.low) / dist.step + # s = trial.suggest_float(dist.name, 0, high) + # return dist.low + dist.step * s + else: + raise ValueError(f"Unknown dist: {dist}") + + +def suggest_all(trial: optuna.Trial, distributions: List[D.Distribution]) -> Dict: + return {dist.name: suggest(trial, dist) for dist in distributions} diff --git a/tutorials/tutorial8_hyperparameter_optimization.ipynb b/tutorials/tutorial8_hyperparameter_optimization.ipynb new file mode 100644 index 00000000..4cf7c965 --- /dev/null +++ b/tutorials/tutorial8_hyperparameter_optimization.ipynb @@ -0,0 +1,9862 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Tutorial 8: Hyperparameter Optimization\n", + "\n", + "To automatically tune hyperparameters in a `synthcity` plugin to generate more realistic data, we use hyperparameter optimization (HPO) algorithms such as Tree-structured Parzen estimators (TPE), Bayesian optimization, and genetic programming. In this tutorial we will use `optuna`, a very popular HPO library implementing TPE, to tune the hyperparameters of the `nflow` plugin to synthesize the diabetes dataset." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[KeOps] Warning : \n", + " The default C++ compiler could not be found on your system.\n", + " You need to either define the CXX environment variable or a symlink to the g++ command.\n", + " For example if g++-8 is the command you can do\n", + " import os\n", + " os.environ['CXX'] = 'g++-8'\n", + " \n", + "[KeOps] Warning : Cuda libraries were not detected on the system ; using cpu only mode\n" + ] + } + ], + "source": [ + "# stdlib\n", + "import sys\n", + "import warnings\n", + "\n", + "# third party\n", + "import optuna\n", + "from sklearn.datasets import load_diabetes\n", + "\n", + "# synthcity absolute\n", + "import synthcity.logger as log\n", + "from synthcity.plugins import Plugins\n", + "from synthcity.plugins.core.dataloader import GenericDataLoader\n", + "\n", + "log.add(sink=sys.stderr, level=\"INFO\")\n", + "warnings.filterwarnings(\"ignore\")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Load the dataset" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
agesexbmibps1s2s3s4s5s6target
00.0380760.0506800.0616960.021872-0.044223-0.034821-0.043401-0.0025920.019907-0.017646151.0
1-0.001882-0.044642-0.051474-0.026328-0.008449-0.0191630.074412-0.039493-0.068332-0.09220475.0
20.0852990.0506800.044451-0.005670-0.045599-0.034194-0.032356-0.0025920.002861-0.025930141.0
3-0.089063-0.044642-0.011595-0.0366560.0121910.024991-0.0360380.0343090.022688-0.009362206.0
40.005383-0.044642-0.0363850.0218720.0039350.0155960.008142-0.002592-0.031988-0.046641135.0
....................................
4370.0417080.0506800.0196620.059744-0.005697-0.002566-0.028674-0.0025920.0311930.007207178.0
438-0.0055150.050680-0.015906-0.0676420.0493410.079165-0.0286740.034309-0.0181140.044485104.0
4390.0417080.050680-0.0159060.017293-0.037344-0.013840-0.024993-0.011080-0.0468830.015491132.0
440-0.045472-0.0446420.0390620.0012150.0163180.015283-0.0286740.0265600.044529-0.025930220.0
441-0.045472-0.044642-0.073030-0.0814130.0837400.0278090.173816-0.039493-0.0042220.00306457.0
\n", + "

442 rows × 11 columns

\n", + "
" + ], + "text/plain": [ + " age sex bmi bp s1 s2 s3 \\\n", + "0 0.038076 0.050680 0.061696 0.021872 -0.044223 -0.034821 -0.043401 \n", + "1 -0.001882 -0.044642 -0.051474 -0.026328 -0.008449 -0.019163 0.074412 \n", + "2 0.085299 0.050680 0.044451 -0.005670 -0.045599 -0.034194 -0.032356 \n", + "3 -0.089063 -0.044642 -0.011595 -0.036656 0.012191 0.024991 -0.036038 \n", + "4 0.005383 -0.044642 -0.036385 0.021872 0.003935 0.015596 0.008142 \n", + ".. ... ... ... ... ... ... ... \n", + "437 0.041708 0.050680 0.019662 0.059744 -0.005697 -0.002566 -0.028674 \n", + "438 -0.005515 0.050680 -0.015906 -0.067642 0.049341 0.079165 -0.028674 \n", + "439 0.041708 0.050680 -0.015906 0.017293 -0.037344 -0.013840 -0.024993 \n", + "440 -0.045472 -0.044642 0.039062 0.001215 0.016318 0.015283 -0.028674 \n", + "441 -0.045472 -0.044642 -0.073030 -0.081413 0.083740 0.027809 0.173816 \n", + "\n", + " s4 s5 s6 target \n", + "0 -0.002592 0.019907 -0.017646 151.0 \n", + "1 -0.039493 -0.068332 -0.092204 75.0 \n", + "2 -0.002592 0.002861 -0.025930 141.0 \n", + "3 0.034309 0.022688 -0.009362 206.0 \n", + "4 -0.002592 -0.031988 -0.046641 135.0 \n", + ".. ... ... ... ... \n", + "437 -0.002592 0.031193 0.007207 178.0 \n", + "438 0.034309 -0.018114 0.044485 104.0 \n", + "439 -0.011080 -0.046883 0.015491 132.0 \n", + "440 0.026560 0.044529 -0.025930 220.0 \n", + "441 -0.039493 -0.004222 0.003064 57.0 \n", + "\n", + "[442 rows x 11 columns]" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "X, y = load_diabetes(return_X_y=True, as_frame=True)\n", + "X[\"target\"] = y\n", + "X" + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "metadata": {}, + "outputs": [], + "source": [ + "loader = GenericDataLoader(\n", + " X,\n", + " target_column=\"target\",\n", + " sensitive_columns=[\"sex\"],\n", + ")\n", + "train_loader, test_loader = loader.train(), loader.test()" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Load the plugin class" + ] + }, + { + "cell_type": "code", + "execution_count": 50, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[2023-04-07T21:51:56.689921+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n" + ] + }, + { + "data": { + "text/plain": [ + "synthcity.plugins.generic.plugin_nflow.NormalizingFlowsPlugin" + ] + }, + "execution_count": 50, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "PLUGIN = \"nflow\"\n", + "plugin_cls = type(Plugins().get(PLUGIN))\n", + "plugin_cls" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Display the hyperparameter space" + ] + }, + { + "cell_type": "code", + "execution_count": 51, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[IntegerDistribution(name='n_iter', data=None, random_state=0, marginal_distribution=None, low=100, high=5000, step=100),\n", + " IntegerDistribution(name='n_layers_hidden', data=None, random_state=0, marginal_distribution=None, low=1, high=10, step=1),\n", + " IntegerDistribution(name='n_units_hidden', data=None, random_state=0, marginal_distribution=None, low=10, high=100, step=1),\n", + " CategoricalDistribution(name='batch_size', data=None, random_state=0, marginal_distribution=None, choices=[32, 64, 128, 256, 512]),\n", + " FloatDistribution(name='dropout', data=None, random_state=0, marginal_distribution=None, 
low=0.0, high=0.2),\n", + " CategoricalDistribution(name='batch_norm', data=None, random_state=0, marginal_distribution=None, choices=[True, False]),\n", + " CategoricalDistribution(name='lr', data=None, random_state=0, marginal_distribution=None, choices=[0.001, 0.0001, 0.0002]),\n", + " CategoricalDistribution(name='linear_transform_type', data=None, random_state=0, marginal_distribution=None, choices=['lu', 'permutation', 'svd']),\n", + " CategoricalDistribution(name='base_transform_type', data=None, random_state=0, marginal_distribution=None, choices=['affine-coupling', 'quadratic-coupling', 'rq-coupling', 'affine-autoregressive', 'quadratic-autoregressive', 'rq-autoregressive'])]" + ] + }, + "execution_count": 51, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "plugin_cls.hyperparameter_space()" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Use a trial to suggest a set of hyperparameters" + ] + }, + { + "cell_type": "code", + "execution_count": 52, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'n_iter': 486,\n", + " 'n_layers_hidden': 10,\n", + " 'n_units_hidden': 87,\n", + " 'batch_size': 512,\n", + " 'dropout': 0.016022465975681178,\n", + " 'batch_norm': True,\n", + " 'lr': 0.001,\n", + " 'linear_transform_type': 'svd',\n", + " 'base_transform_type': 'affine-coupling'}" + ] + }, + "execution_count": 52, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from synthcity.utils.optuna_sample import suggest_all\n", + "\n", + "trial = optuna.create_study().ask()\n", + "params = suggest_all(trial, plugin_cls.hyperparameter_space())\n", + "params" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Evaluate the plugin with the suggested hyperparameters" + ] + }, + { + "cell_type": "code", + "execution_count": 53, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + " 62%|██████▏ | 299/486 [01:26<00:53, 3.47it/s]\n", + "[2023-04-07T21:53:29.785866+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + " 62%|██████▏ | 299/486 [01:30<00:56, 3.31it/s]\n" + ] + }, + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
minmaxmeanstddevmedianiqrroundserrorsdurationsdirection
detection.detection_mlp.mean0.3900710.3900710.3900710.00.3900710.0102.51minimize
\n", + "
" + ], + "text/plain": [ + " min max mean stddev median \\\n", + "detection.detection_mlp.mean 0.390071 0.390071 0.390071 0.0 0.390071 \n", + "\n", + " iqr rounds errors durations direction \n", + "detection.detection_mlp.mean 0.0 1 0 2.51 minimize " + ] + }, + "execution_count": 53, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from synthcity.benchmark import Benchmarks\n", + "\n", + "plugin = plugin_cls(**params).fit(train_loader)\n", + "report = Benchmarks.evaluate(\n", + " [(\"trial\", PLUGIN, params)],\n", + " train_loader, # Benchmarks.evaluate will split out a validation set\n", + " repeats=1,\n", + " metrics={\"detection\": [\"detection_mlp\"]}, # DELETE THIS LINE FOR ALL METRICS\n", + ")\n", + "report['trial']" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Create an Optuna study and optimize the hyperparameters" + ] + }, + { + "cell_type": "code", + "execution_count": 55, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[2023-04-07T21:57:37.669090+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + "[2023-04-07T21:57:37.689827+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + "100%|██████████| 100/100 [00:12<00:00, 8.05it/s]\n", + "[2023-04-07T21:57:53.690237+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + "[2023-04-07T21:57:53.712601+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + "100%|██████████| 100/100 [00:04<00:00, 24.77it/s]\n", + "[2023-04-07T21:58:01.728358+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + "[2023-04-07T21:58:01.744010+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + "100%|██████████| 100/100 [00:24<00:00, 4.08it/s]\n", + "[2023-04-07T21:58:32.292499+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + "[2023-04-07T21:58:32.316002+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + "100%|██████████| 100/100 [01:30<00:00, 1.10it/s]\n", + "[2023-04-07T22:00:38.652411+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + "[2023-04-07T22:00:38.685914+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + "100%|██████████| 100/100 [00:23<00:00, 4.21it/s]\n", + "[2023-04-07T22:01:09.148491+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + "[2023-04-07T22:01:09.178259+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + "100%|██████████| 100/100 [00:06<00:00, 14.79it/s]\n", + "[2023-04-07T22:01:20.722191+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + "[2023-04-07T22:01:20.751419+0200][4048][CRITICAL] module disabled: 
D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + "100%|██████████| 100/100 [02:00<00:00, 1.21s/it]\n", + "[2023-04-07T22:03:29.180475+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + "[2023-04-07T22:03:29.211421+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + "100%|██████████| 100/100 [01:37<00:00, 1.02it/s]\n", + "[2023-04-07T22:05:12.012437+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + "[2023-04-07T22:05:12.030781+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + "100%|██████████| 100/100 [00:57<00:00, 1.74it/s]\n", + "[2023-04-07T22:06:14.408112+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + "[2023-04-07T22:06:14.431469+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + "100%|██████████| 100/100 [00:11<00:00, 8.47it/s]\n" + ] + }, + { + "data": { + "text/plain": [ + "{'n_iter': 4929,\n", + " 'n_layers_hidden': 1,\n", + " 'n_units_hidden': 65,\n", + " 'batch_size': 256,\n", + " 'dropout': 0.04046713177503456,\n", + " 'batch_norm': True,\n", + " 'lr': 0.001,\n", + " 'linear_transform_type': 'lu',\n", + " 'base_transform_type': 'affine-coupling'}" + ] + }, + "execution_count": 55, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "def objective(trial: optuna.Trial):\n", + " hp_space = Plugins().get(PLUGIN).hyperparameter_space()\n", + " params = suggest_all(trial, hp_space[1:]) # fix n_iter=100 for speed\n", + " params['n_iter'] = 100\n", + " ID = f\"trial_{trial.number}\"\n", + " report = Benchmarks.evaluate(\n", + " [(ID, PLUGIN, params)],\n", + " train_loader,\n", + " repeats=1,\n", + " metrics={\"detection\": [\"detection_mlp\"]}, # DELETE THIS LINE FOR ALL METRICS\n", + " )\n", + " score = report[ID].query('direction == \"minimize\"')['mean'].mean()\n", + " # average score across all metrics with direction=\"minimize\"\n", + " return score\n", + "\n", + "study = optuna.create_study(direction=\"minimize\")\n", + "study.optimize(objective, n_trials=10)\n", + "study.best_params" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Visualize the study" + ] + }, + { + "cell_type": "code", + "execution_count": 56, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.plotly.v1+json": { + "config": { + "plotlyServerURL": "https://plot.ly" + }, + "data": [ + { + "mode": "markers", + "name": "Objective Value", + "type": "scatter", + "x": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "y": [ + 0.5199275362318841, + 0.4701086956521739, + 0.518719806763285, + 0.5, + 0.5, + 0.5, + 0.5, + 0.49516908212560384, + 0.5, + 0.5 + ] + }, + { + "name": "Best Value", + "type": "scatter", + "x": [ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9 + ], + "y": [ + 0.5199275362318841, + 0.4701086956521739, + 0.4701086956521739, + 0.4701086956521739, + 0.4701086956521739, + 0.4701086956521739, + 0.4701086956521739, + 0.4701086956521739, + 0.4701086956521739, + 0.4701086956521739 + ] + } + ], + "layout": { + "template": { + "data": { + "bar": [ + { + "error_x": { + "color": "#2a3f5f" + }, + "error_y": { + "color": 
"#2a3f5f" + }, + "marker": { + "line": { + "color": "#E5ECF6", + "width": 0.5 + }, + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "bar" + } + ], + "barpolar": [ + { + "marker": { + "line": { + "color": "#E5ECF6", + "width": 0.5 + }, + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "barpolar" + } + ], + "carpet": [ + { + "aaxis": { + "endlinecolor": "#2a3f5f", + "gridcolor": "white", + "linecolor": "white", + "minorgridcolor": "white", + "startlinecolor": "#2a3f5f" + }, + "baxis": { + "endlinecolor": "#2a3f5f", + "gridcolor": "white", + "linecolor": "white", + "minorgridcolor": "white", + "startlinecolor": "#2a3f5f" + }, + "type": "carpet" + } + ], + "choropleth": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "choropleth" + } + ], + "contour": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "contour" + } + ], + "contourcarpet": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "contourcarpet" + } + ], + "heatmap": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "heatmap" + } + ], + "heatmapgl": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "heatmapgl" + } + ], + "histogram": [ + { + "marker": { + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "histogram" + } + ], + "histogram2d": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "histogram2d" + } + ], + "histogram2dcontour": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" 
+ ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "histogram2dcontour" + } + ], + "mesh3d": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "mesh3d" + } + ], + "parcoords": [ + { + "line": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "parcoords" + } + ], + "pie": [ + { + "automargin": true, + "type": "pie" + } + ], + "scatter": [ + { + "fillpattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + }, + "type": "scatter" + } + ], + "scatter3d": [ + { + "line": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatter3d" + } + ], + "scattercarpet": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattercarpet" + } + ], + "scattergeo": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattergeo" + } + ], + "scattergl": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattergl" + } + ], + "scattermapbox": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattermapbox" + } + ], + "scatterpolar": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterpolar" + } + ], + "scatterpolargl": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterpolargl" + } + ], + "scatterternary": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterternary" + } + ], + "surface": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "surface" + } + ], + "table": [ + { + "cells": { + "fill": { + "color": "#EBF0F8" + }, + "line": { + "color": "white" + } + }, + "header": { + "fill": { + "color": "#C8D4E3" + }, + "line": { + "color": "white" + } + }, + "type": "table" + } + ] + }, + "layout": { + "annotationdefaults": { + "arrowcolor": "#2a3f5f", + "arrowhead": 0, + "arrowwidth": 1 + }, + "autotypenumbers": "strict", + "coloraxis": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "colorscale": { + "diverging": [ + [ + 0, + "#8e0152" + ], + [ + 0.1, + "#c51b7d" + ], + [ + 0.2, + "#de77ae" + ], + [ + 0.3, + "#f1b6da" + ], + [ + 0.4, + "#fde0ef" + ], + [ + 0.5, + "#f7f7f7" + ], + [ + 0.6, + "#e6f5d0" + ], + [ + 0.7, + "#b8e186" + ], + [ + 0.8, + "#7fbc41" + ], + [ + 0.9, + "#4d9221" + ], + [ + 1, + "#276419" + ] + ], + "sequential": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "sequentialminus": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + 
"#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ] + }, + "colorway": [ + "#636efa", + "#EF553B", + "#00cc96", + "#ab63fa", + "#FFA15A", + "#19d3f3", + "#FF6692", + "#B6E880", + "#FF97FF", + "#FECB52" + ], + "font": { + "color": "#2a3f5f" + }, + "geo": { + "bgcolor": "white", + "lakecolor": "white", + "landcolor": "#E5ECF6", + "showlakes": true, + "showland": true, + "subunitcolor": "white" + }, + "hoverlabel": { + "align": "left" + }, + "hovermode": "closest", + "mapbox": { + "style": "light" + }, + "paper_bgcolor": "white", + "plot_bgcolor": "#E5ECF6", + "polar": { + "angularaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "bgcolor": "#E5ECF6", + "radialaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + } + }, + "scene": { + "xaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", + "zerolinecolor": "white" + }, + "yaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", + "zerolinecolor": "white" + }, + "zaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", + "zerolinecolor": "white" + } + }, + "shapedefaults": { + "line": { + "color": "#2a3f5f" + } + }, + "ternary": { + "aaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "baxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "bgcolor": "#E5ECF6", + "caxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + } + }, + "title": { + "x": 0.05 + }, + "xaxis": { + "automargin": true, + "gridcolor": "white", + "linecolor": "white", + "ticks": "", + "title": { + "standoff": 15 + }, + "zerolinecolor": "white", + "zerolinewidth": 2 + }, + "yaxis": { + "automargin": true, + "gridcolor": "white", + "linecolor": "white", + "ticks": "", + "title": { + "standoff": 15 + }, + "zerolinecolor": "white", + "zerolinewidth": 2 + } + } + }, + "title": { + "text": "Optimization History Plot" + }, + "xaxis": { + "title": { + "text": "Trial" + } + }, + "yaxis": { + "title": { + "text": "Objective Value" + } + } + } + } + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from optuna.visualization import plot_contour\n", + "from optuna.visualization import plot_edf\n", + "from optuna.visualization import plot_optimization_history\n", + "from optuna.visualization import plot_parallel_coordinate\n", + "from optuna.visualization import plot_param_importances\n", + "from optuna.visualization import plot_slice\n", + "\n", + "plot_optimization_history(study)" + ] + }, + { + "cell_type": "code", + "execution_count": 57, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.plotly.v1+json": { + "config": { + "plotlyServerURL": "https://plot.ly" + }, + "data": [ + { + "dimensions": [ + { + "label": "Objective Value", + "range": [ + 0.4701086956521739, + 0.5199275362318841 + ], + "values": [ + 0.518719806763285, + 0.49516908212560384, + 0.5, + 0.5, + 0.5199275362318841, + 0.4701086956521739, + 0.5, + 0.5, + 0.5, + 0.5 + ] + }, + { + "label": 
"base_transform_type", + "range": [ + 0, + 4 + ], + "ticktext": [ + "rq-coupling", + "affine-coupling", + "affine-autoregressive", + "quadratic-autoregressive", + "quadratic-coupling" + ], + "tickvals": [ + 0, + 1, + 2, + 3, + 4 + ], + "values": [ + 2, + 4, + 3, + 1, + 0, + 1, + 4, + 0, + 1, + 1 + ] + }, + { + "label": "batch_norm", + "range": [ + 0, + 1 + ], + "ticktext": [ + "False", + "True" + ], + "tickvals": [ + 0, + 1 + ], + "values": [ + 0, + 1, + 1, + 1, + 0, + 1, + 0, + 1, + 0, + 0 + ] + }, + { + "label": "batch_size", + "range": [ + 0, + 4 + ], + "ticktext": [ + "32", + "64", + "128", + "256", + "512" + ], + "tickvals": [ + 0, + 1, + 2, + 3, + 4 + ], + "values": [ + 0, + 0, + 1, + 1, + 2, + 3, + 3, + 4, + 4, + 4 + ] + }, + { + "label": "dropout", + "range": [ + 0.04046713177503456, + 0.1816709592718398 + ], + "values": [ + 0.051928275495301705, + 0.0768868475443224, + 0.13154994845426374, + 0.13397751341486178, + 0.0805911907341199, + 0.04046713177503456, + 0.16534854040828872, + 0.1816709592718398, + 0.136820133194068, + 0.11575633038847206 + ] + }, + { + "label": "linear_transform_...", + "range": [ + 0, + 2 + ], + "ticktext": [ + "svd", + "lu", + "permutation" + ], + "tickvals": [ + 0, + 1, + 2 + ], + "values": [ + 2, + 2, + 1, + 0, + 0, + 1, + 1, + 2, + 2, + 1 + ] + }, + { + "label": "lr", + "range": [ + 0, + 2 + ], + "ticktext": [ + "0.0001", + "0.0002", + "0.001" + ], + "tickvals": [ + 0, + 1, + 2 + ], + "values": [ + 2, + 2, + 1, + 2, + 2, + 2, + 2, + 0, + 1, + 1 + ] + }, + { + "label": "n_iter", + "range": [ + 249, + 4929 + ], + "values": [ + 3068, + 1368, + 249, + 1525, + 1129, + 4929, + 1595, + 2629, + 3151, + 1295 + ] + }, + { + "label": "n_layers_hidden", + "range": [ + 1, + 9 + ], + "values": [ + 7, + 5, + 7, + 2, + 1, + 1, + 9, + 7, + 4, + 9 + ] + }, + { + "label": "n_units_hidden", + "range": [ + 13, + 99 + ], + "values": [ + 79, + 99, + 80, + 24, + 13, + 65, + 94, + 46, + 62, + 72 + ] + } + ], + "labelangle": 30, + "labelside": "bottom", + "line": { + "color": [ + 0.518719806763285, + 0.49516908212560384, + 0.5, + 0.5, + 0.5199275362318841, + 0.4701086956521739, + 0.5, + 0.5, + 0.5, + 0.5 + ], + "colorbar": { + "title": { + "text": "Objective Value" + } + }, + "colorscale": [ + [ + 0, + "rgb(247,251,255)" + ], + [ + 0.125, + "rgb(222,235,247)" + ], + [ + 0.25, + "rgb(198,219,239)" + ], + [ + 0.375, + "rgb(158,202,225)" + ], + [ + 0.5, + "rgb(107,174,214)" + ], + [ + 0.625, + "rgb(66,146,198)" + ], + [ + 0.75, + "rgb(33,113,181)" + ], + [ + 0.875, + "rgb(8,81,156)" + ], + [ + 1, + "rgb(8,48,107)" + ] + ], + "reversescale": true, + "showscale": true + }, + "type": "parcoords" + } + ], + "layout": { + "template": { + "data": { + "bar": [ + { + "error_x": { + "color": "#2a3f5f" + }, + "error_y": { + "color": "#2a3f5f" + }, + "marker": { + "line": { + "color": "#E5ECF6", + "width": 0.5 + }, + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "bar" + } + ], + "barpolar": [ + { + "marker": { + "line": { + "color": "#E5ECF6", + "width": 0.5 + }, + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "barpolar" + } + ], + "carpet": [ + { + "aaxis": { + "endlinecolor": "#2a3f5f", + "gridcolor": "white", + "linecolor": "white", + "minorgridcolor": "white", + "startlinecolor": "#2a3f5f" + }, + "baxis": { + "endlinecolor": "#2a3f5f", + "gridcolor": "white", + "linecolor": "white", + "minorgridcolor": "white", + "startlinecolor": "#2a3f5f" + }, + "type": "carpet" + } + ], + "choropleth": [ + { + 
"colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "choropleth" + } + ], + "contour": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "contour" + } + ], + "contourcarpet": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "contourcarpet" + } + ], + "heatmap": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "heatmap" + } + ], + "heatmapgl": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "heatmapgl" + } + ], + "histogram": [ + { + "marker": { + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "histogram" + } + ], + "histogram2d": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "histogram2d" + } + ], + "histogram2dcontour": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "histogram2dcontour" + } + ], + "mesh3d": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "mesh3d" + } + ], + "parcoords": [ + { + "line": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "parcoords" + } + ], + "pie": [ + { + "automargin": true, + "type": "pie" + } + ], + "scatter": [ + { + "fillpattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + }, + "type": "scatter" + } + ], + "scatter3d": [ + { + "line": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "marker": { + "colorbar": { + "outlinewidth": 0, 
+ "ticks": "" + } + }, + "type": "scatter3d" + } + ], + "scattercarpet": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattercarpet" + } + ], + "scattergeo": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattergeo" + } + ], + "scattergl": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattergl" + } + ], + "scattermapbox": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattermapbox" + } + ], + "scatterpolar": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterpolar" + } + ], + "scatterpolargl": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterpolargl" + } + ], + "scatterternary": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterternary" + } + ], + "surface": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "surface" + } + ], + "table": [ + { + "cells": { + "fill": { + "color": "#EBF0F8" + }, + "line": { + "color": "white" + } + }, + "header": { + "fill": { + "color": "#C8D4E3" + }, + "line": { + "color": "white" + } + }, + "type": "table" + } + ] + }, + "layout": { + "annotationdefaults": { + "arrowcolor": "#2a3f5f", + "arrowhead": 0, + "arrowwidth": 1 + }, + "autotypenumbers": "strict", + "coloraxis": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "colorscale": { + "diverging": [ + [ + 0, + "#8e0152" + ], + [ + 0.1, + "#c51b7d" + ], + [ + 0.2, + "#de77ae" + ], + [ + 0.3, + "#f1b6da" + ], + [ + 0.4, + "#fde0ef" + ], + [ + 0.5, + "#f7f7f7" + ], + [ + 0.6, + "#e6f5d0" + ], + [ + 0.7, + "#b8e186" + ], + [ + 0.8, + "#7fbc41" + ], + [ + 0.9, + "#4d9221" + ], + [ + 1, + "#276419" + ] + ], + "sequential": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "sequentialminus": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ] + }, + "colorway": [ + "#636efa", + "#EF553B", + "#00cc96", + "#ab63fa", + "#FFA15A", + "#19d3f3", + "#FF6692", + "#B6E880", + "#FF97FF", + "#FECB52" + ], + "font": { + "color": "#2a3f5f" + }, + "geo": { + "bgcolor": "white", + "lakecolor": "white", + "landcolor": "#E5ECF6", + "showlakes": true, + "showland": true, + "subunitcolor": "white" + }, + "hoverlabel": { + "align": "left" + }, + "hovermode": "closest", + 
"mapbox": { + "style": "light" + }, + "paper_bgcolor": "white", + "plot_bgcolor": "#E5ECF6", + "polar": { + "angularaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "bgcolor": "#E5ECF6", + "radialaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + } + }, + "scene": { + "xaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", + "zerolinecolor": "white" + }, + "yaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", + "zerolinecolor": "white" + }, + "zaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", + "zerolinecolor": "white" + } + }, + "shapedefaults": { + "line": { + "color": "#2a3f5f" + } + }, + "ternary": { + "aaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "baxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "bgcolor": "#E5ECF6", + "caxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + } + }, + "title": { + "x": 0.05 + }, + "xaxis": { + "automargin": true, + "gridcolor": "white", + "linecolor": "white", + "ticks": "", + "title": { + "standoff": 15 + }, + "zerolinecolor": "white", + "zerolinewidth": 2 + }, + "yaxis": { + "automargin": true, + "gridcolor": "white", + "linecolor": "white", + "ticks": "", + "title": { + "standoff": 15 + }, + "zerolinecolor": "white", + "zerolinewidth": 2 + } + } + }, + "title": { + "text": "Parallel Coordinate Plot" + } + } + } + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Visualize high-dimensional parameter relationships. 
\n", + "plot_parallel_coordinate(study)" + ] + }, + { + "cell_type": "code", + "execution_count": 63, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.plotly.v1+json": { + "config": { + "plotlyServerURL": "https://plot.ly" + }, + "data": [ + { + "type": "scatter", + "xaxis": "x", + "yaxis": "y" + }, + { + "colorbar": { + "title": { + "text": "Objective Value" + } + }, + "colorscale": [ + [ + 0, + "rgb(247,251,255)" + ], + [ + 0.125, + "rgb(222,235,247)" + ], + [ + 0.25, + "rgb(198,219,239)" + ], + [ + 0.375, + "rgb(158,202,225)" + ], + [ + 0.5, + "rgb(107,174,214)" + ], + [ + 0.625, + "rgb(66,146,198)" + ], + [ + 0.75, + "rgb(33,113,181)" + ], + [ + 0.875, + "rgb(8,81,156)" + ], + [ + 1, + "rgb(8,48,107)" + ] + ], + "connectgaps": true, + "contours": { + "coloring": "heatmap" + }, + "hoverinfo": "none", + "line": { + "smoothing": 1.3 + }, + "reversescale": true, + "showscale": true, + "type": "contour", + "x": [ + 8, + 32, + 64, + 128, + 256, + 512, + 536 + ], + "xaxis": "x5", + "y": [ + 0.0334069404001943, + 0.04046713177503456, + 0.051928275495301705, + 0.0768868475443224, + 0.0805911907341199, + 0.11575633038847206, + 0.13154994845426374, + 0.13397751341486178, + 0.136820133194068, + 0.16534854040828872, + 0.1816709592718398, + 0.18873115064668006 + ], + "yaxis": "y5", + "z": [ + [ + null, + null, + null, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + 0.4701086956521739, + null, + null + ], + [ + null, + 0.518719806763285, + null, + null, + null, + null, + null + ], + [ + null, + 0.49516908212560384, + null, + null, + null, + null, + null + ], + [ + null, + null, + null, + 0.5199275362318841, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + 0.5, + null + ], + [ + null, + null, + 0.5, + null, + null, + null, + null + ], + [ + null, + null, + 0.5, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + 0.5, + null + ], + [ + null, + null, + null, + null, + 0.5, + null, + null + ], + [ + null, + null, + null, + null, + null, + 0.5, + null + ], + [ + null, + null, + null, + null, + null, + null, + null + ] + ] + }, + { + "marker": { + "color": "black", + "line": { + "color": "Grey", + "width": 2 + } + }, + "mode": "markers", + "showlegend": false, + "type": "scatter", + "x": [ + 128, + 256, + 32, + 64, + 64, + 512, + 256, + 32, + 512, + 512 + ], + "xaxis": "x5", + "y": [ + 0.0805911907341199, + 0.04046713177503456, + 0.051928275495301705, + 0.13154994845426374, + 0.13397751341486178, + 0.136820133194068, + 0.16534854040828872, + 0.0768868475443224, + 0.1816709592718398, + 0.11575633038847206 + ], + "yaxis": "y5" + }, + { + "colorbar": { + "title": { + "text": "Objective Value" + } + }, + "colorscale": [ + [ + 0, + "rgb(247,251,255)" + ], + [ + 0.125, + "rgb(222,235,247)" + ], + [ + 0.25, + "rgb(198,219,239)" + ], + [ + 0.375, + "rgb(158,202,225)" + ], + [ + 0.5, + "rgb(107,174,214)" + ], + [ + 0.625, + "rgb(66,146,198)" + ], + [ + 0.75, + "rgb(33,113,181)" + ], + [ + 0.875, + "rgb(8,81,156)" + ], + [ + 1, + "rgb(8,48,107)" + ] + ], + "connectgaps": true, + "contours": { + "coloring": "heatmap" + }, + "hoverinfo": "none", + "line": { + "smoothing": 1.3 + }, + "reversescale": true, + "showscale": false, + "type": "contour", + "x": [ + 8, + 32, + 64, + 128, + 256, + 512, + 536 + ], + "xaxis": "x9", + "y": [ + 0.6, + 1, + 2, + 4, + 5, + 7, + 9, + 9.4 + ], + "yaxis": "y9", + "z": [ + [ + null, + null, + null, + null, + null, + null, + null + ], + [ + null, + null, + null, + 0.5199275362318841, + 
0.4701086956521739, + null, + null + ], + [ + null, + null, + 0.5, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + 0.5, + null + ], + [ + null, + 0.49516908212560384, + null, + null, + null, + null, + null + ], + [ + null, + 0.518719806763285, + 0.5, + null, + null, + 0.5, + null + ], + [ + null, + null, + null, + null, + 0.5, + 0.5, + null + ], + [ + null, + null, + null, + null, + null, + null, + null + ] + ] + }, + { + "marker": { + "color": "black", + "line": { + "color": "Grey", + "width": 2 + } + }, + "mode": "markers", + "showlegend": false, + "type": "scatter", + "x": [ + 128, + 256, + 32, + 64, + 64, + 512, + 256, + 32, + 512, + 512 + ], + "xaxis": "x9", + "y": [ + 1, + 1, + 7, + 7, + 2, + 4, + 9, + 5, + 7, + 9 + ], + "yaxis": "y9" + }, + { + "colorbar": { + "title": { + "text": "Objective Value" + } + }, + "colorscale": [ + [ + 0, + "rgb(247,251,255)" + ], + [ + 0.125, + "rgb(222,235,247)" + ], + [ + 0.25, + "rgb(198,219,239)" + ], + [ + 0.375, + "rgb(158,202,225)" + ], + [ + 0.5, + "rgb(107,174,214)" + ], + [ + 0.625, + "rgb(66,146,198)" + ], + [ + 0.75, + "rgb(33,113,181)" + ], + [ + 0.875, + "rgb(8,81,156)" + ], + [ + 1, + "rgb(8,48,107)" + ] + ], + "connectgaps": true, + "contours": { + "coloring": "heatmap" + }, + "hoverinfo": "none", + "line": { + "smoothing": 1.3 + }, + "reversescale": true, + "showscale": false, + "type": "contour", + "x": [ + 8, + 32, + 64, + 128, + 256, + 512, + 536 + ], + "xaxis": "x13", + "y": [ + 8.7, + 13, + 24, + 46, + 62, + 65, + 72, + 79, + 80, + 94, + 99, + 103.3 + ], + "yaxis": "y13", + "z": [ + [ + null, + null, + null, + null, + null, + null, + null + ], + [ + null, + null, + null, + 0.5199275362318841, + null, + null, + null + ], + [ + null, + null, + 0.5, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + 0.5, + null + ], + [ + null, + null, + null, + null, + null, + 0.5, + null + ], + [ + null, + null, + null, + null, + 0.4701086956521739, + null, + null + ], + [ + null, + null, + null, + null, + null, + 0.5, + null + ], + [ + null, + 0.518719806763285, + null, + null, + null, + null, + null + ], + [ + null, + null, + 0.5, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + 0.5, + null, + null + ], + [ + null, + 0.49516908212560384, + null, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + null, + null + ] + ] + }, + { + "marker": { + "color": "black", + "line": { + "color": "Grey", + "width": 2 + } + }, + "mode": "markers", + "showlegend": false, + "type": "scatter", + "x": [ + 128, + 256, + 32, + 64, + 64, + 512, + 256, + 32, + 512, + 512 + ], + "xaxis": "x13", + "y": [ + 13, + 65, + 79, + 80, + 24, + 62, + 94, + 99, + 46, + 72 + ], + "yaxis": "y13" + }, + { + "colorbar": { + "title": { + "text": "Objective Value" + } + }, + "colorscale": [ + [ + 0, + "rgb(247,251,255)" + ], + [ + 0.125, + "rgb(222,235,247)" + ], + [ + 0.25, + "rgb(198,219,239)" + ], + [ + 0.375, + "rgb(158,202,225)" + ], + [ + 0.5, + "rgb(107,174,214)" + ], + [ + 0.625, + "rgb(66,146,198)" + ], + [ + 0.75, + "rgb(33,113,181)" + ], + [ + 0.875, + "rgb(8,81,156)" + ], + [ + 1, + "rgb(8,48,107)" + ] + ], + "connectgaps": true, + "contours": { + "coloring": "heatmap" + }, + "hoverinfo": "none", + "line": { + "smoothing": 1.3 + }, + "reversescale": true, + "showscale": false, + "type": "contour", + "x": [ + 0.0334069404001943, + 0.04046713177503456, + 0.051928275495301705, + 0.0768868475443224, + 0.0805911907341199, + 0.11575633038847206, + 
0.13154994845426374, + 0.13397751341486178, + 0.136820133194068, + 0.16534854040828872, + 0.1816709592718398, + 0.18873115064668006 + ], + "xaxis": "x2", + "y": [ + 8, + 32, + 64, + 128, + 256, + 512, + 536 + ], + "yaxis": "y2", + "z": [ + [ + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + [ + null, + null, + 0.518719806763285, + 0.49516908212560384, + null, + null, + null, + null, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + null, + 0.5, + 0.5, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + 0.5199275362318841, + null, + null, + null, + null, + null, + null, + null + ], + [ + null, + 0.4701086956521739, + null, + null, + null, + null, + null, + null, + null, + 0.5, + null, + null + ], + [ + null, + null, + null, + null, + null, + 0.5, + null, + null, + 0.5, + null, + 0.5, + null + ], + [ + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ] + ] + }, + { + "marker": { + "color": "black", + "line": { + "color": "Grey", + "width": 2 + } + }, + "mode": "markers", + "showlegend": false, + "type": "scatter", + "x": [ + 0.0805911907341199, + 0.04046713177503456, + 0.051928275495301705, + 0.13154994845426374, + 0.13397751341486178, + 0.136820133194068, + 0.16534854040828872, + 0.0768868475443224, + 0.1816709592718398, + 0.11575633038847206 + ], + "xaxis": "x2", + "y": [ + 128, + 256, + 32, + 64, + 64, + 512, + 256, + 32, + 512, + 512 + ], + "yaxis": "y2" + }, + { + "type": "scatter", + "xaxis": "x6", + "yaxis": "y6" + }, + { + "colorbar": { + "title": { + "text": "Objective Value" + } + }, + "colorscale": [ + [ + 0, + "rgb(247,251,255)" + ], + [ + 0.125, + "rgb(222,235,247)" + ], + [ + 0.25, + "rgb(198,219,239)" + ], + [ + 0.375, + "rgb(158,202,225)" + ], + [ + 0.5, + "rgb(107,174,214)" + ], + [ + 0.625, + "rgb(66,146,198)" + ], + [ + 0.75, + "rgb(33,113,181)" + ], + [ + 0.875, + "rgb(8,81,156)" + ], + [ + 1, + "rgb(8,48,107)" + ] + ], + "connectgaps": true, + "contours": { + "coloring": "heatmap" + }, + "hoverinfo": "none", + "line": { + "smoothing": 1.3 + }, + "reversescale": true, + "showscale": false, + "type": "contour", + "x": [ + 0.0334069404001943, + 0.04046713177503456, + 0.051928275495301705, + 0.0768868475443224, + 0.0805911907341199, + 0.11575633038847206, + 0.13154994845426374, + 0.13397751341486178, + 0.136820133194068, + 0.16534854040828872, + 0.1816709592718398, + 0.18873115064668006 + ], + "xaxis": "x10", + "y": [ + 0.6, + 1, + 2, + 4, + 5, + 7, + 9, + 9.4 + ], + "yaxis": "y10", + "z": [ + [ + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + [ + null, + 0.4701086956521739, + null, + null, + 0.5199275362318841, + null, + null, + null, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + null, + null, + 0.5, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + null, + null, + null, + 0.5, + null, + null, + null + ], + [ + null, + null, + null, + 0.49516908212560384, + null, + null, + null, + null, + null, + null, + null, + null + ], + [ + null, + null, + 0.518719806763285, + null, + null, + null, + 0.5, + null, + null, + null, + 0.5, + null + ], + [ + null, + null, + null, + null, + null, + 0.5, + null, + null, + null, + 0.5, + null, + null + ], + [ + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ] + ] + }, + { + "marker": { + "color": "black", + "line": { + "color": 
"Grey", + "width": 2 + } + }, + "mode": "markers", + "showlegend": false, + "type": "scatter", + "x": [ + 0.0805911907341199, + 0.04046713177503456, + 0.051928275495301705, + 0.13154994845426374, + 0.13397751341486178, + 0.136820133194068, + 0.16534854040828872, + 0.0768868475443224, + 0.1816709592718398, + 0.11575633038847206 + ], + "xaxis": "x10", + "y": [ + 1, + 1, + 7, + 7, + 2, + 4, + 9, + 5, + 7, + 9 + ], + "yaxis": "y10" + }, + { + "colorbar": { + "title": { + "text": "Objective Value" + } + }, + "colorscale": [ + [ + 0, + "rgb(247,251,255)" + ], + [ + 0.125, + "rgb(222,235,247)" + ], + [ + 0.25, + "rgb(198,219,239)" + ], + [ + 0.375, + "rgb(158,202,225)" + ], + [ + 0.5, + "rgb(107,174,214)" + ], + [ + 0.625, + "rgb(66,146,198)" + ], + [ + 0.75, + "rgb(33,113,181)" + ], + [ + 0.875, + "rgb(8,81,156)" + ], + [ + 1, + "rgb(8,48,107)" + ] + ], + "connectgaps": true, + "contours": { + "coloring": "heatmap" + }, + "hoverinfo": "none", + "line": { + "smoothing": 1.3 + }, + "reversescale": true, + "showscale": false, + "type": "contour", + "x": [ + 0.0334069404001943, + 0.04046713177503456, + 0.051928275495301705, + 0.0768868475443224, + 0.0805911907341199, + 0.11575633038847206, + 0.13154994845426374, + 0.13397751341486178, + 0.136820133194068, + 0.16534854040828872, + 0.1816709592718398, + 0.18873115064668006 + ], + "xaxis": "x14", + "y": [ + 8.7, + 13, + 24, + 46, + 62, + 65, + 72, + 79, + 80, + 94, + 99, + 103.3 + ], + "yaxis": "y14", + "z": [ + [ + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + 0.5199275362318841, + null, + null, + null, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + null, + null, + 0.5, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + 0.5, + null + ], + [ + null, + null, + null, + null, + null, + null, + null, + null, + 0.5, + null, + null, + null + ], + [ + null, + 0.4701086956521739, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + 0.5, + null, + null, + null, + null, + null, + null + ], + [ + null, + null, + 0.518719806763285, + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + null, + 0.5, + null, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + null, + null, + null, + null, + 0.5, + null, + null + ], + [ + null, + null, + null, + 0.49516908212560384, + null, + null, + null, + null, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ] + ] + }, + { + "marker": { + "color": "black", + "line": { + "color": "Grey", + "width": 2 + } + }, + "mode": "markers", + "showlegend": false, + "type": "scatter", + "x": [ + 0.0805911907341199, + 0.04046713177503456, + 0.051928275495301705, + 0.13154994845426374, + 0.13397751341486178, + 0.136820133194068, + 0.16534854040828872, + 0.0768868475443224, + 0.1816709592718398, + 0.11575633038847206 + ], + "xaxis": "x14", + "y": [ + 13, + 65, + 79, + 80, + 24, + 62, + 94, + 99, + 46, + 72 + ], + "yaxis": "y14" + }, + { + "colorbar": { + "title": { + "text": "Objective Value" + } + }, + "colorscale": [ + [ + 0, + "rgb(247,251,255)" + ], + [ + 0.125, + "rgb(222,235,247)" + ], + [ + 0.25, + "rgb(198,219,239)" + ], + [ + 0.375, + "rgb(158,202,225)" + ], + [ + 0.5, + 
"rgb(107,174,214)" + ], + [ + 0.625, + "rgb(66,146,198)" + ], + [ + 0.75, + "rgb(33,113,181)" + ], + [ + 0.875, + "rgb(8,81,156)" + ], + [ + 1, + "rgb(8,48,107)" + ] + ], + "connectgaps": true, + "contours": { + "coloring": "heatmap" + }, + "hoverinfo": "none", + "line": { + "smoothing": 1.3 + }, + "reversescale": true, + "showscale": false, + "type": "contour", + "x": [ + 0.6, + 1, + 2, + 4, + 5, + 7, + 9, + 9.4 + ], + "xaxis": "x3", + "y": [ + 8, + 32, + 64, + 128, + 256, + 512, + 536 + ], + "yaxis": "y3", + "z": [ + [ + null, + null, + null, + null, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + 0.49516908212560384, + 0.518719806763285, + null, + null + ], + [ + null, + null, + 0.5, + null, + null, + 0.5, + null, + null + ], + [ + null, + 0.5199275362318841, + null, + null, + null, + null, + null, + null + ], + [ + null, + 0.4701086956521739, + null, + null, + null, + null, + 0.5, + null + ], + [ + null, + null, + null, + 0.5, + null, + 0.5, + 0.5, + null + ], + [ + null, + null, + null, + null, + null, + null, + null, + null + ] + ] + }, + { + "marker": { + "color": "black", + "line": { + "color": "Grey", + "width": 2 + } + }, + "mode": "markers", + "showlegend": false, + "type": "scatter", + "x": [ + 1, + 1, + 7, + 7, + 2, + 4, + 9, + 5, + 7, + 9 + ], + "xaxis": "x3", + "y": [ + 128, + 256, + 32, + 64, + 64, + 512, + 256, + 32, + 512, + 512 + ], + "yaxis": "y3" + }, + { + "colorbar": { + "title": { + "text": "Objective Value" + } + }, + "colorscale": [ + [ + 0, + "rgb(247,251,255)" + ], + [ + 0.125, + "rgb(222,235,247)" + ], + [ + 0.25, + "rgb(198,219,239)" + ], + [ + 0.375, + "rgb(158,202,225)" + ], + [ + 0.5, + "rgb(107,174,214)" + ], + [ + 0.625, + "rgb(66,146,198)" + ], + [ + 0.75, + "rgb(33,113,181)" + ], + [ + 0.875, + "rgb(8,81,156)" + ], + [ + 1, + "rgb(8,48,107)" + ] + ], + "connectgaps": true, + "contours": { + "coloring": "heatmap" + }, + "hoverinfo": "none", + "line": { + "smoothing": 1.3 + }, + "reversescale": true, + "showscale": false, + "type": "contour", + "x": [ + 0.6, + 1, + 2, + 4, + 5, + 7, + 9, + 9.4 + ], + "xaxis": "x7", + "y": [ + 0.0334069404001943, + 0.04046713177503456, + 0.051928275495301705, + 0.0768868475443224, + 0.0805911907341199, + 0.11575633038847206, + 0.13154994845426374, + 0.13397751341486178, + 0.136820133194068, + 0.16534854040828872, + 0.1816709592718398, + 0.18873115064668006 + ], + "yaxis": "y7", + "z": [ + [ + null, + null, + null, + null, + null, + null, + null, + null + ], + [ + null, + 0.4701086956521739, + null, + null, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + 0.518719806763285, + null, + null + ], + [ + null, + null, + null, + null, + 0.49516908212560384, + null, + null, + null + ], + [ + null, + 0.5199275362318841, + null, + null, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + null, + 0.5, + null + ], + [ + null, + null, + null, + null, + null, + 0.5, + null, + null + ], + [ + null, + null, + 0.5, + null, + null, + null, + null, + null + ], + [ + null, + null, + null, + 0.5, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + null, + 0.5, + null + ], + [ + null, + null, + null, + null, + null, + 0.5, + null, + null + ], + [ + null, + null, + null, + null, + null, + null, + null, + null + ] + ] + }, + { + "marker": { + "color": "black", + "line": { + "color": "Grey", + "width": 2 + } + }, + "mode": "markers", + "showlegend": false, + "type": "scatter", + "x": [ + 1, + 1, + 7, + 7, + 2, + 4, + 9, + 5, + 7, 
+ 9 + ], + "xaxis": "x7", + "y": [ + 0.0805911907341199, + 0.04046713177503456, + 0.051928275495301705, + 0.13154994845426374, + 0.13397751341486178, + 0.136820133194068, + 0.16534854040828872, + 0.0768868475443224, + 0.1816709592718398, + 0.11575633038847206 + ], + "yaxis": "y7" + }, + { + "type": "scatter", + "xaxis": "x11", + "yaxis": "y11" + }, + { + "colorbar": { + "title": { + "text": "Objective Value" + } + }, + "colorscale": [ + [ + 0, + "rgb(247,251,255)" + ], + [ + 0.125, + "rgb(222,235,247)" + ], + [ + 0.25, + "rgb(198,219,239)" + ], + [ + 0.375, + "rgb(158,202,225)" + ], + [ + 0.5, + "rgb(107,174,214)" + ], + [ + 0.625, + "rgb(66,146,198)" + ], + [ + 0.75, + "rgb(33,113,181)" + ], + [ + 0.875, + "rgb(8,81,156)" + ], + [ + 1, + "rgb(8,48,107)" + ] + ], + "connectgaps": true, + "contours": { + "coloring": "heatmap" + }, + "hoverinfo": "none", + "line": { + "smoothing": 1.3 + }, + "reversescale": true, + "showscale": false, + "type": "contour", + "x": [ + 0.6, + 1, + 2, + 4, + 5, + 7, + 9, + 9.4 + ], + "xaxis": "x15", + "y": [ + 8.7, + 13, + 24, + 46, + 62, + 65, + 72, + 79, + 80, + 94, + 99, + 103.3 + ], + "yaxis": "y15", + "z": [ + [ + null, + null, + null, + null, + null, + null, + null, + null + ], + [ + null, + 0.5199275362318841, + null, + null, + null, + null, + null, + null + ], + [ + null, + null, + 0.5, + null, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + 0.5, + null, + null + ], + [ + null, + null, + null, + 0.5, + null, + null, + null, + null + ], + [ + null, + 0.4701086956521739, + null, + null, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + null, + 0.5, + null + ], + [ + null, + null, + null, + null, + null, + 0.518719806763285, + null, + null + ], + [ + null, + null, + null, + null, + null, + 0.5, + null, + null + ], + [ + null, + null, + null, + null, + null, + null, + 0.5, + null + ], + [ + null, + null, + null, + null, + 0.49516908212560384, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + null, + null, + null + ] + ] + }, + { + "marker": { + "color": "black", + "line": { + "color": "Grey", + "width": 2 + } + }, + "mode": "markers", + "showlegend": false, + "type": "scatter", + "x": [ + 1, + 1, + 7, + 7, + 2, + 4, + 9, + 5, + 7, + 9 + ], + "xaxis": "x15", + "y": [ + 13, + 65, + 79, + 80, + 24, + 62, + 94, + 99, + 46, + 72 + ], + "yaxis": "y15" + }, + { + "colorbar": { + "title": { + "text": "Objective Value" + } + }, + "colorscale": [ + [ + 0, + "rgb(247,251,255)" + ], + [ + 0.125, + "rgb(222,235,247)" + ], + [ + 0.25, + "rgb(198,219,239)" + ], + [ + 0.375, + "rgb(158,202,225)" + ], + [ + 0.5, + "rgb(107,174,214)" + ], + [ + 0.625, + "rgb(66,146,198)" + ], + [ + 0.75, + "rgb(33,113,181)" + ], + [ + 0.875, + "rgb(8,81,156)" + ], + [ + 1, + "rgb(8,48,107)" + ] + ], + "connectgaps": true, + "contours": { + "coloring": "heatmap" + }, + "hoverinfo": "none", + "line": { + "smoothing": 1.3 + }, + "reversescale": true, + "showscale": false, + "type": "contour", + "x": [ + 8.7, + 13, + 24, + 46, + 62, + 65, + 72, + 79, + 80, + 94, + 99, + 103.3 + ], + "xaxis": "x4", + "y": [ + 8, + 32, + 64, + 128, + 256, + 512, + 536 + ], + "yaxis": "y4", + "z": [ + [ + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + null, + null, + 0.518719806763285, + null, + null, + 0.49516908212560384, + null + ], + [ + null, + null, + 0.5, + null, + null, + null, + null, + null, + 0.5, + null, + null, + null + 
], + [ + null, + 0.5199275362318841, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + 0.4701086956521739, + null, + null, + null, + 0.5, + null, + null + ], + [ + null, + null, + null, + 0.5, + 0.5, + null, + 0.5, + null, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ] + ] + }, + { + "marker": { + "color": "black", + "line": { + "color": "Grey", + "width": 2 + } + }, + "mode": "markers", + "showlegend": false, + "type": "scatter", + "x": [ + 13, + 65, + 79, + 80, + 24, + 62, + 94, + 99, + 46, + 72 + ], + "xaxis": "x4", + "y": [ + 128, + 256, + 32, + 64, + 64, + 512, + 256, + 32, + 512, + 512 + ], + "yaxis": "y4" + }, + { + "colorbar": { + "title": { + "text": "Objective Value" + } + }, + "colorscale": [ + [ + 0, + "rgb(247,251,255)" + ], + [ + 0.125, + "rgb(222,235,247)" + ], + [ + 0.25, + "rgb(198,219,239)" + ], + [ + 0.375, + "rgb(158,202,225)" + ], + [ + 0.5, + "rgb(107,174,214)" + ], + [ + 0.625, + "rgb(66,146,198)" + ], + [ + 0.75, + "rgb(33,113,181)" + ], + [ + 0.875, + "rgb(8,81,156)" + ], + [ + 1, + "rgb(8,48,107)" + ] + ], + "connectgaps": true, + "contours": { + "coloring": "heatmap" + }, + "hoverinfo": "none", + "line": { + "smoothing": 1.3 + }, + "reversescale": true, + "showscale": false, + "type": "contour", + "x": [ + 8.7, + 13, + 24, + 46, + 62, + 65, + 72, + 79, + 80, + 94, + 99, + 103.3 + ], + "xaxis": "x8", + "y": [ + 0.0334069404001943, + 0.04046713177503456, + 0.051928275495301705, + 0.0768868475443224, + 0.0805911907341199, + 0.11575633038847206, + 0.13154994845426374, + 0.13397751341486178, + 0.136820133194068, + 0.16534854040828872, + 0.1816709592718398, + 0.18873115064668006 + ], + "yaxis": "y8", + "z": [ + [ + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + 0.4701086956521739, + null, + null, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + null, + null, + 0.518719806763285, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + 0.49516908212560384, + null + ], + [ + null, + 0.5199275362318841, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + null, + 0.5, + null, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + null, + null, + null, + 0.5, + null, + null, + null + ], + [ + null, + null, + 0.5, + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + 0.5, + null, + null, + null, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + null, + null, + null, + null, + 0.5, + null, + null + ], + [ + null, + null, + null, + 0.5, + null, + null, + null, + null, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ] + ] + }, + { + "marker": { + "color": "black", + "line": { + "color": "Grey", + "width": 2 + } + }, + "mode": "markers", + "showlegend": false, + "type": "scatter", + "x": [ + 13, + 65, + 79, + 80, + 24, + 62, + 94, + 99, + 46, + 72 + ], + "xaxis": "x8", + "y": [ + 0.0805911907341199, + 0.04046713177503456, + 0.051928275495301705, + 0.13154994845426374, + 0.13397751341486178, + 0.136820133194068, + 0.16534854040828872, + 
0.0768868475443224, + 0.1816709592718398, + 0.11575633038847206 + ], + "yaxis": "y8" + }, + { + "colorbar": { + "title": { + "text": "Objective Value" + } + }, + "colorscale": [ + [ + 0, + "rgb(247,251,255)" + ], + [ + 0.125, + "rgb(222,235,247)" + ], + [ + 0.25, + "rgb(198,219,239)" + ], + [ + 0.375, + "rgb(158,202,225)" + ], + [ + 0.5, + "rgb(107,174,214)" + ], + [ + 0.625, + "rgb(66,146,198)" + ], + [ + 0.75, + "rgb(33,113,181)" + ], + [ + 0.875, + "rgb(8,81,156)" + ], + [ + 1, + "rgb(8,48,107)" + ] + ], + "connectgaps": true, + "contours": { + "coloring": "heatmap" + }, + "hoverinfo": "none", + "line": { + "smoothing": 1.3 + }, + "reversescale": true, + "showscale": false, + "type": "contour", + "x": [ + 8.7, + 13, + 24, + 46, + 62, + 65, + 72, + 79, + 80, + 94, + 99, + 103.3 + ], + "xaxis": "x12", + "y": [ + 0.6, + 1, + 2, + 4, + 5, + 7, + 9, + 9.4 + ], + "yaxis": "y12", + "z": [ + [ + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + [ + null, + 0.5199275362318841, + null, + null, + null, + 0.4701086956521739, + null, + null, + null, + null, + null, + null + ], + [ + null, + null, + 0.5, + null, + null, + null, + null, + null, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + 0.5, + null, + null, + null, + null, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + 0.49516908212560384, + null + ], + [ + null, + null, + null, + 0.5, + null, + null, + null, + 0.518719806763285, + 0.5, + null, + null, + null + ], + [ + null, + null, + null, + null, + null, + null, + 0.5, + null, + null, + 0.5, + null, + null + ], + [ + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null + ] + ] + }, + { + "marker": { + "color": "black", + "line": { + "color": "Grey", + "width": 2 + } + }, + "mode": "markers", + "showlegend": false, + "type": "scatter", + "x": [ + 13, + 65, + 79, + 80, + 24, + 62, + 94, + 99, + 46, + 72 + ], + "xaxis": "x12", + "y": [ + 1, + 1, + 7, + 7, + 2, + 4, + 9, + 5, + 7, + 9 + ], + "yaxis": "y12" + }, + { + "type": "scatter", + "xaxis": "x16", + "yaxis": "y16" + } + ], + "layout": { + "template": { + "data": { + "bar": [ + { + "error_x": { + "color": "#2a3f5f" + }, + "error_y": { + "color": "#2a3f5f" + }, + "marker": { + "line": { + "color": "#E5ECF6", + "width": 0.5 + }, + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "bar" + } + ], + "barpolar": [ + { + "marker": { + "line": { + "color": "#E5ECF6", + "width": 0.5 + }, + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "barpolar" + } + ], + "carpet": [ + { + "aaxis": { + "endlinecolor": "#2a3f5f", + "gridcolor": "white", + "linecolor": "white", + "minorgridcolor": "white", + "startlinecolor": "#2a3f5f" + }, + "baxis": { + "endlinecolor": "#2a3f5f", + "gridcolor": "white", + "linecolor": "white", + "minorgridcolor": "white", + "startlinecolor": "#2a3f5f" + }, + "type": "carpet" + } + ], + "choropleth": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "choropleth" + } + ], + "contour": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 
0.7777777777777778, "#fb9f3a" ], [ 0.8888888888888888, "#fdca26" ], [ 1, "#f0f921" ] ],
+        "[remaining Plotly template and figure JSON elided: a 4x4 contour-plot grid titled 'Contour Plot'; axes batch_size (8 to 536), dropout (0.033 to 0.189), n_layers_hidden (0.6 to 9.4), n_units_hidden (8.7 to 103.3)]"
+      },
+      "metadata": {},
+      "output_type": "display_data"
+     }
+    ],
+    "source": [
+     "# Visualize hyperparameter relationships.\n",
+     "plot_contour(study, params=['batch_size', 'dropout', 'n_layers_hidden', 'n_units_hidden'])"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# Visualize individual hyperparameters as slice plot.\n",
+     "plot_slice(study)"
+    ]
+   },
"base_transform_type (CategoricalDistribution): 0.03608782621904944", + "n_units_hidden (IntUniformDistribution): 0.056248651052508904", + "batch_size (CategoricalDistribution): 0.06533755831900438", + "n_layers_hidden (IntUniformDistribution): 0.07065620910903445", + "n_iter (IntUniformDistribution): 0.253054441613576", + "dropout (UniformDistribution): 0.45830296971893575" + ], + "marker": { + "color": "rgb(66,146,198)" + }, + "orientation": "h", + "text": [ + "<0.01", + "0.02", + "0.03", + "0.04", + "0.06", + "0.07", + "0.07", + "0.25", + "0.46" + ], + "textposition": "outside", + "type": "bar", + "x": [ + 0.00846909212308733, + 0.017513441865308507, + 0.03432980997949527, + 0.03608782621904944, + 0.056248651052508904, + 0.06533755831900438, + 0.07065620910903445, + 0.253054441613576, + 0.45830296971893575 + ], + "y": [ + "lr", + "linear_transform_type", + "batch_norm", + "base_transform_type", + "n_units_hidden", + "batch_size", + "n_layers_hidden", + "n_iter", + "dropout" + ] + } + ], + "layout": { + "showlegend": false, + "template": { + "data": { + "bar": [ + { + "error_x": { + "color": "#2a3f5f" + }, + "error_y": { + "color": "#2a3f5f" + }, + "marker": { + "line": { + "color": "#E5ECF6", + "width": 0.5 + }, + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "bar" + } + ], + "barpolar": [ + { + "marker": { + "line": { + "color": "#E5ECF6", + "width": 0.5 + }, + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "barpolar" + } + ], + "carpet": [ + { + "aaxis": { + "endlinecolor": "#2a3f5f", + "gridcolor": "white", + "linecolor": "white", + "minorgridcolor": "white", + "startlinecolor": "#2a3f5f" + }, + "baxis": { + "endlinecolor": "#2a3f5f", + "gridcolor": "white", + "linecolor": "white", + "minorgridcolor": "white", + "startlinecolor": "#2a3f5f" + }, + "type": "carpet" + } + ], + "choropleth": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "choropleth" + } + ], + "contour": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "contour" + } + ], + "contourcarpet": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "contourcarpet" + } + ], + "heatmap": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "heatmap" + } + ], + "heatmapgl": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 
0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "heatmapgl" + } + ], + "histogram": [ + { + "marker": { + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "histogram" + } + ], + "histogram2d": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "histogram2d" + } + ], + "histogram2dcontour": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "histogram2dcontour" + } + ], + "mesh3d": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "mesh3d" + } + ], + "parcoords": [ + { + "line": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "parcoords" + } + ], + "pie": [ + { + "automargin": true, + "type": "pie" + } + ], + "scatter": [ + { + "fillpattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + }, + "type": "scatter" + } + ], + "scatter3d": [ + { + "line": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatter3d" + } + ], + "scattercarpet": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattercarpet" + } + ], + "scattergeo": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattergeo" + } + ], + "scattergl": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattergl" + } + ], + "scattermapbox": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattermapbox" + } + ], + "scatterpolar": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterpolar" + } + ], + "scatterpolargl": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterpolargl" + } + ], + "scatterternary": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterternary" + } + ], + "surface": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "surface" + } + ], + "table": [ + { + "cells": { + "fill": { + "color": "#EBF0F8" + }, + "line": { + "color": "white" + } + }, + "header": { + 
"fill": { + "color": "#C8D4E3" + }, + "line": { + "color": "white" + } + }, + "type": "table" + } + ] + }, + "layout": { + "annotationdefaults": { + "arrowcolor": "#2a3f5f", + "arrowhead": 0, + "arrowwidth": 1 + }, + "autotypenumbers": "strict", + "coloraxis": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "colorscale": { + "diverging": [ + [ + 0, + "#8e0152" + ], + [ + 0.1, + "#c51b7d" + ], + [ + 0.2, + "#de77ae" + ], + [ + 0.3, + "#f1b6da" + ], + [ + 0.4, + "#fde0ef" + ], + [ + 0.5, + "#f7f7f7" + ], + [ + 0.6, + "#e6f5d0" + ], + [ + 0.7, + "#b8e186" + ], + [ + 0.8, + "#7fbc41" + ], + [ + 0.9, + "#4d9221" + ], + [ + 1, + "#276419" + ] + ], + "sequential": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "sequentialminus": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ] + }, + "colorway": [ + "#636efa", + "#EF553B", + "#00cc96", + "#ab63fa", + "#FFA15A", + "#19d3f3", + "#FF6692", + "#B6E880", + "#FF97FF", + "#FECB52" + ], + "font": { + "color": "#2a3f5f" + }, + "geo": { + "bgcolor": "white", + "lakecolor": "white", + "landcolor": "#E5ECF6", + "showlakes": true, + "showland": true, + "subunitcolor": "white" + }, + "hoverlabel": { + "align": "left" + }, + "hovermode": "closest", + "mapbox": { + "style": "light" + }, + "paper_bgcolor": "white", + "plot_bgcolor": "#E5ECF6", + "polar": { + "angularaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "bgcolor": "#E5ECF6", + "radialaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + } + }, + "scene": { + "xaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", + "zerolinecolor": "white" + }, + "yaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", + "zerolinecolor": "white" + }, + "zaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", + "zerolinecolor": "white" + } + }, + "shapedefaults": { + "line": { + "color": "#2a3f5f" + } + }, + "ternary": { + "aaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "baxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "bgcolor": "#E5ECF6", + "caxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + } + }, + "title": { + "x": 0.05 + }, + "xaxis": { + "automargin": true, + "gridcolor": "white", + "linecolor": "white", + "ticks": "", + "title": { + "standoff": 15 + }, + "zerolinecolor": "white", + "zerolinewidth": 2 + }, + "yaxis": { + "automargin": true, + "gridcolor": "white", + "linecolor": "white", + "ticks": "", + "title": { + "standoff": 15 + }, + "zerolinecolor": "white", + "zerolinewidth": 2 + } + } + }, 
+ "title": { + "text": "Hyperparameter Importances" + }, + "xaxis": { + "title": { + "text": "Importance for Objective Value" + } + }, + "yaxis": { + "title": { + "text": "Hyperparameter" + } + } + } + } + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Visualize parameter importances.\n", + "plot_param_importances(study)" + ] + }, + { + "cell_type": "code", + "execution_count": 60, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.plotly.v1+json": { + "config": { + "plotlyServerURL": "https://plot.ly" + }, + "data": [ + { + "cliponaxis": false, + "hovertemplate": [ + "batch_size (CategoricalDistribution): 0.0031661764258092114", + "batch_norm (CategoricalDistribution): 0.005887013322608155", + "linear_transform_type (CategoricalDistribution): 0.007340845659453336", + "n_layers_hidden (IntUniformDistribution): 0.018588433379606542", + "lr (CategoricalDistribution): 0.03405957797841154", + "n_iter (IntUniformDistribution): 0.049231044021663506", + "dropout (UniformDistribution): 0.055752145582047774", + "base_transform_type (CategoricalDistribution): 0.1964047830096296", + "n_units_hidden (IntUniformDistribution): 0.6295699806207704" + ], + "marker": { + "color": "rgb(66,146,198)" + }, + "orientation": "h", + "text": [ + "<0.01", + "<0.01", + "<0.01", + "0.02", + "0.03", + "0.05", + "0.06", + "0.20", + "0.63" + ], + "textposition": "outside", + "type": "bar", + "x": [ + 0.0031661764258092114, + 0.005887013322608155, + 0.007340845659453336, + 0.018588433379606542, + 0.03405957797841154, + 0.049231044021663506, + 0.055752145582047774, + 0.1964047830096296, + 0.6295699806207704 + ], + "y": [ + "batch_size", + "batch_norm", + "linear_transform_type", + "n_layers_hidden", + "lr", + "n_iter", + "dropout", + "base_transform_type", + "n_units_hidden" + ] + } + ], + "layout": { + "showlegend": false, + "template": { + "data": { + "bar": [ + { + "error_x": { + "color": "#2a3f5f" + }, + "error_y": { + "color": "#2a3f5f" + }, + "marker": { + "line": { + "color": "#E5ECF6", + "width": 0.5 + }, + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "bar" + } + ], + "barpolar": [ + { + "marker": { + "line": { + "color": "#E5ECF6", + "width": 0.5 + }, + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "barpolar" + } + ], + "carpet": [ + { + "aaxis": { + "endlinecolor": "#2a3f5f", + "gridcolor": "white", + "linecolor": "white", + "minorgridcolor": "white", + "startlinecolor": "#2a3f5f" + }, + "baxis": { + "endlinecolor": "#2a3f5f", + "gridcolor": "white", + "linecolor": "white", + "minorgridcolor": "white", + "startlinecolor": "#2a3f5f" + }, + "type": "carpet" + } + ], + "choropleth": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "choropleth" + } + ], + "contour": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "contour" + } + ], + "contourcarpet": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "contourcarpet" + } + ], + "heatmap": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + 
"colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "heatmap" + } + ], + "heatmapgl": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "heatmapgl" + } + ], + "histogram": [ + { + "marker": { + "pattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + } + }, + "type": "histogram" + } + ], + "histogram2d": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "histogram2d" + } + ], + "histogram2dcontour": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "histogram2dcontour" + } + ], + "mesh3d": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "type": "mesh3d" + } + ], + "parcoords": [ + { + "line": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "parcoords" + } + ], + "pie": [ + { + "automargin": true, + "type": "pie" + } + ], + "scatter": [ + { + "fillpattern": { + "fillmode": "overlay", + "size": 10, + "solidity": 0.2 + }, + "type": "scatter" + } + ], + "scatter3d": [ + { + "line": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatter3d" + } + ], + "scattercarpet": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattercarpet" + } + ], + "scattergeo": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattergeo" + } + ], + "scattergl": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattergl" + } + ], + "scattermapbox": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scattermapbox" + } + ], + "scatterpolar": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterpolar" + } + ], + "scatterpolargl": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + 
} + }, + "type": "scatterpolargl" + } + ], + "scatterternary": [ + { + "marker": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "type": "scatterternary" + } + ], + "surface": [ + { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + }, + "colorscale": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "type": "surface" + } + ], + "table": [ + { + "cells": { + "fill": { + "color": "#EBF0F8" + }, + "line": { + "color": "white" + } + }, + "header": { + "fill": { + "color": "#C8D4E3" + }, + "line": { + "color": "white" + } + }, + "type": "table" + } + ] + }, + "layout": { + "annotationdefaults": { + "arrowcolor": "#2a3f5f", + "arrowhead": 0, + "arrowwidth": 1 + }, + "autotypenumbers": "strict", + "coloraxis": { + "colorbar": { + "outlinewidth": 0, + "ticks": "" + } + }, + "colorscale": { + "diverging": [ + [ + 0, + "#8e0152" + ], + [ + 0.1, + "#c51b7d" + ], + [ + 0.2, + "#de77ae" + ], + [ + 0.3, + "#f1b6da" + ], + [ + 0.4, + "#fde0ef" + ], + [ + 0.5, + "#f7f7f7" + ], + [ + 0.6, + "#e6f5d0" + ], + [ + 0.7, + "#b8e186" + ], + [ + 0.8, + "#7fbc41" + ], + [ + 0.9, + "#4d9221" + ], + [ + 1, + "#276419" + ] + ], + "sequential": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ], + "sequentialminus": [ + [ + 0, + "#0d0887" + ], + [ + 0.1111111111111111, + "#46039f" + ], + [ + 0.2222222222222222, + "#7201a8" + ], + [ + 0.3333333333333333, + "#9c179e" + ], + [ + 0.4444444444444444, + "#bd3786" + ], + [ + 0.5555555555555556, + "#d8576b" + ], + [ + 0.6666666666666666, + "#ed7953" + ], + [ + 0.7777777777777778, + "#fb9f3a" + ], + [ + 0.8888888888888888, + "#fdca26" + ], + [ + 1, + "#f0f921" + ] + ] + }, + "colorway": [ + "#636efa", + "#EF553B", + "#00cc96", + "#ab63fa", + "#FFA15A", + "#19d3f3", + "#FF6692", + "#B6E880", + "#FF97FF", + "#FECB52" + ], + "font": { + "color": "#2a3f5f" + }, + "geo": { + "bgcolor": "white", + "lakecolor": "white", + "landcolor": "#E5ECF6", + "showlakes": true, + "showland": true, + "subunitcolor": "white" + }, + "hoverlabel": { + "align": "left" + }, + "hovermode": "closest", + "mapbox": { + "style": "light" + }, + "paper_bgcolor": "white", + "plot_bgcolor": "#E5ECF6", + "polar": { + "angularaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "bgcolor": "#E5ECF6", + "radialaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + } + }, + "scene": { + "xaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", + "zerolinecolor": "white" + }, + "yaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + "showbackground": true, + "ticks": "", + "zerolinecolor": "white" + }, + "zaxis": { + "backgroundcolor": "#E5ECF6", + "gridcolor": "white", + "gridwidth": 2, + "linecolor": "white", + 
"showbackground": true, + "ticks": "", + "zerolinecolor": "white" + } + }, + "shapedefaults": { + "line": { + "color": "#2a3f5f" + } + }, + "ternary": { + "aaxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "baxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + }, + "bgcolor": "#E5ECF6", + "caxis": { + "gridcolor": "white", + "linecolor": "white", + "ticks": "" + } + }, + "title": { + "x": 0.05 + }, + "xaxis": { + "automargin": true, + "gridcolor": "white", + "linecolor": "white", + "ticks": "", + "title": { + "standoff": 15 + }, + "zerolinecolor": "white", + "zerolinewidth": 2 + }, + "yaxis": { + "automargin": true, + "gridcolor": "white", + "linecolor": "white", + "ticks": "", + "title": { + "standoff": 15 + }, + "zerolinecolor": "white", + "zerolinewidth": 2 + } + } + }, + "title": { + "text": "Hyperparameter Importances" + }, + "xaxis": { + "title": { + "text": "Importance for duration" + } + }, + "yaxis": { + "title": { + "text": "Hyperparameter" + } + } + } + } + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Learn which hyperparameters are affecting the trial duration with hyperparameter importance.\n", + "optuna.visualization.plot_param_importances(\n", + " study, target=lambda t: t.duration.total_seconds(), target_name=\"duration\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 66, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.plotly.v1+json": { + "config": { + "plotlyServerURL": "https://plot.ly" + }, + "data": [ + { + "mode": "lines", + "name": "no-name-ce6b3d6c-0504-44aa-a791-928abb036a1c", + "type": "scatter", + "x": [ + 0.4701086956521739, + 0.47061191626409016, + 0.4711151368760064, + 0.4716183574879227, + 0.47212157809983896, + 0.4726247987117552, + 0.4731280193236715, + 0.47363123993558776, + 0.474134460547504, + 0.4746376811594203, + 0.47514090177133655, + 0.4756441223832528, + 0.4761473429951691, + 0.47665056360708535, + 0.4771537842190016, + 0.4776570048309179, + 0.47816022544283415, + 0.4786634460547504, + 0.4791666666666667, + 0.47966988727858295, + 0.4801731078904992, + 0.4806763285024155, + 0.48117954911433175, + 0.481682769726248, + 0.4821859903381643, + 0.48268921095008055, + 0.48319243156199676, + 0.483695652173913, + 0.4841988727858293, + 0.48470209339774556, + 0.4852053140096618, + 0.4857085346215781, + 0.48621175523349436, + 0.4867149758454106, + 0.4872181964573269, + 0.48772141706924316, + 0.4882246376811594, + 0.4887278582930757, + 0.48923107890499196, + 0.4897342995169082, + 0.4902375201288245, + 0.49074074074074076, + 0.491243961352657, + 0.4917471819645733, + 0.49225040257648955, + 0.4927536231884058, + 0.4932568438003221, + 0.49376006441223835, + 0.4942632850241546, + 0.4947665056360709, + 0.49526972624798715, + 0.4957729468599034, + 0.4962761674718197, + 0.49677938808373595, + 0.4972826086956522, + 0.4977858293075685, + 0.49828904991948475, + 0.498792270531401, + 0.4992954911433173, + 0.49979871175523355, + 0.5003019323671498, + 0.500805152979066, + 0.5013083735909823, + 0.5018115942028986, + 0.5023148148148149, + 0.5028180354267311, + 0.5033212560386474, + 0.5038244766505636, + 0.50432769726248, + 0.5048309178743962, + 0.5053341384863125, + 0.5058373590982287, + 0.506340579710145, + 0.5068438003220612, + 0.5073470209339775, + 0.5078502415458938, + 0.5083534621578101, + 0.5088566827697263, + 0.5093599033816426, + 0.5098631239935588, + 0.5103663446054751, + 0.5108695652173914, + 0.5113727858293077, + 0.5118760064412239, + 
+   {
+    "cell_type": "code",
+    "execution_count": 66,
+    "metadata": {},
+    "outputs": [
+     {
+      "data": {
+       "application/vnd.plotly.v1+json": [
+        "[Plotly line chart elided: 'Empirical Distribution Function Plot', x axis 'Objective Value' (about 0.47 to 0.52),",
+        " y axis 'Cumulative Probability' (0 to 1), rising in steps from 0.1 to 1.0]"
+       ]
+      },
+      "metadata": {},
+      "output_type": "display_data"
+     }
+    ],
+    "source": [
+     "# Visualize empirical distribution function of the objective.\n",
+     "plot_edf(study)"
+    ]
+   },
+   {
+    "attachments": {},
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "## Test performance of the optimized plugin"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 65,
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stderr",
+      "output_type": "stream",
+      "text": [
+       "[2023-04-07T22:13:18.269044+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n",
+       "100%|██████████| 100/100 [00:03<00:00, 30.59it/s]\n"
+      ]
+     },
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "\n",
+       "\u001b[4m\u001b[1mPlugin : test\u001b[0m\u001b[0m\n"
+      ]
+     },
+     {
+      "data": {
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
minmaxmeanstddevmedianiqrroundserrorsdurations
detection.detection_xgb.mean1.0000001.0000001.0000000.01.0000000.0100.14
detection.detection_mlp.mean0.5854410.5854410.5854410.00.5854410.0101.82
\n", + "
" + ], + "text/plain": [ + " min max mean stddev median \\\n", + "detection.detection_xgb.mean 1.000000 1.000000 1.000000 0.0 1.000000 \n", + "detection.detection_mlp.mean 0.585441 0.585441 0.585441 0.0 0.585441 \n", + "\n", + " iqr rounds errors durations \n", + "detection.detection_xgb.mean 0.0 1 0 0.14 \n", + "detection.detection_mlp.mean 0.0 1 0 1.82 " + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + } + ], + "source": [ + "best_params = study.best_params\n", + "best_params['n_iter'] = 100\n", + "report = Benchmarks.evaluate(\n", + " [(\"test\", PLUGIN, best_params)],\n", + " train_loader,\n", + " test_loader,\n", + " repeats=1,\n", + " metrics={\"detection\": [\"detection_mlp\", \"detection_xgb\"]}, # DELETE THIS LINE FOR ALL METRICS\n", + ")\n", + "Benchmarks.print(report)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Congratulations!\n", + "\n", + "Congratulations on completing this notebook tutorial! If you enjoyed this and would like to join the movement towards Machine learning and AI for medicine, you can do so in the following ways!\n", + "\n", + "### Star [Synthcity](https://github.com/vanderschaarlab/synthcity) on GitHub\n", + "\n", + "- The easiest way to help our community is just by starring the Repos! This helps raise awareness of the tools we're building.\n", + "\n", + "\n", + "### Checkout other projects from vanderschaarlab\n", + "- [HyperImpute](https://github.com/vanderschaarlab/hyperimpute)\n", + "- [AutoPrognosis](https://github.com/vanderschaarlab/autoprognosis)\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "synthcity", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.9" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} From 2fb8508939d583acce06a05a77a599773d469842 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Sat, 8 Apr 2023 19:14:54 +0200 Subject: [PATCH 59/95] update --- tests/plugins/generic/test_ddpm.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/plugins/generic/test_ddpm.py b/tests/plugins/generic/test_ddpm.py index ae0462d8..0ba32d81 100644 --- a/tests/plugins/generic/test_ddpm.py +++ b/tests/plugins/generic/test_ddpm.py @@ -18,11 +18,10 @@ plugin_name = "ddpm" plugin_params = dict( - n_iter=500, + n_iter=1000, batch_size=1000, num_timesteps=100, model_type="mlp", - sampling_patience=100, ) @@ -122,7 +121,7 @@ def test_plugin_hyperparams(test_plugin: Plugin) -> None: def test_sample_hyperparams() -> None: - for i in range(100): + for _ in range(100): args = plugin.sample_hyperparameters() assert plugin(**args) is not None From 5adfabfd435c33cb004ed9bd0ab5a56d070f895d Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Sat, 8 Apr 2023 19:16:13 +0200 Subject: [PATCH 60/95] Fix plugin type of static_model of fflows --- src/synthcity/plugins/time_series/plugin_fflows.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/synthcity/plugins/time_series/plugin_fflows.py b/src/synthcity/plugins/time_series/plugin_fflows.py index d357ea24..d62557c0 100644 --- a/src/synthcity/plugins/time_series/plugin_fflows.py +++ 
b/src/synthcity/plugins/time_series/plugin_fflows.py
@@ -11,6 +11,7 @@
 from fflows import FourierFlow

 # synthcity absolute
+from synthcity.plugins import Plugins
 from synthcity.plugins.core.dataloader import DataLoader
 from synthcity.plugins.core.distribution import (
     CategoricalDistribution,
@@ -24,7 +25,6 @@
 from synthcity.plugins.core.models.ts_model import TimeSeriesModel
 from synthcity.plugins.core.plugin import Plugin
 from synthcity.plugins.core.schema import Schema
-from synthcity.plugins.generic import GenericPlugins
 from synthcity.utils.constants import DEVICE


@@ -134,9 +134,7 @@ def __init__(
             normalize=normalize,
         ).to(device)

-        self.static_model = GenericPlugins().get(
-            self.static_model_name, device=self.device
-        )
+        self.static_model = Plugins().get(self.static_model_name, device=self.device)

         self.temporal_encoder = TimeSeriesTabularEncoder(
             max_clusters=encoder_max_clusters

From a2a88c51fd0bf33d70d5e5f3ce7cfeb75a73ead3 Mon Sep 17 00:00:00 2001
From: TZCai <13818704679@163.com>
Date: Sat, 8 Apr 2023 21:39:13 +0200
Subject: [PATCH 61/95] update intlogdistribution and tutorial

---
 src/synthcity/plugins/core/distribution.py    |   12 +-
 src/synthcity/utils/optuna_sample.py          |    6 -
 ...utorial8_hyperparameter_optimization.ipynb | 4316 +++++++++++------
 3 files changed, 2828 insertions(+), 1506 deletions(-)

diff --git a/src/synthcity/plugins/core/distribution.py b/src/synthcity/plugins/core/distribution.py
index 06b3a99b..9934539c 100644
--- a/src/synthcity/plugins/core/distribution.py
+++ b/src/synthcity/plugins/core/distribution.py
@@ -353,25 +353,23 @@ def dtype(self) -> str:
 class IntLogDistribution(IntegerDistribution):
     low: int = 1
     high: int = np.iinfo(np.int64).max
-    step: int = 2  # the next sample larger than x is step * x

     @validator("step", always=True)
     def _validate_step(cls: Any, v: int, values: Dict) -> int:
-        if v < 2:
-            raise ValueError("Step must be greater than 1")
+        if v != 1:
+            raise ValueError("Step must be 1 for IntLogDistribution")
         return v

     def get(self) -> List[Any]:
-        return [self.name, self.low, self.high, self.step]
+        return [self.name, self.low, self.high]

     def sample(self, count: int = 1) -> Any:
         np.random.seed(self.random_state)
         msamples = self.sample_marginal(count)
         if msamples is not None:
             return msamples
-        steps = int(np.log2(self.high / self.low) / np.log2(self.step))
-        samples = np.random.choice(steps + 1, count)
-        samples = self.low * self.step**samples
+        lo, hi = np.log2(self.low), np.log2(self.high)
+        samples = 2.0 ** np.random.uniform(lo, hi, count)
         return samples.astype(int)
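The distribution.py change above makes IntLogDistribution draw integers log-uniformly over the whole range [low, high], instead of returning only the values low * step**k as before. A standalone sketch of the new sampling rule (illustrative low/high values; not part of the patch):

    import numpy as np

    low, high = 1, 1024  # illustrative bounds
    lo, hi = np.log2(low), np.log2(high)
    # Exponents are drawn uniformly, so each octave [x, 2x) receives roughly
    # equal probability mass, and any integer in [low, high] can be produced.
    samples = (2.0 ** np.random.uniform(lo, hi, size=10_000)).astype(int)

This is also why the companion change to optuna_sample.py below can drop its "does not handle step yet" caveat: trial.suggest_int(..., log=True) assumes exactly this log-uniform behavior.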
diff --git a/src/synthcity/utils/optuna_sample.py b/src/synthcity/utils/optuna_sample.py
index c19dae66..87b7aafc 100644
--- a/src/synthcity/utils/optuna_sample.py
+++ b/src/synthcity/utils/optuna_sample.py
@@ -16,15 +16,9 @@ def suggest(trial: optuna.Trial, dist: D.Distribution) -> Any:
     elif isinstance(dist, D.IntegerDistribution):
         return trial.suggest_int(dist.name, dist.low, dist.high, dist.step)
     elif isinstance(dist, D.IntLogDistribution):
-        # ! does not handle step yet
         return trial.suggest_int(dist.name, dist.low, dist.high, log=True)
     elif isinstance(dist, D.CategoricalDistribution):
         return trial.suggest_categorical(dist.name, dist.choices)
-    # ! the modification cannot be reflected in study.best_params
-    # elif isinstance(dist, D.DatetimeDistribution):
-    #     high = (dist.high - dist.low) / dist.step
-    #     s = trial.suggest_float(dist.name, 0, high)
-    #     return dist.low + dist.step * s
     else:
         raise ValueError(f"Unknown dist: {dist}")

diff --git a/tutorials/tutorial8_hyperparameter_optimization.ipynb b/tutorials/tutorial8_hyperparameter_optimization.ipynb
index 4cf7c965..f95c1e0c 100644
--- a/tutorials/tutorial8_hyperparameter_optimization.ipynb
+++ b/tutorials/tutorial8_hyperparameter_optimization.ipynb
@@ -298,7 +298,7 @@
 },
 {
  "cell_type": "code",
- "execution_count": 41,
+ "execution_count": 3,
  "metadata": {},
  "outputs": [],
  "source": [
@@ -320,14 +320,15 @@
 },
 {
  "cell_type": "code",
- "execution_count": 50,
+ "execution_count": 4,
  "metadata": {},
  "outputs": [
   {
    "name": "stderr",
    "output_type": "stream",
    "text": [
-    "[2023-04-07T21:51:56.689921+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n"
+    "[2023-04-08T20:56:15.722354+0200][28420][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n",
+    "[2023-04-08T20:56:15.722354+0200][28420][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n"
    ]
   },
   {
@@ -336,7 +337,7 @@
     "synthcity.plugins.generic.plugin_nflow.NormalizingFlowsPlugin"
    ]
   },
-  "execution_count": 50,
+  "execution_count": 4,
   "metadata": {},
   "output_type": "execute_result"
  }
@@ -357,7 +358,7 @@
 },
 {
  "cell_type": "code",
- "execution_count": 51,
+ "execution_count": 5,
  "metadata": {},
  "outputs": [
   {
@@ -368,13 +369,13 @@
      " IntegerDistribution(name='n_units_hidden', data=None, random_state=0, marginal_distribution=None, low=10, high=100, step=1),\n",
      " CategoricalDistribution(name='batch_size', data=None, random_state=0, marginal_distribution=None, choices=[32, 64, 128, 256, 512]),\n",
      " FloatDistribution(name='dropout', data=None, random_state=0, marginal_distribution=None, low=0.0, high=0.2),\n",
-     " CategoricalDistribution(name='batch_norm', data=None, random_state=0, marginal_distribution=None, choices=[True, False]),\n",
-     " CategoricalDistribution(name='lr', data=None, random_state=0, marginal_distribution=None, choices=[0.001, 0.0001, 0.0002]),\n",
+     " CategoricalDistribution(name='batch_norm', data=None, random_state=0, marginal_distribution=None, choices=[False, True]),\n",
+     " CategoricalDistribution(name='lr', data=None, random_state=0, marginal_distribution=None, choices=[0.0001, 0.0002, 0.001]),\n",
      " CategoricalDistribution(name='linear_transform_type', data=None, random_state=0, marginal_distribution=None, choices=['lu', 'permutation', 'svd']),\n",
-     " CategoricalDistribution(name='base_transform_type', data=None, random_state=0, marginal_distribution=None, choices=['affine-coupling', 'quadratic-coupling', 'rq-coupling', 'affine-autoregressive', 'quadratic-autoregressive', 'rq-autoregressive'])]"
+     " CategoricalDistribution(name='base_transform_type', data=None, random_state=0, marginal_distribution=None, choices=['affine-autoregressive', 'affine-coupling', 'quadratic-autoregressive', 'quadratic-coupling', 'rq-autoregressive', 'rq-coupling'])]"
      ]
     },
-    "execution_count": 51,
+    "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
@@ -393,24 +394,24 @@
 },
 {
  "cell_type": "code",
- "execution_count": 52,
+ "execution_count": 8,
  "metadata": {},
  "outputs": [
   {
    "data": {
     "text/plain": [
-     "{'n_iter': 486,\n",
-     " 
'n_layers_hidden': 10,\n", + "{'n_iter': 100,\n", + " 'n_layers_hidden': 1,\n", " 'n_units_hidden': 87,\n", - " 'batch_size': 512,\n", - " 'dropout': 0.016022465975681178,\n", - " 'batch_norm': True,\n", + " 'batch_size': 256,\n", + " 'dropout': 0.15424246144819787,\n", + " 'batch_norm': False,\n", " 'lr': 0.001,\n", " 'linear_transform_type': 'svd',\n", - " 'base_transform_type': 'affine-coupling'}" + " 'base_transform_type': 'rq-autoregressive'}" ] }, - "execution_count": 52, + "execution_count": 8, "metadata": {}, "output_type": "execute_result" } @@ -420,6 +421,7 @@ "\n", "trial = optuna.create_study().ask()\n", "params = suggest_all(trial, plugin_cls.hyperparameter_space())\n", + "params['n_iter'] = 100 # speed up\n", "params" ] }, @@ -433,16 +435,16 @@ }, { "cell_type": "code", - "execution_count": 53, + "execution_count": 9, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - " 62%|██████▏ | 299/486 [01:26<00:53, 3.47it/s]\n", - "[2023-04-07T21:53:29.785866+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - " 62%|██████▏ | 299/486 [01:30<00:56, 3.31it/s]\n" + "100%|██████████| 100/100 [00:38<00:00, 2.56it/s]\n", + "[2023-04-08T20:57:54.561757+0200][28420][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + "100%|██████████| 100/100 [00:30<00:00, 3.24it/s]\n" ] }, { @@ -481,15 +483,15 @@ " \n", " \n", " detection.detection_mlp.mean\n", - " 0.390071\n", - " 0.390071\n", - " 0.390071\n", + " 0.5\n", + " 0.5\n", + " 0.5\n", " 0.0\n", - " 0.390071\n", + " 0.5\n", " 0.0\n", " 1\n", " 0\n", - " 2.51\n", + " 5.95\n", " minimize\n", " \n", " \n", @@ -497,14 +499,14 @@ "" ], "text/plain": [ - " min max mean stddev median \\\n", - "detection.detection_mlp.mean 0.390071 0.390071 0.390071 0.0 0.390071 \n", + " min max mean stddev median iqr rounds \\\n", + "detection.detection_mlp.mean 0.5 0.5 0.5 0.0 0.5 0.0 1 \n", "\n", - " iqr rounds errors durations direction \n", - "detection.detection_mlp.mean 0.0 1 0 2.51 minimize " + " errors durations direction \n", + "detection.detection_mlp.mean 0 5.95 minimize " ] }, - "execution_count": 53, + "execution_count": 9, "metadata": {}, "output_type": "execute_result" } @@ -532,60 +534,102 @@ }, { "cell_type": "code", - "execution_count": 55, + "execution_count": 14, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "[2023-04-07T21:57:37.669090+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "[2023-04-07T21:57:37.689827+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "100%|██████████| 100/100 [00:12<00:00, 8.05it/s]\n", - "[2023-04-07T21:57:53.690237+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "[2023-04-07T21:57:53.712601+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "100%|██████████| 100/100 [00:04<00:00, 24.77it/s]\n", - "[2023-04-07T21:58:01.728358+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "[2023-04-07T21:58:01.744010+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - 
"100%|██████████| 100/100 [00:24<00:00, 4.08it/s]\n", - "[2023-04-07T21:58:32.292499+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "[2023-04-07T21:58:32.316002+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "100%|██████████| 100/100 [01:30<00:00, 1.10it/s]\n", - "[2023-04-07T22:00:38.652411+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "[2023-04-07T22:00:38.685914+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "100%|██████████| 100/100 [00:23<00:00, 4.21it/s]\n", - "[2023-04-07T22:01:09.148491+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "[2023-04-07T22:01:09.178259+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "100%|██████████| 100/100 [00:06<00:00, 14.79it/s]\n", - "[2023-04-07T22:01:20.722191+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "[2023-04-07T22:01:20.751419+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "100%|██████████| 100/100 [02:00<00:00, 1.21s/it]\n", - "[2023-04-07T22:03:29.180475+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "[2023-04-07T22:03:29.211421+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "100%|██████████| 100/100 [01:37<00:00, 1.02it/s]\n", - "[2023-04-07T22:05:12.012437+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "[2023-04-07T22:05:12.030781+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "100%|██████████| 100/100 [00:57<00:00, 1.74it/s]\n", - "[2023-04-07T22:06:14.408112+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "[2023-04-07T22:06:14.431469+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "100%|██████████| 100/100 [00:11<00:00, 8.47it/s]\n" + "[2023-04-08T21:26:16.278633+0200][28420][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + "[2023-04-08T21:26:16.301778+0200][28420][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + "100%|██████████| 100/100 [00:39<00:00, 2.56it/s]\n", + "[2023-04-08T21:26:59.665597+0200][28420][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + "[2023-04-08T21:26:59.684496+0200][28420][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + "100%|██████████| 100/100 [00:26<00:00, 3.74it/s]\n", + "[2023-04-08T21:27:30.475951+0200][28420][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + 
"[2023-04-08T21:27:30.495645+0200][28420][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + "100%|██████████| 100/100 [00:04<00:00, 21.72it/s]\n", + "[2023-04-08T21:27:39.102805+0200][28420][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + "[2023-04-08T21:27:39.117858+0200][28420][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + "100%|██████████| 100/100 [03:07<00:00, 1.88s/it]\n", + "[2023-04-08T21:31:35.546758+0200][28420][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + "[2023-04-08T21:31:35.638203+0200][28420][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + " 0%| | 0/100 [00:00", - "linear_transform_type (CategoricalDistribution): 0.017513441865308507", - "batch_norm (CategoricalDistribution): 0.03432980997949527", - "base_transform_type (CategoricalDistribution): 0.03608782621904944", - "n_units_hidden (IntUniformDistribution): 0.056248651052508904", - "batch_size (CategoricalDistribution): 0.06533755831900438", - "n_layers_hidden (IntUniformDistribution): 0.07065620910903445", - "n_iter (IntUniformDistribution): 0.253054441613576", - "dropout (UniformDistribution): 0.45830296971893575" + "lr (CategoricalDistribution): 0.0", + "n_iter (IntDistribution): 0.0", + "n_layers_hidden (IntDistribution): 7.256702823093135e-31", + "batch_norm (CategoricalDistribution): 0.02499999999999996", + "n_units_hidden (IntDistribution): 0.025000000000000088", + "batch_size (CategoricalDistribution): 0.04999999999999992", + "linear_transform_type (CategoricalDistribution): 0.09999999999999984", + "dropout (FloatDistribution): 0.275000000000001", + "base_transform_type (CategoricalDistribution): 0.5249999999999992" ], "marker": { "color": "rgb(66,146,198)" }, "orientation": "h", "text": [ + "<0.01", + "<0.01", "<0.01", "0.02", "0.03", - "0.04", - "0.06", - "0.07", - "0.07", - "0.25", - "0.46" + "0.05", + "0.10", + "0.28", + "0.52" ], "textposition": "outside", "type": "bar", "x": [ - 0.00846909212308733, - 0.017513441865308507, - 0.03432980997949527, - 0.03608782621904944, - 0.056248651052508904, - 0.06533755831900438, - 0.07065620910903445, - 0.253054441613576, - 0.45830296971893575 + 0, + 0, + 7.256702823093135e-31, + 0.02499999999999996, + 0.025000000000000088, + 0.04999999999999992, + 0.09999999999999984, + 0.275000000000001, + 0.5249999999999992 ], "y": [ "lr", - "linear_transform_type", + "n_iter", + "n_layers_hidden", "batch_norm", - "base_transform_type", "n_units_hidden", "batch_size", - "n_layers_hidden", - "n_iter", - "dropout" + "linear_transform_type", + "dropout", + "base_transform_type" ] } ], @@ -7716,7 +9047,7 @@ }, { "cell_type": "code", - "execution_count": 60, + "execution_count": 20, "metadata": {}, "outputs": [ { @@ -7729,15 +9060,15 @@ { "cliponaxis": false, "hovertemplate": [ - "batch_size (CategoricalDistribution): 0.0031661764258092114", - "batch_norm (CategoricalDistribution): 0.005887013322608155", - "linear_transform_type (CategoricalDistribution): 0.007340845659453336", - "n_layers_hidden (IntUniformDistribution): 0.018588433379606542", - "lr (CategoricalDistribution): 0.03405957797841154", - "n_iter (IntUniformDistribution): 0.049231044021663506", - "dropout (UniformDistribution): 0.055752145582047774", - "base_transform_type 
(CategoricalDistribution): 0.1964047830096296", - "n_units_hidden (IntUniformDistribution): 0.6295699806207704" + "n_iter (IntDistribution): 0.0", + "batch_norm (CategoricalDistribution): 0.0008792721126079252", + "n_units_hidden (IntDistribution): 0.050652923907431195", + "lr (CategoricalDistribution): 0.07515326418808736", + "linear_transform_type (CategoricalDistribution): 0.08234393383908772", + "batch_size (CategoricalDistribution): 0.1506171783782107", + "dropout (FloatDistribution): 0.1928283779305551", + "n_layers_hidden (IntDistribution): 0.20147707299584372", + "base_transform_type (CategoricalDistribution): 0.2460479766481761" ], "marker": { "color": "rgb(66,146,198)" @@ -7746,37 +9077,37 @@ "text": [ "<0.01", "<0.01", - "<0.01", - "0.02", - "0.03", "0.05", - "0.06", + "0.08", + "0.08", + "0.15", + "0.19", "0.20", - "0.63" + "0.25" ], "textposition": "outside", "type": "bar", "x": [ - 0.0031661764258092114, - 0.005887013322608155, - 0.007340845659453336, - 0.018588433379606542, - 0.03405957797841154, - 0.049231044021663506, - 0.055752145582047774, - 0.1964047830096296, - 0.6295699806207704 + 0, + 0.0008792721126079252, + 0.050652923907431195, + 0.07515326418808736, + 0.08234393383908772, + 0.1506171783782107, + 0.1928283779305551, + 0.20147707299584372, + 0.2460479766481761 ], "y": [ - "batch_size", + "n_iter", "batch_norm", - "linear_transform_type", - "n_layers_hidden", + "n_units_hidden", "lr", - "n_iter", + "linear_transform_type", + "batch_size", "dropout", - "base_transform_type", - "n_units_hidden" + "n_layers_hidden", + "base_transform_type" ] } ], @@ -8627,7 +9958,7 @@ }, { "cell_type": "code", - "execution_count": 66, + "execution_count": 21, "metadata": {}, "outputs": [ { @@ -8639,210 +9970,210 @@ "data": [ { "mode": "lines", - "name": "no-name-ce6b3d6c-0504-44aa-a791-928abb036a1c", + "name": "no-name-13d7c589-c089-4e41-befb-9c6abf683ae9", "type": "scatter", "x": [ - 0.4701086956521739, - 0.47061191626409016, - 0.4711151368760064, - 0.4716183574879227, - 0.47212157809983896, - 0.4726247987117552, - 0.4731280193236715, - 0.47363123993558776, - 0.474134460547504, - 0.4746376811594203, - 0.47514090177133655, - 0.4756441223832528, - 0.4761473429951691, - 0.47665056360708535, - 0.4771537842190016, - 0.4776570048309179, - 0.47816022544283415, - 0.4786634460547504, - 0.4791666666666667, - 0.47966988727858295, - 0.4801731078904992, - 0.4806763285024155, - 0.48117954911433175, - 0.481682769726248, - 0.4821859903381643, - 0.48268921095008055, - 0.48319243156199676, - 0.483695652173913, - 0.4841988727858293, - 0.48470209339774556, - 0.4852053140096618, - 0.4857085346215781, - 0.48621175523349436, - 0.4867149758454106, - 0.4872181964573269, - 0.48772141706924316, - 0.4882246376811594, - 0.4887278582930757, - 0.48923107890499196, - 0.4897342995169082, - 0.4902375201288245, - 0.49074074074074076, - 0.491243961352657, - 0.4917471819645733, - 0.49225040257648955, - 0.4927536231884058, - 0.4932568438003221, - 0.49376006441223835, - 0.4942632850241546, - 0.4947665056360709, - 0.49526972624798715, - 0.4957729468599034, - 0.4962761674718197, - 0.49677938808373595, - 0.4972826086956522, - 0.4977858293075685, - 0.49828904991948475, - 0.498792270531401, - 0.4992954911433173, - 0.49979871175523355, - 0.5003019323671498, - 0.500805152979066, - 0.5013083735909823, - 0.5018115942028986, - 0.5023148148148149, - 0.5028180354267311, - 0.5033212560386474, - 0.5038244766505636, - 0.50432769726248, - 0.5048309178743962, - 0.5053341384863125, - 0.5058373590982287, - 0.506340579710145, - 
0.5068438003220612, - 0.5073470209339775, - 0.5078502415458938, - 0.5083534621578101, - 0.5088566827697263, - 0.5093599033816426, - 0.5098631239935588, - 0.5103663446054751, - 0.5108695652173914, - 0.5113727858293077, - 0.5118760064412239, - 0.5123792270531402, - 0.5128824476650564, - 0.5133856682769727, - 0.513888888888889, - 0.5143921095008053, - 0.5148953301127215, - 0.5153985507246378, - 0.515901771336554, - 0.5164049919484703, - 0.5169082125603865, - 0.5174114331723029, - 0.5179146537842191, - 0.5184178743961354, - 0.5189210950080516, - 0.5194243156199679, - 0.5199275362318841 + 0.4788647342995169, + 0.47907822183184506, + 0.47929170936417315, + 0.4795051968965013, + 0.47971868442882937, + 0.4799321719611575, + 0.4801456594934856, + 0.4803591470258137, + 0.4805726345581418, + 0.48078612209046995, + 0.48099960962279803, + 0.48121309715512617, + 0.48142658468745425, + 0.4816400722197824, + 0.48185355975211047, + 0.4820670472844386, + 0.4822805348167667, + 0.48249402234909483, + 0.4827075098814229, + 0.48292099741375105, + 0.48313448494607913, + 0.4833479724784073, + 0.4835614600107354, + 0.4837749475430635, + 0.48398843507539163, + 0.4842019226077197, + 0.48441541014004785, + 0.48462889767237594, + 0.4848423852047041, + 0.48505587273703216, + 0.4852693602693603, + 0.4854828478016884, + 0.4856963353340165, + 0.4859098228663446, + 0.48612331039867274, + 0.4863367979310008, + 0.48655028546332896, + 0.48676377299565704, + 0.4869772605279852, + 0.48719074806031326, + 0.4874042355926414, + 0.48761772312496954, + 0.4878312106572976, + 0.48804469818962576, + 0.48825818572195384, + 0.488471673254282, + 0.48868516078661006, + 0.4888986483189382, + 0.4891121358512663, + 0.4893256233835944, + 0.4895391109159225, + 0.48975259844825064, + 0.4899660859805787, + 0.49017957351290686, + 0.49039306104523495, + 0.4906065485775631, + 0.49082003610989117, + 0.4910335236422193, + 0.4912470111745474, + 0.4914604987068755, + 0.49167398623920366, + 0.49188747377153175, + 0.4921009613038599, + 0.49231444883618797, + 0.4925279363685161, + 0.4927414239008442, + 0.4929549114331723, + 0.4931683989655004, + 0.49338188649782855, + 0.49359537403015663, + 0.49380886156248477, + 0.49402234909481285, + 0.494235836627141, + 0.4944493241594691, + 0.4946628116917972, + 0.4948762992241253, + 0.49508978675645343, + 0.4953032742887815, + 0.49551676182110965, + 0.4957302493534378, + 0.4959437368857659, + 0.496157224418094, + 0.4963707119504221, + 0.49658419948275023, + 0.4967976870150783, + 0.49701117454740645, + 0.49722466207973454, + 0.4974381496120627, + 0.49765163714439076, + 0.4978651246767189, + 0.498078612209047, + 0.4982920997413751, + 0.4985055872737032, + 0.49871907480603134, + 0.4989325623383594, + 0.49914604987068756, + 0.49935953740301564, + 0.4995730249353438, + 0.4997865124676719, + 0.5 ], "y": [ - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.1, - 0.2, - 0.2, - 0.2, - 0.2, - 0.2, - 0.2, - 0.2, - 0.2, - 0.2, - 0.2, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.8, - 0.9, - 0.9, + 0.14285714285714285, + 
0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, + 0.14285714285714285, 1 ] } @@ -9703,15 +11034,15 @@ }, { "cell_type": "code", - "execution_count": 65, + "execution_count": 22, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "[2023-04-07T22:13:18.269044+0200][4048][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "100%|██████████| 100/100 [00:03<00:00, 30.59it/s]\n" + "[2023-04-08T21:36:39.947037+0200][28420][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", + "100%|██████████| 100/100 [00:20<00:00, 4.87it/s]\n" ] }, { @@ -9757,27 +11088,27 @@ " \n", " \n", " detection.detection_xgb.mean\n", - " 1.000000\n", - " 1.000000\n", - " 1.000000\n", + " 0.988506\n", + " 0.988506\n", + " 0.988506\n", " 0.0\n", - " 1.000000\n", + " 0.988506\n", " 0.0\n", " 1\n", " 0\n", - " 0.14\n", + " 0.18\n", " \n", " \n", " detection.detection_mlp.mean\n", - " 0.585441\n", - " 0.585441\n", - " 0.585441\n", + " 0.703640\n", + " 0.703640\n", + " 0.703640\n", " 0.0\n", - " 0.585441\n", + " 0.703640\n", " 0.0\n", " 1\n", " 0\n", - " 1.82\n", + " 3.22\n", " \n", " \n", "\n", @@ -9785,12 +11116,12 @@ ], "text/plain": [ " min max mean stddev median \\\n", - "detection.detection_xgb.mean 1.000000 1.000000 1.000000 
0.0 1.000000 \n", - "detection.detection_mlp.mean 0.585441 0.585441 0.585441 0.0 0.585441 \n", + "detection.detection_xgb.mean 0.988506 0.988506 0.988506 0.0 0.988506 \n", + "detection.detection_mlp.mean 0.703640 0.703640 0.703640 0.0 0.703640 \n", "\n", " iqr rounds errors durations \n", - "detection.detection_xgb.mean 0.0 1 0 0.14 \n", - "detection.detection_mlp.mean 0.0 1 0 1.82 " + "detection.detection_xgb.mean 0.0 1 0 0.18 \n", + "detection.detection_mlp.mean 0.0 1 0 3.22 " ] }, "metadata": {}, @@ -9806,7 +11137,6 @@ ], "source": [ "best_params = study.best_params\n", - "best_params['n_iter'] = 100\n", "report = Benchmarks.evaluate(\n", " [(\"test\", PLUGIN, best_params)],\n", " train_loader,\n", From 4a7e73bfca1c99db5ea0f1432e47e6d516556453 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Sat, 8 Apr 2023 22:49:02 +0200 Subject: [PATCH 62/95] try fixing goggle --- src/synthcity/plugins/core/models/convnet.py | 31 +++++--------------- src/synthcity/plugins/core/models/goggle.py | 7 +++-- tests/plugins/core/models/test_convnet.py | 4 +-- 3 files changed, 14 insertions(+), 28 deletions(-) diff --git a/src/synthcity/plugins/core/models/convnet.py b/src/synthcity/plugins/core/models/convnet.py index ae4260e6..a80bbb12 100644 --- a/src/synthcity/plugins/core/models/convnet.py +++ b/src/synthcity/plugins/core/models/convnet.py @@ -4,36 +4,19 @@ # third party import numpy as np import torch -from monai.networks.layers.factories import Act + +# from monai.networks.layers.factories import Act from monai.networks.nets import Classifier, Discriminator, Generator from pydantic import validate_arguments from torch import nn # synthcity absolute import synthcity.logger as log +from synthcity.plugins.core.models.factory import get_nonlin from synthcity.utils.constants import DEVICE from synthcity.utils.reproducibility import enable_reproducible_results -def map_nonlin(nonlin: str) -> Act: - if nonlin == "relu": - return Act.RELU - elif nonlin == "elu": - return Act.ELU - elif nonlin == "prelu": - return Act.PRELU - elif nonlin == "leaky_relu": - return Act.LEAKYRELU - elif nonlin == "sigmoid": - return Act.SIGMOID - elif nonlin == "softmax": - return Act.SOFTMAX - elif nonlin == "tanh": - return Act.TANH - - raise ValueError(f"Unknown activation {nonlin}") - - class ConvNet(nn.Module): """ Wrapper for convolutional nets for classification and regression. 
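The remaining hunks of this commit swap each map_nonlin call for the shared get_nonlin factory. For context, the dict-based pattern being adopted looks roughly like this: an illustrative sketch shaped after the ACTIVATIONS table that appears in factory.py later in this series, not the verbatim synthcity implementation.

    # Sketch of a string -> torch.nn activation factory (illustrative only).
    from torch import nn

    ACTIVATIONS = dict(
        relu=nn.ReLU,
        elu=nn.ELU,
        prelu=nn.PReLU,
        leaky_relu=nn.LeakyReLU,
        sigmoid=nn.Sigmoid,
        softmax=nn.Softmax,
        tanh=nn.Tanh,
    )

    def get_nonlin(name: str) -> nn.Module:
        # Instantiate the activation registered under `name`; unknown names raise.
        if name not in ACTIVATIONS:
            raise ValueError(f"Unknown activation: {name}")
        return ACTIVATIONS[name]()

One caveat with this swap: monai's Classifier/Generator take their `act` argument as an activation name or Act factory entry rather than an nn.Module instance, which is plausibly why a later commit in this series reverts convnet.py back to map_nonlin.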
@@ -437,7 +420,7 @@ def suggest_image_generator_discriminator_arch( strides=[2, 2, 2, 1], kernel_size=3, dropout=generator_dropout, - act=map_nonlin(generator_nonlin), + act=get_nonlin(generator_nonlin), num_res_units=generator_n_residual_units, ), nn.Tanh(), @@ -449,7 +432,7 @@ def suggest_image_generator_discriminator_arch( kernel_size=3, last_act=None, dropout=discriminator_dropout, - act=map_nonlin(generator_nonlin), + act=get_nonlin(generator_nonlin), num_res_units=discriminator_n_residual_units, ).to(device) @@ -559,8 +542,8 @@ def suggest_image_classifier_arch( classes=classes, channels=[16, 32, 64, 1], strides=[start_stride, 2, 2, 2], - act=map_nonlin(nonlin), - last_act=map_nonlin(last_nonlin), + act=get_nonlin(nonlin), + last_act=get_nonlin(last_nonlin), dropout=dropout, num_res_units=n_residual_units, ).to(device) diff --git a/src/synthcity/plugins/core/models/goggle.py b/src/synthcity/plugins/core/models/goggle.py index bd498507..8d35a720 100644 --- a/src/synthcity/plugins/core/models/goggle.py +++ b/src/synthcity/plugins/core/models/goggle.py @@ -19,11 +19,14 @@ # synthcity absolute import synthcity.logger as log from synthcity.plugins.core.dataloader import DataLoader -from synthcity.plugins.core.models.mlp import MultiActivationHead, get_nonlin -from synthcity.plugins.core.models.RGCNConv import RGCNConv from synthcity.utils.constants import DEVICE from synthcity.utils.reproducibility import clear_cache, enable_reproducible_results +# synthcity relative +from .factory import get_nonlin +from .layers import MultiActivationHead +from .RGCNConv import RGCNConv + class Goggle(nn.Module): @validate_arguments(config=dict(arbitrary_types_allowed=True)) diff --git a/tests/plugins/core/models/test_convnet.py b/tests/plugins/core/models/test_convnet.py index 71399626..a6659dec 100644 --- a/tests/plugins/core/models/test_convnet.py +++ b/tests/plugins/core/models/test_convnet.py @@ -7,7 +7,7 @@ # synthcity absolute from synthcity.plugins.core.models.convnet import ( - map_nonlin, + get_nonlin, suggest_image_classifier_arch, suggest_image_generator_discriminator_arch, ) @@ -16,7 +16,7 @@ @pytest.mark.parametrize("nonlin", ["relu", "elu", "prelu", "leaky_relu"]) def test_get_nonlin(nonlin: str) -> None: - assert map_nonlin(nonlin) is not None + assert get_nonlin(nonlin) is not None @pytest.mark.parametrize("n_channels", [1, 3]) From 8051caa9f080570efec9bbd9c1c53c08afdba64c Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Sat, 8 Apr 2023 23:33:53 +0200 Subject: [PATCH 63/95] add more activations --- src/synthcity/plugins/core/models/convnet.py | 2 -- src/synthcity/plugins/core/models/factory.py | 3 +++ 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/synthcity/plugins/core/models/convnet.py b/src/synthcity/plugins/core/models/convnet.py index a80bbb12..67065705 100644 --- a/src/synthcity/plugins/core/models/convnet.py +++ b/src/synthcity/plugins/core/models/convnet.py @@ -4,8 +4,6 @@ # third party import numpy as np import torch - -# from monai.networks.layers.factories import Act from monai.networks.nets import Classifier, Discriminator, Generator from pydantic import validate_arguments from torch import nn diff --git a/src/synthcity/plugins/core/models/factory.py b/src/synthcity/plugins/core/models/factory.py index a23ffc06..4de2980b 100644 --- a/src/synthcity/plugins/core/models/factory.py +++ b/src/synthcity/plugins/core/models/factory.py @@ -66,7 +66,10 @@ relu6=nn.ReLU6, celu=nn.CELU, glu=nn.GLU, + prelu=nn.PReLU, + relu6=nn.ReLU6, 
logsigmoid=nn.LogSigmoid,
+    logsoftmax=nn.LogSoftmax,
     softplus=nn.Softplus,
 )


From 3cd9917409433401bb0ca1ba69d150a72c846c0b Mon Sep 17 00:00:00 2001
From: TZCai <13818704679@163.com>
Date: Sun, 9 Apr 2023 00:28:25 +0200
Subject: [PATCH 64/95] minor fix

---
 src/synthcity/plugins/core/models/factory.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/src/synthcity/plugins/core/models/factory.py b/src/synthcity/plugins/core/models/factory.py
index 4de2980b..a34897c3 100644
--- a/src/synthcity/plugins/core/models/factory.py
+++ b/src/synthcity/plugins/core/models/factory.py
@@ -28,7 +28,6 @@
 )
 from .layers import GumbelSoftmax

-# should only contain nn modules that can be used as building blocks in larger models
 MODELS = dict(
     mlp=".mlp.MLP",
     # attention models
@@ -63,7 +62,6 @@
     silu=nn.SiLU,
     swish=nn.SiLU,
     hardtanh=nn.Hardtanh,
-    relu6=nn.ReLU6,
     celu=nn.CELU,
     glu=nn.GLU,
     prelu=nn.PReLU,
@@ -117,6 +115,7 @@ def get_model(block: Union[str, type], params: dict) -> Any:
     Named models:
         - mlp
         - rnn
+        - gru
         - lstm
         - transformer
         - tabnet

From 42cbe8c34070bb4f742ae61682febc4b977c9a62 Mon Sep 17 00:00:00 2001
From: TZCai <13818704679@163.com>
Date: Sun, 9 Apr 2023 09:45:11 +0200
Subject: [PATCH 65/95] update

---
 src/synthcity/plugins/core/models/convnet.py | 29 ++++++++++++++++----
 tests/plugins/core/models/test_convnet.py   |  4 +--
 2 files changed, 26 insertions(+), 7 deletions(-)

diff --git a/src/synthcity/plugins/core/models/convnet.py b/src/synthcity/plugins/core/models/convnet.py
index 67065705..ae4260e6 100644
--- a/src/synthcity/plugins/core/models/convnet.py
+++ b/src/synthcity/plugins/core/models/convnet.py
@@ -4,17 +4,36 @@
 # third party
 import numpy as np
 import torch
+from monai.networks.layers.factories import Act
 from monai.networks.nets import Classifier, Discriminator, Generator
 from pydantic import validate_arguments
 from torch import nn

 # synthcity absolute
 import synthcity.logger as log
-from synthcity.plugins.core.models.factory import get_nonlin
 from synthcity.utils.constants import DEVICE
 from synthcity.utils.reproducibility import enable_reproducible_results


+def map_nonlin(nonlin: str) -> Act:
+    if nonlin == "relu":
+        return Act.RELU
+    elif nonlin == "elu":
+        return Act.ELU
+    elif nonlin == "prelu":
+        return Act.PRELU
+    elif nonlin == "leaky_relu":
+        return Act.LEAKYRELU
+    elif nonlin == "sigmoid":
+        return Act.SIGMOID
+    elif nonlin == "softmax":
+        return Act.SOFTMAX
+    elif nonlin == "tanh":
+        return Act.TANH
+
+    raise ValueError(f"Unknown activation {nonlin}")
+
+
 class ConvNet(nn.Module):
     """
     Wrapper for convolutional nets for classification and regression. 
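The hunks that follow re-wire the suggest_* helpers back to map_nonlin. As a quick sanity check of the restored helper, here is a sketch mirroring tests/plugins/core/models/test_convnet.py (runnable once monai is installed; not part of the patch itself):

    # Exercise map_nonlin the same way the test suite does.
    from synthcity.plugins.core.models.convnet import map_nonlin

    for nonlin in ["relu", "elu", "prelu", "leaky_relu", "sigmoid", "softmax", "tanh"]:
        assert map_nonlin(nonlin) is not None

    # Unknown names hit the ValueError fallback shown above.
    try:
        map_nonlin("gelu")
    except ValueError as err:
        print(err)  # Unknown activation gelu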
@@ -418,7 +437,7 @@ def suggest_image_generator_discriminator_arch( strides=[2, 2, 2, 1], kernel_size=3, dropout=generator_dropout, - act=get_nonlin(generator_nonlin), + act=map_nonlin(generator_nonlin), num_res_units=generator_n_residual_units, ), nn.Tanh(), @@ -430,7 +449,7 @@ def suggest_image_generator_discriminator_arch( kernel_size=3, last_act=None, dropout=discriminator_dropout, - act=get_nonlin(generator_nonlin), + act=map_nonlin(generator_nonlin), num_res_units=discriminator_n_residual_units, ).to(device) @@ -540,8 +559,8 @@ def suggest_image_classifier_arch( classes=classes, channels=[16, 32, 64, 1], strides=[start_stride, 2, 2, 2], - act=get_nonlin(nonlin), - last_act=get_nonlin(last_nonlin), + act=map_nonlin(nonlin), + last_act=map_nonlin(last_nonlin), dropout=dropout, num_res_units=n_residual_units, ).to(device) diff --git a/tests/plugins/core/models/test_convnet.py b/tests/plugins/core/models/test_convnet.py index a6659dec..71399626 100644 --- a/tests/plugins/core/models/test_convnet.py +++ b/tests/plugins/core/models/test_convnet.py @@ -7,7 +7,7 @@ # synthcity absolute from synthcity.plugins.core.models.convnet import ( - get_nonlin, + map_nonlin, suggest_image_classifier_arch, suggest_image_generator_discriminator_arch, ) @@ -16,7 +16,7 @@ @pytest.mark.parametrize("nonlin", ["relu", "elu", "prelu", "leaky_relu"]) def test_get_nonlin(nonlin: str) -> None: - assert get_nonlin(nonlin) is not None + assert map_nonlin(nonlin) is not None @pytest.mark.parametrize("n_channels", [1, 3]) From 7c58f2d3e2f90b0b79dcf3fb313056b8e2656eb4 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Sun, 9 Apr 2023 18:21:17 +0200 Subject: [PATCH 66/95] update --- tests/plugins/generic/test_goggle.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/tests/plugins/generic/test_goggle.py b/tests/plugins/generic/test_goggle.py index 9b58ac4e..2c9b5f4a 100644 --- a/tests/plugins/generic/test_goggle.py +++ b/tests/plugins/generic/test_goggle.py @@ -106,9 +106,6 @@ def test_plugin_generate(test_plugin: Plugin, serialize: bool) -> None: assert (X_gen1.numpy() != X_gen3.numpy()).any() -is_missing_goggle_deps = True - - @pytest.mark.skipif(is_missing_goggle_deps, reason="Goggle dependencies not installed") @pytest.mark.parametrize( "test_plugin", From 104e3a39ef7d7d11a93d8c920781423eb6251088 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Sun, 9 Apr 2023 18:22:05 +0200 Subject: [PATCH 67/95] update --- tests/plugins/generic/test_goggle.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/tests/plugins/generic/test_goggle.py b/tests/plugins/generic/test_goggle.py index 9b58ac4e..2c9b5f4a 100644 --- a/tests/plugins/generic/test_goggle.py +++ b/tests/plugins/generic/test_goggle.py @@ -106,9 +106,6 @@ def test_plugin_generate(test_plugin: Plugin, serialize: bool) -> None: assert (X_gen1.numpy() != X_gen3.numpy()).any() -is_missing_goggle_deps = True - - @pytest.mark.skipif(is_missing_goggle_deps, reason="Goggle dependencies not installed") @pytest.mark.parametrize( "test_plugin", From 7b4e04ad4552980482a74096127357851ef8ebbe Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Sun, 9 Apr 2023 21:03:02 +0200 Subject: [PATCH 68/95] update --- src/synthcity/plugins/core/constraints.py | 4 +++- tests/plugins/generic/test_goggle.py | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/synthcity/plugins/core/constraints.py b/src/synthcity/plugins/core/constraints.py index dc79e56b..be1b2cc8 100644 --- a/src/synthcity/plugins/core/constraints.py +++ 
b/src/synthcity/plugins/core/constraints.py @@ -164,9 +164,11 @@ def filter(self, X: pd.DataFrame) -> pd.DataFrame: thresh, ) if res.sum() < prev: - log.info( + log.critical( f"[{feature}] quality loss for constraints {op} = {thresh}. Remaining {res.sum()}. prev length {prev}. Original dtype {X[feature].dtype}.", ) + if res.sum() < 5: + log.critical(str(X[~res])) return res @validate_arguments(config=dict(arbitrary_types_allowed=True)) diff --git a/tests/plugins/generic/test_goggle.py b/tests/plugins/generic/test_goggle.py index 2c9b5f4a..9b194ae0 100644 --- a/tests/plugins/generic/test_goggle.py +++ b/tests/plugins/generic/test_goggle.py @@ -17,7 +17,7 @@ plugin_name = "goggle" plugin_args = { - "n_iter": 500, + "n_iter": 10, "device": "cpu", } From fede549c81db710193c5d1c65128003c8cdd0937 Mon Sep 17 00:00:00 2001 From: Tianzhang Cai <13818704679@163.com> Date: Mon, 10 Apr 2023 01:17:42 +0100 Subject: [PATCH 69/95] Update tabular_encoder.py --- src/synthcity/plugins/core/models/tabular_encoder.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/synthcity/plugins/core/models/tabular_encoder.py b/src/synthcity/plugins/core/models/tabular_encoder.py index 364a6b57..fe82aca4 100644 --- a/src/synthcity/plugins/core/models/tabular_encoder.py +++ b/src/synthcity/plugins/core/models/tabular_encoder.py @@ -286,6 +286,7 @@ def activation_layout( d = 0 d += 1 out.append((acts[ct], d)) + log.info(out) return out From 539effaac7853a78a6de3cf92e2a9a08d17fd3a6 Mon Sep 17 00:00:00 2001 From: Tianzhang Cai <13818704679@163.com> Date: Mon, 10 Apr 2023 02:37:11 +0100 Subject: [PATCH 70/95] Update test_goggle.py --- tests/plugins/generic/test_goggle.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/plugins/generic/test_goggle.py b/tests/plugins/generic/test_goggle.py index 9b194ae0..1188b20d 100644 --- a/tests/plugins/generic/test_goggle.py +++ b/tests/plugins/generic/test_goggle.py @@ -19,6 +19,7 @@ plugin_args = { "n_iter": 10, "device": "cpu", + "sampling_patience": 50 } if not is_missing_goggle_deps: From 0cb9f25fce1722839a48147d19d37164f9c0c117 Mon Sep 17 00:00:00 2001 From: Tianzhang Cai <13818704679@163.com> Date: Mon, 10 Apr 2023 02:39:29 +0100 Subject: [PATCH 71/95] Update tabular_encoder.py --- src/synthcity/plugins/core/models/tabular_encoder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/synthcity/plugins/core/models/tabular_encoder.py b/src/synthcity/plugins/core/models/tabular_encoder.py index fe82aca4..8c4d06ea 100644 --- a/src/synthcity/plugins/core/models/tabular_encoder.py +++ b/src/synthcity/plugins/core/models/tabular_encoder.py @@ -286,7 +286,7 @@ def activation_layout( d = 0 d += 1 out.append((acts[ct], d)) - log.info(out) + log.critical(out) return out From 42c69413db0be0c42e851edbe8a677efbe75ea1d Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Mon, 10 Apr 2023 08:52:31 +0200 Subject: [PATCH 72/95] update --- src/synthcity/plugins/core/models/tabular_encoder.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/synthcity/plugins/core/models/tabular_encoder.py b/src/synthcity/plugins/core/models/tabular_encoder.py index 8c4d06ea..54a3d478 100644 --- a/src/synthcity/plugins/core/models/tabular_encoder.py +++ b/src/synthcity/plugins/core/models/tabular_encoder.py @@ -275,7 +275,6 @@ def activation_layout( """ out = [] acts = dict(discrete=discrete_activation, continuous=continuous_activation) - # NOTE: be careful with the dim of softmax! 
for column_transform_info in self._column_transform_info_list: ct = column_transform_info.trans_feature_types[0] d = 0 From d7d966d63ad35777dce5a798c22a48050b2592e0 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Mon, 10 Apr 2023 09:11:21 +0200 Subject: [PATCH 73/95] update tutorial 8 --- ...utorial8_hyperparameter_optimization.ipynb | 10914 +--------------- 1 file changed, 34 insertions(+), 10880 deletions(-) diff --git a/tutorials/tutorial8_hyperparameter_optimization.ipynb b/tutorials/tutorial8_hyperparameter_optimization.ipynb index f95c1e0c..971dd38d 100644 --- a/tutorials/tutorial8_hyperparameter_optimization.ipynb +++ b/tutorials/tutorial8_hyperparameter_optimization.ipynb @@ -12,24 +12,9 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[KeOps] Warning : \n", - " The default C++ compiler could not be found on your system.\n", - " You need to either define the CXX environment variable or a symlink to the g++ command.\n", - " For example if g++-8 is the command you can do\n", - " import os\n", - " os.environ['CXX'] = 'g++-8'\n", - " \n", - "[KeOps] Warning : Cuda libraries were not detected on the system ; using cpu only mode\n" - ] - } - ], + "outputs": [], "source": [ "# stdlib\n", "import sys\n", @@ -58,238 +43,9 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
agesexbmibps1s2s3s4s5s6target
00.0380760.0506800.0616960.021872-0.044223-0.034821-0.043401-0.0025920.019907-0.017646151.0
1-0.001882-0.044642-0.051474-0.026328-0.008449-0.0191630.074412-0.039493-0.068332-0.09220475.0
20.0852990.0506800.044451-0.005670-0.045599-0.034194-0.032356-0.0025920.002861-0.025930141.0
3-0.089063-0.044642-0.011595-0.0366560.0121910.024991-0.0360380.0343090.022688-0.009362206.0
40.005383-0.044642-0.0363850.0218720.0039350.0155960.008142-0.002592-0.031988-0.046641135.0
....................................
4370.0417080.0506800.0196620.059744-0.005697-0.002566-0.028674-0.0025920.0311930.007207178.0
438-0.0055150.050680-0.015906-0.0676420.0493410.079165-0.0286740.034309-0.0181140.044485104.0
4390.0417080.050680-0.0159060.017293-0.037344-0.013840-0.024993-0.011080-0.0468830.015491132.0
440-0.045472-0.0446420.0390620.0012150.0163180.015283-0.0286740.0265600.044529-0.025930220.0
441-0.045472-0.044642-0.073030-0.0814130.0837400.0278090.173816-0.039493-0.0042220.00306457.0
\n", - "

442 rows × 11 columns

\n", - "
" - ], - "text/plain": [ - " age sex bmi bp s1 s2 s3 \\\n", - "0 0.038076 0.050680 0.061696 0.021872 -0.044223 -0.034821 -0.043401 \n", - "1 -0.001882 -0.044642 -0.051474 -0.026328 -0.008449 -0.019163 0.074412 \n", - "2 0.085299 0.050680 0.044451 -0.005670 -0.045599 -0.034194 -0.032356 \n", - "3 -0.089063 -0.044642 -0.011595 -0.036656 0.012191 0.024991 -0.036038 \n", - "4 0.005383 -0.044642 -0.036385 0.021872 0.003935 0.015596 0.008142 \n", - ".. ... ... ... ... ... ... ... \n", - "437 0.041708 0.050680 0.019662 0.059744 -0.005697 -0.002566 -0.028674 \n", - "438 -0.005515 0.050680 -0.015906 -0.067642 0.049341 0.079165 -0.028674 \n", - "439 0.041708 0.050680 -0.015906 0.017293 -0.037344 -0.013840 -0.024993 \n", - "440 -0.045472 -0.044642 0.039062 0.001215 0.016318 0.015283 -0.028674 \n", - "441 -0.045472 -0.044642 -0.073030 -0.081413 0.083740 0.027809 0.173816 \n", - "\n", - " s4 s5 s6 target \n", - "0 -0.002592 0.019907 -0.017646 151.0 \n", - "1 -0.039493 -0.068332 -0.092204 75.0 \n", - "2 -0.002592 0.002861 -0.025930 141.0 \n", - "3 0.034309 0.022688 -0.009362 206.0 \n", - "4 -0.002592 -0.031988 -0.046641 135.0 \n", - ".. ... ... ... ... \n", - "437 -0.002592 0.031193 0.007207 178.0 \n", - "438 0.034309 -0.018114 0.044485 104.0 \n", - "439 -0.011080 -0.046883 0.015491 132.0 \n", - "440 0.026560 0.044529 -0.025930 220.0 \n", - "441 -0.039493 -0.004222 0.003064 57.0 \n", - "\n", - "[442 rows x 11 columns]" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "X, y = load_diabetes(return_X_y=True, as_frame=True)\n", "X[\"target\"] = y\n", @@ -298,7 +54,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -320,30 +76,11 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[2023-04-08T20:56:15.722354+0200][28420][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "[2023-04-08T20:56:15.722354+0200][28420][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n" - ] - }, - { - "data": { - "text/plain": [ - "synthcity.plugins.generic.plugin_nflow.NormalizingFlowsPlugin" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ - "PLUGIN = \"nflow\"\n", + "PLUGIN = \"tvae\"\n", "plugin_cls = type(Plugins().get(PLUGIN))\n", "plugin_cls" ] @@ -358,28 +95,9 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[IntegerDistribution(name='n_iter', data=None, random_state=0, marginal_distribution=None, low=100, high=5000, step=100),\n", - " IntegerDistribution(name='n_layers_hidden', data=None, random_state=0, marginal_distribution=None, low=1, high=10, step=1),\n", - " IntegerDistribution(name='n_units_hidden', data=None, random_state=0, marginal_distribution=None, low=10, high=100, step=1),\n", - " CategoricalDistribution(name='batch_size', data=None, random_state=0, marginal_distribution=None, choices=[32, 64, 128, 256, 512]),\n", - " FloatDistribution(name='dropout', data=None, random_state=0, marginal_distribution=None, low=0.0, high=0.2),\n", - " CategoricalDistribution(name='batch_norm', data=None, random_state=0, marginal_distribution=None, choices=[False, 
True]),\n", - " CategoricalDistribution(name='lr', data=None, random_state=0, marginal_distribution=None, choices=[0.0001, 0.0002, 0.001]),\n", - " CategoricalDistribution(name='linear_transform_type', data=None, random_state=0, marginal_distribution=None, choices=['lu', 'permutation', 'svd']),\n", - " CategoricalDistribution(name='base_transform_type', data=None, random_state=0, marginal_distribution=None, choices=['affine-autoregressive', 'affine-coupling', 'quadratic-autoregressive', 'quadratic-coupling', 'rq-autoregressive', 'rq-coupling'])]" - ] - }, - "execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "plugin_cls.hyperparameter_space()" ] @@ -394,28 +112,9 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'n_iter': 100,\n", - " 'n_layers_hidden': 1,\n", - " 'n_units_hidden': 87,\n", - " 'batch_size': 256,\n", - " 'dropout': 0.15424246144819787,\n", - " 'batch_norm': False,\n", - " 'lr': 0.001,\n", - " 'linear_transform_type': 'svd',\n", - " 'base_transform_type': 'rq-autoregressive'}" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "from synthcity.utils.optuna_sample import suggest_all\n", "\n", @@ -435,82 +134,9 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|██████████| 100/100 [00:38<00:00, 2.56it/s]\n", - "[2023-04-08T20:57:54.561757+0200][28420][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "100%|██████████| 100/100 [00:30<00:00, 3.24it/s]\n" - ] - }, - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
minmaxmeanstddevmedianiqrroundserrorsdurationsdirection
detection.detection_mlp.mean0.50.50.50.00.50.0105.95minimize
\n", - "
" - ], - "text/plain": [ - " min max mean stddev median iqr rounds \\\n", - "detection.detection_mlp.mean 0.5 0.5 0.5 0.0 0.5 0.0 1 \n", - "\n", - " errors durations direction \n", - "detection.detection_mlp.mean 0 5.95 minimize " - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "from synthcity.benchmark import Benchmarks\n", "\n", @@ -534,106 +160,9 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[2023-04-08T21:26:16.278633+0200][28420][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "[2023-04-08T21:26:16.301778+0200][28420][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "100%|██████████| 100/100 [00:39<00:00, 2.56it/s]\n", - "[2023-04-08T21:26:59.665597+0200][28420][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "[2023-04-08T21:26:59.684496+0200][28420][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "100%|██████████| 100/100 [00:26<00:00, 3.74it/s]\n", - "[2023-04-08T21:27:30.475951+0200][28420][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "[2023-04-08T21:27:30.495645+0200][28420][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "100%|██████████| 100/100 [00:04<00:00, 21.72it/s]\n", - "[2023-04-08T21:27:39.102805+0200][28420][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "[2023-04-08T21:27:39.117858+0200][28420][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "100%|██████████| 100/100 [03:07<00:00, 1.88s/it]\n", - "[2023-04-08T21:31:35.546758+0200][28420][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "[2023-04-08T21:31:35.638203+0200][28420][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - " 0%| | 0/100 [00:00", - "n_iter (IntDistribution): 0.0", - "n_layers_hidden (IntDistribution): 7.256702823093135e-31", - "batch_norm (CategoricalDistribution): 0.02499999999999996", - "n_units_hidden (IntDistribution): 0.025000000000000088", - "batch_size (CategoricalDistribution): 0.04999999999999992", - "linear_transform_type (CategoricalDistribution): 0.09999999999999984", - "dropout (FloatDistribution): 0.275000000000001", - "base_transform_type (CategoricalDistribution): 0.5249999999999992" - ], - "marker": { - "color": "rgb(66,146,198)" - }, - "orientation": "h", - "text": [ - "<0.01", - "<0.01", - "<0.01", - "0.02", - "0.03", - "0.05", - "0.10", - "0.28", - "0.52" - ], - "textposition": "outside", - "type": "bar", - "x": [ - 0, - 0, - 7.256702823093135e-31, - 0.02499999999999996, - 0.025000000000000088, - 0.04999999999999992, - 0.09999999999999984, - 0.275000000000001, - 0.5249999999999992 - ], - "y": [ - "lr", - "n_iter", - "n_layers_hidden", - "batch_norm", - "n_units_hidden", - "batch_size", - "linear_transform_type", - "dropout", - "base_transform_type" - ] - } - ], - "layout": { - "showlegend": false, - "template": { - 
"data": { - "bar": [ - { - "error_x": { - "color": "#2a3f5f" - }, - "error_y": { - "color": "#2a3f5f" - }, - "marker": { - "line": { - "color": "#E5ECF6", - "width": 0.5 - }, - "pattern": { - "fillmode": "overlay", - "size": 10, - "solidity": 0.2 - } - }, - "type": "bar" - } - ], - "barpolar": [ - { - "marker": { - "line": { - "color": "#E5ECF6", - "width": 0.5 - }, - "pattern": { - "fillmode": "overlay", - "size": 10, - "solidity": 0.2 - } - }, - "type": "barpolar" - } - ], - "carpet": [ - { - "aaxis": { - "endlinecolor": "#2a3f5f", - "gridcolor": "white", - "linecolor": "white", - "minorgridcolor": "white", - "startlinecolor": "#2a3f5f" - }, - "baxis": { - "endlinecolor": "#2a3f5f", - "gridcolor": "white", - "linecolor": "white", - "minorgridcolor": "white", - "startlinecolor": "#2a3f5f" - }, - "type": "carpet" - } - ], - "choropleth": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "type": "choropleth" - } - ], - "contour": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "contour" - } - ], - "contourcarpet": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "type": "contourcarpet" - } - ], - "heatmap": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "heatmap" - } - ], - "heatmapgl": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "heatmapgl" - } - ], - "histogram": [ - { - "marker": { - "pattern": { - "fillmode": "overlay", - "size": 10, - "solidity": 0.2 - } - }, - "type": "histogram" - } - ], - "histogram2d": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "histogram2d" - } - ], - "histogram2dcontour": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - 
"#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "histogram2dcontour" - } - ], - "mesh3d": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "type": "mesh3d" - } - ], - "parcoords": [ - { - "line": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "parcoords" - } - ], - "pie": [ - { - "automargin": true, - "type": "pie" - } - ], - "scatter": [ - { - "fillpattern": { - "fillmode": "overlay", - "size": 10, - "solidity": 0.2 - }, - "type": "scatter" - } - ], - "scatter3d": [ - { - "line": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scatter3d" - } - ], - "scattercarpet": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scattercarpet" - } - ], - "scattergeo": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scattergeo" - } - ], - "scattergl": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scattergl" - } - ], - "scattermapbox": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scattermapbox" - } - ], - "scatterpolar": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scatterpolar" - } - ], - "scatterpolargl": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scatterpolargl" - } - ], - "scatterternary": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scatterternary" - } - ], - "surface": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "surface" - } - ], - "table": [ - { - "cells": { - "fill": { - "color": "#EBF0F8" - }, - "line": { - "color": "white" - } - }, - "header": { - "fill": { - "color": "#C8D4E3" - }, - "line": { - "color": "white" - } - }, - "type": "table" - } - ] - }, - "layout": { - "annotationdefaults": { - "arrowcolor": "#2a3f5f", - "arrowhead": 0, - "arrowwidth": 1 - }, - "autotypenumbers": "strict", - "coloraxis": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "colorscale": { - "diverging": [ - [ - 0, - "#8e0152" - ], - [ - 0.1, - "#c51b7d" - ], - [ - 0.2, - "#de77ae" - ], - [ - 0.3, - "#f1b6da" - ], - [ - 0.4, - "#fde0ef" - ], - [ - 0.5, - "#f7f7f7" - ], - [ - 0.6, - "#e6f5d0" - ], - [ - 0.7, - "#b8e186" - ], - [ - 0.8, - "#7fbc41" - ], - [ - 0.9, - "#4d9221" - ], - [ - 1, - "#276419" - ] - ], - "sequential": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - 
"#f0f921" - ] - ], - "sequentialminus": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ] - }, - "colorway": [ - "#636efa", - "#EF553B", - "#00cc96", - "#ab63fa", - "#FFA15A", - "#19d3f3", - "#FF6692", - "#B6E880", - "#FF97FF", - "#FECB52" - ], - "font": { - "color": "#2a3f5f" - }, - "geo": { - "bgcolor": "white", - "lakecolor": "white", - "landcolor": "#E5ECF6", - "showlakes": true, - "showland": true, - "subunitcolor": "white" - }, - "hoverlabel": { - "align": "left" - }, - "hovermode": "closest", - "mapbox": { - "style": "light" - }, - "paper_bgcolor": "white", - "plot_bgcolor": "#E5ECF6", - "polar": { - "angularaxis": { - "gridcolor": "white", - "linecolor": "white", - "ticks": "" - }, - "bgcolor": "#E5ECF6", - "radialaxis": { - "gridcolor": "white", - "linecolor": "white", - "ticks": "" - } - }, - "scene": { - "xaxis": { - "backgroundcolor": "#E5ECF6", - "gridcolor": "white", - "gridwidth": 2, - "linecolor": "white", - "showbackground": true, - "ticks": "", - "zerolinecolor": "white" - }, - "yaxis": { - "backgroundcolor": "#E5ECF6", - "gridcolor": "white", - "gridwidth": 2, - "linecolor": "white", - "showbackground": true, - "ticks": "", - "zerolinecolor": "white" - }, - "zaxis": { - "backgroundcolor": "#E5ECF6", - "gridcolor": "white", - "gridwidth": 2, - "linecolor": "white", - "showbackground": true, - "ticks": "", - "zerolinecolor": "white" - } - }, - "shapedefaults": { - "line": { - "color": "#2a3f5f" - } - }, - "ternary": { - "aaxis": { - "gridcolor": "white", - "linecolor": "white", - "ticks": "" - }, - "baxis": { - "gridcolor": "white", - "linecolor": "white", - "ticks": "" - }, - "bgcolor": "#E5ECF6", - "caxis": { - "gridcolor": "white", - "linecolor": "white", - "ticks": "" - } - }, - "title": { - "x": 0.05 - }, - "xaxis": { - "automargin": true, - "gridcolor": "white", - "linecolor": "white", - "ticks": "", - "title": { - "standoff": 15 - }, - "zerolinecolor": "white", - "zerolinewidth": 2 - }, - "yaxis": { - "automargin": true, - "gridcolor": "white", - "linecolor": "white", - "ticks": "", - "title": { - "standoff": 15 - }, - "zerolinecolor": "white", - "zerolinewidth": 2 - } - } - }, - "title": { - "text": "Hyperparameter Importances" - }, - "xaxis": { - "title": { - "text": "Importance for Objective Value" - } - }, - "yaxis": { - "title": { - "text": "Hyperparameter" - } - } - } - } - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "# Visualize parameter importances.\n", "plot_param_importances(study)" @@ -9047,908 +256,9 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.plotly.v1+json": { - "config": { - "plotlyServerURL": "https://plot.ly" - }, - "data": [ - { - "cliponaxis": false, - "hovertemplate": [ - "n_iter (IntDistribution): 0.0", - "batch_norm (CategoricalDistribution): 0.0008792721126079252", - "n_units_hidden (IntDistribution): 0.050652923907431195", - "lr (CategoricalDistribution): 0.07515326418808736", - "linear_transform_type (CategoricalDistribution): 0.08234393383908772", - "batch_size (CategoricalDistribution): 0.1506171783782107", - "dropout (FloatDistribution): 
0.1928283779305551", - "n_layers_hidden (IntDistribution): 0.20147707299584372", - "base_transform_type (CategoricalDistribution): 0.2460479766481761" - ], - "marker": { - "color": "rgb(66,146,198)" - }, - "orientation": "h", - "text": [ - "<0.01", - "<0.01", - "0.05", - "0.08", - "0.08", - "0.15", - "0.19", - "0.20", - "0.25" - ], - "textposition": "outside", - "type": "bar", - "x": [ - 0, - 0.0008792721126079252, - 0.050652923907431195, - 0.07515326418808736, - 0.08234393383908772, - 0.1506171783782107, - 0.1928283779305551, - 0.20147707299584372, - 0.2460479766481761 - ], - "y": [ - "n_iter", - "batch_norm", - "n_units_hidden", - "lr", - "linear_transform_type", - "batch_size", - "dropout", - "n_layers_hidden", - "base_transform_type" - ] - } - ], - "layout": { - "showlegend": false, - "template": { - "data": { - "bar": [ - { - "error_x": { - "color": "#2a3f5f" - }, - "error_y": { - "color": "#2a3f5f" - }, - "marker": { - "line": { - "color": "#E5ECF6", - "width": 0.5 - }, - "pattern": { - "fillmode": "overlay", - "size": 10, - "solidity": 0.2 - } - }, - "type": "bar" - } - ], - "barpolar": [ - { - "marker": { - "line": { - "color": "#E5ECF6", - "width": 0.5 - }, - "pattern": { - "fillmode": "overlay", - "size": 10, - "solidity": 0.2 - } - }, - "type": "barpolar" - } - ], - "carpet": [ - { - "aaxis": { - "endlinecolor": "#2a3f5f", - "gridcolor": "white", - "linecolor": "white", - "minorgridcolor": "white", - "startlinecolor": "#2a3f5f" - }, - "baxis": { - "endlinecolor": "#2a3f5f", - "gridcolor": "white", - "linecolor": "white", - "minorgridcolor": "white", - "startlinecolor": "#2a3f5f" - }, - "type": "carpet" - } - ], - "choropleth": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "type": "choropleth" - } - ], - "contour": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "contour" - } - ], - "contourcarpet": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "type": "contourcarpet" - } - ], - "heatmap": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "heatmap" - } - ], - "heatmapgl": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "heatmapgl" - } - ], - "histogram": [ - { - "marker": { - "pattern": { - "fillmode": "overlay", - "size": 10, - "solidity": 0.2 - 
} - }, - "type": "histogram" - } - ], - "histogram2d": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "histogram2d" - } - ], - "histogram2dcontour": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "histogram2dcontour" - } - ], - "mesh3d": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "type": "mesh3d" - } - ], - "parcoords": [ - { - "line": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "parcoords" - } - ], - "pie": [ - { - "automargin": true, - "type": "pie" - } - ], - "scatter": [ - { - "fillpattern": { - "fillmode": "overlay", - "size": 10, - "solidity": 0.2 - }, - "type": "scatter" - } - ], - "scatter3d": [ - { - "line": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scatter3d" - } - ], - "scattercarpet": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scattercarpet" - } - ], - "scattergeo": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scattergeo" - } - ], - "scattergl": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scattergl" - } - ], - "scattermapbox": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scattermapbox" - } - ], - "scatterpolar": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scatterpolar" - } - ], - "scatterpolargl": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scatterpolargl" - } - ], - "scatterternary": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scatterternary" - } - ], - "surface": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "surface" - } - ], - "table": [ - { - "cells": { - "fill": { - "color": "#EBF0F8" - }, - "line": { - "color": "white" - } - }, - "header": { - "fill": { - "color": "#C8D4E3" - }, - "line": { - "color": "white" - } - }, - "type": "table" - } - ] - }, - "layout": { - "annotationdefaults": { - "arrowcolor": "#2a3f5f", - "arrowhead": 0, - "arrowwidth": 1 - }, - "autotypenumbers": "strict", - 
"coloraxis": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "colorscale": { - "diverging": [ - [ - 0, - "#8e0152" - ], - [ - 0.1, - "#c51b7d" - ], - [ - 0.2, - "#de77ae" - ], - [ - 0.3, - "#f1b6da" - ], - [ - 0.4, - "#fde0ef" - ], - [ - 0.5, - "#f7f7f7" - ], - [ - 0.6, - "#e6f5d0" - ], - [ - 0.7, - "#b8e186" - ], - [ - 0.8, - "#7fbc41" - ], - [ - 0.9, - "#4d9221" - ], - [ - 1, - "#276419" - ] - ], - "sequential": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "sequentialminus": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ] - }, - "colorway": [ - "#636efa", - "#EF553B", - "#00cc96", - "#ab63fa", - "#FFA15A", - "#19d3f3", - "#FF6692", - "#B6E880", - "#FF97FF", - "#FECB52" - ], - "font": { - "color": "#2a3f5f" - }, - "geo": { - "bgcolor": "white", - "lakecolor": "white", - "landcolor": "#E5ECF6", - "showlakes": true, - "showland": true, - "subunitcolor": "white" - }, - "hoverlabel": { - "align": "left" - }, - "hovermode": "closest", - "mapbox": { - "style": "light" - }, - "paper_bgcolor": "white", - "plot_bgcolor": "#E5ECF6", - "polar": { - "angularaxis": { - "gridcolor": "white", - "linecolor": "white", - "ticks": "" - }, - "bgcolor": "#E5ECF6", - "radialaxis": { - "gridcolor": "white", - "linecolor": "white", - "ticks": "" - } - }, - "scene": { - "xaxis": { - "backgroundcolor": "#E5ECF6", - "gridcolor": "white", - "gridwidth": 2, - "linecolor": "white", - "showbackground": true, - "ticks": "", - "zerolinecolor": "white" - }, - "yaxis": { - "backgroundcolor": "#E5ECF6", - "gridcolor": "white", - "gridwidth": 2, - "linecolor": "white", - "showbackground": true, - "ticks": "", - "zerolinecolor": "white" - }, - "zaxis": { - "backgroundcolor": "#E5ECF6", - "gridcolor": "white", - "gridwidth": 2, - "linecolor": "white", - "showbackground": true, - "ticks": "", - "zerolinecolor": "white" - } - }, - "shapedefaults": { - "line": { - "color": "#2a3f5f" - } - }, - "ternary": { - "aaxis": { - "gridcolor": "white", - "linecolor": "white", - "ticks": "" - }, - "baxis": { - "gridcolor": "white", - "linecolor": "white", - "ticks": "" - }, - "bgcolor": "#E5ECF6", - "caxis": { - "gridcolor": "white", - "linecolor": "white", - "ticks": "" - } - }, - "title": { - "x": 0.05 - }, - "xaxis": { - "automargin": true, - "gridcolor": "white", - "linecolor": "white", - "ticks": "", - "title": { - "standoff": 15 - }, - "zerolinecolor": "white", - "zerolinewidth": 2 - }, - "yaxis": { - "automargin": true, - "gridcolor": "white", - "linecolor": "white", - "ticks": "", - "title": { - "standoff": 15 - }, - "zerolinecolor": "white", - "zerolinewidth": 2 - } - } - }, - "title": { - "text": "Hyperparameter Importances" - }, - "xaxis": { - "title": { - "text": "Importance for duration" - } - }, - "yaxis": { - "title": { - "text": "Hyperparameter" - } - } - } - } - }, - "metadata": {}, - "output_type": 
"display_data" - } - ], + "outputs": [], "source": [ "# Learn which hyperparameters are affecting the trial duration with hyperparameter importance.\n", "optuna.visualization.plot_param_importances(\n", @@ -9958,1067 +268,9 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.plotly.v1+json": { - "config": { - "plotlyServerURL": "https://plot.ly" - }, - "data": [ - { - "mode": "lines", - "name": "no-name-13d7c589-c089-4e41-befb-9c6abf683ae9", - "type": "scatter", - "x": [ - 0.4788647342995169, - 0.47907822183184506, - 0.47929170936417315, - 0.4795051968965013, - 0.47971868442882937, - 0.4799321719611575, - 0.4801456594934856, - 0.4803591470258137, - 0.4805726345581418, - 0.48078612209046995, - 0.48099960962279803, - 0.48121309715512617, - 0.48142658468745425, - 0.4816400722197824, - 0.48185355975211047, - 0.4820670472844386, - 0.4822805348167667, - 0.48249402234909483, - 0.4827075098814229, - 0.48292099741375105, - 0.48313448494607913, - 0.4833479724784073, - 0.4835614600107354, - 0.4837749475430635, - 0.48398843507539163, - 0.4842019226077197, - 0.48441541014004785, - 0.48462889767237594, - 0.4848423852047041, - 0.48505587273703216, - 0.4852693602693603, - 0.4854828478016884, - 0.4856963353340165, - 0.4859098228663446, - 0.48612331039867274, - 0.4863367979310008, - 0.48655028546332896, - 0.48676377299565704, - 0.4869772605279852, - 0.48719074806031326, - 0.4874042355926414, - 0.48761772312496954, - 0.4878312106572976, - 0.48804469818962576, - 0.48825818572195384, - 0.488471673254282, - 0.48868516078661006, - 0.4888986483189382, - 0.4891121358512663, - 0.4893256233835944, - 0.4895391109159225, - 0.48975259844825064, - 0.4899660859805787, - 0.49017957351290686, - 0.49039306104523495, - 0.4906065485775631, - 0.49082003610989117, - 0.4910335236422193, - 0.4912470111745474, - 0.4914604987068755, - 0.49167398623920366, - 0.49188747377153175, - 0.4921009613038599, - 0.49231444883618797, - 0.4925279363685161, - 0.4927414239008442, - 0.4929549114331723, - 0.4931683989655004, - 0.49338188649782855, - 0.49359537403015663, - 0.49380886156248477, - 0.49402234909481285, - 0.494235836627141, - 0.4944493241594691, - 0.4946628116917972, - 0.4948762992241253, - 0.49508978675645343, - 0.4953032742887815, - 0.49551676182110965, - 0.4957302493534378, - 0.4959437368857659, - 0.496157224418094, - 0.4963707119504221, - 0.49658419948275023, - 0.4967976870150783, - 0.49701117454740645, - 0.49722466207973454, - 0.4974381496120627, - 0.49765163714439076, - 0.4978651246767189, - 0.498078612209047, - 0.4982920997413751, - 0.4985055872737032, - 0.49871907480603134, - 0.4989325623383594, - 0.49914604987068756, - 0.49935953740301564, - 0.4995730249353438, - 0.4997865124676719, - 0.5 - ], - "y": [ - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 
0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 0.14285714285714285, - 1 - ] - } - ], - "layout": { - "template": { - "data": { - "bar": [ - { - "error_x": { - "color": "#2a3f5f" - }, - "error_y": { - "color": "#2a3f5f" - }, - "marker": { - "line": { - "color": "#E5ECF6", - "width": 0.5 - }, - "pattern": { - "fillmode": "overlay", - "size": 10, - "solidity": 0.2 - } - }, - "type": "bar" - } - ], - "barpolar": [ - { - "marker": { - "line": { - "color": "#E5ECF6", - "width": 0.5 - }, - "pattern": { - "fillmode": "overlay", - "size": 10, - "solidity": 0.2 - } - }, - "type": "barpolar" - } - ], - "carpet": [ - { - "aaxis": { - "endlinecolor": "#2a3f5f", - "gridcolor": "white", - "linecolor": "white", - "minorgridcolor": "white", - "startlinecolor": "#2a3f5f" - }, - "baxis": { - "endlinecolor": "#2a3f5f", - "gridcolor": "white", - "linecolor": "white", - "minorgridcolor": "white", - "startlinecolor": "#2a3f5f" - }, - "type": "carpet" - } - ], - "choropleth": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "type": "choropleth" - } - ], - "contour": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "contour" - } - ], - "contourcarpet": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "type": "contourcarpet" - } - ], - "heatmap": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 
0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "heatmap" - } - ], - "heatmapgl": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "heatmapgl" - } - ], - "histogram": [ - { - "marker": { - "pattern": { - "fillmode": "overlay", - "size": 10, - "solidity": 0.2 - } - }, - "type": "histogram" - } - ], - "histogram2d": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "histogram2d" - } - ], - "histogram2dcontour": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "histogram2dcontour" - } - ], - "mesh3d": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "type": "mesh3d" - } - ], - "parcoords": [ - { - "line": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "parcoords" - } - ], - "pie": [ - { - "automargin": true, - "type": "pie" - } - ], - "scatter": [ - { - "fillpattern": { - "fillmode": "overlay", - "size": 10, - "solidity": 0.2 - }, - "type": "scatter" - } - ], - "scatter3d": [ - { - "line": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scatter3d" - } - ], - "scattercarpet": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scattercarpet" - } - ], - "scattergeo": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scattergeo" - } - ], - "scattergl": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scattergl" - } - ], - "scattermapbox": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scattermapbox" - } - ], - "scatterpolar": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scatterpolar" - } - ], - "scatterpolargl": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scatterpolargl" - } - ], - "scatterternary": [ - { - "marker": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "type": "scatterternary" - } - ], - "surface": [ - { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - }, - "colorscale": [ - [ - 0, - "#0d0887" - ], - [ - 
0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "type": "surface" - } - ], - "table": [ - { - "cells": { - "fill": { - "color": "#EBF0F8" - }, - "line": { - "color": "white" - } - }, - "header": { - "fill": { - "color": "#C8D4E3" - }, - "line": { - "color": "white" - } - }, - "type": "table" - } - ] - }, - "layout": { - "annotationdefaults": { - "arrowcolor": "#2a3f5f", - "arrowhead": 0, - "arrowwidth": 1 - }, - "autotypenumbers": "strict", - "coloraxis": { - "colorbar": { - "outlinewidth": 0, - "ticks": "" - } - }, - "colorscale": { - "diverging": [ - [ - 0, - "#8e0152" - ], - [ - 0.1, - "#c51b7d" - ], - [ - 0.2, - "#de77ae" - ], - [ - 0.3, - "#f1b6da" - ], - [ - 0.4, - "#fde0ef" - ], - [ - 0.5, - "#f7f7f7" - ], - [ - 0.6, - "#e6f5d0" - ], - [ - 0.7, - "#b8e186" - ], - [ - 0.8, - "#7fbc41" - ], - [ - 0.9, - "#4d9221" - ], - [ - 1, - "#276419" - ] - ], - "sequential": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ], - "sequentialminus": [ - [ - 0, - "#0d0887" - ], - [ - 0.1111111111111111, - "#46039f" - ], - [ - 0.2222222222222222, - "#7201a8" - ], - [ - 0.3333333333333333, - "#9c179e" - ], - [ - 0.4444444444444444, - "#bd3786" - ], - [ - 0.5555555555555556, - "#d8576b" - ], - [ - 0.6666666666666666, - "#ed7953" - ], - [ - 0.7777777777777778, - "#fb9f3a" - ], - [ - 0.8888888888888888, - "#fdca26" - ], - [ - 1, - "#f0f921" - ] - ] - }, - "colorway": [ - "#636efa", - "#EF553B", - "#00cc96", - "#ab63fa", - "#FFA15A", - "#19d3f3", - "#FF6692", - "#B6E880", - "#FF97FF", - "#FECB52" - ], - "font": { - "color": "#2a3f5f" - }, - "geo": { - "bgcolor": "white", - "lakecolor": "white", - "landcolor": "#E5ECF6", - "showlakes": true, - "showland": true, - "subunitcolor": "white" - }, - "hoverlabel": { - "align": "left" - }, - "hovermode": "closest", - "mapbox": { - "style": "light" - }, - "paper_bgcolor": "white", - "plot_bgcolor": "#E5ECF6", - "polar": { - "angularaxis": { - "gridcolor": "white", - "linecolor": "white", - "ticks": "" - }, - "bgcolor": "#E5ECF6", - "radialaxis": { - "gridcolor": "white", - "linecolor": "white", - "ticks": "" - } - }, - "scene": { - "xaxis": { - "backgroundcolor": "#E5ECF6", - "gridcolor": "white", - "gridwidth": 2, - "linecolor": "white", - "showbackground": true, - "ticks": "", - "zerolinecolor": "white" - }, - "yaxis": { - "backgroundcolor": "#E5ECF6", - "gridcolor": "white", - "gridwidth": 2, - "linecolor": "white", - "showbackground": true, - "ticks": "", - "zerolinecolor": "white" - }, - "zaxis": { - "backgroundcolor": "#E5ECF6", - "gridcolor": "white", - "gridwidth": 2, - "linecolor": "white", - "showbackground": true, - "ticks": "", - "zerolinecolor": "white" - } - }, - "shapedefaults": { - "line": { - "color": "#2a3f5f" - } - }, - "ternary": { - "aaxis": { - "gridcolor": "white", - "linecolor": "white", - "ticks": "" - }, - "baxis": { - "gridcolor": "white", - "linecolor": "white", - "ticks": "" - 
}, - "bgcolor": "#E5ECF6", - "caxis": { - "gridcolor": "white", - "linecolor": "white", - "ticks": "" - } - }, - "title": { - "x": 0.05 - }, - "xaxis": { - "automargin": true, - "gridcolor": "white", - "linecolor": "white", - "ticks": "", - "title": { - "standoff": 15 - }, - "zerolinecolor": "white", - "zerolinewidth": 2 - }, - "yaxis": { - "automargin": true, - "gridcolor": "white", - "linecolor": "white", - "ticks": "", - "title": { - "standoff": 15 - }, - "zerolinecolor": "white", - "zerolinewidth": 2 - } - } - }, - "title": { - "text": "Empirical Distribution Function Plot" - }, - "xaxis": { - "title": { - "text": "Objective Value" - } - }, - "yaxis": { - "range": [ - 0, - 1 - ], - "title": { - "text": "Cumulative Probability" - } - } - } - } - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "# Visualize empirical distribution function of the objective.\n", "plot_edf(study)" @@ -11034,107 +286,9 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[2023-04-08T21:36:39.947037+0200][28420][CRITICAL] module disabled: D:\\Personal\\Work\\synthcity\\src\\synthcity\\plugins\\generic\\plugin_goggle.py\n", - "100%|██████████| 100/100 [00:20<00:00, 4.87it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "\u001b[4m\u001b[1mPlugin : test\u001b[0m\u001b[0m\n" - ] - }, - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
minmaxmeanstddevmedianiqrroundserrorsdurations
detection.detection_xgb.mean0.9885060.9885060.9885060.00.9885060.0100.18
detection.detection_mlp.mean0.7036400.7036400.7036400.00.7036400.0103.22
\n", - "
" - ], - "text/plain": [ - " min max mean stddev median \\\n", - "detection.detection_xgb.mean 0.988506 0.988506 0.988506 0.0 0.988506 \n", - "detection.detection_mlp.mean 0.703640 0.703640 0.703640 0.0 0.703640 \n", - "\n", - " iqr rounds errors durations \n", - "detection.detection_xgb.mean 0.0 1 0 0.18 \n", - "detection.detection_mlp.mean 0.0 1 0 3.22 " - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n" - ] - } - ], + "outputs": [], "source": [ "best_params = study.best_params\n", "report = Benchmarks.evaluate(\n", From e20e5813559d57e0be462a3ff5da56264abc77b7 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Mon, 10 Apr 2023 09:15:59 +0200 Subject: [PATCH 74/95] update --- tests/plugins/generic/test_goggle.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/plugins/generic/test_goggle.py b/tests/plugins/generic/test_goggle.py index 1188b20d..840f7c9c 100644 --- a/tests/plugins/generic/test_goggle.py +++ b/tests/plugins/generic/test_goggle.py @@ -19,7 +19,7 @@ plugin_args = { "n_iter": 10, "device": "cpu", - "sampling_patience": 50 + "sampling_patience": 50, } if not is_missing_goggle_deps: From 472ad523a7d26e8c742797a533d54535cc82961f Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Mon, 10 Apr 2023 11:08:32 +0200 Subject: [PATCH 75/95] default cat nonlin of goggle <- gumbel_softmax --- src/synthcity/plugins/core/models/factory.py | 14 +++++++------- .../plugins/core/models/feature_encoder.py | 6 +++++- .../plugins/core/models/tabular_goggle.py | 4 ++-- 3 files changed, 14 insertions(+), 10 deletions(-) diff --git a/src/synthcity/plugins/core/models/factory.py b/src/synthcity/plugins/core/models/factory.py index a34897c3..6f8f5e0c 100644 --- a/src/synthcity/plugins/core/models/factory.py +++ b/src/synthcity/plugins/core/models/factory.py @@ -30,13 +30,13 @@ MODELS = dict( mlp=".mlp.MLP", - # attention models - transformer=".transformer.TransformerModel", - tabnet=".tabnet.TabNet", # rnn models rnn=nn.RNN, gru=nn.GRU, lstm=nn.LSTM, + # attention models + transformer=".transformer.TransformerModel", + tabnet=".tabnet.TabNet", # time series models inceptiontime=InceptionTime, omniscalecnn=OmniScaleCNN, @@ -85,7 +85,7 @@ ) -def _factory(type_: Union[str, type], params: dict, registry: dict) -> Any: +def _get(type_: Union[str, type], params: dict, registry: dict) -> Any: if isinstance(type_, type): return type_(**params) type_ = type_.lower().replace("_", "") @@ -120,13 +120,13 @@ def get_model(block: Union[str, type], params: dict) -> Any: - transformer - tabnet """ - return _factory(block, params, MODELS) + return _get(block, params, MODELS) @validate_arguments(config=dict(arbitrary_types_allowed=True)) def get_nonlin(nonlin: Union[str, nn.Module], params: dict = {}) -> Any: """Get a nonlinearity layer from a name or a class.""" - return _factory(nonlin, params, ACTIVATIONS) + return _get(nonlin, params, ACTIVATIONS) @validate_arguments(config=dict(arbitrary_types_allowed=True)) @@ -146,4 +146,4 @@ def get_feature_encoder(encoder: Union[str, type], params: dict = {}) -> Any: """ if isinstance(encoder, type): # custom encoder encoder = FeatureEncoder.wraps(encoder) - return _factory(encoder, params, FEATURE_ENCODERS) + return _get(encoder, params, FEATURE_ENCODERS) diff --git a/src/synthcity/plugins/core/models/feature_encoder.py b/src/synthcity/plugins/core/models/feature_encoder.py index 70807e31..25c98c01 100644 --- 
a/src/synthcity/plugins/core/models/feature_encoder.py +++ b/src/synthcity/plugins/core/models/feature_encoder.py @@ -11,6 +11,7 @@ LabelEncoder, MinMaxScaler, OneHotEncoder, + OrdinalEncoder, QuantileTransformer, RobustScaler, StandardScaler, @@ -150,7 +151,10 @@ def get_feature_names_out(self) -> List[str]: return WrappedEncoder -OneHotEncoder = FeatureEncoder.wraps(OneHotEncoder, categorical=True) +OneHotEncoder = FeatureEncoder.wraps( + OneHotEncoder, categorical=True, handle_unknown="ignore" +) +OrdinalEncoder = FeatureEncoder.wraps(OrdinalEncoder, categorical=True) LabelEncoder = FeatureEncoder.wraps(LabelEncoder, n_dim_out=1, categorical=True) StandardScaler = FeatureEncoder.wraps(StandardScaler) MinMaxScaler = FeatureEncoder.wraps(MinMaxScaler) diff --git a/src/synthcity/plugins/core/models/tabular_goggle.py b/src/synthcity/plugins/core/models/tabular_goggle.py index 84a1051b..478109b7 100644 --- a/src/synthcity/plugins/core/models/tabular_goggle.py +++ b/src/synthcity/plugins/core/models/tabular_goggle.py @@ -43,7 +43,7 @@ def __init__( decoder_nonlin: str = "relu", encoder_max_clusters: int = 20, encoder_whitelist: list = [], - decoder_nonlin_out_discrete: str = "softmax", + decoder_nonlin_out_discrete: str = "gumbel_softmax", decoder_nonlin_out_continuous: str = "tanh", random_state: int = 0, ): @@ -107,7 +107,7 @@ def __init__( The max number of clusters to create for continuous columns when encoding with TabularEncoder. Defaults to 20. encoder_whitelist: list = [] Ignore columns from encoding with TabularEncoder. Defaults to []. - decoder_nonlin_out_discrete: str = "softmax" + decoder_nonlin_out_discrete: str = "gumbel_softmax" Activation function for discrete columns. Useful with the TabularEncoder. Defaults to "softmax". decoder_nonlin_out_continuous: str = "tanh Activation function for continuous columns. Useful with the TabularEncoder.. Defaults to "tanh". From 5dbe66685fbb6d89cb7c653dce5f595c69d2f603 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Mon, 10 Apr 2023 11:33:27 +0200 Subject: [PATCH 76/95] get_nonlin('softmax') <- GumbelSoftmax() --- src/synthcity/plugins/core/models/factory.py | 4 ++-- src/synthcity/plugins/core/models/tabular_goggle.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/synthcity/plugins/core/models/factory.py b/src/synthcity/plugins/core/models/factory.py index 6f8f5e0c..c4fb7841 100644 --- a/src/synthcity/plugins/core/models/factory.py +++ b/src/synthcity/plugins/core/models/factory.py @@ -56,8 +56,8 @@ selu=nn.SELU, tanh=nn.Tanh, sigmoid=nn.Sigmoid, - softmax=nn.Softmax, - gumbelsoftmax=GumbelSoftmax, + softmax=GumbelSoftmax, + vanilla_softmax=nn.Softmax, gelu=nn.GELU, silu=nn.SiLU, swish=nn.SiLU, diff --git a/src/synthcity/plugins/core/models/tabular_goggle.py b/src/synthcity/plugins/core/models/tabular_goggle.py index 478109b7..84a1051b 100644 --- a/src/synthcity/plugins/core/models/tabular_goggle.py +++ b/src/synthcity/plugins/core/models/tabular_goggle.py @@ -43,7 +43,7 @@ def __init__( decoder_nonlin: str = "relu", encoder_max_clusters: int = 20, encoder_whitelist: list = [], - decoder_nonlin_out_discrete: str = "gumbel_softmax", + decoder_nonlin_out_discrete: str = "softmax", decoder_nonlin_out_continuous: str = "tanh", random_state: int = 0, ): @@ -107,7 +107,7 @@ def __init__( The max number of clusters to create for continuous columns when encoding with TabularEncoder. Defaults to 20. encoder_whitelist: list = [] Ignore columns from encoding with TabularEncoder. Defaults to []. 
- decoder_nonlin_out_discrete: str = "gumbel_softmax" + decoder_nonlin_out_discrete: str = "softmax" Activation function for discrete columns. Useful with the TabularEncoder. Defaults to "softmax". decoder_nonlin_out_continuous: str = "tanh Activation function for continuous columns. Useful with the TabularEncoder.. Defaults to "tanh". From 74e897ba8b3b3c9aa5fe0a0e68a1b255274ec51e Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Mon, 10 Apr 2023 13:14:01 +0200 Subject: [PATCH 77/95] remove debug logging --- src/synthcity/plugins/core/constraints.py | 2 -- src/synthcity/plugins/core/models/tabular_encoder.py | 1 - 2 files changed, 3 deletions(-) diff --git a/src/synthcity/plugins/core/constraints.py b/src/synthcity/plugins/core/constraints.py index be1b2cc8..7660473f 100644 --- a/src/synthcity/plugins/core/constraints.py +++ b/src/synthcity/plugins/core/constraints.py @@ -167,8 +167,6 @@ def filter(self, X: pd.DataFrame) -> pd.DataFrame: log.critical( f"[{feature}] quality loss for constraints {op} = {thresh}. Remaining {res.sum()}. prev length {prev}. Original dtype {X[feature].dtype}.", ) - if res.sum() < 5: - log.critical(str(X[~res])) return res @validate_arguments(config=dict(arbitrary_types_allowed=True)) diff --git a/src/synthcity/plugins/core/models/tabular_encoder.py b/src/synthcity/plugins/core/models/tabular_encoder.py index 54a3d478..45b13f50 100644 --- a/src/synthcity/plugins/core/models/tabular_encoder.py +++ b/src/synthcity/plugins/core/models/tabular_encoder.py @@ -285,7 +285,6 @@ def activation_layout( d = 0 d += 1 out.append((acts[ct], d)) - log.critical(out) return out From 27553e958c5d2e4a73f042628cc50047a374ea3e Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Mon, 10 Apr 2023 13:15:34 +0200 Subject: [PATCH 78/95] update --- src/synthcity/plugins/core/constraints.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/synthcity/plugins/core/constraints.py b/src/synthcity/plugins/core/constraints.py index 7660473f..dc79e56b 100644 --- a/src/synthcity/plugins/core/constraints.py +++ b/src/synthcity/plugins/core/constraints.py @@ -164,7 +164,7 @@ def filter(self, X: pd.DataFrame) -> pd.DataFrame: thresh, ) if res.sum() < prev: - log.critical( + log.info( f"[{feature}] quality loss for constraints {op} = {thresh}. Remaining {res.sum()}. prev length {prev}. 
Original dtype {X[feature].dtype}.", ) return res From 7fc5ce4bb147281e79f8eaad150ffd620dfc4216 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Mon, 10 Apr 2023 13:19:05 +0200 Subject: [PATCH 79/95] update --- tests/plugins/generic/test_goggle.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/plugins/generic/test_goggle.py b/tests/plugins/generic/test_goggle.py index 840f7c9c..9b194ae0 100644 --- a/tests/plugins/generic/test_goggle.py +++ b/tests/plugins/generic/test_goggle.py @@ -19,7 +19,6 @@ plugin_args = { "n_iter": 10, "device": "cpu", - "sampling_patience": 50, } if not is_missing_goggle_deps: From b8c952253c8c9d5a5835ae6def0f8fecb10a4fcc Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Tue, 18 Apr 2023 12:01:37 +0200 Subject: [PATCH 80/95] fix merge --- src/synthcity/plugins/core/distribution.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/synthcity/plugins/core/distribution.py b/src/synthcity/plugins/core/distribution.py index b8df0355..f53ee563 100644 --- a/src/synthcity/plugins/core/distribution.py +++ b/src/synthcity/plugins/core/distribution.py @@ -379,7 +379,6 @@ class DatetimeDistribution(Distribution): :parts: 1 """ - offset: int = 120 low: datetime = datetime.utcfromtimestamp(0) high: datetime = datetime.now() step: timedelta = timedelta(microseconds=1) From ecc9d086232188d1ded0aa9f7168e6cbacb6efdb Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Tue, 18 Apr 2023 12:05:03 +0200 Subject: [PATCH 81/95] fix merge --- src/synthcity/plugins/core/models/tabnet.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/synthcity/plugins/core/models/tabnet.py b/src/synthcity/plugins/core/models/tabnet.py index 0a57c3da..a9a04fe3 100644 --- a/src/synthcity/plugins/core/models/tabnet.py +++ b/src/synthcity/plugins/core/models/tabnet.py @@ -1,7 +1,7 @@ -TabNet: Attentive Interpretable Tabular Learning -Reference: -- https://arxiv.org/pdf/1908.07442.pdf -- https://github.com/dreamquark-ai/tabnet +# TabNet: Attentive Interpretable Tabular Learning +# Reference: +# - https://arxiv.org/pdf/1908.07442.pdf +# - https://github.com/dreamquark-ai/tabnet # stdlib from typing import List, Optional, Tuple From c2775bac31f9fe904b2f90f00a4576b5f8e42ae6 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Thu, 20 Apr 2023 00:29:39 +0200 Subject: [PATCH 82/95] update pip upgrade commands in workflows --- .github/workflows/test_full.yml | 2 +- .github/workflows/test_pr.yml | 2 +- .github/workflows/test_tutorials.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/test_full.yml b/.github/workflows/test_full.yml index e3265692..1b497579 100644 --- a/.github/workflows/test_full.yml +++ b/.github/workflows/test_full.yml @@ -27,8 +27,8 @@ jobs: if: ${{ matrix.os == 'macos-latest' }} - name: Install dependencies run: | + python -m pip install --upgrade pip pip install -r prereq.txt - pip install --upgrade pip - name: Test Core run: | pip install .[testing] diff --git a/.github/workflows/test_pr.yml b/.github/workflows/test_pr.yml index 2c907433..4babfca1 100644 --- a/.github/workflows/test_pr.yml +++ b/.github/workflows/test_pr.yml @@ -54,8 +54,8 @@ jobs: if: ${{ matrix.os == 'macos-latest' }} - name: Install dependencies run: | + python -m pip install --upgrade pip pip install -r prereq.txt - pip install --upgrade pip - name: Test Core run: | pip install .[testing] diff --git a/.github/workflows/test_tutorials.yml b/.github/workflows/test_tutorials.yml index 
212e79e2..bcbb0e1f 100644 --- a/.github/workflows/test_tutorials.yml +++ b/.github/workflows/test_tutorials.yml @@ -32,8 +32,8 @@ jobs: if: ${{ matrix.os == 'macos-latest' }} - name: Install dependencies run: | + python -m pip install --upgrade pip pip install -r prereq.txt - pip install --upgrade pip pip install .[all] From 1d9c7a4fb06abd39674c06e8e8ca10f09bd72a0f Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Thu, 20 Apr 2023 00:33:31 +0200 Subject: [PATCH 83/95] update pip upgrade commands in workflows --- .github/workflows/test_full.yml | 2 +- .github/workflows/test_pr.yml | 2 +- .github/workflows/test_tutorials.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/test_full.yml b/.github/workflows/test_full.yml index e3265692..1b497579 100644 --- a/.github/workflows/test_full.yml +++ b/.github/workflows/test_full.yml @@ -27,8 +27,8 @@ jobs: if: ${{ matrix.os == 'macos-latest' }} - name: Install dependencies run: | + python -m pip install --upgrade pip pip install -r prereq.txt - pip install --upgrade pip - name: Test Core run: | pip install .[testing] diff --git a/.github/workflows/test_pr.yml b/.github/workflows/test_pr.yml index 2c907433..4babfca1 100644 --- a/.github/workflows/test_pr.yml +++ b/.github/workflows/test_pr.yml @@ -54,8 +54,8 @@ jobs: if: ${{ matrix.os == 'macos-latest' }} - name: Install dependencies run: | + python -m pip install --upgrade pip pip install -r prereq.txt - pip install --upgrade pip - name: Test Core run: | pip install .[testing] diff --git a/.github/workflows/test_tutorials.yml b/.github/workflows/test_tutorials.yml index 212e79e2..bcbb0e1f 100644 --- a/.github/workflows/test_tutorials.yml +++ b/.github/workflows/test_tutorials.yml @@ -32,8 +32,8 @@ jobs: if: ${{ matrix.os == 'macos-latest' }} - name: Install dependencies run: | + python -m pip install --upgrade pip pip install -r prereq.txt - pip install --upgrade pip pip install .[all] From 385d2edaed7a3c50ca385e7ef0b96053a2bf2b81 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Thu, 20 Apr 2023 00:52:17 +0200 Subject: [PATCH 84/95] keep pip version to 23.0.1 in workflows --- .github/workflows/test_full.yml | 2 +- .github/workflows/test_pr.yml | 2 +- .github/workflows/test_tutorials.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/test_full.yml b/.github/workflows/test_full.yml index 1b497579..e258db95 100644 --- a/.github/workflows/test_full.yml +++ b/.github/workflows/test_full.yml @@ -27,7 +27,7 @@ jobs: if: ${{ matrix.os == 'macos-latest' }} - name: Install dependencies run: | - python -m pip install --upgrade pip + pip install pip==23.0.1 pip install -r prereq.txt - name: Test Core run: | diff --git a/.github/workflows/test_pr.yml b/.github/workflows/test_pr.yml index 4babfca1..fec1126d 100644 --- a/.github/workflows/test_pr.yml +++ b/.github/workflows/test_pr.yml @@ -54,7 +54,7 @@ jobs: if: ${{ matrix.os == 'macos-latest' }} - name: Install dependencies run: | - python -m pip install --upgrade pip + pip install pip==23.0.1 pip install -r prereq.txt - name: Test Core run: | diff --git a/.github/workflows/test_tutorials.yml b/.github/workflows/test_tutorials.yml index bcbb0e1f..69195a09 100644 --- a/.github/workflows/test_tutorials.yml +++ b/.github/workflows/test_tutorials.yml @@ -32,7 +32,7 @@ jobs: if: ${{ matrix.os == 'macos-latest' }} - name: Install dependencies run: | - python -m pip install --upgrade pip + pip install pip==23.0.1 pip install -r prereq.txt pip install .[all] 
From 81fb12b973c410a3c99e21a5023c13e61dea17b6 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Thu, 20 Apr 2023 00:57:58 +0200 Subject: [PATCH 85/95] keep pip version to 23.0.1 in workflows --- .github/workflows/test_full.yml | 2 +- .github/workflows/test_pr.yml | 2 +- .github/workflows/test_tutorials.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/test_full.yml b/.github/workflows/test_full.yml index 1b497579..e258db95 100644 --- a/.github/workflows/test_full.yml +++ b/.github/workflows/test_full.yml @@ -27,7 +27,7 @@ jobs: if: ${{ matrix.os == 'macos-latest' }} - name: Install dependencies run: | - python -m pip install --upgrade pip + pip install pip==23.0.1 pip install -r prereq.txt - name: Test Core run: | diff --git a/.github/workflows/test_pr.yml b/.github/workflows/test_pr.yml index 4babfca1..fec1126d 100644 --- a/.github/workflows/test_pr.yml +++ b/.github/workflows/test_pr.yml @@ -54,7 +54,7 @@ jobs: if: ${{ matrix.os == 'macos-latest' }} - name: Install dependencies run: | - python -m pip install --upgrade pip + pip install pip==23.0.1 pip install -r prereq.txt - name: Test Core run: | diff --git a/.github/workflows/test_tutorials.yml b/.github/workflows/test_tutorials.yml index bcbb0e1f..69195a09 100644 --- a/.github/workflows/test_tutorials.yml +++ b/.github/workflows/test_tutorials.yml @@ -32,7 +32,7 @@ jobs: if: ${{ matrix.os == 'macos-latest' }} - name: Install dependencies run: | - python -m pip install --upgrade pip + pip install pip==23.0.1 pip install -r prereq.txt pip install .[all] From 3884fc439a7e5f276aa90f69b3b3e50712125e46 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Thu, 20 Apr 2023 11:58:16 +0200 Subject: [PATCH 86/95] update --- .github/workflows/test_full.yml | 2 +- .github/workflows/test_pr.yml | 2 +- .github/workflows/test_tutorials.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/test_full.yml b/.github/workflows/test_full.yml index e258db95..b34df21b 100644 --- a/.github/workflows/test_full.yml +++ b/.github/workflows/test_full.yml @@ -27,7 +27,7 @@ jobs: if: ${{ matrix.os == 'macos-latest' }} - name: Install dependencies run: | - pip install pip==23.0.1 + python -m pip install pip==23.0.1 pip install -r prereq.txt - name: Test Core run: | diff --git a/.github/workflows/test_pr.yml b/.github/workflows/test_pr.yml index fec1126d..527e4c36 100644 --- a/.github/workflows/test_pr.yml +++ b/.github/workflows/test_pr.yml @@ -54,7 +54,7 @@ jobs: if: ${{ matrix.os == 'macos-latest' }} - name: Install dependencies run: | - pip install pip==23.0.1 + python -m pip install pip==23.0.1 pip install -r prereq.txt - name: Test Core run: | diff --git a/.github/workflows/test_tutorials.yml b/.github/workflows/test_tutorials.yml index 69195a09..8a29b3c4 100644 --- a/.github/workflows/test_tutorials.yml +++ b/.github/workflows/test_tutorials.yml @@ -32,7 +32,7 @@ jobs: if: ${{ matrix.os == 'macos-latest' }} - name: Install dependencies run: | - pip install pip==23.0.1 + python -m pip install pip==23.0.1 pip install -r prereq.txt pip install .[all] From 7640f355b5a28611c5ff7c062a64451b917ade81 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Thu, 20 Apr 2023 12:18:51 +0200 Subject: [PATCH 87/95] update --- .github/workflows/test_full.yml | 2 +- .github/workflows/test_pr.yml | 2 +- .github/workflows/test_tutorials.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/test_full.yml 
b/.github/workflows/test_full.yml index b34df21b..0f977ba1 100644 --- a/.github/workflows/test_full.yml +++ b/.github/workflows/test_full.yml @@ -27,7 +27,7 @@ jobs: if: ${{ matrix.os == 'macos-latest' }} - name: Install dependencies run: | - python -m pip install pip==23.0.1 + python -m pip install -U pip pip install -r prereq.txt - name: Test Core run: | diff --git a/.github/workflows/test_pr.yml b/.github/workflows/test_pr.yml index 527e4c36..bac8905e 100644 --- a/.github/workflows/test_pr.yml +++ b/.github/workflows/test_pr.yml @@ -54,7 +54,7 @@ jobs: if: ${{ matrix.os == 'macos-latest' }} - name: Install dependencies run: | - python -m pip install pip==23.0.1 + python -m pip install -U pip pip install -r prereq.txt - name: Test Core run: | diff --git a/.github/workflows/test_tutorials.yml b/.github/workflows/test_tutorials.yml index 8a29b3c4..c93a0c35 100644 --- a/.github/workflows/test_tutorials.yml +++ b/.github/workflows/test_tutorials.yml @@ -32,7 +32,7 @@ jobs: if: ${{ matrix.os == 'macos-latest' }} - name: Install dependencies run: | - python -m pip install pip==23.0.1 + python -m pip install -U pip pip install -r prereq.txt pip install .[all] From c91246b8807bb4c51103f292bf255fe1dfa3f416 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Thu, 20 Apr 2023 12:27:02 +0200 Subject: [PATCH 88/95] update --- prereq.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/prereq.txt b/prereq.txt index 0d7eb1f0..ec51b0a3 100644 --- a/prereq.txt +++ b/prereq.txt @@ -1,3 +1,3 @@ numpy -torch<2.0 +torch>=1.10.0,<2.0 tsai From 38fc7966ad5381b8ad6136ae10fb90b2dcf7e906 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Thu, 20 Apr 2023 12:32:26 +0200 Subject: [PATCH 89/95] update --- .github/workflows/test_pr.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test_pr.yml b/.github/workflows/test_pr.yml index bac8905e..66bd6cc3 100644 --- a/.github/workflows/test_pr.yml +++ b/.github/workflows/test_pr.yml @@ -54,7 +54,7 @@ jobs: if: ${{ matrix.os == 'macos-latest' }} - name: Install dependencies run: | - python -m pip install -U pip + # python -m pip install -U pip pip install -r prereq.txt - name: Test Core run: | From 899a9d83605ce8fb677e5ee3d82f7ee0163077d3 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Thu, 20 Apr 2023 12:44:00 +0200 Subject: [PATCH 90/95] update --- .github/workflows/test_pr.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test_pr.yml b/.github/workflows/test_pr.yml index 66bd6cc3..e98fa172 100644 --- a/.github/workflows/test_pr.yml +++ b/.github/workflows/test_pr.yml @@ -54,7 +54,8 @@ jobs: if: ${{ matrix.os == 'macos-latest' }} - name: Install dependencies run: | - # python -m pip install -U pip + python -m pip install -U pip + pip install --upgrade setuptools, wheel pip install -r prereq.txt - name: Test Core run: | From 60fa08da1b5e5e6ec74093a2bb6adcc33cab0534 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Thu, 20 Apr 2023 12:46:53 +0200 Subject: [PATCH 91/95] update --- .github/workflows/test_pr.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test_pr.yml b/.github/workflows/test_pr.yml index e98fa172..40cc83df 100644 --- a/.github/workflows/test_pr.yml +++ b/.github/workflows/test_pr.yml @@ -55,7 +55,7 @@ jobs: - name: Install dependencies run: | python -m pip install -U pip - pip install --upgrade setuptools, wheel + pip install --upgrade setuptools wheel pip install -r 
prereq.txt - name: Test Core run: | From 50a77c52eae6d6dc5f65b40b27b5393fa91388f4 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Thu, 20 Apr 2023 13:00:42 +0200 Subject: [PATCH 92/95] fix distribution --- src/synthcity/plugins/core/distribution.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/synthcity/plugins/core/distribution.py b/src/synthcity/plugins/core/distribution.py index f53ee563..788db4e0 100644 --- a/src/synthcity/plugins/core/distribution.py +++ b/src/synthcity/plugins/core/distribution.py @@ -384,12 +384,6 @@ class DatetimeDistribution(Distribution): step: timedelta = timedelta(microseconds=1) offset: timedelta = timedelta(seconds=120) - @validator("offset", always=True) - def _validate_offset(cls: Any, v: int) -> int: - if v < 0: - raise ValueError("offset must be greater than 0") - return v - @validator("low", always=True) def _validate_low_thresh(cls: Any, v: datetime, values: Dict) -> datetime: mkey = "marginal_distribution" From 727662f1f5508aca90f413dc019f9a041cfda0ed Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Thu, 20 Apr 2023 17:05:26 +0200 Subject: [PATCH 93/95] update --- src/synthcity/plugins/generic/plugin_ddpm.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/synthcity/plugins/generic/plugin_ddpm.py b/src/synthcity/plugins/generic/plugin_ddpm.py index 5d9f0ac0..8789d7d6 100644 --- a/src/synthcity/plugins/generic/plugin_ddpm.py +++ b/src/synthcity/plugins/generic/plugin_ddpm.py @@ -183,8 +183,8 @@ def hyperparameter_space(**kwargs: Any) -> List[Distribution]: IntLogDistribution(name="batch_size", low=256, high=4096), IntegerDistribution(name="num_timesteps", low=10, high=1000), IntLogDistribution(name="n_iter", low=1000, high=10000), - IntegerDistribution(name="n_layers_hidden", low=2, high=8), - IntLogDistribution(name="dim_hidden", low=128, high=1024), + # IntegerDistribution(name="n_layers_hidden", low=2, high=8), + # IntLogDistribution(name="dim_hidden", low=128, high=1024), ] def _fit(self, X: DataLoader, *args: Any, **kwargs: Any) -> "TabDDPMPlugin": From 212d7cb3c9bcbd550eaf78739c0df98c6cb9925b Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Mon, 24 Apr 2023 09:19:24 +0200 Subject: [PATCH 94/95] move upgrading of wheel to prereq.txt --- .github/workflows/test_pr.yml | 1 - prereq.txt | 3 ++- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test_pr.yml b/.github/workflows/test_pr.yml index 40cc83df..bac8905e 100644 --- a/.github/workflows/test_pr.yml +++ b/.github/workflows/test_pr.yml @@ -55,7 +55,6 @@ jobs: - name: Install dependencies run: | python -m pip install -U pip - pip install --upgrade setuptools wheel pip install -r prereq.txt - name: Test Core run: | diff --git a/prereq.txt b/prereq.txt index ec51b0a3..125d1b13 100644 --- a/prereq.txt +++ b/prereq.txt @@ -1,3 +1,4 @@ +wheel>=0.40 numpy torch>=1.10.0,<2.0 -tsai +tsai \ No newline at end of file From d8e63c3537f2c8724d23de2615efab6db5d5d034 Mon Sep 17 00:00:00 2001 From: TZCai <13818704679@163.com> Date: Mon, 24 Apr 2023 14:13:02 +0200 Subject: [PATCH 95/95] update --- prereq.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/prereq.txt b/prereq.txt index 125d1b13..6554078b 100644 --- a/prereq.txt +++ b/prereq.txt @@ -1,4 +1,4 @@ -wheel>=0.40 numpy torch>=1.10.0,<2.0 -tsai \ No newline at end of file +tsai +wheel>=0.40
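
A minimal end-to-end sketch of the TabDDPM plugin that this series adds. This is illustrative only: it assumes the plugin registers under the name "ddpm" (per plugin_ddpm.py) and follows the standard synthcity Plugins workflow; the diabetes dataset is just a stand-in for any tabular DataFrame.

    # Sketch under the assumptions above; n_iter, batch_size and
    # num_timesteps mirror the tunables in TabDDPMPlugin.hyperparameter_space.
    from sklearn.datasets import load_diabetes

    from synthcity.plugins import Plugins

    # Load a small tabular dataset as a pandas DataFrame.
    X, y = load_diabetes(return_X_y=True, as_frame=True)
    X["target"] = y

    # Instantiate the generator through the plugin registry.
    model = Plugins().get("ddpm", n_iter=1000, batch_size=1024, num_timesteps=100)

    # Fit on the real data, then sample a synthetic frame.
    model.fit(X)
    synthetic = model.generate(count=100)
    print(synthetic.dataframe().head())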