diff --git a/examples/lazy_booster_classification.py b/examples/lazy_booster_classification.py
index 3895f03..ee52b45 100644
--- a/examples/lazy_booster_classification.py
+++ b/examples/lazy_booster_classification.py
@@ -18,7 +18,7 @@
 X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .2,
                                                     random_state = 13)
 
-clf = ms.LazyBoostingClassifier(verbose=0, ignore_warnings=True,
+clf = ms.LazyBoostingClassifier(verbose=0, ignore_warnings=True, #n_jobs=2,
                                 custom_metric=None, preprocess=False)
 
 start = time()
diff --git a/examples/lazy_booster_regression.py b/examples/lazy_booster_regression.py
index c37b9ec..86579c6 100644
--- a/examples/lazy_booster_regression.py
+++ b/examples/lazy_booster_regression.py
@@ -11,7 +11,7 @@
 y = data.target
 X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .2, random_state = 123)
 
-regr = ms.LazyBoostingRegressor(verbose=0, ignore_warnings=True,
+regr = ms.LazyBoostingRegressor(verbose=0, ignore_warnings=True, n_jobs=2,
                                 custom_metric=None, preprocess=True)
 models, predictions = regr.fit(X_train, X_test, y_train, y_test)
 model_dictionary = regr.provide_models(X_train, X_test, y_train, y_test)
diff --git a/mlsauce/lazybooster/lazyboosterclassif.py b/mlsauce/lazybooster/lazyboosterclassif.py
index 1d3ff15..a82cde3 100644
--- a/mlsauce/lazybooster/lazyboosterclassif.py
+++ b/mlsauce/lazybooster/lazyboosterclassif.py
@@ -4,6 +4,7 @@
 from sklearn.ensemble import RandomForestClassifier
 from copy import deepcopy
 from functools import partial
+from joblib import Parallel, delayed
 from tqdm import tqdm
 import time
 
@@ -351,173 +352,218 @@ def fit(self, X_train, X_test, y_train, y_test, **kwargs):
         ]
 
         if self.preprocess is True:
-            for name, model in tqdm(self.classifiers):  # do parallel exec
-
-                other_args = (
-                    {}
-                )  # use this trick for `random_state` too --> refactor
-                try:
-                    if (
-                        "n_jobs" in model().get_params().keys()
-                        and name.find("LogisticRegression") == -1
-                    ):
-                        other_args["n_jobs"] = self.n_jobs
-                except Exception:
-                    pass
-
-                start = time.time()
-
-                try:
-                    if "random_state" in model().get_params().keys():
-                        fitted_clf = GenericBoostingClassifier(
-                            {**other_args, **kwargs},
-                            verbose=self.verbose,
-                            base_model=model(random_state=self.random_state),
-                        )
-
-                    else:
-                        fitted_clf = GenericBoostingClassifier(
-                            base_model=model(**kwargs),
-                            verbose=self.verbose,
-                        )
-
-                    if self.verbose > 0:
-                        print("\n Fitting boosted " + name + " model...")
-                    fitted_clf.fit(X_train, y_train)
-
-                    pipe = Pipeline(
-                        [
-                            ("preprocessor", preprocessor),
-                            ("classifier", fitted_clf),
-                        ]
-                    )
-
-                    if self.verbose > 0:
-                        print("\n Fitting boosted " + name + " model...")
-                    pipe.fit(X_train, y_train)
-                    self.models_[name] = pipe
-                    y_pred = pipe.predict(X_test)
-                    accuracy = accuracy_score(y_test, y_pred, normalize=True)
-                    b_accuracy = balanced_accuracy_score(y_test, y_pred)
-                    f1 = f1_score(y_test, y_pred, average="weighted")
-                    try:
-                        roc_auc = roc_auc_score(y_test, y_pred)
-                    except Exception as exception:
-                        roc_auc = None
-                        if self.ignore_warnings is False:
-                            print("ROC AUC couldn't be calculated for " + name)
-                            print(exception)
-                    names.append(name)
-                    Accuracy.append(accuracy)
-                    B_Accuracy.append(b_accuracy)
-                    ROC_AUC.append(roc_auc)
-                    F1.append(f1)
-                    TIME.append(time.time() - start)
-                    if self.custom_metric is not None:
-                        custom_metric = self.custom_metric(y_test, y_pred)
-                        CUSTOM_METRIC.append(custom_metric)
-                    if self.verbose > 0:
-                        if self.custom_metric is not None:
-                            print(
-                                {
-                                    "Model": name,
-                                    "Accuracy": accuracy,
-                                    "Balanced Accuracy": b_accuracy,
-                                    "ROC AUC": roc_auc,
-                                    "F1 Score": f1,
-                                    self.custom_metric.__name__: custom_metric,
-                                    "Time taken": time.time() - start,
-                                }
-                            )
-                        else:
-                            print(
-                                {
-                                    "Model": name,
-                                    "Accuracy": accuracy,
-                                    "Balanced Accuracy": b_accuracy,
-                                    "ROC AUC": roc_auc,
-                                    "F1 Score": f1,
-                                    "Time taken": time.time() - start,
-                                }
-                            )
-                    if self.predictions:
-                        predictions[name] = y_pred
-                except Exception as exception:
-                    if self.ignore_warnings is False:
-                        print(name + " model failed to execute")
-                        print(exception)
+
+            if self.n_jobs is None:
+
+                for name, model in tqdm(self.classifiers):  # sequential exec
+
+                    other_args = (
+                        {}
+                    )  # use this trick for `random_state` too --> refactor
+                    try:
+                        if (
+                            "n_jobs" in model().get_params().keys()
+                            and name.find("LogisticRegression") == -1
+                        ):
+                            other_args["n_jobs"] = self.n_jobs
+                    except Exception:
+                        pass
+
+                    start = time.time()
+
+                    try:
+                        if "random_state" in model().get_params().keys():
+                            fitted_clf = GenericBoostingClassifier(
+                                {**other_args, **kwargs},
+                                verbose=self.verbose,
+                                base_model=model(random_state=self.random_state),
+                            )
+
+                        else:
+                            fitted_clf = GenericBoostingClassifier(
+                                base_model=model(**kwargs),
+                                verbose=self.verbose,
+                            )
+
+                        if self.verbose > 0:
+                            print("\n Fitting boosted " + name + " model...")
+                        fitted_clf.fit(X_train, y_train)
+
+                        pipe = Pipeline(
+                            [
+                                ("preprocessor", preprocessor),
+                                ("classifier", fitted_clf),
+                            ]
+                        )
+
+                        if self.verbose > 0:
+                            print("\n Fitting boosted " + name + " model...")
+                        pipe.fit(X_train, y_train)
+                        self.models_[name] = pipe
+                        y_pred = pipe.predict(X_test)
+                        accuracy = accuracy_score(y_test, y_pred, normalize=True)
+                        b_accuracy = balanced_accuracy_score(y_test, y_pred)
+                        f1 = f1_score(y_test, y_pred, average="weighted")
+                        try:
+                            roc_auc = roc_auc_score(y_test, y_pred)
+                        except Exception as exception:
+                            roc_auc = None
+                            if self.ignore_warnings is False:
+                                print("ROC AUC couldn't be calculated for " + name)
+                                print(exception)
+                        names.append(name)
+                        Accuracy.append(accuracy)
+                        B_Accuracy.append(b_accuracy)
+                        ROC_AUC.append(roc_auc)
+                        F1.append(f1)
+                        TIME.append(time.time() - start)
+                        if self.custom_metric is not None:
+                            custom_metric = self.custom_metric(y_test, y_pred)
+                            CUSTOM_METRIC.append(custom_metric)
+                        if self.verbose > 0:
+                            if self.custom_metric is not None:
+                                print(
+                                    {
+                                        "Model": name,
+                                        "Accuracy": accuracy,
+                                        "Balanced Accuracy": b_accuracy,
+                                        "ROC AUC": roc_auc,
+                                        "F1 Score": f1,
+                                        self.custom_metric.__name__: custom_metric,
+                                        "Time taken": time.time() - start,
+                                    }
+                                )
+                            else:
+                                print(
+                                    {
+                                        "Model": name,
+                                        "Accuracy": accuracy,
+                                        "Balanced Accuracy": b_accuracy,
+                                        "ROC AUC": roc_auc,
+                                        "F1 Score": f1,
+                                        "Time taken": time.time() - start,
+                                    }
+                                )
+                        if self.predictions:
+                            predictions[name] = y_pred
+                    except Exception as exception:
+                        if self.ignore_warnings is False:
+                            print(name + " model failed to execute")
+                            print(exception)
+
+            else:
+
+                # train_model(self, name, model, X_train, y_train, X_test, y_test,
+                #             use_preprocessing=False, preprocessor=None,
+                #             **kwargs):
+                results = Parallel(n_jobs=self.n_jobs)(delayed(self.train_model)(
+                    name, model, X_train, y_train, X_test, y_test,
+                    use_preprocessing=True, preprocessor=preprocessor, **kwargs
+                ) for name, model in tqdm(self.classifiers)
+                )
+                Accuracy = [res["accuracy"] for res in results if res is not None]
+                B_Accuracy = [res["balanced_accuracy"] for res in results if res is not None]
+                ROC_AUC = [res["roc_auc"] for res in results if res is not None]
+                F1 = [res["f1"] for res in results if res is not None]
+                names = [res["name"] for res in results if res is not None]
+                TIME = [res["time"] for res in results if res is not None]
+                if self.custom_metric is not None:
+                    CUSTOM_METRIC = [res["custom_metric"] for res in results if res is not None]
+                if self.predictions:
+                    predictions = {res["name"]: res["predictions"] for res in results if res is not None}
 
         else:  # no preprocessing
-            for name, model in tqdm(self.classifiers):  # do parallel exec
-                start = time.time()
-                try:
-                    if "random_state" in model().get_params().keys():
-                        fitted_clf = GenericBoostingClassifier(
-                            base_model=model(random_state=self.random_state),
-                            verbose=self.verbose,
-                            **kwargs
-                        )
-
-                    else:
-                        fitted_clf = GenericBoostingClassifier(
-                            base_model=model(), verbose=self.verbose, **kwargs
-                        )
-
-                    fitted_clf.fit(X_train, y_train)
-                    self.models_[name] = fitted_clf
-                    y_pred = fitted_clf.predict(X_test)
-                    accuracy = accuracy_score(y_test, y_pred, normalize=True)
-                    b_accuracy = balanced_accuracy_score(y_test, y_pred)
-                    f1 = f1_score(y_test, y_pred, average="weighted")
-                    try:
-                        roc_auc = roc_auc_score(y_test, y_pred)
-                    except Exception as exception:
-                        roc_auc = None
-                        if self.ignore_warnings is False:
-                            print("ROC AUC couldn't be calculated for " + name)
-                            print(exception)
-                    names.append(name)
-                    Accuracy.append(accuracy)
-                    B_Accuracy.append(b_accuracy)
-                    ROC_AUC.append(roc_auc)
-                    F1.append(f1)
-                    TIME.append(time.time() - start)
-                    if self.custom_metric is not None:
-                        custom_metric = self.custom_metric(y_test, y_pred)
-                        CUSTOM_METRIC.append(custom_metric)
-                    if self.verbose > 0:
-                        if self.custom_metric is not None:
-                            print(
-                                {
-                                    "Model": name,
-                                    "Accuracy": accuracy,
-                                    "Balanced Accuracy": b_accuracy,
-                                    "ROC AUC": roc_auc,
-                                    "F1 Score": f1,
-                                    self.custom_metric.__name__: custom_metric,
-                                    "Time taken": time.time() - start,
-                                }
-                            )
-                        else:
-                            print(
-                                {
-                                    "Model": name,
-                                    "Accuracy": accuracy,
-                                    "Balanced Accuracy": b_accuracy,
-                                    "ROC AUC": roc_auc,
-                                    "F1 Score": f1,
-                                    "Time taken": time.time() - start,
-                                }
-                            )
-                    if self.predictions:
-                        predictions[name] = y_pred
-                except Exception as exception:
-                    if self.ignore_warnings is False:
-                        print(name + " model failed to execute")
-                        print(exception)
+
+            if self.n_jobs is None:
+
+                for name, model in tqdm(self.classifiers):  # sequential exec
+                    start = time.time()
+                    try:
+                        if "random_state" in model().get_params().keys():
+                            fitted_clf = GenericBoostingClassifier(
+                                base_model=model(random_state=self.random_state),
+                                verbose=self.verbose,
+                                **kwargs
+                            )
+
+                        else:
+                            fitted_clf = GenericBoostingClassifier(
+                                base_model=model(), verbose=self.verbose, **kwargs
+                            )
+
+                        fitted_clf.fit(X_train, y_train)
+
+                        self.models_[name] = fitted_clf
+                        y_pred = fitted_clf.predict(X_test)
+                        accuracy = accuracy_score(y_test, y_pred, normalize=True)
+                        b_accuracy = balanced_accuracy_score(y_test, y_pred)
+                        f1 = f1_score(y_test, y_pred, average="weighted")
+                        try:
+                            roc_auc = roc_auc_score(y_test, y_pred)
+                        except Exception as exception:
+                            roc_auc = None
+                            if self.ignore_warnings is False:
+                                print("ROC AUC couldn't be calculated for " + name)
+                                print(exception)
+                        names.append(name)
+                        Accuracy.append(accuracy)
+                        B_Accuracy.append(b_accuracy)
+                        ROC_AUC.append(roc_auc)
+                        F1.append(f1)
+                        TIME.append(time.time() - start)
+                        if self.custom_metric is not None:
+                            custom_metric = self.custom_metric(y_test, y_pred)
+                            CUSTOM_METRIC.append(custom_metric)
+                        if self.verbose > 0:
+                            if self.custom_metric is not None:
+                                print(
+                                    {
+                                        "Model": name,
+                                        "Accuracy": accuracy,
+                                        "Balanced Accuracy": b_accuracy,
+                                        "ROC AUC": roc_auc,
+                                        "F1 Score": f1,
+                                        self.custom_metric.__name__: custom_metric,
+                                        "Time taken": time.time() - start,
+                                    }
+                                )
+                            else:
+                                print(
+                                    {
+                                        "Model": name,
+                                        "Accuracy": accuracy,
+                                        "Balanced Accuracy": b_accuracy,
+                                        "ROC AUC": roc_auc,
+                                        "F1 Score": f1,
+                                        "Time taken": time.time() - start,
+                                    }
+                                )
+                        if self.predictions:
+                            predictions[name] = y_pred
+                    except Exception as exception:
+                        if self.ignore_warnings is False:
+                            print(name + " model failed to execute")
+                            print(exception)
+
+            else:
+
+                results = Parallel(n_jobs=self.n_jobs)(delayed(self.train_model)(
+                    name, model, X_train, y_train, X_test, y_test,
+                    use_preprocessing=False, **kwargs
+                ) for name, model in tqdm(self.classifiers)
+                )
+                Accuracy = [res["accuracy"] for res in results if res is not None]
+                B_Accuracy = [res["balanced_accuracy"] for res in results if res is not None]
+                ROC_AUC = [res["roc_auc"] for res in results if res is not None]
+                F1 = [res["f1"] for res in results if res is not None]
+                names = [res["name"] for res in results if res is not None]
+                TIME = [res["time"] for res in results if res is not None]
+                if self.custom_metric is not None:
+                    CUSTOM_METRIC = [res["custom_metric"] for res in results if res is not None]
+                if self.predictions:
+                    predictions = {res["name"]: res["predictions"] for res in results if res is not None}
 
         if self.custom_metric is None:
             scores = pd.DataFrame(
@@ -596,3 +642,90 @@ def provide_models(self, X_train, X_test, y_train, y_test):
             self.fit(X_train, X_test, y_train, y_test)
 
         return self.models_
+
+
+    def train_model(self, name, model, X_train, y_train, X_test, y_test,
+                    use_preprocessing=False, preprocessor=None,
+                    **kwargs):
+        """
+        Function to train a single model and return its results.
+        """
+        other_args = {}
+
+        # Handle n_jobs parameter
+        try:
+            if "n_jobs" in model().get_params().keys() and "LogisticRegression" not in name:
+                other_args["n_jobs"] = self.n_jobs
+        except Exception:
+            pass
+
+        start = time.time()
+
+        try:
+            # Handle random_state parameter
+            if "random_state" in model().get_params().keys():
+                fitted_clf = GenericBoostingClassifier(
+                    {**other_args, **kwargs},
+                    verbose=self.verbose,
+                    base_model=model(random_state=self.random_state),
+                )
+            else:
+                fitted_clf = GenericBoostingClassifier(
+                    base_model=model(**kwargs),
+                    verbose=self.verbose,
+                )
+
+            if self.verbose > 0:
+                print("\n Fitting boosted " + name + " model...")
+
+            fitted_clf.fit(X_train, y_train)
+
+            if use_preprocessing and preprocessor is not None:
+                pipe = Pipeline(
+                    [
+                        ("preprocessor", preprocessor),
+                        ("classifier", fitted_clf),
+                    ]
+                )
+                if self.verbose > 0:
+                    print("\n Fitting pipeline with preprocessing for " + name + " model...")
+                pipe.fit(X_train, y_train)
+                y_pred = pipe.predict(X_test)
+            else:
+                # Case with no preprocessing
+                if self.verbose > 0:
+                    print("\n Fitting model without preprocessing for " + name + " model...")
+                y_pred = fitted_clf.predict(X_test)
+
+            accuracy = accuracy_score(y_test, y_pred, normalize=True)
+            b_accuracy = balanced_accuracy_score(y_test, y_pred)
+            f1 = f1_score(y_test, y_pred, average="weighted")
+            roc_auc = None
+
+            try:
+                roc_auc = roc_auc_score(y_test, y_pred)
+            except Exception as exception:
+                if self.ignore_warnings is False:
+                    print("ROC AUC couldn't be calculated for " + name)
+                    print(exception)
+
+            custom_metric = None
+            if self.custom_metric is not None:
+                custom_metric = self.custom_metric(y_test, y_pred)
+
+            return {
+                "name": name,
+                "model": fitted_clf if not use_preprocessing else pipe,
+                "accuracy": accuracy,
+                "balanced_accuracy": b_accuracy,
+                "roc_auc": roc_auc,
+                "f1": f1,
+                "custom_metric": custom_metric,
+                "time": time.time() - start,
+                "predictions": y_pred,
+            }
+        except Exception as exception:
+            if self.ignore_warnings is False:
+                print(name + " model failed to execute")
+                print(exception)
+            return None
\ No newline at end of file
diff --git a/mlsauce/lazybooster/lazyboosterregression.py b/mlsauce/lazybooster/lazyboosterregression.py
index a50ddb8..4304ee0 100644
--- a/mlsauce/lazybooster/lazyboosterregression.py
+++ b/mlsauce/lazybooster/lazyboosterregression.py
@@ -10,6 +10,7 @@
 from functools import partial
 from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
 from copy import deepcopy
+from joblib import Parallel, delayed
 from tqdm import tqdm
 from sklearn.utils import all_estimators
 from sklearn.pipeline import Pipeline
@@ -19,7 +20,7 @@
 from sklearn.base import RegressorMixin
 from sklearn.metrics import (
     r2_score,
-    mean_squared_error,
+    root_mean_squared_error,
 )
 from .config import REGRESSORS
 from ..booster import GenericBoostingRegressor
@@ -333,121 +334,170 @@ def fit(self, X_train, X_test, y_train, y_test, **kwargs):
         ]
 
         if self.preprocess is True:
-            for name, regr in tqdm(self.regressors):  # do parallel exec
-
-                start = time.time()
-
-                try:
-
-                    model = GenericBoostingRegressor(
-                        base_model=regr(), verbose=self.verbose, **kwargs
-                    )
-
-                    model.fit(X_train, y_train)
-
-                    pipe = Pipeline(
-                        steps=[
-                            ("preprocessor", preprocessor),
-                            ("regressor", model),
-                        ]
-                    )
-                    if self.verbose > 0:
-                        print("\n Fitting boosted " + name + " model...")
-                    pipe.fit(X_train, y_train)
-
-                    self.models_[name] = pipe
-                    y_pred = pipe.predict(X_test)
-                    r_squared = r2_score(y_test, y_pred)
-                    adj_rsquared = adjusted_rsquared(
-                        r_squared, X_test.shape[0], X_test.shape[1]
-                    )
-                    rmse = mean_squared_error(y_test, y_pred, squared=False)
-
-                    names.append(name)
-                    R2.append(r_squared)
-                    ADJR2.append(adj_rsquared)
-                    RMSE.append(rmse)
-                    TIME.append(time.time() - start)
-
-                    if self.custom_metric:
-                        custom_metric = self.custom_metric(y_test, y_pred)
-                        CUSTOM_METRIC.append(custom_metric)
-
-                    if self.verbose > 0:
-                        scores_verbose = {
-                            "Model": name,
-                            "R-Squared": r_squared,
-                            "Adjusted R-Squared": adj_rsquared,
-                            "RMSE": rmse,
-                            "Time taken": time.time() - start,
-                        }
-
-                        if self.custom_metric:
-                            scores_verbose["Custom metric"] = custom_metric
-
-                        print(scores_verbose)
-                    if self.predictions:
-                        predictions[name] = y_pred
-
-                except Exception as exception:
-                    if self.ignore_warnings is False:
-                        print(name + " model failed to execute")
-                        print(exception)
+
+            if self.n_jobs is None:
+
+                for name, regr in tqdm(self.regressors):  # sequential exec
+
+                    start = time.time()
+
+                    try:
+
+                        model = GenericBoostingRegressor(
+                            base_model=regr(), verbose=self.verbose, **kwargs
+                        )
+
+                        model.fit(X_train, y_train)
+
+                        pipe = Pipeline(
+                            steps=[
+                                ("preprocessor", preprocessor),
+                                ("regressor", model),
+                            ]
+                        )
+                        if self.verbose > 0:
+                            print("\n Fitting boosted " + name + " model...")
+                        pipe.fit(X_train, y_train)
+
+                        self.models_[name] = pipe
+                        y_pred = pipe.predict(X_test)
+                        r_squared = r2_score(y_test, y_pred)
+                        adj_rsquared = adjusted_rsquared(
+                            r_squared, X_test.shape[0], X_test.shape[1]
+                        )
+                        rmse = root_mean_squared_error(y_test, y_pred)
+
+                        names.append(name)
+                        R2.append(r_squared)
+                        ADJR2.append(adj_rsquared)
+                        RMSE.append(rmse)
+                        TIME.append(time.time() - start)
+
+                        if self.custom_metric:
+                            custom_metric = self.custom_metric(y_test, y_pred)
+                            CUSTOM_METRIC.append(custom_metric)
+
+                        if self.verbose > 0:
+                            scores_verbose = {
+                                "Model": name,
+                                "R-Squared": r_squared,
+                                "Adjusted R-Squared": adj_rsquared,
+                                "RMSE": rmse,
+                                "Time taken": time.time() - start,
+                            }
+
+                            if self.custom_metric:
+                                scores_verbose["Custom metric"] = custom_metric
+
+                            print(scores_verbose)
+                        if self.predictions:
+                            predictions[name] = y_pred
+
+                    except Exception as exception:
+
+                        if self.ignore_warnings is False:
+                            print(name + " model failed to execute")
+                            print(exception)
+
+            else:
+
+                results = Parallel(n_jobs=self.n_jobs)(delayed(self.train_model)(
+                    name, model, X_train, y_train, X_test, y_test,
+                    use_preprocessing=True, preprocessor=preprocessor, **kwargs
+                ) for name, model in tqdm(self.regressors)
+                )
+                R2 = [result["r_squared"] for result in results if result is not None]
+                ADJR2 = [result["adj_rsquared"] for result in results if result is not None]
+                RMSE = [result["rmse"] for result in results if result is not None]
+                TIME = [result["time"] for result in results if result is not None]
+                names = [result["name"] for result in results if result is not None]
+                if self.custom_metric:
+                    CUSTOM_METRIC = [
+                        result["custom_metric"] for result in results if result is not None
+                    ]
+                if self.predictions:
+                    predictions = {
+                        result["name"]: result["predictions"] for result in results if result is not None
+                    }
 
         else:  # self.preprocess is False; no preprocessing
-            for name, regr in tqdm(self.regressors):  # do parallel exec
-                start = time.time()
-                try:
-
-                    model = GenericBoostingRegressor(
-                        base_model=regr(), verbose=self.verbose, **kwargs
-                    )
-
-                    if self.verbose > 0:
-                        print("\n Fitting boosted " + name + " model...")
-                    model.fit(X_train, y_train)
-
-                    self.models_[name] = model
-                    y_pred = model.predict(X_test)
-
-                    r_squared = r2_score(y_test, y_pred)
-                    adj_rsquared = adjusted_rsquared(
-                        r_squared, X_test.shape[0], X_test.shape[1]
-                    )
-                    rmse = mean_squared_error(y_test, y_pred, squared=False)
-
-                    names.append(name)
-                    R2.append(r_squared)
-                    ADJR2.append(adj_rsquared)
-                    RMSE.append(rmse)
-                    TIME.append(time.time() - start)
-
-                    if self.custom_metric:
-                        custom_metric = self.custom_metric(y_test, y_pred)
-                        CUSTOM_METRIC.append(custom_metric)
-
-                    if self.verbose > 0:
-                        scores_verbose = {
-                            "Model": name,
-                            "R-Squared": r_squared,
-                            "Adjusted R-Squared": adj_rsquared,
-                            "RMSE": rmse,
-                            "Time taken": time.time() - start,
-                        }
-
-                        if self.custom_metric:
-                            scores_verbose["Custom metric"] = custom_metric
-
-                        print(scores_verbose)
-                    if self.predictions:
-                        predictions[name] = y_pred
-                except Exception as exception:
-                    if self.ignore_warnings is False:
-                        print(name + " model failed to execute")
-                        print(exception)
+
+            if self.n_jobs is None:
+
+                for name, regr in tqdm(self.regressors):  # sequential exec
+                    start = time.time()
+                    try:
+
+                        model = GenericBoostingRegressor(
+                            base_model=regr(), verbose=self.verbose, **kwargs
+                        )
+
+                        if self.verbose > 0:
+                            print("\n Fitting boosted " + name + " model...")
+                        model.fit(X_train, y_train)
+
+                        self.models_[name] = model
+                        y_pred = model.predict(X_test)
+
+                        r_squared = r2_score(y_test, y_pred)
+                        adj_rsquared = adjusted_rsquared(
+                            r_squared, X_test.shape[0], X_test.shape[1]
+                        )
+                        rmse = root_mean_squared_error(y_test, y_pred)
+
+                        names.append(name)
+                        R2.append(r_squared)
+                        ADJR2.append(adj_rsquared)
+                        RMSE.append(rmse)
+                        TIME.append(time.time() - start)
+
+                        if self.custom_metric:
+                            custom_metric = self.custom_metric(y_test, y_pred)
+                            CUSTOM_METRIC.append(custom_metric)
+
+                        if self.verbose > 0:
+                            scores_verbose = {
+                                "Model": name,
+                                "R-Squared": r_squared,
+                                "Adjusted R-Squared": adj_rsquared,
+                                "RMSE": rmse,
+                                "Time taken": time.time() - start,
+                            }
+
+                            if self.custom_metric:
+                                scores_verbose["Custom metric"] = custom_metric
+
+                            print(scores_verbose)
+                        if self.predictions:
+                            predictions[name] = y_pred
+                    except Exception as exception:
+                        if self.ignore_warnings is False:
+                            print(name + " model failed to execute")
+                            print(exception)
+
+            else:
+
+                results = Parallel(n_jobs=self.n_jobs)(delayed(self.train_model)(
+                    name, model, X_train, y_train, X_test, y_test,
+                    use_preprocessing=False, **kwargs
+                ) for name, model in tqdm(self.regressors)
+                )
+                R2 = [result["r_squared"] for result in results if result is not None]
+                ADJR2 = [result["adj_rsquared"] for result in results if result is not None]
+                RMSE = [result["rmse"] for result in results if result is not None]
+                TIME = [result["time"] for result in results if result is not None]
+                names = [result["name"] for result in results if result is not None]
+                if self.custom_metric:
+                    CUSTOM_METRIC = [
+                        result["custom_metric"] for result in results if result is not None
+                    ]
+                if self.predictions:
+                    predictions = {
+                        result["name"]: result["predictions"] for result in results if result is not None
+                    }
 
         scores = {
             "Model": names,
@@ -517,3 +567,59 @@ def provide_models(self, X_train, X_test, y_train, y_test):
             self.fit(X_train, X_test, y_train, y_test)
 
         return self.models_
+
+    def train_model(self, name, regr, X_train, y_train, X_test, y_test,
+                    use_preprocessing=False, preprocessor=None, **kwargs):
+        """
+        Function to train a single regression model and return its results.
+        """
+        start = time.time()
+
+        try:
+            model = GenericBoostingRegressor(base_model=regr(), verbose=self.verbose, **kwargs)
+
+            if use_preprocessing and preprocessor is not None:
+                pipe = Pipeline(
+                    steps=[
+                        ("preprocessor", preprocessor),
+                        ("regressor", model),
+                    ]
+                )
+                if self.verbose > 0:
+                    print("\n Fitting boosted " + name + " model with preprocessing...")
+                pipe.fit(X_train, y_train)
+                y_pred = pipe.predict(X_test)
+                fitted_model = pipe
+            else:
+                # Case with no preprocessing
+                if self.verbose > 0:
+                    print("\n Fitting boosted " + name + " model without preprocessing...")
+                model.fit(X_train, y_train)
+                y_pred = model.predict(X_test)
+                fitted_model = model
+
+            r_squared = r2_score(y_test, y_pred)
+            adj_rsquared = adjusted_rsquared(r_squared, X_test.shape[0], X_test.shape[1])
+            rmse = root_mean_squared_error(y_test, y_pred)
+
+            custom_metric = None
+            if self.custom_metric:
+                custom_metric = self.custom_metric(y_test, y_pred)
+
+            return {
+                "name": name,
+                "model": fitted_model,
+                "r_squared": r_squared,
+                "adj_rsquared": adj_rsquared,
+                "rmse": rmse,
+                "custom_metric": custom_metric,
+                "time": time.time() - start,
+                "predictions": y_pred,
+            }
+
+        except Exception as exception:
+            if self.ignore_warnings is False:
+                print(name + " model failed to execute")
+                print(exception)
+            return None
+
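
For context, a minimal usage sketch of the new n_jobs switch, modeled on examples/lazy_booster_regression.py above. The dataset loading shown here (load_diabetes) is an assumption, since the hunk starts below the script's imports; only the LazyBoostingRegressor call mirrors the patched example.

    import mlsauce as ms
    from sklearn.datasets import load_diabetes
    from sklearn.model_selection import train_test_split

    data = load_diabetes()
    X = data.data
    y = data.target
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2,
                                                        random_state=123)

    # n_jobs=None keeps the original sequential tqdm loop;
    # an integer routes fit() through the new joblib.Parallel path.
    regr = ms.LazyBoostingRegressor(verbose=0, ignore_warnings=True, n_jobs=2,
                                    custom_metric=None, preprocess=True)
    models, predictions = regr.fit(X_train, X_test, y_train, y_test)
    print(models)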