diff --git a/.github/meta.yaml b/.github/meta.yaml
index 287d81b007..26b4c2fd3a 100644
--- a/.github/meta.yaml
+++ b/.github/meta.yaml
@@ -52,7 +52,7 @@ outputs:
       imports:
         - evalml
       requires:
-        - pytest ==6.2.5
+        - pytest >=6.2.5
         - nbval >=0.9.3
       source_files:
         - evalml/*
@@ -85,7 +85,7 @@ outputs:
       imports:
         - evalml
       requires:
-        - pytest ==6.2.5
+        - pytest >=6.2.5
         - nbval >=0.9.3
         - python-graphviz >=0.8.4
         - category_encoders >=2.0.0, <=2.5.1.post0
diff --git a/docs/source/disable-warnings.py b/docs/source/disable-warnings.py
index e9a18bc0b4..49307b5ec0 100644
--- a/docs/source/disable-warnings.py
+++ b/docs/source/disable-warnings.py
@@ -1,6 +1,5 @@
-# flake8: noqa 401 imported to force console mode for tqdm in jupyter notebooks
-from tqdm.auto import tqdm
-
 import warnings
+from tqdm.auto import tqdm  # noqa: F401
+
 warnings.filterwarnings("ignore")
diff --git a/docs/source/release_notes.rst b/docs/source/release_notes.rst
index d64ca62024..3eb0096df6 100644
--- a/docs/source/release_notes.rst
+++ b/docs/source/release_notes.rst
@@ -10,6 +10,7 @@ Release Notes
         * Uncapped holidays :pr:`4428`
         * Unpinned kaleido :pr:`4423`
         * Unpinned shap and scipy :pr:`4436`
+        * Unpinned most pinned dependencies under project.optional-dependencies :pr:`4431`
     * Documentation Changes
     * Testing Changes
         * Added ability to run airflow tests in Python 3.9 :pr:`4391`
diff --git a/evalml/tests/automl_tests/test_automl.py b/evalml/tests/automl_tests/test_automl.py
index 155d131fc2..7919ac86e2 100644
--- a/evalml/tests/automl_tests/test_automl.py
+++ b/evalml/tests/automl_tests/test_automl.py
@@ -3849,13 +3849,13 @@ def test_score_batch_before_fitting_yields_error_nan_scores(
 def test_high_cv_check_no_warning_for_divide_by_zero(X_y_binary, dummy_binary_pipeline):
     X, y = X_y_binary
     automl = AutoMLSearch(X_train=X, y_train=y, problem_type="binary")
-    with pytest.warns(None) as warnings:
+    with warnings.catch_warnings(record=True) as automl_warnings:
         # mean is 0 but std is not
         automl._check_for_high_variance(
             dummy_binary_pipeline,
             cv_scores=[0.0, 1.0, -1.0],
         )
-    assert len(warnings) == 0
+    assert len(automl_warnings) == 0


 @pytest.mark.parametrize(
@@ -4370,7 +4370,7 @@ def dummy_mock_get_preprocessing_components(*args, **kwargs):
     mock_get_preprocessing_components.side_effect = (
         dummy_mock_get_preprocessing_components
     )
-    with pytest.warns(None) as warnings_logged:
+    with warnings.catch_warnings(record=True) as warnings_logged:
         automl = AutoMLSearch(
             X_train=X,
             y_train=y,
diff --git a/evalml/tests/automl_tests/test_pipeline_search_plots.py b/evalml/tests/automl_tests/test_pipeline_search_plots.py
index 3738e8a73f..0d9b5266aa 100644
--- a/evalml/tests/automl_tests/test_pipeline_search_plots.py
+++ b/evalml/tests/automl_tests/test_pipeline_search_plots.py
@@ -1,7 +1,7 @@
+import warnings
 from unittest.mock import MagicMock, patch

 import pandas as pd
-import pytest

 from evalml.automl.pipeline_search_plots import SearchIterationPlot

@@ -53,12 +53,12 @@ def test_jupyter(import_check, jupyter_check):
     mock_data = MagicMock()

     jupyter_check.return_value = True
-    with pytest.warns(None) as graph_valid:
+    with warnings.catch_warnings(record=True) as graph_valid:
         SearchIterationPlot(mock_data.results, mock_data.objective)
         assert len(graph_valid) == 0
         import_check.assert_called_with("ipywidgets", warning=True)

     jupyter_check.return_value = False
-    with pytest.warns(None) as graph_valid:
+    with warnings.catch_warnings(record=True) as graph_valid:
         SearchIterationPlot(mock_data.results, mock_data.objective)
         assert len(graph_valid) == 0
diff --git a/evalml/tests/model_understanding_tests/prediction_explanations_tests/test_force_plots.py b/evalml/tests/model_understanding_tests/prediction_explanations_tests/test_force_plots.py
index dd7b1f5cb3..72626e0c3f 100644
--- a/evalml/tests/model_understanding_tests/prediction_explanations_tests/test_force_plots.py
+++ b/evalml/tests/model_understanding_tests/prediction_explanations_tests/test_force_plots.py
@@ -1,3 +1,4 @@
+import warnings
 from itertools import product
 from unittest.mock import patch

@@ -79,7 +80,7 @@ def test_force_plot_binary(
     else:
         # Code chunk to test where initjs is called if jupyter is recognized
         jupyter_check.return_value = False
-        with pytest.warns(None) as graph_valid:
+        with warnings.catch_warnings(record=True) as graph_valid:
             results = graph_force_plot(
                 pipeline,
                 rows_to_explain=rows_to_explain,
@@ -88,11 +89,11 @@ def test_force_plot_binary(
                 matplotlib=False,
             )
             assert not initjs.called
-            warnings = set([str(gv) for gv in graph_valid.list])
-            assert all(["DeprecationWarning" in w for w in warnings])
+            warnings_deprecated = set([str(gv) for gv in graph_valid])
+            assert all(["DeprecationWarning" in w for w in warnings_deprecated])

         jupyter_check.return_value = True
-        with pytest.warns(None) as graph_valid:
+        with warnings.catch_warnings(record=True) as graph_valid:
             results = graph_force_plot(
                 pipeline,
                 rows_to_explain=rows_to_explain,
@@ -101,8 +102,8 @@ def test_force_plot_binary(
                 matplotlib=False,
             )
             assert initjs.called
-            warnings = set([str(gv) for gv in graph_valid.list])
-            assert all(["DeprecationWarning" in w for w in warnings])
+            warnings_deprecated = set([str(gv) for gv in graph_valid])
+            assert all(["DeprecationWarning" in w for w in warnings_deprecated])

     # Should have a result per row to explain.
     assert len(results) == len(rows_to_explain)
diff --git a/evalml/tests/model_understanding_tests/test_metrics.py b/evalml/tests/model_understanding_tests/test_metrics.py
index 3dd89ce515..0ec8f81afe 100644
--- a/evalml/tests/model_understanding_tests/test_metrics.py
+++ b/evalml/tests/model_understanding_tests/test_metrics.py
@@ -611,22 +611,22 @@ def test_jupyter_graph_check(
     y = y.ww.iloc[:20]
     logistic_regression_binary_pipeline.fit(X, y)
     jupyter_check.return_value = False
-    with pytest.warns(None) as graph_valid:
+    with warnings.catch_warnings(record=True) as graph_valid:
         graph_confusion_matrix(y, y)
         assert len(graph_valid) == 0

     jupyter_check.return_value = True
-    with pytest.warns(None) as graph_valid:
+    with warnings.catch_warnings(record=True) as graph_valid:
         rs = get_random_state(42)
         y_pred_proba = y * rs.random(y.shape)
         graph_precision_recall_curve(y, y_pred_proba)
         assert len(graph_valid) == 0
         import_check.assert_called_with("ipywidgets", warning=True)
-    with pytest.warns(None) as graph_valid:
+    with warnings.catch_warnings(record=True) as graph_valid:
         graph_confusion_matrix(y, y)
         assert len(graph_valid) == 0
         import_check.assert_called_with("ipywidgets", warning=True)
-    with pytest.warns(None) as graph_valid:
+    with warnings.catch_warnings(record=True) as graph_valid:
         rs = get_random_state(42)
         y_pred_proba = y * rs.random(y.shape)
         graph_roc_curve(y, y_pred_proba)
diff --git a/evalml/tests/model_understanding_tests/test_partial_dependence.py b/evalml/tests/model_understanding_tests/test_partial_dependence.py
index 5594097f71..efffe69a74 100644
--- a/evalml/tests/model_understanding_tests/test_partial_dependence.py
+++ b/evalml/tests/model_understanding_tests/test_partial_dependence.py
@@ -1,5 +1,6 @@
 import collections
 import re
+import warnings
 from unittest.mock import patch

 import featuretools as ft
@@ -2465,7 +2466,7 @@ def test_partial_dependence_jupyter_graph_check(
     logistic_regression_binary_pipeline.fit(X, y)

     jupyter_check.return_value = True
-    with pytest.warns(None) as graph_valid:
+    with warnings.catch_warnings(record=True) as graph_valid:
         graph_partial_dependence(
             logistic_regression_binary_pipeline,
             X,
diff --git a/evalml/tests/model_understanding_tests/test_permutation_importance.py b/evalml/tests/model_understanding_tests/test_permutation_importance.py
index d88b974844..9957524e3c 100644
--- a/evalml/tests/model_understanding_tests/test_permutation_importance.py
+++ b/evalml/tests/model_understanding_tests/test_permutation_importance.py
@@ -1,3 +1,4 @@
+import warnings
 from unittest.mock import PropertyMock, patch

 import numpy as np
@@ -923,7 +924,7 @@ def test_jupyter_graph_check(
     y = y.ww.iloc[:20]
     logistic_regression_binary_pipeline.fit(X, y)
     jupyter_check.return_value = False
-    with pytest.warns(None) as graph_valid:
+    with warnings.catch_warnings(record=True) as graph_valid:
         graph_permutation_importance(
             logistic_regression_binary_pipeline,
             X,
@@ -933,7 +934,7 @@
         assert len(graph_valid) == 0

     jupyter_check.return_value = True
-    with pytest.warns(None) as graph_valid:
+    with warnings.catch_warnings(record=True) as graph_valid:
         graph_permutation_importance(
             logistic_regression_binary_pipeline,
             X,
diff --git a/evalml/tests/model_understanding_tests/test_visualizations.py b/evalml/tests/model_understanding_tests/test_visualizations.py
index d963fc4399..244ddb653a 100644
--- a/evalml/tests/model_understanding_tests/test_visualizations.py
+++ b/evalml/tests/model_understanding_tests/test_visualizations.py
@@ -1,4 +1,5 @@
 import os
+import warnings
 from collections import OrderedDict
 from unittest.mock import patch

@@ -236,7 +237,7 @@ def test_jupyter_graph_check(
         false_negative=-2,
     )
     jupyter_check.return_value = True
-    with pytest.warns(None) as graph_valid:
+    with warnings.catch_warnings(record=True) as graph_valid:
         graph_binary_objective_vs_threshold(
             logistic_regression_binary_pipeline,
             X,
@@ -248,7 +249,7 @@
         assert len(graph_valid) == 0
         import_check.assert_called_with("ipywidgets", warning=True)

     Xr, yr = X_y_regression
-    with pytest.warns(None) as graph_valid:
+    with warnings.catch_warnings(record=True) as graph_valid:
         rs = get_random_state(42)
         y_preds = yr * rs.random(yr.shape)
         graph_prediction_vs_actual(yr, y_preds)
diff --git a/evalml/tests/objective_tests/test_standard_metrics.py b/evalml/tests/objective_tests/test_standard_metrics.py
index 3642a239f1..9c6b9c0324 100644
--- a/evalml/tests/objective_tests/test_standard_metrics.py
+++ b/evalml/tests/objective_tests/test_standard_metrics.py
@@ -1,3 +1,4 @@
+import warnings
 from itertools import product

 import numpy as np
@@ -708,7 +709,7 @@ def test_mse_linear_model():
 def test_mcc_catches_warnings():
     y_true = [1, 0, 1, 1]
     y_predicted = [0, 0, 0, 0]
-    with pytest.warns(None) as record:
+    with warnings.catch_warnings(record=True) as record:
         MCCBinary().objective_function(y_true, y_predicted)
         MCCMulticlass().objective_function(y_true, y_predicted)
     assert len(record) == 0
diff --git a/evalml/tests/pipeline_tests/test_graphs.py b/evalml/tests/pipeline_tests/test_graphs.py
index b63b6f829d..14bfdaa724 100644
--- a/evalml/tests/pipeline_tests/test_graphs.py
+++ b/evalml/tests/pipeline_tests/test_graphs.py
@@ -1,4 +1,5 @@
 import os
+import warnings
 from unittest.mock import patch

 import numpy as np
@@ -157,12 +158,12 @@ def test_jupyter_graph_check(import_check, jupyter_check, X_y_binary, test_pipel
     clf = test_pipeline
     clf.fit(X, y)
     jupyter_check.return_value = False
-    with pytest.warns(None) as graph_valid:
+    with warnings.catch_warnings(record=True) as graph_valid:
         clf.graph_feature_importance()
         assert len(graph_valid) == 0

     jupyter_check.return_value = True
-    with pytest.warns(None) as graph_valid:
+    with warnings.catch_warnings(record=True) as graph_valid:
         clf.graph_feature_importance()
         import_check.assert_called_with("ipywidgets", warning=True)
diff --git a/pyproject.toml b/pyproject.toml
index e1512e9787..e410a830fa 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -77,18 +77,18 @@ dependencies = [

 [project.optional-dependencies]
 test = [
-    "pytest == 7.1.2",
+    "pytest >= 7.1.2",
     "pytest-xdist >= 2.1.0",
     "pytest-timeout >= 1.4.2",
     "pytest-cov >= 2.10.1",
-    "nbval == 0.9.3",
-    "IPython >= 8.10.0, <8.12.1",
-    "PyYAML == 6.0.1",
+    "nbval >= 0.9.3",
+    "IPython >= 8.10.0",
+    "PyYAML >= 6.0.1",
     "coverage[toml] >= 6.4",
 ]
 dev = [
-    "ruff == 0.0.228",
-    "darglint == 1.8.0",
+    "ruff >= 0.0.228",
+    "darglint >= 1.8.0",
     "pre-commit >= 2.20.0",
     "evalml[docs,test]",
 ]
@@ -96,16 +96,16 @@ updater = [
     "alteryx-open-src-update-checker >= 2.1.0"
 ]
 docs = [
-    "docutils >=0.15.2, < 0.17",
+    "docutils >= 0.15.2, < 0.17",
     "pydata-sphinx-theme >= 0.3.1",
     "astroid <= 2.6.6",
     "Sphinx >= 5.0.0",
     "nbconvert >= 6.5.0",
     "nbsphinx >= 0.8.5, < 0.9.0",
     "sphinx-autoapi",
-    "sphinx-inline-tabs == 2022.1.2b11",
-    "sphinx-copybutton == 0.4.0",
-    "myst-parser == 0.18.0",
+    "sphinx-inline-tabs >= 2022.1.2b11",
+    "sphinx-copybutton >= 0.4.0",
+    "myst-parser >= 0.18.0",
 ]
 prophet = [
     "prophet >= 1.1.2",