Unpin dependencies from [project.optional-dependencies] #4431

Merged: 20 commits, Jun 4, 2024
4 changes: 2 additions & 2 deletions .github/meta.yaml
@@ -52,7 +52,7 @@ outputs:
imports:
- evalml
requires:
- pytest ==6.2.5
- pytest >=6.2.5
- nbval >=0.9.3
source_files:
- evalml/*
@@ -85,7 +85,7 @@ outputs:
imports:
- evalml
requires:
- pytest ==6.2.5
- pytest >=6.2.5
- nbval >=0.9.3
- python-graphviz >=0.8.4
- category_encoders >=2.0.0, <=2.5.1.post0
5 changes: 2 additions & 3 deletions docs/source/disable-warnings.py
@@ -1,6 +1,5 @@
# flake8: noqa 401 imported to force console mode for tqdm in jupyter notebooks
from tqdm.auto import tqdm

import warnings

from tqdm.auto import tqdm # noqa: F401

warnings.filterwarnings("ignore")
1 change: 1 addition & 0 deletions docs/source/release_notes.rst
@@ -10,6 +10,7 @@ Release Notes
* Uncapped holidays :pr:`4428`
* Unpinned kaleido :pr:`4423`
* Unpinned shap and scipy :pr:`4436`
* Unpinned most pinned dependencies under project.optional-dependencies :pr:`4431`
* Documentation Changes
* Testing Changes
* Added ability to run airflow tests in Python 3.9 :pr:`4391`
6 changes: 3 additions & 3 deletions evalml/tests/automl_tests/test_automl.py
Expand Up @@ -3849,13 +3849,13 @@
def test_high_cv_check_no_warning_for_divide_by_zero(X_y_binary, dummy_binary_pipeline):
X, y = X_y_binary
automl = AutoMLSearch(X_train=X, y_train=y, problem_type="binary")
with pytest.warns(None) as warnings:
with warnings.catch_warnings(record=True) as automl_warnings:

# mean is 0 but std is not
automl._check_for_high_variance(
dummy_binary_pipeline,
cv_scores=[0.0, 1.0, -1.0],
)
assert len(warnings) == 0
assert len(automl_warnings) == 0



@pytest.mark.parametrize(
@@ -4370,7 +4370,7 @@
mock_get_preprocessing_components.side_effect = (
dummy_mock_get_preprocessing_components
)
with pytest.warns(None) as warnings_logged:
with warnings.catch_warnings(record=True) as warnings_logged:

automl = AutoMLSearch(
X_train=X,
y_train=y,
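The test changes in this file (and in the files below) all make the same substitution: `pytest.warns(None)` becomes `warnings.catch_warnings(record=True)`. Passing `None` to `pytest.warns` was deprecated in pytest 7.0 and rejected by later releases, so the old idiom for asserting "no warnings were raised" stops working once the pytest pin is loosened. A minimal sketch of the replacement pattern (the function under test is a placeholder, and the `simplefilter` call is an extra safeguard that the PR's tests do not use):

```python
import warnings


def code_under_test():
    # Placeholder for the call being checked, e.g. automl._check_for_high_variance(...).
    return 1 + 1


def test_emits_no_warnings():
    # Old idiom, no longer supported:  with pytest.warns(None) as record: ...
    # Replacement: record warnings via the stdlib and assert nothing was captured.
    with warnings.catch_warnings(record=True) as record:
        warnings.simplefilter("always")  # not in the PR's tests; keeps filters from hiding warnings
        code_under_test()
    assert len(record) == 0
```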
6 changes: 3 additions & 3 deletions evalml/tests/automl_tests/test_pipeline_search_plots.py
@@ -1,7 +1,7 @@
import warnings

from unittest.mock import MagicMock, patch

import pandas as pd
import pytest

from evalml.automl.pipeline_search_plots import SearchIterationPlot

@@ -53,12 +53,12 @@
mock_data = MagicMock()

jupyter_check.return_value = True
with pytest.warns(None) as graph_valid:
with warnings.catch_warnings(record=True) as graph_valid:

SearchIterationPlot(mock_data.results, mock_data.objective)
assert len(graph_valid) == 0
import_check.assert_called_with("ipywidgets", warning=True)

jupyter_check.return_value = False
with pytest.warns(None) as graph_valid:
with warnings.catch_warnings(record=True) as graph_valid:

SearchIterationPlot(mock_data.results, mock_data.objective)
assert len(graph_valid) == 0
@@ -1,3 +1,4 @@
import warnings
from itertools import product
from unittest.mock import patch

@@ -79,7 +80,7 @@ def test_force_plot_binary(
else:
# Code chunk to test where initjs is called if jupyter is recognized
jupyter_check.return_value = False
with pytest.warns(None) as graph_valid:
with warnings.catch_warnings(record=True) as graph_valid:
results = graph_force_plot(
pipeline,
rows_to_explain=rows_to_explain,
@@ -88,11 +89,11 @@ def test_force_plot_binary(
matplotlib=False,
)
assert not initjs.called
warnings = set([str(gv) for gv in graph_valid.list])
assert all(["DeprecationWarning" in w for w in warnings])
warnings_deprecated = set([str(gv) for gv in graph_valid])
assert all(["DeprecationWarning" in w for w in warnings_deprecated])

jupyter_check.return_value = True
with pytest.warns(None) as graph_valid:
with warnings.catch_warnings(record=True) as graph_valid:
results = graph_force_plot(
pipeline,
rows_to_explain=rows_to_explain,
@@ -101,8 +102,8 @@ def test_force_plot_binary(
matplotlib=False,
)
assert initjs.called
warnings = set([str(gv) for gv in graph_valid.list])
assert all(["DeprecationWarning" in w for w in warnings])
warnings_deprecated = set([str(gv) for gv in graph_valid])
assert all(["DeprecationWarning" in w for w in warnings_deprecated])

# Should have a result per row to explain.
assert len(results) == len(rows_to_explain)
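One detail worth noting in the hunk above: the local variable is renamed from `warnings` to `warnings_deprecated`. This is required, not cosmetic. The file now imports the `warnings` module and uses it inside the same function, and rebinding the name `warnings` to a set would make Python treat it as a local for the whole function, breaking the `warnings.catch_warnings(...)` lookup. A small illustrative sketch (names and messages are made up, not evalml code):

```python
import warnings


def collect_deprecation_messages():
    # If this function assigned to a variable literally named `warnings`, Python would
    # treat that name as local for the whole function and the module lookup on the next
    # line would raise UnboundLocalError; hence the distinct name `warnings_deprecated`.
    with warnings.catch_warnings(record=True) as recorded:
        warnings.simplefilter("always")  # record regardless of global filters
        warnings.warn("old force-plot API", DeprecationWarning)
    warnings_deprecated = {str(w.message) for w in recorded}
    return all("old force-plot API" in msg for msg in warnings_deprecated)


print(collect_deprecation_messages())  # True
```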
8 changes: 4 additions & 4 deletions evalml/tests/model_understanding_tests/test_metrics.py
@@ -611,22 +611,22 @@ def test_jupyter_graph_check(
y = y.ww.iloc[:20]
logistic_regression_binary_pipeline.fit(X, y)
jupyter_check.return_value = False
with pytest.warns(None) as graph_valid:
with warnings.catch_warnings(record=True) as graph_valid:
graph_confusion_matrix(y, y)
assert len(graph_valid) == 0

jupyter_check.return_value = True
with pytest.warns(None) as graph_valid:
with warnings.catch_warnings(record=True) as graph_valid:
rs = get_random_state(42)
y_pred_proba = y * rs.random(y.shape)
graph_precision_recall_curve(y, y_pred_proba)
assert len(graph_valid) == 0
import_check.assert_called_with("ipywidgets", warning=True)
with pytest.warns(None) as graph_valid:
with warnings.catch_warnings(record=True) as graph_valid:
graph_confusion_matrix(y, y)
assert len(graph_valid) == 0
import_check.assert_called_with("ipywidgets", warning=True)
with pytest.warns(None) as graph_valid:
with warnings.catch_warnings(record=True) as graph_valid:
rs = get_random_state(42)
y_pred_proba = y * rs.random(y.shape)
graph_roc_curve(y, y_pred_proba)
@@ -1,5 +1,6 @@
import collections
import re
import warnings
from unittest.mock import patch

import featuretools as ft
@@ -2465,7 +2466,7 @@ def test_partial_dependence_jupyter_graph_check(
logistic_regression_binary_pipeline.fit(X, y)

jupyter_check.return_value = True
with pytest.warns(None) as graph_valid:
with warnings.catch_warnings(record=True) as graph_valid:
graph_partial_dependence(
logistic_regression_binary_pipeline,
X,
@@ -1,3 +1,4 @@
import warnings
from unittest.mock import PropertyMock, patch

import numpy as np
@@ -923,7 +924,7 @@ def test_jupyter_graph_check(
y = y.ww.iloc[:20]
logistic_regression_binary_pipeline.fit(X, y)
jupyter_check.return_value = False
with pytest.warns(None) as graph_valid:
with warnings.catch_warnings(record=True) as graph_valid:
graph_permutation_importance(
logistic_regression_binary_pipeline,
X,
@@ -933,7 +934,7 @@ def test_jupyter_graph_check(
assert len(graph_valid) == 0

jupyter_check.return_value = True
with pytest.warns(None) as graph_valid:
with warnings.catch_warnings(record=True) as graph_valid:
graph_permutation_importance(
logistic_regression_binary_pipeline,
X,
5 changes: 3 additions & 2 deletions evalml/tests/model_understanding_tests/test_visualizations.py
@@ -1,4 +1,5 @@
import os
import warnings
from collections import OrderedDict
from unittest.mock import patch

@@ -236,7 +237,7 @@ def test_jupyter_graph_check(
false_negative=-2,
)
jupyter_check.return_value = True
with pytest.warns(None) as graph_valid:
with warnings.catch_warnings(record=True) as graph_valid:
graph_binary_objective_vs_threshold(
logistic_regression_binary_pipeline,
X,
@@ -248,7 +249,7 @@ def test_jupyter_graph_check(
import_check.assert_called_with("ipywidgets", warning=True)

Xr, yr = X_y_regression
with pytest.warns(None) as graph_valid:
with warnings.catch_warnings(record=True) as graph_valid:
rs = get_random_state(42)
y_preds = yr * rs.random(yr.shape)
graph_prediction_vs_actual(yr, y_preds)
3 changes: 2 additions & 1 deletion evalml/tests/objective_tests/test_standard_metrics.py
@@ -1,3 +1,4 @@
import warnings
from itertools import product

import numpy as np
@@ -708,7 +709,7 @@ def test_mse_linear_model():
def test_mcc_catches_warnings():
y_true = [1, 0, 1, 1]
y_predicted = [0, 0, 0, 0]
with pytest.warns(None) as record:
with warnings.catch_warnings(record=True) as record:
MCCBinary().objective_function(y_true, y_predicted)
MCCMulticlass().objective_function(y_true, y_predicted)
assert len(record) == 0
5 changes: 3 additions & 2 deletions evalml/tests/pipeline_tests/test_graphs.py
@@ -1,4 +1,5 @@
import os
import warnings
from unittest.mock import patch

import numpy as np
@@ -157,12 +158,12 @@ def test_jupyter_graph_check(import_check, jupyter_check, X_y_binary, test_pipel
clf = test_pipeline
clf.fit(X, y)
jupyter_check.return_value = False
with pytest.warns(None) as graph_valid:
with warnings.catch_warnings(record=True) as graph_valid:
clf.graph_feature_importance()
assert len(graph_valid) == 0

jupyter_check.return_value = True
with pytest.warns(None) as graph_valid:
with warnings.catch_warnings(record=True) as graph_valid:
clf.graph_feature_importance()
import_check.assert_called_with("ipywidgets", warning=True)

20 changes: 10 additions & 10 deletions pyproject.toml
@@ -77,35 +77,35 @@ dependencies = [

[project.optional-dependencies]
test = [
"pytest == 7.1.2",
"pytest >= 7.1.2",
"pytest-xdist >= 2.1.0",
"pytest-timeout >= 1.4.2",
"pytest-cov >= 2.10.1",
"nbval == 0.9.3",
"IPython >= 8.10.0, <8.12.1",
"PyYAML == 6.0.1",
"nbval >= 0.9.3",
"IPython >= 8.10.0",
"PyYAML >= 6.0.1",
"coverage[toml] >= 6.4",
]
dev = [
"ruff == 0.0.228",
"darglint == 1.8.0",
"ruff >= 0.0.228",
"darglint >= 1.8.0",
"pre-commit >= 2.20.0",
"evalml[docs,test]",
]
updater = [
"alteryx-open-src-update-checker >= 2.1.0"
]
docs = [
"docutils >=0.15.2, < 0.17",
"docutils >= 0.15.2, < 0.17",
"pydata-sphinx-theme >= 0.3.1",
"astroid <= 2.6.6",
"Sphinx >= 5.0.0",
"nbconvert >= 6.5.0",
"nbsphinx >= 0.8.5, < 0.9.0",
Comment from the PR author on the remaining docs pins:
A combination of nbsphinx, astroid, and docutils causes the readthedocs pipeline to fail. I'll need to open a separate ticket to uncap those.

"sphinx-autoapi",
"sphinx-inline-tabs == 2022.1.2b11",
"sphinx-copybutton == 0.4.0",
"myst-parser == 0.18.0",
"sphinx-inline-tabs >= 2022.1.2b11",
"sphinx-copybutton >= 0.4.0",
"myst-parser >= 0.18.0",
]
prophet = [
"prophet >= 1.1.2",
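For context on the specifier changes in this file and in .github/meta.yaml: exact pins (`==`) are relaxed to lower bounds (`>=`), and upper caps are kept only where newer releases are known to break the docs build (docutils, astroid, and nbsphinx, per the author's comment above). The snippet below is an illustration, not part of the PR; it uses the `packaging` library (the same version-matching machinery pip vendors) to show the effect:

```python
from packaging.specifiers import SpecifierSet
from packaging.version import Version

pinned = SpecifierSet("==7.1.2")          # old style: exactly one release satisfies it
lower_bounded = SpecifierSet(">=7.1.2")   # new style: any release at or above the floor
capped = SpecifierSet(">=0.15.2,<0.17")   # kept for docutils, where newer releases break the docs build

print(Version("7.1.2") in pinned)         # True
print(Version("8.2.0") in pinned)         # False: exact pins block newer releases
print(Version("8.2.0") in lower_bounded)  # True
print(Version("0.17.1") in capped)        # False: the upper cap is still enforced
```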