Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

suppress warning message of pandas_on_spark to_spark #1058

Merged
merged 2 commits into the base branch on Jun 1, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions flaml/automl/spark/metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,10 @@ def spark_metric_loss_score(
Returns:
float | the loss score. A lower value indicates a better model.
"""
import warnings

warnings.filterwarnings("ignore")

label_col = "label"
prediction_col = "prediction"
kwargs = {}
Expand Down
12 changes: 12 additions & 0 deletions flaml/automl/spark/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -92,6 +92,10 @@ def train_test_split_pyspark(
pyspark.sql.DataFrame/pandas_on_spark DataFrame | The train dataframe.
pyspark.sql.DataFrame/pandas_on_spark DataFrame | The test dataframe.
"""
import warnings

warnings.filterwarnings("ignore")

if isinstance(df, psDataFrame):
df = df.to_spark(index_col=index_col)

Expand Down Expand Up @@ -156,6 +160,10 @@ def iloc_pandas_on_spark(
index_col: Optional[str] = "tmp_index_col",
) -> Union[psDataFrame, psSeries]:
"""Get the rows of a pandas_on_spark dataframe/series by index."""
import warnings

warnings.filterwarnings("ignore")

if isinstance(psdf, (DataFrame, Series)):
return psdf.iloc[index]
if isinstance(index, (int, slice)):
Expand Down Expand Up @@ -207,6 +215,10 @@ def spark_kFold(
Returns:
A list of (train, validation) DataFrames.
"""
import warnings

warnings.filterwarnings("ignore")

if isinstance(dataset, psDataFrame):
dataset = dataset.to_spark(index_col=index_col)

Expand Down