diff --git a/flaml/automl/spark/metrics.py b/flaml/automl/spark/metrics.py
index 46de98ff3c..11915bbef0 100644
--- a/flaml/automl/spark/metrics.py
+++ b/flaml/automl/spark/metrics.py
@@ -54,6 +54,10 @@ def spark_metric_loss_score(
     Returns:
         float | the loss score. A lower value indicates a better model.
     """
+    import warnings
+
+    warnings.filterwarnings("ignore")
+
     label_col = "label"
     prediction_col = "prediction"
     kwargs = {}
diff --git a/flaml/automl/spark/utils.py b/flaml/automl/spark/utils.py
index e2da0595f2..bf289f9707 100644
--- a/flaml/automl/spark/utils.py
+++ b/flaml/automl/spark/utils.py
@@ -92,6 +92,10 @@ def train_test_split_pyspark(
         pyspark.sql.DataFrame/pandas_on_spark DataFrame | The train dataframe.
         pyspark.sql.DataFrame/pandas_on_spark DataFrame | The test dataframe.
     """
+    import warnings
+
+    warnings.filterwarnings("ignore")
+
     if isinstance(df, psDataFrame):
         df = df.to_spark(index_col=index_col)
 
@@ -156,6 +160,10 @@ def iloc_pandas_on_spark(
     index_col: Optional[str] = "tmp_index_col",
 ) -> Union[psDataFrame, psSeries]:
     """Get the rows of a pandas_on_spark dataframe/series by index."""
+    import warnings
+
+    warnings.filterwarnings("ignore")
+
     if isinstance(psdf, (DataFrame, Series)):
         return psdf.iloc[index]
     if isinstance(index, (int, slice)):
@@ -207,6 +215,10 @@ def spark_kFold(
     Returns:
         A list of (train, validation) DataFrames.
     """
+    import warnings
+
+    warnings.filterwarnings("ignore")
+
     if isinstance(dataset, psDataFrame):
         dataset = dataset.to_spark(index_col=index_col)
 