30 changes: 3 additions & 27 deletions tests/conftest.py
@@ -10,7 +10,7 @@
import pytest

from narwhals._utils import Implementation, generate_temporary_column_name
from tests.utils import PANDAS_VERSION
from tests.utils import PANDAS_VERSION, pyspark_session, sqlframe_session

if TYPE_CHECKING:
from collections.abc import Sequence
@@ -168,35 +168,13 @@ def pyspark_lazy_constructor() -> Callable[[Data], PySparkDataFrame]: # pragma:
import warnings
from atexit import register

is_spark_connect = bool(os.environ.get("SPARK_CONNECT", None))

if TYPE_CHECKING:
from pyspark.sql import SparkSession
elif is_spark_connect:
from pyspark.sql.connect.session import SparkSession
else:
from pyspark.sql import SparkSession

with warnings.catch_warnings():
# The spark session seems to trigger a polars warning.
# Polars is imported in the tests, but not used in the spark operations
warnings.filterwarnings(
"ignore", r"Using fork\(\) can cause Polars", category=RuntimeWarning
)
builder = cast("SparkSession.Builder", SparkSession.builder).appName("unit-tests")

session = (
(
builder.remote(f"sc://localhost:{os.environ.get('SPARK_PORT', '15002')}")
if is_spark_connect
else builder.master("local[1]").config("spark.ui.enabled", "false")
)
.config("spark.default.parallelism", "1")
.config("spark.sql.shuffle.partitions", "2")
# common timezone for all tests environments
.config("spark.sql.session.timeZone", "UTC")
.getOrCreate()
)
session = pyspark_session()

register(session.stop)

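The tests/utils.py hunk that introduces the shared helper is not part of this diff, so the following is only a rough sketch of what pyspark_session() presumably consolidates, reconstructed from the inline logic removed above. The helper name comes from the new import, but its exact signature and whether the polars fork-warning filter moved into it are assumptions.

def pyspark_session():  # pragma: no cover
    # Sketch only: reproduces the removed inline logic; the real helper lives in tests/utils.py.
    import os

    is_spark_connect = bool(os.environ.get("SPARK_CONNECT", None))
    if is_spark_connect:
        from pyspark.sql.connect.session import SparkSession
    else:
        from pyspark.sql import SparkSession

    builder = SparkSession.builder.appName("unit-tests")
    builder = (
        builder.remote(f"sc://localhost:{os.environ.get('SPARK_PORT', '15002')}")
        if is_spark_connect
        else builder.master("local[1]").config("spark.ui.enabled", "false")
    )
    return (
        builder.config("spark.default.parallelism", "1")
        .config("spark.sql.shuffle.partitions", "2")
        # common timezone for all test environments
        .config("spark.sql.session.timeZone", "UTC")
        .getOrCreate()
    )
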
@@ -216,9 +194,7 @@ def _constructor(obj: Data) -> PySparkDataFrame:


def sqlframe_pyspark_lazy_constructor(obj: Data) -> SQLFrameDataFrame: # pragma: no cover
from sqlframe.duckdb import DuckDBSession

session = DuckDBSession()
session = sqlframe_session()
return session.createDataFrame([*zip(*obj.values())], schema=[*obj.keys()])


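Likewise, sqlframe_session() presumably does little more than wrap the DuckDB-backed session the constructor used to build inline; a minimal sketch, assuming the helper adds nothing beyond that:

def sqlframe_session():  # pragma: no cover
    # Sketch only: the real helper is defined in tests/utils.py, not shown in this diff.
    from sqlframe.duckdb import DuckDBSession

    return DuckDBSession()
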
12 changes: 3 additions & 9 deletions tests/dtypes_test.py
@@ -11,7 +11,7 @@

import narwhals as nw
from narwhals.exceptions import PerformanceWarning
from tests.utils import PANDAS_VERSION, POLARS_VERSION, PYARROW_VERSION
from tests.utils import PANDAS_VERSION, POLARS_VERSION, PYARROW_VERSION, pyspark_session

if TYPE_CHECKING:
from collections.abc import Iterable
@@ -505,15 +505,9 @@ def test_datetime_w_tz_duckdb() -> None:
assert result["b"] == nw.List(nw.List(nw.Datetime("us", "Asia/Kathmandu")))


def test_datetime_w_tz_pyspark(constructor: Constructor) -> None: # pragma: no cover
if "pyspark" not in str(constructor) or "sqlframe" in str(constructor):
pytest.skip()
def test_datetime_w_tz_pyspark() -> None: # pragma: no cover
pytest.importorskip("pyspark")
from pyspark.sql import SparkSession

session = SparkSession.builder.config(
"spark.sql.session.timeZone", "UTC"
).getOrCreate()
session = pyspark_session()

Member Author:
Christ, great call @FBruzzesi!
I had no idea we had this logic in so many places πŸ˜‚

df = nw.from_native(
session.createDataFrame([(datetime(2020, 1, 1, tzinfo=timezone.utc),)], ["a"])
12 changes: 3 additions & 9 deletions tests/expr_and_series/dt/convert_time_zone_test.py
@@ -13,6 +13,7 @@
Constructor,
assert_equal_data,
is_windows,
pyspark_session,
)

if TYPE_CHECKING:
@@ -153,17 +154,10 @@ def test_convert_time_zone_to_connection_tz_duckdb() -> None:
)


def test_convert_time_zone_to_connection_tz_pyspark(
constructor: Constructor,
) -> None: # pragma: no cover
if "pyspark" not in str(constructor) or "sqlframe" in str(constructor):
pytest.skip()
def test_convert_time_zone_to_connection_tz_pyspark() -> None: # pragma: no cover
pytest.importorskip("pyspark")
from pyspark.sql import SparkSession

session = SparkSession.builder.config(
"spark.sql.session.timeZone", "UTC"
).getOrCreate()
session = pyspark_session()
df = nw.from_native(
session.createDataFrame([(datetime(2020, 1, 1, tzinfo=timezone.utc),)], ["a"])
)
19 changes: 9 additions & 10 deletions tests/expr_and_series/dt/replace_time_zone_test.py
@@ -7,7 +7,13 @@
import pytest

import narwhals as nw
from tests.utils import PANDAS_VERSION, Constructor, assert_equal_data, is_windows
from tests.utils import (
PANDAS_VERSION,
Constructor,
assert_equal_data,
is_windows,
pyspark_session,
)

if TYPE_CHECKING:
from tests.utils import ConstructorEager
@@ -136,17 +142,10 @@ def test_replace_time_zone_to_connection_tz_duckdb() -> None:
)


def test_replace_time_zone_to_connection_tz_pyspark(
constructor: Constructor,
) -> None: # pragma: no cover
if "pyspark" not in str(constructor) or "sqlframe" in str(constructor):
pytest.skip()
def test_replace_time_zone_to_connection_tz_pyspark() -> None: # pragma: no cover
pytest.importorskip("pyspark")
from pyspark.sql import SparkSession

session = SparkSession.builder.config(
"spark.sql.session.timeZone", "UTC"
).getOrCreate()
session = pyspark_session()
df = nw.from_native(
session.createDataFrame([(datetime(2020, 1, 1, tzinfo=timezone.utc),)], ["a"])
)