diff --git a/python/pyspark/sql/connect/column.py b/python/pyspark/sql/connect/column.py
index d26283571cc9..d2c334ae67f7 100644
--- a/python/pyspark/sql/connect/column.py
+++ b/python/pyspark/sql/connect/column.py
@@ -439,9 +439,6 @@ def _test() -> None:
         .getOrCreate()
     )
 
-    # Spark Connect has a different string representation for Column.
-    del pyspark.sql.connect.column.Column.getItem.__doc__
-
     # TODO(SPARK-41772): Enable pyspark.sql.connect.column.Column.withField doctest
     del pyspark.sql.connect.column.Column.withField.__doc__
 
diff --git a/python/pyspark/sql/connect/readwriter.py b/python/pyspark/sql/connect/readwriter.py
index 8e8f4476799a..4643da317d07 100644
--- a/python/pyspark/sql/connect/readwriter.py
+++ b/python/pyspark/sql/connect/readwriter.py
@@ -616,12 +616,8 @@ def _test() -> None:
     globs = pyspark.sql.connect.readwriter.__dict__.copy()
 
     # TODO(SPARK-41817): Support reading with schema
-    del pyspark.sql.connect.readwriter.DataFrameReader.load.__doc__
     del pyspark.sql.connect.readwriter.DataFrameReader.option.__doc__
-    del pyspark.sql.connect.readwriter.DataFrameReader.text.__doc__
-    del pyspark.sql.connect.readwriter.DataFrameWriter.csv.__doc__
     del pyspark.sql.connect.readwriter.DataFrameWriter.option.__doc__
-    del pyspark.sql.connect.readwriter.DataFrameWriter.text.__doc__
     del pyspark.sql.connect.readwriter.DataFrameWriter.bucketBy.__doc__
     del pyspark.sql.connect.readwriter.DataFrameWriter.sortBy.__doc__
 
diff --git a/python/pyspark/sql/dataframe.py b/python/pyspark/sql/dataframe.py
index 09a5e9d0b07d..f45b0d70e996 100644
--- a/python/pyspark/sql/dataframe.py
+++ b/python/pyspark/sql/dataframe.py
@@ -506,7 +506,7 @@ def write(self) -> DataFrameWriter:
         --------
         >>> df = spark.createDataFrame([(2, "Alice"), (5, "Bob")], schema=["age", "name"])
         >>> type(df.write)
-        <class 'pyspark.sql.readwriter.DataFrameWriter'>
+        <class '...readwriter.DataFrameWriter'>
 
         Write the DataFrame as a table.
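
For context (not part of the patch itself): the doctest output for `type(df.write)` is written with an ellipsis so one expected string can match both the classic `pyspark.sql.readwriter.DataFrameWriter` and the Spark Connect `pyspark.sql.connect.readwriter.DataFrameWriter` reprs. Below is a minimal, self-contained sketch of that mechanism, assuming the doctests run with `doctest.ELLIPSIS` enabled (as PySpark's `_test()` helpers typically do); the `demo` function and the use of `OrderedDict` as a stand-in class are illustrative only.

```python
import doctest
from collections import OrderedDict


def demo() -> None:
    """Expected doctest output below elides the module path with '...'.

    With doctest.ELLIPSIS enabled, <class '...OrderedDict'> matches the actual
    repr <class 'collections.OrderedDict'>; the same idea lets
    <class '...readwriter.DataFrameWriter'> match both the classic and the
    Spark Connect DataFrameWriter classes.

    >>> type(OrderedDict())
    <class '...OrderedDict'>
    """


if __name__ == "__main__":
    # Without doctest.ELLIPSIS, the literal '...' would have to appear in the output.
    failures, _ = doctest.testmod(optionflags=doctest.ELLIPSIS)
    assert failures == 0
```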