50 changes: 37 additions & 13 deletions python/pyspark/sql/functions/builtin.py
@@ -15303,7 +15303,7 @@ def shuffle(col: "ColumnOrName") -> Column:
@_try_remote_functions
def reverse(col: "ColumnOrName") -> Column:
"""
- Collection function: returns a reversed string or an array with reverse order of elements.
+ Collection function: returns a reversed string or an array with elements in reverse order.

.. versionadded:: 1.5.0

@@ -15313,21 +15313,38 @@ def reverse(col: "ColumnOrName") -> Column:
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
- name of column or expression
+ The name of the column or an expression that represents the element to be reversed.

Returns
-------
:class:`~pyspark.sql.Column`
- array of elements in reverse order.
+ A new column that contains a reversed string or an array with elements in reverse order.

Examples
--------
+ Example 1: Reverse a string
+
Review comment from @LuciferYang (Contributor), Jan 2, 2024:

Let's standardize the format:

  1. import functions as sf and add the other necessary imports
  2. try not to use .alias
  3. perhaps show is clearer than collect when displaying results.

Reply from the author: Done

+ >>> import pyspark.sql.functions as sf
>>> df = spark.createDataFrame([('Spark SQL',)], ['data'])
- >>> df.select(reverse(df.data).alias('s')).collect()
- [Row(s='LQS krapS')]
+ >>> df.select(sf.reverse(df.data)).show()
+ +-------------+
+ |reverse(data)|
+ +-------------+
+ |    LQS krapS|
+ +-------------+

+ Example 2: Reverse an array
+
+ >>> from pyspark.sql import functions as sf
>>> df = spark.createDataFrame([([2, 1, 3],) ,([1],) ,([],)], ['data'])
- >>> df.select(reverse(df.data).alias('r')).collect()
- [Row(r=[3, 1, 2]), Row(r=[1]), Row(r=[])]
+ >>> df.select(sf.reverse(df.data)).show()
+ +-------------+
+ |reverse(data)|
+ +-------------+
+ |    [3, 1, 2]|
+ |          [1]|
+ |           []|
+ +-------------+
"""
return _invoke_function_over_columns("reverse", col)
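
For readers who want to try the standardized style the reviewer asks for above (module imported as sf, no .alias, show() rather than collect()) outside the doctest harness, here is a minimal standalone sketch; the local SparkSession setup is an assumption, since the doctests rely on a preconfigured spark fixture:

    import pyspark.sql.functions as sf
    from pyspark.sql import SparkSession

    # Assumption: a throwaway local session in place of the doctest `spark` fixture.
    spark = SparkSession.builder.master("local[1]").appName("reverse-sketch").getOrCreate()

    # String input: reverse flips the characters.
    spark.createDataFrame([("Spark SQL",)], ["data"]).select(sf.reverse("data")).show()

    # Array input: reverse flips the element order.
    spark.createDataFrame([([2, 1, 3],)], ["data"]).select(sf.reverse("data")).show()

    spark.stop()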

@@ -15406,7 +15423,7 @@ def flatten(col: "ColumnOrName") -> Column:
@_try_remote_functions
def map_contains_key(col: "ColumnOrName", value: Any) -> Column:
"""
- Returns true if the map contains the key.
+ Map function: Returns true if the map contains the key.

.. versionadded:: 3.4.0

@@ -15416,9 +15433,9 @@ def map_contains_key(col: "ColumnOrName", value: Any) -> Column:
Parameters
----------
col : :class:`~pyspark.sql.Column` or str
- name of column or expression
+ The name of the column or an expression that represents the map.
value :
- a literal value
+ A literal value.

Returns
-------
@@ -15427,15 +15444,22 @@ def map_contains_key(col: "ColumnOrName", value: Any) -> Column:

Examples
--------
- >>> from pyspark.sql.functions import map_contains_key
+ Example 1: The key is in the map
+
+ >>> from pyspark.sql import functions as sf
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
- >>> df.select(map_contains_key("data", 1)).show()
+ >>> df.select(sf.map_contains_key("data", 1)).show()
+-------------------------+
|map_contains_key(data, 1)|
+-------------------------+
|                     true|
+-------------------------+
- >>> df.select(map_contains_key("data", -1)).show()
+
+ Example 2: The key is not in the map
+
+ >>> from pyspark.sql import functions as sf
+ >>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
+ >>> df.select(sf.map_contains_key("data", -1)).show()
+--------------------------+
|map_contains_key(data, -1)|
+--------------------------+
|                     false|
+--------------------------+
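
Similarly, a minimal standalone sketch for map_contains_key, again assuming a local SparkSession in place of the doctest fixture; the filter usage at the end is an illustration and not part of the patch:

    from pyspark.sql import SparkSession
    from pyspark.sql import functions as sf

    # Assumption: a throwaway local session in place of the doctest `spark` fixture.
    spark = SparkSession.builder.master("local[1]").appName("map-contains-key-sketch").getOrCreate()

    df = spark.sql("SELECT map(1, 'a', 2, 'b') AS data")

    # One boolean column per lookup: true for key 1, false for key -1.
    df.select(sf.map_contains_key("data", 1), sf.map_contains_key("data", -1)).show()

    # The same predicate works in a filter: keep rows whose map has key 2.
    df.filter(sf.map_contains_key("data", 2)).show(truncate=False)

    spark.stop()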