Merged

Changes from all commits (17 commits)
5fe963f  [SPARK-46380][SQL] Replace current time/date prior to evaluating inli…  dbatomic  Dec 21, 2023
0e94f34  [SPARK-46378][SQL][FOLLOWUP] Do not rely on TreeNodeTag in Project  cloud-fan  Dec 21, 2023
c10b2c0  [SPARK-46471][PS][TESTS] Reorganize `OpsOnDiffFramesEnabledTests`: Fa…  zhengruifeng  Dec 21, 2023
4d21e55  [SPARK-46466][SQL] Vectorized parquet reader should never do rebase f…  cloud-fan  Dec 21, 2023
87d22fd  [SPARK-46470][PS][TESTS] Move `test_series_datetime` to `pyspark.pand…  zhengruifeng  Dec 21, 2023
2723bb1  [SPARK-46476][BUILD][CORE][CONNECT] Move `IvyTestUtils` back to `src/…  LuciferYang  Dec 21, 2023
db162e5  [SPARK-46471][PS][TESTS][FOLLOWUPS] Reorganize `OpsOnDiffFramesEnable…  zhengruifeng  Dec 21, 2023
e4b5977  [SPARK-46437][DOCS] Remove cruft from the built-in SQL functions docu…  nchammas  Dec 22, 2023
4fcd5bf  [SPARK-45525][SQL][PYTHON] Support for Python data source write using…  allisonwang-db  Dec 22, 2023
3432fd8  [SPARK-46468][SQL] Handle COUNT bug for EXISTS subqueries with Aggreg…  agubichev  Dec 22, 2023
8c63485  [SPARK-46380][SQL][FOLLOWUP] Simplify the code for ResolveInlineTable…  beliefer  Dec 22, 2023
a921da8  [SPARK-46443][SQL] Decimal precision and scale should decided by H2 d…  beliefer  Dec 22, 2023
6b93153  [SPARK-46472][PYTHON][DOCS] Refine docstring of `array_prepend/array_…  LuciferYang  Dec 22, 2023
fc7d7bc  [SPARK-46464][DOC] Fix the scroll issue of tables when overflow  yaooqinn  Dec 22, 2023
43f7932  [SPARK-46480][CORE][SQL] Fix NPE when table cache task attempt  ulysses-you  Dec 22, 2023
3361f25  [SPARK-40876][SQL] Widening type promotions in Parquet readers  johanl-db  Dec 22, 2023
cb2f47b  [SPARK-46485][SQL] V1Write should not add Sort when not needed  cloud-fan  Dec 22, 2023
6 changes: 6 additions & 0 deletions common/utils/src/main/resources/error/error-classes.json
@@ -2513,6 +2513,12 @@
     ],
     "sqlState" : "42601"
   },
+  "INVALID_WRITER_COMMIT_MESSAGE" : {
+    "message" : [
+      "The data source writer has generated an invalid number of commit messages. Expected exactly one writer commit message from each task, but received <detail>."
+    ],
+    "sqlState" : "42KDE"
+  },
   "INVALID_WRITE_DISTRIBUTION" : {
     "message" : [
       "The requested write distribution is invalid."
7 changes: 7 additions & 0 deletions connector/connect/client/jvm/pom.xml
@@ -82,6 +82,13 @@
       <artifactId>scalacheck_${scala.binary.version}</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.spark</groupId>
+      <artifactId>spark-common-utils_${scala.binary.version}</artifactId>
+      <version>${project.version}</version>
+      <classifier>tests</classifier>
+      <scope>test</scope>
+    </dependency>
     <!-- Use mima to perform the compatibility check -->
     <dependency>
       <groupId>com.typesafe</groupId>
7 changes: 7 additions & 0 deletions core/pom.xml
@@ -451,6 +451,13 @@
       <classifier>tests</classifier>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.spark</groupId>
+      <artifactId>spark-common-utils_${scala.binary.version}</artifactId>
+      <version>${project.version}</version>
+      <classifier>tests</classifier>
+      <scope>test</scope>
+    </dependency>
 
     <!--
     This spark-tags test-dep is needed even though it isn't used in this module, otherwise testing-cmds that exclude
2 changes: 2 additions & 0 deletions core/src/main/scala/org/apache/spark/BarrierTaskContext.scala
@@ -194,6 +194,8 @@ class BarrierTaskContext private[spark] (
 
   override def isCompleted(): Boolean = taskContext.isCompleted()
 
+  override def isFailed(): Boolean = taskContext.isFailed()
+
   override def isInterrupted(): Boolean = taskContext.isInterrupted()
 
   override def addTaskCompletionListener(listener: TaskCompletionListener): this.type = {
5 changes: 5 additions & 0 deletions core/src/main/scala/org/apache/spark/TaskContext.scala
@@ -94,6 +94,11 @@ abstract class TaskContext extends Serializable {
    */
  def isCompleted(): Boolean
 
+  /**
+   * Returns true if the task has failed.
+   */
+  def isFailed(): Boolean
+
   /**
    * Returns true if the task has been killed.
    */
2 changes: 2 additions & 0 deletions core/src/main/scala/org/apache/spark/TaskContextImpl.scala
@@ -275,6 +275,8 @@ private[spark] class TaskContextImpl(
   @GuardedBy("this")
   override def isCompleted(): Boolean = synchronized(completed)
 
+  override def isFailed(): Boolean = synchronized(failureCauseOpt.isDefined)
+
   override def isInterrupted(): Boolean = reasonIfKilled.isDefined
 
   override def getLocalProperty(key: String): String = localProperties.getProperty(key)
10 changes: 10 additions & 0 deletions core/src/test/scala/org/apache/spark/scheduler/TaskContextSuite.scala
@@ -670,6 +670,16 @@ class TaskContextSuite extends SparkFunSuite with BeforeAndAfter with LocalSpark
     assert(invocationOrder === Seq("C", "B", "A", "D"))
   }
 
+  test("SPARK-46480: Add isFailed in TaskContext") {
+    val context = TaskContext.empty()
+    var isFailed = false
+    context.addTaskCompletionListener[Unit] { context =>
+      isFailed = context.isFailed()
+    }
+    context.markTaskFailed(new RuntimeException())
+    context.markTaskCompleted(None)
+    assert(isFailed)
+  }
 }
 
 private object TaskContextSuite {
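The new `isFailed()` hook matters because task completion listeners fire on both success and failure, and before this change a listener had no way to tell the two apart. A minimal usage sketch follows; the helper name and the RDD job in it are illustrative assumptions, not part of this PR:

```scala
import org.apache.spark.{SparkContext, TaskContext}

// Hypothetical driver-side helper: registers a completion listener that
// branches on the new isFailed(), e.g. to discard partial output on failure.
def runWithFailureAwareCleanup(sc: SparkContext): Unit = {
  sc.parallelize(1 to 100, numSlices = 4).foreachPartition { _ =>
    val ctx = TaskContext.get()
    ctx.addTaskCompletionListener[Unit] { c =>
      if (c.isFailed()) {
        println(s"Task ${c.taskAttemptId()} failed; rolling back partial state")
      } else {
        println(s"Task ${c.taskAttemptId()} succeeded")
      }
    }
    // ... per-partition work would go here ...
  }
}
```

Per the `TaskContextImpl` change above, `isFailed()` only reports whether a failure cause has been recorded, so it is cheap and safe to call from inside any listener.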
16 changes: 14 additions & 2 deletions dev/sparktestsupport/modules.py
@@ -770,7 +770,7 @@ def __hash__(self):
         "pyspark.pandas.tests.window.test_groupby_rolling_adv",
         "pyspark.pandas.tests.window.test_groupby_rolling_count",
         "pyspark.pandas.tests.test_scalars",
-        "pyspark.pandas.tests.test_series_datetime",
+        "pyspark.pandas.tests.series.test_datetime",
         "pyspark.pandas.tests.series.test_string_ops_adv",
         "pyspark.pandas.tests.series.test_string_ops_basic",
         "pyspark.pandas.tests.test_spark_functions",
@@ -864,6 +864,12 @@ def __hash__(self):
         "pyspark.pandas.tests.test_indexing",
         "pyspark.pandas.tests.test_ops_on_diff_frames",
         "pyspark.pandas.tests.diff_frames_ops.test_align",
+        "pyspark.pandas.tests.diff_frames_ops.test_arithmetic",
+        "pyspark.pandas.tests.diff_frames_ops.test_arithmetic_ext",
+        "pyspark.pandas.tests.diff_frames_ops.test_arithmetic_ext_float",
+        "pyspark.pandas.tests.diff_frames_ops.test_arithmetic_chain",
+        "pyspark.pandas.tests.diff_frames_ops.test_arithmetic_chain_ext",
+        "pyspark.pandas.tests.diff_frames_ops.test_arithmetic_chain_ext_float",
         "pyspark.pandas.tests.diff_frames_ops.test_basic_slow",
         "pyspark.pandas.tests.diff_frames_ops.test_cov",
         "pyspark.pandas.tests.diff_frames_ops.test_corrwith",
@@ -1046,7 +1052,7 @@ def __hash__(self):
         "pyspark.pandas.tests.connect.resample.test_parity_on",
         "pyspark.pandas.tests.connect.resample.test_parity_timezone",
         "pyspark.pandas.tests.connect.test_parity_scalars",
-        "pyspark.pandas.tests.connect.test_parity_series_datetime",
+        "pyspark.pandas.tests.connect.series.test_parity_datetime",
         "pyspark.pandas.tests.connect.series.test_parity_string_ops_adv",
         "pyspark.pandas.tests.connect.series.test_parity_string_ops_basic",
         "pyspark.pandas.tests.connect.test_parity_spark_functions",
@@ -1223,6 +1229,12 @@ def __hash__(self):
         "pyspark.pandas.tests.connect.indexes.test_parity_datetime_property",
         "pyspark.pandas.tests.connect.indexes.test_parity_datetime_round",
         "pyspark.pandas.tests.connect.test_parity_ops_on_diff_frames",
+        "pyspark.pandas.tests.connect.diff_frames_ops.test_parity_arithmetic",
+        "pyspark.pandas.tests.connect.diff_frames_ops.test_parity_arithmetic_ext",
+        "pyspark.pandas.tests.connect.diff_frames_ops.test_parity_arithmetic_ext_float",
+        "pyspark.pandas.tests.connect.diff_frames_ops.test_parity_arithmetic_chain",
+        "pyspark.pandas.tests.connect.diff_frames_ops.test_parity_arithmetic_chain_ext",
+        "pyspark.pandas.tests.connect.diff_frames_ops.test_parity_arithmetic_chain_ext_float",
         "pyspark.pandas.tests.connect.diff_frames_ops.test_parity_groupby",
         "pyspark.pandas.tests.connect.diff_frames_ops.test_parity_groupby_aggregate",
         "pyspark.pandas.tests.connect.diff_frames_ops.test_parity_groupby_apply",
1 change: 1 addition & 0 deletions docs/.gitignore
@@ -1 +1,2 @@
 generated-*.html
+_generated_function_html/
2 changes: 1 addition & 1 deletion docs/css/custom.css
@@ -7,7 +7,7 @@ body {
   font-style: normal;
   font-weight: 400;
   overflow-wrap: anywhere;
-  overflow-x: hidden;
+  overflow-x: auto;
   padding-top: 80px;
   padding-bottom: 20px;
 }
6 changes: 6 additions & 0 deletions docs/sql-error-conditions.md
@@ -1398,6 +1398,12 @@ Rewrite the query to avoid window functions, aggregate functions, and generator
 
 Cannot specify ORDER BY or a window frame for `<aggFunc>`.
 
+### INVALID_WRITER_COMMIT_MESSAGE
+
+[SQLSTATE: 42KDE](sql-error-conditions-sqlstates.html#class-42-syntax-error-or-access-rule-violation)
+
+The data source writer has generated an invalid number of commit messages. Expected exactly one writer commit message from each task, but received `<detail>`.
+
 ### [INVALID_WRITE_DISTRIBUTION](sql-error-conditions-invalid-write-distribution-error-class.html)
 
 [SQLSTATE: 42000](sql-error-conditions-sqlstates.html#class-42-syntax-error-or-access-rule-violation)
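The two INVALID_WRITER_COMMIT_MESSAGE hunks above (error-classes.json and the generated docs page) encode a single invariant: every write task must hand back exactly one commit message. A hedged sketch of that check, where `CommitMessage` and `validateCommitMessages` are invented names for illustration, not Spark's V2 writer API:

```scala
// Hypothetical sketch of the invariant behind INVALID_WRITER_COMMIT_MESSAGE:
// each write task must produce exactly one commit message.
final case class CommitMessage(taskId: Long, payload: Array[Byte])

def validateCommitMessages(taskId: Long, messages: Seq[CommitMessage]): CommitMessage =
  messages match {
    case Seq(single) => single
    case other =>
      // A real implementation would raise the error class with <detail>
      // describing what was received instead of exactly one message.
      throw new IllegalStateException(
        s"[INVALID_WRITER_COMMIT_MESSAGE] Expected exactly one writer commit " +
        s"message from task $taskId, but received ${other.size}.")
  }
```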