diff --git a/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala b/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala
index 7ad53b8f9f87..b6d6441925a3 100644
--- a/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala
+++ b/core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala
@@ -1881,7 +1881,7 @@ private[spark] class DAGScheduler(
           val ignoreStageFailure = ignoreDecommissionFetchFailure &&
             isExecutorDecommissioningOrDecommissioned(taskScheduler, bmAddress)
           if (ignoreStageFailure) {
-            logInfo("Ignoring fetch failure from $task of $failedStage attempt " +
+            logInfo(s"Ignoring fetch failure from $task of $failedStage attempt " +
               s"${task.stageAttemptId} when count spark.stage.maxConsecutiveAttempts " +
               "as executor ${bmAddress.executorId} is decommissioned and " +
               s" ${config.STAGE_IGNORE_DECOMMISSION_FETCH_FAILURE.key}=true")
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvalHelper.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvalHelper.scala
index 8219af7ddc08..41dd45f68e73 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvalHelper.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvalHelper.scala
@@ -250,7 +250,7 @@ trait ExpressionEvalHelper extends ScalaCheckDrivenPropertyChecks with PlanTestB
       val dataType = expression.dataType
       if (!checkResult(unsafeRow.get(0, dataType), expected, dataType, expression.nullable)) {
-        fail("Incorrect evaluation in unsafe mode (fallback mode = $fallbackMode): " +
+        fail(s"Incorrect evaluation in unsafe mode (fallback mode = $fallbackMode): " +
           s"$expression, actual: $unsafeRow, expected: $expected, " +
           s"dataType: $dataType, nullable: ${expression.nullable}")
       }
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2ScanPartitioningAndOrdering.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2ScanPartitioningAndOrdering.scala
index b03dda111684..b7470ab5059c 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2ScanPartitioningAndOrdering.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2ScanPartitioningAndOrdering.scala
@@ -56,7 +56,7 @@ object V2ScanPartitioningAndOrdering extends Rule[LogicalPlan] with SQLConfHelpe
          }
        case _: UnknownPartitioning => None
        case p =>
-         logWarning("Spark ignores the partitioning ${p.getClass.getSimpleName}." +
+         logWarning(s"Spark ignores the partitioning ${p.getClass.getSimpleName}." +
            " Please use KeyGroupedPartitioning for better performance")
          None
      }
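Each hunk above adds a missing `s` string-interpolator prefix; without it, Scala treats the `$`-placeholders as literal text, so the logged or reported message never contains the actual task, stage, or partitioning values. A minimal standalone sketch of the difference (hypothetical values, not Spark code):

// Standalone sketch: why the missing `s` prefix matters for these messages.
object InterpolatorDemo {
  def main(args: Array[String]): Unit = {
    val task = "ResultTask(stage=7, partition=3)"   // made-up example value
    val failedStage = "ShuffleMapStage 7"            // made-up example value

    // No `s` prefix: the placeholders are printed verbatim.
    println("Ignoring fetch failure from $task of $failedStage")
    // prints: Ignoring fetch failure from $task of $failedStage

    // With the `s` interpolator: the variable values are substituted.
    println(s"Ignoring fetch failure from $task of $failedStage")
    // prints: Ignoring fetch failure from ResultTask(stage=7, partition=3) of ShuffleMapStage 7
  }
}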