From a92dcc402fc1c6f53dd110d605d9206a3b548327 Mon Sep 17 00:00:00 2001
From: yangjie01
Date: Thu, 10 Nov 2022 17:00:26 +0800
Subject: [PATCH] fix

---
 .../apache/spark/sql/kafka010/KafkaOffsetReaderAdmin.scala   | 2 +-
 .../spark/sql/kafka010/KafkaOffsetReaderConsumer.scala       | 2 +-
 .../apache/spark/sql/protobuf/utils/SchemaConverters.scala   | 2 +-
 .../scala/org/apache/spark/ExecutorAllocationManager.scala   | 2 +-
 .../src/main/scala/org/apache/spark/status/api/v1/api.scala  | 2 +-
 .../main/scala/org/apache/spark/sql/types/StructType.scala   | 2 +-
 .../main/scala/org/apache/spark/sql/UDFRegistration.scala    | 2 +-
 .../spark/sql/catalyst/analysis/ResolveSessionCatalog.scala  | 2 +-
 .../sql/execution/adaptive/ShufflePartitionsUtil.scala       | 6 +++---
 .../org/apache/spark/sql/execution/command/tables.scala      | 2 +-
 .../sql/execution/datasources/v2/ShowFunctionsExec.scala     | 2 +-
 .../execution/datasources/v2/ShowTablePropertiesExec.scala   | 2 +-
 .../org/apache/spark/sql/hive/client/HiveClientImpl.scala    | 4 ++--
 13 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/connector/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaOffsetReaderAdmin.scala b/connector/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaOffsetReaderAdmin.scala
index 25c8cb8d518e..b443bbcee0fc 100644
--- a/connector/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaOffsetReaderAdmin.scala
+++ b/connector/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaOffsetReaderAdmin.scala
@@ -243,7 +243,7 @@ private[kafka010] class KafkaOffsetReaderAdmin(
           }
 
           tp -> offset
-      }.toMap
+      }
     }
 
   private def fetchSpecificOffsets0(
diff --git a/connector/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaOffsetReaderConsumer.scala b/connector/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaOffsetReaderConsumer.scala
index cdd269216874..10c7488de896 100644
--- a/connector/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaOffsetReaderConsumer.scala
+++ b/connector/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaOffsetReaderConsumer.scala
@@ -291,7 +291,7 @@ private[kafka010] class KafkaOffsetReaderConsumer(
           }
 
           tp -> offset
-      }.toMap
+      }
     }
 
   private def fetchSpecificOffsets0(
diff --git a/connector/protobuf/src/main/scala/org/apache/spark/sql/protobuf/utils/SchemaConverters.scala b/connector/protobuf/src/main/scala/org/apache/spark/sql/protobuf/utils/SchemaConverters.scala
index 6fcba3b89186..8f87a35cceb9 100644
--- a/connector/protobuf/src/main/scala/org/apache/spark/sql/protobuf/utils/SchemaConverters.scala
+++ b/connector/protobuf/src/main/scala/org/apache/spark/sql/protobuf/utils/SchemaConverters.scala
@@ -99,7 +99,7 @@ object SchemaConverters {
 
         Option(
           fd.getMessageType.getFields.asScala
-            .flatMap(structFieldFor(_, newRecordNames.toSet))
+            .flatMap(structFieldFor(_, newRecordNames))
             .toSeq)
           .filter(_.nonEmpty)
           .map(StructType.apply)
diff --git a/core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala b/core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala
index 43180befe6f8..204ffc39a110 100644
--- a/core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala
+++ b/core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala
@@ -568,7 +568,7 @@ private[spark] class ExecutorAllocationManager(
       // We don't want to change our target number of executors, because we already did that
       // when the task backlog decreased.
       if (decommissionEnabled) {
-        val executorIdsWithoutHostLoss = executorIdsToBeRemoved.toSeq.map(
+        val executorIdsWithoutHostLoss = executorIdsToBeRemoved.map(
           id => (id, ExecutorDecommissionInfo("spark scale down"))).toArray
         client.decommissionExecutors(
           executorIdsWithoutHostLoss,
diff --git a/core/src/main/scala/org/apache/spark/status/api/v1/api.scala b/core/src/main/scala/org/apache/spark/status/api/v1/api.scala
index ba21d5d1f628..70c74b418e3c 100644
--- a/core/src/main/scala/org/apache/spark/status/api/v1/api.scala
+++ b/core/src/main/scala/org/apache/spark/status/api/v1/api.scala
@@ -424,7 +424,7 @@ class ExecutorPeakMetricsDistributions private[spark](
 
   /** Returns the distributions for the specified metric. */
   def getMetricDistribution(metricName: String): IndexedSeq[Double] = {
     val sorted = executorMetrics.map(_.getMetricValue(metricName)).sorted
-    indices.map(i => sorted(i.toInt).toDouble).toIndexedSeq
+    indices.map(i => sorted(i.toInt).toDouble)
   }
 }
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/StructType.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/StructType.scala
index d5f32aac55a4..d81c565c9ab8 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/StructType.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/StructType.scala
@@ -555,7 +555,7 @@ object StructType extends AbstractDataType {
 
   def apply(fields: java.util.List[StructField]): StructType = {
     import scala.collection.JavaConverters._
-    StructType(fields.asScala.toSeq)
+    StructType(fields.asScala.toArray)
   }
 
   private[sql] def fromAttributes(attributes: Seq[Attribute]): StructType =
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/UDFRegistration.scala b/sql/core/src/main/scala/org/apache/spark/sql/UDFRegistration.scala
index fb7323a48cc6..998203364771 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/UDFRegistration.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/UDFRegistration.scala
@@ -57,7 +57,7 @@ class UDFRegistration private[sql] (functionRegistry: FunctionRegistry) extends
       s"""
          | Registering new PythonUDF:
          | name: $name
-         | command: ${udf.func.command.toSeq}
+         | command: ${udf.func.command}
         | envVars: ${udf.func.envVars}
         | pythonIncludes: ${udf.func.pythonIncludes}
         | pythonExec: ${udf.func.pythonExec}
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveSessionCatalog.scala b/sql/core/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveSessionCatalog.scala
index d00d07150b0b..def472b7e61b 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveSessionCatalog.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveSessionCatalog.scala
@@ -545,7 +545,7 @@ class ResolveSessionCatalog(val catalogManager: CatalogManager)
     } else {
       CatalogTableType.MANAGED
     }
-    val (partitionColumns, maybeBucketSpec) = partitioning.toSeq.convertTransforms
+    val (partitionColumns, maybeBucketSpec) = partitioning.convertTransforms
 
     CatalogTable(
       identifier = table,
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/adaptive/ShufflePartitionsUtil.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/adaptive/ShufflePartitionsUtil.scala
index af689db33798..dbed66683b01 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/adaptive/ShufflePartitionsUtil.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/adaptive/ShufflePartitionsUtil.scala
@@ -305,9 +305,9 @@ object ShufflePartitionsUtil extends Logging {
           val dataSize = spec.startReducerIndex.until(spec.endReducerIndex)
             .map(mapStats.bytesByPartitionId).sum
           spec.copy(dataSize = Some(dataSize))
-        }.toSeq
-      case None => partitionSpecs.map(_.copy(dataSize = Some(0))).toSeq
-    }.toSeq
+        }
+      case None => partitionSpecs.map(_.copy(dataSize = Some(0)))
+    }
   }
 
   /**
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala
index ccdce5ec0ce2..4fca8c45e2f9 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala
@@ -940,7 +940,7 @@ case class ShowTablePropertiesCommand(
         }
       case None =>
         properties.filterKeys(!_.startsWith(CatalogTable.VIEW_PREFIX))
-          .toSeq.sortBy(_._1).map(p => Row(p._1, p._2)).toSeq
+          .toSeq.sortBy(_._1).map(p => Row(p._1, p._2))
     }
   }
 }
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/ShowFunctionsExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/ShowFunctionsExec.scala
index cc95ee53531d..b8a9003b559a 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/ShowFunctionsExec.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/ShowFunctionsExec.scala
@@ -39,7 +39,7 @@ case class ShowFunctionsExec(
     pattern: Option[String]) extends V2CommandExec with LeafExecNode {
 
   private def applyPattern(names: Seq[String]): Seq[String] = {
-    StringUtils.filterPattern(names.toSeq, pattern.getOrElse("*"))
+    StringUtils.filterPattern(names, pattern.getOrElse("*"))
   }
 
   override protected def run(): Seq[InternalRow] = {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/ShowTablePropertiesExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/ShowTablePropertiesExec.scala
index 1e62e901d6f9..61b8e91fd348 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/ShowTablePropertiesExec.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/ShowTablePropertiesExec.scala
@@ -47,7 +47,7 @@ case class ShowTablePropertiesExec(
         }
       case None =>
         properties.toSeq.sortBy(_._1).map(kv =>
-          toCatalystRow(kv._1, kv._2)).toSeq
+          toCatalystRow(kv._1, kv._2))
     }
   }
 }
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
index 213d930653dd..5f48f4399078 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
@@ -762,7 +762,7 @@ private[hive] class HiveClientImpl(
       assert(s.values.forall(_.nonEmpty), s"partition spec '$s' is invalid")
       shim.getPartitionNames(client, table.database, table.identifier.table, s.asJava, -1)
     }
-    hivePartitionNames.sorted.toSeq
+    hivePartitionNames.sorted
   }
 
   override def getPartitionOption(
@@ -798,7 +798,7 @@ private[hive] class HiveClientImpl(
     val parts = shim.getPartitions(client, hiveTable, partSpec.asJava)
      .map(fromHivePartition(_, absoluteUri))
     HiveCatalogMetrics.incrementFetchedPartitions(parts.length)
-    parts.toSeq
+    parts
  }
 
   override def getPartitionsByFilter(
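Editorial note (not part of the patch): most hunks above drop a collection conversion such as .toMap, .toSeq, or .toSet that is applied to a value which already has the type the caller needs, so the call only rebuilds an equal collection; the StructType and ExecutorAllocationManager hunks instead collapse an intermediate conversion in favor of a single .toArray. The short, self-contained Scala sketch below illustrates the .toMap case. The names (RedundantConversionSketch, partitionOffsets) and the sample data are hypothetical and are not taken from the Spark classes touched by the patch.

// Illustrative sketch only: mapping an immutable Map to key/value pairs
// already yields an immutable Map, so a trailing .toMap merely copies it.
object RedundantConversionSketch {
  def main(args: Array[String]): Unit = {
    // Hypothetical sample data standing in for per-partition offsets.
    val partitionOffsets: Map[String, Long] = Map("topic-0" -> 10L, "topic-1" -> 42L)

    // With the redundant conversion: .map has already produced a Map[String, Long].
    val withToMap: Map[String, Long] =
      partitionOffsets.map { case (tp, offset) => tp -> (offset + 1) }.toMap

    // Without it: same type and contents, one intermediate copy fewer.
    val withoutToMap: Map[String, Long] =
      partitionOffsets.map { case (tp, offset) => tp -> (offset + 1) }

    assert(withToMap == withoutToMap)
  }
}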