diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/DataSourceScanExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/DataSourceScanExec.scala
index 6c04839c42b14..5ad1dc88d3776 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/DataSourceScanExec.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/DataSourceScanExec.scala
@@ -463,7 +463,7 @@ case class FileSourceScanExec(
       driverMetrics("staticFilesNum") = filesNum
       driverMetrics("staticFilesSize") = filesSize
     }
-    if (relation.partitionSchemaOption.isDefined) {
+    if (relation.partitionSchema.nonEmpty) {
       driverMetrics("numPartitions") = partitions.length
     }
   }
@@ -482,7 +482,7 @@ case class FileSourceScanExec(
         None
       }
     } ++ {
-      if (relation.partitionSchemaOption.isDefined) {
+      if (relation.partitionSchema.nonEmpty) {
         Map(
           "numPartitions" -> SQLMetrics.createMetric(sparkContext, "number of partitions read"),
           "pruningTime" ->
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/HadoopFsRelation.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/HadoopFsRelation.scala
index 4ed8943ef46f4..fd1824055dcfd 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/HadoopFsRelation.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/HadoopFsRelation.scala
@@ -57,9 +57,6 @@ case class HadoopFsRelation(
     PartitioningUtils.mergeDataAndPartitionSchema(dataSchema, partitionSchema,
       sparkSession.sessionState.conf.caseSensitiveAnalysis)
 
-  def partitionSchemaOption: Option[StructType] =
-    if (partitionSchema.isEmpty) None else Some(partitionSchema)
-
   override def toString: String = {
     fileFormat match {
       case source: DataSourceRegister => source.shortName()
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PruneFileSourcePartitions.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PruneFileSourcePartitions.scala
index 2e8e5426d47be..be70e18d220e5 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PruneFileSourcePartitions.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PruneFileSourcePartitions.scala
@@ -62,7 +62,7 @@ private[sql] object PruneFileSourcePartitions
             _,
             _,
             _))
-        if filters.nonEmpty && fsRelation.partitionSchemaOption.isDefined =>
+        if filters.nonEmpty && fsRelation.partitionSchema.nonEmpty =>
      val normalizedFilters = DataSourceStrategy.normalizeExprs(
        filters.filter(f => f.deterministic && !SubqueryExpression.hasSubquery(f)),
        logicalRelation.output)
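
For context, a minimal sketch of the equivalence this refactor relies on: `partitionSchemaOption.isDefined` was true exactly when the partition schema had at least one field, which is what `partitionSchema.nonEmpty` reports directly (`StructType` is a `Seq[StructField]`, so `nonEmpty` is available). The helper body below is copied from the removed `HadoopFsRelation` code; the standalone object and the example schemas are hypothetical, for illustration only.

```scala
import org.apache.spark.sql.types.{StringType, StructType}

object PartitionSchemaCheck {
  // Old helper, reproduced verbatim from the removed HadoopFsRelation code:
  def partitionSchemaOption(partitionSchema: StructType): Option[StructType] =
    if (partitionSchema.isEmpty) None else Some(partitionSchema)

  def main(args: Array[String]): Unit = {
    // Hypothetical schemas: an unpartitioned relation and one partitioned by "dt".
    val unpartitioned = new StructType()
    val partitioned = new StructType().add("dt", StringType)

    // The rewrite is behavior-preserving iff these two checks always agree:
    assert(partitionSchemaOption(unpartitioned).isDefined == unpartitioned.nonEmpty)
    assert(partitionSchemaOption(partitioned).isDefined == partitioned.nonEmpty)
  }
}
```

Dropping the `Option` wrapper avoids allocating a `Some` just to answer a boolean question, and removes the last caller-facing use of the helper, which is why the declaration itself can go.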