diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
index ab18a3119c09f..b0d06e862ca7b 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
@@ -274,8 +274,8 @@ class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
         extraOptions + ("paths" -> objectMapper.writeValueAsString(paths.toArray))
       }
 
-      val finalOptions =
-        sessionOptions.filterKeys(!optionsWithPath.contains(_)) ++ optionsWithPath.originalMap
+      val finalOptions = sessionOptions.filterKeys(!optionsWithPath.contains(_)).toMap ++
+        optionsWithPath.originalMap
       val dsOptions = new CaseInsensitiveStringMap(finalOptions.asJava)
       val (table, catalog, ident) = provider match {
         case _: SupportsCatalogOptions if userSpecifiedSchema.nonEmpty =>
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
index bd1997bee53f7..6fc4dc5aed6e7 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
@@ -321,8 +321,8 @@ final class DataFrameWriter[T] private[sql](ds: Dataset[T]) {
         extraOptions + ("path" -> path.get)
       }
 
-      val finalOptions =
-        sessionOptions.filterKeys(!optionsWithPath.contains(_)) ++ optionsWithPath.originalMap
+      val finalOptions = sessionOptions.filterKeys(!optionsWithPath.contains(_)).toMap ++
+        optionsWithPath.originalMap
       val dsOptions = new CaseInsensitiveStringMap(finalOptions.asJava)
 
       def getTable: Table = {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamReader.scala b/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamReader.scala
index c22f917d3cf91..93a48946fbafc 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamReader.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamReader.scala
@@ -221,8 +221,8 @@ final class DataStreamReader private[sql](sparkSession: SparkSession) extends Lo
       case provider: TableProvider if !provider.isInstanceOf[FileDataSourceV2] =>
         val sessionOptions = DataSourceV2Utils.extractSessionConfigs(
           source = provider, conf = sparkSession.sessionState.conf)
-        val finalOptions =
-          sessionOptions.filterKeys(!optionsWithPath.contains(_)) ++ optionsWithPath.originalMap
+        val finalOptions = sessionOptions.filterKeys(!optionsWithPath.contains(_)).toMap ++
+          optionsWithPath.originalMap
         val dsOptions = new CaseInsensitiveStringMap(finalOptions.asJava)
         val table = DataSourceV2Utils.getTableFromProvider(provider, dsOptions, userSpecifiedSchema)
         import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Implicits._
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamWriter.scala b/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamWriter.scala
index 682f3b98ec2e8..dda6dec9c4ebc 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamWriter.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamWriter.scala
@@ -365,8 +365,8 @@ final class DataStreamWriter[T] private[sql](ds: Dataset[T]) {
       val provider = cls.getConstructor().newInstance().asInstanceOf[TableProvider]
       val sessionOptions = DataSourceV2Utils.extractSessionConfigs(
         source = provider, conf = df.sparkSession.sessionState.conf)
-      val finalOptions =
-        sessionOptions.filterKeys(!optionsWithPath.contains(_)) ++ optionsWithPath.originalMap
+      val finalOptions = sessionOptions.filterKeys(!optionsWithPath.contains(_)).toMap ++
+        optionsWithPath.originalMap
       val dsOptions = new CaseInsensitiveStringMap(finalOptions.asJava)
       val table = DataSourceV2Utils.getTableFromProvider(
         provider, dsOptions, userSpecifiedSchema = None)
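
A note on the repeated .toMap in these hunks: this appears to be a Scala 2.13 compatibility fix. On 2.13, Map#filterKeys is deprecated and returns a lazy MapView rather than a strict Map, so without .toMap the result of the ++ concatenation would no longer be a plain Map[String, String] to hand to new CaseInsensitiveStringMap(finalOptions.asJava). The sketch below is plain Scala, not the Spark code; the option keys and values are made-up stand-ins.

// Sketch only: stand-in values illustrating the filterKeys(...).toMap ++ pattern.
object FilterKeysToMapSketch {
  def main(args: Array[String]): Unit = {
    // Hypothetical session-level defaults and user-supplied options (with a path).
    val sessionOptions = Map("fetchSize" -> "100", "isolationLevel" -> "NONE")
    val optionsWithPath = Map("fetchSize" -> "500", "path" -> "/tmp/data")

    // On Scala 2.12 filterKeys returns a strict Map; on 2.13 it returns a lazy
    // (and deprecated) MapView. Calling .toMap materializes it so the ++ result
    // is a strict Map[String, String] on both Scala versions.
    val finalOptions =
      sessionOptions.filterKeys(!optionsWithPath.contains(_)).toMap ++ optionsWithPath

    // User-supplied options win over session defaults: fetchSize is 500 here,
    // while isolationLevel and path are carried through unchanged.
    println(finalOptions)
  }
}

The merge order is also the point of the expression: session configs are filtered first, so any key the caller supplied explicitly takes precedence over the session-level value.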