diff --git a/core/src/test/scala/org/apache/spark/internal/config/ConfigEntrySuite.scala b/core/src/test/scala/org/apache/spark/internal/config/ConfigEntrySuite.scala
index 6bd63fa5ffa4..1c9a9a120345 100644
--- a/core/src/test/scala/org/apache/spark/internal/config/ConfigEntrySuite.scala
+++ b/core/src/test/scala/org/apache/spark/internal/config/ConfigEntrySuite.scala
@@ -94,6 +94,8 @@ class ConfigEntrySuite extends SparkFunSuite {
     assert(conf.get(bytes) === 1024L)
     conf.set(bytes.key, "1k")
     assert(conf.get(bytes) === 1L)
+    conf.set(bytes.key, "2048")
+    assert(conf.get(bytes) === 2048)
   }
 
   test("conf entry: regex") {
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
index 9f71ecb756a6..4110ae54ee6c 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
@@ -1415,8 +1415,8 @@
       " bigger files (which is scheduled first). This configuration is effective only when using" +
       " file-based sources such as Parquet, JSON and ORC.")
     .version("2.0.0")
-    .longConf
-    .createWithDefault(4 * 1024 * 1024)
+    .bytesConf(ByteUnit.BYTE)
+    .createWithDefaultString("4MB")
 
   val FILES_MIN_PARTITION_NUM = buildConf("spark.sql.files.minPartitionNum")
     .doc("The suggested (not guaranteed) minimum number of split file partitions. " +