diff --git a/dev/deps/spark-deps-hadoop-2.7-hive-2.3 b/dev/deps/spark-deps-hadoop-2.7-hive-2.3
index 771aeebcd6a2..5c66534ba7b8 100644
--- a/dev/deps/spark-deps-hadoop-2.7-hive-2.3
+++ b/dev/deps/spark-deps-hadoop-2.7-hive-2.3
@@ -202,12 +202,12 @@ orc-shims/1.6.7//orc-shims-1.6.7.jar
oro/2.0.8//oro-2.0.8.jar
osgi-resource-locator/1.0.3//osgi-resource-locator-1.0.3.jar
paranamer/2.8//paranamer-2.8.jar
-parquet-column/1.10.1//parquet-column-1.10.1.jar
-parquet-common/1.10.1//parquet-common-1.10.1.jar
-parquet-encoding/1.10.1//parquet-encoding-1.10.1.jar
-parquet-format/2.4.0//parquet-format-2.4.0.jar
-parquet-hadoop/1.10.1//parquet-hadoop-1.10.1.jar
-parquet-jackson/1.10.1//parquet-jackson-1.10.1.jar
+parquet-column/1.11.1//parquet-column-1.11.1.jar
+parquet-common/1.11.1//parquet-common-1.11.1.jar
+parquet-encoding/1.11.1//parquet-encoding-1.11.1.jar
+parquet-format-structures/1.11.1//parquet-format-structures-1.11.1.jar
+parquet-hadoop/1.11.1//parquet-hadoop-1.11.1.jar
+parquet-jackson/1.11.1//parquet-jackson-1.11.1.jar
protobuf-java/2.5.0//protobuf-java-2.5.0.jar
py4j/0.10.9.1//py4j-0.10.9.1.jar
pyrolite/4.30//pyrolite-4.30.jar
diff --git a/dev/deps/spark-deps-hadoop-3.2-hive-2.3 b/dev/deps/spark-deps-hadoop-3.2-hive-2.3
index fcb4118c201f..b0134c8e0d64 100644
--- a/dev/deps/spark-deps-hadoop-3.2-hive-2.3
+++ b/dev/deps/spark-deps-hadoop-3.2-hive-2.3
@@ -172,12 +172,12 @@ orc-shims/1.6.7//orc-shims-1.6.7.jar
oro/2.0.8//oro-2.0.8.jar
osgi-resource-locator/1.0.3//osgi-resource-locator-1.0.3.jar
paranamer/2.8//paranamer-2.8.jar
-parquet-column/1.10.1//parquet-column-1.10.1.jar
-parquet-common/1.10.1//parquet-common-1.10.1.jar
-parquet-encoding/1.10.1//parquet-encoding-1.10.1.jar
-parquet-format/2.4.0//parquet-format-2.4.0.jar
-parquet-hadoop/1.10.1//parquet-hadoop-1.10.1.jar
-parquet-jackson/1.10.1//parquet-jackson-1.10.1.jar
+parquet-column/1.11.1//parquet-column-1.11.1.jar
+parquet-common/1.11.1//parquet-common-1.11.1.jar
+parquet-encoding/1.11.1//parquet-encoding-1.11.1.jar
+parquet-format-structures/1.11.1//parquet-format-structures-1.11.1.jar
+parquet-hadoop/1.11.1//parquet-hadoop-1.11.1.jar
+parquet-jackson/1.11.1//parquet-jackson-1.11.1.jar
protobuf-java/2.5.0//protobuf-java-2.5.0.jar
py4j/0.10.9.1//py4j-0.10.9.1.jar
pyrolite/4.30//pyrolite-4.30.jar
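Note: both manifests change identically. Parquet itself moves from 1.10.1 to 1.11.1, and the independently versioned parquet-format-2.4.0 jar is replaced by parquet-format-structures-1.11.1, which carries the same Thrift-generated metadata classes but is released in lockstep with parquet-mr. The Java package is unchanged, so no Spark imports move; a minimal sketch (the class names are real, the snippet is illustrative and not from the patch):

    // Same org.apache.parquet.format package as before; only the jar that
    // provides these classes changes (parquet-format -> parquet-format-structures).
    import org.apache.parquet.format.{FileMetaData, PageHeader}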
diff --git a/pom.xml b/pom.xml
index 94f32309fc6a..c43d7f462857 100644
--- a/pom.xml
+++ b/pom.xml
@@ -136,7 +136,7 @@
    <kafka.version>2.7.0</kafka.version>
    <!-- After 10.15.1.3, the minimum required version is JDK9 -->
    <derby.version>10.14.2.0</derby.version>
-    <parquet.version>1.10.1</parquet.version>
+    <parquet.version>1.11.1</parquet.version>
    <orc.version>1.6.7</orc.version>
    <jetty.version>9.4.34.v20201102</jetty.version>
    <jakartaservlet.version>4.0.3</jakartaservlet.version>
@@ -2290,6 +2290,10 @@
            <groupId>commons-pool</groupId>
            <artifactId>commons-pool</artifactId>
          </exclusion>
+          <exclusion>
+            <groupId>javax.annotation</groupId>
+            <artifactId>javax.annotation-api</artifactId>
+          </exclusion>
        </exclusions>
      </dependency>
      <dependency>
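Note: parquet-format-structures declares a dependency on javax.annotation:javax.annotation-api, since the Thrift-generated code is annotated with @javax.annotation.Generated. That annotation has SOURCE retention, so the jar is not needed at runtime, and excluding it here keeps the extra transitive jar out of the two dev/deps manifests above. A sketch of the retention check (assumes the annotation jar on the compile classpath; not from the patch):

    // @Generated never reaches the class files, so nothing references the
    // annotation jar at runtime.
    val retention = classOf[javax.annotation.Generated]
      .getAnnotation(classOf[java.lang.annotation.Retention])
      .value()
    assert(retention == java.lang.annotation.RetentionPolicy.SOURCE)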
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetSchemaSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetSchemaSuite.scala
index e97c6cd29709..fcc08ee16e80 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetSchemaSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetSchemaSuite.scala
@@ -251,7 +251,7 @@ class ParquetSchemaInferenceSuite extends ParquetSchemaTest {
"""
|message root {
|  optional group _1 (MAP) {
- |    repeated group map (MAP_KEY_VALUE) {
+ |    repeated group key_value (MAP_KEY_VALUE) {
|      required int32 key;
|      optional binary value (UTF8);
|    }
@@ -267,7 +267,7 @@ class ParquetSchemaInferenceSuite extends ParquetSchemaTest {
"""
|message root {
|  optional group _1 (MAP) {
- |    repeated group map (MAP_KEY_VALUE) {
+ |    repeated group key_value (MAP_KEY_VALUE) {
|      required group key {
|        optional binary _1 (UTF8);
|        optional binary _2 (UTF8);
@@ -300,7 +300,7 @@ class ParquetSchemaInferenceSuite extends ParquetSchemaTest {
"""
|message root {
|  optional group _1 (MAP_KEY_VALUE) {
- |    repeated group map {
+ |    repeated group key_value {
|      required int32 key;
|      optional group value {
|        optional binary _1 (UTF8);
@@ -740,7 +740,7 @@ class ParquetSchemaSuite extends ParquetSchemaTest {
nullable = true))),
"""message root {
|  optional group f1 (MAP_KEY_VALUE) {
- |    repeated group map {
+ |    repeated group key_value {
|      required int32 num;
|      required binary str (UTF8);
|    }
@@ -759,7 +759,7 @@ class ParquetSchemaSuite extends ParquetSchemaTest {
nullable = true))),
"""message root {
|  optional group f1 (MAP) {
- |    repeated group map (MAP_KEY_VALUE) {
+ |    repeated group key_value (MAP_KEY_VALUE) {
|      required int32 key;
|      required binary value (UTF8);
|    }
@@ -797,7 +797,7 @@ class ParquetSchemaSuite extends ParquetSchemaTest {
nullable = true))),
"""message root {
|  optional group f1 (MAP_KEY_VALUE) {
- |    repeated group map {
+ |    repeated group key_value {
|      required int32 num;
|      optional binary str (UTF8);
|    }
@@ -816,7 +816,7 @@ class ParquetSchemaSuite extends ParquetSchemaTest {
nullable = true))),
"""message root {
|  optional group f1 (MAP) {
- |    repeated group map (MAP_KEY_VALUE) {
+ |    repeated group key_value (MAP_KEY_VALUE) {
|      required int32 key;
|      optional binary value (UTF8);
|    }
@@ -857,7 +857,7 @@ class ParquetSchemaSuite extends ParquetSchemaTest {
nullable = true))),
"""message root {
|  optional group f1 (MAP) {
- |    repeated group map (MAP_KEY_VALUE) {
+ |    repeated group key_value (MAP_KEY_VALUE) {
|      required int32 key;
|      required binary value (UTF8);
|    }
@@ -893,7 +893,7 @@ class ParquetSchemaSuite extends ParquetSchemaTest {
nullable = true))),
"""message root {
|  optional group f1 (MAP) {
- |    repeated group map (MAP_KEY_VALUE) {
+ |    repeated group key_value (MAP_KEY_VALUE) {
|      required int32 key;
|      optional binary value (UTF8);
|    }
@@ -1447,7 +1447,7 @@ class ParquetSchemaSuite extends ParquetSchemaTest {
parquetSchema =
"""message root {
|  required group f0 (MAP) {
- |    repeated group map (MAP_KEY_VALUE) {
+ |    repeated group key_value (MAP_KEY_VALUE) {
|      required int32 key;
|      required group value {
|        required int32 value_f0;
@@ -1472,7 +1472,7 @@ class ParquetSchemaSuite extends ParquetSchemaTest {
expectedSchema =
"""message root {
|  required group f0 (MAP) {
- |    repeated group map (MAP_KEY_VALUE) {
+ |    repeated group key_value (MAP_KEY_VALUE) {
|      required int32 key;
|      required group value {
|        required int64 value_f1;
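Note: every expected-schema change in this suite is the same rename. parquet-mr 1.11.x follows the parquet-format LogicalTypes convention and names the repeated inner group of a MAP key_value, where 1.10.x wrote the legacy name map. The group name carries no semantics for readers, so the Catalyst-to-Parquet conversion itself is unchanged; only the golden strings move. A sketch using parquet-mr's own parser (parseMessageType is a real API; the snippet is not from the patch):

    import org.apache.parquet.schema.MessageTypeParser

    // 1.11.x-style schema; with "key_value" replaced by the legacy "map",
    // readers still recover the same MAP type.
    val schema = MessageTypeParser.parseMessageType(
      """message root {
        |  optional group f1 (MAP) {
        |    repeated group key_value (MAP_KEY_VALUE) {
        |      required int32 key;
        |      optional binary value (UTF8);
        |    }
        |  }
        |}""".stripMargin)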
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamSuite.scala
index 440fe997ae13..c4e43d24b0b8 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamSuite.scala
@@ -214,7 +214,9 @@ class StreamSuite extends StreamTest {
.start(outputDir.getAbsolutePath)
try {
query.processAllAvailable()
- val outputDf = spark.read.parquet(outputDir.getAbsolutePath).as[Long]
+ // Parquet page-level CRC checksums change the file sizes, which can change
+ // the order of the data read back from these files. See PARQUET-1746 for details.
+ val outputDf = spark.read.parquet(outputDir.getAbsolutePath).sort('a).as[Long]
checkDataset[Long](outputDf, (0L to 10L).toArray: _*)
} finally {
query.stop()
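Note: PARQUET-1746 adds page-level CRC32 checksums on the write path, which grow each data-page header by a few bytes. File sizes therefore shift, and with them the order in which spark.read.parquet happens to return rows across files, so the test now sorts before comparing. If byte-identical output matters, the checksums can be switched off through parquet-mr's Hadoop option (the key is ParquetOutputFormat.PAGE_WRITE_CHECKSUM_ENABLED; the snippet is a sketch, assuming an active SparkSession named spark, and is not from the patch):

    // Disable write-side page CRCs for a single write.
    spark.range(0, 11)
      .write
      .option("parquet.page.write-checksum.enabled", "false")
      .parquet("/tmp/no-crc")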
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala
index 5357f4b63d79..c91ee92350fc 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala
@@ -1528,7 +1528,7 @@ class StatisticsSuite extends StatisticsCollectionTestBase with TestHiveSingleto
Seq(tbl, ext_tbl).foreach { tblName =>
sql(s"INSERT INTO $tblName VALUES (1, 'a', '2019-12-13')")
- val expectedSize = 601
+ val expectedSize = 651
// analyze table
sql(s"ANALYZE TABLE $tblName COMPUTE STATISTICS NOSCAN")
var tableStats = getTableStats(tblName)
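Note: the NOSCAN statistic here is just the total size of the files under the table location, so the 50 extra bytes written by the page CRCs surface directly as totalSize (601 -> 651 for this one-row table). A sketch of what the statistic measures (hypothetical helper, not part of the suite):

    import org.apache.hadoop.fs.{FileSystem, Path}

    // Sum of on-disk file lengths, i.e. the quantity ANALYZE ... NOSCAN records.
    def rawTableSize(fs: FileSystem, location: Path): Long =
      fs.listStatus(location).filter(_.isFile).map(_.getLen).sum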