diff --git a/connector/avro/src/main/scala/org/apache/spark/sql/avro/AvroOptions.scala b/connector/avro/src/main/scala/org/apache/spark/sql/avro/AvroOptions.scala
index 5fd39393335d4..ff3c07203f74b 100644
--- a/connector/avro/src/main/scala/org/apache/spark/sql/avro/AvroOptions.scala
+++ b/connector/avro/src/main/scala/org/apache/spark/sql/avro/AvroOptions.scala
@@ -81,14 +81,14 @@ private[sql] class AvroOptions(
/**
* Top level record name in write result, which is required in Avro spec.
- * See https://avro.apache.org/docs/1.11.4/specification/#schema-record .
+ * See https://avro.apache.org/docs/1.11.5/specification/#schema-record .
* Default value is "topLevelRecord"
*/
val recordName: String = parameters.getOrElse(RECORD_NAME, "topLevelRecord")
/**
* Record namespace in write result. Default value is "".
- * See Avro spec for details: https://avro.apache.org/docs/1.11.4/specification/#schema-record .
+ * See Avro spec for details: https://avro.apache.org/docs/1.11.5/specification/#schema-record .
*/
val recordNamespace: String = parameters.getOrElse(RECORD_NAMESPACE, "")
diff --git a/dev/deps/spark-deps-hadoop-3-hive-2.3 b/dev/deps/spark-deps-hadoop-3-hive-2.3
index f110a1988fbf5..c7aa3eea703ae 100644
--- a/dev/deps/spark-deps-hadoop-3-hive-2.3
+++ b/dev/deps/spark-deps-hadoop-3-hive-2.3
@@ -21,9 +21,9 @@ arrow-memory-core/12.0.1//arrow-memory-core-12.0.1.jar
arrow-memory-netty/12.0.1//arrow-memory-netty-12.0.1.jar
arrow-vector/12.0.1//arrow-vector-12.0.1.jar
audience-annotations/0.5.0//audience-annotations-0.5.0.jar
-avro-ipc/1.11.4//avro-ipc-1.11.4.jar
-avro-mapred/1.11.4//avro-mapred-1.11.4.jar
-avro/1.11.4//avro-1.11.4.jar
+avro-ipc/1.11.5//avro-ipc-1.11.5.jar
+avro-mapred/1.11.5//avro-mapred-1.11.5.jar
+avro/1.11.5//avro-1.11.5.jar
aws-java-sdk-bundle/1.12.262//aws-java-sdk-bundle-1.12.262.jar
azure-data-lake-store-sdk/2.3.9//azure-data-lake-store-sdk-2.3.9.jar
azure-keyvault-core/1.0.0//azure-keyvault-core-1.0.0.jar
diff --git a/docs/sql-data-sources-avro.md b/docs/sql-data-sources-avro.md
index a23c438af6d4e..271819d363882 100644
--- a/docs/sql-data-sources-avro.md
+++ b/docs/sql-data-sources-avro.md
@@ -417,7 +417,7 @@ applications. Read the [Advanced Dependency Management](https://spark.apache
Submission Guide for more details.
## Supported types for Avro -> Spark SQL conversion
-Currently Spark supports reading all [primitive types](https://avro.apache.org/docs/1.11.4/specification/#primitive-types) and [complex types](https://avro.apache.org/docs/1.11.4/specification/#complex-types) under records of Avro.
+Currently Spark supports reading all [primitive types](https://avro.apache.org/docs/1.11.5/specification/#primitive-types) and [complex types](https://avro.apache.org/docs/1.11.5/specification/#complex-types) under records of Avro.
| Avro type | Spark SQL type |
@@ -481,7 +481,7 @@ In addition to the types listed above, it supports reading `union` types. The fo
3. `union(something, null)`, where something is any supported Avro type. This will be mapped to the same Spark SQL type as that of something, with nullable set to true.
All other union types are considered complex. They will be mapped to StructType where field names are member0, member1, etc., in accordance with members of the union. This is consistent with the behavior when converting between Avro and Parquet.
-It also supports reading the following Avro [logical types](https://avro.apache.org/docs/1.11.4/specification/#logical-types):
+It also supports reading the following Avro [logical types](https://avro.apache.org/docs/1.11.5/specification/#logical-types):
| Avro logical type | Avro type | Spark SQL type |
diff --git a/pom.xml b/pom.xml
index 2cad20594c712..50124447081fd 100644
--- a/pom.xml
+++ b/pom.xml
@@ -158,7 +158,7 @@
-->
4.2.19
-    <avro.version>1.11.4</avro.version>
+    <avro.version>1.11.5</avro.version>
    <aws.kinesis.client.version>1.12.0</aws.kinesis.client.version>
    <aws.java.sdk.version>1.11.655</aws.java.sdk.version>
diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala
index 737d5383abd89..c5254658db5e3 100644
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@ -1103,7 +1103,7 @@ object DependencyOverrides {
dependencyOverrides += "com.google.guava" % "guava" % guavaVersion,
dependencyOverrides += "xerces" % "xercesImpl" % "2.12.2",
dependencyOverrides += "jline" % "jline" % "2.14.6",
- dependencyOverrides += "org.apache.avro" % "avro" % "1.11.4",
+ dependencyOverrides += "org.apache.avro" % "avro" % "1.11.5",
dependencyOverrides += "org.apache.commons" % "commons-compress" % "1.23.0")
}
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HiveClientSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HiveClientSuite.scala
index d35cc79c6a32b..f6c7c2673069d 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HiveClientSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/client/HiveClientSuite.scala
@@ -908,7 +908,7 @@ class HiveClientSuite(version: String, allVersions: Seq[String])
test("Decimal support of Avro Hive serde") {
val tableName = "tab1"
// TODO: add the other logical types. For details, see the link:
- // https://avro.apache.org/docs/1.11.4/specification/#logical-types
+ // https://avro.apache.org/docs/1.11.5/specification/#logical-types
val avroSchema =
"""{
| "name": "test_record",