diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
index afef88f7e97e8..cd106e7f2093e 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala
@@ -3586,37 +3586,6 @@ class AstBuilder extends SqlBaseBaseVisitor[AnyRef] with SQLConfHelper with Logg
       ctx.SERDE != null)
   }
 
-  /**
-   * Create a [[CacheTableStatement]].
-   *
-   * For example:
-   * {{{
-   *   CACHE [LAZY] TABLE multi_part_name
-   *     [OPTIONS tablePropertyList] [[AS] query]
-   * }}}
-   */
-  override def visitCacheTable(ctx: CacheTableContext): LogicalPlan = withOrigin(ctx) {
-    import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._
-
-    val query = Option(ctx.query).map(plan)
-    val tableName = visitMultipartIdentifier(ctx.multipartIdentifier)
-    if (query.isDefined && tableName.length > 1) {
-      val catalogAndNamespace = tableName.init
-      throw new ParseException("It is not allowed to add catalog/namespace " +
-        s"prefix ${catalogAndNamespace.quoted} to " +
-        "the table name in CACHE TABLE AS SELECT", ctx)
-    }
-    val options = Option(ctx.options).map(visitPropertyKeyValues).getOrElse(Map.empty)
-    CacheTableStatement(tableName, query, ctx.LAZY != null, options)
-  }
-
-  /**
-   * Create an [[UncacheTableStatement]] logical plan.
-   */
-  override def visitUncacheTable(ctx: UncacheTableContext): LogicalPlan = withOrigin(ctx) {
-    UncacheTableStatement(visitMultipartIdentifier(ctx.multipartIdentifier), ctx.EXISTS != null)
-  }
-
   /**
    * Create a [[TruncateTable]] command.
    *
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/statements.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/statements.scala
index 281d57b3648f4..a0b7bfb7e51a3 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/statements.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/statements.scala
@@ -410,22 +410,6 @@ case class UseStatement(isNamespaceSet: Boolean, nameParts: Seq[String]) extends
  */
 case class RepairTableStatement(tableName: Seq[String]) extends ParsedStatement
 
-/**
- * A CACHE TABLE statement, as parsed from SQL
- */
-case class CacheTableStatement(
-    tableName: Seq[String],
-    plan: Option[LogicalPlan],
-    isLazy: Boolean,
-    options: Map[String, String]) extends ParsedStatement
-
-/**
- * An UNCACHE TABLE statement, as parsed from SQL
- */
-case class UncacheTableStatement(
-    tableName: Seq[String],
-    ifExists: Boolean) extends ParsedStatement
-
 /**
  * A TRUNCATE TABLE statement, as parsed from SQL
  */
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/DDLParserSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/DDLParserSuite.scala
index c58ff81f17131..dc928336c5617 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/DDLParserSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/DDLParserSuite.scala
@@ -1918,33 +1918,6 @@ class DDLParserSuite extends AnalysisTest {
         asSerde = true))
   }
 
-  test("CACHE TABLE") {
-    comparePlans(
-      parsePlan("CACHE TABLE a.b.c"),
-      CacheTableStatement(Seq("a", "b", "c"), None, false, Map.empty))
-
-    comparePlans(
-      parsePlan("CACHE LAZY TABLE a.b.c"),
-      CacheTableStatement(Seq("a", "b", "c"), None, true, Map.empty))
-
-    comparePlans(
-      parsePlan("CACHE LAZY TABLE a.b.c OPTIONS('storageLevel' 'DISK_ONLY')"),
-      CacheTableStatement(Seq("a", "b", "c"), None, true, Map("storageLevel" -> "DISK_ONLY")))
-
-    intercept("CACHE TABLE a.b.c AS SELECT * FROM testData",
-      "It is not allowed to add catalog/namespace prefix a.b")
-  }
-
-  test("UNCACHE TABLE") {
-    comparePlans(
-      parsePlan("UNCACHE TABLE a.b.c"),
-      UncacheTableStatement(Seq("a", "b", "c"), ifExists = false))
-
-    comparePlans(
-      parsePlan("UNCACHE TABLE IF EXISTS a.b.c"),
-      UncacheTableStatement(Seq("a", "b", "c"), ifExists = true))
-  }
-
   test("TRUNCATE table") {
     comparePlans(
       parsePlan("TRUNCATE TABLE a.b.c"),
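
Note (sketch, not part of the patch): with the two ParsedStatement nodes deleted from
catalyst, CACHE TABLE and UNCACHE TABLE no longer go through a statement-to-command
resolution step; the session-level parser returns the runnable commands directly.
Assuming a spark-shell built with this change applied:

    import org.apache.spark.sql.execution.command.CacheTableCommand

    val plan = spark.sessionState.sqlParser.parsePlan("CACHE LAZY TABLE a.b.c")
    // Case-class equality: the parser now yields the command itself, not a statement.
    assert(plan == CacheTableCommand(Seq("a", "b", "c"), None, isLazy = true, Map.empty))
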
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveSessionCatalog.scala b/sql/core/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveSessionCatalog.scala
index f49caf7f04a20..582f11a2be8fa 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveSessionCatalog.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveSessionCatalog.scala
@@ -446,20 +446,6 @@ class ResolveSessionCatalog(
         ShowCreateTableCommand(ident.asTableIdentifier)
       }
 
-    case CacheTableStatement(tbl, plan, isLazy, options) =>
-      val name = if (plan.isDefined) {
-        // CACHE TABLE ... AS SELECT creates a temp view with the input query.
-        // Temp view doesn't belong to any catalog and we shouldn't resolve catalog in the name.
-        tbl
-      } else {
-        parseTempViewOrV1Table(tbl, "CACHE TABLE")
-      }
-      CacheTableCommand(name.asTableIdentifier, plan, isLazy, options)
-
-    case UncacheTableStatement(tbl, ifExists) =>
-      val name = parseTempViewOrV1Table(tbl, "UNCACHE TABLE")
-      UncacheTableCommand(name.asTableIdentifier, ifExists)
-
     case TruncateTable(ResolvedV1TableIdentifier(ident), partitionSpec) =>
       TruncateTableCommand(
         ident.asTableIdentifier,
@@ -561,12 +547,9 @@ class ResolveSessionCatalog(
           "SHOW VIEWS, only SessionCatalog supports this command.")
       }
 
-    case ShowTableProperties(ResolvedV1TableIdentifier(ident), propertyKey) =>
+    case ShowTableProperties(ResolvedV1TableOrViewIdentifier(ident), propertyKey) =>
       ShowTablePropertiesCommand(ident.asTableIdentifier, propertyKey)
 
-    case ShowTableProperties(r: ResolvedView, propertyKey) =>
-      ShowTablePropertiesCommand(r.identifier.asTableIdentifier, propertyKey)
-
     case DescribeFunction(ResolvedFunc(identifier), extended) =>
       DescribeFunctionCommand(identifier.asFunctionIdentifier, extended)
 
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
index 568c7112954f5..c82e3818b48cc 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
@@ -192,6 +192,40 @@ class SparkSqlAstBuilder extends AstBuilder {
     unquotedPath
   }
 
+  /**
+   * Create a [[CacheTableCommand]].
+   *
+   * For example:
+   * {{{
+   *   CACHE [LAZY] TABLE multi_part_name
+   *     [OPTIONS tablePropertyList] [[AS] query]
+   * }}}
+   */
+  override def visitCacheTable(ctx: CacheTableContext): LogicalPlan = withOrigin(ctx) {
+    import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._
+
+    val query = Option(ctx.query).map(plan)
+    val tableName = visitMultipartIdentifier(ctx.multipartIdentifier)
+    if (query.isDefined && tableName.length > 1) {
+      val catalogAndNamespace = tableName.init
+      throw new ParseException("It is not allowed to add catalog/namespace " +
+        s"prefix ${catalogAndNamespace.quoted} to " +
+        "the table name in CACHE TABLE AS SELECT", ctx)
+    }
+    val options = Option(ctx.options).map(visitPropertyKeyValues).getOrElse(Map.empty)
+    CacheTableCommand(tableName, query, ctx.LAZY != null, options)
+  }
+
+  /**
+   * Create an [[UncacheTableCommand]] logical plan.
+   */
+  override def visitUncacheTable(ctx: UncacheTableContext): LogicalPlan = withOrigin(ctx) {
+    UncacheTableCommand(
+      visitMultipartIdentifier(ctx.multipartIdentifier),
+      ctx.EXISTS != null)
+  }
+
   /**
    * Create a [[ClearCacheCommand]] logical plan.
    */
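
Note (sketch, not part of the patch): visitCacheTable keeps rejecting multi-part names
for CACHE TABLE ... AS SELECT because that form creates a temporary view, and temp views
never belong to a catalog or namespace. Assuming a v2 catalog registered as testcat:

    spark.sql("CACHE TABLE t AS SELECT * FROM range(10)")   // OK: one-part name
    spark.sql("CACHE TABLE testcat.ns.tbl")                 // OK: plain CACHE TABLE takes any name
    // Throws ParseException: "It is not allowed to add catalog/namespace
    // prefix ns to the table name in CACHE TABLE AS SELECT"
    spark.sql("CACHE TABLE ns.t AS SELECT * FROM range(10)")
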
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/cache.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/cache.scala
index f99dc8d9f1a8e..3f0945d1e817b 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/cache.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/cache.scala
@@ -19,26 +19,27 @@ package org.apache.spark.sql.execution.command
 
 import java.util.Locale
 
-import org.apache.spark.sql.{Dataset, Row, SparkSession}
-import org.apache.spark.sql.catalyst.TableIdentifier
+import org.apache.spark.sql.{AnalysisException, DataFrame, Dataset, Row, SparkSession}
 import org.apache.spark.sql.catalyst.plans.QueryPlan
 import org.apache.spark.sql.catalyst.plans.logical.{IgnoreCachedData, LogicalPlan}
 import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap
+import org.apache.spark.sql.connector.catalog.CatalogV2Implicits.MultipartIdentifierHelper
 import org.apache.spark.storage.StorageLevel
 
 case class CacheTableCommand(
-    tableIdent: TableIdentifier,
+    multipartIdentifier: Seq[String],
     plan: Option[LogicalPlan],
     isLazy: Boolean,
     options: Map[String, String]) extends RunnableCommand {
-  require(plan.isEmpty || tableIdent.database.isEmpty,
-    "Database name is not allowed in CACHE TABLE AS SELECT")
+  require(plan.isEmpty || multipartIdentifier.length == 1,
+    "Namespace name is not allowed in CACHE TABLE AS SELECT")
 
   override def innerChildren: Seq[QueryPlan[_]] = plan.toSeq
 
   override def run(sparkSession: SparkSession): Seq[Row] = {
+    val tableName = multipartIdentifier.quoted
     plan.foreach { logicalPlan =>
-      Dataset.ofRows(sparkSession, logicalPlan).createTempView(tableIdent.quotedString)
+      Dataset.ofRows(sparkSession, logicalPlan).createTempView(tableName)
     }
 
     val storageLevelKey = "storagelevel"
@@ -49,34 +50,46 @@ case class CacheTableCommand(
       logWarning(s"Invalid options: ${withoutStorageLevel.mkString(", ")}")
     }
 
+    val table = sparkSession.table(tableName)
     if (storageLevelValue.nonEmpty) {
-      sparkSession.catalog.cacheTable(
-        tableIdent.quotedString, StorageLevel.fromString(storageLevelValue.get))
+      sparkSession.sharedState.cacheManager.cacheQuery(
+        table,
+        Some(tableName),
+        StorageLevel.fromString(storageLevelValue.get))
     } else {
-      sparkSession.catalog.cacheTable(tableIdent.quotedString)
+      sparkSession.sharedState.cacheManager.cacheQuery(table, Some(tableName))
    }
 
     if (!isLazy) {
       // Performs eager caching
-      sparkSession.table(tableIdent).count()
+      table.count()
     }
 
     Seq.empty[Row]
   }
 }
 
-
 case class UncacheTableCommand(
-    tableIdent: TableIdentifier,
+    multipartIdentifier: Seq[String],
     ifExists: Boolean) extends RunnableCommand {
 
   override def run(sparkSession: SparkSession): Seq[Row] = {
-    val tableId = tableIdent.quotedString
-    if (!ifExists || sparkSession.catalog.tableExists(tableId)) {
-      sparkSession.catalog.uncacheTable(tableId)
+    val tableName = multipartIdentifier.quoted
+    table(sparkSession, tableName).foreach { table =>
+      val cascade = !sparkSession.sessionState.catalog.isTempView(multipartIdentifier)
+      sparkSession.sharedState.cacheManager.uncacheQuery(table, cascade)
     }
     Seq.empty[Row]
   }
+
+  private def table(sparkSession: SparkSession, name: String): Option[DataFrame] = {
+    try {
+      Some(sparkSession.table(name))
+    } catch {
+      case ex: AnalysisException if ifExists && ex.getMessage.contains("Table or view not found") =>
+        None
+    }
+  }
 }
 
 /**
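
Note (sketch, not part of the patch): the command now drives the internal CacheManager
directly instead of sparkSession.catalog.cacheTable, which only resolves session-catalog
names; that is what lets multi-part v2 identifiers cache. Roughly the code path run takes
for CACHE TABLE t OPTIONS('storageLevel' 'DISK_ONLY'):

    import org.apache.spark.storage.StorageLevel

    val df = spark.table("t")
    // CacheManager is internal API; shown only to illustrate the calls made above.
    spark.sharedState.cacheManager.cacheQuery(df, Some("t"), StorageLevel.DISK_ONLY)
    df.count()  // eager materialization; skipped when LAZY is specified
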
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
index 6313370476c93..ef3f4daa6dc6b 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
@@ -25,6 +25,7 @@ import org.apache.spark.executor.DataReadMethod._
 import org.apache.spark.executor.DataReadMethod.DataReadMethod
 import org.apache.spark.scheduler.{SparkListener, SparkListenerJobStart}
 import org.apache.spark.sql.catalyst.TableIdentifier
+import org.apache.spark.sql.catalyst.analysis.TempTableAlreadyExistsException
 import org.apache.spark.sql.catalyst.expressions.SubqueryExpression
 import org.apache.spark.sql.catalyst.plans.logical.{BROADCAST, Join, JoinStrategyHint, SHUFFLE_HASH}
 import org.apache.spark.sql.catalyst.util.DateTimeConstants
@@ -140,6 +141,16 @@ class CachedTableSuite extends QueryTest with SQLTestUtils
     }
   }
 
+  test("cache table as select - existing temp view") {
+    withTempView("tempView") {
+      sql("CREATE TEMPORARY VIEW tempView as SELECT 1")
+      val e = intercept[TempTableAlreadyExistsException] {
+        sql("CACHE TABLE tempView AS SELECT 1")
+      }
+      assert(e.getMessage.contains("Temporary view 'tempView' already exists"))
+    }
+  }
+
   test("uncaching temp table") {
     withTempView("tempTable1", "tempTable2") {
       testData.select("key").createOrReplaceTempView("tempTable1")
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala
index 98580568a8df6..ffbc2287d81ad 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala
@@ -30,6 +30,7 @@ import org.apache.spark.sql.catalyst.parser.ParseException
 import org.apache.spark.sql.connector.catalog._
 import org.apache.spark.sql.connector.catalog.CatalogManager.SESSION_CATALOG_NAME
 import org.apache.spark.sql.connector.catalog.CatalogV2Util.withDefaultOwnership
+import org.apache.spark.sql.execution.columnar.InMemoryRelation
 import org.apache.spark.sql.internal.{SQLConf, StaticSQLConf}
 import org.apache.spark.sql.internal.SQLConf.{PARTITION_OVERWRITE_MODE, PartitionOverwriteMode, V2_SESSION_CATALOG_IMPLEMENTATION}
 import org.apache.spark.sql.internal.connector.SimpleTableProvider
@@ -2018,28 +2019,29 @@ class DataSourceV2SQLSuite
     }
   }
 
-  test("CACHE TABLE") {
+  test("CACHE/UNCACHE TABLE") {
     val t = "testcat.ns1.ns2.tbl"
     withTable(t) {
-      spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo")
+      def isCached(table: String): Boolean = {
+        spark.table(table).queryExecution.withCachedData.isInstanceOf[InMemoryRelation]
+      }
 
-      testV1CommandSupportingTempView("CACHE TABLE", t)
+      spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo")
+      sql(s"CACHE TABLE $t")
+      assert(isCached(t))
 
-      val e = intercept[AnalysisException] {
-        sql(s"CACHE LAZY TABLE $t")
-      }
-      assert(e.message.contains("CACHE TABLE is only supported with temp views or v1 tables"))
+      sql(s"UNCACHE TABLE $t")
+      assert(!isCached(t))
     }
-  }
 
-  test("UNCACHE TABLE") {
-    val t = "testcat.ns1.ns2.tbl"
-    withTable(t) {
-      sql(s"CREATE TABLE $t (id bigint, data string) USING foo")
-
-      testV1CommandSupportingTempView("UNCACHE TABLE", t)
-      testV1CommandSupportingTempView("UNCACHE TABLE", s"IF EXISTS $t")
+    // Test a scenario where a table does not exist.
+    val e = intercept[AnalysisException] {
+      sql(s"UNCACHE TABLE $t")
     }
+    assert(e.message.contains("Table or view not found: testcat.ns1.ns2.tbl"))
+
+    // If "IF EXISTS" is set, UNCACHE TABLE will not throw an exception.
+    sql(s"UNCACHE TABLE IF EXISTS $t")
   }
 
   test("SHOW COLUMNS") {
@@ -2555,11 +2557,15 @@ class DataSourceV2SQLSuite
     }
   }
 
-  private def testNotSupportedV2Command(sqlCommand: String, sqlParams: String): Unit = {
+  private def testNotSupportedV2Command(
+      sqlCommand: String,
+      sqlParams: String,
+      sqlCommandInMessage: Option[String] = None): Unit = {
     val e = intercept[AnalysisException] {
       sql(s"$sqlCommand $sqlParams")
     }
-    assert(e.message.contains(s"$sqlCommand is not supported for v2 tables"))
+    val cmdStr = sqlCommandInMessage.getOrElse(sqlCommand)
+    assert(e.message.contains(s"$cmdStr is not supported for v2 tables"))
   }
 
   private def testV1Command(sqlCommand: String, sqlParams: String): Unit = {
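
Note (sketch, not part of the patch): the rewritten suite pins down the user-visible
change, namely that caching now works against v2 tables and that IF EXISTS downgrades
a missing table to a no-op. Assuming the testcat v2 catalog configured by the suite:

    spark.sql("CREATE TABLE testcat.ns1.ns2.tbl (id BIGINT, data STRING) USING foo")
    spark.sql("CACHE TABLE testcat.ns1.ns2.tbl")    // previously: AnalysisException
    spark.sql("UNCACHE TABLE testcat.ns1.ns2.tbl")
    spark.sql("UNCACHE TABLE IF EXISTS testcat.ns1.ns2.gone")  // no-op, no error
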
t = "testcat.ns1.ns2.tbl" withTable(t) { - spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo") + def isCached(table: String): Boolean = { + spark.table(table).queryExecution.withCachedData.isInstanceOf[InMemoryRelation] + } - testV1CommandSupportingTempView("CACHE TABLE", t) + spark.sql(s"CREATE TABLE $t (id bigint, data string) USING foo") + sql(s"CACHE TABLE $t") + assert(isCached(t)) - val e = intercept[AnalysisException] { - sql(s"CACHE LAZY TABLE $t") - } - assert(e.message.contains("CACHE TABLE is only supported with temp views or v1 tables")) + sql(s"UNCACHE TABLE $t") + assert(!isCached(t)) } - } - test("UNCACHE TABLE") { - val t = "testcat.ns1.ns2.tbl" - withTable(t) { - sql(s"CREATE TABLE $t (id bigint, data string) USING foo") - - testV1CommandSupportingTempView("UNCACHE TABLE", t) - testV1CommandSupportingTempView("UNCACHE TABLE", s"IF EXISTS $t") + // Test a scenario where a table does not exist. + val e = intercept[AnalysisException] { + sql(s"UNCACHE TABLE $t") } + assert(e.message.contains("Table or view not found: testcat.ns1.ns2.tbl")) + + // If "IF EXISTS" is set, UNCACHE TABLE will not throw an exception. + sql(s"UNCACHE TABLE IF EXISTS $t") } test("SHOW COLUMNS") { @@ -2555,11 +2557,15 @@ class DataSourceV2SQLSuite } } - private def testNotSupportedV2Command(sqlCommand: String, sqlParams: String): Unit = { + private def testNotSupportedV2Command( + sqlCommand: String, + sqlParams: String, + sqlCommandInMessage: Option[String] = None): Unit = { val e = intercept[AnalysisException] { sql(s"$sqlCommand $sqlParams") } - assert(e.message.contains(s"$sqlCommand is not supported for v2 tables")) + val cmdStr = sqlCommandInMessage.getOrElse(sqlCommand) + assert(e.message.contains(s"$cmdStr is not supported for v2 tables")) } private def testV1Command(sqlCommand: String, sqlParams: String): Unit = { diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/SparkSqlParserSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/SparkSqlParserSuite.scala index 61c16baedb7cc..1a826c00c81f2 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/SparkSqlParserSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/SparkSqlParserSuite.scala @@ -337,5 +337,48 @@ class SparkSqlParserSuite extends AnalysisTest { |FROM v """.stripMargin, "LINES TERMINATED BY only supports newline '\\n' right now") - } + } + + test("CACHE TABLE") { + assertEqual( + "CACHE TABLE a.b.c", + CacheTableCommand(Seq("a", "b", "c"), None, false, Map.empty)) + + assertEqual( + "CACHE TABLE t AS SELECT * FROM testData", + CacheTableCommand( + Seq("t"), + Some(Project(Seq(UnresolvedStar(None)), UnresolvedRelation(Seq("testData")))), + false, + Map.empty)) + + assertEqual( + "CACHE LAZY TABLE a.b.c", + CacheTableCommand(Seq("a", "b", "c"), None, true, Map.empty)) + + assertEqual( + "CACHE LAZY TABLE a.b.c OPTIONS('storageLevel' 'DISK_ONLY')", + CacheTableCommand( + Seq("a", "b", "c"), + None, + true, + Map("storageLevel" -> "DISK_ONLY"))) + + intercept("CACHE TABLE a.b.c AS SELECT * FROM testData", + "It is not allowed to add catalog/namespace prefix a.b") + } + + test("UNCACHE TABLE") { + assertEqual( + "UNCACHE TABLE a.b.c", + UncacheTableCommand(Seq("a", "b", "c"), ifExists = false)) + + assertEqual( + "UNCACHE TABLE IF EXISTS a.b.c", + UncacheTableCommand(Seq("a", "b", "c"), ifExists = true)) + } + + test("CLEAR CACHE") { + assertEqual("CLEAR CACHE", ClearCacheCommand) + } } diff --git 
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala
index 4872906dbfec3..b4f921efcac81 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala
@@ -705,7 +705,7 @@ class SQLMetricsSuite extends SharedSparkSession with SQLMetricsTestUtils
     sql("CREATE TEMPORARY VIEW inMemoryTable AS SELECT 1 AS c1")
     sql("CACHE TABLE inMemoryTable")
     testSparkPlanMetrics(spark.table("inMemoryTable"), 1,
-      Map(1L -> (("Scan In-memory table `inMemoryTable`", Map.empty)))
+      Map(1L -> (("Scan In-memory table inMemoryTable", Map.empty)))
     )
 
     sql("CREATE TEMPORARY VIEW ```a``b``` AS SELECT 2 AS c1")
diff --git a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2Suites.scala b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2Suites.scala
index 7cc60bb505089..5bf7892478082 100644
--- a/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2Suites.scala
+++ b/sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/HiveThriftServer2Suites.scala
@@ -305,7 +305,7 @@ class HiveThriftBinaryServerSuite extends HiveThriftJdbcTest {
       val plan = statement.executeQuery("explain select * from test_table")
       plan.next()
       plan.next()
-      assert(plan.getString(1).contains("Scan In-memory table `test_table`"))
+      assert(plan.getString(1).contains("Scan In-memory table test_table"))
 
       val rs1 = statement.executeQuery("SELECT key FROM test_table ORDER BY KEY DESC")
       val buf1 = new collection.mutable.ArrayBuffer[Int]()
@@ -391,7 +391,7 @@ class HiveThriftBinaryServerSuite extends HiveThriftJdbcTest {
       val plan = statement.executeQuery("explain select key from test_map ORDER BY key DESC")
       plan.next()
       plan.next()
-      assert(plan.getString(1).contains("Scan In-memory table `test_table`"))
+      assert(plan.getString(1).contains("Scan In-memory table test_table"))
 
       val rs = statement.executeQuery("SELECT key FROM test_map ORDER BY KEY DESC")
       val buf = new collection.mutable.ArrayBuffer[Int]()
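
Note (sketch, not part of the patch): the expected plan strings above lose their
backticks because the cache entry is now named via multipartIdentifier.quoted, and
(as of this change) quoteIfNeeded only adds backticks when a name part contains a
dot or a backtick:

    import org.apache.spark.sql.connector.catalog.CatalogV2Implicits.MultipartIdentifierHelper

    Seq("default", "cachedTable").quoted  // "default.cachedTable"
    Seq("a.b", "tbl").quoted              // "`a.b`.tbl"
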
cached.") // Refresh the table 'cachedTable' in temp db with qualified table name, and then check // whether the table is still cached with the same name and storage level. sql(s"REFRESH TABLE $db.cachedTable") - assertCached(sql(s"select * from $db.cachedTable"), s"`$db`.`cachedTable`", MEMORY_ONLY) + assertCached(sql(s"select * from $db.cachedTable"), s"$db.cachedTable", MEMORY_ONLY) assert(spark.catalog.isCached(s"$db.cachedTable"), s"Table '$db.cachedTable' should be cached after refreshing with its qualified name.") @@ -382,7 +382,7 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with TestHiveSingleto // 'cachedTable', instead of '$db.cachedTable' activateDatabase(db) { sql("REFRESH TABLE cachedTable") - assertCached(sql("SELECT * FROM cachedTable"), s"`$db`.`cachedTable`", MEMORY_ONLY) + assertCached(sql("SELECT * FROM cachedTable"), s"$db.cachedTable", MEMORY_ONLY) assert(spark.catalog.isCached("cachedTable"), s"Table '$db.cachedTable' should be cached after refreshing with its " + "unqualified name.") @@ -403,13 +403,13 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with TestHiveSingleto // Cache the table 'cachedTable' in default db without qualified table name , and then // check whether the table is cached with expected name. sql("CACHE TABLE cachedTable OPTIONS('storageLevel' 'DISK_ONLY')") - assertCached(sql("SELECT * FROM cachedTable"), "`default`.`cachedTable`", DISK_ONLY) + assertCached(sql("SELECT * FROM cachedTable"), "cachedTable", DISK_ONLY) assert(spark.catalog.isCached("cachedTable"), "Table 'cachedTable' should be cached.") // Refresh the table 'cachedTable' in default db with unqualified table name, and then // check whether the table is still cached with the same name. sql("REFRESH TABLE cachedTable") - assertCached(sql("SELECT * FROM cachedTable"), "`default`.`cachedTable`", DISK_ONLY) + assertCached(sql("SELECT * FROM cachedTable"), "cachedTable", DISK_ONLY) assert(spark.catalog.isCached("cachedTable"), "Table 'cachedTable' should be cached after refreshing with its unqualified name.") @@ -421,7 +421,7 @@ class CachedTableSuite extends QueryTest with SQLTestUtils with TestHiveSingleto activateDatabase(db) { sql("REFRESH TABLE default.cachedTable") assertCached( - sql("SELECT * FROM default.cachedTable"), "`default`.`cachedTable`", DISK_ONLY) + sql("SELECT * FROM default.cachedTable"), "cachedTable", DISK_ONLY) assert(spark.catalog.isCached("default.cachedTable"), "Table 'cachedTable' should be cached after refreshing with its qualified name.") } diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/test/TestHive.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/test/TestHive.scala index f7c13ea047da7..a25c61c96f3d8 100644 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/test/TestHive.scala +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/test/TestHive.scala @@ -596,7 +596,7 @@ private[hive] class TestHiveQueryExecution( override lazy val analyzed: LogicalPlan = sparkSession.withActive { val describedTables = logical match { - case CacheTableCommand(tbl, _, _, _) => tbl :: Nil + case CacheTableCommand(tbl, _, _, _) => tbl.asTableIdentifier :: Nil case _ => Nil }