@@ -190,33 +190,20 @@ private[hive] class HiveMetastoreCatalog(sparkSession: SparkSession) extends Log
190190 } else {
191191 selectParquetLocationDirectories(relation.tableMeta.identifier.table, Option (rootPath))
192192 }
193- withTableCreationLock(tableIdentifier, {
194- val cached = getCached(
195- tableIdentifier,
196- paths,
197- metastoreSchema,
198- fileFormatClass,
199- None )
200- val logicalRelation = cached.getOrElse {
201- val (dataSchema, updatedTable) = inferIfNeeded(relation, options, fileFormat)
202- val created =
203- LogicalRelation (
204- DataSource (
205- sparkSession = sparkSession,
206- paths = paths.map(_.toString),
207- userSpecifiedSchema = Option (dataSchema),
208- // We don't support hive bucketed tables, only ones we write out.
209- bucketSpec = None ,
210- options = options,
211- className = fileType).resolveRelation(),
212- table = updatedTable)
213-
214- catalogProxy.cacheTable(tableIdentifier, created)
215- created
216- }
217-
218- logicalRelation
219- })
193+ // SPY-1453: disable the cache completely until we have migrated to partitioning
194+ val (dataSchema, updatedTable) = inferIfNeeded(relation, options, fileFormat)
195+ val created =
196+ LogicalRelation (
197+ DataSource (
198+ sparkSession = sparkSession,
199+ paths = paths.map(_.toString),
200+ userSpecifiedSchema = Option (dataSchema),
201+ // We don't support hive bucketed tables, only ones we write out.
202+ bucketSpec = None ,
203+ options = options,
204+ className = fileType).resolveRelation(),
205+ table = updatedTable)
206+ created
220207 }
221208 // The inferred schema may have different field names as the table schema, we should respect
222209 // it, but also respect the exprId in table relation output.
0 commit comments