diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileHiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileHiveMetastore.java index 9a599cfa4af4..926ffda4873c 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileHiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileHiveMetastore.java @@ -14,6 +14,8 @@ package io.trino.plugin.hive.metastore.file; import com.google.common.annotations.VisibleForTesting; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; @@ -21,6 +23,7 @@ import com.google.common.collect.Sets; import com.google.common.io.ByteStreams; import io.airlift.json.JsonCodec; +import io.trino.collect.cache.EvictableCacheBuilder; import io.trino.hdfs.DynamicHdfsConfiguration; import io.trino.hdfs.HdfsConfig; import io.trino.hdfs.HdfsConfiguration; @@ -120,6 +123,7 @@ import static io.trino.spi.security.PrincipalType.USER; import static java.lang.String.format; import static java.util.Objects.requireNonNull; +import static java.util.concurrent.TimeUnit.SECONDS; import static java.util.stream.Collectors.toList; import static java.util.stream.Collectors.toSet; import static org.apache.hadoop.hive.common.FileUtils.unescapePathName; @@ -159,6 +163,9 @@ public class FileHiveMetastore private final JsonCodec<List<String>> rolesCodec = JsonCodec.listJsonCodec(String.class); private final JsonCodec<List<RoleGrant>> roleGrantsCodec = JsonCodec.listJsonCodec(RoleGrant.class); + // TODO Remove this speed-up workaround once https://github.com/trinodb/trino/issues/13115 is implemented + private final LoadingCache<String, List<String>> listTablesCache; + @VisibleForTesting public static FileHiveMetastore createTestingFileHiveMetastore(File catalogDirectory) { @@ -189,6 +196,10 @@ public 
FileHiveMetastore(NodeVersion nodeVersion, HdfsEnvironment hdfsEnvironmen catch (IOException e) { throw new TrinoException(HIVE_METASTORE_ERROR, e); } + + listTablesCache = EvictableCacheBuilder.newBuilder() + .expireAfterWrite(10, SECONDS) + .build(CacheLoader.from(this::doListAllTables)); } @Override @@ -511,6 +522,12 @@ public synchronized List<String> getTablesWithParameter(String databaseName, Str @GuardedBy("this") private List<String> listAllTables(String databaseName) + { + return listTablesCache.getUnchecked(databaseName); + } + + @GuardedBy("this") + private List<String> doListAllTables(String databaseName) { requireNonNull(databaseName, "databaseName is null"); @@ -620,6 +637,9 @@ public synchronized void renameTable(String databaseName, String tableName, Stri catch (IOException e) { throw new TrinoException(HIVE_METASTORE_ERROR, e); } + finally { + listTablesCache.invalidateAll(); + } } @Override @@ -1345,6 +1365,9 @@ private <T> void writeFile(String type, Path path, JsonCodec<T> codec, T value, catch (Exception e) { throw new TrinoException(HIVE_METASTORE_ERROR, "Could not write " + type, e); } + finally { + listTablesCache.invalidateAll(); + } } private void renameSchemaFile(SchemaType type, Path oldMetadataDirectory, Path newMetadataDirectory) @@ -1357,6 +1380,9 @@ private void renameSchemaFile(SchemaType type, Path oldMetadataDirectory, Path n catch (IOException e) { throw new TrinoException(HIVE_METASTORE_ERROR, "Could not rename " + type + " schema", e); } + finally { + listTablesCache.invalidateAll(); + } } private void deleteSchemaFile(SchemaType type, Path metadataDirectory) @@ -1369,6 +1395,9 @@ private void deleteSchemaFile(SchemaType type, Path metadataDirectory) catch (IOException e) { throw new TrinoException(HIVE_METASTORE_ERROR, "Could not delete " + type + " schema", e); } + finally { + listTablesCache.invalidateAll(); + } } private Path getDatabaseMetadataDirectory(String databaseName)