diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java
index eae3148ae04b..0b61822fb519 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java
@@ -52,33 +52,6 @@ default <KEY, VALUE> TypedTable<KEY, VALUE> getTable(String name, Codec<KEY> key
     return getTable(name, keyCodec, valueCodec, CacheType.PARTIAL_CACHE);
   }
 
-  /**
-   * Gets an existing TableStore with implicit key/value conversion and
-   * with default cache type for cache. Default cache type is partial cache.
-   *
-   * @param name - Name of the TableStore to get
-   * @param keyType
-   * @param valueType
-   * @return - TableStore.
-   * @throws IOException on Failure
-   */
-  <KEY, VALUE> Table<KEY, VALUE> getTable(String name,
-      Class<KEY> keyType, Class<VALUE> valueType) throws IOException;
-
-  /**
-   * Gets an existing TableStore with implicit key/value conversion and
-   * with specified cache type.
-   * @param name - Name of the TableStore to get
-   * @param keyType
-   * @param valueType
-   * @param cacheType
-   * @return - TableStore.
-   * @throws IOException
-   */
-  <KEY, VALUE> Table<KEY, VALUE> getTable(String name,
-      Class<KEY> keyType, Class<VALUE> valueType,
-      TableCache.CacheType cacheType) throws IOException;
-
   /**
    * Gets table store with implict key/value conversion.
    *
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
index b2ecc2faed71..491f8ff54192 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
@@ -61,7 +61,6 @@ public class RDBStore implements DBStore {
       LoggerFactory.getLogger(RDBStore.class);
   private final RocksDatabase db;
   private final File dbLocation;
-  private final CodecRegistry codecRegistry;
   private RocksDBStoreMetrics metrics;
   private final RDBCheckpointManager checkPointManager;
   private final String checkpointsParentDir;
@@ -90,7 +89,6 @@ public RDBStore(File dbFile, ManagedDBOptions dbOptions, ManagedStatistics stati
     Preconditions.checkNotNull(families);
     Preconditions.checkArgument(!families.isEmpty());
     this.maxDbUpdatesSizeThreshold = maxDbUpdatesSizeThreshold;
-    codecRegistry = registry;
     dbLocation = dbFile;
     this.dbOptions = dbOptions;
     this.statistics = statistics;
@@ -297,26 +295,12 @@ public RDBTable getTable(String name) throws IOException {
     return new RDBTable(this.db, handle, rdbMetrics);
   }
 
-  @Override
-  public <KEY, VALUE> TypedTable<KEY, VALUE> getTable(String name,
-      Class<KEY> keyType, Class<VALUE> valueType) throws IOException {
-    return new TypedTable<>(getTable(name), codecRegistry, keyType,
-        valueType);
-  }
-
   @Override
   public <KEY, VALUE> TypedTable<KEY, VALUE> getTable(
       String name, Codec<KEY> keyCodec, Codec<VALUE> valueCodec, TableCache.CacheType cacheType) throws IOException {
     return new TypedTable<>(getTable(name), keyCodec, valueCodec, cacheType);
   }
 
-  @Override
-  public <KEY, VALUE> Table<KEY, VALUE> getTable(String name,
-      Class<KEY> keyType, Class<VALUE> valueType,
-      TableCache.CacheType cacheType) throws IOException {
-    return new TypedTable<>(getTable(name), codecRegistry, keyType, valueType, cacheType);
-  }
-
   @Override
   public ArrayList<Table> listTables() {
     ArrayList<Table> returnList = new ArrayList<>();
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
index f39d55327aab..dcf482aad2ad 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
@@ -68,31 +68,6 @@ public class TypedTable<KEY, VALUE> implements Table<KEY, VALUE> {
       = new CodecBuffer.Capacity(this, BUFFER_SIZE_DEFAULT);
   private final TableCache<KEY, VALUE> cache;
 
-  /**
-   * The same as this(rawTable, codecRegistry, keyType, valueType,
-   * CacheType.PARTIAL_CACHE).
-   */
-  TypedTable(RDBTable rawTable, CodecRegistry codecRegistry, Class<KEY> keyType, Class<VALUE> valueType)
-      throws IOException {
-    this(rawTable, codecRegistry, keyType, valueType, CacheType.PARTIAL_CACHE);
-  }
-
-  /**
-   * Create an TypedTable from the raw table with specified cache type.
-   *
-   * @param rawTable The underlying (untyped) table in RocksDB.
-   * @param codecRegistry To look up codecs.
-   * @param keyType The key type.
-   * @param valueType The value type.
-   * @param cacheType How to cache the entries?
-   * @throws IOException if failed to iterate the raw table.
-   */
-  TypedTable(RDBTable rawTable, CodecRegistry codecRegistry, Class<KEY> keyType, Class<VALUE> valueType,
-      CacheType cacheType) throws IOException {
-    this(rawTable, codecRegistry.getCodecFromClass(keyType), codecRegistry.getCodecFromClass(valueType),
-        cacheType);
-  }
-
   /**
    * Create an TypedTable from the raw table with specified cache type.
    *
@@ -102,8 +77,7 @@ public class TypedTable<KEY, VALUE> implements Table<KEY, VALUE> {
    * @param cacheType How to cache the entries?
    * @throws IOException
    */
-  public TypedTable(
-      RDBTable rawTable, Codec<KEY> keyCodec, Codec<VALUE> valueCodec, CacheType cacheType) throws IOException {
+  TypedTable(RDBTable rawTable, Codec<KEY> keyCodec, Codec<VALUE> valueCodec, CacheType cacheType) throws IOException {
     this.rawTable = Objects.requireNonNull(rawTable, "rawTable==null");
     this.keyCodec = Objects.requireNonNull(keyCodec, "keyCodec == null");
     this.valueCodec = Objects.requireNonNull(valueCodec, "valueCodec == null");
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
index fd593016d19d..065e8728e748 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
@@ -42,7 +42,7 @@
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.hdds.StringUtils;
 import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
-import org.apache.hadoop.hdds.utils.db.cache.TableCache;
+import org.apache.hadoop.hdds.utils.db.cache.TableCache.CacheType;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
 import org.junit.jupiter.api.AfterEach;
@@ -309,13 +309,13 @@ public void batchDelete() throws Exception {
 
   @Test
   public void putGetTypedTableCodec() throws Exception {
-    try (Table<String, String> testTable = rdbStore.getTable("Ten", String.class, String.class)) {
+    try (Table<String, String> testTable = rdbStore.getTable("Ten", StringCodec.get(), StringCodec.get())) {
       testTable.put("test1", "123");
       assertFalse(testTable.isEmpty());
       assertEquals("123", testTable.get("test1"));
     }
     try (Table<String, ByteString> testTable = rdbStore.getTable("Ten",
-        StringCodec.get(), ByteStringCodec.get(), TableCache.CacheType.NO_CACHE)) {
+        StringCodec.get(), ByteStringCodec.get(), CacheType.NO_CACHE)) {
       assertEquals("123", testTable.get("test1").toStringUtf8());
     }
   }
@@ -407,8 +407,7 @@ public void testGetByteBuffer() throws Exception {
     final String tableName = families.get(0);
     try (RDBTable testTable = rdbStore.getTable(tableName)) {
       final TypedTable<String, String> typedTable = new TypedTable<>(
-          testTable, CodecRegistry.newBuilder().build(),
-          String.class, String.class);
+          testTable, StringCodec.get(), StringCodec.get(), CacheType.PARTIAL_CACHE);
 
       for (int i = 0; i < 20; i++) {
         final int valueSize = TypedTable.BUFFER_SIZE_DEFAULT * i / 4;
@@ -594,7 +593,7 @@ public void testStringPrefixedIterator() throws Exception {
     final List<KeyValue<String, String>> data = generateKVs(prefixes, keyCount);
 
     try (TypedTable<String, String> table = rdbStore.getTable(
-        "PrefixFirst", String.class, String.class)) {
+        "PrefixFirst", StringCodec.get(), StringCodec.get())) {
       populateTable(table, data);
       for (String prefix : prefixes) {
         assertIterator(keyCount, prefix, table);
@@ -633,7 +632,7 @@ static void assertIterator(int expectedCount, String prefix,
   @Test
   public void testStringPrefixedIteratorCloseDb() throws Exception {
     try (Table<String, String> testTable = rdbStore.getTable(
-        "PrefixFirst", String.class, String.class)) {
+        "PrefixFirst", StringCodec.get(), StringCodec.get())) {
       // iterator should seek to right pos in the middle
       rdbStore.close();
       assertThrows(IOException.class, () -> testTable.iterator("abc"));
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java
index 8fb71086988b..58b646c10bb1 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java
@@ -44,6 +44,7 @@
 import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
 import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
 import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.hdds.utils.db.cache.TableCache.CacheType;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
 import org.apache.ozone.test.GenericTestUtils;
@@ -69,7 +70,6 @@ public class TestTypedRDBTableStore {
       "Ninth", "Ten");
   private RDBStore rdbStore = null;
   private ManagedDBOptions options = null;
-  private CodecRegistry codecRegistry;
 
   @BeforeEach
   public void setUp(@TempDir File tempDir) throws Exception {
@@ -91,9 +91,6 @@ public void setUp(@TempDir File tempDir) throws Exception {
     }
     rdbStore = TestRDBStore.newRDBStore(tempDir, options, configSet,
         MAX_DB_UPDATES_SIZE_THRESHOLD);
-
-    codecRegistry = CodecRegistry.newBuilder().build();
-
   }
 
   @AfterEach
@@ -123,10 +120,9 @@ public void putGetAndEmpty() throws Exception {
 
   private Table<String, String> createTypedTable(String name)
       throws IOException {
-    return new TypedTable<String, String>(
+    return new TypedTable<>(
         rdbStore.getTable(name),
-        codecRegistry,
-        String.class, String.class);
+        StringCodec.get(), StringCodec.get(), CacheType.PARTIAL_CACHE);
   }
 
   @Test
@@ -253,7 +249,7 @@ public void testIteratorOnException() throws Exception {
     when(rdbTable.iterator((CodecBuffer) null))
         .thenThrow(new IOException());
     try (Table<String, String> testTable = new TypedTable<>(rdbTable,
-        codecRegistry, String.class, String.class)) {
+        StringCodec.get(), StringCodec.get(), CacheType.PARTIAL_CACHE)) {
       assertThrows(IOException.class, testTable::iterator);
     }
   }
@@ -411,8 +407,7 @@ public void testCountEstimatedRowsInTable() throws Exception {
   public void testByteArrayTypedTable() throws Exception {
     try (Table<byte[], byte[]> testTable = new TypedTable<>(
             rdbStore.getTable("Ten"),
-            codecRegistry,
-            byte[].class, byte[].class)) {
+            ByteArrayCodec.get(), ByteArrayCodec.get(), CacheType.PARTIAL_CACHE)) {
       byte[] key = new byte[] {1, 2, 3};
       byte[] value = new byte[] {4, 5, 6};
       testTable.put(key, value);
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
index 671e7c1f9e39..a9db3657c405 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/recovery/ReconOmMetadataManagerImpl.java
@@ -34,6 +34,7 @@
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
 import org.apache.hadoop.hdds.utils.db.RDBStore;
+import org.apache.hadoop.hdds.utils.db.StringCodec;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.hdds.utils.db.cache.TableCache;
@@ -112,7 +113,7 @@ private void initializeNewRdbStore(File dbFile) throws IOException {
   @Override
   public Table<String, KeyEntityInfoProtoWrapper> getKeyTableLite(BucketLayout bucketLayout) throws IOException {
     String tableName = bucketLayout.isFileSystemOptimized() ? FILE_TABLE : KEY_TABLE;
-    return getStore().getTable(tableName, String.class, KeyEntityInfoProtoWrapper.class);
+    return getStore().getTable(tableName, StringCodec.get(), KeyEntityInfoProtoWrapper.getCodec());
   }
 
   @Override
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorOm.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorOm.java
index 63a148454376..4f94250c0254 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorOm.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorOm.java
@@ -41,6 +41,7 @@
 import org.apache.hadoop.ozone.freon.FreonSubcommand;
 import org.apache.hadoop.ozone.om.OMStorage;
 import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.codec.OMDBDefinition;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo.Builder;
@@ -111,9 +112,7 @@ public Void call() throws Exception {
 
     // initialization: create one bucket and volume in OM.
     writeOmBucketVolume();
-    omKeyTable = omDb.getTable(OmMetadataManagerImpl.KEY_TABLE, String.class,
-        OmKeyInfo.class);
-
+    omKeyTable = OMDBDefinition.KEY_TABLE.getTable(omDb);
     timer = getMetrics().timer("om-generator");
 
     runTests(this::writeOmKeys);
@@ -142,9 +141,7 @@ public void writeOmKeys(long index) throws Exception {
 
   private void writeOmBucketVolume() throws IOException {
 
-    Table<String, OmVolumeArgs> volTable =
-        omDb.getTable(OmMetadataManagerImpl.VOLUME_TABLE, String.class,
-            OmVolumeArgs.class);
+    final Table<String, OmVolumeArgs> volTable = OMDBDefinition.VOLUME_TABLE.getTable(omDb);
 
     String admin = getUserId();
     String owner = getUserId();
@@ -166,9 +163,7 @@ private void writeOmBucketVolume() throws IOException {
 
     volTable.put("/" + volumeName, omVolumeArgs);
 
-    final Table<String, PersistedUserVolumeInfo> userTable =
-        omDb.getTable(OmMetadataManagerImpl.USER_TABLE, String.class,
-            PersistedUserVolumeInfo.class);
+    final Table<String, PersistedUserVolumeInfo> userTable = OMDBDefinition.USER_TABLE.getTable(omDb);
 
     PersistedUserVolumeInfo currentUserVolumeInfo =
         userTable.get(getUserId());
@@ -189,9 +184,7 @@ private void writeOmBucketVolume() throws IOException {
 
     userTable.put(getUserId(), currentUserVolumeInfo);
 
-    Table<String, OmBucketInfo> bucketTable =
-        omDb.getTable(OmMetadataManagerImpl.BUCKET_TABLE, String.class,
-            OmBucketInfo.class);
+    final Table<String, OmBucketInfo> bucketTable = OMDBDefinition.BUCKET_TABLE.getTable(omDb);
 
     OmBucketInfo omBucketInfo = new OmBucketInfo.Builder()
         .setBucketName(bucketName)
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java
index 74ae33ae956a..f85ec7099a92 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java
@@ -30,12 +30,16 @@
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.hdds.utils.db.ByteArrayCodec;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
+import org.apache.hadoop.hdds.utils.db.StringCodec;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.hdds.utils.db.TypedTable;
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.codec.OMDBDefinition;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
@@ -79,6 +83,7 @@ public class FSORepairTool extends RepairTool {
   private static final Logger LOG =
       LoggerFactory.getLogger(FSORepairTool.class);
   private static final String REACHABLE_TABLE = "reachable";
+  private static final byte[] EMPTY_BYTE_ARRAY = {};
 
   @CommandLine.Option(names = {"--db"},
       required = true,
@@ -105,6 +110,7 @@ public void execute() throws Exception {
       Impl repairTool = new Impl();
       repairTool.run();
     } catch (Exception ex) {
+      LOG.error("FSO repair failed", ex);
       throw new IllegalArgumentException("FSO repair failed: " + ex.getMessage());
     }
 
@@ -124,6 +130,7 @@ private class Impl {
     private final Table<String, RepeatedOmKeyInfo> deletedTable;
     private final Table<String, SnapshotInfo> snapshotInfoTable;
     private DBStore reachableDB;
+    private TypedTable<String, byte[]> reachableTable;
     private final ReportStatistics reachableStats;
     private final ReportStatistics unreachableStats;
     private final ReportStatistics unreferencedStats;
@@ -134,27 +141,13 @@ private class Impl {
       this.unreferencedStats = new ReportStatistics(0, 0, 0);
 
       this.store = getStoreFromPath(omDBPath);
-      volumeTable = store.getTable(OmMetadataManagerImpl.VOLUME_TABLE,
-          String.class,
-          OmVolumeArgs.class);
-      bucketTable = store.getTable(OmMetadataManagerImpl.BUCKET_TABLE,
-          String.class,
-          OmBucketInfo.class);
-      directoryTable = store.getTable(OmMetadataManagerImpl.DIRECTORY_TABLE,
-          String.class,
-          OmDirectoryInfo.class);
-      fileTable = store.getTable(OmMetadataManagerImpl.FILE_TABLE,
-          String.class,
-          OmKeyInfo.class);
-      deletedDirectoryTable = store.getTable(OmMetadataManagerImpl.DELETED_DIR_TABLE,
-          String.class,
-          OmKeyInfo.class);
-      deletedTable = store.getTable(OmMetadataManagerImpl.DELETED_TABLE,
-          String.class,
-          RepeatedOmKeyInfo.class);
-      snapshotInfoTable = store.getTable(OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE,
-          String.class,
-          SnapshotInfo.class);
+      this.volumeTable = OMDBDefinition.VOLUME_TABLE.getTable(store);
+      this.bucketTable = OMDBDefinition.BUCKET_TABLE.getTable(store);
+      this.directoryTable = OMDBDefinition.DIRECTORY_TABLE.getTable(store);
+      this.fileTable = OMDBDefinition.FILE_TABLE.getTable(store);
+      this.deletedDirectoryTable = OMDBDefinition.DELETED_DIR_TABLE.getTable(store);
+      this.deletedTable = OMDBDefinition.DELETED_TABLE.getTable(store);
+      this.snapshotInfoTable = OMDBDefinition.SNAPSHOT_INFO_TABLE.getTable(store);
     }
 
     public Report run() throws Exception {
@@ -461,7 +454,7 @@ private Collection<String> getChildDirectoriesAndMarkAsReachable(OmVolumeArgs vo
     private void addReachableEntry(OmVolumeArgs volume, OmBucketInfo bucket, WithObjectID object) throws IOException {
       String reachableKey = buildReachableKey(volume, bucket, object);
       // No value is needed for this table.
-      reachableDB.getTable(REACHABLE_TABLE, String.class, byte[].class).put(reachableKey, new byte[]{});
+      reachableTable.put(reachableKey, EMPTY_BYTE_ARRAY);
     }
 
     /**
@@ -471,7 +464,7 @@ private void addReachableEntry(OmVolumeArgs volume, OmBucketInfo bucket, WithObj
     protected boolean isReachable(String fileOrDirKey) throws IOException {
       String reachableParentKey = buildReachableParentKey(fileOrDirKey);
 
-      return reachableDB.getTable(REACHABLE_TABLE, String.class, byte[].class).get(reachableParentKey) != null;
+      return reachableTable.get(reachableParentKey) != null;
     }
 
     private void openReachableDB() throws IOException {
@@ -488,6 +481,7 @@ private void openReachableDB() throws IOException {
           .setPath(reachableDBFile.getParentFile().toPath())
          .addTable(REACHABLE_TABLE)
           .build();
+      reachableTable = reachableDB.getTable(REACHABLE_TABLE, StringCodec.get(), ByteArrayCodec.get());
     }
 
     private void closeReachableDB() throws IOException {
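
Reviewer note: since every call site follows the same pattern, here is a minimal before/after sketch of the migration this patch performs. The store setup, table name, and the `CodecMigrationSketch` wrapper are illustrative assumptions; the `getTable` overloads, `StringCodec.get()`, `CacheType`, and the `DBStoreBuilder` calls are taken from the code touched above.

```java
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
import org.apache.hadoop.hdds.utils.db.StringCodec;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.cache.TableCache.CacheType;

public final class CodecMigrationSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical store with a single column family named "exampleTable".
    DBStore store = DBStoreBuilder.newBuilder(new OzoneConfiguration())
        .setName("example.db")
        .setPath(java.nio.file.Paths.get("/tmp/example"))
        .addTable("exampleTable")
        .build();
    try {
      // Before this patch: Class tokens, resolved through the store's
      // CodecRegistry at lookup time.
      //   Table<String, String> t =
      //       store.getTable("exampleTable", String.class, String.class);

      // After this patch: codecs are passed explicitly, so the registry
      // indirection (and RDBStore's codecRegistry field) can be deleted.
      Table<String, String> table =
          store.getTable("exampleTable", StringCodec.get(), StringCodec.get());
      table.put("key", "value");
      assert "value".equals(table.get("key"));

      // The four-argument overload keeps the cache-type control that the
      // removed Class-based overload used to provide.
      Table<String, String> uncached = store.getTable(
          "exampleTable", StringCodec.get(), StringCodec.get(), CacheType.NO_CACHE);
      assert "value".equals(uncached.get("key")); // same column family underneath
    } finally {
      store.close();
    }
  }
}
```

For OM tables, call sites should prefer the typed accessors (e.g. `OMDBDefinition.VOLUME_TABLE.getTable(store)`) as done in `GeneratorOm` and `FSORepairTool`, since the DB definition carries the key/value codecs in one place instead of repeating them at every lookup.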