From d8c68ee14e72e07745477fbb17e631a931df606c Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Tue, 14 Mar 2023 10:31:05 -0700
Subject: [PATCH 1/3] HDDS-8122: Turning off auto compaction for
 sst-file-filtering service as well as garbage collection tasks

---
 .../hadoop/hdds/utils/db/DBStoreBuilder.java  |  14 +++
 .../hadoop/hdds/utils/db/RocksDatabase.java   |   3 +-
 .../hdds/utils/db/TestDBStoreBuilder.java     |  67 +++++++++++
 .../hadoop/ozone/om/TestOmSnapshot.java       |  18 ++-
 .../ozone/om/OmMetadataManagerImpl.java       | 109 +++++++++---------
 .../hadoop/ozone/om/SstFilteringService.java  |   3 +-
 6 files changed, 158 insertions(+), 56 deletions(-)

diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
index c3b0151cb9bc..23468c753d42 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
@@ -326,6 +326,20 @@ private ManagedColumnFamilyOptions getDefaultCfOptions() {
         .orElseGet(defaultCfProfile::getColumnFamilyOptions);
   }
 
+  /**
+   * Sets the disable-auto-compaction flag on the default column family
+   * options. Pass true to disable auto compaction for the column family.
+   * @param defaultCFAutoCompaction true to disable auto compaction
+   */
+  public DBStoreBuilder disableDefaultCFAutoCompaction(
+      boolean defaultCFAutoCompaction) {
+    ManagedColumnFamilyOptions defaultCFOptions =
+        getDefaultCfOptions();
+    defaultCFOptions.setDisableAutoCompactions(defaultCFAutoCompaction);
+    setDefaultCFOptions(defaultCFOptions);
+    return this;
+  }
+
   /**
    * Get default column family options, but with column family write buffer
    * size limit overridden.
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
index ff1bc6cfa889..be0150e4e97b 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
@@ -278,7 +278,8 @@ public String getName(StringCodec codec) {
       return codec.fromPersistedFormat(nameBytes);
     }
 
-    protected ColumnFamilyHandle getHandle() {
+    @VisibleForTesting
+    public ColumnFamilyHandle getHandle() {
       return handle;
     }
 
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java
index bb4294331af9..16f12aebef55 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java
@@ -29,6 +29,8 @@
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.io.TempDir;
 import org.junit.Assert;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.ValueSource;
 
 import java.io.File;
 import java.io.IOException;
@@ -36,6 +38,7 @@
 import java.nio.file.Path;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.Optional;
 
 /**
  * Tests RDBStore creation.
@@ -232,4 +235,68 @@ public File getDBLocation(ConfigurationSource conf) {
       Assert.assertTrue(checked);
     }
   }
+
+  @ParameterizedTest
+  @ValueSource(booleans = {true, false})
+  public void testIfAutoCompactionDisabled(boolean disableAutoCompaction,
+      @TempDir Path tempDir)
+      throws Exception {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempDir.toString());
+    File newFolder = new File(tempDir.toString(), "newFolder");
+
+    if (!newFolder.exists()) {
+      Assert.assertTrue(newFolder.mkdirs());
+    }
+
+    String sampleTableName = "sampleTable";
+    final DBDefinition sampleDB = new DBDefinition() {
+
+      private final DBColumnFamilyDefinition<String, Long> sampleTable =
+          new DBColumnFamilyDefinition<>(sampleTableName, String.class,
+              new StringCodec(), Long.class, new LongCodec());
+
+      @Override
+      public String getName() {
+        return "sampleDB";
+      }
+
+      @Override
+      public String getLocationConfigKey() {
+        return null;
+      }
+
+      @Override
+      public DBColumnFamilyDefinition[] getColumnFamilies() {
+        return new DBColumnFamilyDefinition[]{sampleTable};
+      }
+
+      @Override
+      public File getDBLocation(ConfigurationSource conf) {
+        return null;
+      }
+    };
+
+    try (DBStore dbStore = DBStoreBuilder.newBuilder(conf, sampleDB)
+        .setName("SampleStore")
+        .disableDefaultCFAutoCompaction(disableAutoCompaction)
+        .setPath(newFolder.toPath()).build()) {
+      Assert.assertTrue(dbStore instanceof RDBStore);
+
+      RDBStore rdbStore = (RDBStore) dbStore;
+      Collection<RocksDatabase.ColumnFamily> cfFamilies =
+          rdbStore.getColumnFamilies();
+
+      // we also have the default column family, so there are 2
+      Assert.assertEquals(2, cfFamilies.size());
+
+      for (RocksDatabase.ColumnFamily cfFamily : cfFamilies) {
+        // the flag on each column family should match what was
+        // passed to the builder
+        Assertions.assertEquals(cfFamily.getHandle().getDescriptor()
+                .getOptions().disableAutoCompactions(),
+            disableAutoCompaction);
+      }
+    }
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java
index 8b70349f4bb9..1fa120de2974 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java
@@ -24,6 +24,7 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils;
 import org.apache.hadoop.hdds.utils.db.DBProfile;
+import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.RDBStore;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
@@ -856,7 +857,6 @@ public void testUniqueSnapshotId()
     String snapshotName = UUID.randomUUID().toString();
     store.createSnapshot(volumeName, bucketName, snapshotName);
-
     List<OzoneManager> ozoneManagers = ((MiniOzoneHAClusterImpl) cluster)
         .getOzoneManagersList();
     List<String> snapshotIds = new ArrayList<>();
@@ -888,4 +888,20 @@ public void testUniqueSnapshotId()
     assertEquals(1, snapshotIds.stream().distinct().count());
   }
+
+  @Test
+  public void testSnapshotOpensWithDisabledAutoCompaction() throws Exception {
+    String snapPrefix = createSnapshot(volumeName, bucketName);
+    RDBStore snapshotDBStore = (RDBStore)
+        ((OmSnapshot)cluster.getOzoneManager().getOmSnapshotManager()
+            .checkForSnapshot(volumeName, bucketName, snapPrefix))
+            .getMetadataManager().getStore();
+
+    for(String table : snapshotDBStore.getTableNames().values()) {
+      Assertions.assertTrue(snapshotDBStore.getDb().getColumnFamily(table)
+          .getHandle().getDescriptor()
+          .getOptions().disableAutoCompactions());
+    }
+  }
+
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index ede9b5893a32..8e91f248b311 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -16,33 +16,22 @@
  */
 package org.apache.hadoop.ozone.om;
 
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Paths;
-import java.time.Duration;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Optional;
+import com.google.common.base.Strings;
+import com.google.common.collect.Lists;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.utils.TableCacheMetrics;
+import org.apache.hadoop.hdds.utils.TransactionInfo;
+import org.apache.hadoop.hdds.utils.TransactionInfoCodec;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
+import org.apache.hadoop.hdds.utils.db.RDBCheckpointManager;
 import org.apache.hadoop.hdds.utils.db.RocksDBConfiguration;
 import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.RDBCheckpointManager;
 import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
 import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.hdds.utils.db.TypedTable;
@@ -53,70 +42,79 @@
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.hdds.utils.TransactionInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmBucketInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmDBAccessIdInfoCodec;
+import org.apache.hadoop.ozone.om.codec.OmDBSnapshotInfoCodec;
+import org.apache.hadoop.ozone.om.codec.OmDBTenantStateCodec;
 import org.apache.hadoop.ozone.om.codec.OmDBUserPrincipalInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmDirectoryInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmKeyInfoCodec;
+import org.apache.hadoop.ozone.om.codec.OmKeyRenameInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmMultipartKeyInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmPrefixInfoCodec;
-import org.apache.hadoop.ozone.om.codec.OmDBTenantStateCodec;
 import org.apache.hadoop.ozone.om.codec.OmVolumeArgsCodec;
 import org.apache.hadoop.ozone.om.codec.RepeatedOmKeyInfoCodec;
-import org.apache.hadoop.ozone.om.codec.OmKeyRenameInfoCodec;
 import org.apache.hadoop.ozone.om.codec.S3SecretValueCodec;
-import org.apache.hadoop.ozone.om.codec.OmDBSnapshotInfoCodec;
 import org.apache.hadoop.ozone.om.codec.TokenIdentifierCodec;
 import org.apache.hadoop.ozone.om.codec.UserVolumeInfoCodec;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmDBAccessIdInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDBTenantState;
 import org.apache.hadoop.ozone.om.helpers.OmDBUserPrincipalInfo;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.OmKeyRenameInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartUpload;
 import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
-import org.apache.hadoop.ozone.om.helpers.OmDBTenantState;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyRenameInfo;
 import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
 import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
-import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.WithMetadata;
 import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock;
 import org.apache.hadoop.ozone.om.lock.OmReadOnlyLock;
 import org.apache.hadoop.ozone.om.lock.OzoneManagerLock;
-import org.apache.hadoop.hdds.utils.TransactionInfo;
 import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
-import org.apache.hadoop.ozone.storage.proto
-    .OzoneManagerStorageProtos.PersistedUserVolumeInfo;
 import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
+import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos.PersistedUserVolumeInfo;
+import org.apache.hadoop.util.Time;
+import org.apache.ratis.util.ExitUtils;
+import org.eclipse.jetty.util.StringUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Paths;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-import org.apache.commons.lang3.StringUtils;
 import static org.apache.hadoop.ozone.OzoneConsts.DB_TRANSIENT_MARKER;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_DIR;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_FS_SNAPSHOT_MAX_LIMIT;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_FS_SNAPSHOT_MAX_LIMIT_DEFAULT;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND;
-import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_DIR;
-
-import org.apache.hadoop.util.Time;
-import org.apache.ratis.util.ExitUtils;
-import org.eclipse.jetty.util.StringUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * Ozone metadata manager interface.
@@ -333,7 +331,8 @@ protected OmMetadataManagerImpl() {
       File checkpoint = Paths.get(metaDir.toPath().toString(), dbName).toFile();
       RDBCheckpointManager.waitForCheckpointDirectoryExist(checkpoint);
     }
-    setStore(loadDB(conf, metaDir, dbName, true));
+    setStore(loadDB(conf, metaDir, dbName, true,
+        java.util.Optional.of(Boolean.TRUE)));
     initializeOmTables(false);
   }
 
@@ -468,12 +467,15 @@ public void start(OzoneConfiguration configuration) throws IOException {
 
   public static DBStore loadDB(OzoneConfiguration configuration, File metaDir)
       throws IOException {
-    return loadDB(configuration, metaDir, OM_DB_NAME, false);
+    return loadDB(configuration, metaDir, OM_DB_NAME, false,
+        java.util.Optional.empty());
   }
 
   public static DBStore loadDB(OzoneConfiguration configuration, File metaDir,
-      String dbName, boolean readOnly) throws IOException {
-
+      String dbName, boolean readOnly,
+      java.util.Optional<Boolean>
+          disableAutoCompaction)
+      throws IOException {
     final int maxFSSnapshots = configuration.getInt(
         OZONE_OM_FS_SNAPSHOT_MAX_LIMIT, OZONE_OM_FS_SNAPSHOT_MAX_LIMIT_DEFAULT);
     RocksDBConfiguration rocksDBConfiguration =
@@ -484,8 +486,9 @@ public static DBStore loadDB(OzoneConfiguration configuration, File metaDir,
         .setPath(Paths.get(metaDir.getPath()))
         .setMaxFSSnapshots(maxFSSnapshots)
         .setEnableCompactionLog(true);
-    DBStore dbStore = addOMTablesAndCodecs(dbStoreBuilder).build();
-    return dbStore;
+    disableAutoCompaction.ifPresent(
+        dbStoreBuilder::disableDefaultCFAutoCompaction);
+    return addOMTablesAndCodecs(dbStoreBuilder).build();
   }
 
   public static DBStoreBuilder addOMTablesAndCodecs(DBStoreBuilder builder) {
@@ -671,7 +674,7 @@ public DBStore getStore() {
    */
   @Override
   public String getVolumeKey(String volume) {
-    return OzoneConsts.OM_KEY_PREFIX + volume;
+    return OM_KEY_PREFIX + volume;
   }
 
   /**
@@ -984,13 +987,13 @@ public List<OmBucketInfo> listBuckets(final String volumeName,
     List<OmBucketInfo> result = new ArrayList<>();
     if (Strings.isNullOrEmpty(volumeName)) {
       throw new OMException("Volume name is required.",
-          ResultCodes.VOLUME_NOT_FOUND);
+          VOLUME_NOT_FOUND);
     }
 
     String volumeNameBytes = getVolumeKey(volumeName);
     if (volumeTable.get(volumeNameBytes) == null) {
       throw new OMException("Volume " + volumeName + " not found.",
-          ResultCodes.VOLUME_NOT_FOUND);
+          VOLUME_NOT_FOUND);
     }
 
     String startKey;
@@ -1071,18 +1074,18 @@ public List<OmKeyInfo> listKeys(String volumeName, String bucketName,
 
     if (Strings.isNullOrEmpty(volumeName)) {
       throw new OMException("Volume name is required.",
-          ResultCodes.VOLUME_NOT_FOUND);
+          VOLUME_NOT_FOUND);
     }
 
     if (Strings.isNullOrEmpty(bucketName)) {
       throw new OMException("Bucket name is required.",
-          ResultCodes.BUCKET_NOT_FOUND);
+          BUCKET_NOT_FOUND);
     }
 
     String bucketNameBytes = getBucketKey(volumeName, bucketName);
     if (getBucketTable().get(bucketNameBytes) == null) {
       throw new OMException("Bucket " + bucketName + " not found.",
-          ResultCodes.BUCKET_NOT_FOUND);
+          BUCKET_NOT_FOUND);
     }
 
     String seekKey;
@@ -1290,7 +1293,7 @@ public List<OmVolumeArgs> listVolumes(String userName,
     int index = 0;
     if (!Strings.isNullOrEmpty(startKey)) {
       index = volumes.indexOf(
-          startKey.startsWith(OzoneConsts.OM_KEY_PREFIX) ?
+          startKey.startsWith(OM_KEY_PREFIX) ?
               startKey.substring(1) :
               startKey);
 
@@ -1310,7 +1313,7 @@ public List<OmVolumeArgs> listVolumes(String userName,
       // this probably means om db is corrupted or some entries are
       // accidentally removed.
throw new OMException("Volume info not found for " + volumeName, - ResultCodes.VOLUME_NOT_FOUND); + VOLUME_NOT_FOUND); } result.add(volumeArgs); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java index 17be6142d868..98541cf26f3e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java @@ -45,6 +45,7 @@ import java.nio.file.StandardOpenOption; import java.util.ArrayList; import java.util.List; +import java.util.Optional; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; @@ -145,7 +146,7 @@ public BackgroundTaskResult call() throws Exception { RDBStore rdbStore = (RDBStore) OmMetadataManagerImpl .loadDB(ozoneManager.getConfiguration(), new File(snapshotDir), - dbName, true); + dbName, true, Optional.of(Boolean.TRUE)); RocksDatabase db = rdbStore.getDb(); db.deleteFilesNotMatchingPrefix(prefixPairs, filterFunction); rdbStore.close(); From e728a42a56436314987533e48ea474a8ae43f745 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 14 Mar 2023 10:33:52 -0700 Subject: [PATCH 2/3] HDDS-8122: Fix checkstyle --- .../org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java | 1 - .../test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java index 16f12aebef55..df2b5d08cba5 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java @@ -38,7 +38,6 @@ import java.nio.file.Path; import java.util.Arrays; import java.util.Collection; -import java.util.Optional; /** * Tests RDBStore creation. 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java
index 1fa120de2974..d253b2ca8b54 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java
@@ -24,7 +24,6 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils;
 import org.apache.hadoop.hdds.utils.db.DBProfile;
-import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.RDBStore;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
@@ -897,7 +896,7 @@ public void testSnapshotOpensWithDisabledAutoCompaction() throws Exception {
             .checkForSnapshot(volumeName, bucketName, snapPrefix))
             .getMetadataManager().getStore();
 
-    for(String table : snapshotDBStore.getTableNames().values()) {
+    for (String table : snapshotDBStore.getTableNames().values()) {
       Assertions.assertTrue(snapshotDBStore.getDb().getColumnFamily(table)
           .getHandle().getDescriptor()
           .getOptions().disableAutoCompactions());

From 31f7c2d48481ccb19c830619839cc13fa208fa04 Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Tue, 14 Mar 2023 10:39:38 -0700
Subject: [PATCH 3/3] HDDS-8122: Revert imports

---
 .../ozone/om/OmMetadataManagerImpl.java       | 92 ++++++++++---------
 1 file changed, 47 insertions(+), 45 deletions(-)

diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 8e91f248b311..20b9c0727d6e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -16,22 +16,33 @@
  */
 package org.apache.hadoop.ozone.om;
 
-import com.google.common.annotations.VisibleForTesting;
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Paths;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
 import com.google.common.base.Optional;
-import com.google.common.base.Strings;
-import com.google.common.collect.Lists;
-import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.utils.TableCacheMetrics;
-import org.apache.hadoop.hdds.utils.TransactionInfo;
-import org.apache.hadoop.hdds.utils.TransactionInfoCodec;
 import org.apache.hadoop.hdds.utils.db.BatchOperation;
 import org.apache.hadoop.hdds.utils.db.DBStore;
 import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
-import org.apache.hadoop.hdds.utils.db.RDBCheckpointManager;
 import org.apache.hadoop.hdds.utils.db.RocksDBConfiguration;
 import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.RDBCheckpointManager;
 import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
 import org.apache.hadoop.hdds.utils.db.TableIterator;
 import org.apache.hadoop.hdds.utils.db.TypedTable;
@@ -42,79 +53,70 @@
 import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.BlockGroup;
+import org.apache.hadoop.hdds.utils.TransactionInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmBucketInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmDBAccessIdInfoCodec;
-import org.apache.hadoop.ozone.om.codec.OmDBSnapshotInfoCodec;
-import org.apache.hadoop.ozone.om.codec.OmDBTenantStateCodec;
 import org.apache.hadoop.ozone.om.codec.OmDBUserPrincipalInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmDirectoryInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmKeyInfoCodec;
-import org.apache.hadoop.ozone.om.codec.OmKeyRenameInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmMultipartKeyInfoCodec;
 import org.apache.hadoop.ozone.om.codec.OmPrefixInfoCodec;
+import org.apache.hadoop.ozone.om.codec.OmDBTenantStateCodec;
 import org.apache.hadoop.ozone.om.codec.OmVolumeArgsCodec;
 import org.apache.hadoop.ozone.om.codec.RepeatedOmKeyInfoCodec;
+import org.apache.hadoop.ozone.om.codec.OmKeyRenameInfoCodec;
 import org.apache.hadoop.ozone.om.codec.S3SecretValueCodec;
+import org.apache.hadoop.ozone.om.codec.OmDBSnapshotInfoCodec;
 import org.apache.hadoop.ozone.om.codec.TokenIdentifierCodec;
 import org.apache.hadoop.ozone.om.codec.UserVolumeInfoCodec;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
-import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmDBAccessIdInfo;
-import org.apache.hadoop.ozone.om.helpers.OmDBTenantState;
 import org.apache.hadoop.ozone.om.helpers.OmDBUserPrincipalInfo;
 import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
-import org.apache.hadoop.ozone.om.helpers.OmKeyRenameInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartUpload;
 import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDBTenantState;
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyRenameInfo;
 import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
 import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.WithMetadata;
 import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock;
 import org.apache.hadoop.ozone.om.lock.OmReadOnlyLock;
 import org.apache.hadoop.ozone.om.lock.OzoneManagerLock;
+import org.apache.hadoop.hdds.utils.TransactionInfo;
 import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.storage.proto
+    .OzoneManagerStorageProtos.PersistedUserVolumeInfo;
 import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
-import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos.PersistedUserVolumeInfo;
-import org.apache.hadoop.util.Time;
-import org.apache.ratis.util.ExitUtils;
-import org.eclipse.jetty.util.StringUtil;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Paths;
-import java.time.Duration;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Strings;
+import com.google.common.collect.Lists;
+import org.apache.commons.lang3.StringUtils;
 
 import static org.apache.hadoop.ozone.OzoneConsts.DB_TRANSIENT_MARKER;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
-import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_DIR;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_FS_SNAPSHOT_MAX_LIMIT;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_FS_SNAPSHOT_MAX_LIMIT_DEFAULT;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND;
 import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_DIR;
+
+import org.apache.hadoop.util.Time;
+import org.apache.ratis.util.ExitUtils;
+import org.eclipse.jetty.util.StringUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Ozone metadata manager interface.
@@ -674,7 +676,7 @@ public DBStore getStore() {
    */
   @Override
   public String getVolumeKey(String volume) {
-    return OM_KEY_PREFIX + volume;
+    return OzoneConsts.OM_KEY_PREFIX + volume;
   }
 
   /**
@@ -987,13 +989,13 @@ public List<OmBucketInfo> listBuckets(final String volumeName,
     List<OmBucketInfo> result = new ArrayList<>();
     if (Strings.isNullOrEmpty(volumeName)) {
       throw new OMException("Volume name is required.",
-          VOLUME_NOT_FOUND);
+          ResultCodes.VOLUME_NOT_FOUND);
     }
 
     String volumeNameBytes = getVolumeKey(volumeName);
     if (volumeTable.get(volumeNameBytes) == null) {
       throw new OMException("Volume " + volumeName + " not found.",
-          VOLUME_NOT_FOUND);
+          ResultCodes.VOLUME_NOT_FOUND);
     }
 
     String startKey;
@@ -1076,18 +1078,18 @@ public List<OmKeyInfo> listKeys(String volumeName, String bucketName,
 
     if (Strings.isNullOrEmpty(volumeName)) {
       throw new OMException("Volume name is required.",
-          VOLUME_NOT_FOUND);
+          ResultCodes.VOLUME_NOT_FOUND);
     }
 
     if (Strings.isNullOrEmpty(bucketName)) {
       throw new OMException("Bucket name is required.",
-          BUCKET_NOT_FOUND);
+          ResultCodes.BUCKET_NOT_FOUND);
     }
 
     String bucketNameBytes = getBucketKey(volumeName, bucketName);
     if (getBucketTable().get(bucketNameBytes) == null) {
       throw new OMException("Bucket " + bucketName + " not found.",
-          BUCKET_NOT_FOUND);
+          ResultCodes.BUCKET_NOT_FOUND);
     }
 
     String seekKey;
@@ -1295,7 +1297,7 @@ public List<OmVolumeArgs> listVolumes(String userName,
     int index = 0;
     if (!Strings.isNullOrEmpty(startKey)) {
       index = volumes.indexOf(
-          startKey.startsWith(OM_KEY_PREFIX) ?
+          startKey.startsWith(OzoneConsts.OM_KEY_PREFIX) ?
              startKey.substring(1) :
              startKey);
 
@@ -1313,7 +1315,7 @@ public List<OmVolumeArgs> listVolumes(String userName,
       // this probably means om db is corrupted or some entries are
       // accidentally removed.
       throw new OMException("Volume info not found for " + volumeName,
-          VOLUME_NOT_FOUND);
+          ResultCodes.VOLUME_NOT_FOUND);
     }
     result.add(volumeArgs);
   }
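
Editor's note: the sketch below is not part of the patch series. It is a minimal
illustration of how the new disableDefaultCFAutoCompaction builder flag composes
with the existing DBStoreBuilder API, mirroring the Optional-guarded pattern that
OmMetadataManagerImpl#loadDB and SstFilteringService use above. The class and
method names (SnapshotDbOpener, open, verifyNoAutoCompaction) are illustrative,
not from the patch; it also assumes build() can throw IOException and that
ColumnFamilyHandle#getDescriptor() throws a checked exception, which is why the
verification helper declares throws Exception. The builder calls and the
verification chain are exactly the ones exercised by TestDBStoreBuilder and
TestOmSnapshot.

import java.io.IOException;
import java.nio.file.Path;
import java.util.Optional;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.db.DBDefinition;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
import org.apache.hadoop.hdds.utils.db.RDBStore;
import org.apache.hadoop.hdds.utils.db.RocksDatabase;

/** Illustrative helper, not part of the patch. */
public final class SnapshotDbOpener {

  private SnapshotDbOpener() {
  }

  /**
   * Builds a store the way loadDB() above does: the Optional guard leaves
   * the DB profile's default compaction behaviour untouched unless the
   * caller explicitly passes Optional.of(Boolean.TRUE), in which case a
   * background scan (such as SST filtering) never triggers RocksDB
   * compactions on the opened data.
   */
  public static DBStore open(OzoneConfiguration conf, DBDefinition definition,
      Path dbPath, Optional<Boolean> disableAutoCompaction)
      throws IOException {
    DBStoreBuilder builder = DBStoreBuilder.newBuilder(conf, definition)
        .setName(definition.getName())
        .setPath(dbPath);
    // Same Optional-guarded call as in loadDB().
    disableAutoCompaction.ifPresent(builder::disableDefaultCFAutoCompaction);
    return builder.build();
  }

  /**
   * Mirrors the assertion loop in TestDBStoreBuilder and TestOmSnapshot:
   * every live column family handle must report
   * disableAutoCompactions() == true.
   */
  public static void verifyNoAutoCompaction(DBStore store) throws Exception {
    for (RocksDatabase.ColumnFamily cf :
        ((RDBStore) store).getColumnFamilies()) {
      if (!cf.getHandle().getDescriptor()
          .getOptions().disableAutoCompactions()) {
        throw new AssertionError("auto compaction is still enabled");
      }
    }
  }
}

The design choice worth noting for reviewers: threading Optional<Boolean> through
loadDB() rather than a plain boolean keeps the existing two-argument overload
(and every current caller) on the profile default, while read-only consumers such
as SstFilteringService opt in explicitly with Optional.of(Boolean.TRUE).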