diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
index c3b0151cb9bc..23468c753d42 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
@@ -326,6 +326,20 @@ private ManagedColumnFamilyOptions getDefaultCfOptions() {
         .orElseGet(defaultCfProfile::getColumnFamilyOptions);
   }

+  /**
+   * Sets the disable auto compaction flag on the default column family
+   * options. Pass true to disable auto compaction by default.
+   * @param defaultCFAutoCompaction true to disable auto compaction
+   */
+  public DBStoreBuilder disableDefaultCFAutoCompaction(
+      boolean defaultCFAutoCompaction) {
+    ManagedColumnFamilyOptions defaultCFOptions =
+        getDefaultCfOptions();
+    defaultCFOptions.setDisableAutoCompactions(defaultCFAutoCompaction);
+    setDefaultCFOptions(defaultCFOptions);
+    return this;
+  }
+
   /**
    * Get default column family options, but with column family write buffer
    * size limit overridden.
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
index ff1bc6cfa889..be0150e4e97b 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
@@ -278,7 +278,8 @@ public String getName(StringCodec codec) {
       return codec.fromPersistedFormat(nameBytes);
     }

-    protected ColumnFamilyHandle getHandle() {
+    @VisibleForTesting
+    public ColumnFamilyHandle getHandle() {
       return handle;
     }

diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java
index bb4294331af9..df2b5d08cba5 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java
@@ -29,6 +29,8 @@
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.io.TempDir;
 import org.junit.Assert;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.ValueSource;

 import java.io.File;
 import java.io.IOException;
@@ -232,4 +234,68 @@ public File getDBLocation(ConfigurationSource conf) {
       Assert.assertTrue(checked);
     }
   }
+
+  @ParameterizedTest
+  @ValueSource(booleans = {true, false})
+  public void testIfAutoCompactionDisabled(boolean disableAutoCompaction,
+      @TempDir Path tempDir)
+      throws Exception {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempDir.toString());
+    File newFolder = new File(tempDir.toString(), "newFolder");
+
+    if (!newFolder.exists()) {
+      Assert.assertTrue(newFolder.mkdirs());
+    }
+
+    String sampleTableName = "sampleTable";
+    final DBDefinition sampleDB = new DBDefinition() {
+
+      private final DBColumnFamilyDefinition<String, Long> sampleTable =
+          new DBColumnFamilyDefinition<>(sampleTableName, String.class,
+              new StringCodec(), Long.class, new LongCodec());
+
+
+      @Override
+      public String getName() {
+        return "sampleDB";
+      }
+
+      @Override
+      public String getLocationConfigKey() {
+        return null;
+      }
+
+      @Override
+      public DBColumnFamilyDefinition[] getColumnFamilies() {
+        return new DBColumnFamilyDefinition[]{sampleTable};
+      }
+
+      @Override
+      public File getDBLocation(ConfigurationSource conf) {
+        return null;
+      }
+    };
+
+    try (DBStore dbStore = DBStoreBuilder.newBuilder(conf, sampleDB)
+        .setName("SampleStore")
+        .disableDefaultCFAutoCompaction(disableAutoCompaction)
+        .setPath(newFolder.toPath()).build()) {
+      Assert.assertTrue(dbStore instanceof RDBStore);
+
+      RDBStore rdbStore = (RDBStore) dbStore;
+      Collection<RocksDatabase.ColumnFamily> cfFamilies =
+          rdbStore.getColumnFamilies();
+
+      // we also have the default column family, so there are 2
+      Assert.assertEquals(2, cfFamilies.size());
+
+      for (RocksDatabase.ColumnFamily cfFamily : cfFamilies) {
+        // the flag on each column family should match what was passed
+        Assertions.assertEquals(cfFamily.getHandle().getDescriptor()
+            .getOptions().disableAutoCompactions(),
+            disableAutoCompaction);
+      }
+    }
+  }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java
index 8b70349f4bb9..d253b2ca8b54 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java
@@ -856,7 +856,6 @@ public void testUniqueSnapshotId()

     String snapshotName = UUID.randomUUID().toString();
     store.createSnapshot(volumeName, bucketName, snapshotName);
-
     List<OzoneManager> ozoneManagers = ((MiniOzoneHAClusterImpl) cluster)
         .getOzoneManagersList();
     List snapshotIds = new ArrayList<>();
@@ -888,4 +887,20 @@ public void testUniqueSnapshotId()

     assertEquals(1, snapshotIds.stream().distinct().count());
   }
+
+  @Test
+  public void testSnapshotOpensWithDisabledAutoCompaction() throws Exception {
+    String snapPrefix = createSnapshot(volumeName, bucketName);
+    RDBStore snapshotDBStore = (RDBStore)
+        ((OmSnapshot) cluster.getOzoneManager().getOmSnapshotManager()
+            .checkForSnapshot(volumeName, bucketName, snapPrefix))
+            .getMetadataManager().getStore();
+
+    for (String table : snapshotDBStore.getTableNames().values()) {
+      Assertions.assertTrue(snapshotDBStore.getDb().getColumnFamily(table)
+          .getHandle().getDescriptor()
+          .getOptions().disableAutoCompactions());
+    }
+  }
+
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index ede9b5893a32..20b9c0727d6e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -333,7 +333,8 @@ protected OmMetadataManagerImpl() {
       File checkpoint = Paths.get(metaDir.toPath().toString(), dbName).toFile();
       RDBCheckpointManager.waitForCheckpointDirectoryExist(checkpoint);
     }
-    setStore(loadDB(conf, metaDir, dbName, true));
+    setStore(loadDB(conf, metaDir, dbName, true,
+        java.util.Optional.of(Boolean.TRUE)));
     initializeOmTables(false);
   }

@@ -468,12 +469,15 @@ public void start(OzoneConfiguration configuration) throws IOException {

   public static DBStore loadDB(OzoneConfiguration configuration, File metaDir)
       throws IOException {
-    return loadDB(configuration, metaDir, OM_DB_NAME, false);
+    return loadDB(configuration, metaDir, OM_DB_NAME, false,
+        java.util.Optional.empty());
   }

   public static DBStore loadDB(OzoneConfiguration configuration, File metaDir,
-      String dbName, boolean readOnly) throws IOException {
-
+      String dbName, boolean readOnly,
+      java.util.Optional<Boolean>
+          disableAutoCompaction)
+      throws IOException {
     final int maxFSSnapshots = configuration.getInt(
         OZONE_OM_FS_SNAPSHOT_MAX_LIMIT, OZONE_OM_FS_SNAPSHOT_MAX_LIMIT_DEFAULT);
     RocksDBConfiguration rocksDBConfiguration =
@@ -484,8 +488,9 @@ public static DBStore loadDB(OzoneConfiguration configuration, File metaDir,
         .setPath(Paths.get(metaDir.getPath()))
         .setMaxFSSnapshots(maxFSSnapshots)
         .setEnableCompactionLog(true);
-    DBStore dbStore = addOMTablesAndCodecs(dbStoreBuilder).build();
-    return dbStore;
+    disableAutoCompaction.ifPresent(
+        dbStoreBuilder::disableDefaultCFAutoCompaction);
+    return addOMTablesAndCodecs(dbStoreBuilder).build();
   }

   public static DBStoreBuilder addOMTablesAndCodecs(DBStoreBuilder builder) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java
index 43effdcc0c3a..725190569d55 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java
@@ -45,6 +45,7 @@
 import java.nio.file.StandardOpenOption;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Optional;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;

@@ -145,7 +146,7 @@ public BackgroundTaskResult call() throws Exception {

         try (RDBStore rdbStore = (RDBStore) OmMetadataManagerImpl
             .loadDB(ozoneManager.getConfiguration(), new File(snapshotDir),
-                dbName, true)) {
+                dbName, true, Optional.of(Boolean.TRUE))) {
           RocksDatabase db = rdbStore.getDb();
           db.deleteFilesNotMatchingPrefix(prefixPairs, filterFunction);
         }
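
Reviewer note, not part of the patch: a minimal sketch of how a caller might exercise the new builder flag, mirroring the TestDBStoreBuilder usage above. The class name, method name, store name, and dbDir argument below are illustrative placeholders; only the DBStoreBuilder calls themselves (newBuilder, setName, disableDefaultCFAutoCompaction, setPath, build) come from this change.

// Illustrative sketch only (not part of the change set); assumes a DBDefinition
// for the target DB is available, as in the TestDBStoreBuilder test above.
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.db.DBDefinition;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;

public final class DisableAutoCompactionExample {

  private DisableAutoCompactionExample() {
  }

  // Opens a store whose default column family options have auto compaction
  // disabled, using the builder method introduced by this patch.
  public static DBStore openWithoutAutoCompaction(OzoneConfiguration conf,
      DBDefinition definition, File dbDir) throws IOException {
    return DBStoreBuilder.newBuilder(conf, definition)
        .setName("SampleStore")                 // placeholder store name
        .disableDefaultCFAutoCompaction(true)   // new flag from this patch
        .setPath(dbDir.toPath())
        .build();
  }
}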