diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index c87fcc4bf062..581d4d10713e 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -542,6 +542,12 @@ public final class OzoneConsts {
    */
   public static final String ETAG = "ETag";
 
+  /**
+   * Separator between the OM DB name and the snapshot ID in snapshot
+   * checkpoint directory names, used by the OMDBCheckpoint functions.
+   */
+  public static final String OM_SNAPSHOT_SEPARATOR = "-";
+
   private OzoneConsts() {
     // Never Constructed
   }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java
index 680e80bfd7a2..688aba5ee2ea 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java
@@ -20,6 +20,7 @@
 import static org.apache.hadoop.hdds.HddsUtils.fromProtobuf;
 import static org.apache.hadoop.hdds.HddsUtils.toProtobuf;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_SEPARATOR;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -57,7 +58,6 @@ public final class SnapshotInfo implements Auditable, CopyObject<SnapshotInfo> {
       SnapshotInfo::getProtobuf,
       SnapshotInfo.class);
 
-  private static final String SEPARATOR = "-";
   private static final long INVALID_TIMESTAMP = -1;
   private static final UUID INITIAL_SNAPSHOT_ID = UUID.randomUUID();
 
@@ -548,7 +548,7 @@ public Map<String, String> toAuditMap() {
   public static String getCheckpointDirName(UUID snapshotId) {
     Objects.requireNonNull(snapshotId,
         "SnapshotId is needed to create checkpoint directory");
-    return SEPARATOR + snapshotId;
+    return OM_SNAPSHOT_SEPARATOR + snapshotId;
   }
 
   /**
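For orientation, the separator now defines the on-disk checkpoint directory naming in one place. A minimal sketch of the resulting name (not part of the patch; the UUID is a hypothetical example):

```java
// Checkpoint directory naming with the shared separator (hypothetical UUID).
UUID snapshotId = UUID.fromString("3f8f86fd-3f29-4f07-9d9b-9f0c7dbe1c23");
String suffix = SnapshotInfo.getCheckpointDirName(snapshotId); // "-3f8f86fd-..."
String checkpointDirName = OM_DB_NAME + suffix;                // "om.db-3f8f86fd-..."
```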
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServletInodeBasedXfer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServletInodeBasedXfer.java
index 37086b2c8404..f9c5ffa878d5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServletInodeBasedXfer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServletInodeBasedXfer.java
@@ -17,6 +17,7 @@
 package org.apache.hadoop.ozone.om;
 
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD;
@@ -28,8 +29,10 @@
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_FLUSH;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
 import static org.mockito.Mockito.any;
 import static org.mockito.Mockito.anyBoolean;
 import static org.mockito.Mockito.doCallRealMethod;
@@ -54,6 +57,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.stream.Collectors;
@@ -68,6 +72,7 @@
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.utils.IOUtils;
 import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
@@ -77,13 +82,22 @@
 import org.apache.hadoop.ozone.TestDataUtil;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneSnapshot;
 import org.apache.hadoop.ozone.lock.BootstrapStateHandler;
+import org.apache.hadoop.ozone.om.codec.OMDBDefinition;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.io.TempDir;
+import org.rocksdb.ColumnFamilyDescriptor;
+import org.rocksdb.ColumnFamilyHandle;
+import org.rocksdb.DBOptions;
+import org.rocksdb.RocksDB;
 
 /**
  * Class used for testing the OM DB Checkpoint provider servlet using inode based transfer logic.
@@ -106,6 +120,9 @@ public class TestOMDbCheckpointServletInodeBasedXfer {
   @BeforeEach
   void init() throws Exception {
     conf = new OzoneConfiguration();
+    // Ensure cache entries are not evicted, so that snapshot DBs are not closed.
+    conf.setTimeDuration(OMConfigKeys.OZONE_OM_SNAPSHOT_CACHE_CLEANUP_SERVICE_RUN_INTERVAL,
+        100, TimeUnit.MINUTES);
   }
 
   @AfterEach
@@ -193,33 +210,12 @@ public void write(int b) throws IOException {
 
   @Test
   void testContentsOfTarballWithSnapshot() throws Exception {
-    setupCluster();
-    setupMocks();
-    when(requestMock.getParameter(OZONE_DB_CHECKPOINT_INCLUDE_SNAPSHOT_DATA)).thenReturn("true");
     String volumeName = "vol" + RandomStringUtils.secure().nextNumeric(5);
     String bucketName = "buck" + RandomStringUtils.secure().nextNumeric(5);
-    // Create a "spy" dbstore keep track of the checkpoint.
-    writeData(volumeName, bucketName, true);
-    DBStore dbStore = om.getMetadataManager().getStore();
-    DBStore spyDbStore = spy(dbStore);
     AtomicReference<DBCheckpoint> realCheckpoint = new AtomicReference<>();
-    when(spyDbStore.getCheckpoint(true)).thenAnswer(b -> {
-      DBCheckpoint checkpoint = spy(dbStore.getCheckpoint(true));
-      // Don't delete the checkpoint, because we need to compare it
-      // with the snapshot data.
-      doNothing().when(checkpoint).cleanupCheckpoint();
-      realCheckpoint.set(checkpoint);
-      return checkpoint;
-    });
-    // Init the mock with the spyDbstore
-    doCallRealMethod().when(omDbCheckpointServletMock).initialize(any(), any(),
-        eq(false), any(), any(), eq(false));
-    omDbCheckpointServletMock.initialize(spyDbStore, om.getMetrics().getDBCheckpointMetrics(),
-        false,
-        om.getOmAdminUsernames(), om.getOmAdminGroups(), false);
-
+    setupClusterAndMocks(volumeName, bucketName, realCheckpoint);
+    DBStore dbStore = om.getMetadataManager().getStore();
     // Get the tarball.
-    when(responseMock.getOutputStream()).thenReturn(servletOutputStream);
     omDbCheckpointServletMock.doGet(requestMock, responseMock);
     String testDirName = folder.resolve("testDir").toString();
     String newDbDirName = testDirName + OM_KEY_PREFIX + OM_DB_NAME;
@@ -252,6 +248,8 @@ void testContentsOfTarballWithSnapshot() throws Exception {
       populateInodesOfFilesInDirectory(dbStore, Paths.get(snapshotPath),
           inodesFromOmDataDir, hardLinkMapFromOmData);
     }
+    populateInodesOfFilesInDirectory(dbStore, Paths.get(dbStore.getRocksDBCheckpointDiffer().getSSTBackupDir()),
+        inodesFromOmDataDir, hardLinkMapFromOmData);
 
     Path hardlinkFilePath = newDbDir.toPath().resolve(OmSnapshotManager.OM_HARDLINK_FILE);
     Map<String, List<String>> hardlinkMapFromTarball = readFileToMap(hardlinkFilePath.toString());
@@ -278,13 +276,150 @@ void testContentsOfTarballWithSnapshot() throws Exception {
     assertFalse(hardlinkFilePath.toFile().exists());
   }
 
+  /**
+   * Verifies that an entry manually added to a snapshot's deleted table is persisted
+   * and can be retrieved from the snapshot DB loaded from an OM DB checkpoint.
+   */
+  @Test
+  public void testSnapshotDBConsistency() throws Exception {
+    String volumeName = "vol" + RandomStringUtils.secure().nextNumeric(5);
+    String bucketName = "buck" + RandomStringUtils.secure().nextNumeric(5);
+    AtomicReference<DBCheckpoint> realCheckpoint = new AtomicReference<>();
+    setupClusterAndMocks(volumeName, bucketName, realCheckpoint);
+    List<OzoneSnapshot> snapshots = new ArrayList<>();
+    client.getObjectStore().listSnapshot(volumeName, bucketName, "", null)
+        .forEachRemaining(snapshots::add);
+    OzoneSnapshot snapshotToModify = snapshots.get(0);
+    String dummyKey = "dummyKey";
+    writeDummyKeyToDeleteTableOfSnapshotDB(snapshotToModify, bucketName, volumeName, dummyKey);
+    // Get the tarball.
+    omDbCheckpointServletMock.doGet(requestMock, responseMock);
+    String testDirName = folder.resolve("testDir").toString();
+    String newDbDirName = testDirName + OM_KEY_PREFIX + OM_DB_NAME;
+    File newDbDir = new File(newDbDirName);
+    assertTrue(newDbDir.mkdirs());
+    FileUtil.unTar(tempFile, newDbDir);
+    Set<Path> allPathsInTarball = getAllPathsInTarball(newDbDir);
+    // Create the hardlinks now.
+    OmSnapshotUtils.createHardLinks(newDbDir.toPath());
+    for (Path old : allPathsInTarball) {
+      assertTrue(old.toFile().delete());
+    }
+    Path snapshotDbDir = Paths.get(newDbDir.toPath().toString(), OM_SNAPSHOT_CHECKPOINT_DIR,
+        OM_DB_NAME + "-" + snapshotToModify.getSnapshotId());
+    deleteWalFiles(snapshotDbDir);
+    assertTrue(Files.exists(snapshotDbDir));
+    String value = getValueFromSnapshotDeleteTable(dummyKey, snapshotDbDir.toString());
+    assertNotNull(value);
+  }
+
+  private static void deleteWalFiles(Path snapshotDbDir) throws IOException {
+    try (Stream<Path> filesInTarball = Files.list(snapshotDbDir)) {
+      List<Path> files = filesInTarball.filter(p -> p.toString().contains(".log"))
+          .collect(Collectors.toList());
+      for (Path p : files) {
+        Files.delete(p);
+      }
+    }
+  }
+
+  private static Set<Path> getAllPathsInTarball(File newDbDir) throws IOException {
+    Set<Path> allPathsInTarball = new HashSet<>();
+    try (Stream<Path> filesInTarball = Files.list(newDbDir.toPath())) {
+      List<Path> files = filesInTarball.collect(Collectors.toList());
+      for (Path p : files) {
+        File file = p.toFile();
+        if (file.getName().equals(OmSnapshotManager.OM_HARDLINK_FILE)) {
+          continue;
+        }
+        allPathsInTarball.add(p);
+      }
+    }
+    return allPathsInTarball;
+  }
+
+  private void writeDummyKeyToDeleteTableOfSnapshotDB(OzoneSnapshot snapshotToModify, String bucketName,
+      String volumeName, String keyName)
+      throws IOException {
+    try (UncheckedAutoCloseableSupplier<OmSnapshot> supplier = om.getOmSnapshotManager()
+        .getSnapshot(snapshotToModify.getSnapshotId())) {
+      OmSnapshot omSnapshot = supplier.get();
+      OmKeyInfo dummyOmKeyInfo =
+          new OmKeyInfo.Builder().setBucketName(bucketName).setVolumeName(volumeName).setKeyName(keyName)
+              .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)).build();
+      RepeatedOmKeyInfo dummyRepeatedKeyInfo =
+          new RepeatedOmKeyInfo.Builder().setOmKeyInfos(Collections.singletonList(dummyOmKeyInfo)).build();
+      omSnapshot.getMetadataManager().getDeletedTable().put(dummyOmKeyInfo.getKeyName(), dummyRepeatedKeyInfo);
+    }
+  }
+
+  private void setupClusterAndMocks(String volumeName, String bucketName,
+      AtomicReference<DBCheckpoint> realCheckpoint) throws Exception {
+    setupCluster();
+    setupMocks();
+    om.getKeyManager().getSnapshotSstFilteringService().pause();
+    when(requestMock.getParameter(OZONE_DB_CHECKPOINT_INCLUDE_SNAPSHOT_DATA)).thenReturn("true");
+    // Create a "spy" DB store to keep track of the checkpoint.
+    writeData(volumeName, bucketName, true);
+    DBStore dbStore = om.getMetadataManager().getStore();
+    DBStore spyDbStore = spy(dbStore);
+    when(spyDbStore.getCheckpoint(true)).thenAnswer(b -> {
+      DBCheckpoint checkpoint = spy(dbStore.getCheckpoint(true));
+      // Don't delete the checkpoint, because we need to compare it
+      // with the snapshot data.
+      doNothing().when(checkpoint).cleanupCheckpoint();
+      realCheckpoint.set(checkpoint);
+      return checkpoint;
+    });
+    // Initialize the mock with the spy DB store.
+    doCallRealMethod().when(omDbCheckpointServletMock).initialize(any(), any(),
+        eq(false), any(), any(), eq(false));
+    omDbCheckpointServletMock.initialize(spyDbStore, om.getMetrics().getDBCheckpointMetrics(),
+        false,
+        om.getOmAdminUsernames(), om.getOmAdminGroups(), false);
+    when(responseMock.getOutputStream()).thenReturn(servletOutputStream);
+  }
+
+  String getValueFromSnapshotDeleteTable(String key, String snapshotDB) {
+    String result = null;
+    List<ColumnFamilyDescriptor> cfDescriptors = new ArrayList<>();
+    int count = 1;
+    int deletedTableCFIndex = 0;
+    cfDescriptors.add(new ColumnFamilyDescriptor("default".getBytes(StandardCharsets.UTF_8)));
+    for (String cfName : OMDBDefinition.getAllColumnFamilies()) {
+      if (cfName.equals(OMDBDefinition.DELETED_TABLE)) {
+        deletedTableCFIndex = count;
+      }
+      cfDescriptors.add(new ColumnFamilyDescriptor(cfName.getBytes(StandardCharsets.UTF_8)));
+      count++;
+    }
+    // For holding handles.
+    List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+    try (DBOptions options = new DBOptions().setCreateIfMissing(false).setCreateMissingColumnFamilies(true);
+        RocksDB db = RocksDB.openReadOnly(options, snapshotDB, cfDescriptors, cfHandles)) {
+
+      ColumnFamilyHandle deletedTableCF = cfHandles.get(deletedTableCFIndex); // 0 is default
+      byte[] value = db.get(deletedTableCF, key.getBytes(StandardCharsets.UTF_8));
+      if (value != null) {
+        result = new String(value, StandardCharsets.UTF_8);
+      }
+    } catch (Exception e) {
+      fail("Exception while reading from snapshot DB " + e.getMessage());
+    } finally {
+      for (ColumnFamilyHandle handle : cfHandles) {
+        handle.close();
+      }
+    }
+    return result;
+  }
+
   public static Map<String, List<String>> readFileToMap(String filePath) throws IOException {
     Map<String, List<String>> dataMap = new HashMap<>();
     try (BufferedReader reader = Files.newBufferedReader(Paths.get(filePath), StandardCharsets.UTF_8)) {
       String line;
       while ((line = reader.readLine()) != null) {
         String trimmedLine = line.trim();
-        if (trimmedLine.isEmpty() || !trimmedLine.contains("\t")) {
+        if (!trimmedLine.contains("\t")) {
           continue;
         }
         int tabIndex = trimmedLine.indexOf("\t");
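`getValueFromSnapshotDeleteTable` above locates the deleted table positionally: `RocksDB.openReadOnly` fills `cfHandles` in the same order as `cfDescriptors`, so the index recorded while building the descriptor list stays valid. A sketch of an alternative that resolves the handle by name instead (not part of the patch; `findHandle` is a hypothetical helper, and `ColumnFamilyHandle.getName()` can throw `RocksDBException`):

```java
// Hypothetical helper: look up a column-family handle by name, so the code
// does not depend on the ordering of the descriptor list.
private static ColumnFamilyHandle findHandle(List<ColumnFamilyHandle> cfHandles, String cfName)
    throws RocksDBException {
  for (ColumnFamilyHandle handle : cfHandles) {
    if (cfName.equals(new String(handle.getName(), StandardCharsets.UTF_8))) {
      return handle;
    }
  }
  throw new IllegalStateException("Column family not found: " + cfName);
}
```

With such a helper, `cfHandles.get(deletedTableCFIndex)` becomes `findHandle(cfHandles, OMDBDefinition.DELETED_TABLE)` and the `count`/`deletedTableCFIndex` bookkeeping disappears.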
trimmedLine.indexOf("\t"); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java index 3fe5aca7a919..fe2d36e5f966 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java @@ -26,6 +26,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_TO_EXCLUDE_SST; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_KEY; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.FlatResource.SNAPSHOT_DB_LOCK; import static org.apache.hadoop.ozone.om.snapshot.OMDBCheckpointUtils.includeSnapshotData; import static org.apache.hadoop.ozone.om.snapshot.OMDBCheckpointUtils.logEstimatedTarballSize; import static org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils.DATA_PREFIX; @@ -50,6 +51,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.UUID; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Stream; import javax.servlet.ServletException; @@ -255,6 +257,9 @@ public void writeDbDataToStream(HttpServletRequest request, OutputStream destina hardLinkFileMap, getCompactionLogDir()); writeDBToArchive(sstFilesToExclude, tmpSstBackupDir, maxTotalSstSize, archiveOutputStream, tmpdir, hardLinkFileMap, getSstBackupDir()); + // This is done to ensure all data to be copied correctly is flushed in the snapshot DB + transferSnapshotData(sstFilesToExclude, tmpdir, snapshotPaths, maxTotalSstSize, + archiveOutputStream, hardLinkFileMap); } writeHardlinkFile(getConf(), hardLinkFileMap, archiveOutputStream); includeRatisSnapshotCompleteFlag(archiveOutputStream); @@ -268,6 +273,36 @@ public void writeDbDataToStream(HttpServletRequest request, OutputStream destina } } + /** + * Transfers the snapshot data from the specified snapshot directories into the archive output stream, + * handling deduplication and managing resource locking. + * + * @param sstFilesToExclude Set of SST file identifiers to exclude from the archive. + * @param tmpdir Temporary directory for intermediate processing. + * @param snapshotPaths Set of paths to snapshot directories to be processed. + * @param maxTotalSstSize AtomicLong to track the cumulative size of SST files included. + * @param archiveOutputStream Archive output stream to write the snapshot data. + * @param hardLinkFileMap Map of hardlink file paths to their unique identifiers for deduplication. + * @throws IOException if an I/O error occurs during processing. 
+   */
+  private void transferSnapshotData(Set<String> sstFilesToExclude, Path tmpdir, Set<Path> snapshotPaths,
+      AtomicLong maxTotalSstSize, ArchiveOutputStream archiveOutputStream,
+      Map hardLinkFileMap) throws IOException {
+    OzoneManager om = (OzoneManager) getServletContext().getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE);
+    OMMetadataManager omMetadataManager = om.getMetadataManager();
+    for (Path snapshotDir : snapshotPaths) {
+      String snapshotId = OmSnapshotManager.extractSnapshotIDFromCheckpointDirName(snapshotDir.toString());
+      omMetadataManager.getLock().acquireReadLock(SNAPSHOT_DB_LOCK, snapshotId);
+      try {
+        // Invalidating the cache entry closes (and flushes) the snapshot DB.
+        om.getOmSnapshotManager().invalidateCacheEntry(UUID.fromString(snapshotId));
+        writeDBToArchive(sstFilesToExclude, snapshotDir, maxTotalSstSize, archiveOutputStream, tmpdir, hardLinkFileMap);
+      } finally {
+        omMetadataManager.getLock().releaseReadLock(SNAPSHOT_DB_LOCK, snapshotId);
+      }
+    }
+  }
+
   private boolean writeDBToArchive(Set sstFilesToExclude, Path dir, AtomicLong maxTotalSstSize,
       ArchiveOutputStream archiveOutputStream, Path tmpdir, Map hardLinkFileMap) throws IOException {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
index 7457fdfb4fb6..4a2e93f6c436 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
@@ -24,6 +24,7 @@
 import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_CHECKPOINT_DIR;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_DIFF_DB_NAME;
 import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_SEPARATOR;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_FS_SNAPSHOT_MAX_LIMIT;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_FS_SNAPSHOT_MAX_LIMIT_DEFAULT;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_CACHE_CLEANUP_SERVICE_RUN_INTERVAL;
@@ -769,6 +770,15 @@ public static String getSnapshotPath(OzoneConfiguration conf,
         OM_DB_NAME + checkpointDirName;
   }
 
+  public static String extractSnapshotIDFromCheckpointDirName(String snapshotPath) {
+    // Find "om.db-" in the path and return whatever comes after it.
+    int index = snapshotPath.lastIndexOf(OM_DB_NAME);
+    if (index == -1 || index + OM_DB_NAME.length() + OM_SNAPSHOT_SEPARATOR.length() >= snapshotPath.length()) {
+      throw new IllegalArgumentException("Invalid snapshot path " + snapshotPath);
+    }
+    return snapshotPath.substring(index + OM_DB_NAME.length() + OM_SNAPSHOT_SEPARATOR.length());
+  }
+
   public static String getSnapshotLocalPropertyYamlPath(OzoneConfiguration conf,
       SnapshotInfo snapshotInfo) {
     return getSnapshotPath(conf, snapshotInfo) + ".yaml";
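`extractSnapshotIDFromCheckpointDirName` is the inverse of `SnapshotInfo.getCheckpointDirName`. A sketch of its behavior (not part of the patch; the path and UUID are hypothetical examples):

```java
// Extracting the snapshot ID back out of a checkpoint directory path.
String path = "/om/metadata/db.snapshots/checkpointState/om.db-3f8f86fd-3f29-4f07-9d9b-9f0c7dbe1c23";
String id = OmSnapshotManager.extractSnapshotIDFromCheckpointDirName(path);
// id == "3f8f86fd-3f29-4f07-9d9b-9f0c7dbe1c23"
// A path without "om.db", or with nothing after "om.db-", throws IllegalArgumentException.
```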
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
index 6d053e1e5e05..9894e8f5d6bf 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
@@ -17,6 +17,8 @@
 package org.apache.hadoop.ozone.om.codec;
 
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Map;
 import org.apache.hadoop.hdds.utils.TransactionInfo;
 import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition;
@@ -358,5 +360,13 @@ public String getName() {
   public String getLocationConfigKey() {
     return OMConfigKeys.OZONE_OM_DB_DIRS;
   }
+
+  public static List<String> getAllColumnFamilies() {
+    List<String> columnFamilies = new ArrayList<>();
+    COLUMN_FAMILIES.values().forEach(cf -> {
+      columnFamilies.add(cf.getName());
+    });
+    return columnFamilies;
+  }
 }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java
index b465956f35e6..eedf18f6534a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java
@@ -139,6 +139,7 @@ public void invalidate(UUID key) {
         LOG.warn("SnapshotId: '{}' does not exist in snapshot cache.", k);
       } else {
         try {
+          v.get().getMetadataManager().getStore().flushDB();
           v.get().close();
         } catch (IOException e) {
           throw new IllegalStateException("Failed to close snapshotId: " + key, e);
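A note on the one-line `SnapshotCache` change: closing a RocksDB instance does not flush its memtables, so without the explicit `flushDB()` a recent write can exist only in the WAL, and `testSnapshotDBConsistency` deletes the WAL (`.log`) files before reopening the copied snapshot DB. A self-contained sketch of the behavior being relied on (not part of the patch; the path is hypothetical):

```java
import java.nio.charset.StandardCharsets;
import org.rocksdb.FlushOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public final class FlushBeforeCloseSketch {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    try (RocksDB db = RocksDB.open("/tmp/example-db"); // hypothetical path
        FlushOptions flushOptions = new FlushOptions().setWaitForFlush(true)) {
      db.put("key".getBytes(StandardCharsets.UTF_8), "value".getBytes(StandardCharsets.UTF_8));
      db.flush(flushOptions); // memtable -> SST, analogous to DBStore#flushDB in the patch
    } // after close(), the entry survives even if the WAL (.log) files are removed
  }
}
```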