From 6e241f65e1d1d2469f5e46b2cde7ecce44a6e0c0 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 8 Oct 2025 09:39:47 -0400 Subject: [PATCH 01/97] HDDS-13765. SnapshotLocalData yaml should also track snapshotId Change-Id: Iba47aeb21663dfa407ab71339cef02c0d74b49f2 --- .../org/apache/hadoop/ozone/OzoneConsts.java | 1 + .../hadoop/ozone/om/OmSnapshotLocalData.java | 11 +++++++++- .../ozone/om/OmSnapshotLocalDataYaml.java | 8 +++++--- .../hadoop/ozone/om/OmSnapshotManager.java | 4 ++-- .../ozone/om/TestOmSnapshotLocalDataYaml.java | 20 +++++++++++++------ 5 files changed, 32 insertions(+), 12 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index bb6eef205e44..c9064da1781c 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -213,6 +213,7 @@ public final class OzoneConsts { public static final String OM_SLD_LAST_DEFRAG_TIME = "lastDefragTime"; public static final String OM_SLD_NEEDS_DEFRAG = "needsDefrag"; public static final String OM_SLD_VERSION_SST_FILE_INFO = "versionSstFileInfos"; + public static final String OM_SLD_SNAP_ID = "snapshotId"; public static final String OM_SLD_PREV_SNAP_ID = "previousSnapshotId"; public static final String OM_SLD_VERSION_META_SST_FILES = "sstFiles"; public static final String OM_SLD_VERSION_META_PREV_SNAP_VERSION = "previousSnapshotVersion"; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java index 7a351ba5c337..5f65fd4c0d08 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java @@ -41,6 
+41,9 @@ */ public abstract class OmSnapshotLocalData { + // Unique identifier for the snapshot. This is used to identify the snapshot. + private UUID snapshotId; + // Version of the snapshot local data. 0 indicates not defragged snapshot. // defragged snapshots will have version > 0. private int version; @@ -70,7 +73,8 @@ public abstract class OmSnapshotLocalData { /** * Creates a OmSnapshotLocalData object with default values. */ - public OmSnapshotLocalData(List notDefraggedSSTFileList, UUID previousSnapshotId) { + public OmSnapshotLocalData(UUID snapshotId, List notDefraggedSSTFileList, UUID previousSnapshotId) { + this.snapshotId = snapshotId; this.isSSTFiltered = false; this.lastDefragTime = 0L; this.needsDefrag = false; @@ -93,6 +97,7 @@ public OmSnapshotLocalData(OmSnapshotLocalData source) { this.needsDefrag = source.needsDefrag; this.checksum = source.checksum; this.version = source.version; + this.snapshotId = source.snapshotId; this.previousSnapshotId = source.previousSnapshotId; this.versionSstFileInfos = new LinkedHashMap<>(); setVersionSstFileInfos(source.versionSstFileInfos); @@ -167,6 +172,10 @@ public UUID getPreviousSnapshotId() { return previousSnapshotId; } + public UUID getSnapshotId() { + return snapshotId; + } + public void setPreviousSnapshotId(UUID previousSnapshotId) { this.previousSnapshotId = previousSnapshotId; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java index 3a80915e6eac..1d4fedfacaaf 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java @@ -70,8 +70,8 @@ public final class OmSnapshotLocalDataYaml extends OmSnapshotLocalData { /** * Creates a new OmSnapshotLocalDataYaml with default values. 
*/ - public OmSnapshotLocalDataYaml(List liveFileMetaDatas, UUID previousSnapshotId) { - super(liveFileMetaDatas, previousSnapshotId); + public OmSnapshotLocalDataYaml(UUID snapshotId, List liveFileMetaDatas, UUID previousSnapshotId) { + super(snapshotId, liveFileMetaDatas, previousSnapshotId); } /** @@ -227,8 +227,10 @@ private final class ConstructSnapshotLocalData extends AbstractConstruct { public Object construct(Node node) { MappingNode mnode = (MappingNode) node; Map nodes = constructMapping(mnode); + UUID snapId = UUID.fromString((String) nodes.get(OzoneConsts.OM_SLD_SNAP_ID)); UUID prevSnapId = UUID.fromString((String) nodes.get(OzoneConsts.OM_SLD_PREV_SNAP_ID)); - OmSnapshotLocalDataYaml snapshotLocalData = new OmSnapshotLocalDataYaml(Collections.emptyList(), prevSnapId); + OmSnapshotLocalDataYaml snapshotLocalData = new OmSnapshotLocalDataYaml(snapId, Collections.emptyList(), + prevSnapId); // Set version from YAML Integer version = (Integer) nodes.get(OzoneConsts.OM_SLD_VERSION); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index d86b1ce6473f..d531f95c46b6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -645,8 +645,8 @@ public static void createNewOmSnapshotLocalDataFile(OmSnapshotManager snapshotMa SnapshotInfo snapshotInfo) throws IOException { Path snapshotLocalDataPath = Paths.get(getSnapshotLocalPropertyYamlPath(snapshotStore.getDbLocation().toPath())); Files.deleteIfExists(snapshotLocalDataPath); - OmSnapshotLocalDataYaml snapshotLocalDataYaml = new OmSnapshotLocalDataYaml(getSnapshotSSTFileList(snapshotStore), - snapshotInfo.getPathPreviousSnapshotId()); + OmSnapshotLocalDataYaml snapshotLocalDataYaml = new 
OmSnapshotLocalDataYaml(snapshotInfo.getSnapshotId(), + getSnapshotSSTFileList(snapshotStore), snapshotInfo.getPathPreviousSnapshotId()); snapshotLocalDataYaml.writeToYaml(snapshotManager, snapshotLocalDataPath.toFile()); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java index eda95dc7b31d..8b41e5072185 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java @@ -104,7 +104,7 @@ private LiveFileMetaData createLiveFileMetaData(String fileName, String table, S /** * Creates a snapshot local data YAML file. */ - private Pair writeToYaml(String snapshotName) throws IOException { + private Pair writeToYaml(UUID snapshotId, String snapshotName) throws IOException { String yamlFilePath = snapshotName + ".yaml"; UUID previousSnapshotId = UUID.randomUUID(); // Create snapshot data with not defragged SST files @@ -112,7 +112,8 @@ private Pair writeToYaml(String snapshotName) throws IOException { createLiveFileMetaData("sst1", "table1", "k1", "k2"), createLiveFileMetaData("sst2", "table1", "k3", "k4"), createLiveFileMetaData("sst3", "table2", "k4", "k5")); - OmSnapshotLocalDataYaml dataYaml = new OmSnapshotLocalDataYaml(notDefraggedSSTFileList, previousSnapshotId); + OmSnapshotLocalDataYaml dataYaml = new OmSnapshotLocalDataYaml(snapshotId, notDefraggedSSTFileList, + previousSnapshotId); // Set version dataYaml.setVersion(42); @@ -146,7 +147,8 @@ private Pair writeToYaml(String snapshotName) throws IOException { @Test public void testWriteToYaml() throws IOException { - Pair yamlFilePrevIdPair = writeToYaml("snapshot1"); + UUID snapshotId = UUID.randomUUID(); + Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot1"); File yamlFile = 
yamlFilePrevIdPair.getLeft(); UUID prevSnapId = yamlFilePrevIdPair.getRight(); @@ -172,6 +174,7 @@ public void testWriteToYaml() throws IOException { assertEquals(2, defraggedSSTFiles.get(43).getSstFiles().size()); assertEquals(1, defraggedSSTFiles.get(44).getSstFiles().size()); assertEquals(prevSnapId, snapshotData.getPreviousSnapshotId()); + assertEquals(snapshotId, snapshotData.getSnapshotId()); assertEquals(ImmutableMap.of( 0, new VersionMeta(0, ImmutableList.of(new SstFileInfo("sst1", "k1", "k2", "table1"), @@ -186,7 +189,8 @@ public void testWriteToYaml() throws IOException { @Test public void testUpdateSnapshotDataFile() throws IOException { - Pair yamlFilePrevIdPair = writeToYaml("snapshot2"); + UUID snapshotId = UUID.randomUUID(); + Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot2"); File yamlFile = yamlFilePrevIdPair.getLeft(); // Read from YAML file OmSnapshotLocalDataYaml dataYaml = @@ -228,7 +232,8 @@ public void testEmptyFile() throws IOException { @Test public void testChecksum() throws IOException { - Pair yamlFilePrevIdPair = writeToYaml("snapshot3"); + UUID snapshotId = UUID.randomUUID(); + Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot3"); File yamlFile = yamlFilePrevIdPair.getLeft(); // Read from YAML file OmSnapshotLocalDataYaml snapshotData = OmSnapshotLocalDataYaml.getFromYamlFile(omSnapshotManager, yamlFile); @@ -244,7 +249,8 @@ public void testChecksum() throws IOException { @Test public void testYamlContainsAllFields() throws IOException { - Pair yamlFilePrevIdPair = writeToYaml("snapshot4"); + UUID snapshotId = UUID.randomUUID(); + Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot4"); File yamlFile = yamlFilePrevIdPair.getLeft(); String content = FileUtils.readFileToString(yamlFile, Charset.defaultCharset()); @@ -255,5 +261,7 @@ public void testYamlContainsAllFields() throws IOException { assertThat(content).contains(OzoneConsts.OM_SLD_LAST_DEFRAG_TIME); 
assertThat(content).contains(OzoneConsts.OM_SLD_NEEDS_DEFRAG); assertThat(content).contains(OzoneConsts.OM_SLD_VERSION_SST_FILE_INFO); + assertThat(content).contains(OzoneConsts.OM_SLD_SNAP_ID); + assertThat(content).contains(OzoneConsts.OM_SLD_PREV_SNAP_ID); } } From a8695001fdbc60e5dfad0bb1f9ec1526e06c9db9 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 8 Oct 2025 11:21:27 -0400 Subject: [PATCH 02/97] HDDS-13627. In memory Manager for Snapshot Local Data Change-Id: Ifd2feca1fddb144e4955db025f0b15a2ab1f3bfe --- .../OMDBCheckpointServletInodeBasedXfer.java | 3 +- .../ozone/om/OmSnapshotLocalDataYaml.java | 24 +-- .../hadoop/ozone/om/OmSnapshotManager.java | 75 ++------ .../snapshot/OMSnapshotPurgeResponse.java | 13 +- .../snapshot/OmSnapshotLocalDataManager.java | 160 ++++++++++++++++++ .../ozone/om/TestOmSnapshotLocalDataYaml.java | 23 +-- .../ozone/om/TestOmSnapshotManager.java | 15 +- ...TestOMSnapshotPurgeRequestAndResponse.java | 6 +- .../TestOMSnapshotCreateResponse.java | 5 +- .../TestOMSnapshotDeleteResponse.java | 5 +- 10 files changed, 225 insertions(+), 104 deletions(-) create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java index 1acd9593c822..8a58ed6aa764 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java @@ -67,6 +67,7 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.lock.BootstrapStateHandler; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; 
import org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; @@ -298,7 +299,7 @@ private void transferSnapshotData(Set sstFilesToExclude, Path tmpdir, Se writeDBToArchive(sstFilesToExclude, snapshotDir, maxTotalSstSize, archiveOutputStream, tmpdir, hardLinkFileMap, false); Path snapshotLocalPropertyYaml = Paths.get( - OmSnapshotManager.getSnapshotLocalPropertyYamlPath(snapshotDir)); + OmSnapshotLocalDataManager.getSnapshotLocalPropertyYamlPath(snapshotDir)); if (Files.exists(snapshotLocalPropertyYaml)) { File yamlFile = snapshotLocalPropertyYaml.toFile(); hardLinkFileMap.put(yamlFile.getAbsolutePath(), yamlFile.getName()); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java index 1d4fedfacaaf..a3683e11c16f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java @@ -31,6 +31,7 @@ import org.apache.commons.pool2.impl.DefaultPooledObject; import org.apache.hadoop.hdds.server.YamlUtils; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; import org.apache.ozone.compaction.log.SstFileInfo; import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; import org.rocksdb.LiveFileMetaData; @@ -66,6 +67,7 @@ public final class OmSnapshotLocalDataYaml extends OmSnapshotLocalData { public static final Tag SNAPSHOT_YAML_TAG = new Tag("OmSnapshotLocalData"); public static final Tag SNAPSHOT_VERSION_META_TAG = new Tag("VersionMeta"); public static final Tag SST_FILE_INFO_TAG = new Tag("SstFileInfo"); + public static final String YAML_FILE_EXTENSION = ".yaml"; /** * Creates a new OmSnapshotLocalDataYaml with 
default values. @@ -88,7 +90,7 @@ public OmSnapshotLocalDataYaml(OmSnapshotLocalData source) { * @return true if the checksum is valid, false otherwise * @throws IOException if there's an error computing the checksum */ - public static boolean verifyChecksum(OmSnapshotManager snapshotManager, OmSnapshotLocalData snapshotData) + public static boolean verifyChecksum(OmSnapshotLocalDataManager localDataManager, OmSnapshotLocalData snapshotData) throws IOException { Preconditions.checkNotNull(snapshotData, "snapshotData cannot be null"); @@ -106,7 +108,7 @@ public static boolean verifyChecksum(OmSnapshotManager snapshotManager, OmSnapsh snapshotDataCopy.setChecksum(null); // Get the YAML representation - try (UncheckedAutoCloseableSupplier yaml = snapshotManager.getSnapshotLocalYaml()) { + try (UncheckedAutoCloseableSupplier yaml = localDataManager.getSnapshotLocalYaml()) { // Compute new checksum snapshotDataCopy.computeAndSetChecksum(yaml.get()); @@ -272,8 +274,8 @@ public Object construct(Node node) { * (without triggering checksum computation or persistence). 
* @return YAML string representation */ - public String getYaml(OmSnapshotManager snapshotManager) throws IOException { - try (UncheckedAutoCloseableSupplier yaml = snapshotManager.getSnapshotLocalYaml()) { + public String getYaml(OmSnapshotLocalDataManager snapshotLocalDataManager) throws IOException { + try (UncheckedAutoCloseableSupplier yaml = snapshotLocalDataManager.getSnapshotLocalYaml()) { return yaml.get().dump(this); } } @@ -283,9 +285,9 @@ public String getYaml(OmSnapshotManager snapshotManager) throws IOException { * @param yamlFile The file to write to * @throws IOException If there's an error writing to the file */ - public void writeToYaml(OmSnapshotManager snapshotManager, File yamlFile) throws IOException { + public void writeToYaml(OmSnapshotLocalDataManager snapshotLocalDataManager, File yamlFile) throws IOException { // Create Yaml - try (UncheckedAutoCloseableSupplier yaml = snapshotManager.getSnapshotLocalYaml()) { + try (UncheckedAutoCloseableSupplier yaml = snapshotLocalDataManager.getSnapshotLocalYaml()) { // Compute Checksum and update SnapshotData computeAndSetChecksum(yaml.get()); // Write the SnapshotData with checksum to Yaml file. 
@@ -299,11 +301,11 @@ public void writeToYaml(OmSnapshotManager snapshotManager, File yamlFile) throws * @return A new OmSnapshotLocalDataYaml instance * @throws IOException If there's an error reading the file */ - public static OmSnapshotLocalDataYaml getFromYamlFile(OmSnapshotManager snapshotManager, File yamlFile) - throws IOException { + public static OmSnapshotLocalDataYaml getFromYamlFile(OmSnapshotLocalDataManager snapshotLocalDataManager, + File yamlFile) throws IOException { Preconditions.checkNotNull(yamlFile, "yamlFile cannot be null"); try (InputStream inputFileStream = Files.newInputStream(yamlFile.toPath())) { - return getFromYamlStream(snapshotManager, inputFileStream); + return getFromYamlStream(snapshotLocalDataManager, inputFileStream); } } @@ -311,10 +313,10 @@ public static OmSnapshotLocalDataYaml getFromYamlFile(OmSnapshotManager snapshot * Read the YAML content InputStream, and return OmSnapshotLocalDataYaml instance. * @throws IOException */ - public static OmSnapshotLocalDataYaml getFromYamlStream(OmSnapshotManager snapshotManager, + public static OmSnapshotLocalDataYaml getFromYamlStream(OmSnapshotLocalDataManager snapshotLocalDataManager, InputStream input) throws IOException { OmSnapshotLocalDataYaml dataYaml; - try (UncheckedAutoCloseableSupplier yaml = snapshotManager.getSnapshotLocalYaml()) { + try (UncheckedAutoCloseableSupplier yaml = snapshotLocalDataManager.getSnapshotLocalYaml()) { dataYaml = yaml.get().load(input); } catch (YAMLException ex) { // Unchecked exception. 
Convert to IOException diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index d531f95c46b6..19fe367bb923 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -82,7 +82,6 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import org.apache.commons.lang3.tuple.Pair; -import org.apache.commons.pool2.impl.GenericObjectPool; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.ratis.RatisHelper; @@ -102,6 +101,7 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotDiffJob; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.service.SnapshotDiffCleanupService; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.om.snapshot.SnapshotDiffManager; import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; @@ -117,7 +117,6 @@ import org.rocksdb.RocksDBException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.yaml.snakeyaml.Yaml; /** * This class is used to manage/create OM snapshots. 
@@ -186,7 +185,7 @@ public final class OmSnapshotManager implements AutoCloseable { private final List columnFamilyDescriptors; private final List columnFamilyHandles; private final SnapshotDiffCleanupService snapshotDiffCleanupService; - private final GenericObjectPool yamlPool; + private final OmSnapshotLocalDataManager snapshotLocalDataManager; private final int maxPageSize; @@ -197,7 +196,7 @@ public final class OmSnapshotManager implements AutoCloseable { private final AtomicInteger inFlightSnapshotCount = new AtomicInteger(0); public OmSnapshotManager(OzoneManager ozoneManager) { - this.yamlPool = new GenericObjectPool<>(new OmSnapshotLocalDataYaml.YamlFactory()); + this.snapshotLocalDataManager = new OmSnapshotLocalDataManager(ozoneManager.getMetadataManager()); boolean isFilesystemSnapshotEnabled = ozoneManager.isFilesystemSnapshotEnabled(); LOG.info("Ozone filesystem snapshot feature is {}.", @@ -517,11 +516,12 @@ public static DBCheckpoint createOmSnapshotCheckpoint( } OmSnapshotManager omSnapshotManager = ((OmMetadataManagerImpl) omMetadataManager).getOzoneManager().getOmSnapshotManager(); + OmSnapshotLocalDataManager snapshotLocalDataManager = omSnapshotManager.getSnapshotLocalDataManager(); OzoneConfiguration configuration = ((OmMetadataManagerImpl) omMetadataManager).getOzoneManager().getConfiguration(); try (OmMetadataManagerImpl checkpointMetadataManager = OmMetadataManagerImpl.createCheckpointMetadataManager(configuration, dbCheckpoint)) { // Create the snapshot local property file. - OmSnapshotManager.createNewOmSnapshotLocalDataFile(omSnapshotManager, + snapshotLocalDataManager.createNewOmSnapshotLocalDataFile( (RDBStore) checkpointMetadataManager.getStore(), snapshotInfo); } @@ -628,28 +628,12 @@ private static void deleteKeysFromDelKeyTableInSnapshotScope( * @param store AOS or snapshot DB for not defragged or defragged snapshot respectively. 
* @return a Map of (table, set of SST files corresponding to the table) */ - private static List getSnapshotSSTFileList(RDBStore store) - throws IOException { + public static List getSnapshotSSTFileList(RDBStore store) throws IOException { return store.getDb().getLiveFilesMetaData().stream() .filter(lfm -> COLUMN_FAMILIES_TO_TRACK_IN_SNAPSHOT.contains(StringUtils.bytes2String(lfm.columnFamilyName()))) .collect(Collectors.toList()); } - /** - * Creates and writes snapshot local properties to a YAML file with not defragged SST file list. - * @param snapshotManager snapshot manager instance. - * @param snapshotStore snapshot metadata manager. - * @param snapshotInfo snapshot info instance corresponding to snapshot. - */ - public static void createNewOmSnapshotLocalDataFile(OmSnapshotManager snapshotManager, RDBStore snapshotStore, - SnapshotInfo snapshotInfo) throws IOException { - Path snapshotLocalDataPath = Paths.get(getSnapshotLocalPropertyYamlPath(snapshotStore.getDbLocation().toPath())); - Files.deleteIfExists(snapshotLocalDataPath); - OmSnapshotLocalDataYaml snapshotLocalDataYaml = new OmSnapshotLocalDataYaml(snapshotInfo.getSnapshotId(), - getSnapshotSSTFileList(snapshotStore), snapshotInfo.getPathPreviousSnapshotId()); - snapshotLocalDataYaml.writeToYaml(snapshotManager, snapshotLocalDataPath.toFile()); - } - // Get OmSnapshot if the keyName has ".snapshot" key indicator @SuppressWarnings("unchecked") public UncheckedAutoCloseableSupplier getActiveFsMetadataOrSnapshot( @@ -691,24 +675,8 @@ public UncheckedAutoCloseableSupplier getSnapshot( return getSnapshot(volumeName, bucketName, snapshotName, true); } - public UncheckedAutoCloseableSupplier getSnapshotLocalYaml() throws IOException { - try { - Yaml yaml = yamlPool.borrowObject(); - return new UncheckedAutoCloseableSupplier() { - - @Override - public void close() { - yamlPool.returnObject(yaml); - } - - @Override - public Yaml get() { - return yaml; - } - }; - } catch (Exception e) { - throw new 
IOException("Failed to get snapshot local yaml", e); - } + public OmSnapshotLocalDataManager getSnapshotLocalDataManager() { + return snapshotLocalDataManager; } private UncheckedAutoCloseableSupplier getSnapshot( @@ -856,29 +824,6 @@ public static String extractSnapshotIDFromCheckpointDirName(String snapshotPath) return snapshotPath.substring(index + OM_DB_NAME.length() + OM_SNAPSHOT_SEPARATOR.length()); } - /** - * Returns the path to the YAML file that stores local properties for the given snapshot. - * - * @param omMetadataManager metadata manager to get the base path - * @param snapshotInfo snapshot metadata - * @return the path to the snapshot's local property YAML file - */ - public static String getSnapshotLocalPropertyYamlPath(OMMetadataManager omMetadataManager, - SnapshotInfo snapshotInfo) { - Path snapshotPath = getSnapshotPath(omMetadataManager, snapshotInfo); - return getSnapshotLocalPropertyYamlPath(snapshotPath); - } - - /** - * Returns the path to the YAML file that stores local properties for the given snapshot. 
- * - * @param snapshotPath path to the snapshot checkpoint dir - * @return the path to the snapshot's local property YAML file - */ - public static String getSnapshotLocalPropertyYamlPath(Path snapshotPath) { - return snapshotPath.toString() + ".yaml"; - } - public static boolean isSnapshotKey(String[] keyParts) { return (keyParts.length > 1) && (keyParts[0].compareTo(OM_SNAPSHOT_INDICATOR) == 0); @@ -1199,8 +1144,8 @@ public void close() { if (options != null) { options.close(); } - if (yamlPool != null) { - yamlPool.close(); + if (snapshotLocalDataManager != null) { + snapshotLocalDataManager.close(); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java index ef3555f54350..75ba2a8f9501 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java @@ -37,6 +37,7 @@ import org.apache.hadoop.ozone.om.lock.OMLockDetails; import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -98,7 +99,9 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager, ((OmMetadataManagerImpl) omMetadataManager).getSnapshotChainManager() .removeFromSnapshotIdToTable(snapshotInfo.getSnapshotId()); // Delete Snapshot checkpoint directory. 
- deleteCheckpointDirectory(omMetadataManager, snapshotInfo); + OmSnapshotLocalDataManager snapshotLocalDataManager = ((OmMetadataManagerImpl) omMetadataManager) + .getOzoneManager().getOmSnapshotManager().getSnapshotLocalDataManager(); + deleteCheckpointDirectory(snapshotLocalDataManager, omMetadataManager, snapshotInfo); // Delete snapshotInfo from the table. omMetadataManager.getSnapshotInfoTable().deleteWithBatch(batchOperation, dbKey); } @@ -117,8 +120,8 @@ private void updateSnapInfo(OmMetadataManagerImpl metadataManager, /** * Deletes the checkpoint directory for a snapshot. */ - private void deleteCheckpointDirectory(OMMetadataManager omMetadataManager, - SnapshotInfo snapshotInfo) { + private void deleteCheckpointDirectory(OmSnapshotLocalDataManager snapshotLocalDataManager, + OMMetadataManager omMetadataManager, SnapshotInfo snapshotInfo) { // Acquiring write lock to avoid race condition with sst filtering service which creates a sst filtered file // inside the snapshot directory. Any operation apart which doesn't create/delete files under this snapshot // directory can run in parallel along with this operation. @@ -127,8 +130,8 @@ private void deleteCheckpointDirectory(OMMetadataManager omMetadataManager, boolean acquiredSnapshotLock = omLockDetails.isLockAcquired(); if (acquiredSnapshotLock) { Path snapshotDirPath = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotInfo); - Path snapshotLocalDataPath = Paths.get( - OmSnapshotManager.getSnapshotLocalPropertyYamlPath(omMetadataManager, snapshotInfo)); + // TODO: Do not delete on snapshot purge. OmSnapshotLocalDataManager should delete orphan local data files. 
+ Path snapshotLocalDataPath = Paths.get(snapshotLocalDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo)); try { FileUtils.deleteDirectory(snapshotDirPath.toFile()); Files.deleteIfExists(snapshotLocalDataPath); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java new file mode 100644 index 000000000000..fb6d7cf744a9 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -0,0 +1,160 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.snapshot; + +import static org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml.YAML_FILE_EXTENSION; + +import com.google.common.graph.GraphBuilder; +import com.google.common.graph.MutableGraph; +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Objects; +import java.util.UUID; +import org.apache.commons.pool2.impl.GenericObjectPool; +import org.apache.hadoop.hdds.utils.db.RDBStore; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; +import org.yaml.snakeyaml.Yaml; + +/** + * Manages local data and metadata associated with Ozone Manager (OM) snapshots, + * including the creation, storage, and representation of data as YAML files. + */ +public class OmSnapshotLocalDataManager implements AutoCloseable { + + private final GenericObjectPool yamlPool; + private final MutableGraph localDataGraph; + private final OMMetadataManager omMetadataManager; + + public OmSnapshotLocalDataManager(OMMetadataManager omMetadataManager) { + this.yamlPool = new GenericObjectPool(new OmSnapshotLocalDataYaml.YamlFactory()); + this.localDataGraph = GraphBuilder.directed().build(); + this.omMetadataManager = omMetadataManager; + init(); + } + + /** + * Returns the path to the YAML file that stores local properties for the given snapshot. + * + * @param snapshotPath path to the snapshot checkpoint dir + * @return the path to the snapshot's local property YAML file + */ + public static String getSnapshotLocalPropertyYamlPath(Path snapshotPath) { + return snapshotPath.toString() + YAML_FILE_EXTENSION; + } + + /** + * Returns the path to the YAML file that stores local properties for the given snapshot. 
+ * + * @param snapshotInfo snapshot metadata + * @return the path to the snapshot's local property YAML file + */ + public String getSnapshotLocalPropertyYamlPath(SnapshotInfo snapshotInfo) { + Path snapshotPath = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotInfo); + return getSnapshotLocalPropertyYamlPath(snapshotPath); + } + + /** + * Creates and writes snapshot local properties to a YAML file with not defragged SST file list. + * @param snapshotStore snapshot metadata manager. + * @param snapshotInfo snapshot info instance corresponding to snapshot. + */ + public void createNewOmSnapshotLocalDataFile(RDBStore snapshotStore, SnapshotInfo snapshotInfo) throws IOException { + Path snapshotLocalDataPath = Paths.get( + getSnapshotLocalPropertyYamlPath(snapshotStore.getDbLocation().toPath())); + Files.deleteIfExists(snapshotLocalDataPath); + OmSnapshotLocalDataYaml snapshotLocalDataYaml = new OmSnapshotLocalDataYaml(snapshotInfo.getSnapshotId(), + OmSnapshotManager.getSnapshotSSTFileList(snapshotStore), snapshotInfo.getPathPreviousSnapshotId()); + snapshotLocalDataYaml.writeToYaml(this, snapshotLocalDataPath.toFile()); + } + + private void init() { + RDBStore store = (RDBStore) omMetadataManager.getStore(); + String checkpointPrefix = store.getDbLocation().getName(); + File snapshotDir = new File(store.getSnapshotsParentDir()); + for (File yamlFile : + Objects.requireNonNull(snapshotDir.listFiles( + (dir, name) -> name.startsWith(checkpointPrefix) && name.endsWith(YAML_FILE_EXTENSION)))) { + System.out.println(yamlFile.getAbsolutePath()); + } + } + + @Override + public void close() { + if (yamlPool != null) { + yamlPool.close(); + } + } + + private final class VersionLocalDataNode { + private UUID snapshotId; + private int version; + private UUID previousSnapshotId; + private int previousSnapshotVersion; + + private VersionLocalDataNode(UUID snapshotId, int version, UUID previousSnapshotId, int previousSnapshotVersion) { + this.previousSnapshotId = 
previousSnapshotId; + this.previousSnapshotVersion = previousSnapshotVersion; + this.snapshotId = snapshotId; + this.version = version; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof VersionLocalDataNode)) { + return false; + } + + VersionLocalDataNode that = (VersionLocalDataNode) o; + return version == that.version && previousSnapshotVersion == that.previousSnapshotVersion && + snapshotId.equals(that.snapshotId) && Objects.equals(previousSnapshotId, that.previousSnapshotId); + } + + @Override + public int hashCode() { + return Objects.hash(snapshotId, version, previousSnapshotId, previousSnapshotVersion); + } + } + + public UncheckedAutoCloseableSupplier getSnapshotLocalYaml() throws IOException { + try { + Yaml yaml = yamlPool.borrowObject(); + return new UncheckedAutoCloseableSupplier() { + + @Override + public void close() { + yamlPool.returnObject(yaml); + } + + @Override + public Yaml get() { + return yaml; + } + }; + } catch (Exception e) { + throw new IOException("Failed to get snapshot local yaml", e); + } + } + +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java index 8b41e5072185..71933f8112c4 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OmSnapshotLocalData.VersionMeta; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; import org.apache.ozone.compaction.log.SstFileInfo; import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; import org.junit.jupiter.api.AfterEach; @@ -59,7 +60,7 @@ public class 
TestOmSnapshotLocalDataYaml { private static String testRoot = new FileSystemTestHelper().getTestRootDir(); - private static OmSnapshotManager omSnapshotManager; + private static OmSnapshotLocalDataManager snapshotLocalDataManager; private static final Yaml YAML = new OmSnapshotLocalDataYaml.YamlFactory().create(); private static final UncheckedAutoCloseableSupplier YAML_SUPPLIER = new UncheckedAutoCloseableSupplier() { @Override @@ -77,8 +78,8 @@ public void close() { @BeforeAll public static void setupClassMocks() throws IOException { - omSnapshotManager = mock(OmSnapshotManager.class); - when(omSnapshotManager.getSnapshotLocalYaml()).thenReturn(YAML_SUPPLIER); + snapshotLocalDataManager = mock(OmSnapshotLocalDataManager.class); + when(snapshotLocalDataManager.getSnapshotLocalYaml()).thenReturn(YAML_SUPPLIER); } @BeforeEach @@ -137,7 +138,7 @@ private Pair writeToYaml(UUID snapshotId, String snapshotName) throw File yamlFile = new File(testRoot, yamlFilePath); // Create YAML file with SnapshotData - dataYaml.writeToYaml(omSnapshotManager, yamlFile); + dataYaml.writeToYaml(snapshotLocalDataManager, yamlFile); // Check YAML file exists assertTrue(yamlFile.exists()); @@ -153,7 +154,7 @@ public void testWriteToYaml() throws IOException { UUID prevSnapId = yamlFilePrevIdPair.getRight(); // Read from YAML file - OmSnapshotLocalDataYaml snapshotData = OmSnapshotLocalDataYaml.getFromYamlFile(omSnapshotManager, yamlFile); + OmSnapshotLocalDataYaml snapshotData = OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, yamlFile); // Verify fields assertEquals(44, snapshotData.getVersion()); @@ -194,7 +195,7 @@ public void testUpdateSnapshotDataFile() throws IOException { File yamlFile = yamlFilePrevIdPair.getLeft(); // Read from YAML file OmSnapshotLocalDataYaml dataYaml = - OmSnapshotLocalDataYaml.getFromYamlFile(omSnapshotManager, yamlFile); + OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, yamlFile); // Update snapshot data 
dataYaml.setSstFiltered(false); @@ -203,10 +204,10 @@ public void testUpdateSnapshotDataFile() throws IOException { singletonList(new SstFileInfo("defragged-sst4", "k5", "k6", "table3")), 5); // Write updated data back to file - dataYaml.writeToYaml(omSnapshotManager, yamlFile); + dataYaml.writeToYaml(snapshotLocalDataManager, yamlFile); // Read back the updated data - dataYaml = OmSnapshotLocalDataYaml.getFromYamlFile(omSnapshotManager, yamlFile); + dataYaml = OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, yamlFile); // Verify updated data assertThat(dataYaml.getSstFiltered()).isFalse(); @@ -225,7 +226,7 @@ public void testEmptyFile() throws IOException { assertTrue(emptyFile.createNewFile()); IOException ex = assertThrows(IOException.class, () -> - OmSnapshotLocalDataYaml.getFromYamlFile(omSnapshotManager, emptyFile)); + OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, emptyFile)); assertThat(ex).hasMessageContaining("Failed to load snapshot file. File is empty."); } @@ -236,7 +237,7 @@ public void testChecksum() throws IOException { Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot3"); File yamlFile = yamlFilePrevIdPair.getLeft(); // Read from YAML file - OmSnapshotLocalDataYaml snapshotData = OmSnapshotLocalDataYaml.getFromYamlFile(omSnapshotManager, yamlFile); + OmSnapshotLocalDataYaml snapshotData = OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, yamlFile); // Get the original checksum String originalChecksum = snapshotData.getChecksum(); @@ -244,7 +245,7 @@ public void testChecksum() throws IOException { // Verify the checksum is not null or empty assertThat(originalChecksum).isNotNull().isNotEmpty(); - assertTrue(OmSnapshotLocalDataYaml.verifyChecksum(omSnapshotManager, snapshotData)); + assertTrue(OmSnapshotLocalDataYaml.verifyChecksum(snapshotLocalDataManager, snapshotData)); } @Test diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java index df2b026bce40..62f9561d2b83 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java @@ -82,6 +82,7 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils; import org.apache.hadoop.util.Time; import org.apache.ozone.compaction.log.SstFileInfo; @@ -107,6 +108,7 @@ class TestOmSnapshotManager { private SnapshotChainManager snapshotChainManager; private OmMetadataManagerImpl omMetadataManager; private OmSnapshotManager omSnapshotManager; + private OmSnapshotLocalDataManager snapshotLocalDataManager; private static final String CANDIDATE_DIR_NAME = OM_DB_NAME + SNAPSHOT_CANDIDATE_DIR; private File leaderDir; @@ -139,6 +141,7 @@ void init(@TempDir File tempDir) throws Exception { om = omTestManagers.getOzoneManager(); omMetadataManager = (OmMetadataManagerImpl) om.getMetadataManager(); omSnapshotManager = om.getOmSnapshotManager(); + snapshotLocalDataManager = om.getOmSnapshotManager().getSnapshotLocalDataManager(); snapshotChainManager = omMetadataManager.getSnapshotChainManager(); } @@ -158,8 +161,8 @@ void cleanup() throws IOException { SnapshotInfo snapshotInfo = snapshotInfoTable.get(snapshotInfoKey); snapshotChainManager.deleteSnapshot(snapshotInfo); snapshotInfoTable.delete(snapshotInfoKey); - Path snapshotYaml = Paths.get(OmSnapshotManager.getSnapshotLocalPropertyYamlPath( - om.getMetadataManager(), snapshotInfo)); + + Path snapshotYaml = Paths.get(snapshotLocalDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo)); Files.deleteIfExists(snapshotYaml); } 
omSnapshotManager.invalidateCache(); @@ -310,19 +313,19 @@ public void testCreateNewSnapshotLocalYaml() throws IOException { when(mockedStore.getDb()).thenReturn(mockedDb); when(mockedDb.getLiveFilesMetaData()).thenReturn(mockedLiveFiles); - Path snapshotYaml = Paths.get(OmSnapshotManager.getSnapshotLocalPropertyYamlPath( - omMetadataManager, snapshotInfo)); + Path snapshotYaml = Paths.get(snapshotLocalDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo)); when(mockedStore.getDbLocation()).thenReturn(getSnapshotPath(omMetadataManager, snapshotInfo).toFile()); // Create an existing YAML file for the snapshot assertTrue(snapshotYaml.toFile().createNewFile()); assertEquals(0, Files.size(snapshotYaml)); // Create a new YAML file for the snapshot - OmSnapshotManager.createNewOmSnapshotLocalDataFile(omSnapshotManager, mockedStore, snapshotInfo); + snapshotLocalDataManager.createNewOmSnapshotLocalDataFile(mockedStore, snapshotInfo); // Verify that previous file was overwritten assertTrue(Files.exists(snapshotYaml)); assertTrue(Files.size(snapshotYaml) > 0); // Verify the contents of the YAML file - OmSnapshotLocalData localData = OmSnapshotLocalDataYaml.getFromYamlFile(omSnapshotManager, snapshotYaml.toFile()); + OmSnapshotLocalData localData = OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, + snapshotYaml.toFile()); assertNotNull(localData); assertEquals(0, localData.getVersion()); assertEquals(notDefraggedVersionMeta, localData.getVersionSstFileInfos().get(0)); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java index d2ceb5a44786..0fb26a4cd993 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java +++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java @@ -47,11 +47,11 @@ import org.apache.hadoop.hdds.utils.db.CodecException; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotPurgeResponse; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotPurgeRequest; @@ -164,7 +164,7 @@ public void testValidateAndUpdateCache() throws Exception { for (Path checkpoint : checkpointPaths) { assertTrue(Files.exists(checkpoint)); assertTrue(Files.exists(Paths.get( - OmSnapshotManager.getSnapshotLocalPropertyYamlPath(checkpoint)))); + OmSnapshotLocalDataManager.getSnapshotLocalPropertyYamlPath(checkpoint)))); } OMRequest snapshotPurgeRequest = createPurgeKeysRequest( @@ -191,7 +191,7 @@ public void testValidateAndUpdateCache() throws Exception { for (Path checkpoint : checkpointPaths) { assertFalse(Files.exists(checkpoint)); assertFalse(Files.exists(Paths.get( - OmSnapshotManager.getSnapshotLocalPropertyYamlPath(checkpoint)))); + OmSnapshotLocalDataManager.getSnapshotLocalPropertyYamlPath(checkpoint)))); } assertEquals(initialSnapshotPurgeCount + 1, getOmSnapshotIntMetrics().getNumSnapshotPurges()); assertEquals(initialSnapshotPurgeFailCount, getOmSnapshotIntMetrics().getNumSnapshotPurgeFails()); diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java index 1e78943c7b5e..ce24040a3eab 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java @@ -46,6 +46,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateSnapshotResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; @@ -91,9 +92,11 @@ public void close() { }; OzoneManager ozoneManager = mock(OzoneManager.class); OmSnapshotManager omSnapshotManager = mock(OmSnapshotManager.class); + OmSnapshotLocalDataManager snapshotLocalDataManager = mock(OmSnapshotLocalDataManager.class); when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); - when(omSnapshotManager.getSnapshotLocalYaml()).thenReturn(yamlSupplier); + when(omSnapshotManager.getSnapshotLocalDataManager()).thenReturn(snapshotLocalDataManager); + when(snapshotLocalDataManager.getSnapshotLocalYaml()).thenReturn(yamlSupplier); omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, ozoneManager); batchOperation = omMetadataManager.getStore().initBatchOperation(); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java index 24fdc138fd72..f8d40951b2bf 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java @@ -37,6 +37,7 @@ import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateSnapshotResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteSnapshotResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; @@ -83,9 +84,11 @@ public void close() { }; OzoneManager ozoneManager = mock(OzoneManager.class); OmSnapshotManager omSnapshotManager = mock(OmSnapshotManager.class); + OmSnapshotLocalDataManager omSnapshotLocalDataManager = mock(OmSnapshotLocalDataManager.class); when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); - when(omSnapshotManager.getSnapshotLocalYaml()).thenReturn(yamlSupplier); + when(omSnapshotManager.getSnapshotLocalDataManager()).thenReturn(omSnapshotLocalDataManager); + when(omSnapshotLocalDataManager.getSnapshotLocalYaml()).thenReturn(yamlSupplier); omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, ozoneManager); batchOperation = omMetadataManager.getStore().initBatchOperation(); } From 252d338d168e4eefa13b2fe48d5e14b5fb81b125 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 8 Oct 2025 22:16:37 -0400 Subject: [PATCH 03/97] HDDS-13627. 
In memory Manager for Snapshot Local Data Change-Id: I34536ff06efb7d5a4942853f0fd83942ab398b5f --- hadoop-hdds/common/pom.xml | 4 + .../apache/hadoop/ozone/util/Checksum.java | 24 +++ .../hadoop/ozone/util/ObjectSerializer.java | 65 +++++++++ .../hadoop/ozone/util/YamlSerializer.java | 138 ++++++++++++++++++ .../hadoop/ozone/om/OmSnapshotLocalData.java | 8 +- .../ozone/om/OmSnapshotLocalDataYaml.java | 128 +--------------- .../hadoop/ozone/om/OmSnapshotManager.java | 2 +- .../snapshot/OmSnapshotLocalDataManager.java | 78 +++++----- .../ozone/om/TestOmSnapshotLocalDataYaml.java | 59 ++++---- .../ozone/om/TestOmSnapshotManager.java | 3 +- .../TestOMSnapshotCreateResponse.java | 17 --- .../TestOMSnapshotDeleteResponse.java | 17 --- 12 files changed, 319 insertions(+), 224 deletions(-) create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/Checksum.java create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ObjectSerializer.java create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/YamlSerializer.java diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml index 0216b808a7cf..f22aeda491ac 100644 --- a/hadoop-hdds/common/pom.xml +++ b/hadoop-hdds/common/pom.xml @@ -108,6 +108,10 @@ org.apache.commons commons-lang3 + + org.apache.commons + commons-pool2 + org.apache.hadoop hadoop-common diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/Checksum.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/Checksum.java new file mode 100644 index 000000000000..03e0559a6d3d --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/Checksum.java @@ -0,0 +1,24 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.util; + +import org.apache.hadoop.hdds.utils.db.CopyObject; + +public interface Checksum<T extends Checksum<T>> extends CopyObject<T> { + String getChecksum(); +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ObjectSerializer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ObjectSerializer.java new file mode 100644 index 000000000000..b9727d559148 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ObjectSerializer.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.util; + +import java.io.Closeable; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; + +public interface ObjectSerializer<T> extends Closeable { + + /** + * Loads an object of type T from the specified file. + * + * @param path the file from which the object will be loaded + * @return the object of type T that has been deserialized from the file + * @throws IOException if an I/O error occurs during reading from the file + */ + T load(File path) throws IOException; + + /** + * Loads an object of type T from the specified input stream. + * + * @param inputStream the input stream from which the object will be deserialized + * @return the deserialized object of type T + * @throws IOException if an I/O error occurs during reading from the input stream + */ + T load(InputStream inputStream) throws IOException; + + /** + * Serializes the given data object of type T and saves it to the specified file. + * + * @param path the file where the serialized object will be saved + * @param data the object of type T to be serialized and saved + * @throws IOException if an I/O error occurs during writing to the file + */ + void save(File path, T data) throws IOException; + + /** + * Verifies the checksum of the provided data object of type T. 
+ * + * @param data the object of type T whose checksum is to be verified + * @return true if the checksum of the data is valid, false otherwise + * @throws IOException if an I/O error occurs during verification + */ + boolean verifyChecksum(T data) throws IOException; + + @Override + void close() throws IOException; +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/YamlSerializer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/YamlSerializer.java new file mode 100644 index 000000000000..07a128044b00 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/YamlSerializer.java @@ -0,0 +1,138 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.ozone.util; + +import com.google.common.base.Preconditions; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import org.apache.commons.pool2.BasePooledObjectFactory; +import org.apache.commons.pool2.impl.GenericObjectPool; +import org.apache.hadoop.hdds.server.YamlUtils; +import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.yaml.snakeyaml.Yaml; + +public abstract class YamlSerializer<T extends Checksum<T>> implements ObjectSerializer<T> { + + private static final Logger LOG = LoggerFactory.getLogger(YamlSerializer.class); + + private final GenericObjectPool<Yaml> yamlPool; + + public YamlSerializer(BasePooledObjectFactory<Yaml> yamlFactory) { + this.yamlPool = new GenericObjectPool<>(yamlFactory); + } + + private UncheckedAutoCloseableSupplier<Yaml> getYaml() throws IOException { + try { + Yaml yaml = yamlPool.borrowObject(); + return new UncheckedAutoCloseableSupplier<Yaml>() { + + @Override + public void close() { + yamlPool.returnObject(yaml); + } + + @Override + public Yaml get() { + return yaml; + } + }; + } catch (Exception e) { + throw new IOException("Failed to get yaml object.", e); + } + } + + @Override + public T load(File yamlFile) throws IOException { + Preconditions.checkNotNull(yamlFile, "yamlFile cannot be null"); + try (InputStream inputFileStream = Files.newInputStream(yamlFile.toPath())) { + return load(inputFileStream); + } + } + + @Override + public T load(InputStream input) throws IOException { + T dataYaml; + try (UncheckedAutoCloseableSupplier<Yaml> yaml = getYaml()) { + dataYaml = yaml.get().load(input); + } catch (Exception e) { + throw new IOException("Failed to load file", e); + } + + if (dataYaml == null) { + // If Yaml#load returned null, then the file is empty. This is valid yaml + // but considered an error in this case since we have lost data about + // the snapshot. 
+ throw new IOException("Failed to load file. File is empty."); + } + + return dataYaml; + } + + @Override + public boolean verifyChecksum(T data) throws IOException { + Preconditions.checkNotNull(data, "data cannot be null"); + + // Get the stored checksum + String storedChecksum = data.getChecksum(); + if (storedChecksum == null) { + LOG.warn("No checksum found in snapshot data for verification"); + return false; + } + + // Create a copy of the snapshot data for computing checksum + T copy = data.copyObject(); + + // Get the YAML representation + try (UncheckedAutoCloseableSupplier yaml = getYaml()) { + // Compute new checksum + computeAndSetChecksum(yaml.get(), copy); + + // Compare the stored and computed checksums + String computedChecksum = copy.getChecksum(); + boolean isValid = storedChecksum.equals(computedChecksum); + + if (!isValid) { + LOG.warn("Checksum verification failed for snapshot local data. " + + "Stored: {}, Computed: {}", storedChecksum, computedChecksum); + } + return isValid; + } + } + + @Override + public void save(File yamlFile, T data) throws IOException { + // Create Yaml + try (UncheckedAutoCloseableSupplier yaml = getYaml()) { + // Compute Checksum and update SnapshotData + computeAndSetChecksum(yaml.get(), data); + // Write the object with checksum to Yaml file. 
+ YamlUtils.dump(yaml.get(), data, yamlFile, LOG); + } + } + + @Override + public void close() throws IOException { + + } + + public abstract void computeAndSetChecksum(Yaml yaml, T data) throws IOException; + +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java index 5f65fd4c0d08..e82bad8832a5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java @@ -31,6 +31,7 @@ import java.util.stream.Collectors; import org.apache.commons.codec.digest.DigestUtils; import org.apache.hadoop.hdds.utils.db.CopyObject; +import org.apache.hadoop.ozone.util.Checksum; import org.apache.ozone.compaction.log.SstFileInfo; import org.rocksdb.LiveFileMetaData; import org.yaml.snakeyaml.Yaml; @@ -39,7 +40,7 @@ * OmSnapshotLocalData is the in-memory representation of snapshot local metadata. * Inspired by org.apache.hadoop.ozone.container.common.impl.ContainerData */ -public abstract class OmSnapshotLocalData { +public class OmSnapshotLocalData implements Checksum<OmSnapshotLocalData> { // Unique identifier for the snapshot. This is used to identify the snapshot. private UUID snapshotId; @@ -258,6 +259,11 @@ public void setVersion(int version) { this.version = version; } + @Override + public OmSnapshotLocalData copyObject() { + return new OmSnapshotLocalData(this); + } + + /** + * Represents metadata for a specific version in a snapshot. 
* This class maintains the version of the previous snapshot and a list of SST (Sorted String Table) files diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java index a3683e11c16f..94632d7385d6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java @@ -31,6 +31,7 @@ import org.apache.commons.pool2.impl.DefaultPooledObject; import org.apache.hadoop.hdds.server.YamlUtils; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.om.OmSnapshotLocalData.VersionMeta; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; import org.apache.ozone.compaction.log.SstFileInfo; import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; @@ -60,7 +61,7 @@ * Checksum of the YAML fields are computed and stored in the YAML file transparently to callers. * Inspired by org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml */ -public final class OmSnapshotLocalDataYaml extends OmSnapshotLocalData { +public final class OmSnapshotLocalDataYaml { private static final Logger LOG = LoggerFactory.getLogger(OmSnapshotLocalDataYaml.class); @@ -69,61 +70,6 @@ public final class OmSnapshotLocalDataYaml extends OmSnapshotLocalData { public static final Tag SST_FILE_INFO_TAG = new Tag("SstFileInfo"); public static final String YAML_FILE_EXTENSION = ".yaml"; - /** - * Creates a new OmSnapshotLocalDataYaml with default values. - */ - public OmSnapshotLocalDataYaml(UUID snapshotId, List liveFileMetaDatas, UUID previousSnapshotId) { - super(snapshotId, liveFileMetaDatas, previousSnapshotId); - } - - /** - * Copy constructor to create a deep copy. 
- * @param source The source OmSnapshotLocalData to copy from - */ - public OmSnapshotLocalDataYaml(OmSnapshotLocalData source) { - super(source); - } - - /** - * Verifies the checksum of the snapshot data. - * @param snapshotData The snapshot data to verify - * @return true if the checksum is valid, false otherwise - * @throws IOException if there's an error computing the checksum - */ - public static boolean verifyChecksum(OmSnapshotLocalDataManager localDataManager, OmSnapshotLocalData snapshotData) - throws IOException { - Preconditions.checkNotNull(snapshotData, "snapshotData cannot be null"); - - // Get the stored checksum - String storedChecksum = snapshotData.getChecksum(); - if (storedChecksum == null) { - LOG.warn("No checksum found in snapshot data for verification"); - return false; - } - - // Create a copy of the snapshot data for computing checksum - OmSnapshotLocalDataYaml snapshotDataCopy = new OmSnapshotLocalDataYaml(snapshotData); - - // Clear the existing checksum in the copy - snapshotDataCopy.setChecksum(null); - - // Get the YAML representation - try (UncheckedAutoCloseableSupplier yaml = localDataManager.getSnapshotLocalYaml()) { - // Compute new checksum - snapshotDataCopy.computeAndSetChecksum(yaml.get()); - - // Compare the stored and computed checksums - String computedChecksum = snapshotDataCopy.getChecksum(); - boolean isValid = storedChecksum.equals(computedChecksum); - - if (!isValid) { - LOG.warn("Checksum verification failed for snapshot local data. " + - "Stored: {}, Computed: {}", storedChecksum, computedChecksum); - } - return isValid; - } - } - /** * Representer class to define which fields need to be stored in yaml file. 
*/ @@ -131,7 +77,7 @@ private static class OmSnapshotLocalDataRepresenter extends Representer { OmSnapshotLocalDataRepresenter(DumperOptions options) { super(options); - this.addClassTag(OmSnapshotLocalDataYaml.class, SNAPSHOT_YAML_TAG); + this.addClassTag(OmSnapshotLocalData.class, SNAPSHOT_YAML_TAG); this.addClassTag(VersionMeta.class, SNAPSHOT_VERSION_META_TAG); this.addClassTag(SstFileInfo.class, SST_FILE_INFO_TAG); representers.put(SstFileInfo.class, new RepresentSstFileInfo()); @@ -192,7 +138,7 @@ private static class SnapshotLocalDataConstructor extends SafeConstructor { this.yamlConstructors.put(SNAPSHOT_YAML_TAG, new ConstructSnapshotLocalData()); this.yamlConstructors.put(SNAPSHOT_VERSION_META_TAG, new ConstructVersionMeta()); this.yamlConstructors.put(SST_FILE_INFO_TAG, new ConstructSstFileInfo()); - TypeDescription omDesc = new TypeDescription(OmSnapshotLocalDataYaml.class); + TypeDescription omDesc = new TypeDescription(OmSnapshotLocalData.class); omDesc.putMapPropertyType(OzoneConsts.OM_SLD_VERSION_SST_FILE_INFO, Integer.class, VersionMeta.class); this.addTypeDescription(omDesc); TypeDescription versionMetaDesc = new TypeDescription(VersionMeta.class); @@ -231,7 +177,7 @@ public Object construct(Node node) { Map nodes = constructMapping(mnode); UUID snapId = UUID.fromString((String) nodes.get(OzoneConsts.OM_SLD_SNAP_ID)); UUID prevSnapId = UUID.fromString((String) nodes.get(OzoneConsts.OM_SLD_PREV_SNAP_ID)); - OmSnapshotLocalDataYaml snapshotLocalData = new OmSnapshotLocalDataYaml(snapId, Collections.emptyList(), + OmSnapshotLocalData snapshotLocalData = new OmSnapshotLocalData(snapId, Collections.emptyList(), prevSnapId); // Set version from YAML @@ -269,70 +215,6 @@ public Object construct(Node node) { } } - /** - * Returns the YAML representation of this object as a String - * (without triggering checksum computation or persistence). 
- * @return YAML string representation - */ - public String getYaml(OmSnapshotLocalDataManager snapshotLocalDataManager) throws IOException { - try (UncheckedAutoCloseableSupplier yaml = snapshotLocalDataManager.getSnapshotLocalYaml()) { - return yaml.get().dump(this); - } - } - - /** - * Computes checksum (stored in this object), and writes this object to a YAML file. - * @param yamlFile The file to write to - * @throws IOException If there's an error writing to the file - */ - public void writeToYaml(OmSnapshotLocalDataManager snapshotLocalDataManager, File yamlFile) throws IOException { - // Create Yaml - try (UncheckedAutoCloseableSupplier yaml = snapshotLocalDataManager.getSnapshotLocalYaml()) { - // Compute Checksum and update SnapshotData - computeAndSetChecksum(yaml.get()); - // Write the SnapshotData with checksum to Yaml file. - YamlUtils.dump(yaml.get(), this, yamlFile, LOG); - } - } - - /** - * Creates a OmSnapshotLocalDataYaml instance from a YAML file. - * @param yamlFile The YAML file to read from - * @return A new OmSnapshotLocalDataYaml instance - * @throws IOException If there's an error reading the file - */ - public static OmSnapshotLocalDataYaml getFromYamlFile(OmSnapshotLocalDataManager snapshotLocalDataManager, - File yamlFile) throws IOException { - Preconditions.checkNotNull(yamlFile, "yamlFile cannot be null"); - try (InputStream inputFileStream = Files.newInputStream(yamlFile.toPath())) { - return getFromYamlStream(snapshotLocalDataManager, inputFileStream); - } - } - - /** - * Read the YAML content InputStream, and return OmSnapshotLocalDataYaml instance. 
- * @throws IOException - */ - public static OmSnapshotLocalDataYaml getFromYamlStream(OmSnapshotLocalDataManager snapshotLocalDataManager, - InputStream input) throws IOException { - OmSnapshotLocalDataYaml dataYaml; - try (UncheckedAutoCloseableSupplier yaml = snapshotLocalDataManager.getSnapshotLocalYaml()) { - dataYaml = yaml.get().load(input); - } catch (YAMLException ex) { - // Unchecked exception. Convert to IOException - throw new IOException(ex); - } - - if (dataYaml == null) { - // If Yaml#load returned null, then the file is empty. This is valid yaml - // but considered an error in this case since we have lost data about - // the snapshot. - throw new IOException("Failed to load snapshot file. File is empty."); - } - - return dataYaml; - } - /** * Factory class for constructing and pooling instances of the Yaml object. * This class extends BasePooledObjectFactory to support object pooling, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index 19fe367bb923..ac59c43c0580 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -195,7 +195,7 @@ public final class OmSnapshotManager implements AutoCloseable { private int fsSnapshotMaxLimit; private final AtomicInteger inFlightSnapshotCount = new AtomicInteger(0); - public OmSnapshotManager(OzoneManager ozoneManager) { + public OmSnapshotManager(OzoneManager ozoneManager) throws IOException { this.snapshotLocalDataManager = new OmSnapshotLocalDataManager(ozoneManager.getMetadataManager()); boolean isFilesystemSnapshotEnabled = ozoneManager.isFilesystemSnapshotEnabled(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index fb6d7cf744a9..bd69a5f2b8c2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -19,6 +19,7 @@ import static org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml.YAML_FILE_EXTENSION; +import com.google.common.annotations.VisibleForTesting; import com.google.common.graph.GraphBuilder; import com.google.common.graph.MutableGraph; import java.io.File; @@ -28,13 +29,16 @@ import java.nio.file.Paths; import java.util.Objects; import java.util.UUID; -import org.apache.commons.pool2.impl.GenericObjectPool; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshotLocalData; import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; -import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; +import org.apache.hadoop.ozone.util.ObjectSerializer; +import org.apache.hadoop.ozone.util.YamlSerializer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.yaml.snakeyaml.Yaml; /** @@ -43,14 +47,23 @@ */ public class OmSnapshotLocalDataManager implements AutoCloseable { - private final GenericObjectPool yamlPool; + private static final Logger LOG = LoggerFactory.getLogger(OmSnapshotLocalDataManager.class); + + private final ObjectSerializer snapshotLocalDataSerializer; private final MutableGraph localDataGraph; private final OMMetadataManager omMetadataManager; - public OmSnapshotLocalDataManager(OMMetadataManager omMetadataManager) { - this.yamlPool = new GenericObjectPool(new OmSnapshotLocalDataYaml.YamlFactory()); + public 
OmSnapshotLocalDataManager(OMMetadataManager omMetadataManager) throws IOException { this.localDataGraph = GraphBuilder.directed().build(); this.omMetadataManager = omMetadataManager; + this.snapshotLocalDataSerializer = new YamlSerializer( + new OmSnapshotLocalDataYaml.YamlFactory()) { + + @Override + public void computeAndSetChecksum(Yaml yaml, OmSnapshotLocalData data) throws IOException { + data.computeAndSetChecksum(yaml); + } + }; init(); } @@ -84,30 +97,47 @@ public void createNewOmSnapshotLocalDataFile(RDBStore snapshotStore, SnapshotInf Path snapshotLocalDataPath = Paths.get( getSnapshotLocalPropertyYamlPath(snapshotStore.getDbLocation().toPath())); Files.deleteIfExists(snapshotLocalDataPath); - OmSnapshotLocalDataYaml snapshotLocalDataYaml = new OmSnapshotLocalDataYaml(snapshotInfo.getSnapshotId(), + OmSnapshotLocalData snapshotLocalDataYaml = new OmSnapshotLocalData(snapshotInfo.getSnapshotId(), OmSnapshotManager.getSnapshotSSTFileList(snapshotStore), snapshotInfo.getPathPreviousSnapshotId()); - snapshotLocalDataYaml.writeToYaml(this, snapshotLocalDataPath.toFile()); + snapshotLocalDataSerializer.save(snapshotLocalDataPath.toFile(), snapshotLocalDataYaml); + } + + public OmSnapshotLocalData getOmSnapshotLocalData(SnapshotInfo snapshotInfo) throws IOException { + Path snapshotLocalDataPath = Paths.get(getSnapshotLocalPropertyYamlPath(snapshotInfo)); + return snapshotLocalDataSerializer.load(snapshotLocalDataPath.toFile()); + } + + public OmSnapshotLocalData getOmSnapshotLocalData(File snapshotDataPath) throws IOException { + return snapshotLocalDataSerializer.load(snapshotDataPath); } - private void init() { + private void init() throws IOException { RDBStore store = (RDBStore) omMetadataManager.getStore(); String checkpointPrefix = store.getDbLocation().getName(); File snapshotDir = new File(store.getSnapshotsParentDir()); - for (File yamlFile : - Objects.requireNonNull(snapshotDir.listFiles( - (dir, name) -> name.startsWith(checkpointPrefix) && 
name.endsWith(YAML_FILE_EXTENSION)))) { + File[] yamlFiles = snapshotDir.listFiles( + (dir, name) -> name.startsWith(checkpointPrefix) && name.endsWith(YAML_FILE_EXTENSION)); + if (yamlFiles == null) { + throw new IOException("Error while listing yaml files inside directory: " + snapshotDir.getAbsolutePath()); + } + for (File yamlFile : yamlFiles) { System.out.println(yamlFile.getAbsolutePath()); } } + @Override public void close() { - if (yamlPool != null) { - yamlPool.close(); + if (snapshotLocalDataSerializer != null) { + try { + snapshotLocalDataSerializer.close(); + } catch (IOException e) { + LOG.error("Failed to close snapshot local data serializer", e); + } } } - private final class VersionLocalDataNode { + private static final class VersionLocalDataNode { private UUID snapshotId; private int version; private UUID previousSnapshotId; @@ -137,24 +167,4 @@ public int hashCode() { } } - public UncheckedAutoCloseableSupplier getSnapshotLocalYaml() throws IOException { - try { - Yaml yaml = yamlPool.borrowObject(); - return new UncheckedAutoCloseableSupplier() { - - @Override - public void close() { - yamlPool.returnObject(yaml); - } - - @Override - public Yaml get() { - return yaml; - } - }; - } catch (Exception e) { - throw new IOException("Failed to get snapshot local yaml", e); - } - } - } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java index 71933f8112c4..34435366781a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java @@ -45,8 +45,10 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OmSnapshotLocalData.VersionMeta; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; +import 
org.apache.hadoop.ozone.util.ObjectSerializer; +import org.apache.hadoop.ozone.util.YamlSerializer; import org.apache.ozone.compaction.log.SstFileInfo; -import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; +import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; @@ -60,26 +62,26 @@ public class TestOmSnapshotLocalDataYaml { private static String testRoot = new FileSystemTestHelper().getTestRootDir(); - private static OmSnapshotLocalDataManager snapshotLocalDataManager; - private static final Yaml YAML = new OmSnapshotLocalDataYaml.YamlFactory().create(); - private static final UncheckedAutoCloseableSupplier YAML_SUPPLIER = new UncheckedAutoCloseableSupplier() { - @Override - public Yaml get() { - return YAML; - } - - @Override - public void close() { - - } - }; + private static final OmSnapshotLocalDataYaml.YamlFactory yamlFactory = new OmSnapshotLocalDataYaml.YamlFactory(); + private static ObjectSerializer omSnapshotLocalDataSerializer; private static final Instant NOW = Instant.now(); @BeforeAll - public static void setupClassMocks() throws IOException { - snapshotLocalDataManager = mock(OmSnapshotLocalDataManager.class); - when(snapshotLocalDataManager.getSnapshotLocalYaml()).thenReturn(YAML_SUPPLIER); + public static void setupSerializer() throws IOException { + omSnapshotLocalDataSerializer = new YamlSerializer(yamlFactory) { + @Override + public void computeAndSetChecksum(Yaml yaml, OmSnapshotLocalData data) throws IOException { + data.computeAndSetChecksum(yaml); + } + }; + } + + @AfterAll + public static void cleanupSerializer() throws IOException { + if (omSnapshotLocalDataSerializer != null) { + omSnapshotLocalDataSerializer.close(); + } } @BeforeEach @@ -113,7 +115,7 @@ private Pair writeToYaml(UUID snapshotId, String snapshotName) throw createLiveFileMetaData("sst1", "table1", "k1", "k2"), createLiveFileMetaData("sst2", "table1", 
"k3", "k4"), createLiveFileMetaData("sst3", "table2", "k4", "k5")); - OmSnapshotLocalDataYaml dataYaml = new OmSnapshotLocalDataYaml(snapshotId, notDefraggedSSTFileList, + OmSnapshotLocalData dataYaml = new OmSnapshotLocalData(snapshotId, notDefraggedSSTFileList, previousSnapshotId); // Set version @@ -138,7 +140,7 @@ private Pair writeToYaml(UUID snapshotId, String snapshotName) throw File yamlFile = new File(testRoot, yamlFilePath); // Create YAML file with SnapshotData - dataYaml.writeToYaml(snapshotLocalDataManager, yamlFile); + omSnapshotLocalDataSerializer.save(yamlFile, dataYaml); // Check YAML file exists assertTrue(yamlFile.exists()); @@ -154,7 +156,7 @@ public void testWriteToYaml() throws IOException { UUID prevSnapId = yamlFilePrevIdPair.getRight(); // Read from YAML file - OmSnapshotLocalDataYaml snapshotData = OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, yamlFile); + OmSnapshotLocalData snapshotData = omSnapshotLocalDataSerializer.load(yamlFile); // Verify fields assertEquals(44, snapshotData.getVersion()); @@ -194,8 +196,8 @@ public void testUpdateSnapshotDataFile() throws IOException { Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot2"); File yamlFile = yamlFilePrevIdPair.getLeft(); // Read from YAML file - OmSnapshotLocalDataYaml dataYaml = - OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, yamlFile); + OmSnapshotLocalData dataYaml = + omSnapshotLocalDataSerializer.load(yamlFile); // Update snapshot data dataYaml.setSstFiltered(false); @@ -204,10 +206,10 @@ public void testUpdateSnapshotDataFile() throws IOException { singletonList(new SstFileInfo("defragged-sst4", "k5", "k6", "table3")), 5); // Write updated data back to file - dataYaml.writeToYaml(snapshotLocalDataManager, yamlFile); + omSnapshotLocalDataSerializer.save(yamlFile, dataYaml); // Read back the updated data - dataYaml = OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, yamlFile); + dataYaml = 
omSnapshotLocalDataSerializer.load(yamlFile); // Verify updated data assertThat(dataYaml.getSstFiltered()).isFalse(); @@ -225,10 +227,9 @@ public void testEmptyFile() throws IOException { File emptyFile = new File(testRoot, "empty.yaml"); assertTrue(emptyFile.createNewFile()); - IOException ex = assertThrows(IOException.class, () -> - OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, emptyFile)); + IOException ex = assertThrows(IOException.class, () -> omSnapshotLocalDataSerializer.load(emptyFile)); - assertThat(ex).hasMessageContaining("Failed to load snapshot file. File is empty."); + assertThat(ex).hasMessageContaining("Failed to load file. File is empty."); } @Test @@ -237,7 +238,7 @@ public void testChecksum() throws IOException { Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot3"); File yamlFile = yamlFilePrevIdPair.getLeft(); // Read from YAML file - OmSnapshotLocalDataYaml snapshotData = OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, yamlFile); + OmSnapshotLocalData snapshotData = omSnapshotLocalDataSerializer.load(yamlFile); // Get the original checksum String originalChecksum = snapshotData.getChecksum(); @@ -245,7 +246,7 @@ public void testChecksum() throws IOException { // Verify the checksum is not null or empty assertThat(originalChecksum).isNotNull().isNotEmpty(); - assertTrue(OmSnapshotLocalDataYaml.verifyChecksum(snapshotLocalDataManager, snapshotData)); + assertTrue(omSnapshotLocalDataSerializer.verifyChecksum(snapshotData)); } @Test diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java index 62f9561d2b83..7f808df3f978 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java @@ -324,8 +324,7 @@ public 
void testCreateNewSnapshotLocalYaml() throws IOException { assertTrue(Files.exists(snapshotYaml)); assertTrue(Files.size(snapshotYaml) > 0); // Verify the contents of the YAML file - OmSnapshotLocalData localData = OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, - snapshotYaml.toFile()); + OmSnapshotLocalData localData = snapshotLocalDataManager.getOmSnapshotLocalData(snapshotYaml.toFile()); assertNotNull(localData); assertEquals(0, localData.getVersion()); assertEquals(notDefraggedVersionMeta, localData.getVersionSstFileInfos().get(0)); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java index ce24040a3eab..2cafae138fd4 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java @@ -40,7 +40,6 @@ import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -51,13 +50,11 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateSnapshotResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.util.Time; -import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import 
org.junit.jupiter.params.provider.ValueSource; -import org.yaml.snakeyaml.Yaml; /** * This class tests OMSnapshotCreateResponse. @@ -77,26 +74,12 @@ public void setup() throws Exception { String fsPath = folder.getAbsolutePath(); ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, fsPath); - OmSnapshotLocalDataYaml.YamlFactory yamlFactory = new OmSnapshotLocalDataYaml.YamlFactory(); - Yaml yaml = yamlFactory.create(); - UncheckedAutoCloseableSupplier yamlSupplier = new UncheckedAutoCloseableSupplier() { - @Override - public Yaml get() { - return yaml; - } - - @Override - public void close() { - - } - }; OzoneManager ozoneManager = mock(OzoneManager.class); OmSnapshotManager omSnapshotManager = mock(OmSnapshotManager.class); OmSnapshotLocalDataManager snapshotLocalDataManager = mock(OmSnapshotLocalDataManager.class); when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); when(omSnapshotManager.getSnapshotLocalDataManager()).thenReturn(snapshotLocalDataManager); - when(snapshotLocalDataManager.getSnapshotLocalYaml()).thenReturn(yamlSupplier); omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, ozoneManager); batchOperation = omMetadataManager.getStore().initBatchOperation(); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java index f8d40951b2bf..2d5d7b2870f7 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java @@ -33,7 +33,6 @@ import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import 
org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; @@ -44,12 +43,10 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.hadoop.util.Time; -import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; -import org.yaml.snakeyaml.Yaml; /** * This class tests OMSnapshotDeleteResponse. @@ -69,26 +66,12 @@ public void setup() throws Exception { String fsPath = folder.toAbsolutePath().toString(); ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, fsPath); - OmSnapshotLocalDataYaml.YamlFactory yamlFactory = new OmSnapshotLocalDataYaml.YamlFactory(); - Yaml yaml = yamlFactory.create(); - UncheckedAutoCloseableSupplier yamlSupplier = new UncheckedAutoCloseableSupplier() { - @Override - public Yaml get() { - return yaml; - } - - @Override - public void close() { - - } - }; OzoneManager ozoneManager = mock(OzoneManager.class); OmSnapshotManager omSnapshotManager = mock(OmSnapshotManager.class); OmSnapshotLocalDataManager omSnapshotLocalDataManager = mock(OmSnapshotLocalDataManager.class); when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); when(omSnapshotManager.getSnapshotLocalDataManager()).thenReturn(omSnapshotLocalDataManager); - when(omSnapshotLocalDataManager.getSnapshotLocalYaml()).thenReturn(yamlSupplier); omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, ozoneManager); batchOperation = omMetadataManager.getStore().initBatchOperation(); } From 
4099bc687ae22d6d4d7be6680d543abd01f1fd61 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 8 Oct 2025 22:16:37 -0400 Subject: [PATCH 04/97] HDDS-13767. Refactor SnapshotLocalDataYaml related code into OmSnapshotLocalDataManager Change-Id: I34536ff06efb7d5a4942853f0fd83942ab398b5f --- hadoop-hdds/common/pom.xml | 4 + .../apache/hadoop/ozone/util/Checksum.java | 28 +++ .../hadoop/ozone/util/ObjectSerializer.java | 73 ++++++++ .../hadoop/ozone/util/YamlSerializer.java | 159 ++++++++++++++++++ .../hadoop/ozone/om/OmSnapshotLocalData.java | 8 +- .../ozone/om/OmSnapshotLocalDataYaml.java | 141 +--------------- .../snapshot/OmSnapshotLocalDataManager.java | 104 ++++-------- .../ozone/om/TestOmSnapshotLocalDataYaml.java | 60 +++---- .../ozone/om/TestOmSnapshotManager.java | 3 +- .../TestOMSnapshotCreateResponse.java | 17 -- .../TestOMSnapshotDeleteResponse.java | 17 -- 11 files changed, 338 insertions(+), 276 deletions(-) create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/Checksum.java create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ObjectSerializer.java create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/YamlSerializer.java diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml index 0216b808a7cf..f22aeda491ac 100644 --- a/hadoop-hdds/common/pom.xml +++ b/hadoop-hdds/common/pom.xml @@ -108,6 +108,10 @@ org.apache.commons commons-lang3 + + org.apache.commons + commons-pool2 + org.apache.hadoop hadoop-common diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/Checksum.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/Checksum.java new file mode 100644 index 000000000000..4d11bde5aef3 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/Checksum.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.util; + +import org.apache.hadoop.hdds.utils.db.CopyObject; + +/** + * Represents a generic interface for objects capable of generating or providing + * a checksum value. + */ +public interface Checksum> extends CopyObject { + String getChecksum(); +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ObjectSerializer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ObjectSerializer.java new file mode 100644 index 000000000000..b861ad93fdfb --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ObjectSerializer.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.util; + +import java.io.Closeable; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; + +/** + * Represents a generic interface for serialization and deserialization + * operations of objects that extend the {@link Checksum} interface. + * This interface provides functionality for loading and saving objects + * from/to files or input streams, as well as verifying checksum integrity. + * + * @param the type of the object handled by the serializer, must extend {@code Checksum} + */ +public interface ObjectSerializer extends Closeable { + + /** + * Loads an object of type T from the specified file. + * + * @param path the file from which the object will be loaded + * @return the object of type T that has been deserialized from the file + * @throws IOException if an I/O error occurs during reading from the file + */ + T load(File path) throws IOException; + + /** + * Loads an object of type T from the specified input stream. + * + * @param inputStream the input stream from which the object will be deserialized + * @return the deserialized object of type T + * @throws IOException if an I/O error occurs during reading from the input stream + */ + T load(InputStream inputStream) throws IOException; + + /** + * Serializes the given data object of type T and saves it to the specified file. 
+ * + * @param path the file where the serialized object will be saved + * @param data the object of type T to be serialized and saved + * @throws IOException if an I/O error occurs during writing to the file + */ + void save(File path, T data) throws IOException; + + /** + * Verifies the checksum of the provided data object of type T. + * + * @param data the object of type T whose checksum is to be verified + * @return true if the checksum of the data is valid, false otherwise + * @throws IOException if an I/O error occurs during verification + */ + boolean verifyChecksum(T data) throws IOException; + + @Override + void close() throws IOException; +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/YamlSerializer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/YamlSerializer.java new file mode 100644 index 000000000000..32aeb928f172 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/YamlSerializer.java @@ -0,0 +1,159 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.util; + +import com.google.common.base.Preconditions; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import org.apache.commons.pool2.BasePooledObjectFactory; +import org.apache.commons.pool2.impl.GenericObjectPool; +import org.apache.hadoop.hdds.server.YamlUtils; +import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.yaml.snakeyaml.Yaml; + +/** + * An abstract serializer for objects that extend the {@link Checksum} interface. + * This class provides mechanisms for serializing and deserializing objects + * in a YAML format. + */ +public abstract class YamlSerializer> implements ObjectSerializer { + + private static final Logger LOG = LoggerFactory.getLogger(YamlSerializer.class); + + private final GenericObjectPool yamlPool; + + public YamlSerializer(BasePooledObjectFactory yamlFactory) { + this.yamlPool = new GenericObjectPool<>(yamlFactory); + } + + private UncheckedAutoCloseableSupplier getYaml() throws IOException { + try { + Yaml yaml = yamlPool.borrowObject(); + return new UncheckedAutoCloseableSupplier() { + + @Override + public void close() { + yamlPool.returnObject(yaml); + } + + @Override + public Yaml get() { + return yaml; + } + }; + } catch (Exception e) { + throw new IOException("Failed to get yaml object.", e); + } + } + + /** + * {@inheritDoc} + */ + @Override + public T load(File yamlFile) throws IOException { + Preconditions.checkNotNull(yamlFile, "yamlFile cannot be null"); + try (InputStream inputFileStream = Files.newInputStream(yamlFile.toPath())) { + return load(inputFileStream); + } + } + + /** + * {@inheritDoc} + */ + @Override + public T load(InputStream input) throws IOException { + T dataYaml; + try (UncheckedAutoCloseableSupplier yaml = getYaml()) { + dataYaml = yaml.get().load(input); + } catch (Exception e) { + throw new IOException("Failed to load 
file", e); + } + + if (dataYaml == null) { + // If Yaml#load returned null, then the file is empty. This is valid yaml + // but considered an error in this case since we have lost data about + // the snapshot. + throw new IOException("Failed to load file. File is empty."); + } + + return dataYaml; + } + + /** + * {@inheritDoc} + */ + @Override + public boolean verifyChecksum(T data) throws IOException { + Preconditions.checkNotNull(data, "data cannot be null"); + + // Get the stored checksum + String storedChecksum = data.getChecksum(); + if (storedChecksum == null) { + LOG.warn("No checksum found in snapshot data for verification"); + return false; + } + + // Create a copy of the snapshot data for computing checksum + T copy = data.copyObject(); + + // Get the YAML representation + try (UncheckedAutoCloseableSupplier yaml = getYaml()) { + // Compute new checksum + computeAndSetChecksum(yaml.get(), copy); + + // Compare the stored and computed checksums + String computedChecksum = copy.getChecksum(); + boolean isValid = storedChecksum.equals(computedChecksum); + + if (!isValid) { + LOG.warn("Checksum verification failed for snapshot local data. " + + "Stored: {}, Computed: {}", storedChecksum, computedChecksum); + } + return isValid; + } + } + + /** + * {@inheritDoc} + */ + @Override + public void save(File yamlFile, T data) throws IOException { + // Create Yaml + try (UncheckedAutoCloseableSupplier yaml = getYaml()) { + // Compute Checksum and update SnapshotData + computeAndSetChecksum(yaml.get(), data); + // Write the object with checksum to Yaml file. 
+ YamlUtils.dump(yaml.get(), data, yamlFile, LOG); + } + } + + /** + * {@inheritDoc} + */ + @Override + public void close() { + yamlPool.close(); + } + + public abstract void computeAndSetChecksum(Yaml yaml, T data) throws IOException; + +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java index 5f65fd4c0d08..e82bad8832a5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java @@ -31,6 +31,7 @@ import java.util.stream.Collectors; import org.apache.commons.codec.digest.DigestUtils; import org.apache.hadoop.hdds.utils.db.CopyObject; +import org.apache.hadoop.ozone.util.Checksum; import org.apache.ozone.compaction.log.SstFileInfo; import org.rocksdb.LiveFileMetaData; import org.yaml.snakeyaml.Yaml; @@ -39,7 +40,7 @@ * OmSnapshotLocalData is the in-memory representation of snapshot local metadata. * Inspired by org.apache.hadoop.ozone.container.common.impl.ContainerData */ -public abstract class OmSnapshotLocalData { +public class OmSnapshotLocalData implements Checksum { // Unique identifier for the snapshot. This is used to identify the snapshot. private UUID snapshotId; @@ -258,6 +259,11 @@ public void setVersion(int version) { this.version = version; } + @Override + public OmSnapshotLocalData copyObject() { + return new OmSnapshotLocalData(this); + } + /** * Represents metadata for a specific version in a snapshot. 
* This class maintains the version of the previous snapshot and a list of SST (Sorted String Table) files diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java index a3683e11c16f..543c4c6397cc 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java @@ -17,11 +17,6 @@ package org.apache.hadoop.ozone.om; -import com.google.common.base.Preconditions; -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.nio.file.Files; import java.util.Collections; import java.util.List; import java.util.Map; @@ -29,21 +24,15 @@ import org.apache.commons.pool2.BasePooledObjectFactory; import org.apache.commons.pool2.PooledObject; import org.apache.commons.pool2.impl.DefaultPooledObject; -import org.apache.hadoop.hdds.server.YamlUtils; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; +import org.apache.hadoop.ozone.om.OmSnapshotLocalData.VersionMeta; import org.apache.ozone.compaction.log.SstFileInfo; -import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; -import org.rocksdb.LiveFileMetaData; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.yaml.snakeyaml.DumperOptions; import org.yaml.snakeyaml.LoaderOptions; import org.yaml.snakeyaml.TypeDescription; import org.yaml.snakeyaml.Yaml; import org.yaml.snakeyaml.constructor.AbstractConstruct; import org.yaml.snakeyaml.constructor.SafeConstructor; -import org.yaml.snakeyaml.error.YAMLException; import org.yaml.snakeyaml.introspector.BeanAccess; import org.yaml.snakeyaml.introspector.Property; import org.yaml.snakeyaml.introspector.PropertyUtils; @@ -60,68 +49,14 @@ * Checksum of the YAML 
fields are computed and stored in the YAML file transparently to callers. * Inspired by org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml */ -public final class OmSnapshotLocalDataYaml extends OmSnapshotLocalData { - - private static final Logger LOG = LoggerFactory.getLogger(OmSnapshotLocalDataYaml.class); +public final class OmSnapshotLocalDataYaml { public static final Tag SNAPSHOT_YAML_TAG = new Tag("OmSnapshotLocalData"); public static final Tag SNAPSHOT_VERSION_META_TAG = new Tag("VersionMeta"); public static final Tag SST_FILE_INFO_TAG = new Tag("SstFileInfo"); public static final String YAML_FILE_EXTENSION = ".yaml"; - /** - * Creates a new OmSnapshotLocalDataYaml with default values. - */ - public OmSnapshotLocalDataYaml(UUID snapshotId, List liveFileMetaDatas, UUID previousSnapshotId) { - super(snapshotId, liveFileMetaDatas, previousSnapshotId); - } - - /** - * Copy constructor to create a deep copy. - * @param source The source OmSnapshotLocalData to copy from - */ - public OmSnapshotLocalDataYaml(OmSnapshotLocalData source) { - super(source); - } - - /** - * Verifies the checksum of the snapshot data. 
- * @param snapshotData The snapshot data to verify - * @return true if the checksum is valid, false otherwise - * @throws IOException if there's an error computing the checksum - */ - public static boolean verifyChecksum(OmSnapshotLocalDataManager localDataManager, OmSnapshotLocalData snapshotData) - throws IOException { - Preconditions.checkNotNull(snapshotData, "snapshotData cannot be null"); - - // Get the stored checksum - String storedChecksum = snapshotData.getChecksum(); - if (storedChecksum == null) { - LOG.warn("No checksum found in snapshot data for verification"); - return false; - } - - // Create a copy of the snapshot data for computing checksum - OmSnapshotLocalDataYaml snapshotDataCopy = new OmSnapshotLocalDataYaml(snapshotData); - - // Clear the existing checksum in the copy - snapshotDataCopy.setChecksum(null); - - // Get the YAML representation - try (UncheckedAutoCloseableSupplier yaml = localDataManager.getSnapshotLocalYaml()) { - // Compute new checksum - snapshotDataCopy.computeAndSetChecksum(yaml.get()); - - // Compare the stored and computed checksums - String computedChecksum = snapshotDataCopy.getChecksum(); - boolean isValid = storedChecksum.equals(computedChecksum); - - if (!isValid) { - LOG.warn("Checksum verification failed for snapshot local data. 
" + - "Stored: {}, Computed: {}", storedChecksum, computedChecksum); - } - return isValid; - } + private OmSnapshotLocalDataYaml() { } /** @@ -131,7 +66,7 @@ private static class OmSnapshotLocalDataRepresenter extends Representer { OmSnapshotLocalDataRepresenter(DumperOptions options) { super(options); - this.addClassTag(OmSnapshotLocalDataYaml.class, SNAPSHOT_YAML_TAG); + this.addClassTag(OmSnapshotLocalData.class, SNAPSHOT_YAML_TAG); this.addClassTag(VersionMeta.class, SNAPSHOT_VERSION_META_TAG); this.addClassTag(SstFileInfo.class, SST_FILE_INFO_TAG); representers.put(SstFileInfo.class, new RepresentSstFileInfo()); @@ -192,7 +127,7 @@ private static class SnapshotLocalDataConstructor extends SafeConstructor { this.yamlConstructors.put(SNAPSHOT_YAML_TAG, new ConstructSnapshotLocalData()); this.yamlConstructors.put(SNAPSHOT_VERSION_META_TAG, new ConstructVersionMeta()); this.yamlConstructors.put(SST_FILE_INFO_TAG, new ConstructSstFileInfo()); - TypeDescription omDesc = new TypeDescription(OmSnapshotLocalDataYaml.class); + TypeDescription omDesc = new TypeDescription(OmSnapshotLocalData.class); omDesc.putMapPropertyType(OzoneConsts.OM_SLD_VERSION_SST_FILE_INFO, Integer.class, VersionMeta.class); this.addTypeDescription(omDesc); TypeDescription versionMetaDesc = new TypeDescription(VersionMeta.class); @@ -231,7 +166,7 @@ public Object construct(Node node) { Map nodes = constructMapping(mnode); UUID snapId = UUID.fromString((String) nodes.get(OzoneConsts.OM_SLD_SNAP_ID)); UUID prevSnapId = UUID.fromString((String) nodes.get(OzoneConsts.OM_SLD_PREV_SNAP_ID)); - OmSnapshotLocalDataYaml snapshotLocalData = new OmSnapshotLocalDataYaml(snapId, Collections.emptyList(), + OmSnapshotLocalData snapshotLocalData = new OmSnapshotLocalData(snapId, Collections.emptyList(), prevSnapId); // Set version from YAML @@ -269,70 +204,6 @@ public Object construct(Node node) { } } - /** - * Returns the YAML representation of this object as a String - * (without triggering checksum 
computation or persistence). - * @return YAML string representation - */ - public String getYaml(OmSnapshotLocalDataManager snapshotLocalDataManager) throws IOException { - try (UncheckedAutoCloseableSupplier yaml = snapshotLocalDataManager.getSnapshotLocalYaml()) { - return yaml.get().dump(this); - } - } - - /** - * Computes checksum (stored in this object), and writes this object to a YAML file. - * @param yamlFile The file to write to - * @throws IOException If there's an error writing to the file - */ - public void writeToYaml(OmSnapshotLocalDataManager snapshotLocalDataManager, File yamlFile) throws IOException { - // Create Yaml - try (UncheckedAutoCloseableSupplier yaml = snapshotLocalDataManager.getSnapshotLocalYaml()) { - // Compute Checksum and update SnapshotData - computeAndSetChecksum(yaml.get()); - // Write the SnapshotData with checksum to Yaml file. - YamlUtils.dump(yaml.get(), this, yamlFile, LOG); - } - } - - /** - * Creates a OmSnapshotLocalDataYaml instance from a YAML file. - * @param yamlFile The YAML file to read from - * @return A new OmSnapshotLocalDataYaml instance - * @throws IOException If there's an error reading the file - */ - public static OmSnapshotLocalDataYaml getFromYamlFile(OmSnapshotLocalDataManager snapshotLocalDataManager, - File yamlFile) throws IOException { - Preconditions.checkNotNull(yamlFile, "yamlFile cannot be null"); - try (InputStream inputFileStream = Files.newInputStream(yamlFile.toPath())) { - return getFromYamlStream(snapshotLocalDataManager, inputFileStream); - } - } - - /** - * Read the YAML content InputStream, and return OmSnapshotLocalDataYaml instance. 
- * @throws IOException - */ - public static OmSnapshotLocalDataYaml getFromYamlStream(OmSnapshotLocalDataManager snapshotLocalDataManager, - InputStream input) throws IOException { - OmSnapshotLocalDataYaml dataYaml; - try (UncheckedAutoCloseableSupplier yaml = snapshotLocalDataManager.getSnapshotLocalYaml()) { - dataYaml = yaml.get().load(input); - } catch (YAMLException ex) { - // Unchecked exception. Convert to IOException - throw new IOException(ex); - } - - if (dataYaml == null) { - // If Yaml#load returned null, then the file is empty. This is valid yaml - // but considered an error in this case since we have lost data about - // the snapshot. - throw new IOException("Failed to load snapshot file. File is empty."); - } - - return dataYaml; - } - /** * Factory class for constructing and pooling instances of the Yaml object. * This class extends BasePooledObjectFactory to support object pooling, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index fb6d7cf744a9..98536444a61c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -19,22 +19,21 @@ import static org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml.YAML_FILE_EXTENSION; -import com.google.common.graph.GraphBuilder; -import com.google.common.graph.MutableGraph; import java.io.File; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; -import java.util.Objects; -import java.util.UUID; -import org.apache.commons.pool2.impl.GenericObjectPool; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.ozone.om.OMMetadataManager; +import 
org.apache.hadoop.ozone.om.OmSnapshotLocalData; import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; -import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; +import org.apache.hadoop.ozone.util.ObjectSerializer; +import org.apache.hadoop.ozone.util.YamlSerializer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.yaml.snakeyaml.Yaml; /** @@ -43,15 +42,21 @@ */ public class OmSnapshotLocalDataManager implements AutoCloseable { - private final GenericObjectPool yamlPool; - private final MutableGraph localDataGraph; + private static final Logger LOG = LoggerFactory.getLogger(OmSnapshotLocalDataManager.class); + + private final ObjectSerializer snapshotLocalDataSerializer; private final OMMetadataManager omMetadataManager; public OmSnapshotLocalDataManager(OMMetadataManager omMetadataManager) { - this.yamlPool = new GenericObjectPool(new OmSnapshotLocalDataYaml.YamlFactory()); - this.localDataGraph = GraphBuilder.directed().build(); this.omMetadataManager = omMetadataManager; - init(); + this.snapshotLocalDataSerializer = new YamlSerializer( + new OmSnapshotLocalDataYaml.YamlFactory()) { + + @Override + public void computeAndSetChecksum(Yaml yaml, OmSnapshotLocalData data) throws IOException { + data.computeAndSetChecksum(yaml); + } + }; } /** @@ -84,77 +89,28 @@ public void createNewOmSnapshotLocalDataFile(RDBStore snapshotStore, SnapshotInf Path snapshotLocalDataPath = Paths.get( getSnapshotLocalPropertyYamlPath(snapshotStore.getDbLocation().toPath())); Files.deleteIfExists(snapshotLocalDataPath); - OmSnapshotLocalDataYaml snapshotLocalDataYaml = new OmSnapshotLocalDataYaml(snapshotInfo.getSnapshotId(), + OmSnapshotLocalData snapshotLocalDataYaml = new OmSnapshotLocalData(snapshotInfo.getSnapshotId(), OmSnapshotManager.getSnapshotSSTFileList(snapshotStore), snapshotInfo.getPathPreviousSnapshotId()); - 
snapshotLocalDataYaml.writeToYaml(this, snapshotLocalDataPath.toFile()); + snapshotLocalDataSerializer.save(snapshotLocalDataPath.toFile(), snapshotLocalDataYaml); } - private void init() { - RDBStore store = (RDBStore) omMetadataManager.getStore(); - String checkpointPrefix = store.getDbLocation().getName(); - File snapshotDir = new File(store.getSnapshotsParentDir()); - for (File yamlFile : - Objects.requireNonNull(snapshotDir.listFiles( - (dir, name) -> name.startsWith(checkpointPrefix) && name.endsWith(YAML_FILE_EXTENSION)))) { - System.out.println(yamlFile.getAbsolutePath()); - } + public OmSnapshotLocalData getOmSnapshotLocalData(SnapshotInfo snapshotInfo) throws IOException { + Path snapshotLocalDataPath = Paths.get(getSnapshotLocalPropertyYamlPath(snapshotInfo)); + return snapshotLocalDataSerializer.load(snapshotLocalDataPath.toFile()); } - @Override - public void close() { - if (yamlPool != null) { - yamlPool.close(); - } + public OmSnapshotLocalData getOmSnapshotLocalData(File snapshotDataPath) throws IOException { + return snapshotLocalDataSerializer.load(snapshotDataPath); } - private final class VersionLocalDataNode { - private UUID snapshotId; - private int version; - private UUID previousSnapshotId; - private int previousSnapshotVersion; - - private VersionLocalDataNode(UUID snapshotId, int version, UUID previousSnapshotId, int previousSnapshotVersion) { - this.previousSnapshotId = previousSnapshotId; - this.previousSnapshotVersion = previousSnapshotVersion; - this.snapshotId = snapshotId; - this.version = version; - } - - @Override - public boolean equals(Object o) { - if (!(o instanceof VersionLocalDataNode)) { - return false; + @Override + public void close() { + if (snapshotLocalDataSerializer != null) { + try { + snapshotLocalDataSerializer.close(); + } catch (IOException e) { + LOG.error("Failed to close snapshot local data serializer", e); } - - VersionLocalDataNode that = (VersionLocalDataNode) o; - return version == that.version && 
previousSnapshotVersion == that.previousSnapshotVersion && - snapshotId.equals(that.snapshotId) && Objects.equals(previousSnapshotId, that.previousSnapshotId); - } - - @Override - public int hashCode() { - return Objects.hash(snapshotId, version, previousSnapshotId, previousSnapshotVersion); - } - } - - public UncheckedAutoCloseableSupplier getSnapshotLocalYaml() throws IOException { - try { - Yaml yaml = yamlPool.borrowObject(); - return new UncheckedAutoCloseableSupplier() { - - @Override - public void close() { - yamlPool.returnObject(yaml); - } - - @Override - public Yaml get() { - return yaml; - } - }; - } catch (Exception e) { - throw new IOException("Failed to get snapshot local yaml", e); } } - } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java index 71933f8112c4..23d332ae75b9 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java @@ -44,9 +44,10 @@ import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OmSnapshotLocalData.VersionMeta; -import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; +import org.apache.hadoop.ozone.util.ObjectSerializer; +import org.apache.hadoop.ozone.util.YamlSerializer; import org.apache.ozone.compaction.log.SstFileInfo; -import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; +import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; @@ -60,26 +61,26 @@ public class TestOmSnapshotLocalDataYaml { private static String testRoot = new FileSystemTestHelper().getTestRootDir(); - private static 
OmSnapshotLocalDataManager snapshotLocalDataManager; - private static final Yaml YAML = new OmSnapshotLocalDataYaml.YamlFactory().create(); - private static final UncheckedAutoCloseableSupplier YAML_SUPPLIER = new UncheckedAutoCloseableSupplier() { - @Override - public Yaml get() { - return YAML; - } - - @Override - public void close() { - - } - }; + private static final OmSnapshotLocalDataYaml.YamlFactory YAML_FACTORY = new OmSnapshotLocalDataYaml.YamlFactory(); + private static ObjectSerializer omSnapshotLocalDataSerializer; private static final Instant NOW = Instant.now(); @BeforeAll - public static void setupClassMocks() throws IOException { - snapshotLocalDataManager = mock(OmSnapshotLocalDataManager.class); - when(snapshotLocalDataManager.getSnapshotLocalYaml()).thenReturn(YAML_SUPPLIER); + public static void setupSerializer() throws IOException { + omSnapshotLocalDataSerializer = new YamlSerializer(YAML_FACTORY) { + @Override + public void computeAndSetChecksum(Yaml yaml, OmSnapshotLocalData data) throws IOException { + data.computeAndSetChecksum(yaml); + } + }; + } + + @AfterAll + public static void cleanupSerializer() throws IOException { + if (omSnapshotLocalDataSerializer != null) { + omSnapshotLocalDataSerializer.close(); + } } @BeforeEach @@ -113,7 +114,7 @@ private Pair writeToYaml(UUID snapshotId, String snapshotName) throw createLiveFileMetaData("sst1", "table1", "k1", "k2"), createLiveFileMetaData("sst2", "table1", "k3", "k4"), createLiveFileMetaData("sst3", "table2", "k4", "k5")); - OmSnapshotLocalDataYaml dataYaml = new OmSnapshotLocalDataYaml(snapshotId, notDefraggedSSTFileList, + OmSnapshotLocalData dataYaml = new OmSnapshotLocalData(snapshotId, notDefraggedSSTFileList, previousSnapshotId); // Set version @@ -138,7 +139,7 @@ private Pair writeToYaml(UUID snapshotId, String snapshotName) throw File yamlFile = new File(testRoot, yamlFilePath); // Create YAML file with SnapshotData - dataYaml.writeToYaml(snapshotLocalDataManager, yamlFile); + 
omSnapshotLocalDataSerializer.save(yamlFile, dataYaml); // Check YAML file exists assertTrue(yamlFile.exists()); @@ -154,7 +155,7 @@ public void testWriteToYaml() throws IOException { UUID prevSnapId = yamlFilePrevIdPair.getRight(); // Read from YAML file - OmSnapshotLocalDataYaml snapshotData = OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, yamlFile); + OmSnapshotLocalData snapshotData = omSnapshotLocalDataSerializer.load(yamlFile); // Verify fields assertEquals(44, snapshotData.getVersion()); @@ -194,8 +195,8 @@ public void testUpdateSnapshotDataFile() throws IOException { Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot2"); File yamlFile = yamlFilePrevIdPair.getLeft(); // Read from YAML file - OmSnapshotLocalDataYaml dataYaml = - OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, yamlFile); + OmSnapshotLocalData dataYaml = + omSnapshotLocalDataSerializer.load(yamlFile); // Update snapshot data dataYaml.setSstFiltered(false); @@ -204,10 +205,10 @@ public void testUpdateSnapshotDataFile() throws IOException { singletonList(new SstFileInfo("defragged-sst4", "k5", "k6", "table3")), 5); // Write updated data back to file - dataYaml.writeToYaml(snapshotLocalDataManager, yamlFile); + omSnapshotLocalDataSerializer.save(yamlFile, dataYaml); // Read back the updated data - dataYaml = OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, yamlFile); + dataYaml = omSnapshotLocalDataSerializer.load(yamlFile); // Verify updated data assertThat(dataYaml.getSstFiltered()).isFalse(); @@ -225,10 +226,9 @@ public void testEmptyFile() throws IOException { File emptyFile = new File(testRoot, "empty.yaml"); assertTrue(emptyFile.createNewFile()); - IOException ex = assertThrows(IOException.class, () -> - OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, emptyFile)); + IOException ex = assertThrows(IOException.class, () -> omSnapshotLocalDataSerializer.load(emptyFile)); - assertThat(ex).hasMessageContaining("Failed 
to load snapshot file. File is empty."); + assertThat(ex).hasMessageContaining("Failed to load file. File is empty."); } @Test @@ -237,7 +237,7 @@ public void testChecksum() throws IOException { Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot3"); File yamlFile = yamlFilePrevIdPair.getLeft(); // Read from YAML file - OmSnapshotLocalDataYaml snapshotData = OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, yamlFile); + OmSnapshotLocalData snapshotData = omSnapshotLocalDataSerializer.load(yamlFile); // Get the original checksum String originalChecksum = snapshotData.getChecksum(); @@ -245,7 +245,7 @@ public void testChecksum() throws IOException { // Verify the checksum is not null or empty assertThat(originalChecksum).isNotNull().isNotEmpty(); - assertTrue(OmSnapshotLocalDataYaml.verifyChecksum(snapshotLocalDataManager, snapshotData)); + assertTrue(omSnapshotLocalDataSerializer.verifyChecksum(snapshotData)); } @Test diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java index 62f9561d2b83..7f808df3f978 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java @@ -324,8 +324,7 @@ public void testCreateNewSnapshotLocalYaml() throws IOException { assertTrue(Files.exists(snapshotYaml)); assertTrue(Files.size(snapshotYaml) > 0); // Verify the contents of the YAML file - OmSnapshotLocalData localData = OmSnapshotLocalDataYaml.getFromYamlFile(snapshotLocalDataManager, - snapshotYaml.toFile()); + OmSnapshotLocalData localData = snapshotLocalDataManager.getOmSnapshotLocalData(snapshotYaml.toFile()); assertNotNull(localData); assertEquals(0, localData.getVersion()); assertEquals(notDefraggedVersionMeta, localData.getVersionSstFileInfos().get(0)); diff 
--git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java index ce24040a3eab..2cafae138fd4 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java @@ -40,7 +40,6 @@ import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -51,13 +50,11 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateSnapshotResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.util.Time; -import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; -import org.yaml.snakeyaml.Yaml; /** * This class tests OMSnapshotCreateResponse. 
@@ -77,26 +74,12 @@ public void setup() throws Exception { String fsPath = folder.getAbsolutePath(); ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, fsPath); - OmSnapshotLocalDataYaml.YamlFactory yamlFactory = new OmSnapshotLocalDataYaml.YamlFactory(); - Yaml yaml = yamlFactory.create(); - UncheckedAutoCloseableSupplier yamlSupplier = new UncheckedAutoCloseableSupplier() { - @Override - public Yaml get() { - return yaml; - } - - @Override - public void close() { - - } - }; OzoneManager ozoneManager = mock(OzoneManager.class); OmSnapshotManager omSnapshotManager = mock(OmSnapshotManager.class); OmSnapshotLocalDataManager snapshotLocalDataManager = mock(OmSnapshotLocalDataManager.class); when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); when(omSnapshotManager.getSnapshotLocalDataManager()).thenReturn(snapshotLocalDataManager); - when(snapshotLocalDataManager.getSnapshotLocalYaml()).thenReturn(yamlSupplier); omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, ozoneManager); batchOperation = omMetadataManager.getStore().initBatchOperation(); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java index f8d40951b2bf..2d5d7b2870f7 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java @@ -33,7 +33,6 @@ import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; -import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; import 
org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; @@ -44,12 +43,10 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.hadoop.util.Time; -import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; -import org.yaml.snakeyaml.Yaml; /** * This class tests OMSnapshotDeleteResponse. @@ -69,26 +66,12 @@ public void setup() throws Exception { String fsPath = folder.toAbsolutePath().toString(); ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, fsPath); - OmSnapshotLocalDataYaml.YamlFactory yamlFactory = new OmSnapshotLocalDataYaml.YamlFactory(); - Yaml yaml = yamlFactory.create(); - UncheckedAutoCloseableSupplier yamlSupplier = new UncheckedAutoCloseableSupplier() { - @Override - public Yaml get() { - return yaml; - } - - @Override - public void close() { - - } - }; OzoneManager ozoneManager = mock(OzoneManager.class); OmSnapshotManager omSnapshotManager = mock(OmSnapshotManager.class); OmSnapshotLocalDataManager omSnapshotLocalDataManager = mock(OmSnapshotLocalDataManager.class); when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); when(ozoneManager.getOmSnapshotManager()).thenReturn(omSnapshotManager); when(omSnapshotManager.getSnapshotLocalDataManager()).thenReturn(omSnapshotLocalDataManager); - when(omSnapshotLocalDataManager.getSnapshotLocalYaml()).thenReturn(yamlSupplier); omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, ozoneManager); batchOperation = omMetadataManager.getStore().initBatchOperation(); } From e02670c2cb18d98a4cd60a3690ca6a7a358afe5f Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 8 Oct 2025 22:37:30 -0400 
Subject: [PATCH 05/97] HDDS-13767. Fix pmd Change-Id: I32bcaf2a1fb290f1790c02872a0230cd65586636 --- .../java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java | 1 + 1 file changed, 1 insertion(+) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java index e82bad8832a5..5af678f903fb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java @@ -194,6 +194,7 @@ public void addVersionSSTFileInfos(List sstFiles, int previousSnaps * Returns the checksum of the YAML representation. * @return checksum */ + @Override public String getChecksum() { return checksum; } From 79580e9359f367e01fe33930369e0de2feb6edaf Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 8 Oct 2025 22:50:57 -0400 Subject: [PATCH 06/97] HDDS-13627. 
Fix checkstyle Change-Id: I985170e38fb8beeb784048e85a08a4c79e1aec97 --- .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 01d33a914556..1bfb0d30705e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -19,7 +19,6 @@ import static org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml.YAML_FILE_EXTENSION; -import com.google.common.annotations.VisibleForTesting; import com.google.common.graph.GraphBuilder; import com.google.common.graph.MutableGraph; import java.io.File; From afbc5928d74a72622dbf9089cacd4fb4364e4dac Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 10 Oct 2025 14:15:13 -0400 Subject: [PATCH 07/97] HDDS-13627. 
Add tests Change-Id: Id3f2c49050bc3476b9e0f5f51dacb6d9acc4c2f7 --- .../ozone/om/OmSnapshotLocalDataYaml.java | 3 +- .../hadoop/ozone/om/OmSnapshotManager.java | 7 + .../snapshot/OmSnapshotLocalDataManager.java | 124 +++++- .../TestOmSnapshotLocalDataManager.java | 372 ++++++++++++++++++ 4 files changed, 492 insertions(+), 14 deletions(-) create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java index 543c4c6397cc..a9e8266fbf89 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java @@ -165,7 +165,8 @@ public Object construct(Node node) { MappingNode mnode = (MappingNode) node; Map nodes = constructMapping(mnode); UUID snapId = UUID.fromString((String) nodes.get(OzoneConsts.OM_SLD_SNAP_ID)); - UUID prevSnapId = UUID.fromString((String) nodes.get(OzoneConsts.OM_SLD_PREV_SNAP_ID)); + String prevNodeStr = (String) nodes.get(OzoneConsts.OM_SLD_PREV_SNAP_ID); + UUID prevSnapId = prevNodeStr == null ? 
null : UUID.fromString(prevNodeStr); OmSnapshotLocalData snapshotLocalData = new OmSnapshotLocalData(snapId, Collections.emptyList(), prevSnapId); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index ac59c43c0580..7b9beb80cf6f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -803,6 +803,13 @@ public static Path getSnapshotPath(OMMetadataManager omMetadataManager, Snapshot checkpointPrefix + snapshotInfo.getCheckpointDir()); } + public static Path getSnapshotPath(OMMetadataManager omMetadataManager, UUID snapshotId) { + RDBStore store = (RDBStore) omMetadataManager.getStore(); + String checkpointPrefix = store.getDbLocation().getName(); + return Paths.get(store.getSnapshotsParentDir(), + checkpointPrefix + SnapshotInfo.getCheckpointDirName(snapshotId)); + } + public static String getSnapshotPath(OzoneConfiguration conf, SnapshotInfo snapshotInfo) { return getSnapshotPath(conf, snapshotInfo.getCheckpointDirName()); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 1bfb0d30705e..c01b77189e3b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -19,6 +19,7 @@ import static org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml.YAML_FILE_EXTENSION; +import com.google.common.annotations.VisibleForTesting; import com.google.common.graph.GraphBuilder; import com.google.common.graph.MutableGraph; import 
java.io.File; @@ -26,8 +27,19 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; import java.util.Objects; +import java.util.Set; +import java.util.Stack; import java.util.UUID; +import java.util.stream.Collectors; +import org.apache.commons.lang3.tuple.Triple; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshotLocalData; @@ -49,7 +61,8 @@ public class OmSnapshotLocalDataManager implements AutoCloseable { private static final Logger LOG = LoggerFactory.getLogger(OmSnapshotLocalDataManager.class); private final ObjectSerializer snapshotLocalDataSerializer; - private final MutableGraph localDataGraph; + private final MutableGraph localDataGraph; + private final Map> versionNodeMap; private final OMMetadataManager omMetadataManager; public OmSnapshotLocalDataManager(OMMetadataManager omMetadataManager) throws IOException { @@ -63,9 +76,15 @@ public void computeAndSetChecksum(Yaml yaml, OmSnapshotLocalData data) throws IO data.computeAndSetChecksum(yaml); } }; + this.versionNodeMap = new HashMap<>(); init(); } + @VisibleForTesting + Map> getVersionNodeMap() { + return versionNodeMap; + } + /** * Returns the path to the YAML file that stores local properties for the given snapshot. 
* @@ -83,7 +102,11 @@ public static String getSnapshotLocalPropertyYamlPath(Path snapshotPath) { * @return the path to the snapshot's local property YAML file */ public String getSnapshotLocalPropertyYamlPath(SnapshotInfo snapshotInfo) { - Path snapshotPath = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotInfo); + return getSnapshotLocalPropertyYamlPath(snapshotInfo.getSnapshotId()); + } + + public String getSnapshotLocalPropertyYamlPath(UUID snapshotId) { + Path snapshotPath = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotId); return getSnapshotLocalPropertyYamlPath(snapshotPath); } @@ -102,25 +125,101 @@ public void createNewOmSnapshotLocalDataFile(RDBStore snapshotStore, SnapshotInf } public OmSnapshotLocalData getOmSnapshotLocalData(SnapshotInfo snapshotInfo) throws IOException { - Path snapshotLocalDataPath = Paths.get(getSnapshotLocalPropertyYamlPath(snapshotInfo)); - return snapshotLocalDataSerializer.load(snapshotLocalDataPath.toFile()); + return getOmSnapshotLocalData(snapshotInfo.getSnapshotId()); + } + + public OmSnapshotLocalData getOmSnapshotLocalData(UUID snapshotId) throws IOException { + Path snapshotLocalDataPath = Paths.get(getSnapshotLocalPropertyYamlPath(snapshotId)); + OmSnapshotLocalData snapshotLocalData = snapshotLocalDataSerializer.load(snapshotLocalDataPath.toFile()); + if (!Objects.equals(snapshotLocalData.getSnapshotId(), snapshotId)) { + throw new IOException("SnapshotId in path : " + snapshotLocalDataPath + " contains snapshotLocalData " + + "corresponding to snapshotId " + snapshotLocalData.getSnapshotId() + ". 
Expected snapshotId " + snapshotId); + } + return snapshotLocalData; } public OmSnapshotLocalData getOmSnapshotLocalData(File snapshotDataPath) throws IOException { return snapshotLocalDataSerializer.load(snapshotDataPath); } + private LocalDataVersionNode getVersionNode(UUID snapshotId, int version) { + return versionNodeMap.getOrDefault(snapshotId, Collections.emptyMap()).get(version); + } + + private void addVersionNode(LocalDataVersionNode versionNode) throws IOException { + if (getVersionNode(versionNode.snapshotId, versionNode.version) == null) { + LocalDataVersionNode previousVersionNode = versionNode.previousSnapshotId == null ? null : + getVersionNode(versionNode.previousSnapshotId, versionNode.previousSnapshotVersion); + if (versionNode.previousSnapshotId != null && previousVersionNode == null) { + throw new IOException("Unable to add " + versionNode + " since previous snapshot with version hasn't been " + + "loaded"); + } + localDataGraph.addNode(versionNode); + if (previousVersionNode != null) { + localDataGraph.putEdge(versionNode, previousVersionNode); + } + versionNodeMap.computeIfAbsent(versionNode.snapshotId, k -> new HashMap<>()) + .put(versionNode.version, versionNode); + } + } + + private List getVersionNodes(OmSnapshotLocalData snapshotLocalData) throws IOException { + UUID snapshotId = snapshotLocalData.getSnapshotId(); + UUID previousSnapshotId = snapshotLocalData.getPreviousSnapshotId(); + return snapshotLocalData.getVersionSstFileInfos().entrySet().stream() + .map(entry -> new LocalDataVersionNode(snapshotId, entry.getKey(), + previousSnapshotId, entry.getValue().getPreviousSnapshotVersion())).collect(Collectors.toList()); + } + + public void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) throws IOException { + if (versionNodeMap.containsKey(snapshotLocalData.getSnapshotId())) { + return; + } + Set visitedSnapshotIds = new HashSet<>(); + Stack>> stack = new Stack<>(); + 
stack.push(Triple.of(snapshotLocalData.getSnapshotId(), snapshotLocalData.getPreviousSnapshotId(), + getVersionNodes(snapshotLocalData))); + while (!stack.isEmpty()) { + Triple> versionNodeToProcess = stack.peek(); + UUID snapId = versionNodeToProcess.getLeft(); + UUID prevSnapId = versionNodeToProcess.getMiddle(); + List versionNodes = versionNodeToProcess.getRight(); + if (visitedSnapshotIds.contains(snapId)) { + for (LocalDataVersionNode versionNode : versionNodes) { + addVersionNode(versionNode); + } + stack.pop(); + } else { + if (prevSnapId != null && !versionNodeMap.containsKey(prevSnapId)) { + OmSnapshotLocalData prevSnapshotLocalData = getOmSnapshotLocalData(prevSnapId); + stack.push(Triple.of(prevSnapshotLocalData.getSnapshotId(), prevSnapshotLocalData.getPreviousSnapshotId(), + getVersionNodes(prevSnapshotLocalData))); + } + visitedSnapshotIds.add(snapId); + } + } + } + private void init() throws IOException { RDBStore store = (RDBStore) omMetadataManager.getStore(); String checkpointPrefix = store.getDbLocation().getName(); File snapshotDir = new File(store.getSnapshotsParentDir()); - File[] yamlFiles = snapshotDir.listFiles( + File[] localDataFiles = snapshotDir.listFiles( (dir, name) -> name.startsWith(checkpointPrefix) && name.endsWith(YAML_FILE_EXTENSION)); - if (yamlFiles == null) { + if (localDataFiles == null) { throw new IOException("Error while listing yaml files inside directory: " + snapshotDir.getAbsolutePath()); } - for (File yamlFile : yamlFiles) { - System.out.println(yamlFile.getAbsolutePath()); + Arrays.sort(localDataFiles, Comparator.comparing(File::getName)); + for (File localDataFile : localDataFiles) { + OmSnapshotLocalData snapshotLocalData = snapshotLocalDataSerializer.load(localDataFile); + File file = new File(getSnapshotLocalPropertyYamlPath(snapshotLocalData.getSnapshotId())); + String expectedPath = file.getAbsolutePath(); + String actualPath = localDataFile.getAbsolutePath(); + if (!expectedPath.equals(actualPath)) { + throw 
new IOException("Unexpected path for local data file with snapshotId:" + snapshotLocalData.getSnapshotId() + + " : " + actualPath + ". " + "Expected: " + expectedPath); + } + addVersionNodeWithDependents(snapshotLocalData); } } @@ -135,13 +234,13 @@ public void close() { } } - private static final class VersionLocalDataNode { + static final class LocalDataVersionNode { private UUID snapshotId; private int version; private UUID previousSnapshotId; private int previousSnapshotVersion; - private VersionLocalDataNode(UUID snapshotId, int version, UUID previousSnapshotId, int previousSnapshotVersion) { + private LocalDataVersionNode(UUID snapshotId, int version, UUID previousSnapshotId, int previousSnapshotVersion) { this.previousSnapshotId = previousSnapshotId; this.previousSnapshotVersion = previousSnapshotVersion; this.snapshotId = snapshotId; @@ -150,11 +249,11 @@ private VersionLocalDataNode(UUID snapshotId, int version, UUID previousSnapshot @Override public boolean equals(Object o) { - if (!(o instanceof VersionLocalDataNode)) { + if (!(o instanceof LocalDataVersionNode)) { return false; } - VersionLocalDataNode that = (VersionLocalDataNode) o; + LocalDataVersionNode that = (LocalDataVersionNode) o; return version == that.version && previousSnapshotVersion == that.previousSnapshotVersion && snapshotId.equals(that.snapshotId) && Objects.equals(previousSnapshotId, that.previousSnapshotId); } @@ -164,5 +263,4 @@ public int hashCode() { return Objects.hash(snapshotId, version, previousSnapshotId, previousSnapshotVersion); } } - } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java new file mode 100644 index 000000000000..13f19190a7ed --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -0,0 +1,372 @@ 
+/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.snapshot; + +import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_SEPARATOR; +import static org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml.YAML_FILE_EXTENSION; +import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.DIRECTORY_TABLE; +import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.FILE_TABLE; +import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.KEY_TABLE; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.hdds.StringUtils; +import 
org.apache.hadoop.hdds.utils.db.RDBStore; +import org.apache.hadoop.hdds.utils.db.RocksDatabase; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshotLocalData; +import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.util.YamlSerializer; +import org.apache.ozone.compaction.log.SstFileInfo; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.io.TempDir; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import org.rocksdb.LiveFileMetaData; +import org.yaml.snakeyaml.Yaml; + +/** + * Test class for OmSnapshotLocalDataManager. + */ +public class TestOmSnapshotLocalDataManager { + + private static YamlSerializer snapshotLocalDataYamlSerializer; + + @Mock + private OMMetadataManager omMetadataManager; + + @Mock + private RDBStore rdbStore; + + @Mock + private RDBStore snapshotStore; + + @TempDir + private Path tempDir; + + private OmSnapshotLocalDataManager localDataManager; + private AutoCloseable mocks; + + private File snapshotsDir; + private File dbLocation; + + @BeforeAll + public static void setupClass() { + snapshotLocalDataYamlSerializer = new YamlSerializer( + new OmSnapshotLocalDataYaml.YamlFactory()) { + + @Override + public void computeAndSetChecksum(Yaml yaml, OmSnapshotLocalData data) throws IOException { + data.computeAndSetChecksum(yaml); + } + }; + } + + @AfterAll + public static void teardownClass() throws IOException { + snapshotLocalDataYamlSerializer.close(); + snapshotLocalDataYamlSerializer = null; + } + + @BeforeEach + public void setUp() throws IOException { + mocks = MockitoAnnotations.openMocks(this); + + // Setup mock behavior + 
when(omMetadataManager.getStore()).thenReturn(rdbStore); + + this.snapshotsDir = tempDir.resolve("snapshots").toFile(); + FileUtils.deleteDirectory(snapshotsDir); + snapshotsDir.mkdirs(); + dbLocation = tempDir.resolve("db").toFile(); + FileUtils.deleteDirectory(dbLocation); + dbLocation.mkdirs(); + + when(rdbStore.getSnapshotsParentDir()).thenReturn(snapshotsDir.getAbsolutePath()); + when(rdbStore.getDbLocation()).thenReturn(dbLocation); + } + + @AfterEach + public void tearDown() throws Exception { + if (localDataManager != null) { + localDataManager.close(); + } + if (mocks != null) { + mocks.close(); + } + } + + @Test + public void testConstructor() throws IOException { + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + assertNotNull(localDataManager); + } + + @Test + public void testGetSnapshotLocalPropertyYamlPathWithSnapshotInfo() throws IOException { + UUID snapshotId = UUID.randomUUID(); + SnapshotInfo snapshotInfo = createMockSnapshotInfo(snapshotId, null); + + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + + File yamlPath = new File(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo)); + assertNotNull(yamlPath); + Path expectedYamlPath = Paths.get(snapshotsDir.getAbsolutePath(), "db" + OM_SNAPSHOT_SEPARATOR + snapshotId + + YAML_FILE_EXTENSION); + assertEquals(expectedYamlPath.toAbsolutePath().toString(), yamlPath.getAbsolutePath()); + } + + @Test + public void testCreateNewOmSnapshotLocalDataFile() throws IOException { + UUID snapshotId = UUID.randomUUID(); + SnapshotInfo snapshotInfo = createMockSnapshotInfo(snapshotId, null); + + // Setup snapshot store mock + File snapshotDbLocation = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotId).toFile(); + snapshotDbLocation.mkdirs(); + List sstFiles = new ArrayList<>(); + sstFiles.add(createMockLiveFileMetaData("file1.sst", KEY_TABLE, "key1", "key7")); + sstFiles.add(createMockLiveFileMetaData("file2.sst", KEY_TABLE, "key3", "key9")); + 
sstFiles.add(createMockLiveFileMetaData("file3.sst", FILE_TABLE, "key1", "key7")); + sstFiles.add(createMockLiveFileMetaData("file4.sst", FILE_TABLE, "key1", "key7")); + sstFiles.add(createMockLiveFileMetaData("file5.sst", DIRECTORY_TABLE, "key1", "key7")); + sstFiles.add(createMockLiveFileMetaData("file6.sst", "colFamily1", "key1", "key7")); + List sstFileInfos = IntStream.range(0, sstFiles.size() - 1) + .mapToObj(sstFiles::get).map(SstFileInfo::new).collect(Collectors.toList()); + when(snapshotStore.getDbLocation()).thenReturn(snapshotDbLocation); + RocksDatabase rocksDatabase = mock(RocksDatabase.class); + when(snapshotStore.getDb()).thenReturn(rocksDatabase); + when(rocksDatabase.getLiveFilesMetaData()).thenReturn(sstFiles); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + + localDataManager.createNewOmSnapshotLocalDataFile(snapshotStore, snapshotInfo); + + // Verify file was created + OmSnapshotLocalData snapshotLocalData = localDataManager.getOmSnapshotLocalData(snapshotId); + assertEquals(1, snapshotLocalData.getVersionSstFileInfos().size()); + OmSnapshotLocalData.VersionMeta versionMeta = snapshotLocalData.getVersionSstFileInfos().get(0); + OmSnapshotLocalData.VersionMeta expectedVersionMeta = new OmSnapshotLocalData.VersionMeta(0, sstFileInfos); + assertEquals(expectedVersionMeta, versionMeta); + } + + @Test + public void testGetOmSnapshotLocalDataWithSnapshotInfo() throws IOException { + UUID snapshotId = UUID.randomUUID(); + SnapshotInfo snapshotInfo = createMockSnapshotInfo(snapshotId, null); + + // Create and write snapshot local data file + OmSnapshotLocalData localData = createMockLocalData(snapshotId, null); + + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + + // Write the file manually for testing + Path yamlPath = Paths.get(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo.getSnapshotId())); + writeLocalDataToFile(localData, yamlPath); + + // Test retrieval + OmSnapshotLocalData 
retrieved = localDataManager.getOmSnapshotLocalData(snapshotInfo); + + assertNotNull(retrieved); + assertEquals(snapshotId, retrieved.getSnapshotId()); + } + + @Test + public void testGetOmSnapshotLocalDataWithMismatchedSnapshotId() throws IOException { + UUID snapshotId = UUID.randomUUID(); + UUID wrongSnapshotId = UUID.randomUUID(); + + // Create local data with wrong snapshot ID + OmSnapshotLocalData localData = createMockLocalData(wrongSnapshotId, null); + + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + + Path yamlPath = Paths.get(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotId)); + writeLocalDataToFile(localData, yamlPath); + // Should throw IOException due to mismatched IDs + assertThrows(IOException.class, () -> { + localDataManager.getOmSnapshotLocalData(snapshotId); + }); + } + + @Test + public void testGetOmSnapshotLocalDataWithFile() throws IOException { + UUID snapshotId = UUID.randomUUID(); + + OmSnapshotLocalData localData = createMockLocalData(snapshotId, null); + + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + + Path yamlPath = tempDir.resolve("test-snapshot.yaml"); + writeLocalDataToFile(localData, yamlPath); + + OmSnapshotLocalData retrieved = localDataManager + .getOmSnapshotLocalData(yamlPath.toFile()); + + assertNotNull(retrieved); + assertEquals(snapshotId, retrieved.getSnapshotId()); + } + + @Test + public void testAddVersionNodeWithDependents() throws IOException { + List versionIds = Stream.of(UUID.randomUUID(), UUID.randomUUID()) + .sorted(Comparator.comparing(String::valueOf)).collect(Collectors.toList()); + UUID snapshotId = versionIds.get(0); + UUID previousSnapshotId = versionIds.get(1); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + // Create snapshot directory structure and files + createSnapshotLocalDataFile(snapshotId, previousSnapshotId); + createSnapshotLocalDataFile(previousSnapshotId, null); + OmSnapshotLocalData localData = 
createMockLocalData(snapshotId, previousSnapshotId); + + // Should not throw exception + localDataManager.addVersionNodeWithDependents(localData); + } + + @Test + public void testAddVersionNodeWithDependentsAlreadyExists() throws IOException { + UUID snapshotId = UUID.randomUUID(); + + createSnapshotLocalDataFile(snapshotId, null); + + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + + OmSnapshotLocalData localData = createMockLocalData(snapshotId, null); + + // First addition + localDataManager.addVersionNodeWithDependents(localData); + + // Second addition - should handle gracefully + localDataManager.addVersionNodeWithDependents(localData); + } + + @Test + public void testInitWithExistingYamlFiles() throws IOException { + List versionIds = Stream.of(UUID.randomUUID(), UUID.randomUUID()) + .sorted(Comparator.comparing(String::valueOf)).collect(Collectors.toList()); + UUID snapshotId = versionIds.get(0); + UUID previousSnapshotId = versionIds.get(1); + + createSnapshotLocalDataFile(previousSnapshotId, null); + createSnapshotLocalDataFile(snapshotId, previousSnapshotId); + + // Initialize - should load existing files + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + + assertNotNull(localDataManager); + Map> versionMap = + localDataManager.getVersionNodeMap(); + assertEquals(2, versionMap.size()); + assertEquals(versionMap.keySet(), new HashSet<>(versionIds)); + } + + @Test + public void testInitWithInvalidPathThrowsException() throws IOException { + UUID snapshotId = UUID.randomUUID(); + + // Create a file with wrong location + OmSnapshotLocalData localData = createMockLocalData(snapshotId, null); + Path wrongPath = Paths.get(snapshotsDir.getAbsolutePath(), "db-wrong-name.yaml"); + writeLocalDataToFile(localData, wrongPath); + + // Should throw IOException during init + assertThrows(IOException.class, () -> { + new OmSnapshotLocalDataManager(omMetadataManager); + }); + } + + @Test + public void testClose() throws 
IOException { + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + + // Should not throw exception + localDataManager.close(); + } + + // Helper methods + + private SnapshotInfo createMockSnapshotInfo(UUID snapshotId, UUID previousSnapshotId) { + SnapshotInfo.Builder builder = SnapshotInfo.newBuilder() + .setSnapshotId(snapshotId) + .setName("snapshot-" + snapshotId); + + if (previousSnapshotId != null) { + builder.setPathPreviousSnapshotId(previousSnapshotId); + } + + return builder.build(); + } + + private LiveFileMetaData createMockLiveFileMetaData(String fileName, String columnFamilyName, String smallestKey, + String largestKey) { + LiveFileMetaData liveFileMetaData = mock(LiveFileMetaData.class); + when(liveFileMetaData.columnFamilyName()).thenReturn(StringUtils.string2Bytes(columnFamilyName)); + when(liveFileMetaData.fileName()).thenReturn(fileName); + when(liveFileMetaData.smallestKey()).thenReturn(StringUtils.string2Bytes(smallestKey)); + when(liveFileMetaData.largestKey()).thenReturn(StringUtils.string2Bytes(largestKey)); + return liveFileMetaData; + } + + private OmSnapshotLocalData createMockLocalData(UUID snapshotId, UUID previousSnapshotId) { + List sstFiles = new ArrayList<>(); + sstFiles.add(createMockLiveFileMetaData("file1.sst", "columnFamily1", "key1", "key7")); + sstFiles.add(createMockLiveFileMetaData("file2.sst", "columnFamily1", "key3", "key10")); + sstFiles.add(createMockLiveFileMetaData("file3.sst", "columnFamily2", "key1", "key8")); + sstFiles.add(createMockLiveFileMetaData("file4.sst", "columnFamily2", "key0", "key10")); + return new OmSnapshotLocalData(snapshotId, sstFiles, previousSnapshotId); + } + + private void createSnapshotLocalDataFile(UUID snapshotId, UUID previousSnapshotId) + throws IOException { + OmSnapshotLocalData localData = createMockLocalData(snapshotId, previousSnapshotId); + + String fileName = "db" + OM_SNAPSHOT_SEPARATOR + snapshotId.toString() + YAML_FILE_EXTENSION; + Path yamlPath = 
Paths.get(snapshotsDir.getAbsolutePath(), fileName); + + writeLocalDataToFile(localData, yamlPath); + } + + private void writeLocalDataToFile(OmSnapshotLocalData localData, Path filePath) + throws IOException { + // This is a simplified version - in real implementation, + // you would use the YamlSerializer + snapshotLocalDataYamlSerializer.save(filePath.toFile(), localData); + } +} From 70ac2c7ac04ee126c002e0e1df371a66cada19f9 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sat, 11 Oct 2025 20:40:53 -0400 Subject: [PATCH 08/97] HDDS-13783. Implement locks for OmSnapshotLocalDataManager Change-Id: I432960725b4c6c55aa906b5780cc3027e41e10db --- .../hadoop/ozone/om/OmSnapshotManager.java | 3 +- .../snapshot/OmSnapshotLocalDataManager.java | 422 ++++++++++++++++-- .../TestOmSnapshotLocalDataManager.java | 48 +- 3 files changed, 415 insertions(+), 58 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index 7b9beb80cf6f..743c1e584e25 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -196,7 +196,8 @@ public final class OmSnapshotManager implements AutoCloseable { private final AtomicInteger inFlightSnapshotCount = new AtomicInteger(0); public OmSnapshotManager(OzoneManager ozoneManager) throws IOException { - this.snapshotLocalDataManager = new OmSnapshotLocalDataManager(ozoneManager.getMetadataManager()); + this.snapshotLocalDataManager = new OmSnapshotLocalDataManager(ozoneManager.getMetadataManager(), + ozoneManager.getConfiguration()); boolean isFilesystemSnapshotEnabled = ozoneManager.isFilesystemSnapshotEnabled(); LOG.info("Ozone filesystem snapshot feature is {}.", diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index c01b77189e3b..c9715cf30320 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -17,29 +17,40 @@ package org.apache.hadoop.ozone.om.snapshot; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_MANAGER_FAIR_LOCK; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_MANAGER_FAIR_LOCK_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_MANAGER_STRIPED_LOCK_SIZE_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_MANAGER_STRIPED_LOCK_SIZE_PREFIX; import static org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml.YAML_FILE_EXTENSION; import com.google.common.annotations.VisibleForTesting; import com.google.common.graph.GraphBuilder; import com.google.common.graph.MutableGraph; +import com.google.common.util.concurrent.Striped; import java.io.File; import java.io.IOException; -import java.nio.file.Files; import java.nio.file.Path; -import java.nio.file.Paths; +import java.nio.file.StandardCopyOption; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; -import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.Stack; import java.util.UUID; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.function.Function; import java.util.stream.Collectors; +import org.apache.commons.io.FileUtils; +import org.apache.commons.lang3.tuple.Pair; import org.apache.commons.lang3.tuple.Triple; +import 
org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.SimpleStriped; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshotLocalData; @@ -48,6 +59,7 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.util.ObjectSerializer; import org.apache.hadoop.ozone.util.YamlSerializer; +import org.apache.ratis.util.function.CheckedSupplier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.yaml.snakeyaml.Yaml; @@ -59,13 +71,16 @@ public class OmSnapshotLocalDataManager implements AutoCloseable { private static final Logger LOG = LoggerFactory.getLogger(OmSnapshotLocalDataManager.class); + private static final String SNAPSHOT_LOCAL_DATA_LOCK_RESOURCE_NAME = "snapshot_local_data_lock"; private final ObjectSerializer snapshotLocalDataSerializer; private final MutableGraph localDataGraph; private final Map> versionNodeMap; private final OMMetadataManager omMetadataManager; + private Striped locks; - public OmSnapshotLocalDataManager(OMMetadataManager omMetadataManager) throws IOException { + public OmSnapshotLocalDataManager(OMMetadataManager omMetadataManager, + OzoneConfiguration configuration) throws IOException { this.localDataGraph = GraphBuilder.directed().build(); this.omMetadataManager = omMetadataManager; this.snapshotLocalDataSerializer = new YamlSerializer( @@ -77,7 +92,7 @@ public void computeAndSetChecksum(Yaml yaml, OmSnapshotLocalData data) throws IO } }; this.versionNodeMap = new HashMap<>(); - init(); + init(configuration); } @VisibleForTesting @@ -116,28 +131,39 @@ public String getSnapshotLocalPropertyYamlPath(UUID snapshotId) { * @param snapshotInfo snapshot info instance corresponding to snapshot. 
*/ public void createNewOmSnapshotLocalDataFile(RDBStore snapshotStore, SnapshotInfo snapshotInfo) throws IOException { - Path snapshotLocalDataPath = Paths.get( - getSnapshotLocalPropertyYamlPath(snapshotStore.getDbLocation().toPath())); - Files.deleteIfExists(snapshotLocalDataPath); - OmSnapshotLocalData snapshotLocalDataYaml = new OmSnapshotLocalData(snapshotInfo.getSnapshotId(), - OmSnapshotManager.getSnapshotSSTFileList(snapshotStore), snapshotInfo.getPathPreviousSnapshotId()); - snapshotLocalDataSerializer.save(snapshotLocalDataPath.toFile(), snapshotLocalDataYaml); + try (WritableOmSnapshotLocalDataProvider snapshotLocalData = + new WritableOmSnapshotLocalDataProvider(snapshotInfo.getSnapshotId(), + () -> Pair.of(new OmSnapshotLocalData(snapshotInfo.getSnapshotId(), + OmSnapshotManager.getSnapshotSSTFileList(snapshotStore), snapshotInfo.getPathPreviousSnapshotId()), + null))) { + snapshotLocalData.commit(); + } } - public OmSnapshotLocalData getOmSnapshotLocalData(SnapshotInfo snapshotInfo) throws IOException { + public ReadableOmSnapshotLocalDataProvider getOmSnapshotLocalData(SnapshotInfo snapshotInfo) throws IOException { return getOmSnapshotLocalData(snapshotInfo.getSnapshotId()); } - public OmSnapshotLocalData getOmSnapshotLocalData(UUID snapshotId) throws IOException { - Path snapshotLocalDataPath = Paths.get(getSnapshotLocalPropertyYamlPath(snapshotId)); - OmSnapshotLocalData snapshotLocalData = snapshotLocalDataSerializer.load(snapshotLocalDataPath.toFile()); - if (!Objects.equals(snapshotLocalData.getSnapshotId(), snapshotId)) { - throw new IOException("SnapshotId in path : " + snapshotLocalDataPath + " contains snapshotLocalData " + - "corresponding to snapshotId " + snapshotLocalData.getSnapshotId() + ". 
Expected snapshotId " + snapshotId); - } + public ReadableOmSnapshotLocalDataProvider getOmSnapshotLocalData(UUID snapshotId) throws IOException { + ReadableOmSnapshotLocalDataProvider snapshotLocalData = new ReadableOmSnapshotLocalDataProvider(snapshotId); return snapshotLocalData; } + public WritableOmSnapshotLocalDataProvider getWritableOmSnapshotLocalData(SnapshotInfo snapshotInfo) + throws IOException { + return getWritableOmSnapshotLocalData(snapshotInfo.getSnapshotId(), snapshotInfo.getPathPreviousSnapshotId()); + } + + public WritableOmSnapshotLocalDataProvider getWritableOmSnapshotLocalData(UUID snapshotId, UUID previousSnapshotId) + throws IOException { + return new WritableOmSnapshotLocalDataProvider(snapshotId, previousSnapshotId); + } + + public WritableOmSnapshotLocalDataProvider getWritableOmSnapshotLocalData(UUID snapshotId) + throws IOException { + return new WritableOmSnapshotLocalDataProvider(snapshotId); + } + public OmSnapshotLocalData getOmSnapshotLocalData(File snapshotDataPath) throws IOException { return snapshotLocalDataSerializer.load(snapshotDataPath); } @@ -148,12 +174,9 @@ private LocalDataVersionNode getVersionNode(UUID snapshotId, int version) { private void addVersionNode(LocalDataVersionNode versionNode) throws IOException { if (getVersionNode(versionNode.snapshotId, versionNode.version) == null) { + validateVersionAddition(versionNode); LocalDataVersionNode previousVersionNode = versionNode.previousSnapshotId == null ? 
null : getVersionNode(versionNode.previousSnapshotId, versionNode.previousSnapshotVersion); - if (versionNode.previousSnapshotId != null && previousVersionNode == null) { - throw new IOException("Unable to add " + versionNode + " since previous snapshot with version hasn't been " + - "loaded"); - } localDataGraph.addNode(versionNode); if (previousVersionNode != null) { localDataGraph.putEdge(versionNode, previousVersionNode); @@ -163,12 +186,13 @@ private void addVersionNode(LocalDataVersionNode versionNode) throws IOException } } - private List getVersionNodes(OmSnapshotLocalData snapshotLocalData) throws IOException { + private Map getVersionNodes(OmSnapshotLocalData snapshotLocalData) throws IOException { UUID snapshotId = snapshotLocalData.getSnapshotId(); UUID previousSnapshotId = snapshotLocalData.getPreviousSnapshotId(); return snapshotLocalData.getVersionSstFileInfos().entrySet().stream() .map(entry -> new LocalDataVersionNode(snapshotId, entry.getKey(), - previousSnapshotId, entry.getValue().getPreviousSnapshotVersion())).collect(Collectors.toList()); + previousSnapshotId, entry.getValue().getPreviousSnapshotVersion())) + .collect(Collectors.toMap(LocalDataVersionNode::getVersion, Function.identity())); } public void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) throws IOException { @@ -176,14 +200,14 @@ public void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) return; } Set visitedSnapshotIds = new HashSet<>(); - Stack>> stack = new Stack<>(); + Stack>> stack = new Stack<>(); stack.push(Triple.of(snapshotLocalData.getSnapshotId(), snapshotLocalData.getPreviousSnapshotId(), - getVersionNodes(snapshotLocalData))); + getVersionNodes(snapshotLocalData).values())); while (!stack.isEmpty()) { - Triple> versionNodeToProcess = stack.peek(); + Triple> versionNodeToProcess = stack.peek(); UUID snapId = versionNodeToProcess.getLeft(); UUID prevSnapId = versionNodeToProcess.getMiddle(); - List versionNodes = 
versionNodeToProcess.getRight(); + Collection versionNodes = versionNodeToProcess.getRight(); if (visitedSnapshotIds.contains(snapId)) { for (LocalDataVersionNode versionNode : versionNodes) { addVersionNode(versionNode); @@ -191,16 +215,22 @@ public void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) stack.pop(); } else { if (prevSnapId != null && !versionNodeMap.containsKey(prevSnapId)) { - OmSnapshotLocalData prevSnapshotLocalData = getOmSnapshotLocalData(prevSnapId); + File previousSnapshotLocalDataFile = new File(getSnapshotLocalPropertyYamlPath(prevSnapId)); + OmSnapshotLocalData prevSnapshotLocalData = snapshotLocalDataSerializer.load(previousSnapshotLocalDataFile); stack.push(Triple.of(prevSnapshotLocalData.getSnapshotId(), prevSnapshotLocalData.getPreviousSnapshotId(), - getVersionNodes(prevSnapshotLocalData))); + getVersionNodes(prevSnapshotLocalData).values())); + } visitedSnapshotIds.add(snapId); } } } - private void init() throws IOException { + private void init(OzoneConfiguration configuration) throws IOException { + boolean fair = configuration.getBoolean(OZONE_MANAGER_FAIR_LOCK, OZONE_MANAGER_FAIR_LOCK_DEFAULT); + String stripeSizeKey = OZONE_MANAGER_STRIPED_LOCK_SIZE_PREFIX + SNAPSHOT_LOCAL_DATA_LOCK_RESOURCE_NAME; + int size = configuration.getInt(stripeSizeKey, OZONE_MANAGER_STRIPED_LOCK_SIZE_DEFAULT); + this.locks = SimpleStriped.readWriteLock(size, fair); RDBStore store = (RDBStore) omMetadataManager.getStore(); String checkpointPrefix = store.getDbLocation().getName(); File snapshotDir = new File(store.getSnapshotsParentDir()); @@ -223,6 +253,59 @@ private void init() throws IOException { } } + private void validateVersionRemoval(UUID snapshotId, int version) throws IOException { + LocalDataVersionNode versionNode = getVersionNode(snapshotId, version); + if (versionNode != null && localDataGraph.inDegree(versionNode) != 0) { + Set versionNodes = localDataGraph.predecessors(versionNode); + throw new 
IOException(String.format("Cannot remove Snapshot %s with version : %d since it still has " + + "predecessors : %s", snapshotId, version, versionNodes)); + } + } + + private void validateVersionAddition(LocalDataVersionNode versionNode) throws IOException { + LocalDataVersionNode previousVersionNode = getVersionNode(versionNode.previousSnapshotId, + versionNode.previousSnapshotVersion); + if (versionNode.previousSnapshotId != null && previousVersionNode == null) { + throw new IOException("Unable to add " + versionNode + " since previous snapshot with version hasn't been " + + "loaded"); + } + } + + private Map validateModification(OmSnapshotLocalData snapshotLocalData) + throws IOException { + Map versionNodes = getVersionNodes(snapshotLocalData); + for (LocalDataVersionNode node : versionNodes.values()) { + validateVersionAddition(node); + } + Map snapVersionNodeMap = + getVersionNodeMap().getOrDefault(snapshotLocalData.getSnapshotId(), Collections.emptyMap()); + for (Map.Entry entry : snapVersionNodeMap.entrySet()) { + if (!versionNodes.containsKey(entry.getKey())) { + validateVersionRemoval(snapshotLocalData.getSnapshotId(), entry.getKey()); + } + } + return versionNodes; + } + + private void upsertNode(UUID snapshotId, Map versionNodes) throws IOException { + Map existingVersions = getVersionNodeMap().getOrDefault(snapshotId, + Collections.emptyMap()); + getVersionNodeMap().remove(snapshotId); + for (Map.Entry entry : versionNodes.entrySet()) { + addVersionNode(entry.getValue()); + if (existingVersions.containsKey(entry.getKey())) { + for (LocalDataVersionNode predecessor : + localDataGraph.predecessors(existingVersions.get(entry.getKey()))) { + localDataGraph.putEdge(predecessor, entry.getValue()); + } + } + } + for (LocalDataVersionNode localDataVersionNode : existingVersions.values()) { + localDataGraph.removeNode(localDataVersionNode); + } + getVersionNodeMap().put(snapshotId, versionNodes); + } + @Override public void close() { if 
(snapshotLocalDataSerializer != null) { @@ -234,11 +317,251 @@ public void close() { } } + /** + * The ReadableOmSnapshotLocalDataProvider class is responsible for managing the + * access and initialization of local snapshot data in a thread-safe manner. + * It provides mechanisms to handle snapshot data, retrieve associated previous + * snapshot data, and manage lock synchronization for safe concurrent operations. + * + * This class works with snapshot identifiers and ensures that the appropriate + * local data for a given snapshot is loaded and accessible. Additionally, it + * maintains locking mechanisms to ensure thread-safe initialization and access + * to both the current and previous snapshot local data. The implementation also + * supports handling errors in the snapshot data initialization process. + * + * Key Functionalities: + * - Initializes and provides access to snapshot local data associated with a + * given snapshot identifier. + * - Resolves and retrieves data for the previous snapshot if applicable. + * - Ensures safe concurrent read operations using locking mechanisms. + * - Validates the integrity and consistency of snapshot data during initialization. + * - Ensures that appropriate locks are released upon closing. + * + * Thread-Safety: + * This class utilizes locks to guarantee thread-safe operations when accessing + * or modifying snapshot data. State variables relating to snapshot data are + * properly synchronized to ensure consistency during concurrent operations. + * + * Usage Considerations: + * - Ensure proper handling of exceptions while interacting with this class, + * particularly during initialization and cleanup. + * - Always invoke the {@code close()} method after usage to release acquired locks + * and avoid potential deadlocks. 
+ */ + public class ReadableOmSnapshotLocalDataProvider implements AutoCloseable { + + private final UUID snapshotId; + private final Lock lock; + private final OmSnapshotLocalData snapshotLocalData; + private OmSnapshotLocalData previousSnapshotLocalData; + private volatile boolean isPreviousSnapshotLoaded = false; + private final UUID resolvedPreviousSnapshotId; + + protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId) throws IOException { + this(snapshotId, locks.get(snapshotId).readLock()); + } + + protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId, Lock lock) throws IOException { + this(snapshotId, lock, null, null); + } + + protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId, Lock lock, + CheckedSupplier, IOException> snapshotLocalDataSupplier, + UUID snapshotIdToBeResolved) throws IOException { + this.snapshotId = snapshotId; + this.lock = lock; + Pair pair = initialize(lock, snapshotId, snapshotIdToBeResolved, + snapshotLocalDataSupplier); + this.snapshotLocalData = pair.getKey(); + this.resolvedPreviousSnapshotId = pair.getValue(); + this.previousSnapshotLocalData = null; + this.isPreviousSnapshotLoaded = false; + } + + public OmSnapshotLocalData getSnapshotLocalData() { + return snapshotLocalData; + } + + public OmSnapshotLocalData getPreviousSnapshotLocalData() throws IOException { + if (!isPreviousSnapshotLoaded) { + synchronized (this) { + if (!isPreviousSnapshotLoaded) { + File previousSnapshotLocalDataFile = new File(getSnapshotLocalPropertyYamlPath(resolvedPreviousSnapshotId)); + this.previousSnapshotLocalData = resolvedPreviousSnapshotId == null ? 
null : + snapshotLocalDataSerializer.load(previousSnapshotLocalDataFile); + this.isPreviousSnapshotLoaded = true; + } + } + } + return previousSnapshotLocalData; + } + + private Pair initialize(Lock snapIdLock, UUID snapId, UUID toResolveSnapshotId, + CheckedSupplier, IOException> snapshotLocalDataSupplier) + throws IOException { + snapIdLock.lock(); + ReadWriteLock lockIdAcquired = locks.get(snapId); + ReadWriteLock previousReadLockAcquired = null; + boolean haspreviousReadLockAcquiredAcquired = false; + try { + snapshotLocalDataSupplier = snapshotLocalDataSupplier == null ? () -> { + File snapshotLocalDataFile = new File(getSnapshotLocalPropertyYamlPath(snapId)); + return Pair.of(snapshotLocalDataSerializer.load(snapshotLocalDataFile), snapshotLocalDataFile); + } : snapshotLocalDataSupplier; + Pair pair = snapshotLocalDataSupplier.get(); + OmSnapshotLocalData ssLocalData = pair.getKey(); + if (!Objects.equals(ssLocalData.getSnapshotId(), snapId)) { + String loadPath = pair.getValue() == null ? null : pair.getValue().getAbsolutePath(); + throw new IOException("SnapshotId in path : " + loadPath + " contains snapshotLocalData corresponding " + + "to snapshotId " + ssLocalData.getSnapshotId() + ". Expected snapshotId " + snapId); + } + + UUID previousSnapshotId = ssLocalData.getPreviousSnapshotId(); + if (previousSnapshotId != null) { + if (versionNodeMap.containsKey(previousSnapshotId)) { + throw new IOException(String.format("Operating on snapshot id : %s with previousSnapshotId: %s invalid " + + "since previousSnapshotId is not loaded.", snapId, previousSnapshotId)); + } + toResolveSnapshotId = toResolveSnapshotId == null ? 
ssLocalData.getPreviousSnapshotId() : + toResolveSnapshotId; + previousReadLockAcquired = locks.get(previousSnapshotId); + if (lockIdAcquired == previousReadLockAcquired) { + previousReadLockAcquired = null; + } + if (previousReadLockAcquired != null) { + previousReadLockAcquired.readLock().lock(); + haspreviousReadLockAcquiredAcquired = true; + } + Map previousVersionNodeMap = versionNodeMap.get(previousSnapshotId); + UUID currentIteratedSnapshotId = previousSnapshotId; + while (!Objects.equals(currentIteratedSnapshotId, toResolveSnapshotId)) { + Set previousIds = + previousVersionNodeMap.values().stream().map(LocalDataVersionNode::getPreviousSnapshotId) + .collect(Collectors.toSet()); + if (previousIds.size() > 1) { + throw new IOException(String.format("Snapshot %s versions has multiple previous snapshotIds %s", + currentIteratedSnapshotId, previousIds)); + } + if (previousIds.isEmpty()) { + throw new IOException(String.format("Snapshot %s versions doesn't have previous Id thus snapshot " + + "%s cannot be resolved against id %s", + currentIteratedSnapshotId, snapId, toResolveSnapshotId)); + } + UUID previousId = previousIds.iterator().next(); + ReadWriteLock lockToBeAcquired = locks.get(previousId); + if (lockToBeAcquired == lockIdAcquired) { + lockToBeAcquired = null; + } + if (lockToBeAcquired != null) { + if (lockToBeAcquired != previousReadLockAcquired) { + lockToBeAcquired.readLock().lock(); + haspreviousReadLockAcquiredAcquired = true; + } else { + previousReadLockAcquired = null; + } + } + try { + for (Map.Entry entry : previousVersionNodeMap.entrySet()) { + Set versionNode = localDataGraph.successors(entry.getValue()); + if (versionNode.size() > 1) { + throw new IOException(String.format("Snapshot %s version %d has multiple successors %s", + currentIteratedSnapshotId, entry.getValue(), versionNode)); + } + entry.setValue(versionNode.iterator().next()); + } + } finally { + if (previousReadLockAcquired != null) { + 
previousReadLockAcquired.readLock().unlock(); + } + previousReadLockAcquired = lockToBeAcquired; + currentIteratedSnapshotId = previousId; + } + } + ssLocalData.setPreviousSnapshotId(toResolveSnapshotId); + Map versionMetaMap = ssLocalData.getVersionSstFileInfos(); + for (Map.Entry entry : versionMetaMap.entrySet()) { + OmSnapshotLocalData.VersionMeta versionMeta = entry.getValue(); + LocalDataVersionNode relativePreviousVersionNode = + previousVersionNodeMap.get(versionMeta.getPreviousSnapshotVersion()); + if (relativePreviousVersionNode == null) { + throw new IOException(String.format("Unable to resolve previous version node for snapshot: %s" + + " with version : %d against previous snapshot %s previous version : %d", + snapId, entry.getKey(), toResolveSnapshotId, versionMeta.getPreviousSnapshotVersion())); + } + } + } else { + toResolveSnapshotId = null; + } + return Pair.of(ssLocalData, toResolveSnapshotId); + } catch (IOException e) { + if (previousReadLockAcquired != null && haspreviousReadLockAcquiredAcquired) { + previousReadLockAcquired.readLock().unlock(); + } + snapIdLock.unlock(); + throw e; + } + } + + @Override + public void close() { + if (resolvedPreviousSnapshotId != null) { + locks.get(resolvedPreviousSnapshotId).readLock().unlock(); + } + lock.unlock(); + } + } + + /** + * This class represents a writable provider for managing local data of + * OmSnapshot. It extends the functionality of {@code ReadableOmSnapshotLocalDataProvider} + * and provides support for write operations, such as committing changes. + * + * The writable snapshot data provider interacts with version nodes and + * facilitates atomic updates to snapshot properties and files. + * + * This class is designed to ensure thread-safe operations and uses locks to + * guarantee consistent state across concurrent activities. 
+ * + * The default usage includes creating an instance of this provider with + * specific snapshot identifiers and optionally handling additional parameters + * such as data resolution or a supplier for snapshot data. + */ + public final class WritableOmSnapshotLocalDataProvider extends ReadableOmSnapshotLocalDataProvider { + + private WritableOmSnapshotLocalDataProvider(UUID snapshotId) throws IOException { + super(snapshotId, locks.get(snapshotId).writeLock()); + } + + private WritableOmSnapshotLocalDataProvider(UUID snapshotId, UUID snapshotIdToBeResolved) throws IOException { + super(snapshotId, locks.get(snapshotId).writeLock(), null, snapshotIdToBeResolved); + } + + private WritableOmSnapshotLocalDataProvider(UUID snapshotId, + CheckedSupplier, IOException> snapshotLocalDataSupplier) throws IOException { + super(snapshotId, locks.get(snapshotId).writeLock(), snapshotLocalDataSupplier, null); + } + + public synchronized void commit() throws IOException { + Map localDataVersionNodes = validateModification(super.snapshotLocalData); + String filePath = getSnapshotLocalPropertyYamlPath(super.snapshotId); + String tmpFilePath = filePath + ".tmp"; + File tmpFile = new File(tmpFilePath); + if (tmpFile.exists()) { + tmpFile.delete(); + } + snapshotLocalDataSerializer.save(new File(tmpFilePath), super.snapshotLocalData); + FileUtils.moveFile(tmpFile, new File(filePath), StandardCopyOption.ATOMIC_MOVE, + StandardCopyOption.REPLACE_EXISTING); + upsertNode(super.snapshotId, localDataVersionNodes); + + } + } + static final class LocalDataVersionNode { - private UUID snapshotId; - private int version; - private UUID previousSnapshotId; - private int previousSnapshotVersion; + private final UUID snapshotId; + private final int version; + private final UUID previousSnapshotId; + private final int previousSnapshotVersion; private LocalDataVersionNode(UUID snapshotId, int version, UUID previousSnapshotId, int previousSnapshotVersion) { this.previousSnapshotId = 
previousSnapshotId; @@ -247,12 +570,27 @@ private LocalDataVersionNode(UUID snapshotId, int version, UUID previousSnapshot this.version = version; } + private int getVersion() { + return version; + } + + private UUID getSnapshotId() { + return snapshotId; + } + + private UUID getPreviousSnapshotId() { + return previousSnapshotId; + } + + private int getPreviousSnapshotVersion() { + return previousSnapshotVersion; + } + @Override public boolean equals(Object o) { if (!(o instanceof LocalDataVersionNode)) { return false; } - LocalDataVersionNode that = (LocalDataVersionNode) o; return version == that.version && previousSnapshotVersion == that.previousSnapshotVersion && snapshotId.equals(that.snapshotId) && Objects.equals(previousSnapshotId, that.previousSnapshotId); @@ -262,5 +600,15 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(snapshotId, version, previousSnapshotId, previousSnapshotVersion); } + + @Override + public String toString() { + return "LocalDataVersionNode{" + + "snapshotId=" + snapshotId + + ", version=" + version + + ", previousSnapshotId=" + previousSnapshotId + + ", previousSnapshotVersion=" + previousSnapshotVersion + + '}'; + } } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index 13f19190a7ed..cc3b758b8451 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -43,6 +43,7 @@ import java.util.stream.Stream; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.StringUtils; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.RDBStore; import 
org.apache.hadoop.hdds.utils.db.RocksDatabase; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -50,6 +51,7 @@ import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataProvider; import org.apache.hadoop.ozone.util.YamlSerializer; import org.apache.ozone.compaction.log.SstFileInfo; import org.junit.jupiter.api.AfterAll; @@ -70,6 +72,8 @@ public class TestOmSnapshotLocalDataManager { private static YamlSerializer snapshotLocalDataYamlSerializer; + private static OzoneConfiguration conf; + @Mock private OMMetadataManager omMetadataManager; @@ -90,6 +94,7 @@ public class TestOmSnapshotLocalDataManager { @BeforeAll public static void setupClass() { + conf = new OzoneConfiguration(); snapshotLocalDataYamlSerializer = new YamlSerializer( new OmSnapshotLocalDataYaml.YamlFactory()) { @@ -136,7 +141,7 @@ public void tearDown() throws Exception { @Test public void testConstructor() throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); assertNotNull(localDataManager); } @@ -145,7 +150,7 @@ public void testGetSnapshotLocalPropertyYamlPathWithSnapshotInfo() throws IOExce UUID snapshotId = UUID.randomUUID(); SnapshotInfo snapshotInfo = createMockSnapshotInfo(snapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); File yamlPath = new File(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo)); assertNotNull(yamlPath); @@ -175,16 +180,19 @@ public void testCreateNewOmSnapshotLocalDataFile() throws IOException { RocksDatabase rocksDatabase = mock(RocksDatabase.class); when(snapshotStore.getDb()).thenReturn(rocksDatabase); 
when(rocksDatabase.getLiveFilesMetaData()).thenReturn(sstFiles); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); localDataManager.createNewOmSnapshotLocalDataFile(snapshotStore, snapshotInfo); // Verify file was created - OmSnapshotLocalData snapshotLocalData = localDataManager.getOmSnapshotLocalData(snapshotId); - assertEquals(1, snapshotLocalData.getVersionSstFileInfos().size()); - OmSnapshotLocalData.VersionMeta versionMeta = snapshotLocalData.getVersionSstFileInfos().get(0); - OmSnapshotLocalData.VersionMeta expectedVersionMeta = new OmSnapshotLocalData.VersionMeta(0, sstFileInfos); - assertEquals(expectedVersionMeta, versionMeta); + OmSnapshotLocalData.VersionMeta versionMeta; + try (ReadableOmSnapshotLocalDataProvider snapshotLocalData = localDataManager.getOmSnapshotLocalData(snapshotId)) { + assertEquals(1, snapshotLocalData.getSnapshotLocalData().getVersionSstFileInfos().size()); + versionMeta = snapshotLocalData.getSnapshotLocalData().getVersionSstFileInfos().get(0); + OmSnapshotLocalData.VersionMeta expectedVersionMeta = + new OmSnapshotLocalData.VersionMeta(0, sstFileInfos); + assertEquals(expectedVersionMeta, versionMeta); + } } @Test @@ -195,17 +203,17 @@ public void testGetOmSnapshotLocalDataWithSnapshotInfo() throws IOException { // Create and write snapshot local data file OmSnapshotLocalData localData = createMockLocalData(snapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); // Write the file manually for testing Path yamlPath = Paths.get(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo.getSnapshotId())); writeLocalDataToFile(localData, yamlPath); // Test retrieval - OmSnapshotLocalData retrieved = localDataManager.getOmSnapshotLocalData(snapshotInfo); - - assertNotNull(retrieved); - assertEquals(snapshotId, 
retrieved.getSnapshotId()); + try (ReadableOmSnapshotLocalDataProvider retrieved = localDataManager.getOmSnapshotLocalData(snapshotInfo)) { + assertNotNull(retrieved.getSnapshotLocalData()); + assertEquals(snapshotId, retrieved.getSnapshotLocalData().getSnapshotId()); + } } @Test @@ -216,7 +224,7 @@ public void testGetOmSnapshotLocalDataWithMismatchedSnapshotId() throws IOExcept // Create local data with wrong snapshot ID OmSnapshotLocalData localData = createMockLocalData(wrongSnapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); Path yamlPath = Paths.get(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotId)); writeLocalDataToFile(localData, yamlPath); @@ -232,7 +240,7 @@ public void testGetOmSnapshotLocalDataWithFile() throws IOException { OmSnapshotLocalData localData = createMockLocalData(snapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); Path yamlPath = tempDir.resolve("test-snapshot.yaml"); writeLocalDataToFile(localData, yamlPath); @@ -250,7 +258,7 @@ public void testAddVersionNodeWithDependents() throws IOException { .sorted(Comparator.comparing(String::valueOf)).collect(Collectors.toList()); UUID snapshotId = versionIds.get(0); UUID previousSnapshotId = versionIds.get(1); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); // Create snapshot directory structure and files createSnapshotLocalDataFile(snapshotId, previousSnapshotId); createSnapshotLocalDataFile(previousSnapshotId, null); @@ -266,7 +274,7 @@ public void testAddVersionNodeWithDependentsAlreadyExists() throws IOException { createSnapshotLocalDataFile(snapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + localDataManager = 
new OmSnapshotLocalDataManager(omMetadataManager, conf); OmSnapshotLocalData localData = createMockLocalData(snapshotId, null); @@ -288,7 +296,7 @@ public void testInitWithExistingYamlFiles() throws IOException { createSnapshotLocalDataFile(snapshotId, previousSnapshotId); // Initialize - should load existing files - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); assertNotNull(localDataManager); Map> versionMap = @@ -308,13 +316,13 @@ public void testInitWithInvalidPathThrowsException() throws IOException { // Should throw IOException during init assertThrows(IOException.class, () -> { - new OmSnapshotLocalDataManager(omMetadataManager); + new OmSnapshotLocalDataManager(omMetadataManager, conf); }); } @Test public void testClose() throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); // Should not throw exception localDataManager.close(); From b554cc7a2ab6a49659b25dfe7d2dbe829819fa46 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sat, 11 Oct 2025 20:49:53 -0400 Subject: [PATCH 09/97] HDDS-13783. 
Implement locks for OmSnapshotLocalDataManager Change-Id: I3c5514e5bbd251a2b5297d8f074cfde5c71fa543 --- .../ozone/om/snapshot/OmSnapshotLocalDataManager.java | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index c9715cf30320..8018b94fecf0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -149,6 +149,13 @@ public ReadableOmSnapshotLocalDataProvider getOmSnapshotLocalData(UUID snapshotI return snapshotLocalData; } + public ReadableOmSnapshotLocalDataProvider getOmSnapshotLocalData(UUID snapshotId, UUID previousSnapshotID) + throws IOException { + ReadableOmSnapshotLocalDataProvider snapshotLocalData = new ReadableOmSnapshotLocalDataProvider(snapshotId, + previousSnapshotID); + return snapshotLocalData; + } + public WritableOmSnapshotLocalDataProvider getWritableOmSnapshotLocalData(SnapshotInfo snapshotInfo) throws IOException { return getWritableOmSnapshotLocalData(snapshotInfo.getSnapshotId(), snapshotInfo.getPathPreviousSnapshotId()); @@ -361,6 +368,10 @@ protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId) throws IOExceptio this(snapshotId, locks.get(snapshotId).readLock()); } + protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId, UUID snapIdToResolve) throws IOException { + this(snapshotId, locks.get(snapshotId).readLock(), null, snapIdToResolve); + } + protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId, Lock lock) throws IOException { this(snapshotId, lock, null, null); } From 49eccfac45c62092e775bdccb589381b978cee8f Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sat, 11 Oct 2025 20:51:14 -0400 
Subject: [PATCH 10/97] HDDS-13783. Refactor inline variable Change-Id: Ib5a9e6c91bdccba17820263c47eaf2c8400e930d --- .../ozone/om/snapshot/OmSnapshotLocalDataManager.java | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 8018b94fecf0..d384ab164834 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -145,15 +145,12 @@ public ReadableOmSnapshotLocalDataProvider getOmSnapshotLocalData(SnapshotInfo s } public ReadableOmSnapshotLocalDataProvider getOmSnapshotLocalData(UUID snapshotId) throws IOException { - ReadableOmSnapshotLocalDataProvider snapshotLocalData = new ReadableOmSnapshotLocalDataProvider(snapshotId); - return snapshotLocalData; + return new ReadableOmSnapshotLocalDataProvider(snapshotId); } public ReadableOmSnapshotLocalDataProvider getOmSnapshotLocalData(UUID snapshotId, UUID previousSnapshotID) throws IOException { - ReadableOmSnapshotLocalDataProvider snapshotLocalData = new ReadableOmSnapshotLocalDataProvider(snapshotId, - previousSnapshotID); - return snapshotLocalData; + return new ReadableOmSnapshotLocalDataProvider(snapshotId, previousSnapshotID); } public WritableOmSnapshotLocalDataProvider getWritableOmSnapshotLocalData(SnapshotInfo snapshotInfo) From 51eda04dc47976fa1114567ba1b14327cf14fb57 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 13 Oct 2025 07:22:40 -0400 Subject: [PATCH 11/97] HDDS-13627. 
Refactor map data structure Change-Id: Ica36e0615c7bc6aa9b6a7f6fafafd0f830d4bafb --- .../snapshot/OmSnapshotLocalDataManager.java | 113 ++++++++++++------ .../TestOmSnapshotLocalDataManager.java | 2 +- 2 files changed, 75 insertions(+), 40 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index c01b77189e3b..75611955b6f8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -28,21 +28,19 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.util.Arrays; -import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; -import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; import java.util.Stack; import java.util.UUID; -import java.util.stream.Collectors; -import org.apache.commons.lang3.tuple.Triple; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshotLocalData; +import org.apache.hadoop.ozone.om.OmSnapshotLocalData.VersionMeta; import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; @@ -62,7 +60,7 @@ public class OmSnapshotLocalDataManager implements AutoCloseable { private final ObjectSerializer snapshotLocalDataSerializer; private final MutableGraph localDataGraph; - private final Map> versionNodeMap; + private final Map versionNodeMap; private final OMMetadataManager omMetadataManager; public OmSnapshotLocalDataManager(OMMetadataManager 
omMetadataManager) throws IOException { @@ -81,7 +79,7 @@ public void computeAndSetChecksum(Yaml yaml, OmSnapshotLocalData data) throws IO } @VisibleForTesting - Map> getVersionNodeMap() { + Map getVersionNodeMap() { return versionNodeMap; } @@ -143,32 +141,34 @@ public OmSnapshotLocalData getOmSnapshotLocalData(File snapshotDataPath) throws } private LocalDataVersionNode getVersionNode(UUID snapshotId, int version) { - return versionNodeMap.getOrDefault(snapshotId, Collections.emptyMap()).get(version); + if (!versionNodeMap.containsKey(snapshotId)) { + return null; + } + return versionNodeMap.get(snapshotId).getVersionNode(version); } - private void addVersionNode(LocalDataVersionNode versionNode) throws IOException { - if (getVersionNode(versionNode.snapshotId, versionNode.version) == null) { - LocalDataVersionNode previousVersionNode = versionNode.previousSnapshotId == null ? null : - getVersionNode(versionNode.previousSnapshotId, versionNode.previousSnapshotVersion); - if (versionNode.previousSnapshotId != null && previousVersionNode == null) { - throw new IOException("Unable to add " + versionNode + " since previous snapshot with version hasn't been " + - "loaded"); - } - localDataGraph.addNode(versionNode); - if (previousVersionNode != null) { - localDataGraph.putEdge(versionNode, previousVersionNode); + private boolean addSnapshotVersionMeta(UUID snapshotId, SnapshotVersionsMeta snapshotVersionsMeta) + throws IOException { + if (!versionNodeMap.containsKey(snapshotId)) { + for (LocalDataVersionNode versionNode : snapshotVersionsMeta.getSnapshotVersions().values()) { + if (getVersionNode(versionNode.snapshotId, versionNode.version) != null) { + throw new IOException("Unable to add " + versionNode + " since it already exists"); + } + LocalDataVersionNode previousVersionNode = versionNode.previousSnapshotId == null ? 
null : + getVersionNode(versionNode.previousSnapshotId, versionNode.previousSnapshotVersion); + if (versionNode.previousSnapshotId != null && previousVersionNode == null) { + throw new IOException("Unable to add " + versionNode + " since previous snapshot with version hasn't been " + + "loaded"); + } + localDataGraph.addNode(versionNode); + if (previousVersionNode != null) { + localDataGraph.putEdge(versionNode, previousVersionNode); + } } - versionNodeMap.computeIfAbsent(versionNode.snapshotId, k -> new HashMap<>()) - .put(versionNode.version, versionNode); + versionNodeMap.put(snapshotId, snapshotVersionsMeta); + return true; } - } - - private List getVersionNodes(OmSnapshotLocalData snapshotLocalData) throws IOException { - UUID snapshotId = snapshotLocalData.getSnapshotId(); - UUID previousSnapshotId = snapshotLocalData.getPreviousSnapshotId(); - return snapshotLocalData.getVersionSstFileInfos().entrySet().stream() - .map(entry -> new LocalDataVersionNode(snapshotId, entry.getKey(), - previousSnapshotId, entry.getValue().getPreviousSnapshotVersion())).collect(Collectors.toList()); + return false; } public void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) throws IOException { @@ -176,24 +176,20 @@ public void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) return; } Set visitedSnapshotIds = new HashSet<>(); - Stack>> stack = new Stack<>(); - stack.push(Triple.of(snapshotLocalData.getSnapshotId(), snapshotLocalData.getPreviousSnapshotId(), - getVersionNodes(snapshotLocalData))); + Stack> stack = new Stack<>(); + stack.push(Pair.of(snapshotLocalData.getSnapshotId(), new SnapshotVersionsMeta(snapshotLocalData))); while (!stack.isEmpty()) { - Triple> versionNodeToProcess = stack.peek(); + Pair versionNodeToProcess = stack.peek(); UUID snapId = versionNodeToProcess.getLeft(); - UUID prevSnapId = versionNodeToProcess.getMiddle(); - List versionNodes = versionNodeToProcess.getRight(); + SnapshotVersionsMeta 
snapshotVersionsMeta = versionNodeToProcess.getRight(); if (visitedSnapshotIds.contains(snapId)) { - for (LocalDataVersionNode versionNode : versionNodes) { - addVersionNode(versionNode); - } + addSnapshotVersionMeta(snapId, snapshotVersionsMeta); stack.pop(); } else { + UUID prevSnapId = snapshotVersionsMeta.getPreviousSnapshotId(); if (prevSnapId != null && !versionNodeMap.containsKey(prevSnapId)) { OmSnapshotLocalData prevSnapshotLocalData = getOmSnapshotLocalData(prevSnapId); - stack.push(Triple.of(prevSnapshotLocalData.getSnapshotId(), prevSnapshotLocalData.getPreviousSnapshotId(), - getVersionNodes(prevSnapshotLocalData))); + stack.push(Pair.of(prevSnapshotLocalData.getSnapshotId(), new SnapshotVersionsMeta(prevSnapshotLocalData))); } visitedSnapshotIds.add(snapId); } @@ -263,4 +259,43 @@ public int hashCode() { return Objects.hash(snapshotId, version, previousSnapshotId, previousSnapshotVersion); } } + + static final class SnapshotVersionsMeta { + private final UUID previousSnapshotId; + private final Map snapshotVersions; + private int version; + + private SnapshotVersionsMeta(OmSnapshotLocalData snapshotLocalData) { + this.previousSnapshotId = snapshotLocalData.getPreviousSnapshotId(); + this.snapshotVersions = getVersionNodes(snapshotLocalData); + this.version = snapshotLocalData.getVersion(); + } + + private Map getVersionNodes(OmSnapshotLocalData snapshotLocalData) { + UUID snapshotId = snapshotLocalData.getSnapshotId(); + UUID prevSnapshotId = snapshotLocalData.getPreviousSnapshotId(); + Map versionNodes = new HashMap<>(); + for (Map.Entry entry : snapshotLocalData.getVersionSstFileInfos().entrySet()) { + versionNodes.put(entry.getKey(), new LocalDataVersionNode(snapshotId, entry.getKey(), + prevSnapshotId, entry.getValue().getPreviousSnapshotVersion())); + } + return versionNodes; + } + + UUID getPreviousSnapshotId() { + return previousSnapshotId; + } + + int getVersion() { + return version; + } + + Map getSnapshotVersions() { + return 
snapshotVersions; + } + + LocalDataVersionNode getVersionNode(int snapshotVersion) { + return snapshotVersions.get(snapshotVersion); + } + } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index 13f19190a7ed..d0ac0ad19bcb 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -291,7 +291,7 @@ public void testInitWithExistingYamlFiles() throws IOException { localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); assertNotNull(localDataManager); - Map> versionMap = + Map versionMap = localDataManager.getVersionNodeMap(); assertEquals(2, versionMap.size()); assertEquals(versionMap.keySet(), new HashSet<>(versionIds)); From 96689fafc305eadd65605d48676e1befbbe3da77 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 13 Oct 2025 08:01:37 -0400 Subject: [PATCH 12/97] HDDS-13783. 
Add more condition to upsert Change-Id: I1d93dbc048a42cc55ff1f8ffa420e52f967527b8 --- .../snapshot/OmSnapshotLocalDataManager.java | 51 ++++++++++--------- 1 file changed, 26 insertions(+), 25 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 81d2b884ae14..6ab3d404732d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -32,7 +32,6 @@ import java.nio.file.Path; import java.nio.file.StandardCopyOption; import java.util.Arrays; -import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; @@ -44,14 +43,11 @@ import java.util.UUID; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReadWriteLock; -import java.util.function.Function; import java.util.stream.Collectors; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.tuple.Pair; -import org.apache.commons.lang3.tuple.Triple; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.SimpleStriped; -import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshotLocalData; @@ -219,7 +215,8 @@ public void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) } else { UUID prevSnapId = snapshotVersionsMeta.getPreviousSnapshotId(); if (prevSnapId != null && !versionNodeMap.containsKey(prevSnapId)) { - OmSnapshotLocalData prevSnapshotLocalData = getOmSnapshotLocalData(prevSnapId); + File previousSnapshotLocalDataFile = new File(getSnapshotLocalPropertyYamlPath(prevSnapId)); + 
OmSnapshotLocalData prevSnapshotLocalData = snapshotLocalDataSerializer.load(previousSnapshotLocalDataFile); stack.push(Pair.of(prevSnapshotLocalData.getSnapshotId(), new SnapshotVersionsMeta(prevSnapshotLocalData))); } visitedSnapshotIds.add(snapId); @@ -272,31 +269,34 @@ private void validateVersionAddition(LocalDataVersionNode versionNode) throws IO } } - private Map validateModification(OmSnapshotLocalData snapshotLocalData) + private SnapshotVersionsMeta validateModification(OmSnapshotLocalData snapshotLocalData) throws IOException { - Map versionNodes = getVersionNodes(snapshotLocalData); - for (LocalDataVersionNode node : versionNodes.values()) { + SnapshotVersionsMeta versionsToBeAdded = new SnapshotVersionsMeta(snapshotLocalData); + for (LocalDataVersionNode node : versionsToBeAdded.getSnapshotVersions().values()) { validateVersionAddition(node); } - Map snapVersionNodeMap = - getVersionNodeMap().getOrDefault(snapshotLocalData.getSnapshotId(), Collections.emptyMap()); - for (Map.Entry entry : snapVersionNodeMap.entrySet()) { - if (!versionNodes.containsKey(entry.getKey())) { - validateVersionRemoval(snapshotLocalData.getSnapshotId(), entry.getKey()); + UUID snapshotId = snapshotLocalData.getSnapshotId(); + Map existingVersions = getVersionNodeMap().containsKey(snapshotId) ? 
+ getVersionNodeMap().get(snapshotId).getSnapshotVersions() : Collections.emptyMap(); + for (Map.Entry entry : existingVersions.entrySet()) { + if (!versionsToBeAdded.getSnapshotVersions().containsKey(entry.getKey())) { + validateVersionRemoval(snapshotId, entry.getKey()); } } - return versionNodes; + return versionsToBeAdded; } - private void upsertNode(UUID snapshotId, Map versionNodes) throws IOException { - Map existingVersions = getVersionNodeMap().getOrDefault(snapshotId, - Collections.emptyMap()); - getVersionNodeMap().remove(snapshotId); - for (Map.Entry entry : versionNodes.entrySet()) { - addVersionNode(entry.getValue()); + private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) throws IOException { + SnapshotVersionsMeta existingSnapVersions = getVersionNodeMap().remove(snapshotId); + Map existingVersions = existingSnapVersions == null ? Collections.emptyMap() : + existingSnapVersions.getSnapshotVersions(); + if (!addSnapshotVersionMeta(snapshotId, snapshotVersions)) { + throw new IOException("Unable to upsert " + snapshotVersions + " since it already exists"); + } + + for (Map.Entry entry : snapshotVersions.getSnapshotVersions().entrySet()) { if (existingVersions.containsKey(entry.getKey())) { - for (LocalDataVersionNode predecessor : - localDataGraph.predecessors(existingVersions.get(entry.getKey()))) { + for (LocalDataVersionNode predecessor : localDataGraph.predecessors(existingVersions.get(entry.getKey()))) { localDataGraph.putEdge(predecessor, entry.getValue()); } } @@ -304,7 +304,7 @@ private void upsertNode(UUID snapshotId, Map vers for (LocalDataVersionNode localDataVersionNode : existingVersions.values()) { localDataGraph.removeNode(localDataVersionNode); } - getVersionNodeMap().put(snapshotId, versionNodes); + getVersionNodeMap().put(snapshotId, snapshotVersions); } @Override @@ -437,7 +437,8 @@ private Pair initialize(Lock snapIdLock, UUID snapId, previousReadLockAcquired.readLock().lock(); 
haspreviousReadLockAcquiredAcquired = true; } - Map previousVersionNodeMap = versionNodeMap.get(previousSnapshotId); + Map previousVersionNodeMap = versionNodeMap.get(previousSnapshotId) + .getSnapshotVersions(); UUID currentIteratedSnapshotId = previousSnapshotId; while (!Objects.equals(currentIteratedSnapshotId, toResolveSnapshotId)) { Set previousIds = @@ -547,7 +548,7 @@ private WritableOmSnapshotLocalDataProvider(UUID snapshotId, } public synchronized void commit() throws IOException { - Map localDataVersionNodes = validateModification(super.snapshotLocalData); + SnapshotVersionsMeta localDataVersionNodes = validateModification(super.snapshotLocalData); String filePath = getSnapshotLocalPropertyYamlPath(super.snapshotId); String tmpFilePath = filePath + ".tmp"; File tmpFile = new File(tmpFilePath); From 0674299a8eabb9abe0b7103dcf72b265d46669b4 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 13 Oct 2025 09:05:04 -0400 Subject: [PATCH 13/97] HDDS-13783. Add java doc comment Change-Id: I34202928a7a367dd0a1e57219317ff34de352b78 --- .../snapshot/OmSnapshotLocalDataManager.java | 62 +++++++++++++------ 1 file changed, 43 insertions(+), 19 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 6ab3d404732d..b002dda14186 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -44,6 +44,7 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReadWriteLock; import java.util.stream.Collectors; +import jnr.ffi.annotations.In; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.tuple.Pair; import 
org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -177,13 +178,10 @@ private LocalDataVersionNode getVersionNode(UUID snapshotId, int version) { return versionNodeMap.get(snapshotId).getVersionNode(version); } - private boolean addSnapshotVersionMeta(UUID snapshotId, SnapshotVersionsMeta snapshotVersionsMeta) + private void addSnapshotVersionMeta(UUID snapshotId, SnapshotVersionsMeta snapshotVersionsMeta) throws IOException { if (!versionNodeMap.containsKey(snapshotId)) { for (LocalDataVersionNode versionNode : snapshotVersionsMeta.getSnapshotVersions().values()) { - if (getVersionNode(versionNode.snapshotId, versionNode.version) != null) { - throw new IOException("Unable to add " + versionNode + " since it already exists"); - } validateVersionAddition(versionNode); LocalDataVersionNode previousVersionNode = versionNode.previousSnapshotId == null ? null : getVersionNode(versionNode.previousSnapshotId, versionNode.previousSnapshotVersion); @@ -193,9 +191,7 @@ private boolean addSnapshotVersionMeta(UUID snapshotId, SnapshotVersionsMeta sna } } versionNodeMap.put(snapshotId, snapshotVersionsMeta); - return true; } - return false; } public void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) throws IOException { @@ -290,21 +286,20 @@ private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) SnapshotVersionsMeta existingSnapVersions = getVersionNodeMap().remove(snapshotId); Map existingVersions = existingSnapVersions == null ? Collections.emptyMap() : existingSnapVersions.getSnapshotVersions(); - if (!addSnapshotVersionMeta(snapshotId, snapshotVersions)) { - throw new IOException("Unable to upsert " + snapshotVersions + " since it already exists"); - } - + Map> predecessors = new HashMap<>(); + // Track all predecessors of the existing versions and remove the node from the graph. 
+ for (Map.Entry existingVersion : existingVersions.entrySet()) { + predecessors.put(existingVersion.getKey(), localDataGraph.predecessors(existingVersions.get(existingVersion))); + localDataGraph.removeNode(existingVersion.getValue()); + } + // Add the nodes to be added in the graph and map. + addSnapshotVersionMeta(snapshotId, snapshotVersions); + // Reconnect all the predecessors for existing nodes. for (Map.Entry entry : snapshotVersions.getSnapshotVersions().entrySet()) { - if (existingVersions.containsKey(entry.getKey())) { - for (LocalDataVersionNode predecessor : localDataGraph.predecessors(existingVersions.get(entry.getKey()))) { - localDataGraph.putEdge(predecessor, entry.getValue()); - } + for (LocalDataVersionNode predecessor : predecessors.getOrDefault(entry.getKey(), Collections.emptySet())) { + localDataGraph.putEdge(predecessor, entry.getValue()); } } - for (LocalDataVersionNode localDataVersionNode : existingVersions.values()) { - localDataGraph.removeNode(localDataVersionNode); - } - getVersionNodeMap().put(snapshotId, snapshotVersions); } @Override @@ -401,10 +396,15 @@ public OmSnapshotLocalData getPreviousSnapshotLocalData() throws IOException { return previousSnapshotLocalData; } + /** + * Intializer the snapshot local data by acquiring the lock on the snapshot and also acquires a read lock on the + * snapshotId to be resolved by iterating through the chain of previous snapshot ids. + */ private Pair initialize(Lock snapIdLock, UUID snapId, UUID toResolveSnapshotId, CheckedSupplier, IOException> snapshotLocalDataSupplier) throws IOException { snapIdLock.lock(); + // Get the Lock instance for the snapshot id and track it. 
ReadWriteLock lockIdAcquired = locks.get(snapId); ReadWriteLock previousReadLockAcquired = null; boolean haspreviousReadLockAcquiredAcquired = false; @@ -420,7 +420,9 @@ private Pair initialize(Lock snapIdLock, UUID snapId, throw new IOException("SnapshotId in path : " + loadPath + " contains snapshotLocalData corresponding " + "to snapshotId " + ssLocalData.getSnapshotId() + ". Expected snapshotId " + snapId); } - + // Get previous snapshotId and acquire read lock on the id. We need to do this outside the loop instead of a + // do while loop since the nodes that may be added may not be present in the graph so it may not be possible + // to iterate through the chain. UUID previousSnapshotId = ssLocalData.getPreviousSnapshotId(); if (previousSnapshotId != null) { if (versionNodeMap.containsKey(previousSnapshotId)) { @@ -430,6 +432,9 @@ private Pair initialize(Lock snapIdLock, UUID snapId, toResolveSnapshotId = toResolveSnapshotId == null ? ssLocalData.getPreviousSnapshotId() : toResolveSnapshotId; previousReadLockAcquired = locks.get(previousSnapshotId); + // Stripe lock could return the same lock object for multiple snapshotIds so in case a write lock is + // acquired previously on the same lock then this could cause a deadlock. If the same lock instance is + // returned then acquiring this read lock is unnecessary. if (lockIdAcquired == previousReadLockAcquired) { previousReadLockAcquired = null; } @@ -440,7 +445,10 @@ private Pair initialize(Lock snapIdLock, UUID snapId, Map previousVersionNodeMap = versionNodeMap.get(previousSnapshotId) .getSnapshotVersions(); UUID currentIteratedSnapshotId = previousSnapshotId; + // Iterate through the chain of previous snapshot ids until the snapshot id to be resolved is found. while (!Objects.equals(currentIteratedSnapshotId, toResolveSnapshotId)) { + // All versions for the snapshot should point to the same previous snapshot id. Otherwise this is a sign + // of corruption. 
Set previousIds = previousVersionNodeMap.values().stream().map(LocalDataVersionNode::getPreviousSnapshotId) .collect(Collectors.toSet()); @@ -455,27 +463,42 @@ private Pair initialize(Lock snapIdLock, UUID snapId, } UUID previousId = previousIds.iterator().next(); ReadWriteLock lockToBeAcquired = locks.get(previousId); + // If stripe lock returns the same lock object corresponding to snapshot id then no read lock needs to be + // acquired. if (lockToBeAcquired == lockIdAcquired) { lockToBeAcquired = null; } if (lockToBeAcquired != null) { + // If a read lock has already been acquired on the same lock based on the previous iteration snapshot id + // then no need to acquire another read lock on the same lock and this lock could just piggyback on the + // same lock. if (lockToBeAcquired != previousReadLockAcquired) { lockToBeAcquired.readLock().lock(); haspreviousReadLockAcquiredAcquired = true; } else { + // Set the previous read lock to null since the same lock instance is going to be used for current + // iteration lock as well. previousReadLockAcquired = null; } } try { + // Get the version node for the snapshot and update the version node to the successor to point to the + // previous node. for (Map.Entry entry : previousVersionNodeMap.entrySet()) { Set versionNode = localDataGraph.successors(entry.getValue()); if (versionNode.size() > 1) { throw new IOException(String.format("Snapshot %s version %d has multiple successors %s", currentIteratedSnapshotId, entry.getValue(), versionNode)); } + if (versionNode.isEmpty()) { + throw new IOException(String.format("Snapshot %s version %d doesn't have successor", + currentIteratedSnapshotId, entry.getValue())); + } entry.setValue(versionNode.iterator().next()); } } finally { + // Release the read lock acquired on the previous snapshot id if it was acquired. Now that the instance + // is no longer needed we can release the read lock for the snapshot iterated in the previous snapshot. 
if (previousReadLockAcquired != null) { previousReadLockAcquired.readLock().unlock(); } @@ -500,6 +523,7 @@ private Pair initialize(Lock snapIdLock, UUID snapId, } return Pair.of(ssLocalData, toResolveSnapshotId); } catch (IOException e) { + // Release all the locks in case of an exception and rethrow the exception. if (previousReadLockAcquired != null && haspreviousReadLockAcquiredAcquired) { previousReadLockAcquired.readLock().unlock(); } From 5d9fc4999d37f73dd9eb9bd66d63d6a571fa7d29 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 13 Oct 2025 09:05:41 -0400 Subject: [PATCH 14/97] HDDS-13783. Add java doc comment Change-Id: Iad6f26cb71ec921c51ee2d138745df1a2663533f --- .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index b002dda14186..be892856c53e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -44,7 +44,6 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReadWriteLock; import java.util.stream.Collectors; -import jnr.ffi.annotations.In; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.conf.OzoneConfiguration; From 2d8817603eef010a29b5fb431aa3be149ed2bd1c Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 13 Oct 2025 09:23:23 -0400 Subject: [PATCH 15/97] HDDS-13783. 
Implement full lock Change-Id: Ic5f7e249cfb9cb3973cbcd4abd36b22a6ff8f5aa --- .../snapshot/OmSnapshotLocalDataManager.java | 30 +++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index be892856c53e..b07b6627601e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -43,6 +43,7 @@ import java.util.UUID; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.stream.Collectors; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.tuple.Pair; @@ -55,9 +56,11 @@ import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.lock.OMLockDetails; import org.apache.hadoop.ozone.util.ObjectSerializer; import org.apache.hadoop.ozone.util.YamlSerializer; import org.apache.ratis.util.function.CheckedSupplier; +import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.yaml.snakeyaml.Yaml; @@ -75,6 +78,9 @@ public class OmSnapshotLocalDataManager implements AutoCloseable { private final MutableGraph localDataGraph; private final Map versionNodeMap; private final OMMetadataManager omMetadataManager; + // Used for acquiring locks on the entire data structure. + private static ReadWriteLock fullLock; + // Locks should be always acquired by iterating through the snapshot chain to avoid deadlocks. 
private Striped locks; public OmSnapshotLocalDataManager(OMMetadataManager omMetadataManager, @@ -90,6 +96,7 @@ public void computeAndSetChecksum(Yaml yaml, OmSnapshotLocalData data) throws IO } }; this.versionNodeMap = new HashMap<>(); + this.fullLock = new ReentrantReadWriteLock(); init(configuration); } @@ -246,6 +253,21 @@ private void init(OzoneConfiguration configuration) throws IOException { } } + public UncheckedAutoCloseableSupplier lock() { + this.fullLock.writeLock().lock(); + return new UncheckedAutoCloseableSupplier() { + @Override + public OMLockDetails get() { + return OMLockDetails.EMPTY_DETAILS_LOCK_ACQUIRED; + } + + @Override + public void close() { + fullLock.writeLock().unlock(); + } + }; + } + private void validateVersionRemoval(UUID snapshotId, int version) throws IOException { LocalDataVersionNode versionNode = getVersionNode(snapshotId, version); if (versionNode != null && localDataGraph.inDegree(versionNode) != 0) { @@ -559,15 +581,18 @@ public final class WritableOmSnapshotLocalDataProvider extends ReadableOmSnapsho private WritableOmSnapshotLocalDataProvider(UUID snapshotId) throws IOException { super(snapshotId, locks.get(snapshotId).writeLock()); + fullLock.readLock().lock(); } private WritableOmSnapshotLocalDataProvider(UUID snapshotId, UUID snapshotIdToBeResolved) throws IOException { super(snapshotId, locks.get(snapshotId).writeLock(), null, snapshotIdToBeResolved); + fullLock.readLock().lock(); } private WritableOmSnapshotLocalDataProvider(UUID snapshotId, CheckedSupplier, IOException> snapshotLocalDataSupplier) throws IOException { super(snapshotId, locks.get(snapshotId).writeLock(), snapshotLocalDataSupplier, null); + fullLock.readLock().lock(); } public synchronized void commit() throws IOException { @@ -582,7 +607,12 @@ public synchronized void commit() throws IOException { FileUtils.moveFile(tmpFile, new File(filePath), StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING); upsertNode(super.snapshotId, 
localDataVersionNodes); + } + @Override + public void close() { + super.close(); + fullLock.readLock().unlock(); } } From a3c4c690822f96435ff1f6135898d1c4eecab5e0 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 13 Oct 2025 09:52:13 -0400 Subject: [PATCH 16/97] HDDS-13783. Refactor and move modify method into WritableOmSnapshotLocalDataProvider Change-Id: I3a004b4b435075a4348960aeed642e8da71e7e72 --- .../snapshot/OmSnapshotLocalDataManager.java | 81 ++++++++++--------- 1 file changed, 44 insertions(+), 37 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index b07b6627601e..a870347fc2e8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -253,6 +253,13 @@ private void init(OzoneConfiguration configuration) throws IOException { } } + /** + * Acquires a write lock and provides an auto-closeable supplier for specifying details + * of the lock acquisition. The lock is released when the returned supplier is closed. + * + * @return an instance of {@code UncheckedAutoCloseableSupplier} representing + * the acquired lock details, where the lock will automatically be released on close. 
+ */ public UncheckedAutoCloseableSupplier lock() { this.fullLock.writeLock().lock(); return new UncheckedAutoCloseableSupplier() { @@ -286,43 +293,6 @@ private void validateVersionAddition(LocalDataVersionNode versionNode) throws IO } } - private SnapshotVersionsMeta validateModification(OmSnapshotLocalData snapshotLocalData) - throws IOException { - SnapshotVersionsMeta versionsToBeAdded = new SnapshotVersionsMeta(snapshotLocalData); - for (LocalDataVersionNode node : versionsToBeAdded.getSnapshotVersions().values()) { - validateVersionAddition(node); - } - UUID snapshotId = snapshotLocalData.getSnapshotId(); - Map existingVersions = getVersionNodeMap().containsKey(snapshotId) ? - getVersionNodeMap().get(snapshotId).getSnapshotVersions() : Collections.emptyMap(); - for (Map.Entry entry : existingVersions.entrySet()) { - if (!versionsToBeAdded.getSnapshotVersions().containsKey(entry.getKey())) { - validateVersionRemoval(snapshotId, entry.getKey()); - } - } - return versionsToBeAdded; - } - - private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) throws IOException { - SnapshotVersionsMeta existingSnapVersions = getVersionNodeMap().remove(snapshotId); - Map existingVersions = existingSnapVersions == null ? Collections.emptyMap() : - existingSnapVersions.getSnapshotVersions(); - Map> predecessors = new HashMap<>(); - // Track all predecessors of the existing versions and remove the node from the graph. - for (Map.Entry existingVersion : existingVersions.entrySet()) { - predecessors.put(existingVersion.getKey(), localDataGraph.predecessors(existingVersions.get(existingVersion))); - localDataGraph.removeNode(existingVersion.getValue()); - } - // Add the nodes to be added in the graph and map. - addSnapshotVersionMeta(snapshotId, snapshotVersions); - // Reconnect all the predecessors for existing nodes. 
- for (Map.Entry entry : snapshotVersions.getSnapshotVersions().entrySet()) { - for (LocalDataVersionNode predecessor : predecessors.getOrDefault(entry.getKey(), Collections.emptySet())) { - localDataGraph.putEdge(predecessor, entry.getValue()); - } - } - } - @Override public void close() { if (snapshotLocalDataSerializer != null) { @@ -595,6 +565,43 @@ private WritableOmSnapshotLocalDataProvider(UUID snapshotId, fullLock.readLock().lock(); } + private SnapshotVersionsMeta validateModification(OmSnapshotLocalData snapshotLocalData) + throws IOException { + SnapshotVersionsMeta versionsToBeAdded = new SnapshotVersionsMeta(snapshotLocalData); + for (LocalDataVersionNode node : versionsToBeAdded.getSnapshotVersions().values()) { + validateVersionAddition(node); + } + UUID snapshotId = snapshotLocalData.getSnapshotId(); + Map existingVersions = getVersionNodeMap().containsKey(snapshotId) ? + getVersionNodeMap().get(snapshotId).getSnapshotVersions() : Collections.emptyMap(); + for (Map.Entry entry : existingVersions.entrySet()) { + if (!versionsToBeAdded.getSnapshotVersions().containsKey(entry.getKey())) { + validateVersionRemoval(snapshotId, entry.getKey()); + } + } + return versionsToBeAdded; + } + + private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) throws IOException { + SnapshotVersionsMeta existingSnapVersions = getVersionNodeMap().remove(snapshotId); + Map existingVersions = existingSnapVersions == null ? Collections.emptyMap() : + existingSnapVersions.getSnapshotVersions(); + Map> predecessors = new HashMap<>(); + // Track all predecessors of the existing versions and remove the node from the graph. + for (Map.Entry existingVersion : existingVersions.entrySet()) { + predecessors.put(existingVersion.getKey(), localDataGraph.predecessors(existingVersions.get(existingVersion))); + localDataGraph.removeNode(existingVersion.getValue()); + } + // Add the nodes to be added in the graph and map. 
+ addSnapshotVersionMeta(snapshotId, snapshotVersions); + // Reconnect all the predecessors for existing nodes. + for (Map.Entry entry : snapshotVersions.getSnapshotVersions().entrySet()) { + for (LocalDataVersionNode predecessor : predecessors.getOrDefault(entry.getKey(), Collections.emptySet())) { + localDataGraph.putEdge(predecessor, entry.getValue()); + } + } + } + public synchronized void commit() throws IOException { SnapshotVersionsMeta localDataVersionNodes = validateModification(super.snapshotLocalData); String filePath = getSnapshotLocalPropertyYamlPath(super.snapshotId); From 686d0c77cea774ba44fff1901efdf86789e36e59 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 13 Oct 2025 09:58:57 -0400 Subject: [PATCH 17/97] HDDS-13783. Make full lock non static Change-Id: I06990bc9ab8fc7e1eb7bec255646a650bd8c35fe --- .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index a870347fc2e8..34b6ec602139 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -79,7 +79,7 @@ public class OmSnapshotLocalDataManager implements AutoCloseable { private final Map versionNodeMap; private final OMMetadataManager omMetadataManager; // Used for acquiring locks on the entire data structure. - private static ReadWriteLock fullLock; + private final ReadWriteLock fullLock; // Locks should be always acquired by iterating through the snapshot chain to avoid deadlocks. 
private Striped locks; From 491a54b0698f34557eb982280ccdffc434d9a4d9 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 13 Oct 2025 10:20:14 -0400 Subject: [PATCH 18/97] HDDS-13783. Fix remove Change-Id: I4c6c61c83aa9fadab8ecef854b99dcc0a89a2208 --- .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 34b6ec602139..17b21cf8b43a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -589,8 +589,9 @@ private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) Map> predecessors = new HashMap<>(); // Track all predecessors of the existing versions and remove the node from the graph. for (Map.Entry existingVersion : existingVersions.entrySet()) { - predecessors.put(existingVersion.getKey(), localDataGraph.predecessors(existingVersions.get(existingVersion))); - localDataGraph.removeNode(existingVersion.getValue()); + LocalDataVersionNode existingVersionNode = existingVersion.getValue(); + predecessors.put(existingVersion.getKey(), localDataGraph.predecessors(existingVersionNode)); + localDataGraph.removeNode(existingVersionNode); } // Add the nodes to be added in the graph and map. addSnapshotVersionMeta(snapshotId, snapshotVersions); From 5e69ee9a1e18e48df12d1a69b3694544dccb0b94 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 13 Oct 2025 17:35:35 -0400 Subject: [PATCH 19/97] HDDS-13627. 
Fix findbugs Change-Id: I0e476322372a302572f1fe79cbf2e874bfeac2ed --- .../om/snapshot/TestOmSnapshotLocalDataManager.java | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index d0ac0ad19bcb..34bde4814a6e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -25,6 +25,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -86,7 +87,6 @@ public class TestOmSnapshotLocalDataManager { private AutoCloseable mocks; private File snapshotsDir; - private File dbLocation; @BeforeAll public static void setupClass() { @@ -115,10 +115,11 @@ public void setUp() throws IOException { this.snapshotsDir = tempDir.resolve("snapshots").toFile(); FileUtils.deleteDirectory(snapshotsDir); - snapshotsDir.mkdirs(); - dbLocation = tempDir.resolve("db").toFile(); + assertTrue(snapshotsDir.exists() || snapshotsDir.mkdirs()); + File dbLocation = tempDir.resolve("db").toFile(); FileUtils.deleteDirectory(dbLocation); - dbLocation.mkdirs(); + assertTrue(dbLocation.exists() || dbLocation.mkdirs()); + when(rdbStore.getSnapshotsParentDir()).thenReturn(snapshotsDir.getAbsolutePath()); when(rdbStore.getDbLocation()).thenReturn(dbLocation); @@ -161,7 +162,8 @@ public void testCreateNewOmSnapshotLocalDataFile() throws IOException { // Setup snapshot store mock File snapshotDbLocation = 
OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotId).toFile(); - snapshotDbLocation.mkdirs(); + assertTrue(snapshotDbLocation.exists() || snapshotDbLocation.mkdirs()); + List sstFiles = new ArrayList<>(); sstFiles.add(createMockLiveFileMetaData("file1.sst", KEY_TABLE, "key1", "key7")); sstFiles.add(createMockLiveFileMetaData("file2.sst", KEY_TABLE, "key3", "key9")); From d36622a5ad8b5eeb0431650f483a7e2481623275 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 13 Oct 2025 22:34:22 -0400 Subject: [PATCH 20/97] HDDS-13785. Remove orphan versions from SnapshotLocalData Yaml file Change-Id: I31004e0c95dad64411c6fe848501a82f2f773cba --- .../apache/hadoop/ozone/OzoneConfigKeys.java | 5 + .../hadoop/ozone/om/OmSnapshotLocalData.java | 4 + .../hadoop/ozone/om/OmSnapshotManager.java | 6 +- .../snapshot/OmSnapshotLocalDataManager.java | 123 +++++++++++++++--- .../ozone/om/snapshot/SnapshotUtils.java | 11 ++ .../TestOmSnapshotLocalDataManager.java | 43 +++--- 6 files changed, 155 insertions(+), 37 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index 1d47fb72958f..41eeb10e5c2c 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -631,6 +631,11 @@ public final class OzoneConfigKeys { OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED_DEFAULT = TimeUnit.DAYS.toMillis(30); + public static final String OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL = + "ozone.om.snapshot.local.data.manager.service.interval"; + + public static final String OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL_DEFAULT = "5m"; + public static final String OZONE_OM_SNAPSHOT_COMPACTION_DAG_PRUNE_DAEMON_RUN_INTERVAL = "ozone.om.snapshot.compaction.dag.prune.daemon.run.interval"; diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java index 5af678f903fb..7c29c8bc148e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java @@ -190,6 +190,10 @@ public void addVersionSSTFileInfos(List sstFiles, int previousSnaps this.versionSstFileInfos.put(version, new VersionMeta(previousSnapshotVersion, sstFiles)); } + public void removeVersionSSTFileInfos(int snapshotVersion) { + this.versionSstFileInfos.remove(snapshotVersion); + } + /** * Returns the checksum of the YAML representation. * @return checksum diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index 743c1e584e25..ad3a820c2c95 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -196,10 +196,10 @@ public final class OmSnapshotManager implements AutoCloseable { private final AtomicInteger inFlightSnapshotCount = new AtomicInteger(0); public OmSnapshotManager(OzoneManager ozoneManager) throws IOException { + OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); this.snapshotLocalDataManager = new OmSnapshotLocalDataManager(ozoneManager.getMetadataManager(), - ozoneManager.getConfiguration()); - boolean isFilesystemSnapshotEnabled = - ozoneManager.isFilesystemSnapshotEnabled(); + omMetadataManager.getSnapshotChainManager(), ozoneManager.getConfiguration()); + boolean isFilesystemSnapshotEnabled = ozoneManager.isFilesystemSnapshotEnabled(); LOG.info("Ozone filesystem snapshot 
feature is {}.", isFilesystemSnapshotEnabled ? "enabled" : "disabled"); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 17b21cf8b43a..deb44e02034f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -21,6 +21,8 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_MANAGER_FAIR_LOCK_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_MANAGER_STRIPED_LOCK_SIZE_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_MANAGER_STRIPED_LOCK_SIZE_PREFIX; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL_DEFAULT; import static org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml.YAML_FILE_EXTENSION; import com.google.common.annotations.VisibleForTesting; @@ -41,6 +43,8 @@ import java.util.Set; import java.util.Stack; import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -48,6 +52,7 @@ import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.Scheduler; import org.apache.hadoop.hdds.utils.SimpleStriped; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -55,6 +60,7 @@ import org.apache.hadoop.ozone.om.OmSnapshotLocalData.VersionMeta; import 
org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.lock.OMLockDetails; import org.apache.hadoop.ozone.util.ObjectSerializer; @@ -73,6 +79,7 @@ public class OmSnapshotLocalDataManager implements AutoCloseable { private static final Logger LOG = LoggerFactory.getLogger(OmSnapshotLocalDataManager.class); private static final String SNAPSHOT_LOCAL_DATA_LOCK_RESOURCE_NAME = "snapshot_local_data_lock"; + private static final String LOCAL_DATA_MANAGER_SERVICE_NAME = "OmSnapshotLocalDataManagerService"; private final ObjectSerializer snapshotLocalDataSerializer; private final MutableGraph localDataGraph; @@ -82,8 +89,12 @@ public class OmSnapshotLocalDataManager implements AutoCloseable { private final ReadWriteLock fullLock; // Locks should be always acquired by iterating through the snapshot chain to avoid deadlocks. 
private Striped locks; + private Map snapshotToBeCheckedForOrphans; + private Scheduler scheduler; + private volatile boolean closed; public OmSnapshotLocalDataManager(OMMetadataManager omMetadataManager, + SnapshotChainManager snapshotChainManager, OzoneConfiguration configuration) throws IOException { this.localDataGraph = GraphBuilder.directed().build(); this.omMetadataManager = omMetadataManager; @@ -97,7 +108,7 @@ public void computeAndSetChecksum(Yaml yaml, OmSnapshotLocalData data) throws IO }; this.versionNodeMap = new HashMap<>(); this.fullLock = new ReentrantReadWriteLock(); - init(configuration); + init(configuration, snapshotChainManager); } @VisibleForTesting @@ -186,7 +197,7 @@ private LocalDataVersionNode getVersionNode(UUID snapshotId, int version) { private void addSnapshotVersionMeta(UUID snapshotId, SnapshotVersionsMeta snapshotVersionsMeta) throws IOException { - if (!versionNodeMap.containsKey(snapshotId)) { + if (!versionNodeMap.containsKey(snapshotId) && !snapshotVersionsMeta.getSnapshotVersions().isEmpty()) { for (LocalDataVersionNode versionNode : snapshotVersionsMeta.getSnapshotVersions().values()) { validateVersionAddition(versionNode); LocalDataVersionNode previousVersionNode = versionNode.previousSnapshotId == null ? null : @@ -226,10 +237,28 @@ public void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) } } - private void init(OzoneConfiguration configuration) throws IOException { + private void increamentOrphanCheckCount(UUID snapshotId) { + this.snapshotToBeCheckedForOrphans.compute(snapshotId, (k, v) -> v == null ? 
1 : v + 1); + } + + private void decreamentOrphanCheckCount(UUID snapshotId, int decrementBy) { + this.snapshotToBeCheckedForOrphans.compute(snapshotId, (k, v) -> { + if (v == null) { + return null; + } + int newValue = v - decrementBy; + if (newValue <= 0) { + return null; + } + return newValue; + }); + } + + private void init(OzoneConfiguration configuration, SnapshotChainManager snapshotChainManager) throws IOException { boolean fair = configuration.getBoolean(OZONE_MANAGER_FAIR_LOCK, OZONE_MANAGER_FAIR_LOCK_DEFAULT); String stripeSizeKey = OZONE_MANAGER_STRIPED_LOCK_SIZE_PREFIX + SNAPSHOT_LOCAL_DATA_LOCK_RESOURCE_NAME; int size = configuration.getInt(stripeSizeKey, OZONE_MANAGER_STRIPED_LOCK_SIZE_DEFAULT); + this.snapshotToBeCheckedForOrphans = new ConcurrentHashMap<>(); this.locks = SimpleStriped.readWriteLock(size, fair); RDBStore store = (RDBStore) omMetadataManager.getStore(); String checkpointPrefix = store.getDbLocation().getName(); @@ -251,6 +280,48 @@ private void init(OzoneConfiguration configuration) throws IOException { } addVersionNodeWithDependents(snapshotLocalData); } + for (UUID snapshotId : versionNodeMap.keySet()) { + increamentOrphanCheckCount(snapshotId); + } + this.scheduler = new Scheduler(LOCAL_DATA_MANAGER_SERVICE_NAME, true, 1); + long snapshotLocalDataManagerServiceInterval = configuration.getTimeDuration( + OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL, OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL_DEFAULT, + TimeUnit.MILLISECONDS); + this.scheduler.scheduleWithFixedDelay( + () -> { + try { + checkOrphanSnapshotVersions(omMetadataManager, snapshotChainManager); + } catch (IOException e) { + LOG.error("Exception while checking orphan snapshot versions", e); + } + }, snapshotLocalDataManagerServiceInterval, snapshotLocalDataManagerServiceInterval, TimeUnit.MILLISECONDS); + } + + private void checkOrphanSnapshotVersions(OMMetadataManager metadataManager, SnapshotChainManager chainManager) + throws IOException { + for (Map.Entry entry : 
snapshotToBeCheckedForOrphans.entrySet()) { + UUID snapshotId = entry.getKey(); + int countBeforeCheck = entry.getValue(); + try (WritableOmSnapshotLocalDataProvider snapshotLocalDataProvider = + new WritableOmSnapshotLocalDataProvider(snapshotId)) { + OmSnapshotLocalData snapshotLocalData = snapshotLocalDataProvider.getSnapshotLocalData(); + boolean isSnapshotPurged = SnapshotUtils.isSnapshotPurged(chainManager, metadataManager, snapshotId); + for (Map.Entry integerLocalDataVersionNodeEntry : getVersionNodeMap().get( + snapshotId).getSnapshotVersions().entrySet()) { + LocalDataVersionNode versionEntry = integerLocalDataVersionNodeEntry.getValue(); + // remove the version entry if it is not referenced by any other snapshot version node. For version node 0 + // a newly created snapshot version could point to a version with indegree 0 in such a scenario a version 0 + // node can be only deleted if the snapshot is also purged. + boolean toRemove = localDataGraph.inDegree(versionEntry) == 0 + && (versionEntry.getVersion() != 0 || isSnapshotPurged); + if (toRemove) { + snapshotLocalData.removeVersionSSTFileInfos(versionEntry.getVersion()); + } + } + snapshotLocalDataProvider.commit(); + } + decreamentOrphanCheckCount(snapshotId, countBeforeCheck); + } } /** @@ -373,16 +444,12 @@ public OmSnapshotLocalData getSnapshotLocalData() { return snapshotLocalData; } - public OmSnapshotLocalData getPreviousSnapshotLocalData() throws IOException { + public synchronized OmSnapshotLocalData getPreviousSnapshotLocalData() throws IOException { if (!isPreviousSnapshotLoaded) { - synchronized (this) { - if (!isPreviousSnapshotLoaded) { - File previousSnapshotLocalDataFile = new File(getSnapshotLocalPropertyYamlPath(resolvedPreviousSnapshotId)); - this.previousSnapshotLocalData = resolvedPreviousSnapshotId == null ? 
null : - snapshotLocalDataSerializer.load(previousSnapshotLocalDataFile); - this.isPreviousSnapshotLoaded = true; - } - } + File previousSnapshotLocalDataFile = new File(getSnapshotLocalPropertyYamlPath(resolvedPreviousSnapshotId)); + this.previousSnapshotLocalData = resolvedPreviousSnapshotId == null ? null : + snapshotLocalDataSerializer.load(previousSnapshotLocalDataFile); + this.isPreviousSnapshotLoaded = true; } return previousSnapshotLocalData; } @@ -593,6 +660,7 @@ private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) predecessors.put(existingVersion.getKey(), localDataGraph.predecessors(existingVersionNode)); localDataGraph.removeNode(existingVersionNode); } + // Add the nodes to be added in the graph and map. addSnapshotVersionMeta(snapshotId, snapshotVersions); // Reconnect all the predecessors for existing nodes. @@ -601,19 +669,36 @@ private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) localDataGraph.putEdge(predecessor, entry.getValue()); } } + // The previous snapshotId could have become an orphan entry or could have orphan versions. 
+ if (existingSnapVersions != null) { + increamentOrphanCheckCount(existingSnapVersions.getPreviousSnapshotId()); + } } public synchronized void commit() throws IOException { SnapshotVersionsMeta localDataVersionNodes = validateModification(super.snapshotLocalData); String filePath = getSnapshotLocalPropertyYamlPath(super.snapshotId); - String tmpFilePath = filePath + ".tmp"; - File tmpFile = new File(tmpFilePath); - if (tmpFile.exists()) { - tmpFile.delete(); + File snapshotLocalDataFile = new File(filePath); + if (!localDataVersionNodes.getSnapshotVersions().isEmpty()) { + String tmpFilePath = filePath + ".tmp"; + File tmpFile = new File(tmpFilePath); + boolean tmpFileExists = tmpFile.exists(); + if (tmpFileExists) { + tmpFileExists = !tmpFile.delete(); + } + if (tmpFileExists) { + throw new IOException("Unable to delete tmp file " + tmpFilePath); + } + snapshotLocalDataSerializer.save(new File(tmpFilePath), super.snapshotLocalData); + FileUtils.moveFile(tmpFile, new File(filePath), StandardCopyOption.ATOMIC_MOVE, + StandardCopyOption.REPLACE_EXISTING); + } else if (snapshotLocalDataFile.exists()) { + LOG.info("Deleting Yaml file corresponding to snapshotId: {} in path : {}", + super.snapshotId, snapshotLocalDataFile.getAbsolutePath()); + if (!snapshotLocalDataFile.delete()) { + throw new IOException("Unable to delete file " + snapshotLocalDataFile.getAbsolutePath()); + } } - snapshotLocalDataSerializer.save(new File(tmpFilePath), super.snapshotLocalData); - FileUtils.moveFile(tmpFile, new File(filePath), StandardCopyOption.ATOMIC_MOVE, - StandardCopyOption.REPLACE_EXISTING); upsertNode(super.snapshotId, localDataVersionNodes); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java index ea6d88c8e194..f1ef5035e7c6 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java @@ -34,6 +34,8 @@ import java.util.Objects; import java.util.Optional; import java.util.UUID; +import org.apache.hadoop.hdds.utils.db.CodecException; +import org.apache.hadoop.hdds.utils.db.RocksDatabaseException; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; @@ -89,6 +91,15 @@ public static SnapshotInfo getSnapshotInfo(final OzoneManager ozoneManager, return snapshotInfo; } + public static boolean isSnapshotPurged(SnapshotChainManager chainManager, OMMetadataManager omMetadataManager, + UUID snapshotId) throws RocksDatabaseException, CodecException { + String tableKey = chainManager.getTableKey(snapshotId); + if (tableKey == null) { + return true; + } + return !omMetadataManager.getSnapshotInfoTable().isExist(tableKey); + } + public static SnapshotInfo getSnapshotInfo(OzoneManager ozoneManager, SnapshotChainManager chainManager, UUID snapshotId) throws IOException { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index 5be849ab7641..4d676fc90003 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -25,7 +25,10 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static 
org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; import static org.mockito.Mockito.when; import java.io.File; @@ -50,6 +53,7 @@ import org.apache.hadoop.ozone.om.OmSnapshotLocalData; import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataProvider; import org.apache.hadoop.ozone.util.YamlSerializer; @@ -61,6 +65,7 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; import org.mockito.Mock; +import org.mockito.MockedStatic; import org.mockito.MockitoAnnotations; import org.rocksdb.LiveFileMetaData; import org.yaml.snakeyaml.Yaml; @@ -77,6 +82,9 @@ public class TestOmSnapshotLocalDataManager { @Mock private OMMetadataManager omMetadataManager; + @Mock + private SnapshotChainManager chainManager; + @Mock private RDBStore rdbStore; @@ -91,6 +99,7 @@ public class TestOmSnapshotLocalDataManager { private File snapshotsDir; private File dbLocation; + private MockedStatic snapshotUtilMock; @BeforeAll public static void setupClass() { @@ -117,16 +126,17 @@ public void setUp() throws IOException { // Setup mock behavior when(omMetadataManager.getStore()).thenReturn(rdbStore); - this.snapshotsDir = tempDir.resolve("snapshots").toFile(); FileUtils.deleteDirectory(snapshotsDir); - snapshotsDir.mkdirs(); + assertTrue(snapshotsDir.exists() || snapshotsDir.mkdirs()); dbLocation = tempDir.resolve("db").toFile(); FileUtils.deleteDirectory(dbLocation); - dbLocation.mkdirs(); + assertTrue(dbLocation.exists() || dbLocation.mkdirs()); when(rdbStore.getSnapshotsParentDir()).thenReturn(snapshotsDir.getAbsolutePath()); when(rdbStore.getDbLocation()).thenReturn(dbLocation); + this.snapshotUtilMock = mockStatic(SnapshotUtils.class); 
+ snapshotUtilMock.when(() -> SnapshotUtils.isSnapshotPurged(any(), any(), any())).thenReturn(false); } @AfterEach @@ -137,11 +147,14 @@ public void tearDown() throws Exception { if (mocks != null) { mocks.close(); } + if (snapshotUtilMock != null) { + snapshotUtilMock.close(); + } } @Test public void testConstructor() throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); assertNotNull(localDataManager); } @@ -150,7 +163,7 @@ public void testGetSnapshotLocalPropertyYamlPathWithSnapshotInfo() throws IOExce UUID snapshotId = UUID.randomUUID(); SnapshotInfo snapshotInfo = createMockSnapshotInfo(snapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); File yamlPath = new File(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo)); assertNotNull(yamlPath); @@ -166,7 +179,7 @@ public void testCreateNewOmSnapshotLocalDataFile() throws IOException { // Setup snapshot store mock File snapshotDbLocation = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotId).toFile(); - snapshotDbLocation.mkdirs(); + assertTrue(snapshotDbLocation.exists() || snapshotDbLocation.mkdirs()); List sstFiles = new ArrayList<>(); sstFiles.add(createMockLiveFileMetaData("file1.sst", KEY_TABLE, "key1", "key7")); sstFiles.add(createMockLiveFileMetaData("file2.sst", KEY_TABLE, "key3", "key9")); @@ -180,7 +193,7 @@ public void testCreateNewOmSnapshotLocalDataFile() throws IOException { RocksDatabase rocksDatabase = mock(RocksDatabase.class); when(snapshotStore.getDb()).thenReturn(rocksDatabase); when(rocksDatabase.getLiveFilesMetaData()).thenReturn(sstFiles); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, 
conf); localDataManager.createNewOmSnapshotLocalDataFile(snapshotStore, snapshotInfo); @@ -203,7 +216,7 @@ public void testGetOmSnapshotLocalDataWithSnapshotInfo() throws IOException { // Create and write snapshot local data file OmSnapshotLocalData localData = createMockLocalData(snapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); // Write the file manually for testing Path yamlPath = Paths.get(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo.getSnapshotId())); @@ -224,7 +237,7 @@ public void testGetOmSnapshotLocalDataWithMismatchedSnapshotId() throws IOExcept // Create local data with wrong snapshot ID OmSnapshotLocalData localData = createMockLocalData(wrongSnapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); Path yamlPath = Paths.get(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotId)); writeLocalDataToFile(localData, yamlPath); @@ -240,7 +253,7 @@ public void testGetOmSnapshotLocalDataWithFile() throws IOException { OmSnapshotLocalData localData = createMockLocalData(snapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); Path yamlPath = tempDir.resolve("test-snapshot.yaml"); writeLocalDataToFile(localData, yamlPath); @@ -258,7 +271,7 @@ public void testAddVersionNodeWithDependents() throws IOException { .sorted(Comparator.comparing(String::valueOf)).collect(Collectors.toList()); UUID snapshotId = versionIds.get(0); UUID previousSnapshotId = versionIds.get(1); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); // Create 
snapshot directory structure and files createSnapshotLocalDataFile(snapshotId, previousSnapshotId); createSnapshotLocalDataFile(previousSnapshotId, null); @@ -274,7 +287,7 @@ public void testAddVersionNodeWithDependentsAlreadyExists() throws IOException { createSnapshotLocalDataFile(snapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); OmSnapshotLocalData localData = createMockLocalData(snapshotId, null); @@ -296,7 +309,7 @@ public void testInitWithExistingYamlFiles() throws IOException { createSnapshotLocalDataFile(snapshotId, previousSnapshotId); // Initialize - should load existing files - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); assertNotNull(localDataManager); Map versionMap = @@ -316,13 +329,13 @@ public void testInitWithInvalidPathThrowsException() throws IOException { // Should throw IOException during init assertThrows(IOException.class, () -> { - new OmSnapshotLocalDataManager(omMetadataManager, conf); + new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); }); } @Test public void testClose() throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); // Should not throw exception localDataManager.close(); From ee213d15e5d9f9ef1a59345f626a2d355e2ae126 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 13 Oct 2025 22:39:12 -0400 Subject: [PATCH 21/97] HDDS-13785. 
Fix findbugs Change-Id: Id317c8b56e8b25c122b68eaf96599b9690d08f79 --- .../om/snapshot/OmSnapshotLocalDataManager.java | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index deb44e02034f..7df7fd09a2ae 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -365,12 +365,18 @@ private void validateVersionAddition(LocalDataVersionNode versionNode) throws IO } @Override - public void close() { - if (snapshotLocalDataSerializer != null) { - try { - snapshotLocalDataSerializer.close(); - } catch (IOException e) { - LOG.error("Failed to close snapshot local data serializer", e); + public synchronized void close() { + if (!closed) { + if (snapshotLocalDataSerializer != null) { + try { + snapshotLocalDataSerializer.close(); + } catch (IOException e) { + LOG.error("Failed to close snapshot local data serializer", e); + } + } + if (scheduler != null) { + scheduler.close(); } + closed = true; } } From a95604ed3e21e4ed5a2f884f1166f4641362c15b Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 13 Oct 2025 23:25:39 -0400 Subject: [PATCH 22/97] HDDS-13627. 
Fix tests Change-Id: Ie5e5f3dab4324103e8855dd15619d7755f0422e6 --- .../om/response/snapshot/OMSnapshotPurgeResponse.java | 9 ++------- .../filter/AbstractReclaimableFilterTest.java | 11 ++++++++--- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java index 75ba2a8f9501..267547bc1e54 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java @@ -23,9 +23,7 @@ import com.google.common.annotations.VisibleForTesting; import jakarta.annotation.Nonnull; import java.io.IOException; -import java.nio.file.Files; import java.nio.file.Path; -import java.nio.file.Paths; import java.util.List; import java.util.Map; import org.apache.commons.io.FileUtils; @@ -130,14 +128,11 @@ private void deleteCheckpointDirectory(OmSnapshotLocalDataManager snapshotLocalD boolean acquiredSnapshotLock = omLockDetails.isLockAcquired(); if (acquiredSnapshotLock) { Path snapshotDirPath = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotInfo); - // TODO: Do not delete on snapshot purge. OmSnapshotLocalDataManager should delete orphan local data files. 
- Path snapshotLocalDataPath = Paths.get(snapshotLocalDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo)); try { FileUtils.deleteDirectory(snapshotDirPath.toFile()); - Files.deleteIfExists(snapshotLocalDataPath); } catch (IOException ex) { - LOG.error("Failed to delete snapshot directory {} and/or local data file {} for snapshot {}", - snapshotDirPath, snapshotLocalDataPath, snapshotInfo.getTableKey(), ex); + LOG.error("Failed to delete snapshot directory {} for snapshot {}", + snapshotDirPath, snapshotInfo.getTableKey(), ex); } finally { omMetadataManager.getLock().releaseWriteLock(SNAPSHOT_DB_LOCK, snapshotInfo.getSnapshotId().toString()); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/AbstractReclaimableFilterTest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/AbstractReclaimableFilterTest.java index e8c362d9a5f4..13ba79a77f82 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/AbstractReclaimableFilterTest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/AbstractReclaimableFilterTest.java @@ -27,6 +27,7 @@ import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockConstruction; import static org.mockito.Mockito.mockStatic; import static org.mockito.Mockito.when; @@ -61,6 +62,7 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; import org.apache.hadoop.ozone.om.lock.OMLockDetails; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.om.snapshot.SnapshotDiffManager; import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; @@ -188,9 +190,9 @@ private void mockOzoneManager(BucketLayout bucketLayout) 
throws IOException { private void mockOmSnapshotManager(OzoneManager om) throws RocksDBException, IOException { try (MockedStatic rocksdb = Mockito.mockStatic(ManagedRocksDB.class); MockedConstruction mockedSnapshotDiffManager = - Mockito.mockConstruction(SnapshotDiffManager.class, (mock, context) -> + mockConstruction(SnapshotDiffManager.class, (mock, context) -> doNothing().when(mock).close()); - MockedConstruction mockedCache = Mockito.mockConstruction(SnapshotCache.class, + MockedConstruction mockedCache = mockConstruction(SnapshotCache.class, (mock, context) -> { Map> map = new HashMap<>(); when(mock.get(any(UUID.class))).thenAnswer(i -> { @@ -237,7 +239,10 @@ private void mockOmSnapshotManager(OzoneManager om) throws RocksDBException, IOE conf.set(OZONE_METADATA_DIRS, testDir.toAbsolutePath().toFile().getAbsolutePath()); when(om.getConfiguration()).thenReturn(conf); when(om.isFilesystemSnapshotEnabled()).thenReturn(true); - this.omSnapshotManager = new OmSnapshotManager(om); + try (MockedConstruction ignored = + mockConstruction(OmSnapshotLocalDataManager.class)) { + this.omSnapshotManager = new OmSnapshotManager(om); + } } } From 5a90fcfa158543e2f444d2ac888d9b03ace99301 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 13 Oct 2025 23:29:51 -0400 Subject: [PATCH 23/97] HDDS-13627. 
remove checksum interface Change-Id: I55bd5c3ef7fc32910a9111328638de2edffcd541 --- .../apache/hadoop/ozone/util/Checksum.java | 28 ------------------- 1 file changed, 28 deletions(-) delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/Checksum.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/Checksum.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/Checksum.java deleted file mode 100644 index 4d11bde5aef3..000000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/Checksum.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.util; - -import org.apache.hadoop.hdds.utils.db.CopyObject; - -/** - * Represents a generic interface for objects capable of generating or providing - * a checksum value. - */ -public interface Checksum> extends CopyObject { - String getChecksum(); -} From 20d7d6add8a0f16362b3d513a348b4f9afeff809 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 14 Oct 2025 06:35:33 -0400 Subject: [PATCH 24/97] HDDS-13627. 
Fix test failures Change-Id: I880997d3eebdf378f14c203c61c2d63b2d17552e --- .../request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java index 0fb26a4cd993..35053882eeda 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java @@ -190,8 +190,6 @@ public void testValidateAndUpdateCache() throws Exception { // Check if all the checkpoints are cleared. for (Path checkpoint : checkpointPaths) { assertFalse(Files.exists(checkpoint)); - assertFalse(Files.exists(Paths.get( - OmSnapshotLocalDataManager.getSnapshotLocalPropertyYamlPath(checkpoint)))); } assertEquals(initialSnapshotPurgeCount + 1, getOmSnapshotIntMetrics().getNumSnapshotPurges()); assertEquals(initialSnapshotPurgeFailCount, getOmSnapshotIntMetrics().getNumSnapshotPurgeFails()); From ae655cbfd8c10321d05ed74cd248b1e2fb22818f Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 14 Oct 2025 10:30:07 -0400 Subject: [PATCH 25/97] HDDS-13785. 
Set defrag flag on previous snapshotId update Change-Id: I13ba8e2fd012a3c964d657e83496c93a4f55a3be --- .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 7df7fd09a2ae..2054e3b951b3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -651,6 +651,11 @@ private SnapshotVersionsMeta validateModification(OmSnapshotLocalData snapshotLo validateVersionRemoval(snapshotId, entry.getKey()); } } + SnapshotVersionsMeta existingVersionMeta = getVersionNodeMap().get(snapshotId); + // Set the needsDefrag if the new previous snapshotId is different from the existing one or if this is a new + // snapshot yaml file. + snapshotLocalData.setNeedsDefrag(existingVersionMeta == null + || !Objects.equals(existingVersionMeta.getPreviousSnapshotId(), snapshotLocalData.getPreviousSnapshotId())); return versionsToBeAdded; } From d419283eced9b5aff28a924b1801cda63321e0ab Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 14 Oct 2025 14:44:56 -0400 Subject: [PATCH 26/97] HDDS-13783. 
Fix findbugs Change-Id: I02de81771c9102f1212bf1962e65095910ab8207 --- .../snapshot/OmSnapshotLocalDataManager.java | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 17b21cf8b43a..58a5923bc7bd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -373,16 +373,12 @@ public OmSnapshotLocalData getSnapshotLocalData() { return snapshotLocalData; } - public OmSnapshotLocalData getPreviousSnapshotLocalData() throws IOException { + public synchronized OmSnapshotLocalData getPreviousSnapshotLocalData() throws IOException { if (!isPreviousSnapshotLoaded) { - synchronized (this) { - if (!isPreviousSnapshotLoaded) { - File previousSnapshotLocalDataFile = new File(getSnapshotLocalPropertyYamlPath(resolvedPreviousSnapshotId)); - this.previousSnapshotLocalData = resolvedPreviousSnapshotId == null ? null : - snapshotLocalDataSerializer.load(previousSnapshotLocalDataFile); - this.isPreviousSnapshotLoaded = true; - } - } + File previousSnapshotLocalDataFile = new File(getSnapshotLocalPropertyYamlPath(resolvedPreviousSnapshotId)); + this.previousSnapshotLocalData = resolvedPreviousSnapshotId == null ? 
null : + snapshotLocalDataSerializer.load(previousSnapshotLocalDataFile); + this.isPreviousSnapshotLoaded = true; } return previousSnapshotLocalData; } @@ -608,8 +604,12 @@ public synchronized void commit() throws IOException { String filePath = getSnapshotLocalPropertyYamlPath(super.snapshotId); String tmpFilePath = filePath + ".tmp"; File tmpFile = new File(tmpFilePath); - if (tmpFile.exists()) { - tmpFile.delete(); + boolean tmpFileExists = tmpFile.exists(); + if (tmpFileExists) { + tmpFileExists = !tmpFile.delete(); + } + if (!tmpFileExists) { + throw new IOException("Unable to delete tmp file " + tmpFilePath); } snapshotLocalDataSerializer.save(new File(tmpFilePath), super.snapshotLocalData); FileUtils.moveFile(tmpFile, new File(filePath), StandardCopyOption.ATOMIC_MOVE, From 8a443087a096fea473f55ab65680943c72b32011 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 14 Oct 2025 15:39:52 -0400 Subject: [PATCH 27/97] HDDS-13783. Fix pmd Change-Id: I8360183ef8ac68a95a05a6a2b00bb7ede5d57d12 --- .../om/snapshot/OmSnapshotLocalDataManager.java | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 58a5923bc7bd..393b988bd41e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -637,22 +637,10 @@ private LocalDataVersionNode(UUID snapshotId, int version, UUID previousSnapshot this.version = version; } - private int getVersion() { - return version; - } - - private UUID getSnapshotId() { - return snapshotId; - } - private UUID getPreviousSnapshotId() { return previousSnapshotId; } - private int getPreviousSnapshotVersion() { - return 
previousSnapshotVersion; - } - @Override public boolean equals(Object o) { if (!(o instanceof LocalDataVersionNode)) { From 4d272d190f8aa8c2a4426389caa046e50c7140a6 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 14 Oct 2025 18:23:45 -0400 Subject: [PATCH 28/97] HDDS-13783. Fix lock release Change-Id: Ia7b386dab7558275be659bc32f838ccdd7f46ef5 --- .../snapshot/OmSnapshotLocalDataManager.java | 23 +++++++++++-------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 393b988bd41e..2f417dd5f168 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -47,6 +47,7 @@ import java.util.stream.Collectors; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.tuple.Pair; +import org.apache.commons.lang3.tuple.Triple; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.SimpleStriped; import org.apache.hadoop.hdds.utils.db.RDBStore; @@ -340,6 +341,7 @@ public class ReadableOmSnapshotLocalDataProvider implements AutoCloseable { private final UUID snapshotId; private final Lock lock; private final OmSnapshotLocalData snapshotLocalData; + private final Lock previousLock; private OmSnapshotLocalData previousSnapshotLocalData; private volatile boolean isPreviousSnapshotLoaded = false; private final UUID resolvedPreviousSnapshotId; @@ -361,10 +363,11 @@ protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId, Lock lock, UUID snapshotIdToBeResolved) throws IOException { this.snapshotId = snapshotId; this.lock = lock; - Pair pair = initialize(lock, snapshotId, snapshotIdToBeResolved, + 
Triple pair = initialize(lock, snapshotId, snapshotIdToBeResolved, snapshotLocalDataSupplier); - this.snapshotLocalData = pair.getKey(); - this.resolvedPreviousSnapshotId = pair.getValue(); + this.snapshotLocalData = pair.getLeft(); + this.previousLock = pair.getMiddle(); + this.resolvedPreviousSnapshotId = pair.getRight(); this.previousSnapshotLocalData = null; this.isPreviousSnapshotLoaded = false; } @@ -387,7 +390,7 @@ public synchronized OmSnapshotLocalData getPreviousSnapshotLocalData() throws IO * Intializer the snapshot local data by acquiring the lock on the snapshot and also acquires a read lock on the * snapshotId to be resolved by iterating through the chain of previous snapshot ids. */ - private Pair initialize(Lock snapIdLock, UUID snapId, UUID toResolveSnapshotId, + private Triple initialize(Lock snapIdLock, UUID snapId, UUID toResolveSnapshotId, CheckedSupplier, IOException> snapshotLocalDataSupplier) throws IOException { snapIdLock.lock(); @@ -408,7 +411,7 @@ private Pair initialize(Lock snapIdLock, UUID snapId, "to snapshotId " + ssLocalData.getSnapshotId() + ". Expected snapshotId " + snapId); } // Get previous snapshotId and acquire read lock on the id. We need to do this outside the loop instead of a - // do while loop since the nodes that may be added may not be present in the graph so it may not be possible + // do while loop since the nodes that need be added may not be present in the graph so it may not be possible // to iterate through the chain. UUID previousSnapshotId = ssLocalData.getPreviousSnapshotId(); if (previousSnapshotId != null) { @@ -508,7 +511,9 @@ private Pair initialize(Lock snapIdLock, UUID snapId, } else { toResolveSnapshotId = null; } - return Pair.of(ssLocalData, toResolveSnapshotId); + return Triple.of(ssLocalData, + previousReadLockAcquired != null ? previousReadLockAcquired.readLock() : null , + toResolveSnapshotId); } catch (IOException e) { // Release all the locks in case of an exception and rethrow the exception. 
if (previousReadLockAcquired != null && haspreviousReadLockAcquiredAcquired) { @@ -521,8 +526,8 @@ private Pair initialize(Lock snapIdLock, UUID snapId, @Override public void close() { - if (resolvedPreviousSnapshotId != null) { - locks.get(resolvedPreviousSnapshotId).readLock().unlock(); + if (previousLock != null) { + previousLock.unlock(); } lock.unlock(); } @@ -608,7 +613,7 @@ public synchronized void commit() throws IOException { if (tmpFileExists) { tmpFileExists = !tmpFile.delete(); } - if (!tmpFileExists) { + if (tmpFileExists) { throw new IOException("Unable to delete tmp file " + tmpFilePath); } snapshotLocalDataSerializer.save(new File(tmpFilePath), super.snapshotLocalData); From 2a38f598dfa0a3090e8c237821af6a5e7bb4b5b1 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 14 Oct 2025 18:54:33 -0400 Subject: [PATCH 29/97] HDDS-13627. address review comments Change-Id: Id19a1c451f1cdd6b08879e39b4ac2bae5d4517dc --- .../om/snapshot/OmSnapshotLocalDataManager.java | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 75611955b6f8..46f111f0f320 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -147,7 +147,7 @@ private LocalDataVersionNode getVersionNode(UUID snapshotId, int version) { return versionNodeMap.get(snapshotId).getVersionNode(version); } - private boolean addSnapshotVersionMeta(UUID snapshotId, SnapshotVersionsMeta snapshotVersionsMeta) + private void addSnapshotVersionMeta(UUID snapshotId, SnapshotVersionsMeta snapshotVersionsMeta) throws IOException { if (!versionNodeMap.containsKey(snapshotId)) { 
for (LocalDataVersionNode versionNode : snapshotVersionsMeta.getSnapshotVersions().values()) { @@ -166,9 +166,7 @@ private boolean addSnapshotVersionMeta(UUID snapshotId, SnapshotVersionsMeta sna } } versionNodeMap.put(snapshotId, snapshotVersionsMeta); - return true; } - return false; } public void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) throws IOException { @@ -231,10 +229,10 @@ public void close() { } static final class LocalDataVersionNode { - private UUID snapshotId; - private int version; - private UUID previousSnapshotId; - private int previousSnapshotVersion; + private final UUID snapshotId; + private final int version; + private final UUID previousSnapshotId; + private final int previousSnapshotVersion; private LocalDataVersionNode(UUID snapshotId, int version, UUID previousSnapshotId, int previousSnapshotVersion) { this.previousSnapshotId = previousSnapshotId; @@ -248,7 +246,6 @@ public boolean equals(Object o) { if (!(o instanceof LocalDataVersionNode)) { return false; } - LocalDataVersionNode that = (LocalDataVersionNode) o; return version == that.version && previousSnapshotVersion == that.previousSnapshotVersion && snapshotId.equals(that.snapshotId) && Objects.equals(previousSnapshotId, that.previousSnapshotId); From ca098cf1ebd32827d204e41367193b9ddb02e167 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 14 Oct 2025 21:25:24 -0400 Subject: [PATCH 30/97] HDDS-13783. 
Make graph updates synchronized Change-Id: I1b9d4227870d3918fdbf293e07f1e6a87bdcfd6c --- .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 2f417dd5f168..a543a65c501f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -201,7 +201,7 @@ private void addSnapshotVersionMeta(UUID snapshotId, SnapshotVersionsMeta snapsh } } - public void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) throws IOException { + void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) throws IOException { if (versionNodeMap.containsKey(snapshotLocalData.getSnapshotId())) { return; } @@ -583,7 +583,7 @@ private SnapshotVersionsMeta validateModification(OmSnapshotLocalData snapshotLo return versionsToBeAdded; } - private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) throws IOException { + private synchronized void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) throws IOException { SnapshotVersionsMeta existingSnapVersions = getVersionNodeMap().remove(snapshotId); Map existingVersions = existingSnapVersions == null ? Collections.emptyMap() : existingSnapVersions.getSnapshotVersions(); From 67d4b3d95c395fe43b23f6905e13e427194f2c0b Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 14 Oct 2025 21:35:12 -0400 Subject: [PATCH 31/97] HDDS-13627. 
Make add version with dependents package private Change-Id: I9f09fb50aafe82718bce9884c46d26b4862b5c04 --- .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 46f111f0f320..3c529abaf3c8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -169,7 +169,7 @@ private void addSnapshotVersionMeta(UUID snapshotId, SnapshotVersionsMeta snapsh } } - public void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) throws IOException { + void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) throws IOException { if (versionNodeMap.containsKey(snapshotLocalData.getSnapshotId())) { return; } From 665f4116df6f026d53d0f31da42c43b961e71a07 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 14 Oct 2025 23:29:29 -0400 Subject: [PATCH 32/97] HDDS-13783. 
Fix checkstyle Change-Id: I9c2cab7442f87c64b8b342910b1949ea133c72ad --- .../om/snapshot/OmSnapshotLocalDataManager.java | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 311590d660de..a543a65c501f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -27,9 +27,6 @@ import com.google.common.graph.GraphBuilder; import com.google.common.graph.MutableGraph; import com.google.common.util.concurrent.Striped; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.graph.GraphBuilder; -import com.google.common.graph.MutableGraph; import java.io.File; import java.io.IOException; import java.nio.file.Path; @@ -53,17 +50,6 @@ import org.apache.commons.lang3.tuple.Triple; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.SimpleStriped; -import java.nio.file.Paths; -import java.util.Arrays; -import java.util.Comparator; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Objects; -import java.util.Set; -import java.util.Stack; -import java.util.UUID; -import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshotLocalData; From 2894e404db72b82abf5b0680f201f5ee7f5bf00b Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 14 Oct 2025 23:34:22 -0400 Subject: [PATCH 33/97] HDDS-13783. 
Fix merge conflict Change-Id: Idfd6f31fdea8c0166e02ae2a95984ad520b4c0d1 --- .../org/apache/hadoop/ozone/om/SnapshotDefragService.java | 8 ++++---- .../ozone/om/snapshot/OmSnapshotLocalDataManager.java | 3 +-- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java index 436593b861b6..904ade6abe77 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java @@ -43,6 +43,7 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; import org.apache.hadoop.ozone.om.snapshot.MultiSnapshotLocks; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -130,11 +131,10 @@ private boolean needsDefragmentation(SnapshotInfo snapshotInfo) { String snapshotPath = OmSnapshotManager.getSnapshotPath( ozoneManager.getConfiguration(), snapshotInfo); - try { + try (OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataProvider readableOmSnapshotLocalDataProvider = + ozoneManager.getOmSnapshotManager().getSnapshotLocalDataManager().getOmSnapshotLocalData(snapshotInfo)) { // Read snapshot local metadata from YAML - OmSnapshotLocalData snapshotLocalData = ozoneManager.getOmSnapshotManager() - .getSnapshotLocalDataManager() - .getOmSnapshotLocalData(snapshotInfo); + OmSnapshotLocalData snapshotLocalData = readableOmSnapshotLocalDataProvider.getSnapshotLocalData(); // Check if snapshot needs compaction (defragmentation) boolean needsDefrag = snapshotLocalData.getNeedsDefrag(); diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index a543a65c501f..54adca46ddaf 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -511,8 +511,7 @@ private Triple initialize(Lock snapIdLock, UUID } else { toResolveSnapshotId = null; } - return Triple.of(ssLocalData, - previousReadLockAcquired != null ? previousReadLockAcquired.readLock() : null , + return Triple.of(ssLocalData, previousReadLockAcquired != null ? previousReadLockAcquired.readLock() : null, toResolveSnapshotId); } catch (IOException e) { // Release all the locks in case of an exception and rethrow the exception. From ea0ab16c737843b99adde981d5c015d8d2535f7e Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 15 Oct 2025 06:26:19 -0400 Subject: [PATCH 34/97] HDDS-13783. 
Add write version api Change-Id: If7d52317a65df2d941cc9dc6befd8215e7418f60 --- .../hadoop/ozone/om/OmSnapshotLocalData.java | 5 +- .../snapshot/OmSnapshotLocalDataManager.java | 7 ++ .../ozone/om/TestOmSnapshotLocalDataYaml.java | 8 +- .../TestOmSnapshotLocalDataManager.java | 99 +++++++++++++++++++ 4 files changed, 113 insertions(+), 6 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java index 83ad02fb14bc..d78430b6cae6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java @@ -184,9 +184,10 @@ public void setPreviousSnapshotId(UUID previousSnapshotId) { * Adds an entry to the defragged SST file list. * @param sstFiles SST file name */ - public void addVersionSSTFileInfos(List sstFiles, int previousSnapshotVersion) { + public void addVersionSSTFileInfos(List sstFiles, int previousSnapshotVersion) { version++; - this.versionSstFileInfos.put(version, new VersionMeta(previousSnapshotVersion, sstFiles)); + this.versionSstFileInfos.put(version, new VersionMeta(previousSnapshotVersion, sstFiles.stream() + .map(SstFileInfo::new).collect(Collectors.toList()))); } /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 54adca46ddaf..0f658348bfbe 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -36,6 +36,7 @@ import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; +import 
java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -62,6 +63,7 @@ import org.apache.hadoop.ozone.util.YamlSerializer; import org.apache.ratis.util.function.CheckedSupplier; import org.apache.ratis.util.function.UncheckedAutoCloseableSupplier; +import org.rocksdb.LiveFileMetaData; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.yaml.snakeyaml.Yaml; @@ -603,6 +605,11 @@ private synchronized void upsertNode(UUID snapshotId, SnapshotVersionsMeta snaps } } + public void addSnapshotVersion(RDBStore snapshotStore) throws IOException { + List sstFiles = OmSnapshotManager.getSnapshotSSTFileList(snapshotStore); + this.getSnapshotLocalData().addVersionSSTFileInfos(sstFiles, getPreviousSnapshotLocalData().getVersion()); + } + public synchronized void commit() throws IOException { SnapshotVersionsMeta localDataVersionNodes = validateModification(super.snapshotLocalData); String filePath = getSnapshotLocalPropertyYamlPath(super.snapshotId); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java index 23d332ae75b9..b234014ebbc0 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java @@ -130,11 +130,11 @@ private Pair writeToYaml(UUID snapshotId, String snapshotName) throw // Add some defragged SST files dataYaml.addVersionSSTFileInfos(ImmutableList.of( - new SstFileInfo("defragged-sst1", "k1", "k2", "table1"), - new SstFileInfo("defragged-sst2", "k3", "k4", "table2")), + createLiveFileMetaData("defragged-sst1", "table1", "k1", "k2"), + createLiveFileMetaData("defragged-sst2", "table2", "k3", "k4")), 1); dataYaml.addVersionSSTFileInfos(Collections.singletonList( - new SstFileInfo("defragged-sst3", "k4", 
"k5", "table1")), 3); + createLiveFileMetaData("defragged-sst3", "table1", "k4", "k5")), 3); File yamlFile = new File(testRoot, yamlFilePath); @@ -202,7 +202,7 @@ public void testUpdateSnapshotDataFile() throws IOException { dataYaml.setSstFiltered(false); dataYaml.setNeedsDefrag(false); dataYaml.addVersionSSTFileInfos( - singletonList(new SstFileInfo("defragged-sst4", "k5", "k6", "table3")), 5); + singletonList(createLiveFileMetaData("defragged-sst4", "table3", "k5", "k6")), 5); // Write updated data back to file omSnapshotLocalDataSerializer.save(yamlFile, dataYaml); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index f77f4fa5c581..e63a557ca83c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -26,9 +26,15 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; import static org.mockito.Mockito.when; +import com.google.common.util.concurrent.Striped; import java.io.File; import java.io.IOException; import java.nio.file.Path; @@ -39,12 +45,15 @@ import java.util.List; import java.util.Map; import java.util.UUID; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReadWriteLock; import java.util.stream.Collectors; import 
java.util.stream.IntStream; import java.util.stream.Stream; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.SimpleStriped; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.RocksDatabase; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -61,7 +70,9 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; +import org.junit.jupiter.params.ParameterizedTest; import org.mockito.Mock; +import org.mockito.MockedStatic; import org.mockito.MockitoAnnotations; import org.rocksdb.LiveFileMetaData; import org.yaml.snakeyaml.Yaml; @@ -92,6 +103,11 @@ public class TestOmSnapshotLocalDataManager { private File snapshotsDir; + private static final String READ_LOCK_MESSAGE_ACQUIRE = "readLock acquire"; + private static final String READ_LOCK_MESSAGE_UNLOCK = "readLock unlock"; + private static final String WRITE_LOCK_MESSAGE_ACQUIRE = "writeLock acquire"; + private static final String WRITE_LOCK_MESSAGE_UNLOCK = "writeLock unlock"; + @BeforeAll public static void setupClass() { conf = new OzoneConfiguration(); @@ -140,6 +156,89 @@ public void tearDown() throws Exception { } } + private String getReadLockMessageAcquire(int index) { + return READ_LOCK_MESSAGE_ACQUIRE + index; + } + + private String getReadLockMessageUnlock(int index) { + return READ_LOCK_MESSAGE_UNLOCK + index; + } + + private String getWriteLockMessageAcquire(int index) { + return WRITE_LOCK_MESSAGE_ACQUIRE + index; + } + + private String getWriteLockMessageUnlock(int index) { + return WRITE_LOCK_MESSAGE_UNLOCK + index; + } + + private MockedStatic mockStripedLock(Map lockMap, int numLocks, + List messageCaptorer) { + MockedStatic mockedStatic = mockStatic(SimpleStriped.class); + Striped stripedLock = mock(Striped.class); + + List readWriteLocks = new ArrayList<>(); + for (int idx = 
0; idx < numLocks; idx++) { + final int lockIndex = idx; + ReadWriteLock readWriteLock = mock(ReadWriteLock.class); + Lock readLock = mock(Lock.class); + Lock writeLock = mock(Lock.class); + when(readWriteLock.readLock()).thenReturn(readLock); + when(readWriteLock.writeLock()).thenReturn(writeLock); + doAnswer(invocationOnMock -> { + messageCaptorer.add(getReadLockMessageAcquire(lockIndex)); + return null; + }).when(readLock).lock(); + doAnswer(invocationOnMock -> { + messageCaptorer.add(getReadLockMessageUnlock(lockIndex)); + return null; + }).when(readLock).unlock(); + + doAnswer(invocationOnMock -> { + messageCaptorer.add(getWriteLockMessageAcquire(lockIndex)); + return null; + }).when(writeLock).lock(); + doAnswer(invocationOnMock -> { + messageCaptorer.add(getWriteLockMessageUnlock(lockIndex)); + return null; + }).when(writeLock).unlock(); + } + when(stripedLock.get(any())).thenAnswer(i -> { + if (lockMap.containsKey(i.getArgument(0))) { + return readWriteLocks.get(lockMap.get(i.getArgument(0))); + } + return readWriteLocks.get(0); + }); + mockedStatic.when(() -> SimpleStriped.readWriteLock(anyInt(), anyBoolean())).thenReturn(stripedLock); + return mockedStatic; + } + + private List createSnapshotLocalData(OmSnapshotLocalDataManager localDataManager, + int numberOfSnapshots) { + List snapshotInfos = new ArrayList<>(); + SnapshotInfo previouseSnapshotInfo = null; + + for (int i = 0; i < numberOfSnapshots; i++) { + java.util.UUID snapshotId = java.util.UUID.randomUUID(); + SnapshotInfo snapshotInfo = createMockSnapshotInfo(snapshotId, previouseSnapshotInfo == null ? 
null + : previouseSnapshotInfo.getSnapshotId()); + OmSnapshotLocalData localData = createMockLocalData(snapshotId, snapshotInfo.getPathPreviousSnapshotId()); + + snapshotInfos.add(snapshotInfo); + previouseSnapshotInfo = snapshotInfo; + } + return null; + } + + /** + * Reading Snap1 against Snap5. + */ + @Test + public void testLockOrderingWithOverLappingLocks() { + + + } + @Test public void testConstructor() throws IOException { localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); From 915562bc3102aaad85308ff057737af74fdd3fa4 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 15 Oct 2025 09:45:14 -0400 Subject: [PATCH 35/97] HDDS-13797. Refactor OzoneManagerLock Resource class to handle hierarchical resource locking Change-Id: I433a52feb491b72ea303fa32025540a742555d08 --- .../ozone/om/S3SecretLockedManager.java | 2 +- .../hadoop/ozone/om/lock/FlatResource.java | 46 ++++++ .../lock/HierachicalResourceLockManager.java | 38 +++++ .../ozone/om/lock/IOzoneManagerLock.java | 13 -- .../hadoop/ozone/om/lock/LeveledResource.java | 132 ++++++++++++++++ .../ozone/om/lock/OzoneManagerLock.java | 141 ------------------ .../apache/hadoop/ozone/om/lock/Resource.java | 31 ++++ .../hadoop/ozone/om/lock/TestKeyPathLock.java | 20 +-- .../ozone/om/lock/TestOzoneManagerLock.java | 3 - ...napshotDeletingServiceIntegrationTest.java | 2 +- .../hadoop/ozone/om/BucketManagerImpl.java | 2 +- .../hadoop/ozone/om/KeyManagerImpl.java | 2 +- .../apache/hadoop/ozone/om/ListIterator.java | 2 +- .../OMDBCheckpointServletInodeBasedXfer.java | 2 +- .../apache/hadoop/ozone/om/OzoneManager.java | 4 +- .../hadoop/ozone/om/PrefixManagerImpl.java | 2 +- .../ozone/om/SnapshotDefragService.java | 2 +- .../hadoop/ozone/om/SstFilteringService.java | 2 +- .../hadoop/ozone/om/VolumeManagerImpl.java | 4 +- .../ozone/om/lock/OBSKeyPathLockStrategy.java | 4 +- .../om/lock/RegularBucketLockStrategy.java | 2 +- .../request/bucket/OMBucketCreateRequest.java | 4 +-
.../request/bucket/OMBucketDeleteRequest.java | 4 +- .../bucket/OMBucketSetOwnerRequest.java | 2 +- .../bucket/OMBucketSetPropertyRequest.java | 2 +- .../bucket/acl/OMBucketAclRequest.java | 2 +- .../file/OMDirectoryCreateRequest.java | 2 +- .../file/OMDirectoryCreateRequestWithFSO.java | 2 +- .../om/request/file/OMFileCreateRequest.java | 2 +- .../file/OMFileCreateRequestWithFSO.java | 2 +- .../request/file/OMRecoverLeaseRequest.java | 2 +- .../request/key/OMAllocateBlockRequest.java | 2 +- .../key/OMAllocateBlockRequestWithFSO.java | 2 +- .../key/OMDirectoriesPurgeRequestWithFSO.java | 2 +- .../om/request/key/OMKeyCommitRequest.java | 2 +- .../key/OMKeyCommitRequestWithFSO.java | 2 +- .../key/OMKeyCreateRequestWithFSO.java | 2 +- .../om/request/key/OMKeyDeleteRequest.java | 2 +- .../key/OMKeyDeleteRequestWithFSO.java | 2 +- .../om/request/key/OMKeyPurgeRequest.java | 2 +- .../om/request/key/OMKeyRenameRequest.java | 2 +- .../key/OMKeyRenameRequestWithFSO.java | 2 +- .../ozone/om/request/key/OMKeyRequest.java | 2 +- .../om/request/key/OMKeySetTimesRequest.java | 2 +- .../key/OMKeySetTimesRequestWithFSO.java | 2 +- .../om/request/key/OMKeysDeleteRequest.java | 2 +- .../om/request/key/OMKeysRenameRequest.java | 2 +- .../request/key/OMOpenKeysDeleteRequest.java | 2 +- .../om/request/key/acl/OMKeyAclRequest.java | 2 +- .../key/acl/OMKeyAclRequestWithFSO.java | 2 +- .../key/acl/prefix/OMPrefixAclRequest.java | 2 +- ...S3ExpiredMultipartUploadsAbortRequest.java | 2 +- .../S3InitiateMultipartUploadRequest.java | 2 +- ...InitiateMultipartUploadRequestWithFSO.java | 2 +- .../S3MultipartUploadAbortRequest.java | 2 +- .../S3MultipartUploadCommitPartRequest.java | 2 +- .../S3MultipartUploadCompleteRequest.java | 2 +- .../tagging/S3DeleteObjectTaggingRequest.java | 2 +- .../S3DeleteObjectTaggingRequestWithFSO.java | 2 +- .../s3/tagging/S3PutObjectTaggingRequest.java | 2 +- .../S3PutObjectTaggingRequestWithFSO.java | 2 +- .../s3/tenant/OMTenantAssignAdminRequest.java | 2 +- 
.../OMTenantAssignUserAccessIdRequest.java | 2 +- .../s3/tenant/OMTenantCreateRequest.java | 4 +- .../s3/tenant/OMTenantDeleteRequest.java | 2 +- .../s3/tenant/OMTenantRevokeAdminRequest.java | 2 +- .../OMTenantRevokeUserAccessIdRequest.java | 2 +- .../snapshot/OMSnapshotCreateRequest.java | 4 +- .../snapshot/OMSnapshotDeleteRequest.java | 4 +- .../snapshot/OMSnapshotRenameRequest.java | 4 +- .../request/volume/OMQuotaRepairRequest.java | 4 +- .../request/volume/OMVolumeCreateRequest.java | 4 +- .../request/volume/OMVolumeDeleteRequest.java | 4 +- .../volume/OMVolumeSetOwnerRequest.java | 2 +- .../volume/OMVolumeSetQuotaRequest.java | 2 +- .../volume/acl/OMVolumeAclRequest.java | 2 +- .../snapshot/OMSnapshotPurgeResponse.java | 2 +- .../om/service/SnapshotDeletingService.java | 2 +- .../ozone/om/snapshot/MultiSnapshotLocks.java | 2 +- .../ozone/om/snapshot/SnapshotCache.java | 2 +- .../om/snapshot/filter/ReclaimableFilter.java | 2 +- ...tOMDirectoriesPurgeRequestAndResponse.java | 2 +- .../om/snapshot/TestMultiSnapshotLocks.java | 4 +- .../ozone/om/snapshot/TestSnapshotCache.java | 2 +- .../filter/AbstractReclaimableFilterTest.java | 2 +- 85 files changed, 347 insertions(+), 257 deletions(-) create mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/FlatResource.java create mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/HierachicalResourceLockManager.java create mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/LeveledResource.java create mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/Resource.java diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretLockedManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretLockedManager.java index d42df2acbd24..2efe66b9db85 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretLockedManager.java +++ 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretLockedManager.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.S3_SECRET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.S3_SECRET_LOCK; import java.io.IOException; import java.util.List; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/FlatResource.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/FlatResource.java new file mode 100644 index 000000000000..f23a6ee78f28 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/FlatResource.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.lock; + +/** + * Flat Resource defined in Ozone. Locks can be acquired on a resource independent of one another. + */ +public enum FlatResource implements Resource { + // Background services lock on a Snapshot. + SNAPSHOT_GC_LOCK("SNAPSHOT_GC_LOCK"), + // Lock acquired on a Snapshot's RocksDB Handle. 
+ SNAPSHOT_DB_LOCK("SNAPSHOT_DB_LOCK"); + + private String name; + private IOzoneManagerLock.ResourceManager resourceManager; + + FlatResource(String name) { + this.name = name; + this.resourceManager = new IOzoneManagerLock.ResourceManager(); + } + + @Override + public String getName() { + return name; + } + + @Override + public IOzoneManagerLock.ResourceManager getResourceManager() { + return resourceManager; + } +} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/HierachicalResourceLockManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/HierachicalResourceLockManager.java new file mode 100644 index 000000000000..94eba94d5a80 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/HierachicalResourceLockManager.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.lock; + +/** + * Interface for Hierachical Resource Lock where the lock order acquired on resource is going to be deterministic and + * there is no cyclic lock ordering on resources. + * Typically, this can be used for locking elements which form a DAG like structure.(E.g. FSO tree, Snapshot chain etc.) 
+ */ +public interface HierachicalResourceLockManager { + + HierarchicalResourceLock acquireLock(Resource resource, String key); + + /** + * Interface for managing the lock lifecycle corresponding to a Hierarchical Resource. + */ + interface HierarchicalResourceLock extends AutoCloseable { + boolean isLockAcquired(); + + @Override + void close(); + } +} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/IOzoneManagerLock.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/IOzoneManagerLock.java index 7e8ed7c78171..6a17a0f69b16 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/IOzoneManagerLock.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/IOzoneManagerLock.java @@ -72,19 +72,6 @@ boolean isWriteLockedByCurrentThread(Resource resource, OMLockMetrics getOMLockMetrics(); - /** - * Defines a resource interface used to represent entities that can be - * associated with locks in the Ozone Manager Lock mechanism. A resource - * implementation provides a name and an associated {@link ResourceManager} - * to manage its locking behavior. - */ - interface Resource { - - String getName(); - - ResourceManager getResourceManager(); - } - /** * The ResourceManager class provides functionality for managing * information about resource read and write lock usage. It tracks the time of diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/LeveledResource.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/LeveledResource.java new file mode 100644 index 000000000000..bb6b14e15882 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/LeveledResource.java @@ -0,0 +1,132 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.lock; + +/** + * Leveled Resource defined in Ozone. + * Enforces lock acquisition ordering based on the resource level. A resource at lower level cannot be acquired + * after a higher level lock is already acquired. + */ +public enum LeveledResource implements Resource { + // For S3 Bucket need to allow only for S3, that should be means only 1. + S3_BUCKET_LOCK((byte) 0, "S3_BUCKET_LOCK"), // = 1 + + // For volume need to allow both s3 bucket and volume. 01 + 10 = 11 (3) + VOLUME_LOCK((byte) 1, "VOLUME_LOCK"), // = 2 + + // For bucket we need to allow both s3 bucket, volume and bucket. Which + // is equal to 100 + 010 + 001 = 111 = 4 + 2 + 1 = 7 + BUCKET_LOCK((byte) 2, "BUCKET_LOCK"), // = 4 + + // For user we need to allow s3 bucket, volume, bucket and user lock. + // Which is 8 4 + 2 + 1 = 15 + USER_LOCK((byte) 3, "USER_LOCK"), // 15 + + S3_SECRET_LOCK((byte) 4, "S3_SECRET_LOCK"), // 31 + KEY_PATH_LOCK((byte) 5, "KEY_PATH_LOCK"), //63 + PREFIX_LOCK((byte) 6, "PREFIX_LOCK"), //127 + SNAPSHOT_LOCK((byte) 7, "SNAPSHOT_LOCK"); // = 255 + + // This will tell the value, till which we can allow locking. + private short mask; + + // This value will help during setLock, and also will tell whether we can + // re-acquire lock or not. + private short setMask; + + // Name of the resource. 
+ private String name; + + private IOzoneManagerLock.ResourceManager resourceManager; + + LeveledResource(byte pos, String name) { + // level of the resource + this.mask = (short) (Math.pow(2, pos + 1) - 1); + this.setMask = (short) Math.pow(2, pos); + this.name = name; + this.resourceManager = new IOzoneManagerLock.ResourceManager(); + } + + boolean canLock(short lockSetVal) { + + // For USER_LOCK, S3_SECRET_LOCK and PREFIX_LOCK we shall not allow + // re-acquire locks from single thread. 2nd condition is we have + // acquired one of these locks, but after that trying to acquire a lock + // with less than equal of lockLevel, we should disallow. + if (((USER_LOCK.setMask & lockSetVal) == USER_LOCK.setMask || + (S3_SECRET_LOCK.setMask & lockSetVal) == S3_SECRET_LOCK.setMask || + (PREFIX_LOCK.setMask & lockSetVal) == PREFIX_LOCK.setMask) + && setMask <= lockSetVal) { + return false; + } + + + // Our mask is the summation of bits of all previous possible locks. In + // other words it is the largest possible value for that bit position. + + // For example for Volume lock, bit position is 1, and mask is 3. Which + // is the largest value that can be represented with 2 bits is 3. + // Therefore if lockSet is larger than mask we have to return false i.e + // some other higher order lock has been acquired. + + return lockSetVal <= mask; + } + + /** + * Set Lock bits in lockSetVal. + * + * @param lockSetVal + * @return Updated value which has set lock bits. + */ + short setLock(short lockSetVal) { + return (short) (lockSetVal | setMask); + } + + /** + * Clear lock from lockSetVal. + * + * @param lockSetVal + * @return Updated value which has cleared lock bits. + */ + short clearLock(short lockSetVal) { + return (short) (lockSetVal & ~setMask); + } + + /** + * Return true, if this level is locked, else false. 
+ * + * @param lockSetVal + */ + boolean isLevelLocked(short lockSetVal) { + return (lockSetVal & setMask) == setMask; + } + + @Override + public String getName() { + return name; + } + + @Override + public IOzoneManagerLock.ResourceManager getResourceManager() { + return resourceManager; + } + + short getMask() { + return mask; + } +} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java index 364322c3ae38..cd6d85c9e3b2 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java @@ -578,34 +578,6 @@ public OMLockMetrics getOMLockMetrics() { return omLockMetrics; } - /** - * Flat Resource defined in Ozone. Locks can be acquired on a resource independent of one another. - */ - public enum FlatResource implements Resource { - // Background services lock on a Snapshot. - SNAPSHOT_GC_LOCK("SNAPSHOT_GC_LOCK"), - // Lock acquired on a Snapshot's RocksDB Handle. - SNAPSHOT_DB_LOCK("SNAPSHOT_DB_LOCK"); - - private String name; - private ResourceManager resourceManager; - - FlatResource(String name) { - this.name = name; - this.resourceManager = new ResourceManager(); - } - - @Override - public String getName() { - return name; - } - - @Override - public ResourceManager getResourceManager() { - return resourceManager; - } - } - private abstract static class ResourceLockManager { private final ThreadLocal omLockDetails = ThreadLocal.withInitial(OMLockDetails::new); @@ -690,119 +662,6 @@ public OMLockDetails lockResource(LeveledResource resource) { } } - /** - * Leveled Resource defined in Ozone. - * Enforces lock acquisition ordering based on the resource level. A resource at lower level cannot be acquired - * after a higher level lock is already acquired. 
- */ - public enum LeveledResource implements Resource { - // For S3 Bucket need to allow only for S3, that should be means only 1. - S3_BUCKET_LOCK((byte) 0, "S3_BUCKET_LOCK"), // = 1 - - // For volume need to allow both s3 bucket and volume. 01 + 10 = 11 (3) - VOLUME_LOCK((byte) 1, "VOLUME_LOCK"), // = 2 - - // For bucket we need to allow both s3 bucket, volume and bucket. Which - // is equal to 100 + 010 + 001 = 111 = 4 + 2 + 1 = 7 - BUCKET_LOCK((byte) 2, "BUCKET_LOCK"), // = 4 - - // For user we need to allow s3 bucket, volume, bucket and user lock. - // Which is 8 4 + 2 + 1 = 15 - USER_LOCK((byte) 3, "USER_LOCK"), // 15 - - S3_SECRET_LOCK((byte) 4, "S3_SECRET_LOCK"), // 31 - KEY_PATH_LOCK((byte) 5, "KEY_PATH_LOCK"), //63 - PREFIX_LOCK((byte) 6, "PREFIX_LOCK"), //127 - SNAPSHOT_LOCK((byte) 7, "SNAPSHOT_LOCK"); // = 255 - - // This will tell the value, till which we can allow locking. - private short mask; - - // This value will help during setLock, and also will tell whether we can - // re-acquire lock or not. - private short setMask; - - // Name of the resource. - private String name; - - private ResourceManager resourceManager; - - LeveledResource(byte pos, String name) { - // level of the resource - this.mask = (short) (Math.pow(2, pos + 1) - 1); - this.setMask = (short) Math.pow(2, pos); - this.name = name; - this.resourceManager = new ResourceManager(); - } - - boolean canLock(short lockSetVal) { - - // For USER_LOCK, S3_SECRET_LOCK and PREFIX_LOCK we shall not allow - // re-acquire locks from single thread. 2nd condition is we have - // acquired one of these locks, but after that trying to acquire a lock - // with less than equal of lockLevel, we should disallow. 
- if (((USER_LOCK.setMask & lockSetVal) == USER_LOCK.setMask || - (S3_SECRET_LOCK.setMask & lockSetVal) == S3_SECRET_LOCK.setMask || - (PREFIX_LOCK.setMask & lockSetVal) == PREFIX_LOCK.setMask) - && setMask <= lockSetVal) { - return false; - } - - - // Our mask is the summation of bits of all previous possible locks. In - // other words it is the largest possible value for that bit position. - - // For example for Volume lock, bit position is 1, and mask is 3. Which - // is the largest value that can be represented with 2 bits is 3. - // Therefore if lockSet is larger than mask we have to return false i.e - // some other higher order lock has been acquired. - - return lockSetVal <= mask; - } - - /** - * Set Lock bits in lockSetVal. - * - * @param lockSetVal - * @return Updated value which has set lock bits. - */ - short setLock(short lockSetVal) { - return (short) (lockSetVal | setMask); - } - - /** - * Clear lock from lockSetVal. - * - * @param lockSetVal - * @return Updated value which has cleared lock bits. - */ - short clearLock(short lockSetVal) { - return (short) (lockSetVal & ~setMask); - } - - /** - * Return true, if this level is locked, else false. - * @param lockSetVal - */ - boolean isLevelLocked(short lockSetVal) { - return (lockSetVal & setMask) == setMask; - } - - @Override - public String getName() { - return name; - } - - @Override - public ResourceManager getResourceManager() { - return resourceManager; - } - - short getMask() { - return mask; - } - } - /** * Update the processing details. * diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/Resource.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/Resource.java new file mode 100644 index 000000000000..2ef2510f12d5 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/Resource.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.lock; + +/** + * Defines a resource interface used to represent entities that can be + * associated with locks in the Ozone Manager Lock mechanism. A resource + * implementation provides a name and an associated {@link IOzoneManagerLock.ResourceManager} + * to manage its locking behavior. 
+ */ +public interface Resource { + + String getName(); + + IOzoneManagerLock.ResourceManager getResourceManager(); +} diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestKeyPathLock.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestKeyPathLock.java index 53fdc659883a..c5d50ebdbd9a 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestKeyPathLock.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestKeyPathLock.java @@ -39,8 +39,8 @@ class TestKeyPathLock extends TestOzoneManagerLock { private static final Logger LOG = LoggerFactory.getLogger(TestKeyPathLock.class); - private final OzoneManagerLock.LeveledResource resource = - OzoneManagerLock.LeveledResource.KEY_PATH_LOCK; + private final LeveledResource resource = + LeveledResource.KEY_PATH_LOCK; @Test void testKeyPathLockMultiThreading() throws Exception { @@ -224,8 +224,8 @@ private void testDiffKeyPathWriteLockMultiThreadingUtil( @Test void testAcquireWriteBucketLockWhileAcquiredWriteKeyPathLock() { - OzoneManagerLock.LeveledResource higherResource = - OzoneManagerLock.LeveledResource.BUCKET_LOCK; + LeveledResource higherResource = + LeveledResource.BUCKET_LOCK; String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); @@ -246,8 +246,8 @@ void testAcquireWriteBucketLockWhileAcquiredWriteKeyPathLock() { @Test void testAcquireWriteBucketLockWhileAcquiredReadKeyPathLock() { - OzoneManagerLock.LeveledResource higherResource = - OzoneManagerLock.LeveledResource.BUCKET_LOCK; + LeveledResource higherResource = + LeveledResource.BUCKET_LOCK; String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); @@ -268,8 +268,8 @@ void testAcquireWriteBucketLockWhileAcquiredReadKeyPathLock() { @Test void testAcquireReadBucketLockWhileAcquiredReadKeyPathLock() { - OzoneManagerLock.LeveledResource higherResource = - 
OzoneManagerLock.LeveledResource.BUCKET_LOCK; + LeveledResource higherResource = + LeveledResource.BUCKET_LOCK; String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); @@ -290,8 +290,8 @@ void testAcquireReadBucketLockWhileAcquiredReadKeyPathLock() { @Test void testAcquireReadBucketLockWhileAcquiredWriteKeyPathLock() { - OzoneManagerLock.LeveledResource higherResource = - OzoneManagerLock.LeveledResource.BUCKET_LOCK; + LeveledResource higherResource = + LeveledResource.BUCKET_LOCK; String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java index a1d853eb6b39..652f586f69ea 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java @@ -35,9 +35,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.metrics2.MetricsRecord; import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl; -import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock.Resource; -import org.apache.hadoop.ozone.om.lock.OzoneManagerLock.FlatResource; -import org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.CsvSource; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingServiceIntegrationTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingServiceIntegrationTest.java index f4c83fc08a5f..8d8bde304dc4 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingServiceIntegrationTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDeletingServiceIntegrationTest.java @@ -23,7 +23,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SNAPSHOT_DELETING_SERVICE_TIMEOUT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DEEP_CLEANING_ENABLED; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.FlatResource.SNAPSHOT_GC_LOCK; +import static org.apache.hadoop.ozone.om.lock.FlatResource.SNAPSHOT_GC_LOCK; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java index d6775b0495a9..d1c035130b9d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INTERNAL_ERROR; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index 
e458fa73236a..00a8d4138ea4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -76,7 +76,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.SCM_GET_PIPELINE_EXCEPTION; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.KEY; import static org.apache.hadoop.ozone.util.MetricUtil.captureLatencyNs; import static org.apache.hadoop.util.Time.monotonicNow; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java index 426e7b73ec4f..b12b7ba8bcd7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import java.io.Closeable; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java index 7b5fe844d6ae..28769f75409c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java @@ -27,7 +27,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.ROCKSDB_SST_SUFFIX; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_KEY; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.FlatResource.SNAPSHOT_DB_LOCK; +import static org.apache.hadoop.ozone.om.lock.FlatResource.SNAPSHOT_DB_LOCK; import static org.apache.hadoop.ozone.om.snapshot.OMDBCheckpointUtils.includeSnapshotData; import static org.apache.hadoop.ozone.om.snapshot.OMDBCheckpointUtils.logEstimatedTarballSize; import static org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils.DATA_PREFIX; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 56e51cf4026e..ad77b921af9b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -94,8 +94,8 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_SUPPORTED_OPERATION; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PERMISSION_DENIED; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import static 
org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer.RaftServerStatus.LEADER_AND_READY; import static org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer.getRaftGroupIdFromOmServiceId; import static org.apache.hadoop.ozone.om.s3.S3SecretStoreConfigurationKeys.DEFAULT_SECRET_STORAGE_TYPE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java index f6615b92f2d3..e7c70ecc808c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java @@ -23,7 +23,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_PATH_IN_ACL_REQUEST; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PREFIX_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.PREFIX_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.PREFIX_LOCK; import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.PREFIX; import com.google.common.annotations.VisibleForTesting; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java index 436593b861b6..9747bb7c8942 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.SNAPSHOT_DEFRAG_LIMIT_PER_TASK; import static org.apache.hadoop.ozone.om.OMConfigKeys.SNAPSHOT_DEFRAG_LIMIT_PER_TASK_DEFAULT; -import 
static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.FlatResource.SNAPSHOT_GC_LOCK; +import static org.apache.hadoop.ozone.om.lock.FlatResource.SNAPSHOT_GC_LOCK; import com.google.common.annotations.VisibleForTesting; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java index b94fd45bf7fb..522ea7df6de5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.SNAPSHOT_SST_DELETING_LIMIT_PER_TASK; import static org.apache.hadoop.ozone.om.OMConfigKeys.SNAPSHOT_SST_DELETING_LIMIT_PER_TASK_DEFAULT; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.FlatResource.SNAPSHOT_DB_LOCK; +import static org.apache.hadoop.ozone.om.lock.FlatResource.SNAPSHOT_DB_LOCK; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getColumnFamilyToKeyPrefixMap; import com.google.common.annotations.VisibleForTesting; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java index 0f1be7855788..812d5054b2e6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java @@ -18,8 +18,8 @@ package org.apache.hadoop.ozone.om; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.USER_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static 
org.apache.hadoop.ozone.om.lock.LeveledResource.USER_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/OBSKeyPathLockStrategy.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/OBSKeyPathLockStrategy.java index c715856db80f..9aaddd1efc48 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/OBSKeyPathLockStrategy.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/OBSKeyPathLockStrategy.java @@ -17,8 +17,8 @@ package org.apache.hadoop.ozone.om.lock; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.KEY_PATH_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.KEY_PATH_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/RegularBucketLockStrategy.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/RegularBucketLockStrategy.java index 76071a408b4a..b8116bc4c607 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/RegularBucketLockStrategy.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/RegularBucketLockStrategy.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.lock; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import org.apache.hadoop.ozone.om.OMMetadataManager; diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java index 838ee3be86ca..6f7e6975e295 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java @@ -21,8 +21,8 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_ALREADY_EXISTS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.om.helpers.OzoneAclUtil.getDefaultAclList; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java index 4d802f9078e1..d1e0d7237885 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java @@ -19,8 +19,8 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.CONTAINS_SNAPSHOT; -import static 
org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetOwnerRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetOwnerRequest.java index e60d5019ff41..4d57b22bed99 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetOwnerRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetOwnerRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.bucket; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java index 270b95d06da3..2e27c3db0b72 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.bucket; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import 
com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java index 565c6e4854d8..6c2a8987f2bc 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.bucket.acl; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java index 4e6ac64edcd2..2f6cf761a620 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.file; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS; import static 
org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java index 5adcfec9617c..f0f5b7aa1ea0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.file; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java index b8812ddda99b..c0cdc5dc28ff 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.file; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static 
org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java index d04c9f421e47..5392d28269cd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.file; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java index 5c96ae67fbe7..f05915bfc0a3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java @@ -24,7 +24,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_ALREADY_CLOSED; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static 
org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_UNDER_LEASE_SOFT_LIMIT_PERIOD; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.HBASE_SUPPORT; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.RecoverLease; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java index 7c5660b93806..b172aa2318dd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_UNDER_LEASE_RECOVERY; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java index 799983cbe441..1d3e4bdb509d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java @@ -19,7 +19,7 @@ import static 
org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_UNDER_LEASE_RECOVERY; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import com.google.common.base.Preconditions; import jakarta.annotation.Nonnull; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java index ed6185141d6d..0ed6fa42ba04 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.hdds.HddsUtils.fromProtobuf; import static org.apache.hadoop.ozone.OzoneConsts.DELETED_HSYNC_KEY; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.validatePreviousSnapshotId; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java index 31f1d9d71801..3902af8fd2d4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java @@ -22,7 +22,7 @@ import static 
org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_UNDER_LEASE_RECOVERY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_SUPPORTED_OPERATION; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java index a23716d40d1c..cc86709a1a98 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java @@ -20,7 +20,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_ALREADY_CLOSED; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_UNDER_LEASE_RECOVERY; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import com.google.common.annotations.VisibleForTesting; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java index aa1338fa8cfa..b3a7e2bc547a 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.key; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java index 5c2065356c0c..e7fdc0db0c16 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.DELETED_HSYNC_KEY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.util.MetricUtil.captureLatencyNs; import com.google.common.base.Preconditions; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java index 75b5966e005e..809ef4c74f70 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java @@ -20,7 +20,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.DELETED_HSYNC_KEY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_EMPTY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.util.MetricUtil.captureLatencyNs; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java index 5fe932c4a094..6134b3e40fbb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.key; import static org.apache.hadoop.hdds.HddsUtils.fromProtobuf; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.validatePreviousSnapshotId; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java index f1d71d99fdfe..00a5301d8434 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.key; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java index 8163b902dbb5..b3098e518957 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java @@ -20,7 +20,7 @@ import static org.apache.hadoop.ozone.OmUtils.normalizeKey; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.RENAME_OPEN_FILE; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java index 2317a4815910..9c9da2bd88ba 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java @@ -28,7 +28,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.om.helpers.OzoneAclUtil.getDefaultAclList; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.util.Time.monotonicNow; import com.google.common.base.Preconditions; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java index 353a17757025..8444b1853ac3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.key; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequestWithFSO.java index 009bcd1662c1..23acba85f3de 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequestWithFSO.java @@ -18,7 +18,7 @@ package 
org.apache.hadoop.ozone.om.request.key; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java index 427b2978f9c6..305e23ff5b4f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java @@ -26,7 +26,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.UNDELETED_KEYS_LIST; import static org.apache.hadoop.ozone.OzoneConsts.VOLUME; import static org.apache.hadoop.ozone.audit.OMAction.DELETE_KEYS; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.PARTIAL_DELETE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java index ef4d64b27c9b..c2921323d636 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java @@ -19,7 +19,7 @@ import static 
org.apache.hadoop.ozone.OzoneConsts.RENAMED_KEYS_MAP; import static org.apache.hadoop.ozone.OzoneConsts.UNRENAMED_KEYS_MAP; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.PARTIAL_RENAME; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMOpenKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMOpenKeysDeleteRequest.java index 3997f1a67782..5ac9c3c93d1e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMOpenKeysDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMOpenKeysDeleteRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.key; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java index 67a7f8a626b7..2dec9e910a60 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.key.acl; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static 
org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequestWithFSO.java index f32a22b17329..926ffdb694c0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequestWithFSO.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.key.acl; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java index 5e1f3513564a..334ee51b6e4a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.key.acl.prefix; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.PREFIX_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.PREFIX_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java index 5f7d01d9a733..29d7cfbfcc1f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.util.ArrayList; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java index ac27d0e0a173..fdd370c4bb8f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java index 7e781635603b..dabec5043e7e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS; import com.google.common.base.Preconditions; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java index 1e64edfb5be3..fbe219ecfcea 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java 
index 5f715ded0b1b..5c57ce1e06d9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import com.google.common.annotations.VisibleForTesting; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java index 6d41da38087c..5b8a286fee68 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import jakarta.annotation.Nullable; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java index 12c4ce13de5c..a884673d042d 100644 
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.s3.tagging; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java index b40e6c1f8a42..1e5e6bdac59d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.s3.tagging; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.util.Map; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java index 23d7a40f26de..524f52355c9a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.s3.tagging; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java index 05a45322c599..cbad899e2d75 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.s3.tagging; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.util.Map; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignAdminRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignAdminRequest.java index 08979e4e8090..2501dfd150e0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignAdminRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignAdminRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.s3.tenant; 
-import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA; import com.google.common.base.Preconditions; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignUserAccessIdRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignUserAccessIdRequest.java index bb2c1a187856..84129c0039f3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignUserAccessIdRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignUserAccessIdRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.s3.tenant; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_MAXIMUM_ACCESS_ID_LENGTH; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA; import com.google.common.base.Preconditions; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantCreateRequest.java index 3732f074bb0c..47432b1bb73e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantCreateRequest.java @@ -20,8 +20,8 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TENANT_ALREADY_EXISTS; import static 
org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.USER_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_ALREADY_EXISTS; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.USER_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.USER_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA; import com.google.common.base.Preconditions; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantDeleteRequest.java index bcb7e010bf8e..1baa18587474 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantDeleteRequest.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TENANT_NOT_EMPTY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TENANT_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA; import com.google.common.base.Preconditions; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeAdminRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeAdminRequest.java index 469eed85e8bc..ae332c2e719e 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeAdminRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeAdminRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.s3.tenant; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA; import com.google.common.base.Preconditions; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java index 2458c8656256..7e814bac5a5e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.s3.tenant; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA; import com.google.common.base.Preconditions; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java index 6211d4114005..37e67f89a2b8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java @@ -20,8 +20,8 @@ import static org.apache.hadoop.hdds.HddsUtils.fromProtobuf; import static org.apache.hadoop.hdds.HddsUtils.toProtobuf; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.SNAPSHOT_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.SNAPSHOT_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.FILESYSTEM_SNAPSHOT; import com.google.protobuf.ByteString; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java index 3f8bae61c530..11e172040ffa 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java @@ -18,8 +18,8 @@ package org.apache.hadoop.ozone.om.request.snapshot; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.SNAPSHOT_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.SNAPSHOT_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.FILESYSTEM_SNAPSHOT; import java.io.IOException; diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java index 7a4cdc640dce..ed2543def3d0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java @@ -19,8 +19,8 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.SNAPSHOT_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.SNAPSHOT_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.FILESYSTEM_SNAPSHOT; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMQuotaRepairRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMQuotaRepairRequest.java index 819edf6c01f5..c3ca72f71a15 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMQuotaRepairRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMQuotaRepairRequest.java @@ -19,8 +19,8 @@ import static org.apache.hadoop.ozone.OzoneConsts.OLD_QUOTA_DEFAULT; import static org.apache.hadoop.ozone.OzoneConsts.QUOTA_RESET; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; -import static 
org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java index 1397f8a1b9f3..8889b9b0fa53 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java @@ -18,8 +18,8 @@ package org.apache.hadoop.ozone.om.request.volume; import static org.apache.hadoop.ozone.om.helpers.OzoneAclUtil.getDefaultAclList; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.USER_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.USER_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java index 943a5ea5700c..68ac2690085c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java @@ -17,8 +17,8 @@ package org.apache.hadoop.ozone.om.request.volume; -import static 
org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.USER_LOCK; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.USER_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java index f9a6fa303590..862577683909 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.volume; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java index b4279eac2d4d..d7092d0b2b49 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.volume; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import 
com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java index 88d786cdd204..7181fa8eca89 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.volume.acl; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java index 267547bc1e54..3797b3fcf2eb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.response.snapshot; import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.SNAPSHOT_INFO_TABLE; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.FlatResource.SNAPSHOT_DB_LOCK; +import static org.apache.hadoop.ozone.om.lock.FlatResource.SNAPSHOT_DB_LOCK; import com.google.common.annotations.VisibleForTesting; import jakarta.annotation.Nonnull; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java index 75e9a20cdf12..db44337ee411 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDeletingService.java @@ -22,7 +22,7 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_KEY_DELETING_LIMIT_PER_TASK_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.SNAPSHOT_DELETING_LIMIT_PER_TASK; import static org.apache.hadoop.ozone.om.OMConfigKeys.SNAPSHOT_DELETING_LIMIT_PER_TASK_DEFAULT; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.FlatResource.SNAPSHOT_GC_LOCK; +import static org.apache.hadoop.ozone.om.lock.FlatResource.SNAPSHOT_GC_LOCK; import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.ServiceException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java index 525877306965..ec19cd94b549 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java @@ -26,8 +26,8 @@ import java.util.stream.Collectors; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; -import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock.Resource; import org.apache.hadoop.ozone.om.lock.OMLockDetails; +import org.apache.hadoop.ozone.om.lock.Resource; /** * Class to take multiple locks on multiple snapshots. 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java index 27c29b534495..6867f819b9c3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotCache.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.snapshot; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.FlatResource.SNAPSHOT_DB_LOCK; +import static org.apache.hadoop.ozone.om.lock.FlatResource.SNAPSHOT_DB_LOCK; import static org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.COLUMN_FAMILIES_TO_TRACK_IN_DAG; import com.google.common.annotations.VisibleForTesting; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java index 5dc78e708fcb..7d227dfb641c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/filter/ReclaimableFilter.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.snapshot.filter; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.FlatResource.SNAPSHOT_GC_LOCK; +import static org.apache.hadoop.ozone.om.lock.FlatResource.SNAPSHOT_GC_LOCK; import java.io.Closeable; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java index 
54087fa64dc1..05385ddc5bad 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.key; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.getOmKeyInfo; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestMultiSnapshotLocks.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestMultiSnapshotLocks.java index 9c358a9261b3..cbff0398882b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestMultiSnapshotLocks.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestMultiSnapshotLocks.java @@ -36,8 +36,8 @@ import java.util.UUID; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; +import org.apache.hadoop.ozone.om.lock.LeveledResource; import org.apache.hadoop.ozone.om.lock.OMLockDetails; -import org.apache.hadoop.ozone.om.lock.OzoneManagerLock; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -54,7 +54,7 @@ public class TestMultiSnapshotLocks { private IOzoneManagerLock mockLock; @Mock - private OzoneManagerLock.LeveledResource mockResource; + private LeveledResource mockResource; private MultiSnapshotLocks 
multiSnapshotLocks; private UUID obj1 = UUID.randomUUID(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java index 6fbc09eb89c8..9406d74c5ff6 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotCache.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.snapshot; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.FlatResource.SNAPSHOT_DB_LOCK; +import static org.apache.hadoop.ozone.om.lock.FlatResource.SNAPSHOT_DB_LOCK; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertInstanceOf; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/AbstractReclaimableFilterTest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/AbstractReclaimableFilterTest.java index 13ba79a77f82..ef97975ca8ec 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/AbstractReclaimableFilterTest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/AbstractReclaimableFilterTest.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY; -import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.FlatResource.SNAPSHOT_GC_LOCK; +import static org.apache.hadoop.ozone.om.lock.FlatResource.SNAPSHOT_GC_LOCK; import static org.mockito.Mockito.CALLS_REAL_METHODS; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyList; From 
24da3ebee1d9633995aba4fb7c27f9a903b71d15 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 15 Oct 2025 12:20:44 -0400 Subject: [PATCH 36/97] HDDS-13797. Update interface Change-Id: I5ffd4cff6028b50c8d75ea9b3885c1e9818fe968 --- .../lock/HierachicalResourceLockManager.java | 30 +++++++++++++++---- 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/HierachicalResourceLockManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/HierachicalResourceLockManager.java index 94eba94d5a80..0cc8df45e2c7 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/HierachicalResourceLockManager.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/HierachicalResourceLockManager.java @@ -17,22 +17,40 @@ package org.apache.hadoop.ozone.om.lock; +import java.io.Closeable; +import java.io.IOException; + /** * Interface for Hierachical Resource Lock where the lock order acquired on resource is going to be deterministic and * there is no cyclic lock ordering on resources. * Typically, this can be used for locking elements which form a DAG like structure.(E.g. FSO tree, Snapshot chain etc.) */ -public interface HierachicalResourceLockManager { +public interface HierachicalResourceLockManager extends AutoCloseable { - HierarchicalResourceLock acquireLock(Resource resource, String key); + /** + * Acquires a read lock on the specified resource using the provided key. 
+ * + * @param resource the resource on which the read lock is to be acquired + * @param key a unique identifier used for managing the lock + * @return a {@code HierarchicalResourceLock} interface to manage the lifecycle of the acquired lock + * @throws IOException if an I/O error occurs during the process of acquiring the lock + */ + HierarchicalResourceLock acquireReadLock(FlatResource resource, String key) throws IOException; + + /** + * Acquires a write lock on the specified resource using the provided key. + * + * @param resource the resource on which the write lock is to be acquired + * @param key a unique identifier used for managing the lock + * @return a {@code HierarchicalResourceLock} interface to manage the lifecycle of the acquired lock + * @throws IOException if an I/O error occurs during the process of acquiring the lock + */ + HierarchicalResourceLock acquireWriteLock(FlatResource resource, String key) throws IOException; /** * Interface for managing the lock lifecycle corresponding to a Hierarchical Resource. */ - interface HierarchicalResourceLock extends AutoCloseable { + interface HierarchicalResourceLock extends Closeable { boolean isLockAcquired(); - - @Override - void close(); } } From 8f3774a2045a27a8cc91adfbfb1178df977f7192 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 15 Oct 2025 12:39:32 -0400 Subject: [PATCH 37/97] HDDS-13798. 
Implement PoolBasedHierarchicalResourceLockManager for Hierarchical Resource Change-Id: Iabeb0c8a90500ed9f6a57e232470d20f7c7251bf --- hadoop-ozone/common/pom.xml | 4 + .../apache/hadoop/ozone/om/OMConfigKeys.java | 8 + ...lBasedHierarchicalResourceLockManager.java | 204 +++++++ ...adOnlyHierarchicalResourceLockManager.java | 64 ++ ...lBasedHierarchicalResourceLockManager.java | 576 ++++++++++++++++++ .../hadoop/ozone/om/OMMetadataManager.java | 6 + .../ozone/om/OmMetadataManagerImpl.java | 13 + 7 files changed, 875 insertions(+) create mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/PoolBasedHierarchicalResourceLockManager.java create mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/ReadOnlyHierarchicalResourceLockManager.java create mode 100644 hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml index cb082b9d6c44..1ecafebb8b3f 100644 --- a/hadoop-ozone/common/pom.xml +++ b/hadoop-ozone/common/pom.xml @@ -77,6 +77,10 @@ org.apache.commons commons-lang3 + + org.apache.commons + commons-pool2 + org.apache.hadoop hadoop-common diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java index 969288ed92c8..254a49ea9a99 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java @@ -674,6 +674,14 @@ public final class OMConfigKeys { "ozone.om.snapshot.compact.non.snapshot.diff.tables"; public static final boolean OZONE_OM_SNAPSHOT_COMPACT_NON_SNAPSHOT_DIFF_TABLES_DEFAULT = false; + public static final String OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT = + "ozone.om.hierarchical.resource.locks.soft.limit"; + public static final int 
OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT_DEFAULT = 1024; + + public static final String OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT = + "ozone.om.hierarchical.resource.locks.hard.limit"; + public static final int OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT_DEFAULT = 10000; + /** * Never constructed. */ diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/PoolBasedHierarchicalResourceLockManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/PoolBasedHierarchicalResourceLockManager.java new file mode 100644 index 000000000000..19d2dacb32da --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/PoolBasedHierarchicalResourceLockManager.java @@ -0,0 +1,204 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.lock; + +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT_DEFAULT; + +import com.google.common.base.Preconditions; +import java.io.Closeable; +import java.io.IOException; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.Consumer; +import org.apache.commons.pool2.BasePooledObjectFactory; +import org.apache.commons.pool2.PooledObject; +import org.apache.commons.pool2.impl.DefaultPooledObject; +import org.apache.commons.pool2.impl.GenericObjectPool; +import org.apache.commons.pool2.impl.GenericObjectPoolConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; + +/** + * A lock manager implementation that manages hierarchical resource locks + * using a pool of reusable {@link ReadWriteLock} instances. The implementation + * ensures deterministic lock ordering for resources, avoiding cyclic + * lock dependencies, and is typically useful for structures like + * DAGs (e.g., File System trees or snapshot chains). 
+ */ +public class PoolBasedHierarchicalResourceLockManager implements HierachicalResourceLockManager { + private final GenericObjectPool lockPool; + private final Map> lockMap; + + public PoolBasedHierarchicalResourceLockManager(OzoneConfiguration conf) { + int softLimit = conf.getInt(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT, + OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT_DEFAULT); + int hardLimit = conf.getInt(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT, + OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT_DEFAULT); + GenericObjectPoolConfig config = new GenericObjectPoolConfig<>(); + config.setMaxIdle(softLimit); + config.setMaxTotal(hardLimit); + config.setBlockWhenExhausted(true); + this.lockPool = new GenericObjectPool<>(new ReadWriteLockFactory(), config); + this.lockMap = new ConcurrentHashMap<>(); + } + + private ReadWriteLock operateOnLock(FlatResource resource, String key, Consumer function) + throws IOException { + AtomicReference exception = new AtomicReference<>(); + Map resourceLockMap = + this.lockMap.computeIfAbsent(resource, k -> new ConcurrentHashMap<>()); + LockReferenceCountPair lockRef = resourceLockMap.compute(key, (k, v) -> { + if (v == null) { + try { + ReadWriteLock readWriteLock = this.lockPool.borrowObject(); + v = new LockReferenceCountPair(readWriteLock); + } catch (Exception e) { + exception.set(new IOException("Exception while initializing lock object.", e)); + return null; + } + } + function.accept(v); + Preconditions.checkState(v.getCount() >= 0); + if (v.getCount() == 0) { + this.lockPool.returnObject(v.getLock()); + return null; + } + return v; + }); + if (exception.get() != null) { + throw exception.get(); + } + return lockRef == null ? 
null : lockRef.getLock(); + } + + @Override + public HierarchicalResourceLock acquireReadLock(FlatResource resource, String key) throws IOException { + return acquireLock(resource, key, true); + } + + @Override + public HierarchicalResourceLock acquireWriteLock(FlatResource resource, String key) throws IOException { + return acquireLock(resource, key, false); + } + + private HierarchicalResourceLock acquireLock(FlatResource resource, String key, boolean isReadLock) + throws IOException { + ReadWriteLock readWriteLock = operateOnLock(resource, key, LockReferenceCountPair::increment); + if (readWriteLock == null) { + throw new IOException("Unable to acquire " + (isReadLock ? "read" : "write") + " lock on resource " + + resource + " and key " + key); + } + return new PoolBasedHierarchicalResourceLock(resource, key, + isReadLock ? readWriteLock.readLock() : readWriteLock.writeLock()); + } + + @Override + public void close() { + this.lockPool.close(); + } + + /** + * Represents a hierarchical resource lock mechanism that operates + * using a resource pool for acquiring and releasing locks. This class + * provides thread-safe management of read and write locks associated + * with specific hierarchical resources. + * + * A lock can either be a read lock or a write lock. This is determined + * at the time of instantiation. The lifecycle of the lock is managed + * through this class, and the lock is automatically released when the + * `close` method is invoked. + * + * This is designed to work in conjunction with the containing manager + * class, {@code PoolBasedHierarchicalResourceLockManager}, which oversees + * the lifecycle of multiple such locks. 
+ */ + public class PoolBasedHierarchicalResourceLock implements HierarchicalResourceLock, Closeable { + + private boolean isLockAcquired; + private final Lock lock; + private final FlatResource resource; + private final String key; + + public PoolBasedHierarchicalResourceLock(FlatResource resource, String key, Lock lock) { + this.isLockAcquired = true; + this.lock = lock; + this.resource = resource; + this.key = key; + this.lock.lock(); + } + + @Override + public boolean isLockAcquired() { + return isLockAcquired; + } + + @Override + public synchronized void close() throws IOException { + if (isLockAcquired) { + this.lock.unlock(); + operateOnLock(resource, key, (LockReferenceCountPair::decrement)); + isLockAcquired = false; + } + } + } + + private static final class LockReferenceCountPair { + private int count; + private ReadWriteLock lock; + + private LockReferenceCountPair(ReadWriteLock lock) { + this.count = 0; + this.lock = lock; + } + + private void increment() { + count++; + } + + private void decrement() { + count--; + } + + private int getCount() { + return count; + } + + private ReadWriteLock getLock() { + return lock; + } + } + + private static class ReadWriteLockFactory extends BasePooledObjectFactory { + + @Override + public ReadWriteLock create() throws Exception { + return new ReentrantReadWriteLock(); + } + + @Override + public PooledObject wrap(ReadWriteLock obj) { + return new DefaultPooledObject<>(obj); + } + } +} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/ReadOnlyHierarchicalResourceLockManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/ReadOnlyHierarchicalResourceLockManager.java new file mode 100644 index 000000000000..4bf2065a0bc6 --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/ReadOnlyHierarchicalResourceLockManager.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license 
agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.lock; + +import java.io.IOException; + +/** + * A read only lock manager that does not acquire any lock. + */ +public class ReadOnlyHierarchicalResourceLockManager implements HierachicalResourceLockManager { + + private static final HierarchicalResourceLock EMPTY_LOCK_ACQUIRED = new HierarchicalResourceLock() { + @Override + public boolean isLockAcquired() { + return true; + } + + @Override + public void close() { + + } + }; + + private static final HierarchicalResourceLock EMPTY_LOCK_NOT_ACQUIRED = new HierarchicalResourceLock() { + @Override + public boolean isLockAcquired() { + return true; + } + + @Override + public void close() { + } + }; + + @Override + public HierarchicalResourceLock acquireReadLock(FlatResource resource, String key) throws IOException { + return EMPTY_LOCK_ACQUIRED; + } + + @Override + public HierarchicalResourceLock acquireWriteLock(FlatResource resource, String key) throws IOException { + return EMPTY_LOCK_NOT_ACQUIRED; + } + + @Override + public void close() throws Exception { + + } +} diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java 
b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java new file mode 100644 index 000000000000..319bf8a4245c --- /dev/null +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java @@ -0,0 +1,576 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.lock; + +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT_DEFAULT; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.om.lock.HierachicalResourceLockManager.HierarchicalResourceLock; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; +import org.junit.jupiter.params.provider.ValueSource; + +/** + * Test class for {@link PoolBasedHierarchicalResourceLockManager}. 
+ * + * This class tests the functionality of the pool-based hierarchical resource lock manager, + * including basic lock operations, concurrency scenarios, resource pool management, + * and error conditions. + */ +public class TestPoolBasedHierarchicalResourceLockManager { + + private PoolBasedHierarchicalResourceLockManager lockManager; + private OzoneConfiguration conf; + + @BeforeEach + public void setUp() { + conf = new OzoneConfiguration(); + lockManager = new PoolBasedHierarchicalResourceLockManager(conf); + } + + @AfterEach + public void tearDown() { + if (lockManager != null) { + lockManager.close(); + } + } + + /** + * Test basic read lock acquisition and release. + */ + @Test + public void testBasicReadLockAcquisition() throws Exception { + String key = "test-key-1"; + + try (HierarchicalResourceLock lock = lockManager.acquireReadLock(FlatResource.SNAPSHOT_GC_LOCK, key)) { + assertNotNull(lock); + assertTrue(lock.isLockAcquired()); + } + } + + /** + * Test basic write lock acquisition and release. + */ + @Test + public void testBasicWriteLockAcquisition() throws Exception { + String key = "test-key-2"; + + try (HierarchicalResourceLock lock = lockManager.acquireWriteLock(FlatResource.SNAPSHOT_DB_LOCK, key)) { + assertNotNull(lock); + assertTrue(lock.isLockAcquired()); + } + } + + /** + * Test multiple read locks can be acquired on the same resource. + */ + @Test + public void testMultipleReadLocks() throws Exception { + String key = "test-key-3"; + + try (HierarchicalResourceLock lock1 = lockManager.acquireReadLock(FlatResource.SNAPSHOT_GC_LOCK, key); + HierarchicalResourceLock lock2 = lockManager.acquireReadLock(FlatResource.SNAPSHOT_GC_LOCK, key)) { + + assertNotNull(lock1); + assertNotNull(lock2); + assertTrue(lock1.isLockAcquired()); + assertTrue(lock2.isLockAcquired()); + } + } + + /** + * Test write lock exclusivity - only one write lock can be acquired at a time. 
+ */ + @Test + @Timeout(10) + public void testWriteLockExclusivity() throws Exception { + String key = "test-key-4"; + CountDownLatch latch1 = new CountDownLatch(1); + CountDownLatch latch2 = new CountDownLatch(1); + AtomicBoolean secondLockAcquired = new AtomicBoolean(false); + + ExecutorService executor = Executors.newFixedThreadPool(2); + + try { + // First thread acquires write lock + CompletableFuture future1 = CompletableFuture.runAsync(() -> { + try (HierarchicalResourceLock lock = lockManager.acquireWriteLock(FlatResource.SNAPSHOT_DB_LOCK, key)) { + latch1.countDown(); + // Hold lock for a short time + Thread.sleep(100); + } catch (Exception e) { + fail("First thread failed to acquire lock: " + e.getMessage()); + } + }, executor); + + // Wait for first lock to be acquired + latch1.await(); + + // Second thread tries to acquire write lock + CompletableFuture future2 = CompletableFuture.runAsync(() -> { + try (HierarchicalResourceLock lock = lockManager.acquireWriteLock(FlatResource.SNAPSHOT_DB_LOCK, key)) { + secondLockAcquired.set(true); + latch2.countDown(); + } catch (Exception e) { + fail("Second thread failed to acquire lock: " + e.getMessage()); + } + }, executor); + + // Wait for both threads to complete + future1.get(5, TimeUnit.SECONDS); + future2.get(5, TimeUnit.SECONDS); + + // Second lock should have been acquired after first was released + assertTrue(secondLockAcquired.get()); + + } finally { + executor.shutdown(); + } + } + + /** + * Test read-write lock interaction - write lock blocks read locks. 
+ */ + @Test + @Timeout(10) + public void testReadWriteLockInteraction() throws Exception { + String key = "test-key-5"; + CountDownLatch writeLockAcquired = new CountDownLatch(1); + CountDownLatch readLockAcquired = new CountDownLatch(1); + AtomicBoolean readLockBlocked = new AtomicBoolean(false); + + ExecutorService executor = Executors.newFixedThreadPool(2); + + try { + // First thread acquires write lock + CompletableFuture future1 = CompletableFuture.runAsync(() -> { + try (HierarchicalResourceLock lock = lockManager.acquireWriteLock(FlatResource.SNAPSHOT_GC_LOCK, key)) { + writeLockAcquired.countDown(); + // Hold lock for a short time + Thread.sleep(200); + } catch (Exception e) { + fail("Write lock acquisition failed: " + e.getMessage()); + } + }, executor); + + // Wait for write lock to be acquired + writeLockAcquired.await(); + + // Second thread tries to acquire read lock + CompletableFuture future2 = CompletableFuture.runAsync(() -> { + try { + // This should block until write lock is released + readLockBlocked.set(true); + try (HierarchicalResourceLock lock = lockManager.acquireReadLock(FlatResource.SNAPSHOT_GC_LOCK, key)) { + readLockAcquired.countDown(); + } + } catch (Exception e) { + fail("Read lock acquisition failed: " + e.getMessage()); + } + }, executor); + + // Wait for both threads to complete + future1.get(5, TimeUnit.SECONDS); + future2.get(5, TimeUnit.SECONDS); + + assertTrue(readLockBlocked.get()); + assertEquals(0, readLockAcquired.getCount()); + + } finally { + executor.shutdown(); + } + } + + /** + * Test lock state after closing. + */ + @Test + public void testLockStateAfterClose() throws Exception { + String key = "test-key-6"; + + HierarchicalResourceLock lock = lockManager.acquireReadLock(FlatResource.SNAPSHOT_DB_LOCK, key); + assertTrue(lock.isLockAcquired()); + + lock.close(); + assertFalse(lock.isLockAcquired()); + } + + /** + * Test double close doesn't cause issues. 
+ */ + @Test + public void testDoubleClose() throws Exception { + String key = "test-key-7"; + + HierarchicalResourceLock lock = lockManager.acquireWriteLock(FlatResource.SNAPSHOT_GC_LOCK, key); + assertTrue(lock.isLockAcquired()); + + // First close + lock.close(); + assertFalse(lock.isLockAcquired()); + + // Second close should not throw exception + lock.close(); + assertFalse(lock.isLockAcquired()); + } + + /** + * Test different resource types can be locked independently. + */ + @ParameterizedTest + @EnumSource(FlatResource.class) + public void testDifferentResourceTypes(FlatResource resource) throws Exception { + String key = "test-key-" + resource.name(); + + try (HierarchicalResourceLock lock = lockManager.acquireWriteLock(resource, key)) { + assertNotNull(lock); + assertTrue(lock.isLockAcquired()); + } + } + + + /** + * Test different keys on same resource type can be locked concurrently. + */ + @Test + public void testDifferentKeysOnSameResource() throws Exception { + String key1 = "test-key-8a"; + String key2 = "test-key-8b"; + + try (HierarchicalResourceLock lock1 = lockManager.acquireWriteLock(FlatResource.SNAPSHOT_GC_LOCK, key1); + HierarchicalResourceLock lock2 = lockManager.acquireWriteLock(FlatResource.SNAPSHOT_GC_LOCK, key2)) { + + assertNotNull(lock1); + assertNotNull(lock2); + assertTrue(lock1.isLockAcquired()); + assertTrue(lock2.isLockAcquired()); + } + } + + /** + * Test configuration parameters are respected. 
+ */ + @Test + public void testConfigurationParameters() { + OzoneConfiguration customConf = new OzoneConfiguration(); + customConf.setInt(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT, 100); + customConf.setInt(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT, 500); + + try (PoolBasedHierarchicalResourceLockManager customLockManager = + new PoolBasedHierarchicalResourceLockManager(customConf)) { + + // Test that manager can be created with custom configuration + assertNotNull(customLockManager); + + // Basic functionality test with custom configuration + try (HierarchicalResourceLock lock = customLockManager.acquireReadLock(FlatResource.SNAPSHOT_DB_LOCK, "test")) { + assertTrue(lock.isLockAcquired()); + } catch (Exception e) { + fail("Lock acquisition failed with custom configuration: " + e.getMessage()); + } + } + } + + /** + * Test default configuration values. + */ + @Test + public void testDefaultConfiguration() { + OzoneConfiguration defaultConf = new OzoneConfiguration(); + + // Verify default values + assertEquals(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT_DEFAULT, + defaultConf.getInt(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT, + OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT_DEFAULT)); + assertEquals(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT_DEFAULT, + defaultConf.getInt(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT, + OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT_DEFAULT)); + + try (PoolBasedHierarchicalResourceLockManager defaultLockManager = + new PoolBasedHierarchicalResourceLockManager(defaultConf)) { + assertNotNull(defaultLockManager); + } + } + + /** + * Test concurrent access with multiple threads. 
+ */ + @Test + @Timeout(30) + public void testConcurrentAccess() throws Exception { + int numThreads = 10; + int operationsPerThread = 50; + ExecutorService executor = Executors.newFixedThreadPool(numThreads); + CountDownLatch latch = new CountDownLatch(numThreads); + AtomicInteger successCount = new AtomicInteger(0); + AtomicReference exception = new AtomicReference<>(); + + try { + List> futures = new ArrayList<>(); + + for (int i = 0; i < numThreads; i++) { + final int threadId = i; + CompletableFuture future = CompletableFuture.runAsync(() -> { + try { + for (int j = 0; j < operationsPerThread; j++) { + String key = "thread-" + threadId + "-op-" + j; + FlatResource resource = FlatResource.values()[j % FlatResource.values().length]; + + // Randomly choose read or write lock + boolean isReadLock = (j % 2 == 0); + + try (HierarchicalResourceLock lock = isReadLock ? + lockManager.acquireReadLock(resource, key) : + lockManager.acquireWriteLock(resource, key)) { + + assertTrue(lock.isLockAcquired()); + + // Simulate some work + Thread.sleep(1); + + successCount.incrementAndGet(); + } + } + } catch (Exception e) { + exception.set(e); + } finally { + latch.countDown(); + } + }, executor); + + futures.add(future); + } + + // Wait for all threads to complete + latch.await(25, TimeUnit.SECONDS); + + // Check for exceptions + if (exception.get() != null) { + fail("Concurrent access test failed: " + exception.get().getMessage()); + } + + // Verify all operations succeeded + assertEquals(numThreads * operationsPerThread, successCount.get()); + + } finally { + executor.shutdown(); + } + } + + /** + * Test resource pool behavior under stress. 
+ */ + @Test + @Timeout(20) + public void testResourcePoolStress() throws Exception { + // Use smaller pool limits for stress testing + OzoneConfiguration stressConf = new OzoneConfiguration(); + stressConf.setInt(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT, 10); + stressConf.setInt(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT, 20); + + try (PoolBasedHierarchicalResourceLockManager stressLockManager = + new PoolBasedHierarchicalResourceLockManager(stressConf)) { + + int numThreads = 5; + int operationsPerThread = 20; + ExecutorService executor = Executors.newFixedThreadPool(numThreads); + CountDownLatch latch = new CountDownLatch(numThreads); + AtomicInteger successCount = new AtomicInteger(0); + AtomicReference exception = new AtomicReference<>(); + + try { + for (int i = 0; i < numThreads; i++) { + final int threadId = i; + executor.submit(() -> { + try { + for (int j = 0; j < operationsPerThread; j++) { + String key = "stress-" + threadId + "-" + j; + + try (HierarchicalResourceLock lock = + stressLockManager.acquireWriteLock(FlatResource.SNAPSHOT_GC_LOCK, key)) { + + assertTrue(lock.isLockAcquired()); + + // Hold lock for a bit to stress the pool + Thread.sleep(10); + + successCount.incrementAndGet(); + } + } + } catch (Exception e) { + exception.set(e); + } finally { + latch.countDown(); + } + }); + } + + // Wait for all threads to complete + latch.await(15, TimeUnit.SECONDS); + + // Check for exceptions + if (exception.get() != null) { + fail("Resource pool stress test failed: " + exception.get().getMessage()); + } + + // Verify all operations succeeded + assertEquals(numThreads * operationsPerThread, successCount.get()); + + } finally { + executor.shutdown(); + } + } + } + + /** + * Test manager close functionality. 
+ */ + @Test + public void testManagerClose() throws Exception { + String key = "test-key-close"; + + // Acquire a lock + HierarchicalResourceLock lock = lockManager.acquireReadLock(FlatResource.SNAPSHOT_DB_LOCK, key); + assertTrue(lock.isLockAcquired()); + + // Close the lock + lock.close(); + assertFalse(lock.isLockAcquired()); + + // Close the manager + lockManager.close(); + + // Manager should be closed gracefully + // Note: We don't test acquiring locks after manager close as behavior is undefined + } + + /** + * Test null key handling. + */ + @Test + public void testNullKey() { + assertThrows(NullPointerException.class, () -> { + lockManager.acquireReadLock(FlatResource.SNAPSHOT_GC_LOCK, null); + }); + } + + /** + * Test null resource handling. + */ + @Test + public void testNullResource() { + assertThrows(NullPointerException.class, () -> { + lockManager.acquireWriteLock(null, "test-key"); + }); + } + + /** + * Test empty key handling. + */ + @Test + public void testEmptyKey() throws Exception { + // Empty key should be allowed + try (HierarchicalResourceLock lock = lockManager.acquireReadLock(FlatResource.SNAPSHOT_GC_LOCK, "")) { + assertNotNull(lock); + assertTrue(lock.isLockAcquired()); + } + } + + /** + * Test various key formats. + */ + @ParameterizedTest + @ValueSource(strings = {"simple", "key-with-dashes", "key_with_underscores", + "key.with.dots", "key/with/slashes", "123456789", + "key with spaces", "very-long-key-name-that-exceeds-normal-length-expectations"}) + public void testVariousKeyFormats(String key) throws Exception { + try (HierarchicalResourceLock lock = lockManager.acquireWriteLock(FlatResource.SNAPSHOT_DB_LOCK, key)) { + assertNotNull(lock); + assertTrue(lock.isLockAcquired()); + } + } + + /** + * Test reentrant lock behavior - same thread can acquire multiple locks on same resource. 
+ */ + @Test + public void testReentrantLockBehavior() throws Exception { + String key = "reentrant-test"; + + // Acquire first lock + try (HierarchicalResourceLock lock1 = lockManager.acquireReadLock(FlatResource.SNAPSHOT_GC_LOCK, key)) { + assertTrue(lock1.isLockAcquired()); + + // Acquire second lock on same resource from same thread + try (HierarchicalResourceLock lock2 = lockManager.acquireReadLock(FlatResource.SNAPSHOT_GC_LOCK, key)) { + assertTrue(lock2.isLockAcquired()); + + // Both locks should be active + assertTrue(lock1.isLockAcquired()); + assertTrue(lock2.isLockAcquired()); + } + + // First lock should still be active after second is released + assertTrue(lock1.isLockAcquired()); + } + } + + /** + * Test that IOException is properly propagated from pool operations. + */ + @Test + public void testIOExceptionPropagation() { + // This test verifies that IOExceptions from pool operations are properly handled + // In normal circumstances, the pool should not throw IOExceptions during basic operations + // but the code should handle them gracefully if they occur + + String key = "exception-test"; + + try (HierarchicalResourceLock lock = lockManager.acquireReadLock(FlatResource.SNAPSHOT_DB_LOCK, key)) { + assertNotNull(lock); + assertTrue(lock.isLockAcquired()); + // If we reach here, no IOException was thrown, which is expected for normal operation + } catch (Exception e) { + // If Exception is thrown, it should be properly propagated + assertNotNull(e.getMessage()); + } + } +} diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java index 5713f218bd5c..16fc941c9bd8 100644 --- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java +++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java @@ -51,6 +51,7 @@ import 
org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.lock.HierachicalResourceLockManager; import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; @@ -90,6 +91,11 @@ public interface OMMetadataManager extends DBStoreHAManager, AutoCloseable { */ IOzoneManagerLock getLock(); + /** + * Returns the Hierarchical ResourceLock used on Metadata DB. + */ + HierachicalResourceLockManager getHierarchicalLockManager(); + /** * Returns the epoch associated with current OM process. */ diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index ca9f45f8d24c..53d71837048c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -103,9 +103,12 @@ import org.apache.hadoop.ozone.om.helpers.S3SecretValue; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.WithMetadata; +import org.apache.hadoop.ozone.om.lock.HierachicalResourceLockManager; import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; import org.apache.hadoop.ozone.om.lock.OmReadOnlyLock; import org.apache.hadoop.ozone.om.lock.OzoneManagerLock; +import org.apache.hadoop.ozone.om.lock.PoolBasedHierarchicalResourceLockManager; +import org.apache.hadoop.ozone.om.lock.ReadOnlyHierarchicalResourceLockManager; import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import 
org.apache.hadoop.ozone.om.request.util.OMMultipartUploadUtils; @@ -133,6 +136,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager, private DBStore store; private final IOzoneManagerLock lock; + private final HierachicalResourceLockManager hierarchicalLockManager; private TypedTable userTable; private TypedTable volumeTable; @@ -197,6 +201,7 @@ public OmMetadataManagerImpl(OzoneConfiguration conf, this.perfMetrics = this.ozoneManager.getPerfMetrics(); } this.lock = new OzoneManagerLock(conf); + this.hierarchicalLockManager = new PoolBasedHierarchicalResourceLockManager(conf); this.omEpoch = OmUtils.getOMEpoch(); start(conf); } @@ -207,6 +212,7 @@ public OmMetadataManagerImpl(OzoneConfiguration conf, protected OmMetadataManagerImpl() { OzoneConfiguration conf = new OzoneConfiguration(); this.lock = new OzoneManagerLock(conf); + this.hierarchicalLockManager = new PoolBasedHierarchicalResourceLockManager(conf); this.omEpoch = 0; perfMetrics = null; } @@ -239,6 +245,7 @@ public static OmMetadataManagerImpl createCheckpointMetadataManager( protected OmMetadataManagerImpl(OzoneConfiguration conf, File dir, String name) throws IOException { lock = new OmReadOnlyLock(); + hierarchicalLockManager = new ReadOnlyHierarchicalResourceLockManager(); omEpoch = 0; int maxOpenFiles = conf.getInt(OZONE_OM_SNAPSHOT_DB_MAX_OPEN_FILES, OZONE_OM_SNAPSHOT_DB_MAX_OPEN_FILES_DEFAULT); @@ -258,6 +265,7 @@ protected OmMetadataManagerImpl(OzoneConfiguration conf, File dir, String name) OmMetadataManagerImpl(OzoneConfiguration conf, String snapshotDirName, int maxOpenFiles) throws IOException { try { lock = new OmReadOnlyLock(); + hierarchicalLockManager = new ReadOnlyHierarchicalResourceLockManager(); omEpoch = 0; String snapshotDir = OMStorage.getOmDbDir(conf) + OM_KEY_PREFIX + OM_SNAPSHOT_CHECKPOINT_DIR; @@ -644,6 +652,11 @@ public IOzoneManagerLock getLock() { return lock; } + @Override + public HierachicalResourceLockManager getHierarchicalLockManager() { + return 
hierarchicalLockManager; + } + @Override public long getOmEpoch() { return omEpoch; From 6865fad02439befe27bb4f1172c780ad0dc14be6 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 15 Oct 2025 15:38:02 -0400 Subject: [PATCH 38/97] HDDS-13797. Revert move of Leveled Resource and Resource enum/interface Change-Id: I9cfe0a545a5d4565b6a5e6fb94ea86f29d0f23ad --- .../ozone/om/S3SecretLockedManager.java | 2 +- .../hadoop/ozone/om/lock/FlatResource.java | 2 + .../ozone/om/lock/IOzoneManagerLock.java | 13 ++ .../hadoop/ozone/om/lock/LeveledResource.java | 132 ------------------ .../ozone/om/lock/OzoneManagerLock.java | 113 +++++++++++++++ .../apache/hadoop/ozone/om/lock/Resource.java | 31 ---- .../hadoop/ozone/om/lock/TestKeyPathLock.java | 20 +-- .../ozone/om/lock/TestOzoneManagerLock.java | 2 + .../hadoop/ozone/om/BucketManagerImpl.java | 2 +- .../hadoop/ozone/om/KeyManagerImpl.java | 2 +- .../apache/hadoop/ozone/om/ListIterator.java | 2 +- .../apache/hadoop/ozone/om/OzoneManager.java | 4 +- .../hadoop/ozone/om/PrefixManagerImpl.java | 2 +- .../hadoop/ozone/om/VolumeManagerImpl.java | 4 +- .../ozone/om/lock/OBSKeyPathLockStrategy.java | 4 +- .../om/lock/RegularBucketLockStrategy.java | 2 +- .../request/bucket/OMBucketCreateRequest.java | 4 +- .../request/bucket/OMBucketDeleteRequest.java | 4 +- .../bucket/OMBucketSetOwnerRequest.java | 2 +- .../bucket/OMBucketSetPropertyRequest.java | 2 +- .../bucket/acl/OMBucketAclRequest.java | 2 +- .../file/OMDirectoryCreateRequest.java | 2 +- .../file/OMDirectoryCreateRequestWithFSO.java | 2 +- .../om/request/file/OMFileCreateRequest.java | 2 +- .../file/OMFileCreateRequestWithFSO.java | 2 +- .../request/file/OMRecoverLeaseRequest.java | 2 +- .../request/key/OMAllocateBlockRequest.java | 2 +- .../key/OMAllocateBlockRequestWithFSO.java | 2 +- .../key/OMDirectoriesPurgeRequestWithFSO.java | 2 +- .../om/request/key/OMKeyCommitRequest.java | 2 +- .../key/OMKeyCommitRequestWithFSO.java | 2 +- 
.../key/OMKeyCreateRequestWithFSO.java | 2 +- .../om/request/key/OMKeyDeleteRequest.java | 2 +- .../key/OMKeyDeleteRequestWithFSO.java | 2 +- .../om/request/key/OMKeyPurgeRequest.java | 2 +- .../om/request/key/OMKeyRenameRequest.java | 2 +- .../key/OMKeyRenameRequestWithFSO.java | 2 +- .../ozone/om/request/key/OMKeyRequest.java | 2 +- .../om/request/key/OMKeySetTimesRequest.java | 2 +- .../key/OMKeySetTimesRequestWithFSO.java | 2 +- .../om/request/key/OMKeysDeleteRequest.java | 2 +- .../om/request/key/OMKeysRenameRequest.java | 2 +- .../request/key/OMOpenKeysDeleteRequest.java | 2 +- .../om/request/key/acl/OMKeyAclRequest.java | 2 +- .../key/acl/OMKeyAclRequestWithFSO.java | 2 +- .../key/acl/prefix/OMPrefixAclRequest.java | 2 +- ...S3ExpiredMultipartUploadsAbortRequest.java | 2 +- .../S3InitiateMultipartUploadRequest.java | 2 +- ...InitiateMultipartUploadRequestWithFSO.java | 2 +- .../S3MultipartUploadAbortRequest.java | 2 +- .../S3MultipartUploadCommitPartRequest.java | 2 +- .../S3MultipartUploadCompleteRequest.java | 2 +- .../tagging/S3DeleteObjectTaggingRequest.java | 2 +- .../S3DeleteObjectTaggingRequestWithFSO.java | 2 +- .../s3/tagging/S3PutObjectTaggingRequest.java | 2 +- .../S3PutObjectTaggingRequestWithFSO.java | 2 +- .../s3/tenant/OMTenantAssignAdminRequest.java | 2 +- .../OMTenantAssignUserAccessIdRequest.java | 2 +- .../s3/tenant/OMTenantCreateRequest.java | 4 +- .../s3/tenant/OMTenantDeleteRequest.java | 2 +- .../s3/tenant/OMTenantRevokeAdminRequest.java | 2 +- .../OMTenantRevokeUserAccessIdRequest.java | 2 +- .../snapshot/OMSnapshotCreateRequest.java | 4 +- .../snapshot/OMSnapshotDeleteRequest.java | 4 +- .../snapshot/OMSnapshotRenameRequest.java | 4 +- .../request/volume/OMQuotaRepairRequest.java | 4 +- .../request/volume/OMVolumeCreateRequest.java | 4 +- .../request/volume/OMVolumeDeleteRequest.java | 4 +- .../volume/OMVolumeSetOwnerRequest.java | 2 +- .../volume/OMVolumeSetQuotaRequest.java | 2 +- .../volume/acl/OMVolumeAclRequest.java | 2 +- 
.../ozone/om/snapshot/MultiSnapshotLocks.java | 2 +- ...tOMDirectoriesPurgeRequestAndResponse.java | 2 +- .../om/snapshot/TestMultiSnapshotLocks.java | 4 +- 74 files changed, 220 insertions(+), 253 deletions(-) delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/LeveledResource.java delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/Resource.java diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretLockedManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretLockedManager.java index 2efe66b9db85..d42df2acbd24 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretLockedManager.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/S3SecretLockedManager.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.S3_SECRET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.S3_SECRET_LOCK; import java.io.IOException; import java.util.List; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/FlatResource.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/FlatResource.java index f23a6ee78f28..73f8357252f2 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/FlatResource.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/FlatResource.java @@ -17,6 +17,8 @@ package org.apache.hadoop.ozone.om.lock; +import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock.Resource; + /** * Flat Resource defined in Ozone. Locks can be acquired on a resource independent of one another. 
*/ diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/IOzoneManagerLock.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/IOzoneManagerLock.java index 6a17a0f69b16..7e8ed7c78171 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/IOzoneManagerLock.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/IOzoneManagerLock.java @@ -72,6 +72,19 @@ boolean isWriteLockedByCurrentThread(Resource resource, OMLockMetrics getOMLockMetrics(); + /** + * Defines a resource interface used to represent entities that can be + * associated with locks in the Ozone Manager Lock mechanism. A resource + * implementation provides a name and an associated {@link ResourceManager} + * to manage its locking behavior. + */ + interface Resource { + + String getName(); + + ResourceManager getResourceManager(); + } + /** * The ResourceManager class provides functionality for managing * information about resource read and write lock usage. It tracks the time of diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/LeveledResource.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/LeveledResource.java deleted file mode 100644 index bb6b14e15882..000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/LeveledResource.java +++ /dev/null @@ -1,132 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.lock; - -/** - * Leveled Resource defined in Ozone. - * Enforces lock acquisition ordering based on the resource level. A resource at lower level cannot be acquired - * after a higher level lock is already acquired. - */ -public enum LeveledResource implements Resource { - // For S3 Bucket need to allow only for S3, that should be means only 1. - S3_BUCKET_LOCK((byte) 0, "S3_BUCKET_LOCK"), // = 1 - - // For volume need to allow both s3 bucket and volume. 01 + 10 = 11 (3) - VOLUME_LOCK((byte) 1, "VOLUME_LOCK"), // = 2 - - // For bucket we need to allow both s3 bucket, volume and bucket. Which - // is equal to 100 + 010 + 001 = 111 = 4 + 2 + 1 = 7 - BUCKET_LOCK((byte) 2, "BUCKET_LOCK"), // = 4 - - // For user we need to allow s3 bucket, volume, bucket and user lock. - // Which is 8 4 + 2 + 1 = 15 - USER_LOCK((byte) 3, "USER_LOCK"), // 15 - - S3_SECRET_LOCK((byte) 4, "S3_SECRET_LOCK"), // 31 - KEY_PATH_LOCK((byte) 5, "KEY_PATH_LOCK"), //63 - PREFIX_LOCK((byte) 6, "PREFIX_LOCK"), //127 - SNAPSHOT_LOCK((byte) 7, "SNAPSHOT_LOCK"); // = 255 - - // This will tell the value, till which we can allow locking. - private short mask; - - // This value will help during setLock, and also will tell whether we can - // re-acquire lock or not. - private short setMask; - - // Name of the resource. 
- private String name; - - private IOzoneManagerLock.ResourceManager resourceManager; - - LeveledResource(byte pos, String name) { - // level of the resource - this.mask = (short) (Math.pow(2, pos + 1) - 1); - this.setMask = (short) Math.pow(2, pos); - this.name = name; - this.resourceManager = new IOzoneManagerLock.ResourceManager(); - } - - boolean canLock(short lockSetVal) { - - // For USER_LOCK, S3_SECRET_LOCK and PREFIX_LOCK we shall not allow - // re-acquire locks from single thread. 2nd condition is we have - // acquired one of these locks, but after that trying to acquire a lock - // with less than equal of lockLevel, we should disallow. - if (((USER_LOCK.setMask & lockSetVal) == USER_LOCK.setMask || - (S3_SECRET_LOCK.setMask & lockSetVal) == S3_SECRET_LOCK.setMask || - (PREFIX_LOCK.setMask & lockSetVal) == PREFIX_LOCK.setMask) - && setMask <= lockSetVal) { - return false; - } - - - // Our mask is the summation of bits of all previous possible locks. In - // other words it is the largest possible value for that bit position. - - // For example for Volume lock, bit position is 1, and mask is 3. Which - // is the largest value that can be represented with 2 bits is 3. - // Therefore if lockSet is larger than mask we have to return false i.e - // some other higher order lock has been acquired. - - return lockSetVal <= mask; - } - - /** - * Set Lock bits in lockSetVal. - * - * @param lockSetVal - * @return Updated value which has set lock bits. - */ - short setLock(short lockSetVal) { - return (short) (lockSetVal | setMask); - } - - /** - * Clear lock from lockSetVal. - * - * @param lockSetVal - * @return Updated value which has cleared lock bits. - */ - short clearLock(short lockSetVal) { - return (short) (lockSetVal & ~setMask); - } - - /** - * Return true, if this level is locked, else false. 
- * - * @param lockSetVal - */ - boolean isLevelLocked(short lockSetVal) { - return (lockSetVal & setMask) == setMask; - } - - @Override - public String getName() { - return name; - } - - @Override - public IOzoneManagerLock.ResourceManager getResourceManager() { - return resourceManager; - } - - short getMask() { - return mask; - } -} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java index cd6d85c9e3b2..6eb735d2ccc3 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java @@ -662,6 +662,119 @@ public OMLockDetails lockResource(LeveledResource resource) { } } + /** + * Leveled Resource defined in Ozone. + * Enforces lock acquisition ordering based on the resource level. A resource at lower level cannot be acquired + * after a higher level lock is already acquired. + */ + public enum LeveledResource implements Resource { + // For S3 Bucket need to allow only for S3, that should be means only 1. + S3_BUCKET_LOCK((byte) 0, "S3_BUCKET_LOCK"), // = 1 + + // For volume need to allow both s3 bucket and volume. 01 + 10 = 11 (3) + VOLUME_LOCK((byte) 1, "VOLUME_LOCK"), // = 2 + + // For bucket we need to allow both s3 bucket, volume and bucket. Which + // is equal to 100 + 010 + 001 = 111 = 4 + 2 + 1 = 7 + BUCKET_LOCK((byte) 2, "BUCKET_LOCK"), // = 4 + + // For user we need to allow s3 bucket, volume, bucket and user lock. + // Which is 8 4 + 2 + 1 = 15 + USER_LOCK((byte) 3, "USER_LOCK"), // 15 + + S3_SECRET_LOCK((byte) 4, "S3_SECRET_LOCK"), // 31 + KEY_PATH_LOCK((byte) 5, "KEY_PATH_LOCK"), //63 + PREFIX_LOCK((byte) 6, "PREFIX_LOCK"), //127 + SNAPSHOT_LOCK((byte) 7, "SNAPSHOT_LOCK"); // = 255 + + // This will tell the value, till which we can allow locking. 
+ private short mask; + + // This value will help during setLock, and also will tell whether we can + // re-acquire lock or not. + private short setMask; + + // Name of the resource. + private String name; + + private ResourceManager resourceManager; + + LeveledResource(byte pos, String name) { + // level of the resource + this.mask = (short) (Math.pow(2, pos + 1) - 1); + this.setMask = (short) Math.pow(2, pos); + this.name = name; + this.resourceManager = new ResourceManager(); + } + + boolean canLock(short lockSetVal) { + + // For USER_LOCK, S3_SECRET_LOCK and PREFIX_LOCK we shall not allow + // re-acquire locks from single thread. 2nd condition is we have + // acquired one of these locks, but after that trying to acquire a lock + // with less than equal of lockLevel, we should disallow. + if (((USER_LOCK.setMask & lockSetVal) == USER_LOCK.setMask || + (S3_SECRET_LOCK.setMask & lockSetVal) == S3_SECRET_LOCK.setMask || + (PREFIX_LOCK.setMask & lockSetVal) == PREFIX_LOCK.setMask) + && setMask <= lockSetVal) { + return false; + } + + + // Our mask is the summation of bits of all previous possible locks. In + // other words it is the largest possible value for that bit position. + + // For example for Volume lock, bit position is 1, and mask is 3. Which + // is the largest value that can be represented with 2 bits is 3. + // Therefore if lockSet is larger than mask we have to return false i.e + // some other higher order lock has been acquired. + + return lockSetVal <= mask; + } + + /** + * Set Lock bits in lockSetVal. + * + * @param lockSetVal + * @return Updated value which has set lock bits. + */ + short setLock(short lockSetVal) { + return (short) (lockSetVal | setMask); + } + + /** + * Clear lock from lockSetVal. + * + * @param lockSetVal + * @return Updated value which has cleared lock bits. + */ + short clearLock(short lockSetVal) { + return (short) (lockSetVal & ~setMask); + } + + /** + * Return true, if this level is locked, else false. 
+ * @param lockSetVal + */ + boolean isLevelLocked(short lockSetVal) { + return (lockSetVal & setMask) == setMask; + } + + @Override + public String getName() { + return name; + } + + @Override + public ResourceManager getResourceManager() { + return resourceManager; + } + + short getMask() { + return mask; + } + } + /** * Update the processing details. * diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/Resource.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/Resource.java deleted file mode 100644 index 2ef2510f12d5..000000000000 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/Resource.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.lock; - -/** - * Defines a resource interface used to represent entities that can be - * associated with locks in the Ozone Manager Lock mechanism. A resource - * implementation provides a name and an associated {@link IOzoneManagerLock.ResourceManager} - * to manage its locking behavior. 
- */ -public interface Resource { - - String getName(); - - IOzoneManagerLock.ResourceManager getResourceManager(); -} diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestKeyPathLock.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestKeyPathLock.java index c5d50ebdbd9a..53fdc659883a 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestKeyPathLock.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestKeyPathLock.java @@ -39,8 +39,8 @@ class TestKeyPathLock extends TestOzoneManagerLock { private static final Logger LOG = LoggerFactory.getLogger(TestKeyPathLock.class); - private final LeveledResource resource = - LeveledResource.KEY_PATH_LOCK; + private final OzoneManagerLock.LeveledResource resource = + OzoneManagerLock.LeveledResource.KEY_PATH_LOCK; @Test void testKeyPathLockMultiThreading() throws Exception { @@ -224,8 +224,8 @@ private void testDiffKeyPathWriteLockMultiThreadingUtil( @Test void testAcquireWriteBucketLockWhileAcquiredWriteKeyPathLock() { - LeveledResource higherResource = - LeveledResource.BUCKET_LOCK; + OzoneManagerLock.LeveledResource higherResource = + OzoneManagerLock.LeveledResource.BUCKET_LOCK; String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); @@ -246,8 +246,8 @@ void testAcquireWriteBucketLockWhileAcquiredWriteKeyPathLock() { @Test void testAcquireWriteBucketLockWhileAcquiredReadKeyPathLock() { - LeveledResource higherResource = - LeveledResource.BUCKET_LOCK; + OzoneManagerLock.LeveledResource higherResource = + OzoneManagerLock.LeveledResource.BUCKET_LOCK; String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); @@ -268,8 +268,8 @@ void testAcquireWriteBucketLockWhileAcquiredReadKeyPathLock() { @Test void testAcquireReadBucketLockWhileAcquiredReadKeyPathLock() { - LeveledResource higherResource = - LeveledResource.BUCKET_LOCK; + 
OzoneManagerLock.LeveledResource higherResource = + OzoneManagerLock.LeveledResource.BUCKET_LOCK; String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); @@ -290,8 +290,8 @@ void testAcquireReadBucketLockWhileAcquiredReadKeyPathLock() { @Test void testAcquireReadBucketLockWhileAcquiredWriteKeyPathLock() { - LeveledResource higherResource = - LeveledResource.BUCKET_LOCK; + OzoneManagerLock.LeveledResource higherResource = + OzoneManagerLock.LeveledResource.BUCKET_LOCK; String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java index 652f586f69ea..3486f44d753d 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java @@ -35,6 +35,8 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.metrics2.MetricsRecord; import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl; +import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock.Resource; +import org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.CsvSource; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java index d1c035130b9d..d6775b0495a9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java @@ -19,7 +19,7 @@ import static 
org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INTERNAL_ERROR; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index 00a8d4138ea4..e458fa73236a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -76,7 +76,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.SCM_GET_PIPELINE_EXCEPTION; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.KEY; import static org.apache.hadoop.ozone.util.MetricUtil.captureLatencyNs; import static org.apache.hadoop.util.Time.monotonicNow; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java index b12b7ba8bcd7..426e7b73ec4f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ListIterator.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om; -import static 
org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import java.io.Closeable; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index ad77b921af9b..56e51cf4026e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -94,8 +94,8 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_SUPPORTED_OPERATION; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PERMISSION_DENIED; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; import static org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer.RaftServerStatus.LEADER_AND_READY; import static org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer.getRaftGroupIdFromOmServiceId; import static org.apache.hadoop.ozone.om.s3.S3SecretStoreConfigurationKeys.DEFAULT_SECRET_STORAGE_TYPE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java index e7c70ecc808c..f6615b92f2d3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/PrefixManagerImpl.java @@ -23,7 +23,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_PATH_IN_ACL_REQUEST; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PREFIX_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.PREFIX_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.PREFIX_LOCK; import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.PREFIX; import com.google.common.annotations.VisibleForTesting; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java index 812d5054b2e6..0f1be7855788 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/VolumeManagerImpl.java @@ -18,8 +18,8 @@ package org.apache.hadoop.ozone.om; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.USER_LOCK; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.USER_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/OBSKeyPathLockStrategy.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/OBSKeyPathLockStrategy.java index 9aaddd1efc48..c715856db80f 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/OBSKeyPathLockStrategy.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/OBSKeyPathLockStrategy.java @@ -17,8 +17,8 @@ package org.apache.hadoop.ozone.om.lock; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.KEY_PATH_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.KEY_PATH_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/RegularBucketLockStrategy.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/RegularBucketLockStrategy.java index b8116bc4c607..76071a408b4a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/RegularBucketLockStrategy.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/lock/RegularBucketLockStrategy.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.lock; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import org.apache.hadoop.ozone.om.OMMetadataManager; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java index 6f7e6975e295..838ee3be86ca 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java @@ -21,8 +21,8 
@@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_ALREADY_EXISTS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.om.helpers.OzoneAclUtil.getDefaultAclList; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java index d1e0d7237885..4d802f9078e1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java @@ -19,8 +19,8 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.BUCKET_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.CONTAINS_SNAPSHOT; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetOwnerRequest.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetOwnerRequest.java index 4d57b22bed99..e60d5019ff41 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetOwnerRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetOwnerRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.bucket; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java index 2e27c3db0b72..270b95d06da3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.bucket; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java index 6c2a8987f2bc..565c6e4854d8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.bucket.acl; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java index 2f6cf761a620..4e6ac64edcd2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.file; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java index f0f5b7aa1ea0..5adcfec9617c 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.file; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java index c0cdc5dc28ff..b8812ddda99b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.file; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH; diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java index 5392d28269cd..d04c9f421e47 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.file; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java index f05915bfc0a3..5c96ae67fbe7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java @@ -24,7 +24,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_ALREADY_CLOSED; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_UNDER_LEASE_SOFT_LIMIT_PERIOD; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.HBASE_SUPPORT; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.RecoverLease; diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java index b172aa2318dd..7c5660b93806 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_UNDER_LEASE_RECOVERY; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java index 1d3e4bdb509d..799983cbe441 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_UNDER_LEASE_RECOVERY; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import com.google.common.base.Preconditions; import jakarta.annotation.Nonnull; diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java index 0ed6fa42ba04..ed6185141d6d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.hdds.HddsUtils.fromProtobuf; import static org.apache.hadoop.ozone.OzoneConsts.DELETED_HSYNC_KEY; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.validatePreviousSnapshotId; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java index 3902af8fd2d4..31f1d9d71801 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java @@ -22,7 +22,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_UNDER_LEASE_RECOVERY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_SUPPORTED_OPERATION; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import com.google.common.annotations.VisibleForTesting; import 
com.google.common.base.Preconditions; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java index cc86709a1a98..a23716d40d1c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java @@ -20,7 +20,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_ALREADY_CLOSED; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_UNDER_LEASE_RECOVERY; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import com.google.common.annotations.VisibleForTesting; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java index b3a7e2bc547a..aa1338fa8cfa 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.key; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import static 
org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java index e7fdc0db0c16..5c2065356c0c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.DELETED_HSYNC_KEY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.util.MetricUtil.captureLatencyNs; import com.google.common.base.Preconditions; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java index 809ef4c74f70..75b5966e005e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java @@ -20,7 +20,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.DELETED_HSYNC_KEY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_EMPTY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; -import static 
org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.util.MetricUtil.captureLatencyNs; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java index 6134b3e40fbb..5fe932c4a094 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.key; import static org.apache.hadoop.hdds.HddsUtils.fromProtobuf; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.validatePreviousSnapshotId; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java index 00a5301d8434..f1d71d99fdfe 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.key; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import 
com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java index b3098e518957..8163b902dbb5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java @@ -20,7 +20,7 @@ import static org.apache.hadoop.ozone.OmUtils.normalizeKey; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.RENAME_OPEN_FILE; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java index 9c9da2bd88ba..2317a4815910 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java @@ -28,7 +28,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; import static org.apache.hadoop.ozone.om.helpers.OzoneAclUtil.getDefaultAclList; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; 
import static org.apache.hadoop.util.Time.monotonicNow; import com.google.common.base.Preconditions; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java index 8444b1853ac3..353a17757025 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.key; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequestWithFSO.java index 23acba85f3de..009bcd1662c1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequestWithFSO.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.key; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java index 305e23ff5b4f..427b2978f9c6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java @@ -26,7 +26,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.UNDELETED_KEYS_LIST; import static org.apache.hadoop.ozone.OzoneConsts.VOLUME; import static org.apache.hadoop.ozone.audit.OMAction.DELETE_KEYS; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.PARTIAL_DELETE; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java index c2921323d636..ef4d64b27c9b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.RENAMED_KEYS_MAP; import static org.apache.hadoop.ozone.OzoneConsts.UNRENAMED_KEYS_MAP; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.PARTIAL_RENAME; diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMOpenKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMOpenKeysDeleteRequest.java index 5ac9c3c93d1e..3997f1a67782 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMOpenKeysDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMOpenKeysDeleteRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.key; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java index 2dec9e910a60..67a7f8a626b7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.key.acl; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequestWithFSO.java index 926ffdb694c0..f32a22b17329 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequestWithFSO.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequestWithFSO.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.key.acl; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java index 334ee51b6e4a..5e1f3513564a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.key.acl.prefix; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.PREFIX_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.PREFIX_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java index 29d7cfbfcc1f..5f7d01d9a733 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java @@ -17,7 +17,7 @@ package 
org.apache.hadoop.ozone.om.request.s3.multipart; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.util.ArrayList; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java index fdd370c4bb8f..ac27d0e0a173 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java index dabec5043e7e..7e781635603b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static 
org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.DIRECTORY_EXISTS; import com.google.common.base.Preconditions; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java index fbe219ecfcea..1e64edfb5be3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java index 5c57ce1e06d9..5f715ded0b1b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static 
org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import com.google.common.annotations.VisibleForTesting; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java index 5b8a286fee68..6d41da38087c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import jakarta.annotation.Nullable; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java index a884673d042d..12c4ce13de5c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.s3.tagging; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static 
org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java index 1e5e6bdac59d..b40e6c1f8a42 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.s3.tagging; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.util.Map; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java index 524f52355c9a..23d7a40f26de 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.s3.tagging; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java index cbad899e2d75..05a45322c599 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.s3.tagging; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import java.io.IOException; import java.util.Map; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignAdminRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignAdminRequest.java index 2501dfd150e0..08979e4e8090 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignAdminRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignAdminRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.s3.tenant; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA; import com.google.common.base.Preconditions; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignUserAccessIdRequest.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignUserAccessIdRequest.java index 84129c0039f3..bb2c1a187856 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignUserAccessIdRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignUserAccessIdRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.s3.tenant; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_MAXIMUM_ACCESS_ID_LENGTH; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA; import com.google.common.base.Preconditions; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantCreateRequest.java index 47432b1bb73e..3732f074bb0c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantCreateRequest.java @@ -20,8 +20,8 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TENANT_ALREADY_EXISTS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.USER_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_ALREADY_EXISTS; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.USER_LOCK; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.USER_LOCK; +import static 
org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA; import com.google.common.base.Preconditions; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantDeleteRequest.java index 1baa18587474..bcb7e010bf8e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantDeleteRequest.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TENANT_NOT_EMPTY; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TENANT_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA; import com.google.common.base.Preconditions; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeAdminRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeAdminRequest.java index ae332c2e719e..469eed85e8bc 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeAdminRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeAdminRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.s3.tenant; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; +import static 
org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA; import com.google.common.base.Preconditions; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java index 7e814bac5a5e..2458c8656256 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.s3.tenant; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.MULTITENANCY_SCHEMA; import com.google.common.base.Preconditions; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java index 37e67f89a2b8..6211d4114005 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java @@ -20,8 +20,8 @@ import static org.apache.hadoop.hdds.HddsUtils.fromProtobuf; import static org.apache.hadoop.hdds.HddsUtils.toProtobuf; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; -import static 
org.apache.hadoop.ozone.om.lock.LeveledResource.SNAPSHOT_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.SNAPSHOT_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.FILESYSTEM_SNAPSHOT; import com.google.protobuf.ByteString; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java index 11e172040ffa..3f8bae61c530 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java @@ -18,8 +18,8 @@ package org.apache.hadoop.ozone.om.request.snapshot; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.SNAPSHOT_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.SNAPSHOT_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.FILESYSTEM_SNAPSHOT; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java index ed2543def3d0..7a4cdc640dce 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java @@ -19,8 +19,8 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.SNAPSHOT_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.SNAPSHOT_LOCK; import static org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature.FILESYSTEM_SNAPSHOT; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMQuotaRepairRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMQuotaRepairRequest.java index c3ca72f71a15..819edf6c01f5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMQuotaRepairRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMQuotaRepairRequest.java @@ -19,8 +19,8 @@ import static org.apache.hadoop.ozone.OzoneConsts.OLD_QUOTA_DEFAULT; import static org.apache.hadoop.ozone.OzoneConsts.QUOTA_RESET; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java index 8889b9b0fa53..1397f8a1b9f3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java @@ -18,8 +18,8 @@ package org.apache.hadoop.ozone.om.request.volume; import static org.apache.hadoop.ozone.om.helpers.OzoneAclUtil.getDefaultAclList; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.USER_LOCK; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.USER_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java index 68ac2690085c..943a5ea5700c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java @@ -17,8 +17,8 @@ package org.apache.hadoop.ozone.om.request.volume; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.USER_LOCK; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.USER_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java index 862577683909..f9a6fa303590 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.volume; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java index d7092d0b2b49..b4279eac2d4d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.volume; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; import com.google.common.base.Preconditions; import java.io.IOException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java index 7181fa8eca89..88d786cdd204 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.om.request.volume.acl; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.VOLUME_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.VOLUME_LOCK; import java.io.IOException; import java.nio.file.InvalidPathException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java index ec19cd94b549..525877306965 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/MultiSnapshotLocks.java @@ -26,8 +26,8 @@ import java.util.stream.Collectors; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; +import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock.Resource; import org.apache.hadoop.ozone.om.lock.OMLockDetails; -import org.apache.hadoop.ozone.om.lock.Resource; /** * Class to take multiple locks on multiple snapshots. 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java index 05385ddc5bad..54087fa64dc1 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.key; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; -import static org.apache.hadoop.ozone.om.lock.LeveledResource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.LeveledResource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.getOmKeyInfo; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestMultiSnapshotLocks.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestMultiSnapshotLocks.java index cbff0398882b..9c358a9261b3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestMultiSnapshotLocks.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestMultiSnapshotLocks.java @@ -36,8 +36,8 @@ import java.util.UUID; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; -import org.apache.hadoop.ozone.om.lock.LeveledResource; import org.apache.hadoop.ozone.om.lock.OMLockDetails; +import org.apache.hadoop.ozone.om.lock.OzoneManagerLock; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import 
org.junit.jupiter.api.extension.ExtendWith; @@ -54,7 +54,7 @@ public class TestMultiSnapshotLocks { private IOzoneManagerLock mockLock; @Mock - private LeveledResource mockResource; + private OzoneManagerLock.LeveledResource mockResource; private MultiSnapshotLocks multiSnapshotLocks; private UUID obj1 = UUID.randomUUID(); From 4711517be04886f67ea4b2a5e742c3786c9a2f20 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 15 Oct 2025 18:24:38 -0400 Subject: [PATCH 39/97] HDDS-13798. Fix pmd findbugs Change-Id: I6953c4f0fab1b0b54e1b4f1fce69025fdd424ac9 --- .../TestPoolBasedHierarchicalResourceLockManager.java | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java index 319bf8a4245c..ebe2c6034015 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java @@ -380,7 +380,7 @@ public void testConcurrentAccess() throws Exception { } // Wait for all threads to complete - latch.await(25, TimeUnit.SECONDS); + assertTrue(latch.await(25, TimeUnit.SECONDS)); // Check for exceptions if (exception.get() != null) { @@ -389,7 +389,9 @@ public void testConcurrentAccess() throws Exception { // Verify all operations succeeded assertEquals(numThreads * operationsPerThread, successCount.get()); - + for (CompletableFuture future : futures) { + future.get(); + } } finally { executor.shutdown(); } @@ -444,7 +446,7 @@ public void testResourcePoolStress() throws Exception { } // Wait for all threads to complete - latch.await(15, TimeUnit.SECONDS); + assertTrue(latch.await(15, TimeUnit.SECONDS)); // Check for exceptions if (exception.get() != 
null) { From 655a72442a6336c610402f1b68233abfef33e7ca Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 15 Oct 2025 18:31:12 -0400 Subject: [PATCH 40/97] HDDS-13798. Fix pmd findbugs Change-Id: I31407994bb2d717f22977730b8b33ebc6eb33eea --- .../om/lock/TestPoolBasedHierarchicalResourceLockManager.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java index ebe2c6034015..f589002cc089 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java @@ -58,11 +58,10 @@ public class TestPoolBasedHierarchicalResourceLockManager { private PoolBasedHierarchicalResourceLockManager lockManager; - private OzoneConfiguration conf; @BeforeEach public void setUp() { - conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); lockManager = new PoolBasedHierarchicalResourceLockManager(conf); } From 2bc61341cbbc45658990109059cbd5986d0c02c7 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 15 Oct 2025 21:16:04 -0400 Subject: [PATCH 41/97] HDDS-13798. Fix ozone-default.xml Change-Id: I8b0d1657f809bf79cc5c5a64b336b7c6689aee91 --- .../common/src/main/resources/ozone-default.xml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index b200c0b5bf1f..462b1e4331f4 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -4838,4 +4838,14 @@ warm up edek cache if none of key successful on OM start up. 
+ + ozone.om.hierarchical.resource.locks.soft.limit + 1024 + Soft limit for number of lock objects that could be idle in the pool. + + + ozone.om.hierarchical.resource.locks.hard.limit + 10000 + Maximum number of lock objects that could be present in the pool. + From 8e8c534e9ba8bb49cbc29318dac7d0710406c572 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 16 Oct 2025 06:05:52 -0400 Subject: [PATCH 42/97] HDDS-13798. Stop lock data manager on metadata stop Change-Id: I39115b2cb142fd36e370c8c5e72d6e58ce1ffb3a --- .../om/lock/ReadOnlyHierarchicalResourceLockManager.java | 2 +- .../org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/ReadOnlyHierarchicalResourceLockManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/ReadOnlyHierarchicalResourceLockManager.java index 4bf2065a0bc6..59d61ca062d1 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/ReadOnlyHierarchicalResourceLockManager.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/ReadOnlyHierarchicalResourceLockManager.java @@ -39,7 +39,7 @@ public void close() { private static final HierarchicalResourceLock EMPTY_LOCK_NOT_ACQUIRED = new HierarchicalResourceLock() { @Override public boolean isLockAcquired() { - return true; + return false; } @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index 53d71837048c..b40905a328c1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -483,6 +483,11 @@ public void stop() throws IOException { store.close(); store = null; } + 
try { + hierarchicalLockManager.close(); + } catch (Exception e) { + LOG.error("Error closing hierarchical lock manager", e); + } tableCacheMetricsMap.values().forEach(TableCacheMetrics::unregister); // OzoneManagerLock cleanup lock.cleanup(); From f148f247a0780d93a49635c262e945cd696f137d Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 16 Oct 2025 06:26:10 -0400 Subject: [PATCH 43/97] HDDS-13798. Update tests Change-Id: Ie4b4b50c28aa5c62eeae51feec99304fec4b079e --- ...lBasedHierarchicalResourceLockManager.java | 59 ++++++++++--------- 1 file changed, 31 insertions(+), 28 deletions(-) diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java index f589002cc089..d0d3d75ade4a 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java @@ -28,13 +28,16 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; +import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -288,7 +291,8 @@ public void testDifferentKeysOnSameResource() throws Exception { * Test configuration parameters are respected. 
*/ @Test - public void testConfigurationParameters() { + public void testConfigurationParameters() + throws InterruptedException, IOException, ExecutionException, TimeoutException { OzoneConfiguration customConf = new OzoneConfiguration(); customConf.setInt(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT, 100); customConf.setInt(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT, 500); @@ -297,35 +301,34 @@ public void testConfigurationParameters() { new PoolBasedHierarchicalResourceLockManager(customConf)) { // Test that manager can be created with custom configuration + List locks = new ArrayList<>(); assertNotNull(customLockManager); - - // Basic functionality test with custom configuration - try (HierarchicalResourceLock lock = customLockManager.acquireReadLock(FlatResource.SNAPSHOT_DB_LOCK, "test")) { - assertTrue(lock.isLockAcquired()); - } catch (Exception e) { - fail("Lock acquisition failed with custom configuration: " + e.getMessage()); + for (int i = 0; i < 500; i++) { + try { + locks.add(customLockManager.acquireReadLock(FlatResource.SNAPSHOT_DB_LOCK, "test" + i)); + } catch (IOException e) { + fail("Lock acquisition failed with custom configuration: " + e.getMessage()); + } + } + CountDownLatch latch = new CountDownLatch(1); + CompletableFuture future = CompletableFuture.runAsync(() -> { + // Basic functionality test with custom configuration + latch.countDown(); + try (HierarchicalResourceLock lock = customLockManager.acquireReadLock(FlatResource.SNAPSHOT_DB_LOCK, + "test" + 501)) { + assertTrue(lock.isLockAcquired()); + } catch (Exception e) { + fail("Lock acquisition failed with custom configuration: " + e.getMessage()); + } + }); + Thread.sleep(1000); + latch.await(); + assertFalse(future.isDone()); + locks.get(0).close(); + future.get(5, TimeUnit.SECONDS); + for (HierarchicalResourceLock lock : locks) { + lock.close(); } - } - } - - /** - * Test default configuration values. 
- */ - @Test - public void testDefaultConfiguration() { - OzoneConfiguration defaultConf = new OzoneConfiguration(); - - // Verify default values - assertEquals(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT_DEFAULT, - defaultConf.getInt(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT, - OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT_DEFAULT)); - assertEquals(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT_DEFAULT, - defaultConf.getInt(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT, - OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT_DEFAULT)); - - try (PoolBasedHierarchicalResourceLockManager defaultLockManager = - new PoolBasedHierarchicalResourceLockManager(defaultConf)) { - assertNotNull(defaultLockManager); } } From da030c0fac84123ff7ebd15eb3edbaf6ba0e4aff Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 16 Oct 2025 06:33:59 -0400 Subject: [PATCH 44/97] HDDS-13798. Rename class Change-Id: I4625b940c0fd242897d2e1a3ae299c001588be1b --- ...a => HierarchicalResourceLockManager.java} | 2 +- ...lBasedHierarchicalResourceLockManager.java | 2 +- ...adOnlyHierarchicalResourceLockManager.java | 2 +- ...lBasedHierarchicalResourceLockManager.java | 23 +++++++++++-------- .../hadoop/ozone/om/OMMetadataManager.java | 4 ++-- .../ozone/om/OmMetadataManagerImpl.java | 6 ++--- 6 files changed, 21 insertions(+), 18 deletions(-) rename hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/{HierachicalResourceLockManager.java => HierarchicalResourceLockManager.java} (97%) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/HierachicalResourceLockManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/HierarchicalResourceLockManager.java similarity index 97% rename from hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/HierachicalResourceLockManager.java rename to hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/HierarchicalResourceLockManager.java index 
0cc8df45e2c7..d34b199113c9 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/HierachicalResourceLockManager.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/HierarchicalResourceLockManager.java @@ -25,7 +25,7 @@ * there is no cyclic lock ordering on resources. * Typically, this can be used for locking elements which form a DAG like structure.(E.g. FSO tree, Snapshot chain etc.) */ -public interface HierachicalResourceLockManager extends AutoCloseable { +public interface HierarchicalResourceLockManager extends AutoCloseable { /** * Acquires a read lock on the specified resource using the provided key. diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/PoolBasedHierarchicalResourceLockManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/PoolBasedHierarchicalResourceLockManager.java index 19d2dacb32da..d601e31e6343 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/PoolBasedHierarchicalResourceLockManager.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/PoolBasedHierarchicalResourceLockManager.java @@ -46,7 +46,7 @@ * lock dependencies, and is typically useful for structures like * DAGs (e.g., File System trees or snapshot chains). 
*/ -public class PoolBasedHierarchicalResourceLockManager implements HierachicalResourceLockManager { +public class PoolBasedHierarchicalResourceLockManager implements HierarchicalResourceLockManager { private final GenericObjectPool lockPool; private final Map> lockMap; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/ReadOnlyHierarchicalResourceLockManager.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/ReadOnlyHierarchicalResourceLockManager.java index 59d61ca062d1..19e114ae52ec 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/ReadOnlyHierarchicalResourceLockManager.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/ReadOnlyHierarchicalResourceLockManager.java @@ -22,7 +22,7 @@ /** * A read only lock manager that does not acquire any lock. */ -public class ReadOnlyHierarchicalResourceLockManager implements HierachicalResourceLockManager { +public class ReadOnlyHierarchicalResourceLockManager implements HierarchicalResourceLockManager { private static final HierarchicalResourceLock EMPTY_LOCK_ACQUIRED = new HierarchicalResourceLock() { @Override diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java index d0d3d75ade4a..d9edd003504c 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestPoolBasedHierarchicalResourceLockManager.java @@ -18,9 +18,7 @@ package org.apache.hadoop.ozone.om.lock; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT_DEFAULT; import static 
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT_DEFAULT; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -42,13 +40,12 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.lock.HierachicalResourceLockManager.HierarchicalResourceLock; +import org.apache.hadoop.ozone.om.lock.HierarchicalResourceLockManager.HierarchicalResourceLock; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.EnumSource; import org.junit.jupiter.params.provider.ValueSource; /** @@ -257,15 +254,21 @@ public void testDoubleClose() throws Exception { /** * Test different resource types can be locked independently. 
*/ - @ParameterizedTest - @EnumSource(FlatResource.class) - public void testDifferentResourceTypes(FlatResource resource) throws Exception { - String key = "test-key-" + resource.name(); + @Test + public void testDifferentResourceTypes() throws Exception { - try (HierarchicalResourceLock lock = lockManager.acquireWriteLock(resource, key)) { + List locks = new ArrayList<>(); + for (FlatResource otherResource : FlatResource.values()) { + String key = "test-key"; + locks.add(lockManager.acquireWriteLock(otherResource, key)); + } + for (HierarchicalResourceLock lock : locks) { assertNotNull(lock); assertTrue(lock.isLockAcquired()); } + for (HierarchicalResourceLock lock : locks) { + lock.close(); + } } @@ -291,7 +294,7 @@ public void testDifferentKeysOnSameResource() throws Exception { * Test configuration parameters are respected. */ @Test - public void testConfigurationParameters() + public void testHardLimitsWithCustomConfiguration() throws InterruptedException, IOException, ExecutionException, TimeoutException { OzoneConfiguration customConf = new OzoneConfiguration(); customConf.setInt(OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_SOFT_LIMIT, 100); diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java index 16fc941c9bd8..7a0872277341 100644 --- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java +++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java @@ -51,7 +51,7 @@ import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; -import org.apache.hadoop.ozone.om.lock.HierachicalResourceLockManager; +import org.apache.hadoop.ozone.om.lock.HierarchicalResourceLockManager; import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; 
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; @@ -94,7 +94,7 @@ public interface OMMetadataManager extends DBStoreHAManager, AutoCloseable { /** * Returns the Hierarchical ResourceLock used on Metadata DB. */ - HierachicalResourceLockManager getHierarchicalLockManager(); + HierarchicalResourceLockManager getHierarchicalLockManager(); /** * Returns the epoch associated with current OM process. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index b40905a328c1..c7b071a6e8d9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -103,7 +103,7 @@ import org.apache.hadoop.ozone.om.helpers.S3SecretValue; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.WithMetadata; -import org.apache.hadoop.ozone.om.lock.HierachicalResourceLockManager; +import org.apache.hadoop.ozone.om.lock.HierarchicalResourceLockManager; import org.apache.hadoop.ozone.om.lock.IOzoneManagerLock; import org.apache.hadoop.ozone.om.lock.OmReadOnlyLock; import org.apache.hadoop.ozone.om.lock.OzoneManagerLock; @@ -136,7 +136,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager, private DBStore store; private final IOzoneManagerLock lock; - private final HierachicalResourceLockManager hierarchicalLockManager; + private final HierarchicalResourceLockManager hierarchicalLockManager; private TypedTable userTable; private TypedTable volumeTable; @@ -658,7 +658,7 @@ public IOzoneManagerLock getLock() { } @Override - public HierachicalResourceLockManager getHierarchicalLockManager() { + public 
HierarchicalResourceLockManager getHierarchicalLockManager() { return hierarchicalLockManager; } From b281569bbda234cbda4391f918b9c85cc68772c1 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 16 Oct 2025 09:21:21 -0400 Subject: [PATCH 45/97] HDDS-13783. Add tests Change-Id: I33b25ea6f8ddbad9a33d9420b91dbb72b28de1e7 --- .../hadoop/ozone/om/lock/FlatResource.java | 4 +- .../hadoop/ozone/om/OmSnapshotLocalData.java | 10 +- .../hadoop/ozone/om/OmSnapshotManager.java | 3 +- .../snapshot/OmSnapshotLocalDataManager.java | 202 ++++---- .../TestOmSnapshotLocalDataManager.java | 456 ++++++++++++++---- 5 files changed, 495 insertions(+), 180 deletions(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/FlatResource.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/FlatResource.java index 73f8357252f2..f4d7e72ece3e 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/FlatResource.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/FlatResource.java @@ -26,7 +26,9 @@ public enum FlatResource implements Resource { // Background services lock on a Snapshot. SNAPSHOT_GC_LOCK("SNAPSHOT_GC_LOCK"), // Lock acquired on a Snapshot's RocksDB Handle. - SNAPSHOT_DB_LOCK("SNAPSHOT_DB_LOCK"); + SNAPSHOT_DB_LOCK("SNAPSHOT_DB_LOCK"), + // Lock acquired on a Snapshot's Local Data. 
+ SNAPSHOT_LOCAL_DATA_LOCK("SNAPSHOT_LOCAL_DATA_LOCK"); private String name; private IOzoneManagerLock.ResourceManager resourceManager; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java index d78430b6cae6..5de83927952c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java @@ -190,6 +190,10 @@ public void addVersionSSTFileInfos(List sstFiles, int previous .map(SstFileInfo::new).collect(Collectors.toList()))); } + public void removeVersionSSTFileInfos(int snapshotVersion) { + this.versionSstFileInfos.remove(snapshotVersion); + } + /** * Returns the checksum of the YAML representation. * @return checksum @@ -275,7 +279,7 @@ public OmSnapshotLocalData copyObject() { * maintain immutability. */ public static class VersionMeta implements CopyObject { - private final int previousSnapshotVersion; + private int previousSnapshotVersion; private final List sstFiles; public VersionMeta(int previousSnapshotVersion, List sstFiles) { @@ -287,6 +291,10 @@ public int getPreviousSnapshotVersion() { return previousSnapshotVersion; } + public void setPreviousSnapshotVersion(int previousSnapshotVersion) { + this.previousSnapshotVersion = previousSnapshotVersion; + } + public List getSstFiles() { return sstFiles; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index 743c1e584e25..7b9beb80cf6f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -196,8 +196,7 @@ public final class 
OmSnapshotManager implements AutoCloseable { private final AtomicInteger inFlightSnapshotCount = new AtomicInteger(0); public OmSnapshotManager(OzoneManager ozoneManager) throws IOException { - this.snapshotLocalDataManager = new OmSnapshotLocalDataManager(ozoneManager.getMetadataManager(), - ozoneManager.getConfiguration()); + this.snapshotLocalDataManager = new OmSnapshotLocalDataManager(ozoneManager.getMetadataManager()); boolean isFilesystemSnapshotEnabled = ozoneManager.isFilesystemSnapshotEnabled(); LOG.info("Ozone filesystem snapshot feature is {}.", diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 0f658348bfbe..5457c2649d3f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -17,19 +17,16 @@ package org.apache.hadoop.ozone.om.snapshot; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_MANAGER_FAIR_LOCK; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_MANAGER_FAIR_LOCK_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_MANAGER_STRIPED_LOCK_SIZE_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_MANAGER_STRIPED_LOCK_SIZE_PREFIX; import static org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml.YAML_FILE_EXTENSION; import com.google.common.annotations.VisibleForTesting; import com.google.common.graph.GraphBuilder; import com.google.common.graph.MutableGraph; -import com.google.common.util.concurrent.Striped; import java.io.File; import java.io.IOException; +import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.Paths; import java.nio.file.StandardCopyOption; import java.util.Arrays; import 
java.util.Collections; @@ -42,15 +39,11 @@ import java.util.Set; import java.util.Stack; import java.util.UUID; -import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.stream.Collectors; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.tuple.Pair; -import org.apache.commons.lang3.tuple.Triple; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.utils.SimpleStriped; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshotLocalData; @@ -58,6 +51,9 @@ import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.lock.FlatResource; +import org.apache.hadoop.ozone.om.lock.HierarchicalResourceLockManager; +import org.apache.hadoop.ozone.om.lock.HierarchicalResourceLockManager.HierarchicalResourceLock; import org.apache.hadoop.ozone.om.lock.OMLockDetails; import org.apache.hadoop.ozone.util.ObjectSerializer; import org.apache.hadoop.ozone.util.YamlSerializer; @@ -84,10 +80,9 @@ public class OmSnapshotLocalDataManager implements AutoCloseable { // Used for acquiring locks on the entire data structure. private final ReadWriteLock fullLock; // Locks should be always acquired by iterating through the snapshot chain to avoid deadlocks. 
- private Striped locks; + private HierarchicalResourceLockManager locks; - public OmSnapshotLocalDataManager(OMMetadataManager omMetadataManager, - OzoneConfiguration configuration) throws IOException { + public OmSnapshotLocalDataManager(OMMetadataManager omMetadataManager) throws IOException { this.localDataGraph = GraphBuilder.directed().build(); this.omMetadataManager = omMetadataManager; this.snapshotLocalDataSerializer = new YamlSerializer( @@ -100,7 +95,7 @@ public void computeAndSetChecksum(Yaml yaml, OmSnapshotLocalData data) throws IO }; this.versionNodeMap = new HashMap<>(); this.fullLock = new ReentrantReadWriteLock(); - init(configuration); + init(); } @VisibleForTesting @@ -229,11 +224,8 @@ void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) throws } } - private void init(OzoneConfiguration configuration) throws IOException { - boolean fair = configuration.getBoolean(OZONE_MANAGER_FAIR_LOCK, OZONE_MANAGER_FAIR_LOCK_DEFAULT); - String stripeSizeKey = OZONE_MANAGER_STRIPED_LOCK_SIZE_PREFIX + SNAPSHOT_LOCAL_DATA_LOCK_RESOURCE_NAME; - int size = configuration.getInt(stripeSizeKey, OZONE_MANAGER_STRIPED_LOCK_SIZE_DEFAULT); - this.locks = SimpleStriped.readWriteLock(size, fair); + private void init() throws IOException { + this.locks = omMetadataManager.getHierarchicalLockManager(); RDBStore store = (RDBStore) omMetadataManager.getStore(); String checkpointPrefix = store.getDbLocation().getName(); File snapshotDir = new File(store.getSnapshotsParentDir()); @@ -307,6 +299,37 @@ public void close() { } } + private static class LockDataProviderInitResult { + private final OmSnapshotLocalData snapshotLocalData; + private final HierarchicalResourceLock lock; + private final HierarchicalResourceLock previousLock; + private final UUID previousSnapshotId; + + private LockDataProviderInitResult(HierarchicalResourceLock lock, OmSnapshotLocalData snapshotLocalData, + HierarchicalResourceLock previousLock, UUID previousSnapshotId) { + this.lock = 
lock; + this.snapshotLocalData = snapshotLocalData; + this.previousLock = previousLock; + this.previousSnapshotId = previousSnapshotId; + } + + private HierarchicalResourceLock getLock() { + return lock; + } + + private HierarchicalResourceLock getPreviousLock() { + return previousLock; + } + + private UUID getPreviousSnapshotId() { + return previousSnapshotId; + } + + private OmSnapshotLocalData getSnapshotLocalData() { + return snapshotLocalData; + } + } + /** * The ReadableOmSnapshotLocalDataProvider class is responsible for managing the * access and initialization of local snapshot data in a thread-safe manner. @@ -341,35 +364,35 @@ public void close() { public class ReadableOmSnapshotLocalDataProvider implements AutoCloseable { private final UUID snapshotId; - private final Lock lock; + private final HierarchicalResourceLock lock; + private final HierarchicalResourceLock previousLock; private final OmSnapshotLocalData snapshotLocalData; - private final Lock previousLock; private OmSnapshotLocalData previousSnapshotLocalData; private volatile boolean isPreviousSnapshotLoaded = false; private final UUID resolvedPreviousSnapshotId; protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId) throws IOException { - this(snapshotId, locks.get(snapshotId).readLock()); + this(snapshotId, true); } protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId, UUID snapIdToResolve) throws IOException { - this(snapshotId, locks.get(snapshotId).readLock(), null, snapIdToResolve); + this(snapshotId, true, null, snapIdToResolve); } - protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId, Lock lock) throws IOException { - this(snapshotId, lock, null, null); + protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId, boolean readLock) throws IOException { + this(snapshotId, readLock, null, null); } - protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId, Lock lock, + protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId, boolean readLock, 
CheckedSupplier, IOException> snapshotLocalDataSupplier, UUID snapshotIdToBeResolved) throws IOException { this.snapshotId = snapshotId; - this.lock = lock; - Triple pair = initialize(lock, snapshotId, snapshotIdToBeResolved, + LockDataProviderInitResult result = initialize(readLock, snapshotId, snapshotIdToBeResolved, snapshotLocalDataSupplier); - this.snapshotLocalData = pair.getLeft(); - this.previousLock = pair.getMiddle(); - this.resolvedPreviousSnapshotId = pair.getRight(); + this.snapshotLocalData = result.getSnapshotLocalData(); + this.lock = result.getLock(); + this.previousLock = result.getPreviousLock(); + this.resolvedPreviousSnapshotId = result.getPreviousSnapshotId(); this.previousSnapshotLocalData = null; this.isPreviousSnapshotLoaded = false; } @@ -380,27 +403,35 @@ public OmSnapshotLocalData getSnapshotLocalData() { public synchronized OmSnapshotLocalData getPreviousSnapshotLocalData() throws IOException { if (!isPreviousSnapshotLoaded) { - File previousSnapshotLocalDataFile = new File(getSnapshotLocalPropertyYamlPath(resolvedPreviousSnapshotId)); - this.previousSnapshotLocalData = resolvedPreviousSnapshotId == null ? null : - snapshotLocalDataSerializer.load(previousSnapshotLocalDataFile); + if (resolvedPreviousSnapshotId != null) { + File previousSnapshotLocalDataFile = new File(getSnapshotLocalPropertyYamlPath(resolvedPreviousSnapshotId)); + this.previousSnapshotLocalData = snapshotLocalDataSerializer.load(previousSnapshotLocalDataFile); + } this.isPreviousSnapshotLoaded = true; } return previousSnapshotLocalData; } + private HierarchicalResourceLock acquireLock(UUID snapshotId, boolean readLock) throws IOException { + HierarchicalResourceLock acquiredLock = readLock ? 
locks.acquireReadLock(FlatResource.SNAPSHOT_LOCAL_DATA_LOCK, + snapshotId.toString()) : locks.acquireWriteLock(FlatResource.SNAPSHOT_LOCAL_DATA_LOCK, snapshotId.toString()); + if (!acquiredLock.isLockAcquired()) { + throw new IOException("Unable to acquire lock for snapshotId: " + snapshotId); + } + return acquiredLock; + } + /** * Intializer the snapshot local data by acquiring the lock on the snapshot and also acquires a read lock on the * snapshotId to be resolved by iterating through the chain of previous snapshot ids. */ - private Triple initialize(Lock snapIdLock, UUID snapId, UUID toResolveSnapshotId, + private LockDataProviderInitResult initialize(boolean readLock, UUID snapId, UUID toResolveSnapshotId, CheckedSupplier, IOException> snapshotLocalDataSupplier) throws IOException { - snapIdLock.lock(); - // Get the Lock instance for the snapshot id and track it. - ReadWriteLock lockIdAcquired = locks.get(snapId); - ReadWriteLock previousReadLockAcquired = null; - boolean haspreviousReadLockAcquiredAcquired = false; + HierarchicalResourceLock snapIdLock = null; + HierarchicalResourceLock previousReadLockAcquired = null; try { + snapIdLock = acquireLock(snapId, readLock); snapshotLocalDataSupplier = snapshotLocalDataSupplier == null ? () -> { File snapshotLocalDataFile = new File(getSnapshotLocalPropertyYamlPath(snapId)); return Pair.of(snapshotLocalDataSerializer.load(snapshotLocalDataFile), snapshotLocalDataFile); @@ -417,25 +448,18 @@ private Triple initialize(Lock snapIdLock, UUID // to iterate through the chain. UUID previousSnapshotId = ssLocalData.getPreviousSnapshotId(); if (previousSnapshotId != null) { - if (versionNodeMap.containsKey(previousSnapshotId)) { + if (!versionNodeMap.containsKey(previousSnapshotId)) { throw new IOException(String.format("Operating on snapshot id : %s with previousSnapshotId: %s invalid " + "since previousSnapshotId is not loaded.", snapId, previousSnapshotId)); } toResolveSnapshotId = toResolveSnapshotId == null ? 
ssLocalData.getPreviousSnapshotId() : toResolveSnapshotId; - previousReadLockAcquired = locks.get(previousSnapshotId); - // Stripe lock could return the same lock object for multiple snapshotIds so in case a write lock is - // acquired previously on the same lock then this could cause a deadlock. If the same lock instance is - // returned then acquiring this read lock is unnecessary. - if (lockIdAcquired == previousReadLockAcquired) { - previousReadLockAcquired = null; - } - if (previousReadLockAcquired != null) { - previousReadLockAcquired.readLock().lock(); - haspreviousReadLockAcquiredAcquired = true; - } - Map previousVersionNodeMap = versionNodeMap.get(previousSnapshotId) - .getSnapshotVersions(); + previousReadLockAcquired = acquireLock(previousSnapshotId, true); + // Create a copy of the previous versionMap to get the previous versions corresponding to the previous + // snapshot. This map would be mutated to resolve the previous snapshot's version corresponding to the + // toResolveSnapshotId by iterating through the chain of previous snapshot ids. + Map previousVersionNodeMap = + new HashMap<>(versionNodeMap.get(previousSnapshotId).getSnapshotVersions()); UUID currentIteratedSnapshotId = previousSnapshotId; // Iterate through the chain of previous snapshot ids until the snapshot id to be resolved is found. while (!Objects.equals(currentIteratedSnapshotId, toResolveSnapshotId)) { @@ -454,25 +478,8 @@ private Triple initialize(Lock snapIdLock, UUID currentIteratedSnapshotId, snapId, toResolveSnapshotId)); } UUID previousId = previousIds.iterator().next(); - ReadWriteLock lockToBeAcquired = locks.get(previousId); - // If stripe lock returns the same lock object corresponding to snapshot id then no read lock needs to be - // acquired.
- if (lockToBeAcquired == lockIdAcquired) { - lockToBeAcquired = null; - } - if (lockToBeAcquired != null) { - // If a read lock has already been acquired on the same lock based on the previous iteration snapshot id - // then no need to acquire another read lock on the same lock and this lock could just piggyback on the - // same lock. - if (lockToBeAcquired != previousReadLockAcquired) { - lockToBeAcquired.readLock().lock(); - haspreviousReadLockAcquiredAcquired = true; - } else { - // Set the previous read lock to null since the same lock instance is going to be used for current - // iteration lock as well. - previousReadLockAcquired = null; - } - } + HierarchicalResourceLock previousToPreviousReadLockAcquired = acquireLock(previousId, true); + try { // Get the version node for the snapshot and update the version node to the successor to point to the // previous node. @@ -486,15 +493,15 @@ private Triple initialize(Lock snapIdLock, UUID throw new IOException(String.format("Snapshot %s version %d doesn't have successor", currentIteratedSnapshotId, entry.getValue())); } + // Set the version node for iterated version to the successor corresponding to the previous snapshot id. entry.setValue(versionNode.iterator().next()); } } finally { - // Release the read lock acquired on the previous snapshot id if it was acquired. Now that the instance + // Release the read lock acquired on the previous snapshot id acquired. Now that the instance // is no longer needed we can release the read lock for the snapshot iterated in the previous snapshot. - if (previousReadLockAcquired != null) { - previousReadLockAcquired.readLock().unlock(); - } - previousReadLockAcquired = lockToBeAcquired; + // Make previousToPrevious previous for next iteration. 
+ previousReadLockAcquired.close(); + previousReadLockAcquired = previousToPreviousReadLockAcquired; currentIteratedSnapshotId = previousId; } } @@ -502,6 +509,8 @@ private Triple initialize(Lock snapIdLock, UUID Map versionMetaMap = ssLocalData.getVersionSstFileInfos(); for (Map.Entry entry : versionMetaMap.entrySet()) { OmSnapshotLocalData.VersionMeta versionMeta = entry.getValue(); + // Get the relative version node which corresponds to the toResolveSnapshotId corresponding to the + // versionMeta which points to a particular version in the previous snapshot LocalDataVersionNode relativePreviousVersionNode = previousVersionNodeMap.get(versionMeta.getPreviousSnapshotVersion()); if (relativePreviousVersionNode == null) { @@ -509,28 +518,33 @@ private Triple initialize(Lock snapIdLock, UUID " with version : %d against previous snapshot %s previous version : %d", snapId, entry.getKey(), toResolveSnapshotId, versionMeta.getPreviousSnapshotVersion())); } + // Set the previous snapshot version to the relativePreviousVersionNode which was captured. + versionMeta.setPreviousSnapshotVersion(relativePreviousVersionNode.getVersion()); } } else { toResolveSnapshotId = null; } - return Triple.of(ssLocalData, previousReadLockAcquired != null ? previousReadLockAcquired.readLock() : null, - toResolveSnapshotId); + return new LockDataProviderInitResult(snapIdLock, ssLocalData, previousReadLockAcquired, toResolveSnapshotId); } catch (IOException e) { // Release all the locks in case of an exception and rethrow the exception. 
- if (previousReadLockAcquired != null && haspreviousReadLockAcquiredAcquired) { - previousReadLockAcquired.readLock().unlock(); + if (previousReadLockAcquired != null) { + previousReadLockAcquired.close(); + } + if (snapIdLock != null) { + snapIdLock.close(); } - snapIdLock.unlock(); throw e; } } @Override - public void close() { + public void close() throws IOException { if (previousLock != null) { - previousLock.unlock(); + previousLock.close(); + } + if (lock != null) { + lock.close(); } - lock.unlock(); } } @@ -552,18 +566,18 @@ public void close() { public final class WritableOmSnapshotLocalDataProvider extends ReadableOmSnapshotLocalDataProvider { private WritableOmSnapshotLocalDataProvider(UUID snapshotId) throws IOException { - super(snapshotId, locks.get(snapshotId).writeLock()); + super(snapshotId, false); fullLock.readLock().lock(); } private WritableOmSnapshotLocalDataProvider(UUID snapshotId, UUID snapshotIdToBeResolved) throws IOException { - super(snapshotId, locks.get(snapshotId).writeLock(), null, snapshotIdToBeResolved); + super(snapshotId, false, null, snapshotIdToBeResolved); fullLock.readLock().lock(); } private WritableOmSnapshotLocalDataProvider(UUID snapshotId, CheckedSupplier, IOException> snapshotLocalDataSupplier) throws IOException { - super(snapshotId, locks.get(snapshotId).writeLock(), snapshotLocalDataSupplier, null); + super(snapshotId, false, snapshotLocalDataSupplier, null); fullLock.readLock().lock(); } @@ -607,7 +621,9 @@ private synchronized void upsertNode(UUID snapshotId, SnapshotVersionsMeta snaps public void addSnapshotVersion(RDBStore snapshotStore) throws IOException { List sstFiles = OmSnapshotManager.getSnapshotSSTFileList(snapshotStore); - this.getSnapshotLocalData().addVersionSSTFileInfos(sstFiles, getPreviousSnapshotLocalData().getVersion()); + OmSnapshotLocalData previousSnapshotLocalData = getPreviousSnapshotLocalData(); + this.getSnapshotLocalData().addVersionSSTFileInfos(sstFiles, previousSnapshotLocalData == 
null ? 0 : + previousSnapshotLocalData.getVersion()); } public synchronized void commit() throws IOException { @@ -623,13 +639,13 @@ public synchronized void commit() throws IOException { throw new IOException("Unable to delete tmp file " + tmpFilePath); } snapshotLocalDataSerializer.save(new File(tmpFilePath), super.snapshotLocalData); - FileUtils.moveFile(tmpFile, new File(filePath), StandardCopyOption.ATOMIC_MOVE, + Files.move(tmpFile.toPath(), Paths.get(filePath), StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING); upsertNode(super.snapshotId, localDataVersionNodes); } @Override - public void close() { + public void close() throws IOException { super.close(); fullLock.readLock().unlock(); } @@ -652,6 +668,10 @@ private UUID getPreviousSnapshotId() { return previousSnapshotId; } + private int getVersion() { + return version; + } + @Override public boolean equals(Object o) { if (!(o instanceof LocalDataVersionNode)) { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index e63a557ca83c..43b3f1838521 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -27,43 +27,51 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.Mockito.doAnswer; +import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.mockStatic; +import static org.mockito.Mockito.reset; 
import static org.mockito.Mockito.when; -import com.google.common.util.concurrent.Striped; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; import java.io.File; import java.io.IOException; import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; import java.util.Comparator; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.TreeMap; import java.util.UUID; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReadWriteLock; import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.Stream; +import org.apache.commons.compress.utils.Sets; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.StringUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.utils.SimpleStriped; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.RocksDatabase; +import org.apache.hadoop.hdds.utils.db.RocksDatabaseException; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshotLocalData; import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.lock.FlatResource; +import org.apache.hadoop.ozone.om.lock.HierarchicalResourceLockManager; +import org.apache.hadoop.ozone.om.lock.HierarchicalResourceLockManager.HierarchicalResourceLock; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataProvider; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager.WritableOmSnapshotLocalDataProvider; import org.apache.hadoop.ozone.util.YamlSerializer; import 
org.apache.ozone.compaction.log.SstFileInfo; +import org.assertj.core.util.Lists; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; @@ -71,8 +79,8 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; import org.mockito.Mock; -import org.mockito.MockedStatic; import org.mockito.MockitoAnnotations; import org.rocksdb.LiveFileMetaData; import org.yaml.snakeyaml.Yaml; @@ -83,12 +91,14 @@ public class TestOmSnapshotLocalDataManager { private static YamlSerializer snapshotLocalDataYamlSerializer; - - private static OzoneConfiguration conf; + private static List lockCapturor; @Mock private OMMetadataManager omMetadataManager; + @Mock + private HierarchicalResourceLockManager lockManager; + @Mock private RDBStore rdbStore; @@ -110,7 +120,6 @@ public class TestOmSnapshotLocalDataManager { @BeforeAll public static void setupClass() { - conf = new OzoneConfiguration(); snapshotLocalDataYamlSerializer = new YamlSerializer( new OmSnapshotLocalDataYaml.YamlFactory()) { @@ -119,10 +128,11 @@ public void computeAndSetChecksum(Yaml yaml, OmSnapshotLocalData data) throws IO data.computeAndSetChecksum(yaml); } }; + lockCapturor = new ArrayList<>(); } @AfterAll - public static void teardownClass() throws IOException { + public static void teardownClass() { snapshotLocalDataYamlSerializer.close(); snapshotLocalDataYamlSerializer = null; } @@ -133,15 +143,15 @@ public void setUp() throws IOException { // Setup mock behavior when(omMetadataManager.getStore()).thenReturn(rdbStore); - + when(omMetadataManager.getHierarchicalLockManager()).thenReturn(lockManager); this.snapshotsDir = tempDir.resolve("snapshots").toFile(); FileUtils.deleteDirectory(snapshotsDir); assertTrue(snapshotsDir.exists() || snapshotsDir.mkdirs()); File dbLocation = tempDir.resolve("db").toFile(); 
FileUtils.deleteDirectory(dbLocation); assertTrue(dbLocation.exists() || dbLocation.mkdirs()); + mockLockManager(); - when(rdbStore.getSnapshotsParentDir()).thenReturn(snapshotsDir.getAbsolutePath()); when(rdbStore.getDbLocation()).thenReturn(dbLocation); } @@ -156,92 +166,368 @@ public void tearDown() throws Exception { } } - private String getReadLockMessageAcquire(int index) { - return READ_LOCK_MESSAGE_ACQUIRE + index; + private String getReadLockMessageAcquire(UUID snapshotId) { + return READ_LOCK_MESSAGE_ACQUIRE + " " + FlatResource.SNAPSHOT_LOCAL_DATA_LOCK + " " + snapshotId; } - private String getReadLockMessageUnlock(int index) { - return READ_LOCK_MESSAGE_UNLOCK + index; + private String getReadLockMessageRelease(UUID snapshotId) { + return READ_LOCK_MESSAGE_UNLOCK + " " + FlatResource.SNAPSHOT_LOCAL_DATA_LOCK + " " + snapshotId; } - private String getWriteLockMessageAcquire(int index) { - return WRITE_LOCK_MESSAGE_ACQUIRE + index; + private String getWriteLockMessageAcquire(UUID snapshotId) { + return WRITE_LOCK_MESSAGE_ACQUIRE + " " + FlatResource.SNAPSHOT_LOCAL_DATA_LOCK + " " + snapshotId; } - private String getWriteLockMessageUnlock(int index) { - return WRITE_LOCK_MESSAGE_UNLOCK + index; + private String getWriteLockMessageRelease(UUID snapshotId) { + return WRITE_LOCK_MESSAGE_UNLOCK + " " + FlatResource.SNAPSHOT_LOCAL_DATA_LOCK + " " + snapshotId; } - private MockedStatic mockStripedLock(Map lockMap, int numLocks, - List messageCaptorer) { - MockedStatic mockedStatic = mockStatic(SimpleStriped.class); - Striped stripedLock = mock(Striped.class); - - List readWriteLocks = new ArrayList<>(); - for (int idx = 0; idx < numLocks; idx++) { - final int lockIndex = idx; - ReadWriteLock readWriteLock = mock(ReadWriteLock.class); - Lock readLock = mock(Lock.class); - Lock writeLock = mock(Lock.class); - when(readWriteLock.readLock()).thenReturn(readLock); - when(readWriteLock.writeLock()).thenReturn(writeLock); - doAnswer(invocationOnMock -> { - 
messageCaptorer.add(getReadLockMessageAcquire(lockIndex)); - return null; - }).when(readLock).lock(); - doAnswer(invocationOnMock -> { - messageCaptorer.add(getReadLockMessageUnlock(lockIndex)); - return null; - }).when(readLock).unlock(); + private HierarchicalResourceLock getHierarchicalResourceLock(FlatResource resource, String key, boolean isWriteLock) { + return new HierarchicalResourceLock() { + @Override + public boolean isLockAcquired() { + return true; + } - doAnswer(invocationOnMock -> { - messageCaptorer.add(getWriteLockMessageAcquire(lockIndex)); - return null; - }).when(writeLock).lock(); - doAnswer(invocationOnMock -> { - messageCaptorer.add(getWriteLockMessageUnlock(lockIndex)); - return null; - }).when(writeLock).unlock(); - } - when(stripedLock.get(any())).thenAnswer(i -> { - if (lockMap.containsKey(i.getArgument(0))) { - return readWriteLocks.get(lockMap.get(i.getArgument(0))); + @Override + public void close() { + if (isWriteLock) { + lockCapturor.add(WRITE_LOCK_MESSAGE_UNLOCK + " " + resource + " " + key); + } else { + lockCapturor.add(READ_LOCK_MESSAGE_UNLOCK + " " + resource + " " + key); + } } - return readWriteLocks.get(0); - }); - mockedStatic.when(() -> SimpleStriped.readWriteLock(anyInt(), anyBoolean())).thenReturn(stripedLock); - return mockedStatic; + }; } - private List createSnapshotLocalData(OmSnapshotLocalDataManager localDataManager, - int numberOfSnapshots) { - List snapshotInfos = new ArrayList<>(); - SnapshotInfo previouseSnapshotInfo = null; + private void mockLockManager() throws IOException { + lockCapturor.clear(); + reset(lockManager); + when(lockManager.acquireReadLock(any(FlatResource.class), anyString())) + .thenAnswer(i -> { + lockCapturor.add(READ_LOCK_MESSAGE_ACQUIRE + " " + i.getArgument(0) + " " + i.getArgument(1)); + return getHierarchicalResourceLock(i.getArgument(0), i.getArgument(1), false); + }); + when(lockManager.acquireWriteLock(any(FlatResource.class), anyString())) + .thenAnswer(i -> { + 
lockCapturor.add(WRITE_LOCK_MESSAGE_ACQUIRE + " " + i.getArgument(0) + " " + i.getArgument(1)); + return getHierarchicalResourceLock(i.getArgument(0), i.getArgument(1), true); + }); + } + private List createSnapshotLocalData(OmSnapshotLocalDataManager localDataManager, + int numberOfSnapshots) throws IOException { + SnapshotInfo previousSnapshotInfo = null; + int counter = 0; + Map> liveFileMetaDataMap = new HashMap<>(); + liveFileMetaDataMap.put(KEY_TABLE, + Lists.newArrayList(createMockLiveFileMetaData("file1.sst", KEY_TABLE, "key1", "key2"))); + liveFileMetaDataMap.put(FILE_TABLE, Lists.newArrayList(createMockLiveFileMetaData("file2.sst", FILE_TABLE, "key1", + "key2"))); + liveFileMetaDataMap.put(DIRECTORY_TABLE, Lists.newArrayList(createMockLiveFileMetaData("file2.sst", DIRECTORY_TABLE, "key1", + "key2"))); + liveFileMetaDataMap.put("col1", Lists.newArrayList(createMockLiveFileMetaData("file2.sst", "col1", "key1", + "key2"))); + List snapshotIds = new ArrayList<>(); for (int i = 0; i < numberOfSnapshots; i++) { - java.util.UUID snapshotId = java.util.UUID.randomUUID(); - SnapshotInfo snapshotInfo = createMockSnapshotInfo(snapshotId, previouseSnapshotInfo == null ? null - : previouseSnapshotInfo.getSnapshotId()); - OmSnapshotLocalData localData = createMockLocalData(snapshotId, snapshotInfo.getPathPreviousSnapshotId()); - - snapshotInfos.add(snapshotInfo); - previouseSnapshotInfo = snapshotInfo; + UUID snapshotId = UUID.randomUUID(); + SnapshotInfo snapshotInfo = createMockSnapshotInfo(snapshotId, previousSnapshotInfo == null ? 
null + : previousSnapshotInfo.getSnapshotId()); + mockSnapshotStore(snapshotId, liveFileMetaDataMap.values().stream() + .flatMap(Collection::stream).collect(Collectors.toList())); + localDataManager.createNewOmSnapshotLocalDataFile(snapshotStore, snapshotInfo); + previousSnapshotInfo = snapshotInfo; + for (String table : liveFileMetaDataMap.keySet()) { + liveFileMetaDataMap.get(table).add( + createMockLiveFileMetaData("file" + counter++ + ".sst", table, "key1", "key4")); + } + snapshotIds.add(snapshotId); } - return null; + return snapshotIds; + } + + private void mockSnapshotStore(UUID snapshotId, List sstFiles) throws RocksDatabaseException { + // Setup snapshot store mock + File snapshotDbLocation = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotId).toFile(); + assertTrue(snapshotDbLocation.exists() || snapshotDbLocation.mkdirs()); + + when(snapshotStore.getDbLocation()).thenReturn(snapshotDbLocation); + RocksDatabase rocksDatabase = mock(RocksDatabase.class); + when(snapshotStore.getDb()).thenReturn(rocksDatabase); + when(rocksDatabase.getLiveFilesMetaData()).thenReturn(sstFiles); } /** * Reading Snap1 against snap5 */ + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testLockOrderingAgainstAnotherSnapshot(boolean read) throws IOException { + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + List snapshotIds = createSnapshotLocalData(localDataManager, 20); + for (int start = 0; start < snapshotIds.size(); start++) { + for (int end = start + 1; end < snapshotIds.size(); end++) { + UUID startSnapshotId = snapshotIds.get(start); + UUID endSnapshotId = snapshotIds.get(end); + lockCapturor.clear(); + int logCaptorIdx = 0; + try (ReadableOmSnapshotLocalDataProvider omSnapshotLocalDataProvider = + read ? 
localDataManager.getOmSnapshotLocalData(endSnapshotId, startSnapshotId) : + localDataManager.getWritableOmSnapshotLocalData(endSnapshotId, startSnapshotId)) { + OmSnapshotLocalData snapshotLocalData = omSnapshotLocalDataProvider.getSnapshotLocalData(); + OmSnapshotLocalData previousSnapshot = omSnapshotLocalDataProvider.getPreviousSnapshotLocalData(); + assertEquals(startSnapshotId, previousSnapshot.getSnapshotId()); + assertEquals(endSnapshotId, snapshotLocalData.getSnapshotId()); + if (read) { + assertEquals(getReadLockMessageAcquire(endSnapshotId), lockCapturor.get(logCaptorIdx++)); + } else { + assertEquals(getWriteLockMessageAcquire(endSnapshotId), lockCapturor.get(logCaptorIdx++)); + } + int idx = end - 1; + UUID previousSnapId = snapshotIds.get(idx--); + assertEquals(getReadLockMessageAcquire(previousSnapId), lockCapturor.get(logCaptorIdx++)); + while (idx >= start) { + UUID prevPrevSnapId = snapshotIds.get(idx--); + assertEquals(getReadLockMessageAcquire(prevPrevSnapId), lockCapturor.get(logCaptorIdx++)); + assertEquals(getReadLockMessageRelease(previousSnapId), lockCapturor.get(logCaptorIdx++)); + previousSnapId = prevPrevSnapId; + } + } + assertEquals(getReadLockMessageRelease(startSnapshotId), lockCapturor.get(logCaptorIdx++)); + if (read) { + assertEquals(getReadLockMessageRelease(endSnapshotId), lockCapturor.get(logCaptorIdx++)); + } else { + assertEquals(getWriteLockMessageRelease(endSnapshotId), lockCapturor.get(logCaptorIdx++)); + } + assertEquals(lockCapturor.size(), logCaptorIdx); + } + } + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testVersionLockResolution(boolean read) throws IOException { + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + List snapshotIds = createSnapshotLocalData(localDataManager, 5); + for (int snapIdx = 0; snapIdx < snapshotIds.size(); snapIdx++) { + UUID snapId = snapshotIds.get(snapIdx); + UUID expectedPreviousSnapId = snapIdx - 1 >= 0 ? 
snapshotIds.get(snapIdx - 1) : null; + lockCapturor.clear(); + int logCaptorIdx = 0; + try (ReadableOmSnapshotLocalDataProvider omSnapshotLocalDataProvider = + read ? localDataManager.getOmSnapshotLocalData(snapId) : + localDataManager.getWritableOmSnapshotLocalData(snapId)) { + OmSnapshotLocalData snapshotLocalData = omSnapshotLocalDataProvider.getSnapshotLocalData(); + OmSnapshotLocalData previousSnapshot = omSnapshotLocalDataProvider.getPreviousSnapshotLocalData(); + assertEquals(snapId, snapshotLocalData.getSnapshotId()); + assertEquals(expectedPreviousSnapId, previousSnapshot == null ? null : + previousSnapshot.getSnapshotId()); + if (read) { + assertEquals(getReadLockMessageAcquire(snapId), lockCapturor.get(logCaptorIdx++)); + } else { + assertEquals(getWriteLockMessageAcquire(snapId), lockCapturor.get(logCaptorIdx++)); + } + if (expectedPreviousSnapId != null) { + assertEquals(getReadLockMessageAcquire(expectedPreviousSnapId), lockCapturor.get(logCaptorIdx++)); + } + } + if (expectedPreviousSnapId != null) { + assertEquals(getReadLockMessageRelease(expectedPreviousSnapId), lockCapturor.get(logCaptorIdx++)); + } + if (read) { + assertEquals(getReadLockMessageRelease(snapId), lockCapturor.get(logCaptorIdx++)); + } else { + assertEquals(getWriteLockMessageRelease(snapId), lockCapturor.get(logCaptorIdx++)); + } + assertEquals(lockCapturor.size(), logCaptorIdx); + } + } + + @Test + public void testWriteVersionAdditionValidationWithoutPreviousSnapshotVersionExisting() throws IOException { + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + List snapshotIds = createSnapshotLocalData(localDataManager, 2); + UUID snapId = snapshotIds.get(1); + try (WritableOmSnapshotLocalDataProvider omSnapshotLocalDataProvider = + localDataManager.getWritableOmSnapshotLocalData(snapId)) { + OmSnapshotLocalData snapshotLocalData = omSnapshotLocalDataProvider.getSnapshotLocalData(); + 
snapshotLocalData.addVersionSSTFileInfos(Lists.newArrayList(createMockLiveFileMetaData("file1.sst", KEY_TABLE, + "key1", "key2")), 3); + + IOException ex = assertThrows(IOException.class, omSnapshotLocalDataProvider::commit); + System.out.println(ex.getMessage()); + assertTrue(ex.getMessage().contains("since previous snapshot with version hasn't been loaded")); + } + } + @Test - public void testLockOrderingWithOverLappingLocks() { + public void testAddVersionFromRDB() throws IOException { + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + List snapshotIds = createSnapshotLocalData(localDataManager, 2); + addVersionsToLocalData(localDataManager, snapshotIds.get(0), ImmutableMap.of(4, 5, 6, 8)); + UUID snapId = snapshotIds.get(1); + List newVersionSstFiles = + Lists.newArrayList(createMockLiveFileMetaData("file5.sst", KEY_TABLE, "key1", "key2"), + createMockLiveFileMetaData("file6.sst", FILE_TABLE, "key1", "key2"), + createMockLiveFileMetaData("file7.sst", KEY_TABLE, "key1", "key2"), + createMockLiveFileMetaData("file1.sst", "col1", "key1", "key2")); + try (WritableOmSnapshotLocalDataProvider snap = + localDataManager.getWritableOmSnapshotLocalData(snapId)) { + mockSnapshotStore(snapId, newVersionSstFiles); + snap.addSnapshotVersion(snapshotStore); + snap.commit(); + } + validateVersions(localDataManager, snapId, 1, Sets.newHashSet(0, 1)); + try (ReadableOmSnapshotLocalDataProvider snap = localDataManager.getOmSnapshotLocalData(snapId)) { + OmSnapshotLocalData snapshotLocalData = snap.getSnapshotLocalData(); + OmSnapshotLocalData.VersionMeta versionMeta = snapshotLocalData.getVersionSstFileInfos().get(1); + assertEquals(6, versionMeta.getPreviousSnapshotVersion()); + List expectedLiveFileMetaData = + newVersionSstFiles.subList(0, 3).stream().map(SstFileInfo::new).collect(Collectors.toList()); + assertEquals(expectedLiveFileMetaData, versionMeta.getSstFiles()); + } + + } + + private void validateVersions(OmSnapshotLocalDataManager 
snapshotLocalDataManager, UUID snapId, int expectedVersion, + Set expectedVersions) throws IOException { + try (ReadableOmSnapshotLocalDataProvider snap = snapshotLocalDataManager.getOmSnapshotLocalData(snapId)) { + assertEquals(expectedVersion, snap.getSnapshotLocalData().getVersion()); + assertEquals(expectedVersions, snap.getSnapshotLocalData().getVersionSstFileInfos().keySet()); + } + } + + /** + * Validates write-time version propagation and removal rules when the previous + * snapshot already has a concrete version recorded. + * + * Test flow: + * 1) Create two snapshots in a chain: {@code prevSnapId -> snapId}. + * 2) For {@code prevSnapId}: set {@code version=3} and add SST metadata for version {@code 0}; commit. + * 3) For {@code snapId}: set {@code version=4} and add SST metadata for version {@code 4}; commit. + * After commit, versions resolve to {@code prev.version=4} and {@code snap.version=5}, and their + * version maps are {@code {0,4}} and {@code {0,5}} respectively (base version 0 plus the current one). + * 4) If {@code nextVersionExisting} is {@code true}: + * - Attempt to remove version {@code 4} from {@code prevSnapId}; expect {@link IOException} because + * the successor snapshot still exists at version {@code 5} and depends on {@code prevSnapId}. + * - Validate that versions and version maps remain unchanged. + * Else ({@code false}): + * - Remove version {@code 5} from {@code snapId} and commit, then remove version {@code 4} from + * {@code prevSnapId} and commit. + * - Validate that both snapshots now only contain the base version {@code 0}. + * + * This ensures a snapshot cannot drop a version that still has a dependent successor, and that removals + * are allowed only after dependents are cleared. 
+ * + * @param nextVersionExisting whether the successor snapshot's version still exists ({@code true}) or is + * removed first ({@code false}) + * @throws IOException if commit validation fails as expected in the protected case + */ + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testWriteVersionValidation(boolean nextVersionExisting) throws IOException { + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + List snapshotIds = createSnapshotLocalData(localDataManager, 2); + UUID prevSnapId = snapshotIds.get(0); + UUID snapId = snapshotIds.get(1); + addVersionsToLocalData(localDataManager, prevSnapId, ImmutableMap.of(4, 1)); + addVersionsToLocalData(localDataManager, snapId, ImmutableMap.of(5, 4)); + + validateVersions(localDataManager, snapId, 5, Sets.newHashSet(0, 5)); + validateVersions(localDataManager, prevSnapId, 4, Sets.newHashSet(0, 4)); + + if (nextVersionExisting) { + try (WritableOmSnapshotLocalDataProvider prevSnap = localDataManager.getWritableOmSnapshotLocalData(prevSnapId)) { + prevSnap.getSnapshotLocalData().removeVersionSSTFileInfos(4); + IOException ex = assertThrows(IOException.class, prevSnap::commit); + assertTrue(ex.getMessage().contains("Cannot remove Snapshot " + prevSnapId + " with version : 4 since it " + + "still has predecessors")); + } + validateVersions(localDataManager, snapId, 5, Sets.newHashSet(0, 5)); + validateVersions(localDataManager, prevSnapId, 4, Sets.newHashSet(0, 4)); + } else { + try (WritableOmSnapshotLocalDataProvider snap = localDataManager.getWritableOmSnapshotLocalData(snapId)) { + snap.getSnapshotLocalData().removeVersionSSTFileInfos(5); + snap.commit(); + } + + try (WritableOmSnapshotLocalDataProvider prevSnap = localDataManager.getWritableOmSnapshotLocalData(prevSnapId)) { + prevSnap.getSnapshotLocalData().removeVersionSSTFileInfos(4); + prevSnap.commit(); + } + validateVersions(localDataManager, snapId, 5, Sets.newHashSet(0)); + validateVersions(localDataManager, 
prevSnapId, 4, Sets.newHashSet(0)); + } + } + private void addVersionsToLocalData(OmSnapshotLocalDataManager snapshotLocalDataManager, + UUID snapId, Map versionMap) throws IOException { + try (WritableOmSnapshotLocalDataProvider snap = snapshotLocalDataManager.getWritableOmSnapshotLocalData(snapId)) { + OmSnapshotLocalData snapshotLocalData = snap.getSnapshotLocalData(); + for (Map.Entry version : versionMap.entrySet().stream() + .sorted(Map.Entry.comparingByKey()).collect(Collectors.toList())) { + snapshotLocalData.setVersion(version.getKey() - 1); + snapshotLocalData.addVersionSSTFileInfos(ImmutableList.of(createMockLiveFileMetaData("file" + version + + ".sst", KEY_TABLE, "key1", "key2")), version.getValue()); + } + snap.commit(); + } + try (ReadableOmSnapshotLocalDataProvider snap = snapshotLocalDataManager.getOmSnapshotLocalData(snapId)) { + OmSnapshotLocalData snapshotLocalData = snap.getSnapshotLocalData(); + for (int version : versionMap.keySet()) { + assertTrue(snapshotLocalData.getVersionSstFileInfos().containsKey(version)); + } + } + } + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testVersionResolution(boolean read) throws IOException { + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + List snapshotIds = createSnapshotLocalData(localDataManager, 5); + List> versionMaps = Arrays.asList( + ImmutableMap.of(4, 1, 6, 3, 8, 9, 11, 15), + ImmutableMap.of(5, 4, 6, 8, 10, 11), + ImmutableMap.of(1, 5, 3, 5, 8, 10), + ImmutableMap.of(1, 1, 2, 3, 5, 8), + ImmutableMap.of(1, 1, 11, 2, 20, 5, 30, 2) + ); + for (int i = 0; i < snapshotIds.size(); i++) { + addVersionsToLocalData(localDataManager, snapshotIds.get(i), versionMaps.get(i)); + } + for (int start = 0; start < snapshotIds.size(); start++) { + for (int end = start + 1; end < snapshotIds.size(); end++) { + UUID prevSnapId = snapshotIds.get(start); + UUID snapId = snapshotIds.get(end); + Map versionMap = new HashMap<>(versionMaps.get(end)); + versionMap.put(0, 
0); + for (int idx = end - 1; idx > start; idx--) { + for (Map.Entry version : versionMap.entrySet()) { + version.setValue(versionMaps.get(idx).getOrDefault(version.getValue(), 0)); + } + } + try (ReadableOmSnapshotLocalDataProvider snap = read ? + localDataManager.getOmSnapshotLocalData(snapId, prevSnapId) : + localDataManager.getWritableOmSnapshotLocalData(snapId, prevSnapId)) { + OmSnapshotLocalData snapshotLocalData = snap.getSnapshotLocalData(); + OmSnapshotLocalData prevSnapshotLocalData = snap.getPreviousSnapshotLocalData(); + assertEquals(prevSnapshotLocalData.getSnapshotId(), snapshotLocalData.getPreviousSnapshotId()); + assertEquals(prevSnapId, snapshotLocalData.getPreviousSnapshotId()); + assertEquals(snapId, snapshotLocalData.getSnapshotId()); + assertTrue(snapshotLocalData.getVersionSstFileInfos().size() > 1); + snapshotLocalData.getVersionSstFileInfos() + .forEach((version, versionMeta) -> { + assertEquals(versionMap.get(version), versionMeta.getPreviousSnapshotVersion()); + }); + } + } + } } + @Test public void testConstructor() throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); assertNotNull(localDataManager); } @@ -250,7 +536,7 @@ public void testGetSnapshotLocalPropertyYamlPathWithSnapshotInfo() throws IOExce UUID snapshotId = UUID.randomUUID(); SnapshotInfo snapshotInfo = createMockSnapshotInfo(snapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); File yamlPath = new File(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo)); assertNotNull(yamlPath); @@ -281,7 +567,7 @@ public void testCreateNewOmSnapshotLocalDataFile() throws IOException { RocksDatabase rocksDatabase = mock(RocksDatabase.class); when(snapshotStore.getDb()).thenReturn(rocksDatabase); 
when(rocksDatabase.getLiveFilesMetaData()).thenReturn(sstFiles); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); localDataManager.createNewOmSnapshotLocalDataFile(snapshotStore, snapshotInfo); @@ -304,7 +590,7 @@ public void testGetOmSnapshotLocalDataWithSnapshotInfo() throws IOException { // Create and write snapshot local data file OmSnapshotLocalData localData = createMockLocalData(snapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); // Write the file manually for testing Path yamlPath = Paths.get(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo.getSnapshotId())); @@ -325,7 +611,7 @@ public void testGetOmSnapshotLocalDataWithMismatchedSnapshotId() throws IOExcept // Create local data with wrong snapshot ID OmSnapshotLocalData localData = createMockLocalData(wrongSnapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); Path yamlPath = Paths.get(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotId)); writeLocalDataToFile(localData, yamlPath); @@ -341,7 +627,7 @@ public void testGetOmSnapshotLocalDataWithFile() throws IOException { OmSnapshotLocalData localData = createMockLocalData(snapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); Path yamlPath = tempDir.resolve("test-snapshot.yaml"); writeLocalDataToFile(localData, yamlPath); @@ -359,7 +645,7 @@ public void testAddVersionNodeWithDependents() throws IOException { .sorted(Comparator.comparing(String::valueOf)).collect(Collectors.toList()); UUID snapshotId = versionIds.get(0); UUID previousSnapshotId = versionIds.get(1); - localDataManager = new 
OmSnapshotLocalDataManager(omMetadataManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); // Create snapshot directory structure and files createSnapshotLocalDataFile(snapshotId, previousSnapshotId); createSnapshotLocalDataFile(previousSnapshotId, null); @@ -375,7 +661,7 @@ public void testAddVersionNodeWithDependentsAlreadyExists() throws IOException { createSnapshotLocalDataFile(snapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); OmSnapshotLocalData localData = createMockLocalData(snapshotId, null); @@ -397,7 +683,7 @@ public void testInitWithExistingYamlFiles() throws IOException { createSnapshotLocalDataFile(snapshotId, previousSnapshotId); // Initialize - should load existing files - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); assertNotNull(localDataManager); Map versionMap = @@ -417,13 +703,13 @@ public void testInitWithInvalidPathThrowsException() throws IOException { // Should throw IOException during init assertThrows(IOException.class, () -> { - new OmSnapshotLocalDataManager(omMetadataManager, conf); + new OmSnapshotLocalDataManager(omMetadataManager); }); } @Test public void testClose() throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); // Should not throw exception localDataManager.close(); From 1ad24b4e3824fdce86e22fc79c243ff220360bb2 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 16 Oct 2025 09:26:30 -0400 Subject: [PATCH 46/97] HDDS-13783. 
Fix checkstyle Change-Id: I5d861cc0120cf89b43cdb961734931396736a27a --- .../snapshot/OmSnapshotLocalDataManager.java | 9 ++++----- .../TestOmSnapshotLocalDataManager.java | 19 ++++++++++--------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 5457c2649d3f..8f6ce69ad554 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -42,7 +42,6 @@ import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.stream.Collectors; -import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -299,7 +298,7 @@ public void close() { } } - private static class LockDataProviderInitResult { + private static final class LockDataProviderInitResult { private final OmSnapshotLocalData snapshotLocalData; private final HierarchicalResourceLock lock; private final HierarchicalResourceLock previousLock; @@ -412,11 +411,11 @@ public synchronized OmSnapshotLocalData getPreviousSnapshotLocalData() throws IO return previousSnapshotLocalData; } - private HierarchicalResourceLock acquireLock(UUID snapshotId, boolean readLock) throws IOException { + private HierarchicalResourceLock acquireLock(UUID snapId, boolean readLock) throws IOException { HierarchicalResourceLock acquiredLock = readLock ? 
locks.acquireReadLock(FlatResource.SNAPSHOT_LOCAL_DATA_LOCK, - snapshotId.toString()) : locks.acquireWriteLock(FlatResource.SNAPSHOT_LOCAL_DATA_LOCK, snapshotId.toString()); + snapId.toString()) : locks.acquireWriteLock(FlatResource.SNAPSHOT_LOCAL_DATA_LOCK, snapId.toString()); if (!acquiredLock.isLockAcquired()) { - throw new IOException("Unable to acquire lock for snapshotId: " + snapshotId); + throw new IOException("Unable to acquire lock for snapshotId: " + snapId); } return acquiredLock; } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index 43b3f1838521..d273e758c3f1 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -41,14 +41,12 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; -import java.util.TreeMap; import java.util.UUID; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -215,7 +213,7 @@ private void mockLockManager() throws IOException { }); } - private List createSnapshotLocalData(OmSnapshotLocalDataManager localDataManager, + private List createSnapshotLocalData(OmSnapshotLocalDataManager snapshotLocalDataManager, int numberOfSnapshots) throws IOException { SnapshotInfo previousSnapshotInfo = null; int counter = 0; @@ -224,8 +222,8 @@ private List createSnapshotLocalData(OmSnapshotLocalDataManager localDataM Lists.newArrayList(createMockLiveFileMetaData("file1.sst", KEY_TABLE, "key1", "key2"))); liveFileMetaDataMap.put(FILE_TABLE, 
Lists.newArrayList(createMockLiveFileMetaData("file2.sst", FILE_TABLE, "key1", "key2"))); - liveFileMetaDataMap.put(DIRECTORY_TABLE, Lists.newArrayList(createMockLiveFileMetaData("file2.sst", DIRECTORY_TABLE, "key1", - "key2"))); + liveFileMetaDataMap.put(DIRECTORY_TABLE, Lists.newArrayList(createMockLiveFileMetaData("file2.sst", + DIRECTORY_TABLE, "key1", "key2"))); liveFileMetaDataMap.put("col1", Lists.newArrayList(createMockLiveFileMetaData("file2.sst", "col1", "key1", "key2"))); List snapshotIds = new ArrayList<>(); @@ -235,7 +233,7 @@ private List createSnapshotLocalData(OmSnapshotLocalDataManager localDataM : previousSnapshotInfo.getSnapshotId()); mockSnapshotStore(snapshotId, liveFileMetaDataMap.values().stream() .flatMap(Collection::stream).collect(Collectors.toList())); - localDataManager.createNewOmSnapshotLocalDataFile(snapshotStore, snapshotInfo); + snapshotLocalDataManager.createNewOmSnapshotLocalDataFile(snapshotStore, snapshotInfo); previousSnapshotInfo = snapshotInfo; for (String table : liveFileMetaDataMap.keySet()) { liveFileMetaDataMap.get(table).add( @@ -258,7 +256,12 @@ private void mockSnapshotStore(UUID snapshotId, List sstFiles) } /** - * Reading Snap1 against snap5 + * Checks lock orders taken i.e. while reading a snapshot against the previous snapshot. + * Depending on read or write locks are acquired on the snapshotId and read lock is acquired on the previous + * snapshot. Once the instance is closed the read lock on previous snapshot is released followed by releasing the + * lock on the snapshotId. 
+ * @param read + * @throws IOException */ @ParameterizedTest @ValueSource(booleans = {true, false}) @@ -386,7 +389,6 @@ public void testAddVersionFromRDB() throws IOException { newVersionSstFiles.subList(0, 3).stream().map(SstFileInfo::new).collect(Collectors.toList()); assertEquals(expectedLiveFileMetaData, versionMeta.getSstFiles()); } - } private void validateVersions(OmSnapshotLocalDataManager snapshotLocalDataManager, UUID snapId, int expectedVersion, @@ -524,7 +526,6 @@ public void testVersionResolution(boolean read) throws IOException { } } - @Test public void testConstructor() throws IOException { localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); From d62991103960df7deb4fe1eab9d303be13a64deb Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 16 Oct 2025 09:39:11 -0400 Subject: [PATCH 47/97] HDDS-13783. Fix findbugs Change-Id: Idf069e026ada0f57f664c64299a856809fd63344 --- .../ozone/om/snapshot/TestOmSnapshotLocalDataManager.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index d273e758c3f1..dcea25465949 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -235,9 +235,9 @@ private List createSnapshotLocalData(OmSnapshotLocalDataManager snapshotLo .flatMap(Collection::stream).collect(Collectors.toList())); snapshotLocalDataManager.createNewOmSnapshotLocalDataFile(snapshotStore, snapshotInfo); previousSnapshotInfo = snapshotInfo; - for (String table : liveFileMetaDataMap.keySet()) { - liveFileMetaDataMap.get(table).add( - createMockLiveFileMetaData("file" + counter++ + ".sst", 
table, "key1", "key4")); + for (Map.Entry> tableEntry : liveFileMetaDataMap.entrySet()) { + String table = tableEntry.getKey(); + tableEntry.getValue().add(createMockLiveFileMetaData("file" + counter++ + ".sst", table, "key1", "key4")); } snapshotIds.add(snapshotId); } From 8eeb44bcddb7e23a6da5e8bf831732d9217913c1 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 16 Oct 2025 10:28:07 -0400 Subject: [PATCH 48/97] HDDS-13783. Fix pmd Change-Id: Ib0066799f77554b0d27cb53da89e8f542c9c308a --- .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 8f6ce69ad554..05287286bca3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -70,7 +70,6 @@ public class OmSnapshotLocalDataManager implements AutoCloseable { private static final Logger LOG = LoggerFactory.getLogger(OmSnapshotLocalDataManager.class); - private static final String SNAPSHOT_LOCAL_DATA_LOCK_RESOURCE_NAME = "snapshot_local_data_lock"; private final ObjectSerializer snapshotLocalDataSerializer; private final MutableGraph localDataGraph; From 06e7d373f9837ca012704b6e1389af814dee06ca Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 16 Oct 2025 14:23:29 -0400 Subject: [PATCH 49/97] HDDS-13785. 
Fix merge issue Change-Id: I8712f187b7f6f115feac7ce0a203eab32d091529 --- .../hadoop/ozone/om/OmSnapshotLocalData.java | 4 -- .../snapshot/OmSnapshotLocalDataManager.java | 65 +++++++++++-------- .../TestOmSnapshotLocalDataManager.java | 12 ++-- 3 files changed, 43 insertions(+), 38 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java index b8dab82e9315..5de83927952c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java @@ -194,10 +194,6 @@ public void removeVersionSSTFileInfos(int snapshotVersion) { this.versionSstFileInfos.remove(snapshotVersion); } - public void removeVersionSSTFileInfos(int snapshotVersion) { - this.versionSstFileInfos.remove(snapshotVersion); - } - /** * Returns the checksum of the YAML representation. 
* @return checksum diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 9d3393c20585..9fbaa66d9383 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -79,7 +79,6 @@ public class OmSnapshotLocalDataManager implements AutoCloseable { private static final Logger LOG = LoggerFactory.getLogger(OmSnapshotLocalDataManager.class); - private static final String SNAPSHOT_LOCAL_DATA_LOCK_RESOURCE_NAME = "snapshot_local_data_lock"; private static final String LOCAL_DATA_MANAGER_SERVICE_NAME = "OmSnapshotLocalDataManagerService"; private final ObjectSerializer snapshotLocalDataSerializer; @@ -255,7 +254,7 @@ private void decreamentOrphanCheckCount(UUID snapshotId, int decrementBy) { }); } - private void init() throws IOException { + private void init(OzoneConfiguration configuration, SnapshotChainManager chainManager) throws IOException { this.locks = omMetadataManager.getHierarchicalLockManager(); this.snapshotToBeCheckedForOrphans = new ConcurrentHashMap<>(); RDBStore store = (RDBStore) omMetadataManager.getStore(); @@ -281,18 +280,21 @@ private void init() throws IOException { for (UUID snapshotId : versionNodeMap.keySet()) { increamentOrphanCheckCount(snapshotId); } - this.scheduler = new Scheduler(LOCAL_DATA_MANAGER_SERVICE_NAME, true, 1); long snapshotLocalDataManagerServiceInterval = configuration.getTimeDuration( OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL, OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS); - this.scheduler.scheduleWithFixedDelay( - () -> { - try { - checkOrphanSnapshotVersions(omMetadataManager, snapshotChainManager); - } catch (IOException e) { - 
LOG.error("Exception while checking orphan snapshot versions", e); - } - }, snapshotLocalDataManagerServiceInterval, snapshotLocalDataManagerServiceInterval, TimeUnit.MILLISECONDS); + if (snapshotLocalDataManagerServiceInterval > 0) { + this.scheduler = new Scheduler(LOCAL_DATA_MANAGER_SERVICE_NAME, true, 1); + this.scheduler.scheduleWithFixedDelay( + () -> { + try { + checkOrphanSnapshotVersions(omMetadataManager, chainManager); + } catch (IOException e) { + LOG.error("Exception while checking orphan snapshot versions", e); + } + }, snapshotLocalDataManagerServiceInterval, snapshotLocalDataManagerServiceInterval, TimeUnit.MILLISECONDS); + } + } private void checkOrphanSnapshotVersions(OMMetadataManager metadataManager, SnapshotChainManager chainManager) @@ -300,25 +302,32 @@ private void checkOrphanSnapshotVersions(OMMetadataManager metadataManager, Snap for (Map.Entry entry : snapshotToBeCheckedForOrphans.entrySet()) { UUID snapshotId = entry.getKey(); int countBeforeCheck = entry.getValue(); - try (WritableOmSnapshotLocalDataProvider snapshotLocalDataProvider = - new WritableOmSnapshotLocalDataProvider(snapshotId)) { - OmSnapshotLocalData snapshotLocalData = snapshotLocalDataProvider.getSnapshotLocalData(); - boolean isSnapshotPurged = SnapshotUtils.isSnapshotPurged(chainManager, metadataManager, snapshotId); - for (Map.Entry integerLocalDataVersionNodeEntry : getVersionNodeMap().get( - snapshotId).getSnapshotVersions().entrySet()) { - LocalDataVersionNode versionEntry = integerLocalDataVersionNodeEntry.getValue(); - // remove the version entry if it is not referenced by any other snapshot version node. For version node 0 - // a newly created snapshot version could point to a version with indegree 0 in such a scenario a version 0 - // node can be only deleted if the snapshot is also purged. 
- boolean toRemove = localDataGraph.inDegree(versionEntry) == 0 - && (versionEntry.getVersion() != 0 || isSnapshotPurged); - if (toRemove) { - snapshotLocalData.removeVersionSSTFileInfos(versionEntry.getVersion()); - } + checkOrphanSnapshotVersions(metadataManager, chainManager, snapshotId); + decreamentOrphanCheckCount(snapshotId, countBeforeCheck); + } + } + + @VisibleForTesting + void checkOrphanSnapshotVersions(OMMetadataManager metadataManager, SnapshotChainManager chainManager, + UUID snapshotId) throws IOException { + try (WritableOmSnapshotLocalDataProvider snapshotLocalDataProvider = new WritableOmSnapshotLocalDataProvider( + snapshotId)) { + OmSnapshotLocalData snapshotLocalData = snapshotLocalDataProvider.getSnapshotLocalData(); + boolean isSnapshotPurged = SnapshotUtils.isSnapshotPurged(chainManager, metadataManager, snapshotId); + for (Map.Entry integerLocalDataVersionNodeEntry : getVersionNodeMap() + .get(snapshotId).getSnapshotVersions().entrySet()) { + LocalDataVersionNode versionEntry = integerLocalDataVersionNodeEntry.getValue(); + // remove the version entry if it is not referenced by any other snapshot version node. For version node 0 + // a newly created snapshot version could point to a version with indegree 0 in such a scenario a version 0 + // node can be only deleted if the snapshot is also purged. 
+ boolean toRemove = localDataGraph.inDegree(versionEntry) == 0 + && ((versionEntry.getVersion() != 0 && versionEntry.getVersion() != snapshotLocalData.getVersion()) + || isSnapshotPurged); + if (toRemove) { + snapshotLocalData.removeVersionSSTFileInfos(versionEntry.getVersion()); } - snapshotLocalDataProvider.commit(); } - decreamentOrphanCheckCount(snapshotId, countBeforeCheck); + snapshotLocalDataProvider.commit(); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index 7de9deba207c..a5590c4895f3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -283,7 +283,7 @@ private void mockSnapshotStore(UUID snapshotId, List sstFiles) @ParameterizedTest @ValueSource(booleans = {true, false}) public void testLockOrderingAgainstAnotherSnapshot(boolean read) throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); List snapshotIds = createSnapshotLocalData(localDataManager, 20); for (int start = 0; start < snapshotIds.size(); start++) { for (int end = start + 1; end < snapshotIds.size(); end++) { @@ -327,7 +327,7 @@ public void testLockOrderingAgainstAnotherSnapshot(boolean read) throws IOExcept @ParameterizedTest @ValueSource(booleans = {true, false}) public void testVersionLockResolution(boolean read) throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); List snapshotIds = createSnapshotLocalData(localDataManager, 5); for (int snapIdx 
= 0; snapIdx < snapshotIds.size(); snapIdx++) { UUID snapId = snapshotIds.get(snapIdx); @@ -365,7 +365,7 @@ public void testVersionLockResolution(boolean read) throws IOException { @Test public void testWriteVersionAdditionValidationWithoutPreviousSnapshotVersionExisting() throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); List snapshotIds = createSnapshotLocalData(localDataManager, 2); UUID snapId = snapshotIds.get(1); try (WritableOmSnapshotLocalDataProvider omSnapshotLocalDataProvider = @@ -382,7 +382,7 @@ public void testWriteVersionAdditionValidationWithoutPreviousSnapshotVersionExis @Test public void testAddVersionFromRDB() throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); List snapshotIds = createSnapshotLocalData(localDataManager, 2); addVersionsToLocalData(localDataManager, snapshotIds.get(0), ImmutableMap.of(4, 5, 6, 8)); UUID snapId = snapshotIds.get(1); @@ -445,7 +445,7 @@ private void validateVersions(OmSnapshotLocalDataManager snapshotLocalDataManage @ParameterizedTest @ValueSource(booleans = {true, false}) public void testWriteVersionValidation(boolean nextVersionExisting) throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); List snapshotIds = createSnapshotLocalData(localDataManager, 2); UUID prevSnapId = snapshotIds.get(0); UUID snapId = snapshotIds.get(1); @@ -502,7 +502,7 @@ private void addVersionsToLocalData(OmSnapshotLocalDataManager snapshotLocalData @ParameterizedTest @ValueSource(booleans = {true, false}) public void testVersionResolution(boolean read) throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + 
localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); List snapshotIds = createSnapshotLocalData(localDataManager, 5); List> versionMaps = Arrays.asList( ImmutableMap.of(4, 1, 6, 3, 8, 9, 11, 15), From fab85eaa382dd824a66716a0a87e72677257b6cc Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 16 Oct 2025 15:14:14 -0400 Subject: [PATCH 50/97] HDDS-13785. Fix checkstyle Change-Id: I5f39975ae4d2631cd6dd5dae83ed73cf6bd52d14 --- .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 2 -- .../ozone/om/snapshot/TestOmSnapshotLocalDataManager.java | 3 --- 2 files changed, 5 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 9fbaa66d9383..eac346667db8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -43,14 +43,12 @@ import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; -import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.stream.Collectors; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.Scheduler; -import org.apache.hadoop.hdds.utils.SimpleStriped; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshotLocalData; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index a5590c4895f3..adfe4b0414e6 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -27,8 +27,6 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; -import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.mockStatic; @@ -728,7 +726,6 @@ public void testInitWithInvalidPathThrowsException() throws IOException { @Test public void testClose() throws IOException { localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); - // Should not throw exception localDataManager.close(); } From c73a35519c12765c4b4e28a62f13e677788f9098 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 16 Oct 2025 15:29:52 -0400 Subject: [PATCH 51/97] HDDS-13783. 
Fix test Change-Id: I8e5ba34d39de9c8e007d2902a8738c487b01902a --- .../org/apache/hadoop/ozone/om/TestOmSnapshotManager.java | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java index 7f808df3f978..6ec49935b356 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java @@ -774,6 +774,7 @@ public void testCreateSnapshotIdempotent() throws Exception { when(bucketTable.get(dbBucketKey)).thenReturn(omBucketInfo); SnapshotInfo first = createSnapshotInfo(volumeName, bucketName); + first.setPathPreviousSnapshotId(null); when(snapshotInfoTable.get(first.getTableKey())).thenReturn(first); // Create first checkpoint for the snapshot checkpoint @@ -797,10 +798,13 @@ public void testCreateSnapshotIdempotent() throws Exception { private SnapshotInfo createSnapshotInfo(String volumeName, String bucketName) { - return SnapshotInfo.newInstance(volumeName, + SnapshotInfo snapshotInfo = SnapshotInfo.newInstance(volumeName, bucketName, UUID.randomUUID().toString(), UUID.randomUUID(), Time.now()); + snapshotInfo.setPathPreviousSnapshotId(null); + snapshotInfo.setGlobalPreviousSnapshotId(null); + return snapshotInfo; } } From 1d39beeadef77359ac48c19305e5a9234d6d5ee0 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 16 Oct 2025 15:55:48 -0400 Subject: [PATCH 52/97] HDDS-13785. 
Fix test Change-Id: I0b33548be47ab3d94a9ad7bed92ad81d16b9ea29 --- .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 4 +++- .../org/apache/hadoop/ozone/om/TestOmSnapshotManager.java | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index eac346667db8..7c3bcaa560fd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -236,7 +236,9 @@ void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) throws } private void increamentOrphanCheckCount(UUID snapshotId) { - this.snapshotToBeCheckedForOrphans.compute(snapshotId, (k, v) -> v == null ? 1 : v + 1); + if (snapshotId != null) { + this.snapshotToBeCheckedForOrphans.compute(snapshotId, (k, v) -> v == null ? 
1 : (v + 1)); + } } private void decreamentOrphanCheckCount(UUID snapshotId, int decrementBy) { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java index 6ec49935b356..73f9e2863be3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java @@ -330,7 +330,7 @@ public void testCreateNewSnapshotLocalYaml() throws IOException { assertEquals(notDefraggedVersionMeta, localData.getVersionSstFileInfos().get(0)); assertFalse(localData.getSstFiltered()); assertEquals(0L, localData.getLastDefragTime()); - assertFalse(localData.getNeedsDefrag()); + assertTrue(localData.getNeedsDefrag()); assertEquals(1, localData.getVersionSstFileInfos().size()); // Cleanup From b1a38343c5f2eb98727c2ae468082d8126b4eb42 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 16 Oct 2025 16:09:17 -0400 Subject: [PATCH 53/97] HDDS-13785. 
Fix test Change-Id: Id6de8a04ae094bf0069bf6dc6e604fe26712d6ee --- .../main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java | 5 ----- hadoop-hdds/common/src/main/resources/ozone-default.xml | 5 +++++ .../main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java | 3 +++ .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 4 ++-- 4 files changed, 10 insertions(+), 7 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index 1b1d7fcc95d5..db66fed22fe9 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -640,11 +640,6 @@ public final class OzoneConfigKeys { OZONE_OM_SNAPSHOT_COMPACTION_DAG_MAX_TIME_ALLOWED_DEFAULT = TimeUnit.DAYS.toMillis(30); - public static final String OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL = - "ozone.om.snapshot.local.data.manager.service.interval"; - - public static final String OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL_DEFAULT = "5m"; - public static final String OZONE_OM_SNAPSHOT_COMPACTION_DAG_PRUNE_DAEMON_RUN_INTERVAL = "ozone.om.snapshot.compaction.dag.prune.daemon.run.interval"; diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 462b1e4331f4..06dc7f84d63a 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -4848,4 +4848,9 @@ 10000 Maximum number of lock objects that could be present in the pool. 
+ + ozone.om.snapshot.local.data.manager.service.interval + 5m + Interval for cleaning up orphan snapshot local data versions corresponding to snapshots + diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java index 254a49ea9a99..d31bc2c9fc3b 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java @@ -681,6 +681,9 @@ public final class OMConfigKeys { public static final String OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT = "ozone.om.hierarchical.resource.locks.hard.limit"; public static final int OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT_DEFAULT = 10000; + public static final String OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL = + "ozone.om.snapshot.local.data.manager.service.interval"; + public static final String OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL_DEFAULT = "5m"; /** * Never constructed. 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 7c3bcaa560fd..e39662d44912 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -17,8 +17,8 @@ package org.apache.hadoop.ozone.om.snapshot; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL_DEFAULT; import static org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml.YAML_FILE_EXTENSION; import com.google.common.annotations.VisibleForTesting; From 1986bbefda494bbcce356a9468980528cda890f6 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 16 Oct 2025 18:28:43 -0400 Subject: [PATCH 54/97] HDDS-13785. 
Fix conditions Change-Id: Ic5d26fdaec10470b2dc6c14deacbf185ccbef2bf --- .../snapshot/OmSnapshotLocalDataManager.java | 22 +++++++++-- .../TestOmSnapshotLocalDataManager.java | 38 +++++++++---------- 2 files changed, 36 insertions(+), 24 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index e39662d44912..f98672ad71e8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -254,6 +254,10 @@ private void decreamentOrphanCheckCount(UUID snapshotId, int decrementBy) { }); } + Map getSnapshotToBeCheckedForOrphans() { + return snapshotToBeCheckedForOrphans; + } + private void init(OzoneConfiguration configuration, SnapshotChainManager chainManager) throws IOException { this.locks = omMetadataManager.getHierarchicalLockManager(); this.snapshotToBeCheckedForOrphans = new ConcurrentHashMap<>(); @@ -696,25 +700,37 @@ private synchronized void upsertNode(UUID snapshotId, SnapshotVersionsMeta snaps SnapshotVersionsMeta existingSnapVersions = getVersionNodeMap().remove(snapshotId); Map existingVersions = existingSnapVersions == null ? Collections.emptyMap() : existingSnapVersions.getSnapshotVersions(); + Map newVersions = snapshotVersions.getSnapshotVersions(); Map> predecessors = new HashMap<>(); + boolean versionsRemoved = false; // Track all predecessors of the existing versions and remove the node from the graph. 
for (Map.Entry existingVersion : existingVersions.entrySet()) { LocalDataVersionNode existingVersionNode = existingVersion.getValue(); predecessors.put(existingVersion.getKey(), localDataGraph.predecessors(existingVersionNode)); + versionsRemoved = versionsRemoved || !newVersions.containsKey(existingVersion.getKey()); localDataGraph.removeNode(existingVersionNode); } // Add the nodes to be added in the graph and map. addSnapshotVersionMeta(snapshotId, snapshotVersions); // Reconnect all the predecessors for existing nodes. - for (Map.Entry entry : snapshotVersions.getSnapshotVersions().entrySet()) { + for (Map.Entry entry : newVersions.entrySet()) { for (LocalDataVersionNode predecessor : predecessors.getOrDefault(entry.getKey(), Collections.emptySet())) { localDataGraph.putEdge(predecessor, entry.getValue()); } } - // The previous snapshotId could have become an orphan entry or could have orphan versions. if (existingSnapVersions != null) { - increamentOrphanCheckCount(existingSnapVersions.getPreviousSnapshotId()); + // The previous snapshotId could have become an orphan entry or could have orphan versions.(In case of + // version removals) + if (versionsRemoved || !Objects.equals(existingSnapVersions.getPreviousSnapshotId(), + snapshotVersions.getPreviousSnapshotId())) { + increamentOrphanCheckCount(existingSnapVersions.getPreviousSnapshotId()); + } + // If the version is also updated it could mean that there could be some orphan version present within the + // same snapshot. 
+ if (existingSnapVersions.getVersion() != snapshotVersions.getVersion()) { + increamentOrphanCheckCount(snapshotId); + } } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index adfe4b0414e6..659e37d753ba 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -63,7 +63,6 @@ import org.apache.hadoop.ozone.om.OmSnapshotLocalData; import org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml; import org.apache.hadoop.ozone.om.OmSnapshotManager; -import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.lock.FlatResource; import org.apache.hadoop.ozone.om.lock.HierarchicalResourceLockManager; @@ -102,9 +101,6 @@ public class TestOmSnapshotLocalDataManager { @Mock private HierarchicalResourceLockManager lockManager; - @Mock - private SnapshotChainManager chainManager; - @Mock private RDBStore rdbStore; @@ -281,7 +277,7 @@ private void mockSnapshotStore(UUID snapshotId, List sstFiles) @ParameterizedTest @ValueSource(booleans = {true, false}) public void testLockOrderingAgainstAnotherSnapshot(boolean read) throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); List snapshotIds = createSnapshotLocalData(localDataManager, 20); for (int start = 0; start < snapshotIds.size(); start++) { for (int end = start + 1; end < snapshotIds.size(); end++) { @@ -325,7 +321,7 @@ public void testLockOrderingAgainstAnotherSnapshot(boolean read) throws IOExcept @ParameterizedTest 
@ValueSource(booleans = {true, false}) public void testVersionLockResolution(boolean read) throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); List snapshotIds = createSnapshotLocalData(localDataManager, 5); for (int snapIdx = 0; snapIdx < snapshotIds.size(); snapIdx++) { UUID snapId = snapshotIds.get(snapIdx); @@ -363,7 +359,7 @@ public void testVersionLockResolution(boolean read) throws IOException { @Test public void testWriteVersionAdditionValidationWithoutPreviousSnapshotVersionExisting() throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); List snapshotIds = createSnapshotLocalData(localDataManager, 2); UUID snapId = snapshotIds.get(1); try (WritableOmSnapshotLocalDataProvider omSnapshotLocalDataProvider = @@ -380,7 +376,7 @@ public void testWriteVersionAdditionValidationWithoutPreviousSnapshotVersionExis @Test public void testAddVersionFromRDB() throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); List snapshotIds = createSnapshotLocalData(localDataManager, 2); addVersionsToLocalData(localDataManager, snapshotIds.get(0), ImmutableMap.of(4, 5, 6, 8)); UUID snapId = snapshotIds.get(1); @@ -443,7 +439,7 @@ private void validateVersions(OmSnapshotLocalDataManager snapshotLocalDataManage @ParameterizedTest @ValueSource(booleans = {true, false}) public void testWriteVersionValidation(boolean nextVersionExisting) throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); List snapshotIds = 
createSnapshotLocalData(localDataManager, 2); UUID prevSnapId = snapshotIds.get(0); UUID snapId = snapshotIds.get(1); @@ -500,7 +496,7 @@ private void addVersionsToLocalData(OmSnapshotLocalDataManager snapshotLocalData @ParameterizedTest @ValueSource(booleans = {true, false}) public void testVersionResolution(boolean read) throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); List snapshotIds = createSnapshotLocalData(localDataManager, 5); List> versionMaps = Arrays.asList( ImmutableMap.of(4, 1, 6, 3, 8, 9, 11, 15), @@ -543,7 +539,7 @@ public void testVersionResolution(boolean read) throws IOException { @Test public void testConstructor() throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); assertNotNull(localDataManager); } @@ -552,7 +548,7 @@ public void testGetSnapshotLocalPropertyYamlPathWithSnapshotInfo() throws IOExce UUID snapshotId = UUID.randomUUID(); SnapshotInfo snapshotInfo = createMockSnapshotInfo(snapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); File yamlPath = new File(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo)); assertNotNull(yamlPath); @@ -583,7 +579,7 @@ public void testCreateNewOmSnapshotLocalDataFile() throws IOException { RocksDatabase rocksDatabase = mock(RocksDatabase.class); when(snapshotStore.getDb()).thenReturn(rocksDatabase); when(rocksDatabase.getLiveFilesMetaData()).thenReturn(sstFiles); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); 
localDataManager.createNewOmSnapshotLocalDataFile(snapshotStore, snapshotInfo); @@ -606,7 +602,7 @@ public void testGetOmSnapshotLocalDataWithSnapshotInfo() throws IOException { // Create and write snapshot local data file OmSnapshotLocalData localData = createMockLocalData(snapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); // Write the file manually for testing Path yamlPath = Paths.get(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo.getSnapshotId())); @@ -627,7 +623,7 @@ public void testGetOmSnapshotLocalDataWithMismatchedSnapshotId() throws IOExcept // Create local data with wrong snapshot ID OmSnapshotLocalData localData = createMockLocalData(wrongSnapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); Path yamlPath = Paths.get(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotId)); writeLocalDataToFile(localData, yamlPath); @@ -643,7 +639,7 @@ public void testGetOmSnapshotLocalDataWithFile() throws IOException { OmSnapshotLocalData localData = createMockLocalData(snapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); Path yamlPath = tempDir.resolve("test-snapshot.yaml"); writeLocalDataToFile(localData, yamlPath); @@ -661,7 +657,7 @@ public void testAddVersionNodeWithDependents() throws IOException { .sorted(Comparator.comparing(String::valueOf)).collect(Collectors.toList()); UUID snapshotId = versionIds.get(0); UUID previousSnapshotId = versionIds.get(1); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, 
conf); // Create snapshot directory structure and files createSnapshotLocalDataFile(snapshotId, previousSnapshotId); createSnapshotLocalDataFile(previousSnapshotId, null); @@ -677,7 +673,7 @@ public void testAddVersionNodeWithDependentsAlreadyExists() throws IOException { createSnapshotLocalDataFile(snapshotId, null); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); OmSnapshotLocalData localData = createMockLocalData(snapshotId, null); @@ -699,7 +695,7 @@ public void testInitWithExistingYamlFiles() throws IOException { createSnapshotLocalDataFile(snapshotId, previousSnapshotId); // Initialize - should load existing files - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); assertNotNull(localDataManager); Map versionMap = @@ -719,13 +715,13 @@ public void testInitWithInvalidPathThrowsException() throws IOException { // Should throw IOException during init assertThrows(IOException.class, () -> { - new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); + new OmSnapshotLocalDataManager(omMetadataManager, null, conf); }); } @Test public void testClose() throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, chainManager, conf); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); // Should not throw exception localDataManager.close(); } From 52be3dd8a030e4cff9ea8e13d36de53d1a6ad023 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 16 Oct 2025 19:45:05 -0400 Subject: [PATCH 55/97] HDDS-13783. 
Allow version resolution to null Change-Id: Ibe23607830bee6a96812cbb0b541dcac33370be9 --- .../snapshot/OmSnapshotLocalDataManager.java | 28 +++++++++++-------- .../TestOmSnapshotLocalDataManager.java | 13 +++++++-- 2 files changed, 27 insertions(+), 14 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 05287286bca3..33bff2bbd484 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -374,19 +374,19 @@ protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId) throws IOExceptio } protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId, UUID snapIdToResolve) throws IOException { - this(snapshotId, true, null, snapIdToResolve); + this(snapshotId, true, null, snapIdToResolve, true); } protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId, boolean readLock) throws IOException { - this(snapshotId, readLock, null, null); + this(snapshotId, readLock, null, null, false); } protected ReadableOmSnapshotLocalDataProvider(UUID snapshotId, boolean readLock, CheckedSupplier, IOException> snapshotLocalDataSupplier, - UUID snapshotIdToBeResolved) throws IOException { + UUID snapshotIdToBeResolved, boolean isSnapshotToBeResolvedNullable) throws IOException { this.snapshotId = snapshotId; LockDataProviderInitResult result = initialize(readLock, snapshotId, snapshotIdToBeResolved, - snapshotLocalDataSupplier); + isSnapshotToBeResolvedNullable, snapshotLocalDataSupplier); this.snapshotLocalData = result.getSnapshotLocalData(); this.lock = result.getLock(); this.previousLock = result.getPreviousLock(); @@ -423,9 +423,9 @@ private HierarchicalResourceLock acquireLock(UUID snapId, boolean 
readLock) thro * Intializer the snapshot local data by acquiring the lock on the snapshot and also acquires a read lock on the * snapshotId to be resolved by iterating through the chain of previous snapshot ids. */ - private LockDataProviderInitResult initialize(boolean readLock, UUID snapId, UUID toResolveSnapshotId, - CheckedSupplier, IOException> snapshotLocalDataSupplier) - throws IOException { + private LockDataProviderInitResult initialize( + boolean readLock, UUID snapId, UUID toResolveSnapshotId, boolean isSnapshotToBeResolvedNullable, + CheckedSupplier, IOException> snapshotLocalDataSupplier) throws IOException { HierarchicalResourceLock snapIdLock = null; HierarchicalResourceLock previousReadLockAcquired = null; try { @@ -445,13 +445,16 @@ private LockDataProviderInitResult initialize(boolean readLock, UUID snapId, UUI // do while loop since the nodes that need be added may not be present in the graph so it may not be possible // to iterate through the chain. UUID previousSnapshotId = ssLocalData.getPreviousSnapshotId(); - if (previousSnapshotId != null) { + // if flag toResolveSnapshotIdIsNull is true or toResolveSnapshotId is not null, then we resolve snapshot + // with previous snapshot id as null, which would mean if the snapshot local data is committed the snapshot + // local data would become first snapshot in the chain with no previous snapshot id. + toResolveSnapshotId = (isSnapshotToBeResolvedNullable || toResolveSnapshotId != null) ? toResolveSnapshotId : + ssLocalData.getPreviousSnapshotId(); + if (toResolveSnapshotId != null && previousSnapshotId != null) { if (!versionNodeMap.containsKey(previousSnapshotId)) { throw new IOException(String.format("Operating on snapshot id : %s with previousSnapshotId: %s invalid " + "since previousSnapshotId is not loaded.", snapId, previousSnapshotId)); } - toResolveSnapshotId = toResolveSnapshotId == null ? 
ssLocalData.getPreviousSnapshotId() : - toResolveSnapshotId; previousReadLockAcquired = acquireLock(previousSnapshotId, true); // Create a copy of the previous versionMap to get the previous versions corresponding to the previous // snapshot. This map would mutated to resolve the previous snapshot's version corresponding to the @@ -521,6 +524,7 @@ private LockDataProviderInitResult initialize(boolean readLock, UUID snapId, UUI } } else { toResolveSnapshotId = null; + ssLocalData.setPreviousSnapshotId(null); } return new LockDataProviderInitResult(snapIdLock, ssLocalData, previousReadLockAcquired, toResolveSnapshotId); } catch (IOException e) { @@ -569,13 +573,13 @@ private WritableOmSnapshotLocalDataProvider(UUID snapshotId) throws IOException } private WritableOmSnapshotLocalDataProvider(UUID snapshotId, UUID snapshotIdToBeResolved) throws IOException { - super(snapshotId, false, null, snapshotIdToBeResolved); + super(snapshotId, false, null, snapshotIdToBeResolved, true); fullLock.readLock().lock(); } private WritableOmSnapshotLocalDataProvider(UUID snapshotId, CheckedSupplier, IOException> snapshotLocalDataSupplier) throws IOException { - super(snapshotId, false, snapshotLocalDataSupplier, null); + super(snapshotId, false, snapshotLocalDataSupplier, null, false); fullLock.readLock().lock(); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index dcea25465949..edcee1f48884 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -24,6 +24,7 @@ import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.KEY_TABLE; import static org.junit.jupiter.api.Assertions.assertEquals; import 
static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; @@ -267,7 +268,9 @@ private void mockSnapshotStore(UUID snapshotId, List sstFiles) @ValueSource(booleans = {true, false}) public void testLockOrderingAgainstAnotherSnapshot(boolean read) throws IOException { localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); - List snapshotIds = createSnapshotLocalData(localDataManager, 20); + List snapshotIds = new ArrayList<>(); + snapshotIds.add(null); + snapshotIds.addAll(createSnapshotLocalData(localDataManager, 20)); for (int start = 0; start < snapshotIds.size(); start++) { for (int end = start + 1; end < snapshotIds.size(); end++) { UUID startSnapshotId = snapshotIds.get(start); @@ -279,8 +282,14 @@ public void testLockOrderingAgainstAnotherSnapshot(boolean read) throws IOExcept localDataManager.getWritableOmSnapshotLocalData(endSnapshotId, startSnapshotId)) { OmSnapshotLocalData snapshotLocalData = omSnapshotLocalDataProvider.getSnapshotLocalData(); OmSnapshotLocalData previousSnapshot = omSnapshotLocalDataProvider.getPreviousSnapshotLocalData(); - assertEquals(startSnapshotId, previousSnapshot.getSnapshotId()); assertEquals(endSnapshotId, snapshotLocalData.getSnapshotId()); + if (startSnapshotId == null) { + assertNull(previousSnapshot); + assertNull(snapshotLocalData.getPreviousSnapshotId()); + continue; + } + assertEquals(startSnapshotId, previousSnapshot.getSnapshotId()); + assertEquals(startSnapshotId, snapshotLocalData.getPreviousSnapshotId()); if (read) { assertEquals(getReadLockMessageAcquire(endSnapshotId), lockCapturor.get(logCaptorIdx++)); } else { From 908c47d04a85659fe14657e4b78008486fc7e1f2 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 16 Oct 2025 23:26:02 -0400 Subject: [PATCH 56/97] 
HDDS-13785. Fix tests Change-Id: I39d1ba6b8bac77fe0ff442b61afe56efe50c9712 --- .../om/snapshot/filter/AbstractReclaimableFilterTest.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/AbstractReclaimableFilterTest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/AbstractReclaimableFilterTest.java index ef97975ca8ec..3c50e93625f5 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/AbstractReclaimableFilterTest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/filter/AbstractReclaimableFilterTest.java @@ -52,6 +52,7 @@ import org.apache.hadoop.ozone.om.BucketManager; import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; @@ -162,10 +163,11 @@ protected void teardown() throws IOException { } private void mockOzoneManager(BucketLayout bucketLayout) throws IOException { - OMMetadataManager metadataManager = mock(OMMetadataManager.class); + OmMetadataManagerImpl metadataManager = mock(OmMetadataManagerImpl.class); BucketManager bucketManager = mock(BucketManager.class); when(ozoneManager.getMetadataManager()).thenReturn(metadataManager); when(ozoneManager.getBucketManager()).thenReturn(bucketManager); + when(metadataManager.getSnapshotChainManager()).thenReturn(snapshotChainManager); long volumeCount = 0; for (String volume : volumes) { when(metadataManager.getVolumeId(eq(volume))).thenReturn(volumeCount); From 278605af16b1b5ce8216d83577c379fcd3795d2f Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 17 Oct 2025 13:23:35 -0400 Subject: [PATCH 57/97] HDDS-13783. 
Add dirty bit Change-Id: I165f00132548acf920b8fb9d7530a6314366797d --- .../hadoop/ozone/om/OmSnapshotLocalData.java | 10 +- .../snapshot/OmSnapshotLocalDataManager.java | 102 ++++++++++++------ .../TestOmSnapshotLocalDataManager.java | 14 ++- 3 files changed, 84 insertions(+), 42 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java index 5de83927952c..5d474d371329 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java @@ -115,7 +115,7 @@ public boolean getSstFiltered() { * Sets whether SST is filtered for this snapshot. * @param sstFiltered */ - public void setSstFiltered(boolean sstFiltered) { + void setSstFiltered(boolean sstFiltered) { this.isSSTFiltered = sstFiltered; } @@ -131,7 +131,7 @@ public long getLastDefragTime() { * Sets the last defrag time, in epoch milliseconds. * @param lastDefragTime Timestamp of the last defrag */ - public void setLastDefragTime(Long lastDefragTime) { + void setLastDefragTime(Long lastDefragTime) { this.lastDefragTime = lastDefragTime; } @@ -147,7 +147,7 @@ public boolean getNeedsDefrag() { * Sets whether the snapshot needs defrag. * @param needsDefrag true if the snapshot needs defrag, false otherwise */ - public void setNeedsDefrag(boolean needsDefrag) { + void setNeedsDefrag(boolean needsDefrag) { this.needsDefrag = needsDefrag; } @@ -163,7 +163,7 @@ public Map getVersionSstFileInfos() { * Sets the defragged SST file list. 
* @param versionSstFileInfos Map of version to defragged SST file list */ - public void setVersionSstFileInfos(Map versionSstFileInfos) { + void setVersionSstFileInfos(Map versionSstFileInfos) { this.versionSstFileInfos.clear(); this.versionSstFileInfos.putAll(versionSstFileInfos); } @@ -260,7 +260,7 @@ public int getVersion() { * Sets the version of the snapshot local data. A valid version shall be greater than 0. * @param version version */ - public void setVersion(int version) { + void setVersion(int version) { this.version = version; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 33bff2bbd484..430ba1ef4555 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -328,6 +328,27 @@ private OmSnapshotLocalData getSnapshotLocalData() { } } + private synchronized void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) throws IOException { + SnapshotVersionsMeta existingSnapVersions = getVersionNodeMap().remove(snapshotId); + Map existingVersions = existingSnapVersions == null ? Collections.emptyMap() : + existingSnapVersions.getSnapshotVersions(); + Map> predecessors = new HashMap<>(); + // Track all predecessors of the existing versions and remove the node from the graph. + for (Map.Entry existingVersion : existingVersions.entrySet()) { + LocalDataVersionNode existingVersionNode = existingVersion.getValue(); + predecessors.put(existingVersion.getKey(), localDataGraph.predecessors(existingVersionNode)); + localDataGraph.removeNode(existingVersionNode); + } + // Add the nodes to be added in the graph and map. 
+ addSnapshotVersionMeta(snapshotId, snapshotVersions); + // Reconnect all the predecessors for existing nodes. + for (Map.Entry entry : snapshotVersions.getSnapshotVersions().entrySet()) { + for (LocalDataVersionNode predecessor : predecessors.getOrDefault(entry.getKey(), Collections.emptySet())) { + localDataGraph.putEdge(predecessor, entry.getValue()); + } + } + } + /** * The ReadableOmSnapshotLocalDataProvider class is responsible for managing the * access and initialization of local snapshot data in a thread-safe manner. @@ -567,6 +588,8 @@ public void close() throws IOException { */ public final class WritableOmSnapshotLocalDataProvider extends ReadableOmSnapshotLocalDataProvider { + private boolean dirty; + private WritableOmSnapshotLocalDataProvider(UUID snapshotId) throws IOException { super(snapshotId, false); fullLock.readLock().lock(); @@ -586,6 +609,7 @@ private WritableOmSnapshotLocalDataProvider(UUID snapshotId, private SnapshotVersionsMeta validateModification(OmSnapshotLocalData snapshotLocalData) throws IOException { SnapshotVersionsMeta versionsToBeAdded = new SnapshotVersionsMeta(snapshotLocalData); + SnapshotVersionsMeta existingVersionsMeta = getVersionNodeMap().get(snapshotLocalData.getSnapshotId()); for (LocalDataVersionNode node : versionsToBeAdded.getSnapshotVersions().values()) { validateVersionAddition(node); } @@ -597,28 +621,12 @@ private SnapshotVersionsMeta validateModification(OmSnapshotLocalData snapshotLo validateVersionRemoval(snapshotId, entry.getKey()); } } - return versionsToBeAdded; - } - - private synchronized void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) throws IOException { - SnapshotVersionsMeta existingSnapVersions = getVersionNodeMap().remove(snapshotId); - Map existingVersions = existingSnapVersions == null ? 
Collections.emptyMap() : - existingSnapVersions.getSnapshotVersions(); - Map> predecessors = new HashMap<>(); - // Track all predecessors of the existing versions and remove the node from the graph. - for (Map.Entry existingVersion : existingVersions.entrySet()) { - LocalDataVersionNode existingVersionNode = existingVersion.getValue(); - predecessors.put(existingVersion.getKey(), localDataGraph.predecessors(existingVersionNode)); - localDataGraph.removeNode(existingVersionNode); - } - // Add the nodes to be added in the graph and map. - addSnapshotVersionMeta(snapshotId, snapshotVersions); - // Reconnect all the predecessors for existing nodes. - for (Map.Entry entry : snapshotVersions.getSnapshotVersions().entrySet()) { - for (LocalDataVersionNode predecessor : predecessors.getOrDefault(entry.getKey(), Collections.emptySet())) { - localDataGraph.putEdge(predecessor, entry.getValue()); - } + // Set Dirty if the snapshot doesn't exist or previousSnapshotId has changed. + if (existingVersionsMeta == null || !Objects.equals(versionsToBeAdded.getPreviousSnapshotId(), + existingVersionsMeta.getPreviousSnapshotId())) { + setDirty(); } + return versionsToBeAdded; } public void addSnapshotVersion(RDBStore snapshotStore) throws IOException { @@ -626,24 +634,50 @@ public void addSnapshotVersion(RDBStore snapshotStore) throws IOException { OmSnapshotLocalData previousSnapshotLocalData = getPreviousSnapshotLocalData(); this.getSnapshotLocalData().addVersionSSTFileInfos(sstFiles, previousSnapshotLocalData == null ? 0 : previousSnapshotLocalData.getVersion()); + // Set Dirty if a version is added. + setDirty(); + } + + public void removeVersion(int version) { + this.getSnapshotLocalData().removeVersionSSTFileInfos(version); + // Set Dirty if a version is removed. + setDirty(); } public synchronized void commit() throws IOException { + // Validate modification and commit the changes. 
SnapshotVersionsMeta localDataVersionNodes = validateModification(super.snapshotLocalData); - String filePath = getSnapshotLocalPropertyYamlPath(super.snapshotId); - String tmpFilePath = filePath + ".tmp"; - File tmpFile = new File(tmpFilePath); - boolean tmpFileExists = tmpFile.exists(); - if (tmpFileExists) { - tmpFileExists = !tmpFile.delete(); - } - if (tmpFileExists) { - throw new IOException("Unable to delete tmp file " + tmpFilePath); + // Need to update the disk state if and only if the dirty bit is set. + if (isDirty()) { + String filePath = getSnapshotLocalPropertyYamlPath(super.snapshotId); + String tmpFilePath = filePath + ".tmp"; + File tmpFile = new File(tmpFilePath); + boolean tmpFileExists = tmpFile.exists(); + if (tmpFileExists) { + tmpFileExists = !tmpFile.delete(); + } + if (tmpFileExists) { + throw new IOException("Unable to delete tmp file " + tmpFilePath); + } + snapshotLocalDataSerializer.save(new File(tmpFilePath), super.snapshotLocalData); + Files.move(tmpFile.toPath(), Paths.get(filePath), StandardCopyOption.ATOMIC_MOVE, + StandardCopyOption.REPLACE_EXISTING); + upsertNode(super.snapshotId, localDataVersionNodes); + // Reset dirty bit + resetDirty(); } - snapshotLocalDataSerializer.save(new File(tmpFilePath), super.snapshotLocalData); - Files.move(tmpFile.toPath(), Paths.get(filePath), StandardCopyOption.ATOMIC_MOVE, - StandardCopyOption.REPLACE_EXISTING); - upsertNode(super.snapshotId, localDataVersionNodes); + } + + private void setDirty() { + dirty = true; + } + + private void resetDirty() { + dirty = false; + } + + private boolean isDirty() { + return dirty; } @Override diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index edcee1f48884..e63f73e295d6 100644 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -49,6 +49,7 @@ import java.util.Map; import java.util.Set; import java.util.UUID; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.Stream; @@ -76,6 +77,7 @@ import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import org.junit.jupiter.api.io.TempDir; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; @@ -87,6 +89,7 @@ /** * Test class for OmSnapshotLocalDataManager. */ +@Timeout(value = 30, unit = TimeUnit.SECONDS) public class TestOmSnapshotLocalDataManager { private static YamlSerializer snapshotLocalDataYamlSerializer; @@ -449,7 +452,7 @@ public void testWriteVersionValidation(boolean nextVersionExisting) throws IOExc if (nextVersionExisting) { try (WritableOmSnapshotLocalDataProvider prevSnap = localDataManager.getWritableOmSnapshotLocalData(prevSnapId)) { - prevSnap.getSnapshotLocalData().removeVersionSSTFileInfos(4); + prevSnap.removeVersion(4); IOException ex = assertThrows(IOException.class, prevSnap::commit); assertTrue(ex.getMessage().contains("Cannot remove Snapshot " + prevSnapId + " with version : 4 since it " + "still has predecessors")); @@ -458,12 +461,12 @@ public void testWriteVersionValidation(boolean nextVersionExisting) throws IOExc validateVersions(localDataManager, prevSnapId, 4, Sets.newHashSet(0, 4)); } else { try (WritableOmSnapshotLocalDataProvider snap = localDataManager.getWritableOmSnapshotLocalData(snapId)) { - snap.getSnapshotLocalData().removeVersionSSTFileInfos(5); + snap.removeVersion(5); snap.commit(); } try (WritableOmSnapshotLocalDataProvider prevSnap = 
localDataManager.getWritableOmSnapshotLocalData(prevSnapId)) { - prevSnap.getSnapshotLocalData().removeVersionSSTFileInfos(4); + prevSnap.removeVersion(4); prevSnap.commit(); } validateVersions(localDataManager, snapId, 5, Sets.newHashSet(0)); @@ -481,6 +484,11 @@ private void addVersionsToLocalData(OmSnapshotLocalDataManager snapshotLocalData snapshotLocalData.addVersionSSTFileInfos(ImmutableList.of(createMockLiveFileMetaData("file" + version + ".sst", KEY_TABLE, "key1", "key2")), version.getValue()); } + mockSnapshotStore(snapId, ImmutableList.of(createMockLiveFileMetaData("file" + + snapshotLocalData.getVersion() + 1 + ".sst", KEY_TABLE, "key1", "key2"))); + snap.addSnapshotVersion(snapshotStore); + snap.removeVersion(snapshotLocalData.getVersion()); + snapshotLocalData.setVersion(snapshotLocalData.getVersion() - 1); snap.commit(); } try (ReadableOmSnapshotLocalDataProvider snap = snapshotLocalDataManager.getOmSnapshotLocalData(snapId)) { From ac4719b311d0ee4bb7b81bca512bfb4c39b75252 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 17 Oct 2025 13:47:51 -0400 Subject: [PATCH 58/97] Merge Change-Id: Ie7ac6a6cad96577e53a99b38a57e54ed888f0333 --- .../java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java index 5d474d371329..cadeabe75458 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java @@ -147,7 +147,7 @@ public boolean getNeedsDefrag() { * Sets whether the snapshot needs defrag. 
* @param needsDefrag true if the snapshot needs defrag, false otherwise */ - void setNeedsDefrag(boolean needsDefrag) { + public void setNeedsDefrag(boolean needsDefrag) { this.needsDefrag = needsDefrag; } From cf19dce44465d132709d0c76d76f8624ee13d645 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 17 Oct 2025 14:00:05 -0400 Subject: [PATCH 59/97] HDDS-13783. Address review comments Change-Id: I3df543d896463f24ba3b69fce1b2f655af612dc6 --- .../hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index e63f73e295d6..0be19dfb36ee 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -370,7 +370,6 @@ public void testWriteVersionAdditionValidationWithoutPreviousSnapshotVersionExis "key1", "key2")), 3); IOException ex = assertThrows(IOException.class, omSnapshotLocalDataProvider::commit); - System.out.println(ex.getMessage()); assertTrue(ex.getMessage().contains("since previous snapshot with version hasn't been loaded")); } } From 34097de7a535eb050d0378a672f1ff5660c3574c Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 17 Oct 2025 14:08:21 -0400 Subject: [PATCH 60/97] HDDS-13783. 
Address review comments Change-Id: I94cf4b82b2b620f480e2d1e01e6d94a6679d974e --- .../ozone/om/snapshot/OmSnapshotLocalDataManager.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 430ba1ef4555..d9316cbcb2b7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -215,6 +215,11 @@ void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) throws if (prevSnapId != null && !versionNodeMap.containsKey(prevSnapId)) { File previousSnapshotLocalDataFile = new File(getSnapshotLocalPropertyYamlPath(prevSnapId)); OmSnapshotLocalData prevSnapshotLocalData = snapshotLocalDataSerializer.load(previousSnapshotLocalDataFile); + if (!prevSnapId.equals(prevSnapshotLocalData.getSnapshotId())) { + throw new IOException("SnapshotId mismatch: expected " + prevSnapId + + " but found " + prevSnapshotLocalData.getSnapshotId() + + " in file " + previousSnapshotLocalDataFile.getAbsolutePath()); + } stack.push(Pair.of(prevSnapshotLocalData.getSnapshotId(), new SnapshotVersionsMeta(prevSnapshotLocalData))); } visitedSnapshotIds.add(snapId); @@ -441,7 +446,7 @@ private HierarchicalResourceLock acquireLock(UUID snapId, boolean readLock) thro } /** - * Intializer the snapshot local data by acquiring the lock on the snapshot and also acquires a read lock on the + * Intializes the snapshot local data by acquiring the lock on the snapshot and also acquires a read lock on the * snapshotId to be resolved by iterating through the chain of previous snapshot ids. 
*/ private LockDataProviderInitResult initialize( From c46ddc276a34a236bc5bc3607f19805cbe7e699c Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 17 Oct 2025 14:10:37 -0400 Subject: [PATCH 61/97] HDDS-13783. Address review comments Change-Id: I661e61e04031c1bcd537024e0a0859a6d6aeaffd --- .../org/apache/hadoop/ozone/om/OmSnapshotLocalData.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java index 5d474d371329..fb9dbe1c49e9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java @@ -115,7 +115,7 @@ public boolean getSstFiltered() { * Sets whether SST is filtered for this snapshot. * @param sstFiltered */ - void setSstFiltered(boolean sstFiltered) { + public void setSstFiltered(boolean sstFiltered) { this.isSSTFiltered = sstFiltered; } @@ -131,7 +131,7 @@ public long getLastDefragTime() { * Sets the last defrag time, in epoch milliseconds. * @param lastDefragTime Timestamp of the last defrag */ - void setLastDefragTime(Long lastDefragTime) { + public void setLastDefragTime(Long lastDefragTime) { this.lastDefragTime = lastDefragTime; } @@ -147,7 +147,7 @@ public boolean getNeedsDefrag() { * Sets whether the snapshot needs defrag. * @param needsDefrag true if the snapshot needs defrag, false otherwise */ - void setNeedsDefrag(boolean needsDefrag) { + public void setNeedsDefrag(boolean needsDefrag) { this.needsDefrag = needsDefrag; } From 99afc0294cbdf922f1719cc36650f27db745bd67 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 17 Oct 2025 15:08:10 -0400 Subject: [PATCH 62/97] HDDS-13783. 
Address review comments Change-Id: I59cab67d93f0359bca54c0c8119f5018167b7d1a --- .../java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java index fb9dbe1c49e9..1c840a1cd2e9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java @@ -260,7 +260,7 @@ public int getVersion() { * Sets the version of the snapshot local data. A valid version shall be greater than 0. * @param version version */ - void setVersion(int version) { + public void setVersion(int version) { this.version = version; } From 48ec0bb1f1d34a90b86aeb67aab9dd623ed4f23e Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sat, 18 Oct 2025 15:07:19 -0400 Subject: [PATCH 63/97] HDDS-13810. 
Fix Build Issue because of unused dependency Change-Id: I950b4d0cc45a74369b8efa36deaf947db4cb35bc --- hadoop-hdds/server-scm/pom.xml | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml index 811d439f7dd8..d7a1f9dc19c9 100644 --- a/hadoop-hdds/server-scm/pom.xml +++ b/hadoop-hdds/server-scm/pom.xml @@ -38,11 +38,6 @@ com.fasterxml.jackson.core jackson-core - - com.fasterxml.jackson.core - jackson-databind - - com.google.guava guava @@ -71,11 +66,6 @@ org.apache.commons commons-collections4 - - org.apache.commons - commons-compress - - org.apache.commons commons-lang3 @@ -88,11 +78,6 @@ org.apache.hadoop hadoop-common - - org.apache.ozone - hdds-client - - org.apache.ozone hdds-common @@ -170,6 +155,16 @@ hdds-docs provided + + com.fasterxml.jackson.core + jackson-databind + test + + + org.apache.commons + commons-compress + test + @@ -194,6 +189,11 @@ hadoop-hdfs-client test + + org.apache.ozone + hdds-client + test + org.apache.ozone hdds-common From cb31b7c6ae6e7a840d020a0a14afc0657e2fe801 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sun, 19 Oct 2025 08:32:32 -0400 Subject: [PATCH 64/97] Revert "HDDS-13810. Fix Build Issue because of unused dependency" This reverts commit 48ec0bb1f1d34a90b86aeb67aab9dd623ed4f23e. 
--- hadoop-hdds/server-scm/pom.xml | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml index d7a1f9dc19c9..811d439f7dd8 100644 --- a/hadoop-hdds/server-scm/pom.xml +++ b/hadoop-hdds/server-scm/pom.xml @@ -38,6 +38,11 @@ com.fasterxml.jackson.core jackson-core + + com.fasterxml.jackson.core + jackson-databind + + com.google.guava guava @@ -66,6 +71,11 @@ org.apache.commons commons-collections4 + + org.apache.commons + commons-compress + + org.apache.commons commons-lang3 @@ -78,6 +88,11 @@ org.apache.hadoop hadoop-common + + org.apache.ozone + hdds-client + + org.apache.ozone hdds-common @@ -155,16 +170,6 @@ hdds-docs provided - - com.fasterxml.jackson.core - jackson-databind - test - - - org.apache.commons - commons-compress - test - @@ -189,11 +194,6 @@ hadoop-hdfs-client test - - org.apache.ozone - hdds-client - test - org.apache.ozone hdds-common From ff90af89830d6abc72763e21a516d2c78505f730 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sun, 19 Oct 2025 22:57:33 -0400 Subject: [PATCH 65/97] HDDS-13785. 
Add unit tests Change-Id: I21de33cbea63edda0bd21503cb2516ea1d1f1647 --- .../snapshot/OmSnapshotLocalDataManager.java | 6 +- .../TestOmSnapshotLocalDataManager.java | 79 ++++++++++++++++++- 2 files changed, 81 insertions(+), 4 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 98260feea71d..71f274b39c84 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -333,7 +333,7 @@ void checkOrphanSnapshotVersions(OMMetadataManager metadataManager, SnapshotChai && ((versionEntry.getVersion() != 0 && versionEntry.getVersion() != snapshotLocalData.getVersion()) || isSnapshotPurged); if (toRemove) { - snapshotLocalData.removeVersionSSTFileInfos(versionEntry.getVersion()); + snapshotLocalDataProvider.removeVersion(versionEntry.getVersion()); } } snapshotLocalDataProvider.commit(); @@ -438,7 +438,7 @@ private synchronized void upsertNode(UUID snapshotId, SnapshotVersionsMeta snaps // Track all predecessors of the existing versions and remove the node from the graph. 
for (Map.Entry existingVersion : existingVersions.entrySet()) { LocalDataVersionNode existingVersionNode = existingVersion.getValue(); - predecessors.put(existingVersion.getKey(), localDataGraph.predecessors(existingVersionNode)); + predecessors.put(existingVersion.getKey(), new HashSet<>(localDataGraph.predecessors(existingVersionNode))); versionsRemoved = versionsRemoved || !newVersions.containsKey(existingVersion.getKey()); localDataGraph.removeNode(existingVersionNode); } @@ -787,7 +787,7 @@ public synchronized void commit() throws IOException { } else if (snapshotLocalDataFile.exists()) { LOG.info("Deleting Yaml file corresponding to snapshotId: {} in path : {}", super.snapshotId, snapshotLocalDataFile.getAbsolutePath()); - if (snapshotLocalDataFile.delete()) { + if (!snapshotLocalDataFile.delete()) { throw new IOException("Unable to delete file " + snapshotLocalDataFile.getAbsolutePath()); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index 51d491c23f90..bfb6a83cc500 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -18,11 +18,13 @@ package org.apache.hadoop.ozone.om.snapshot; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_SEPARATOR; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL; import static org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml.YAML_FILE_EXTENSION; import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.DIRECTORY_TABLE; import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.FILE_TABLE; import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.KEY_TABLE; import static 
org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -38,6 +40,7 @@ import com.google.common.collect.ImmutableMap; import java.io.File; import java.io.IOException; +import java.nio.file.NoSuchFileException; import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; @@ -98,6 +101,7 @@ public class TestOmSnapshotLocalDataManager { private static YamlSerializer snapshotLocalDataYamlSerializer; private static List lockCapturor; private static OzoneConfiguration conf; + private static Map purgedSnapshotIdMap; @Mock private OMMetadataManager omMetadataManager; @@ -137,6 +141,7 @@ public void computeAndSetChecksum(Yaml yaml, OmSnapshotLocalData data) throws IO } }; lockCapturor = new ArrayList<>(); + purgedSnapshotIdMap = new HashMap<>(); } @AfterAll @@ -163,7 +168,10 @@ public void setUp() throws IOException { when(rdbStore.getSnapshotsParentDir()).thenReturn(snapshotsDir.getAbsolutePath()); when(rdbStore.getDbLocation()).thenReturn(dbLocation); this.snapshotUtilMock = mockStatic(SnapshotUtils.class); - snapshotUtilMock.when(() -> SnapshotUtils.isSnapshotPurged(any(), any(), any())).thenReturn(false); + purgedSnapshotIdMap.clear(); + snapshotUtilMock.when(() -> SnapshotUtils.isSnapshotPurged(any(), any(), any())) + .thenAnswer(i -> purgedSnapshotIdMap.getOrDefault(i.getArgument(2), false)); + conf.setInt(OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL, -1); } @AfterEach @@ -421,6 +429,75 @@ private void validateVersions(OmSnapshotLocalDataManager snapshotLocalDataManage } } + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testOrphanVersionDeletionWithVersionDeletion(boolean purgeSnapshot) throws IOException { + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, 
conf); + List snapshotIds = createSnapshotLocalData(localDataManager, 3); + UUID firstSnapId = snapshotIds.get(0); + UUID secondSnapId = snapshotIds.get(1); + UUID thirdSnapId = snapshotIds.get(2); + + addVersionsToLocalData(localDataManager, firstSnapId, ImmutableMap.of(1, 1, 2, 2, 3, 3)); + addVersionsToLocalData(localDataManager, secondSnapId, ImmutableMap.of(4, 2, 8, 1, 10, 3, 11, 3)); + addVersionsToLocalData(localDataManager, thirdSnapId, ImmutableMap.of(5, 8, 13, 10)); + assertEquals(new HashSet<>(snapshotIds), localDataManager.getSnapshotToBeCheckedForOrphans().keySet()); + localDataManager.getSnapshotToBeCheckedForOrphans().clear(); + purgedSnapshotIdMap.put(secondSnapId, purgeSnapshot); + localDataManager.checkOrphanSnapshotVersions(omMetadataManager, null, thirdSnapId); + try (ReadableOmSnapshotLocalDataProvider snap = localDataManager.getOmSnapshotLocalData(thirdSnapId)) { + OmSnapshotLocalData snapshotLocalData = snap.getSnapshotLocalData(); + assertEquals(Sets.newHashSet(0, 13), snapshotLocalData.getVersionSstFileInfos().keySet()); + } + assertTrue(localDataManager.getSnapshotToBeCheckedForOrphans().containsKey(secondSnapId)); + localDataManager.checkOrphanSnapshotVersions(omMetadataManager, null, secondSnapId); + try (ReadableOmSnapshotLocalDataProvider snap = localDataManager.getOmSnapshotLocalData(secondSnapId)) { + OmSnapshotLocalData snapshotLocalData = snap.getSnapshotLocalData(); + if (purgeSnapshot) { + assertEquals(Sets.newHashSet(0, 10), snapshotLocalData.getVersionSstFileInfos().keySet()); + } else { + assertEquals(Sets.newHashSet(0, 10, 11), snapshotLocalData.getVersionSstFileInfos().keySet()); + } + } + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testOrphanVersionDeletionWithChainUpdate(boolean purgeSnapshot) throws IOException { + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); + List snapshotIds = createSnapshotLocalData(localDataManager, 3); + UUID firstSnapId = 
snapshotIds.get(0); + UUID secondSnapId = snapshotIds.get(1); + UUID thirdSnapId = snapshotIds.get(2); + + addVersionsToLocalData(localDataManager, firstSnapId, ImmutableMap.of(1, 1, 2, 2, 3, 3)); + addVersionsToLocalData(localDataManager, secondSnapId, ImmutableMap.of(4, 2, 8, 1, 10, 3, 11, 3)); + addVersionsToLocalData(localDataManager, thirdSnapId, ImmutableMap.of(5, 8, 13, 10)); + purgedSnapshotIdMap.put(secondSnapId, purgeSnapshot); + try (WritableOmSnapshotLocalDataProvider snapshotLocalDataProvider = + localDataManager.getWritableOmSnapshotLocalData(thirdSnapId, firstSnapId)) { + snapshotLocalDataProvider.commit(); + } + try (ReadableOmSnapshotLocalDataProvider snap = localDataManager.getOmSnapshotLocalData(thirdSnapId)) { + OmSnapshotLocalData snapshotLocalData = snap.getSnapshotLocalData(); + assertEquals(Sets.newHashSet(0, 5, 13), snapshotLocalData.getVersionSstFileInfos().keySet()); + assertEquals(firstSnapId, snapshotLocalData.getPreviousSnapshotId()); + } + + assertTrue(localDataManager.getSnapshotToBeCheckedForOrphans().containsKey(secondSnapId)); + localDataManager.checkOrphanSnapshotVersions(omMetadataManager, null, secondSnapId); + if (purgeSnapshot) { + NoSuchFileException e = assertThrows(NoSuchFileException.class, + () -> localDataManager.getOmSnapshotLocalData(secondSnapId)); + assertFalse(localDataManager.getVersionNodeMap().containsKey(secondSnapId)); + } else { + try (ReadableOmSnapshotLocalDataProvider snap = localDataManager.getOmSnapshotLocalData(secondSnapId)) { + OmSnapshotLocalData snapshotLocalData = snap.getSnapshotLocalData(); + assertEquals(Sets.newHashSet(0, 11), snapshotLocalData.getVersionSstFileInfos().keySet()); + } + } + } + /** * Validates write-time version propagation and removal rules when the previous * snapshot already has a concrete version recorded. 
From 8b014dd981c3a0f4996acb9f52cd5b748c34159f Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sun, 19 Oct 2025 23:12:13 -0400 Subject: [PATCH 66/97] HDDS-13783. Add case for commit key in middle of chain Change-Id: I37d2f5c07f405f3069a4cb99881d5d1e67110e79 --- .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 2 +- .../ozone/om/snapshot/TestOmSnapshotLocalDataManager.java | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index d9316cbcb2b7..86029241a05b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -341,7 +341,7 @@ private synchronized void upsertNode(UUID snapshotId, SnapshotVersionsMeta snaps // Track all predecessors of the existing versions and remove the node from the graph. for (Map.Entry existingVersion : existingVersions.entrySet()) { LocalDataVersionNode existingVersionNode = existingVersion.getValue(); - predecessors.put(existingVersion.getKey(), localDataGraph.predecessors(existingVersionNode)); + predecessors.put(existingVersion.getKey(), new HashSet<>(localDataGraph.predecessors(existingVersionNode))); localDataGraph.removeNode(existingVersionNode); } // Add the nodes to be added in the graph and map. 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index 0be19dfb36ee..1a444d688ef0 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -440,11 +440,13 @@ private void validateVersions(OmSnapshotLocalDataManager snapshotLocalDataManage @ValueSource(booleans = {true, false}) public void testWriteVersionValidation(boolean nextVersionExisting) throws IOException { localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); - List snapshotIds = createSnapshotLocalData(localDataManager, 2); + List snapshotIds = createSnapshotLocalData(localDataManager, 3); UUID prevSnapId = snapshotIds.get(0); UUID snapId = snapshotIds.get(1); + UUID nextSnapId = snapshotIds.get(2); addVersionsToLocalData(localDataManager, prevSnapId, ImmutableMap.of(4, 1)); addVersionsToLocalData(localDataManager, snapId, ImmutableMap.of(5, 4)); + addVersionsToLocalData(localDataManager, nextSnapId, ImmutableMap.of(6, 0)); validateVersions(localDataManager, snapId, 5, Sets.newHashSet(0, 5)); validateVersions(localDataManager, prevSnapId, 4, Sets.newHashSet(0, 4)); From 57662c651945592230c1dc7f83d7f29d481dee0f Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 20 Oct 2025 07:42:27 -0400 Subject: [PATCH 67/97] HDDS-13783. 
Convert set to list of predecessors Change-Id: I119e8cede140c755d3cd09c0a56234ff4906be98 --- .../om/snapshot/OmSnapshotLocalDataManager.java | 13 ++++++++----- .../om/snapshot/TestOmSnapshotLocalDataManager.java | 10 ++++++++++ 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 86029241a05b..74a4c89cfd11 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -28,6 +28,7 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.nio.file.StandardCopyOption; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Comparator; @@ -337,18 +338,20 @@ private synchronized void upsertNode(UUID snapshotId, SnapshotVersionsMeta snaps SnapshotVersionsMeta existingSnapVersions = getVersionNodeMap().remove(snapshotId); Map existingVersions = existingSnapVersions == null ? Collections.emptyMap() : existingSnapVersions.getSnapshotVersions(); - Map> predecessors = new HashMap<>(); + Map> predecessors = new HashMap<>(); // Track all predecessors of the existing versions and remove the node from the graph. for (Map.Entry existingVersion : existingVersions.entrySet()) { LocalDataVersionNode existingVersionNode = existingVersion.getValue(); - predecessors.put(existingVersion.getKey(), new HashSet<>(localDataGraph.predecessors(existingVersionNode))); + // Create a copy of predecessors since the list of nodes returned would be a mutable set and it changes as the + // nodes in the graph would change. 
+ predecessors.put(existingVersion.getKey(), new ArrayList<>(localDataGraph.predecessors(existingVersionNode))); localDataGraph.removeNode(existingVersionNode); } // Add the nodes to be added in the graph and map. addSnapshotVersionMeta(snapshotId, snapshotVersions); // Reconnect all the predecessors for existing nodes. for (Map.Entry entry : snapshotVersions.getSnapshotVersions().entrySet()) { - for (LocalDataVersionNode predecessor : predecessors.getOrDefault(entry.getKey(), Collections.emptySet())) { + for (LocalDataVersionNode predecessor : predecessors.getOrDefault(entry.getKey(), Collections.emptyList())) { localDataGraph.putEdge(predecessor, entry.getValue()); } } @@ -514,11 +517,11 @@ private LockDataProviderInitResult initialize( Set versionNode = localDataGraph.successors(entry.getValue()); if (versionNode.size() > 1) { throw new IOException(String.format("Snapshot %s version %d has multiple successors %s", - currentIteratedSnapshotId, entry.getValue(), versionNode)); + currentIteratedSnapshotId, entry.getValue().getVersion(), versionNode)); } if (versionNode.isEmpty()) { throw new IOException(String.format("Snapshot %s version %d doesn't have successor", - currentIteratedSnapshotId, entry.getValue())); + currentIteratedSnapshotId, entry.getValue().getVersion())); } // Set the version node for iterated version to the successor corresponding to the previous snapshot id. 
entry.setValue(versionNode.iterator().next()); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index 1a444d688ef0..947c1a4b7f47 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -472,6 +472,16 @@ public void testWriteVersionValidation(boolean nextVersionExisting) throws IOExc } validateVersions(localDataManager, snapId, 5, Sets.newHashSet(0)); validateVersions(localDataManager, prevSnapId, 4, Sets.newHashSet(0)); + // Check next snapshot is able to resolve to previous snapshot. + try (ReadableOmSnapshotLocalDataProvider nextSnap = localDataManager.getOmSnapshotLocalData(nextSnapId, + prevSnapId)) { + OmSnapshotLocalData snapshotLocalData = nextSnap.getSnapshotLocalData(); + assertEquals(prevSnapId, snapshotLocalData.getPreviousSnapshotId()); + snapshotLocalData.getVersionSstFileInfos() + .forEach((version, versionMeta) -> { + assertEquals(0, versionMeta.getPreviousSnapshotVersion()); + }); + } } } From 5f0bb916662a9e0f168e5b4cd23b74e5be740fc1 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 23 Oct 2025 21:22:21 -0400 Subject: [PATCH 68/97] HDDS-13833. 
Add transactionInfo field in SnapshotLocalData and update the value on SnapshotPurgeRequest Change-Id: I0e9de1954fc65a907b9f4a98d6f84b2772d15551 --- .../org/apache/hadoop/ozone/OzoneConsts.java | 1 + .../hadoop/ozone/om/OmSnapshotLocalData.java | 17 ++++++++++- .../ozone/om/OmSnapshotLocalDataYaml.java | 8 +++++- .../snapshot/OMSnapshotPurgeRequest.java | 6 ++-- .../snapshot/OMSnapshotPurgeResponse.java | 27 ++++++++++++++---- .../snapshot/OmSnapshotLocalDataManager.java | 10 ++++++- .../ozone/om/TestOmSnapshotLocalDataYaml.java | 28 ++++++++++++++----- ...TestOMSnapshotPurgeRequestAndResponse.java | 18 ++++++++++-- .../TestOmSnapshotLocalDataManager.java | 23 ++++++++++++++- 9 files changed, 118 insertions(+), 20 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index cb4490c2c1db..aecbdfae615d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -221,6 +221,7 @@ public final class OzoneConsts { public static final String OM_SST_FILE_INFO_START_KEY = "startKey"; public static final String OM_SST_FILE_INFO_END_KEY = "endKey"; public static final String OM_SST_FILE_INFO_COL_FAMILY = "columnFamily"; + public static final String OM_SLD_TXN_INFO = "transactionInfo"; // YAML fields for .container files public static final String CONTAINER_ID = "containerID"; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java index 1c840a1cd2e9..02e07914b311 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalData.java @@ -30,6 +30,7 @@ import java.util.UUID; 
import java.util.stream.Collectors; import org.apache.commons.codec.digest.DigestUtils; +import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.CopyObject; import org.apache.hadoop.ozone.util.WithChecksum; import org.apache.ozone.compaction.log.SstFileInfo; @@ -63,6 +64,9 @@ public class OmSnapshotLocalData implements WithChecksum { // Previous snapshotId based on which the snapshot local data is built. private UUID previousSnapshotId; + // Stores the transactionInfo corresponding to OM when the snaphot is purged. + private TransactionInfo transactionInfo; + // Map of version to VersionMeta, using linkedHashMap since the order of the map needs to be deterministic for // checksum computation. private final LinkedHashMap versionSstFileInfos; @@ -73,7 +77,8 @@ public class OmSnapshotLocalData implements WithChecksum { /** * Creates a OmSnapshotLocalData object with default values. */ - public OmSnapshotLocalData(UUID snapshotId, List notDefraggedSSTFileList, UUID previousSnapshotId) { + public OmSnapshotLocalData(UUID snapshotId, List notDefraggedSSTFileList, UUID previousSnapshotId, + TransactionInfo transactionInfo) { this.snapshotId = snapshotId; this.isSSTFiltered = false; this.lastDefragTime = 0L; @@ -83,6 +88,7 @@ public OmSnapshotLocalData(UUID snapshotId, List notDefraggedS new VersionMeta(0, notDefraggedSSTFileList.stream().map(SstFileInfo::new).collect(Collectors.toList()))); this.version = 0; this.previousSnapshotId = previousSnapshotId; + this.transactionInfo = transactionInfo; setChecksumTo0ByteArray(); } @@ -101,6 +107,15 @@ public OmSnapshotLocalData(OmSnapshotLocalData source) { this.previousSnapshotId = source.previousSnapshotId; this.versionSstFileInfos = new LinkedHashMap<>(); setVersionSstFileInfos(source.versionSstFileInfos); + this.transactionInfo = source.transactionInfo; + } + + public TransactionInfo getTransactionInfo() { + return transactionInfo; + } + + public void setTransactionInfo(TransactionInfo 
transactionInfo) { + this.transactionInfo = transactionInfo; } /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java index c376e9a332c0..344d7305db43 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotLocalDataYaml.java @@ -24,6 +24,7 @@ import org.apache.commons.pool2.BasePooledObjectFactory; import org.apache.commons.pool2.PooledObject; import org.apache.commons.pool2.impl.DefaultPooledObject; +import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OmSnapshotLocalData.VersionMeta; import org.apache.ozone.compaction.log.SstFileInfo; @@ -71,6 +72,8 @@ private static class OmSnapshotLocalDataRepresenter extends Representer { this.addClassTag(SstFileInfo.class, SST_FILE_INFO_TAG); representers.put(SstFileInfo.class, new RepresentSstFileInfo()); representers.put(VersionMeta.class, new RepresentVersionMeta()); + representers.put(TransactionInfo.class, data -> new ScalarNode(Tag.STR, data.toString(), null, null, + DumperOptions.ScalarStyle.PLAIN)); representers.put(UUID.class, data -> new ScalarNode(Tag.STR, data.toString(), null, null, DumperOptions.ScalarStyle.PLAIN)); } @@ -168,7 +171,10 @@ public Object construct(Node node) { UUID snapId = UUID.fromString(snapIdStr); final String prevSnapIdStr = (String) nodes.get(OzoneConsts.OM_SLD_PREV_SNAP_ID); UUID prevSnapId = prevSnapIdStr != null ? UUID.fromString(prevSnapIdStr) : null; - OmSnapshotLocalData snapshotLocalData = new OmSnapshotLocalData(snapId, Collections.emptyList(), prevSnapId); + final String purgeTxInfoStr = (String) nodes.get(OzoneConsts.OM_SLD_TXN_INFO); + TransactionInfo transactionInfo = purgeTxInfoStr != null ? 
TransactionInfo.valueOf(purgeTxInfoStr) : null; + OmSnapshotLocalData snapshotLocalData = new OmSnapshotLocalData(snapId, Collections.emptyList(), prevSnapId, + transactionInfo); // Set version from YAML Integer version = (Integer) nodes.get(OzoneConsts.OM_SLD_VERSION); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java index 5524371bf1e2..a1a1d306c238 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java @@ -91,6 +91,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut List snapshotDbKeys = snapshotPurgeRequest .getSnapshotDBKeysList(); + TransactionInfo transactionInfo = TransactionInfo.valueOf(context.getTermIndex()); try { // Each snapshot purge operation does three things: @@ -123,12 +124,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut } // Update the snapshotInfo lastTransactionInfo. 
for (SnapshotInfo snapshotInfo : updatedSnapshotInfos.values()) { - snapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(context.getTermIndex()).toByteString()); + snapshotInfo.setLastTransactionInfo(transactionInfo.toByteString()); omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(snapshotInfo.getTableKey()), CacheValue.get(context.getIndex(), snapshotInfo)); } - omClientResponse = new OMSnapshotPurgeResponse(omResponse.build(), snapshotDbKeys, updatedSnapshotInfos); + omClientResponse = new OMSnapshotPurgeResponse(omResponse.build(), snapshotDbKeys, updatedSnapshotInfos, + transactionInfo); omSnapshotIntMetrics.incNumSnapshotPurges(); LOG.info("Successfully executed snapshotPurgeRequest: {{}} along with updating snapshots:{}.", diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java index 3797b3fcf2eb..8a370cb975e5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java @@ -27,6 +27,7 @@ import java.util.List; import java.util.Map; import org.apache.commons.io.FileUtils; +import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; @@ -36,6 +37,7 @@ import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager.WritableOmSnapshotLocalDataProvider; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -49,15 +51,18 @@ public class OMSnapshotPurgeResponse extends OMClientResponse { LoggerFactory.getLogger(OMSnapshotPurgeResponse.class); private final List snapshotDbKeys; private final Map updatedSnapInfos; + private final TransactionInfo transactionInfo; public OMSnapshotPurgeResponse( @Nonnull OMResponse omResponse, @Nonnull List snapshotDbKeys, - Map updatedSnapInfos + Map updatedSnapInfos, + TransactionInfo transactionInfo ) { super(omResponse); this.snapshotDbKeys = snapshotDbKeys; this.updatedSnapInfos = updatedSnapInfos; + this.transactionInfo = transactionInfo; } /** @@ -69,6 +74,7 @@ public OMSnapshotPurgeResponse(@Nonnull OMResponse omResponse) { checkStatusNotOK(); this.snapshotDbKeys = null; this.updatedSnapInfos = null; + this.transactionInfo = null; } @Override @@ -96,10 +102,14 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager, // Remove the snapshot from snapshotId to snapshotTableKey map. ((OmMetadataManagerImpl) omMetadataManager).getSnapshotChainManager() .removeFromSnapshotIdToTable(snapshotInfo.getSnapshotId()); - // Delete Snapshot checkpoint directory. + OmSnapshotLocalDataManager snapshotLocalDataManager = ((OmMetadataManagerImpl) omMetadataManager) .getOzoneManager().getOmSnapshotManager().getSnapshotLocalDataManager(); - deleteCheckpointDirectory(snapshotLocalDataManager, omMetadataManager, snapshotInfo); + // Update snapshot local data to update purge transaction info. This would be used to check whether the + // snapshot purged txn is flushed to rocksdb. + updateLocalData(snapshotLocalDataManager, snapshotInfo); + // Delete Snapshot checkpoint directory. + deleteCheckpointDirectory(omMetadataManager, snapshotInfo); // Delete snapshotInfo from the table. 
omMetadataManager.getSnapshotInfoTable().deleteWithBatch(batchOperation, dbKey); } @@ -115,11 +125,18 @@ private void updateSnapInfo(OmMetadataManagerImpl metadataManager, } } + private void updateLocalData(OmSnapshotLocalDataManager localDataManager, SnapshotInfo snapshotInfo) + throws IOException { + try (WritableOmSnapshotLocalDataProvider snap = localDataManager.getWritableOmSnapshotLocalData(snapshotInfo)) { + snap.setTransactionInfo(this.transactionInfo); + snap.commit(); + } + } + /** * Deletes the checkpoint directory for a snapshot. */ - private void deleteCheckpointDirectory(OmSnapshotLocalDataManager snapshotLocalDataManager, - OMMetadataManager omMetadataManager, SnapshotInfo snapshotInfo) { + private void deleteCheckpointDirectory(OMMetadataManager omMetadataManager, SnapshotInfo snapshotInfo) { // Acquiring write lock to avoid race condition with sst filtering service which creates a sst filtered file // inside the snapshot directory. Any operation apart which doesn't create/delete files under this snapshot // directory can run in parallel along with this operation. 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 74a4c89cfd11..09fdbee0af0b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -44,6 +44,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.stream.Collectors; import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshotLocalData; @@ -136,7 +137,8 @@ public void createNewOmSnapshotLocalDataFile(RDBStore snapshotStore, SnapshotInf try (WritableOmSnapshotLocalDataProvider snapshotLocalData = new WritableOmSnapshotLocalDataProvider(snapshotInfo.getSnapshotId(), () -> Pair.of(new OmSnapshotLocalData(snapshotInfo.getSnapshotId(), - OmSnapshotManager.getSnapshotSSTFileList(snapshotStore), snapshotInfo.getPathPreviousSnapshotId()), + OmSnapshotManager.getSnapshotSSTFileList(snapshotStore), snapshotInfo.getPathPreviousSnapshotId(), + null), null))) { snapshotLocalData.commit(); } @@ -652,6 +654,12 @@ public void removeVersion(int version) { setDirty(); } + public void setTransactionInfo(TransactionInfo transactionInfo) { + this.getSnapshotLocalData().setTransactionInfo(transactionInfo); + // Set Dirty when the transactionInfo is set. + setDirty(); + } + public synchronized void commit() throws IOException { // Validate modification and commit the changes. 
SnapshotVersionsMeta localDataVersionNodes = validateModification(super.snapshotLocalData); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java index b234014ebbc0..2f8b7be9a195 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotLocalDataYaml.java @@ -37,11 +37,13 @@ import java.util.List; import java.util.Map; import java.util.UUID; +import java.util.concurrent.ThreadLocalRandom; import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.fs.FileSystemTestHelper; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.StringUtils; +import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OmSnapshotLocalData.VersionMeta; import org.apache.hadoop.ozone.util.ObjectSerializer; @@ -106,7 +108,8 @@ private LiveFileMetaData createLiveFileMetaData(String fileName, String table, S /** * Creates a snapshot local data YAML file. 
*/ - private Pair writeToYaml(UUID snapshotId, String snapshotName) throws IOException { + private Pair writeToYaml(UUID snapshotId, String snapshotName, TransactionInfo transactionInfo) + throws IOException { String yamlFilePath = snapshotName + ".yaml"; UUID previousSnapshotId = UUID.randomUUID(); // Create snapshot data with not defragged SST files @@ -115,7 +118,7 @@ private Pair writeToYaml(UUID snapshotId, String snapshotName) throw createLiveFileMetaData("sst2", "table1", "k3", "k4"), createLiveFileMetaData("sst3", "table2", "k4", "k5")); OmSnapshotLocalData dataYaml = new OmSnapshotLocalData(snapshotId, notDefraggedSSTFileList, - previousSnapshotId); + previousSnapshotId, transactionInfo); // Set version dataYaml.setVersion(42); @@ -150,7 +153,9 @@ private Pair writeToYaml(UUID snapshotId, String snapshotName) throw @Test public void testWriteToYaml() throws IOException { UUID snapshotId = UUID.randomUUID(); - Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot1"); + TransactionInfo transactionInfo = TransactionInfo.valueOf(ThreadLocalRandom.current().nextLong(), + ThreadLocalRandom.current().nextLong()); + Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot1", transactionInfo); File yamlFile = yamlFilePrevIdPair.getLeft(); UUID prevSnapId = yamlFilePrevIdPair.getRight(); @@ -160,6 +165,7 @@ public void testWriteToYaml() throws IOException { // Verify fields assertEquals(44, snapshotData.getVersion()); assertTrue(snapshotData.getSstFiltered()); + assertEquals(transactionInfo, snapshotData.getTransactionInfo()); VersionMeta notDefraggedSSTFiles = snapshotData.getVersionSstFileInfos().get(0); assertEquals(new VersionMeta(0, @@ -192,17 +198,19 @@ public void testWriteToYaml() throws IOException { @Test public void testUpdateSnapshotDataFile() throws IOException { UUID snapshotId = UUID.randomUUID(); - Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot2"); + Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot2", null); File 
yamlFile = yamlFilePrevIdPair.getLeft(); // Read from YAML file OmSnapshotLocalData dataYaml = omSnapshotLocalDataSerializer.load(yamlFile); - + TransactionInfo transactionInfo = TransactionInfo.valueOf(ThreadLocalRandom.current().nextLong(), + ThreadLocalRandom.current().nextLong()); // Update snapshot data dataYaml.setSstFiltered(false); dataYaml.setNeedsDefrag(false); dataYaml.addVersionSSTFileInfos( singletonList(createLiveFileMetaData("defragged-sst4", "table3", "k5", "k6")), 5); + dataYaml.setTransactionInfo(transactionInfo); // Write updated data back to file omSnapshotLocalDataSerializer.save(yamlFile, dataYaml); @@ -213,6 +221,7 @@ public void testUpdateSnapshotDataFile() throws IOException { // Verify updated data assertThat(dataYaml.getSstFiltered()).isFalse(); assertThat(dataYaml.getNeedsDefrag()).isFalse(); + assertEquals(transactionInfo, dataYaml.getTransactionInfo()); Map defraggedFiles = dataYaml.getVersionSstFileInfos(); assertEquals(4, defraggedFiles.size()); @@ -234,7 +243,9 @@ public void testEmptyFile() throws IOException { @Test public void testChecksum() throws IOException { UUID snapshotId = UUID.randomUUID(); - Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot3"); + TransactionInfo transactionInfo = TransactionInfo.valueOf(ThreadLocalRandom.current().nextLong(), + ThreadLocalRandom.current().nextLong()); + Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot3", transactionInfo); File yamlFile = yamlFilePrevIdPair.getLeft(); // Read from YAML file OmSnapshotLocalData snapshotData = omSnapshotLocalDataSerializer.load(yamlFile); @@ -251,7 +262,9 @@ public void testChecksum() throws IOException { @Test public void testYamlContainsAllFields() throws IOException { UUID snapshotId = UUID.randomUUID(); - Pair yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot4"); + TransactionInfo transactionInfo = TransactionInfo.valueOf(ThreadLocalRandom.current().nextLong(), + ThreadLocalRandom.current().nextLong()); + Pair 
yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot4", transactionInfo); File yamlFile = yamlFilePrevIdPair.getLeft(); String content = FileUtils.readFileToString(yamlFile, Charset.defaultCharset()); @@ -264,5 +277,6 @@ public void testYamlContainsAllFields() throws IOException { assertThat(content).contains(OzoneConsts.OM_SLD_VERSION_SST_FILE_INFO); assertThat(content).contains(OzoneConsts.OM_SLD_SNAP_ID); assertThat(content).contains(OzoneConsts.OM_SLD_PREV_SNAP_ID); + assertThat(content).contains(OzoneConsts.OM_SLD_TXN_INFO); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java index 35053882eeda..b78975ef0816 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java @@ -52,6 +52,7 @@ import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotPurgeResponse; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataProvider; import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotPurgeRequest; @@ -159,6 +160,10 @@ public void testValidateAndUpdateCache() throws Exception { List snapshotDbKeysToPurge = createSnapshots(10); assertFalse(getOmMetadataManager().getSnapshotInfoTable().isEmpty()); + List snapshotInfos = new ArrayList<>(); + for (String snapshotKey : 
snapshotDbKeysToPurge) { + snapshotInfos.add(getOmMetadataManager().getSnapshotInfoTable().get(snapshotKey)); + } // Check if all the checkpoints are created. for (Path checkpoint : checkpointPaths) { @@ -171,9 +176,9 @@ public void testValidateAndUpdateCache() throws Exception { snapshotDbKeysToPurge); OMSnapshotPurgeRequest omSnapshotPurgeRequest = preExecute(snapshotPurgeRequest); - + TransactionInfo transactionInfo = TransactionInfo.valueOf(TransactionInfo.getTermIndex(200L)); OMSnapshotPurgeResponse omSnapshotPurgeResponse = (OMSnapshotPurgeResponse) - omSnapshotPurgeRequest.validateAndUpdateCache(getOzoneManager(), 200L); + omSnapshotPurgeRequest.validateAndUpdateCache(getOzoneManager(), transactionInfo.getTransactionIndex()); for (String snapshotTableKey: snapshotDbKeysToPurge) { assertNull(getOmMetadataManager().getSnapshotInfoTable().get(snapshotTableKey)); @@ -191,6 +196,15 @@ public void testValidateAndUpdateCache() throws Exception { for (Path checkpoint : checkpointPaths) { assertFalse(Files.exists(checkpoint)); } + OmSnapshotLocalDataManager snapshotLocalDataManager = + getOzoneManager().getOmSnapshotManager().getSnapshotLocalDataManager(); + for (SnapshotInfo snapshotInfo : snapshotInfos) { + try (ReadableOmSnapshotLocalDataProvider snapProvider = + snapshotLocalDataManager.getOmSnapshotLocalData(snapshotInfo)) { + assertEquals(transactionInfo, snapProvider.getSnapshotLocalData().getTransactionInfo()); + } + } + assertEquals(initialSnapshotPurgeCount + 1, getOmSnapshotIntMetrics().getNumSnapshotPurges()); assertEquals(initialSnapshotPurgeFailCount, getOmSnapshotIntMetrics().getNumSnapshotPurgeFails()); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index 947c1a4b7f47..bfaa48c04feb 100644 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -49,6 +49,7 @@ import java.util.Map; import java.util.Set; import java.util.UUID; +import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -56,6 +57,7 @@ import org.apache.commons.compress.utils.Sets; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.StringUtils; +import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.RocksDatabase; import org.apache.hadoop.hdds.utils.db.RocksDatabaseException; @@ -374,6 +376,25 @@ public void testWriteVersionAdditionValidationWithoutPreviousSnapshotVersionExis } } + @Test + public void testUpdateTransactionInfo() throws IOException { + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + TransactionInfo transactionInfo = TransactionInfo.valueOf(ThreadLocalRandom.current().nextLong(), + ThreadLocalRandom.current().nextLong()); + UUID snapshotId = createSnapshotLocalData(localDataManager, 1).get(0); + try (WritableOmSnapshotLocalDataProvider snap = localDataManager.getWritableOmSnapshotLocalData(snapshotId)) { + OmSnapshotLocalData snapshotLocalData = snap.getSnapshotLocalData(); + assertNull(snapshotLocalData.getTransactionInfo()); + snap.setTransactionInfo(transactionInfo); + snap.commit(); + } + + try (ReadableOmSnapshotLocalDataProvider snap = localDataManager.getOmSnapshotLocalData(snapshotId)) { + OmSnapshotLocalData snapshotLocalData = snap.getSnapshotLocalData(); + assertEquals(transactionInfo, snapshotLocalData.getTransactionInfo()); + } + } + @Test public void testAddVersionFromRDB() throws IOException { localDataManager = new 
OmSnapshotLocalDataManager(omMetadataManager); @@ -774,7 +795,7 @@ private OmSnapshotLocalData createMockLocalData(UUID snapshotId, UUID previousSn sstFiles.add(createMockLiveFileMetaData("file2.sst", "columnFamily1", "key3", "key10")); sstFiles.add(createMockLiveFileMetaData("file3.sst", "columnFamily2", "key1", "key8")); sstFiles.add(createMockLiveFileMetaData("file4.sst", "columnFamily2", "key0", "key10")); - return new OmSnapshotLocalData(snapshotId, sstFiles, previousSnapshotId); + return new OmSnapshotLocalData(snapshotId, sstFiles, previousSnapshotId, null); } private void createSnapshotLocalDataFile(UUID snapshotId, UUID previousSnapshotId) From 5b55a59757e87a55ad441445c5a702c279c80ccb Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 23 Oct 2025 21:59:27 -0400 Subject: [PATCH 69/97] HDDS-13785. Merge with HDDS-13833 Change-Id: I20189e6a47d6fec15f57f1770ee75010a1bc6edb --- .../apache/hadoop/ozone/om/OmSnapshotManager.java | 10 ++++++++++ .../om/snapshot/OmSnapshotLocalDataManager.java | 14 +++++++++----- .../hadoop/ozone/om/snapshot/SnapshotUtils.java | 11 ----------- .../snapshot/TestOmSnapshotLocalDataManager.java | 4 ++-- 4 files changed, 21 insertions(+), 18 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index ad3a820c2c95..3e7c0c08702c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -345,6 +345,16 @@ public OmSnapshotManager(OzoneManager ozoneManager) throws IOException { } } + public static boolean isSnapshotPurged(SnapshotChainManager chainManager, OMMetadataManager omMetadataManager, + UUID snapshotId, TransactionInfo transactionInfo) throws IOException { + String tableKey = chainManager.getTableKey(snapshotId); + if 
(tableKey == null) { + return true; + } + return !omMetadataManager.getSnapshotInfoTable().isExist(tableKey) && transactionInfo != null && + isTransactionFlushedToDisk(omMetadataManager, transactionInfo); + } + /** * Help reject OM startup if snapshot feature is disabled * but there are snapshots remaining in this OM. Note: snapshots that are diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index a3e6be683721..1c2eeb3a0f58 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -325,7 +325,8 @@ void checkOrphanSnapshotVersions(OMMetadataManager metadataManager, SnapshotChai try (WritableOmSnapshotLocalDataProvider snapshotLocalDataProvider = new WritableOmSnapshotLocalDataProvider( snapshotId)) { OmSnapshotLocalData snapshotLocalData = snapshotLocalDataProvider.getSnapshotLocalData(); - boolean isSnapshotPurged = SnapshotUtils.isSnapshotPurged(chainManager, metadataManager, snapshotId); + boolean isSnapshotPurged = OmSnapshotManager.isSnapshotPurged(chainManager, metadataManager, snapshotId, + snapshotLocalData.getTransactionInfo()); for (Map.Entry integerLocalDataVersionNodeEntry : getVersionNodeMap() .get(snapshotId).getSnapshotVersions().entrySet()) { LocalDataVersionNode versionEntry = integerLocalDataVersionNodeEntry.getValue(); @@ -431,7 +432,8 @@ private OmSnapshotLocalData getSnapshotLocalData() { } } - private synchronized void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) throws IOException { + private synchronized void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions, + boolean transactionInfoSet) throws IOException { SnapshotVersionsMeta 
existingSnapVersions = getVersionNodeMap().remove(snapshotId); Map existingVersions = existingSnapVersions == null ? Collections.emptyMap() : existingSnapVersions.getSnapshotVersions(); @@ -463,9 +465,11 @@ private synchronized void upsertNode(UUID snapshotId, SnapshotVersionsMeta snaps snapshotVersions.getPreviousSnapshotId())) { increamentOrphanCheckCount(existingSnapVersions.getPreviousSnapshotId()); } - // If the version is also updated it could mean that there could be some orphan version present within the + // If the transactionInfo is set, this means the snapshot has been purged and the entire yaml file could have + // become an orphan. If the version is also updated, it + // could mean that there could be some orphan version present within the // same snapshot. - if (existingSnapVersions.getVersion() != snapshotVersions.getVersion()) { + if (transactionInfoSet || existingSnapVersions.getVersion() != snapshotVersions.getVersion()) { increamentOrphanCheckCount(snapshotId); } } @@ -802,7 +806,7 @@ public synchronized void commit() throws IOException { throw new IOException("Unable to delete file " + snapshotLocalDataFile.getAbsolutePath()); } } - upsertNode(super.snapshotId, localDataVersionNodes); + upsertNode(super.snapshotId, localDataVersionNodes, getSnapshotLocalData().getTransactionInfo() != null); // Reset dirty bit resetDirty(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java index f6d9d7ae17e2..63e7e38d518f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotUtils.java @@ -34,8 +34,6 @@ import java.util.Objects; import java.util.Optional; import java.util.UUID; -import org.apache.hadoop.hdds.utils.db.CodecException; -import 
org.apache.hadoop.hdds.utils.db.RocksDatabaseException; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; @@ -91,15 +89,6 @@ public static SnapshotInfo getSnapshotInfo(final OzoneManager ozoneManager, return snapshotInfo; } - public static boolean isSnapshotPurged(SnapshotChainManager chainManager, OMMetadataManager omMetadataManager, - UUID snapshotId) throws RocksDatabaseException, CodecException { - String tableKey = chainManager.getTableKey(snapshotId); - if (tableKey == null) { - return true; - } - return !omMetadataManager.getSnapshotInfoTable().isExist(tableKey); - } - public static SnapshotInfo getSnapshotInfo(OzoneManager ozoneManager, SnapshotChainManager chainManager, UUID snapshotId) throws IOException { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index 2b029a3a27b4..d4233591be34 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -171,7 +171,7 @@ public void setUp() throws IOException { when(rdbStore.getDbLocation()).thenReturn(dbLocation); this.snapshotUtilMock = mockStatic(SnapshotUtils.class); purgedSnapshotIdMap.clear(); - snapshotUtilMock.when(() -> SnapshotUtils.isSnapshotPurged(any(), any(), any())) + snapshotUtilMock.when(() -> OmSnapshotManager.isSnapshotPurged(any(), any(), any(), any())) .thenAnswer(i -> purgedSnapshotIdMap.getOrDefault(i.getArgument(2), false)); conf.setInt(OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL, -1); } @@ -397,7 +397,7 @@ public void testWriteVersionAdditionValidationWithoutPreviousSnapshotVersionExis @Test public void 
testUpdateTransactionInfo() throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); TransactionInfo transactionInfo = TransactionInfo.valueOf(ThreadLocalRandom.current().nextLong(), ThreadLocalRandom.current().nextLong()); UUID snapshotId = createSnapshotLocalData(localDataManager, 1).get(0); From 5b55a59757e87a55ad441445c5a702c279c80ccb Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 24 Oct 2025 20:38:06 -0400 Subject: [PATCH 70/97] HDDS-13783. Make local data graph synchronous Change-Id: I2f7cb5ec772d2d2de99e8d3cba8bdc34e3f36efd --- .../snapshot/OmSnapshotLocalDataManager.java | 125 ++++++++++-------- 1 file changed, 72 insertions(+), 53 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 74a4c89cfd11..99461bb8cab6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -40,6 +40,7 @@ import java.util.Set; import java.util.Stack; import java.util.UUID; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.stream.Collectors; @@ -78,6 +79,8 @@ public class OmSnapshotLocalDataManager implements AutoCloseable { private final OMMetadataManager omMetadataManager; // Used for acquiring locks on the entire data structure. 
private final ReadWriteLock fullLock; + // Used for taking a lock on internal data structure Map and Graph to ensure thread safety; + private final ReadWriteLock internalLock; // Locks should be always acquired by iterating through the snapshot chain to avoid deadlocks. private HierarchicalResourceLockManager locks; @@ -92,8 +95,9 @@ public void computeAndSetChecksum(Yaml yaml, OmSnapshotLocalData data) throws IO data.computeAndSetChecksum(yaml); } }; - this.versionNodeMap = new HashMap<>(); + this.versionNodeMap = new ConcurrentHashMap<>(); this.fullLock = new ReentrantReadWriteLock(); + this.internalLock = new ReentrantReadWriteLock(); init(); } @@ -334,29 +338,6 @@ private OmSnapshotLocalData getSnapshotLocalData() { } } - private synchronized void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) throws IOException { - SnapshotVersionsMeta existingSnapVersions = getVersionNodeMap().remove(snapshotId); - Map existingVersions = existingSnapVersions == null ? Collections.emptyMap() : - existingSnapVersions.getSnapshotVersions(); - Map> predecessors = new HashMap<>(); - // Track all predecessors of the existing versions and remove the node from the graph. - for (Map.Entry existingVersion : existingVersions.entrySet()) { - LocalDataVersionNode existingVersionNode = existingVersion.getValue(); - // Create a copy of predecessors since the list of nodes returned would be a mutable set and it changes as the - // nodes in the graph would change. - predecessors.put(existingVersion.getKey(), new ArrayList<>(localDataGraph.predecessors(existingVersionNode))); - localDataGraph.removeNode(existingVersionNode); - } - // Add the nodes to be added in the graph and map. - addSnapshotVersionMeta(snapshotId, snapshotVersions); - // Reconnect all the predecessors for existing nodes. 
- for (Map.Entry entry : snapshotVersions.getSnapshotVersions().entrySet()) { - for (LocalDataVersionNode predecessor : predecessors.getOrDefault(entry.getKey(), Collections.emptyList())) { - localDataGraph.putEdge(predecessor, entry.getValue()); - } - } - } - /** * The ReadableOmSnapshotLocalDataProvider class is responsible for managing the * access and initialization of local snapshot data in a thread-safe manner. @@ -480,11 +461,11 @@ private LockDataProviderInitResult initialize( toResolveSnapshotId = (isSnapshotToBeResolvedNullable || toResolveSnapshotId != null) ? toResolveSnapshotId : ssLocalData.getPreviousSnapshotId(); if (toResolveSnapshotId != null && previousSnapshotId != null) { + previousReadLockAcquired = acquireLock(previousSnapshotId, true); if (!versionNodeMap.containsKey(previousSnapshotId)) { throw new IOException(String.format("Operating on snapshot id : %s with previousSnapshotId: %s invalid " + "since previousSnapshotId is not loaded.", snapId, previousSnapshotId)); } - previousReadLockAcquired = acquireLock(previousSnapshotId, true); // Create a copy of the previous versionMap to get the previous versions corresponding to the previous // snapshot. This map would mutated to resolve the previous snapshot's version corresponding to the // toResolveSnapshotId by iterating through the chain of previous snapshot ids. @@ -509,22 +490,27 @@ private LockDataProviderInitResult initialize( } UUID previousId = previousIds.iterator().next(); HierarchicalResourceLock previousToPreviousReadLockAcquired = acquireLock(previousId, true); - try { // Get the version node for the snapshot and update the version node to the successor to point to the // previous node. 
for (Map.Entry entry : previousVersionNodeMap.entrySet()) { - Set versionNode = localDataGraph.successors(entry.getValue()); - if (versionNode.size() > 1) { - throw new IOException(String.format("Snapshot %s version %d has multiple successors %s", - currentIteratedSnapshotId, entry.getValue().getVersion(), versionNode)); - } - if (versionNode.isEmpty()) { - throw new IOException(String.format("Snapshot %s version %d doesn't have successor", - currentIteratedSnapshotId, entry.getValue().getVersion())); + internalLock.readLock().lock(); + try { + Set versionNode = localDataGraph.successors(entry.getValue()); + if (versionNode.size() > 1) { + throw new IOException(String.format("Snapshot %s version %d has multiple successors %s", + currentIteratedSnapshotId, entry.getValue().getVersion(), versionNode)); + } + if (versionNode.isEmpty()) { + throw new IOException(String.format("Snapshot %s version %d doesn't have successor", + currentIteratedSnapshotId, entry.getValue().getVersion())); + } + // Set the version node for iterated version to the successor corresponding to the previous snapshot + // id. + entry.setValue(versionNode.iterator().next()); + } finally { + internalLock.readLock().unlock(); } - // Set the version node for iterated version to the successor corresponding to the previous snapshot id. - entry.setValue(versionNode.iterator().next()); } } finally { // Release the read lock acquired on the previous snapshot id acquired. 
Now that the instance @@ -616,25 +602,30 @@ private WritableOmSnapshotLocalDataProvider(UUID snapshotId, private SnapshotVersionsMeta validateModification(OmSnapshotLocalData snapshotLocalData) throws IOException { - SnapshotVersionsMeta versionsToBeAdded = new SnapshotVersionsMeta(snapshotLocalData); - SnapshotVersionsMeta existingVersionsMeta = getVersionNodeMap().get(snapshotLocalData.getSnapshotId()); - for (LocalDataVersionNode node : versionsToBeAdded.getSnapshotVersions().values()) { - validateVersionAddition(node); - } - UUID snapshotId = snapshotLocalData.getSnapshotId(); - Map existingVersions = getVersionNodeMap().containsKey(snapshotId) ? - getVersionNodeMap().get(snapshotId).getSnapshotVersions() : Collections.emptyMap(); - for (Map.Entry entry : existingVersions.entrySet()) { - if (!versionsToBeAdded.getSnapshotVersions().containsKey(entry.getKey())) { - validateVersionRemoval(snapshotId, entry.getKey()); + internalLock.readLock().lock(); + try { + SnapshotVersionsMeta versionsToBeAdded = new SnapshotVersionsMeta(snapshotLocalData); + SnapshotVersionsMeta existingVersionsMeta = getVersionNodeMap().get(snapshotLocalData.getSnapshotId()); + for (LocalDataVersionNode node : versionsToBeAdded.getSnapshotVersions().values()) { + validateVersionAddition(node); } + UUID snapshotId = snapshotLocalData.getSnapshotId(); + Map existingVersions = getVersionNodeMap().containsKey(snapshotId) ? + getVersionNodeMap().get(snapshotId).getSnapshotVersions() : Collections.emptyMap(); + for (Map.Entry entry : existingVersions.entrySet()) { + if (!versionsToBeAdded.getSnapshotVersions().containsKey(entry.getKey())) { + validateVersionRemoval(snapshotId, entry.getKey()); + } + } + // Set Dirty if the snapshot doesn't exist or previousSnapshotId has changed. 
+ if (existingVersionsMeta == null || !Objects.equals(versionsToBeAdded.getPreviousSnapshotId(), + existingVersionsMeta.getPreviousSnapshotId())) { + setDirty(); + } + return versionsToBeAdded; + } finally { + internalLock.readLock().unlock(); } - // Set Dirty if the snapshot doesn't exist or previousSnapshotId has changed. - if (existingVersionsMeta == null || !Objects.equals(versionsToBeAdded.getPreviousSnapshotId(), - existingVersionsMeta.getPreviousSnapshotId())) { - setDirty(); - } - return versionsToBeAdded; } public void addSnapshotVersion(RDBStore snapshotStore) throws IOException { @@ -676,6 +667,34 @@ public synchronized void commit() throws IOException { } } + private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) throws IOException { + internalLock.writeLock().lock(); + try { + SnapshotVersionsMeta existingSnapVersions = getVersionNodeMap().remove(snapshotId); + Map existingVersions = existingSnapVersions == null ? Collections.emptyMap() : + existingSnapVersions.getSnapshotVersions(); + Map> predecessors = new HashMap<>(); + // Track all predecessors of the existing versions and remove the node from the graph. + for (Map.Entry existingVersion : existingVersions.entrySet()) { + LocalDataVersionNode existingVersionNode = existingVersion.getValue(); + // Create a copy of predecessors since the list of nodes returned would be a mutable set and it changes as the + // nodes in the graph would change. + predecessors.put(existingVersion.getKey(), new ArrayList<>(localDataGraph.predecessors(existingVersionNode))); + localDataGraph.removeNode(existingVersionNode); + } + // Add the nodes to be added in the graph and map. + addSnapshotVersionMeta(snapshotId, snapshotVersions); + // Reconnect all the predecessors for existing nodes. 
+ for (Map.Entry entry : snapshotVersions.getSnapshotVersions().entrySet()) { + for (LocalDataVersionNode predecessor : predecessors.getOrDefault(entry.getKey(), Collections.emptyList())) { + localDataGraph.putEdge(predecessor, entry.getValue()); + } + } + } finally { + internalLock.writeLock().unlock(); + } + } + private void setDirty() { dirty = true; } From 3f59895032771c884ef1f7f58367cfd0d470cbf9 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 24 Oct 2025 20:51:49 -0400 Subject: [PATCH 71/97] HDDS-13785. Use internal lock on orphan block cleanup Change-Id: I743bd8877d90037b9892f75d344c773604362eb0 --- .../om/snapshot/OmSnapshotLocalDataManager.java | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 86a41e4fb967..f94ccf350208 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -336,12 +336,18 @@ void checkOrphanSnapshotVersions(OMMetadataManager metadataManager, SnapshotChai // remove the version entry if it is not referenced by any other snapshot version node. For version node 0 // a newly created snapshot version could point to a version with indegree 0 in such a scenario a version 0 // node can be only deleted if the snapshot is also purged. 
- boolean toRemove = localDataGraph.inDegree(versionEntry) == 0 - && ((versionEntry.getVersion() != 0 && versionEntry.getVersion() != snapshotLocalData.getVersion()) - || isSnapshotPurged); - if (toRemove) { - snapshotLocalDataProvider.removeVersion(versionEntry.getVersion()); + internalLock.readLock().lock(); + try { + boolean toRemove = localDataGraph.inDegree(versionEntry) == 0 + && ((versionEntry.getVersion() != 0 && versionEntry.getVersion() != snapshotLocalData.getVersion()) + || isSnapshotPurged); + if (toRemove) { + snapshotLocalDataProvider.removeVersion(versionEntry.getVersion()); + } + } finally { + internalLock.readLock().unlock(); } + } snapshotLocalDataProvider.commit(); } From 616bef3682fa1fd12a0e3a550421476cbed2ac96 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Mon, 27 Oct 2025 14:01:27 -0400 Subject: [PATCH 72/97] HDDS-13783. Fix NPE with concurrentHashMap Change-Id: I960fd1d25e0f323d56f5e88ffb03261d777d9021 --- .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 99461bb8cab6..bfe74822476d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -179,7 +179,7 @@ public OmSnapshotLocalData getOmSnapshotLocalData(File snapshotDataPath) throws } private LocalDataVersionNode getVersionNode(UUID snapshotId, int version) { - if (!versionNodeMap.containsKey(snapshotId)) { + if (snapshotId == null || !versionNodeMap.containsKey(snapshotId)) { return null; } return versionNodeMap.get(snapshotId).getVersionNode(version); @@ -190,7 +190,7 @@ private void 
addSnapshotVersionMeta(UUID snapshotId, SnapshotVersionsMeta snapsh if (!versionNodeMap.containsKey(snapshotId)) { for (LocalDataVersionNode versionNode : snapshotVersionsMeta.getSnapshotVersions().values()) { validateVersionAddition(versionNode); - LocalDataVersionNode previousVersionNode = versionNode.previousSnapshotId == null ? null : + LocalDataVersionNode previousVersionNode = getVersionNode(versionNode.previousSnapshotId, versionNode.previousSnapshotVersion); localDataGraph.addNode(versionNode); if (previousVersionNode != null) { From b0023d11132696b9f560c88d8376652be2615a78 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 28 Oct 2025 06:27:34 -0400 Subject: [PATCH 73/97] HDDS-13830. Snapshot Rocks DB directory path computation based on local version of snapshot Change-Id: Ib4af980b466bda88b19c5793c1c666bc66895a30 --- .../hadoop/ozone/client/OzoneSnapshot.java | 2 +- .../ozone/client/TestOzoneSnapshot.java | 4 +- .../hadoop/ozone/om/helpers/SnapshotInfo.java | 39 +++-------- .../ozone/om/helpers/TestOmSnapshotInfo.java | 1 - .../hadoop/fs/ozone/TestOzoneFsSnapshot.java | 2 +- .../hadoop/ozone/freon/TestOMSnapshotDAG.java | 2 +- .../ozone/om/TestOMDbCheckpointServlet.java | 2 +- ...stOMDbCheckpointServletInodeBasedXfer.java | 2 +- .../hadoop/ozone/om/TestOMRatisSnapshots.java | 6 +- .../ozone/om/snapshot/TestOmSnapshot.java | 2 +- .../om/snapshot/TestOmSnapshotFileSystem.java | 2 +- .../snapshot/TestOzoneManagerHASnapshot.java | 2 +- .../snapshot/TestOzoneManagerSnapshotAcl.java | 2 +- .../om/snapshot/TestOzoneSnapshotRestore.java | 2 +- .../TestSnapshotBackgroundServices.java | 2 +- .../ozone/om/OMDBCheckpointServlet.java | 15 ++-- .../OMDBCheckpointServletInodeBasedXfer.java | 20 +++--- .../hadoop/ozone/om/OmSnapshotManager.java | 32 ++++----- .../ozone/om/SnapshotDefragService.java | 12 +++- .../hadoop/ozone/om/SstFilteringService.java | 5 +- .../snapshot/OMSnapshotPurgeResponse.java | 22 +++--- 
.../snapshot/OmSnapshotLocalDataManager.java | 69 +++++++++++++++---- .../ozone/om/TestOmSnapshotManager.java | 2 +- .../TestOMSnapshotCreateResponse.java | 2 +- .../TestOMSnapshotDeleteResponse.java | 2 +- .../TestOmSnapshotLocalDataManager.java | 4 +- .../ozone/om/snapshot/TestSnapshotChain.java | 1 - .../om/snapshot/TestSnapshotDiffManager.java | 3 - .../ozone/om/snapshot/TestSnapshotInfo.java | 1 - .../TestSnapshotRequestAndResponse.java | 2 +- .../om/snapshot/TestSstFilteringService.java | 6 +- 31 files changed, 149 insertions(+), 121 deletions(-) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java index b7bf7051caeb..95f05a50e064 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java @@ -192,7 +192,7 @@ public static OzoneSnapshot fromSnapshotInfo(SnapshotInfo snapshotInfo) { snapshotInfo.getSnapshotStatus(), snapshotInfo.getSnapshotId(), snapshotInfo.getSnapshotPath(), - snapshotInfo.getCheckpointDir(), + snapshotInfo.getCheckpointDirName(0), snapshotInfo.getReferencedSize(), snapshotInfo.getReferencedReplicatedSize(), snapshotInfo.getExclusiveSize() + snapshotInfo.getExclusiveSizeDeltaFromDirDeepCleaning(), diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java index 8980e28b59b4..16cf58ab5a2c 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.client; +import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_SEPARATOR; import static 
org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.Mockito.when; @@ -40,7 +41,6 @@ private SnapshotInfo getMockedSnapshotInfo(UUID snapshotId) { when(snapshotInfo.getSnapshotStatus()).thenReturn(SNAPSHOT_ACTIVE); when(snapshotInfo.getSnapshotId()).thenReturn(snapshotId); when(snapshotInfo.getSnapshotPath()).thenReturn("volume/bucket"); - when(snapshotInfo.getCheckpointDir()).thenReturn("checkpointDir"); when(snapshotInfo.getReferencedSize()).thenReturn(1000L); when(snapshotInfo.getReferencedReplicatedSize()).thenReturn(3000L); when(snapshotInfo.getExclusiveSize()).thenReturn(4000L); @@ -57,7 +57,7 @@ public void testOzoneSnapshotFromSnapshotInfo() { OzoneSnapshot ozoneSnapshot = OzoneSnapshot.fromSnapshotInfo(snapshotInfo); OzoneSnapshot expectedOzoneSnapshot = new OzoneSnapshot( "volume", "bucket", "snap", 1000L, SNAPSHOT_ACTIVE, snapshotId, - "volume/bucket", "checkpointDir", 1000L, 3000L, 6000L, 18000L); + "volume/bucket", OM_SNAPSHOT_SEPARATOR + snapshotId, 1000L, 3000L, 6000L, 18000L); assertEquals(expectedOzoneSnapshot, ozoneSnapshot); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java index cbc3709ea1e8..a26422cd81fb 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java @@ -71,7 +71,6 @@ public final class SnapshotInfo implements Auditable, CopyObject { private UUID pathPreviousSnapshotId; private UUID globalPreviousSnapshotId; private String snapshotPath; // snapshot mask - private String checkpointDir; /** * RocksDB's transaction sequence number at the time of checkpoint creation. 
*/ @@ -99,7 +98,6 @@ private SnapshotInfo(Builder b) { this.pathPreviousSnapshotId = b.pathPreviousSnapshotId; this.globalPreviousSnapshotId = b.globalPreviousSnapshotId; this.snapshotPath = b.snapshotPath; - this.checkpointDir = b.checkpointDir; this.dbTxSequenceNumber = b.dbTxSequenceNumber; this.deepClean = b.deepClean; this.sstFiltered = b.sstFiltered; @@ -150,10 +148,6 @@ public void setSnapshotPath(String snapshotPath) { this.snapshotPath = snapshotPath; } - public void setCheckpointDir(String checkpointDir) { - this.checkpointDir = checkpointDir; - } - public boolean isDeepCleaned() { return deepClean; } @@ -202,10 +196,6 @@ public String getSnapshotPath() { return snapshotPath; } - public String getCheckpointDir() { - return checkpointDir; - } - public boolean isSstFiltered() { return sstFiltered; } @@ -231,7 +221,6 @@ public SnapshotInfo.Builder toBuilder() { .setPathPreviousSnapshotId(pathPreviousSnapshotId) .setGlobalPreviousSnapshotId(globalPreviousSnapshotId) .setSnapshotPath(snapshotPath) - .setCheckpointDir(checkpointDir) .setDbTxSequenceNumber(dbTxSequenceNumber) .setDeepClean(deepClean) .setSstFiltered(sstFiltered) @@ -260,7 +249,6 @@ public static class Builder { private UUID pathPreviousSnapshotId; private UUID globalPreviousSnapshotId; private String snapshotPath; - private String checkpointDir; private long dbTxSequenceNumber; private boolean deepClean; private boolean sstFiltered; @@ -339,12 +327,6 @@ public Builder setSnapshotPath(String snapshotPath) { return this; } - /** @param checkpointDir - Snapshot checkpoint directory. */ - public Builder setCheckpointDir(String checkpointDir) { - this.checkpointDir = checkpointDir; - return this; - } - /** @param dbTxSequenceNumber - RDB latest transaction sequence number. 
*/ public Builder setDbTxSequenceNumber(long dbTxSequenceNumber) { this.dbTxSequenceNumber = dbTxSequenceNumber; @@ -459,7 +441,6 @@ public OzoneManagerProtocolProtos.SnapshotInfo getProtobuf() { } sib.setSnapshotPath(snapshotPath) - .setCheckpointDir(checkpointDir) .setDbTxSequenceNumber(dbTxSequenceNumber) .setDeepClean(deepClean); return sib.build(); @@ -544,7 +525,6 @@ public static SnapshotInfo getFromProtobuf( } osib.setSnapshotPath(snapshotInfoProto.getSnapshotPath()) - .setCheckpointDir(snapshotInfoProto.getCheckpointDir()) .setDbTxSequenceNumber(snapshotInfoProto.getDbTxSequenceNumber()); return osib.build(); @@ -562,17 +542,20 @@ public Map toAuditMap() { /** * Get the name of the checkpoint directory. */ - public static String getCheckpointDirName(UUID snapshotId) { + public static String getCheckpointDirName(UUID snapshotId, int version) { Objects.requireNonNull(snapshotId, "SnapshotId is needed to create checkpoint directory"); - return OM_SNAPSHOT_SEPARATOR + snapshotId; + if (version == 0) { + return OM_SNAPSHOT_SEPARATOR + snapshotId; + } + return OM_SNAPSHOT_SEPARATOR + snapshotId + OM_SNAPSHOT_SEPARATOR + version; } /** * Get the name of the checkpoint directory, (non-static). 
*/ - public String getCheckpointDirName() { - return getCheckpointDirName(getSnapshotId()); + public String getCheckpointDirName(int version) { + return getCheckpointDirName(getSnapshotId(), version); } public long getDbTxSequenceNumber() { @@ -703,10 +686,6 @@ public static SnapshotInfo newInstance(String volumeName, .setBucketName(bucketName) .setDeepClean(false) .setDeepCleanedDeletedDir(false); - - if (snapshotId != null) { - builder.setCheckpointDir(getCheckpointDirName(snapshotId)); - } return builder.build(); } @@ -729,7 +708,6 @@ public boolean equals(Object o) { Objects.equals( globalPreviousSnapshotId, that.globalPreviousSnapshotId) && snapshotPath.equals(that.snapshotPath) && - checkpointDir.equals(that.checkpointDir) && deepClean == that.deepClean && sstFiltered == that.sstFiltered && referencedSize == that.referencedSize && @@ -746,7 +724,7 @@ public int hashCode() { return Objects.hash(snapshotId, name, volumeName, bucketName, snapshotStatus, creationTime, deletionTime, pathPreviousSnapshotId, - globalPreviousSnapshotId, snapshotPath, checkpointDir, + globalPreviousSnapshotId, snapshotPath, deepClean, sstFiltered, referencedSize, referencedReplicatedSize, exclusiveSize, exclusiveReplicatedSize, deepCleanedDeletedDir, lastTransactionInfo, createTransactionInfo); @@ -773,7 +751,6 @@ public String toString() { ", pathPreviousSnapshotId: '" + pathPreviousSnapshotId + '\'' + ", globalPreviousSnapshotId: '" + globalPreviousSnapshotId + '\'' + ", snapshotPath: '" + snapshotPath + '\'' + - ", checkpointDir: '" + checkpointDir + '\'' + ", dbTxSequenceNumber: '" + dbTxSequenceNumber + '\'' + ", deepClean: '" + deepClean + '\'' + ", sstFiltered: '" + sstFiltered + '\'' + diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java index 98cc035b3c07..7750b32e2e0a 100644 --- 
a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java @@ -60,7 +60,6 @@ private SnapshotInfo createSnapshotInfo() { .setPathPreviousSnapshotId(PATH_PREVIOUS_SNAPSHOT_ID) .setGlobalPreviousSnapshotId(GLOBAL_PREVIOUS_SNAPSHOT_ID) .setSnapshotPath(SNAPSHOT_PATH) - .setCheckpointDir(CHECKPOINT_DIR) .setDbTxSequenceNumber(DB_TX_SEQUENCE_NUMBER) .setDeepClean(false) .setSstFiltered(false) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java index d02319a4cab6..6a97796af32b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsSnapshot.java @@ -547,7 +547,7 @@ private String createSnapshot() throws Exception { SnapshotInfo snapshotInfo = ozoneManager.getMetadataManager() .getSnapshotInfoTable() .get(SnapshotInfo.getTableKey(VOLUME, BUCKET, snapshotName)); - String snapshotDirName = getSnapshotPath(conf, snapshotInfo) + + String snapshotDirName = getSnapshotPath(conf, snapshotInfo, 0) + OM_KEY_PREFIX + "CURRENT"; GenericTestUtils.waitFor(() -> new File(snapshotDirName).exists(), 1000, 100000); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java index 5429dc0f4a12..9f69ed51b7ca 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestOMSnapshotDAG.java @@ -130,7 +130,7 @@ public static void shutdown() { } private String 
getDBCheckpointAbsolutePath(SnapshotInfo snapshotInfo) { - return OmSnapshotManager.getSnapshotPath(conf, snapshotInfo); + return OmSnapshotManager.getSnapshotPath(conf, snapshotInfo, 0); } private static String getSnapshotDBKey(String volumeName, String bucketName, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java index 3d542785e113..d0b38116d5fa 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java @@ -733,7 +733,7 @@ private String createSnapshot(String vname, String bname) writeClient.createSnapshot(vname, bname, snapshotName); SnapshotInfo snapshotInfo = om.getMetadataManager().getSnapshotInfoTable() .get(SnapshotInfo.getTableKey(vname, bname, snapshotName)); - String snapshotPath = getSnapshotPath(conf, snapshotInfo) + String snapshotPath = getSnapshotPath(conf, snapshotInfo, 0) + OM_KEY_PREFIX; GenericTestUtils.waitFor(() -> new File(snapshotPath).exists(), 100, 30000); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServletInodeBasedXfer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServletInodeBasedXfer.java index 0f5c8bae4b46..5ab71373dd4d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServletInodeBasedXfer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServletInodeBasedXfer.java @@ -228,7 +228,7 @@ public void write(int b) throws IOException { .thenReturn(lock); doCallRealMethod().when(omDbCheckpointServletMock).getCheckpoint(any(), anyBoolean()); 
assertNull(doCallRealMethod().when(omDbCheckpointServletMock).getBootstrapTempData()); - doCallRealMethod().when(omDbCheckpointServletMock).getSnapshotDirs(any()); + doCallRealMethod().when(omDbCheckpointServletMock).getSnapshotDirs(any(), any(), any()); doCallRealMethod().when(omDbCheckpointServletMock). processMetadataSnapshotRequest(any(), any(), anyBoolean(), anyBoolean()); doCallRealMethod().when(omDbCheckpointServletMock).writeDbDataToStream(any(), any(), any(), any()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java index a1de8fc377a0..3609703c7ef6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java @@ -337,11 +337,11 @@ private void checkSnapshot(OzoneManager leaderOM, OzoneManager followerOM, File followerMetaDir = OMStorage.getOmDbDir(followerOM.getConfiguration()); Path followerActiveDir = Paths.get(followerMetaDir.toString(), OM_DB_NAME); Path followerSnapshotDir = - Paths.get(getSnapshotPath(followerOM.getConfiguration(), snapshotInfo)); + Paths.get(getSnapshotPath(followerOM.getConfiguration(), snapshotInfo, 0)); File leaderMetaDir = OMStorage.getOmDbDir(leaderOM.getConfiguration()); Path leaderActiveDir = Paths.get(leaderMetaDir.toString(), OM_DB_NAME); Path leaderSnapshotDir = - Paths.get(getSnapshotPath(leaderOM.getConfiguration(), snapshotInfo)); + Paths.get(getSnapshotPath(leaderOM.getConfiguration(), snapshotInfo, 0)); // Get list of live files on the leader. 
RocksDB activeRocksDB = ((RDBStore) leaderOM.getMetadataManager().getStore()) @@ -1056,7 +1056,7 @@ private SnapshotInfo createOzoneSnapshot(OzoneManager leaderOM, String name) .get(tableKey); // Allow the snapshot to be written to disk String fileName = - getSnapshotPath(leaderOM.getConfiguration(), snapshotInfo); + getSnapshotPath(leaderOM.getConfiguration(), snapshotInfo, 0); File snapshotDir = new File(fileName); if (!RDBCheckpointUtils .waitForCheckpointDirectoryExist(snapshotDir)) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java index 93dba945d46d..19b237fe2600 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java @@ -1990,7 +1990,7 @@ private String createSnapshot(String volName, String buckName, .get(SnapshotInfo.getTableKey(volName, linkedBuckets.getOrDefault(buckName, buckName), snapshotName)); String snapshotDirName = OmSnapshotManager.getSnapshotPath(ozoneManager.getConfiguration(), - snapshotInfo) + OM_KEY_PREFIX + "CURRENT"; + snapshotInfo, 0) + OM_KEY_PREFIX + "CURRENT"; GenericTestUtils .waitFor(() -> new File(snapshotDirName).exists(), 1000, 120000); return snapshotKeyPrefix; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java index fca8b137b720..964513702a08 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotFileSystem.java @@ -709,7 +709,7 @@ private String 
createSnapshot(String snapshotName) SnapshotInfo snapshotInfo = ozoneManager.getMetadataManager() .getSnapshotInfoTable() .get(SnapshotInfo.getTableKey(snapshot.getVolumeName(), snapshot.getBucketName(), snapshotName)); - String snapshotDirName = getSnapshotPath(conf, snapshotInfo) + + String snapshotDirName = getSnapshotPath(conf, snapshotInfo, 0) + OM_KEY_PREFIX + "CURRENT"; GenericTestUtils.waitFor(() -> new File(snapshotDirName).exists(), 1000, 120000); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java index bae852ae3368..b6008ab3d2e2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerHASnapshot.java @@ -410,7 +410,7 @@ private void createSnapshot(String volName, String buckName, String snapName) th String tableKey = SnapshotInfo.getTableKey(volName, buckName, snapName); SnapshotInfo snapshotInfo = SnapshotUtils.getSnapshotInfo(cluster.getOMLeader(), tableKey); - String fileName = getSnapshotPath(cluster.getOMLeader().getConfiguration(), snapshotInfo); + String fileName = getSnapshotPath(cluster.getOMLeader().getConfiguration(), snapshotInfo, 0); File snapshotDir = new File(fileName); if (!RDBCheckpointUtils.waitForCheckpointDirectoryExist(snapshotDir)) { throw new IOException("Snapshot directory doesn't exist"); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java index f735ad15d295..455f1430d997 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotAcl.java @@ -685,7 +685,7 @@ private void createSnapshot() .get(SnapshotInfo.getTableKey(volumeName, bucketName, snapshotName)); // Allow the snapshot to be written to disk String fileName = - getSnapshotPath(ozoneManager.getConfiguration(), snapshotInfo); + getSnapshotPath(ozoneManager.getConfiguration(), snapshotInfo, 0); File snapshotDir = new File(fileName); if (!RDBCheckpointUtils .waitForCheckpointDirectoryExist(snapshotDir)) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java index 6c67554d7b8d..b2fde1f01960 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneSnapshotRestore.java @@ -159,7 +159,7 @@ private String createSnapshot(String volName, String buckName, .getSnapshotInfoTable() .get(SnapshotInfo.getTableKey(volName, buckName, snapshotName)); String snapshotDirName = OmSnapshotManager - .getSnapshotPath(clientConf, snapshotInfo) + OM_KEY_PREFIX + "CURRENT"; + .getSnapshotPath(clientConf, snapshotInfo, 0) + OM_KEY_PREFIX + "CURRENT"; GenericTestUtils.waitFor(() -> new File(snapshotDirName).exists(), 1000, 120000); return snapshotKeyPrefix; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotBackgroundServices.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotBackgroundServices.java index a67a4599beee..eacde483d2ac 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotBackgroundServices.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotBackgroundServices.java @@ -633,7 +633,7 @@ private SnapshotInfo createOzoneSnapshot(OzoneManager leaderOM, String name) thr .getSnapshotInfoTable() .get(tableKey); // Allow the snapshot to be written to disk - String fileName = getSnapshotPath(leaderOM.getConfiguration(), snapshotInfo); + String fileName = getSnapshotPath(leaderOM.getConfiguration(), snapshotInfo, 0); File snapshotDir = new File(fileName); if (!RDBCheckpointUtils.waitForCheckpointDirectoryExist(snapshotDir)) { throw new IOException("snapshot directory doesn't exist"); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java index 4d85e9f07472..efe9fc0aeea9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java @@ -69,6 +69,7 @@ import org.apache.hadoop.ozone.lock.BootstrapStateHandler; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.snapshot.OMDBCheckpointUtils; +import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; @@ -347,7 +348,8 @@ private Set getSnapshotDirs(DBCheckpoint checkpoint, boolean waitForDir) OzoneConfiguration conf = getConf(); Set snapshotPaths = new HashSet<>(); - + OzoneManager om = (OzoneManager) getServletContext().getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE); + OmSnapshotLocalDataManager snapshotLocalDataManager = 
om.getOmSnapshotManager().getSnapshotLocalDataManager(); // get snapshotInfo entries OmMetadataManagerImpl checkpointMetadataManager = OmMetadataManagerImpl.createCheckpointMetadataManager( @@ -359,11 +361,14 @@ private Set getSnapshotDirs(DBCheckpoint checkpoint, boolean waitForDir) // For each entry, wait for corresponding directory. while (iterator.hasNext()) { Table.KeyValue entry = iterator.next(); - Path path = Paths.get(getSnapshotPath(conf, entry.getValue())); - if (waitForDir) { - waitForDirToExist(path); + try (OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataMetaProvider snapMetaProvider = + snapshotLocalDataManager.getOmSnapshotLocalDataMeta(entry.getValue())) { + Path path = Paths.get(getSnapshotPath(conf, entry.getValue(), snapMetaProvider.getMeta().getVersion())); + if (waitForDir) { + waitForDirToExist(path); + } + snapshotPaths.add(path); } - snapshotPaths.add(path); } } finally { checkpointMetadataManager.stop(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java index 27e7f1c2d6d6..88e018ace4d1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java @@ -27,6 +27,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.ROCKSDB_SST_SUFFIX; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_KEY; +import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPath; import static org.apache.hadoop.ozone.om.lock.FlatResource.SNAPSHOT_DB_LOCK; import static org.apache.hadoop.ozone.om.snapshot.OMDBCheckpointUtils.includeSnapshotData; 
import static org.apache.hadoop.ozone.om.snapshot.OMDBCheckpointUtils.logEstimatedTarballSize; @@ -69,7 +70,6 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.lock.BootstrapStateHandler; -import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils; import org.apache.hadoop.security.UserGroupInformation; @@ -210,6 +210,7 @@ public void writeDbDataToStream(HttpServletRequest request, OutputStream destina DBCheckpoint checkpoint = null; OzoneManager om = (OzoneManager) getServletContext().getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE); OMMetadataManager omMetadataManager = om.getMetadataManager(); + OmSnapshotLocalDataManager snapshotLocalDataManager = om.getOmSnapshotManager().getSnapshotLocalDataManager(); boolean includeSnapshotData = includeSnapshotData(request); AtomicLong maxTotalSstSize = new AtomicLong(getConf().getLong(OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_KEY, OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_DEFAULT)); @@ -219,7 +220,7 @@ public void writeDbDataToStream(HttpServletRequest request, OutputStream destina if (!includeSnapshotData) { maxTotalSstSize.set(Long.MAX_VALUE); } else { - snapshotPaths = getSnapshotDirs(omMetadataManager); + snapshotPaths = getSnapshotDirs(omMetadataManager, omMetadataManager, snapshotLocalDataManager); } if (sstFilesToExclude.isEmpty()) { @@ -382,18 +383,21 @@ private OzoneConfiguration getConf() { /** * Collects paths to all snapshot databases. 
* - * @param omMetadataManager OMMetadataManager instance + * @param activeOMMetadataManager OMMetadataManager instance * @return Set of paths to snapshot databases * @throws IOException if an I/O error occurs */ - Set getSnapshotDirs(OMMetadataManager omMetadataManager) throws IOException { + Set getSnapshotDirs(OMMetadataManager activeOMMetadataManager, OMMetadataManager omMetadataManager, + OmSnapshotLocalDataManager localDataManager) throws IOException { Set snapshotPaths = new HashSet<>(); SnapshotChainManager snapshotChainManager = new SnapshotChainManager(omMetadataManager); for (SnapshotChainInfo snapInfo : snapshotChainManager.getGlobalSnapshotChain().values()) { - String snapshotDir = - OmSnapshotManager.getSnapshotPath(getConf(), SnapshotInfo.getCheckpointDirName(snapInfo.getSnapshotId())); - Path path = Paths.get(snapshotDir); - snapshotPaths.add(path); + try (OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataMetaProvider snapLocalMeta = + localDataManager.getOmSnapshotLocalDataMeta(snapInfo.getSnapshotId())) { + Path snapshotDir = getSnapshotPath(activeOMMetadataManager, + snapInfo.getSnapshotId(), snapLocalMeta.getMeta().getVersion()); + snapshotPaths.add(snapshotDir); + } } return snapshotPaths; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index 7b9beb80cf6f..0954b029ab67 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -415,8 +415,12 @@ public OmSnapshot load(@Nonnull UUID snapshotId) throws IOException { "' with txnId : '" + TransactionInfo.fromByteString(snapshotInfo.getCreateTransactionInfo()) + "' has not been flushed yet. 
Please wait a few more seconds before retrying", TIMEOUT); } - snapshotMetadataManager = new OmMetadataManagerImpl(conf, - snapshotInfo.getCheckpointDirName(), maxOpenSstFilesInSnapshotDb); + try (OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataMetaProvider snapshotLocalDataProvider = + snapshotLocalDataManager.getOmSnapshotLocalDataMeta(snapshotInfo)) { + snapshotMetadataManager = new OmMetadataManagerImpl(conf, + snapshotInfo.getCheckpointDirName(snapshotLocalDataProvider.getMeta().getVersion()), + maxOpenSstFilesInSnapshotDb); + } } catch (IOException e) { LOG.error("Failed to retrieve snapshot: {}", snapshotTableKey, e); throw e; @@ -505,14 +509,12 @@ public static DBCheckpoint createOmSnapshotCheckpoint( boolean snapshotDirExist = false; // Create DB checkpoint for snapshot - String checkpointPrefix = store.getDbLocation().getName(); - Path snapshotDirPath = Paths.get(store.getSnapshotsParentDir(), - checkpointPrefix + snapshotInfo.getCheckpointDir()); + Path snapshotDirPath = getSnapshotPath(omMetadataManager, snapshotInfo, 0); if (Files.exists(snapshotDirPath)) { snapshotDirExist = true; dbCheckpoint = new RocksDBCheckpoint(snapshotDirPath); } else { - dbCheckpoint = store.getSnapshot(snapshotInfo.getCheckpointDirName()); + dbCheckpoint = store.getSnapshot(snapshotInfo.getCheckpointDirName(0)); } OmSnapshotManager omSnapshotManager = ((OmMetadataManagerImpl) omMetadataManager).getOzoneManager().getOmSnapshotManager(); @@ -796,27 +798,23 @@ public static String getSnapshotPrefix(String snapshotName) { snapshotName + OM_KEY_PREFIX; } - public static Path getSnapshotPath(OMMetadataManager omMetadataManager, SnapshotInfo snapshotInfo) { - RDBStore store = (RDBStore) omMetadataManager.getStore(); - String checkpointPrefix = store.getDbLocation().getName(); - return Paths.get(store.getSnapshotsParentDir(), - checkpointPrefix + snapshotInfo.getCheckpointDir()); + public static Path getSnapshotPath(OMMetadataManager omMetadataManager, SnapshotInfo 
snapshotInfo, int version) { + return getSnapshotPath(omMetadataManager, snapshotInfo.getSnapshotId(), version); } - public static Path getSnapshotPath(OMMetadataManager omMetadataManager, UUID snapshotId) { + public static Path getSnapshotPath(OMMetadataManager omMetadataManager, UUID snapshotId, int version) { RDBStore store = (RDBStore) omMetadataManager.getStore(); String checkpointPrefix = store.getDbLocation().getName(); return Paths.get(store.getSnapshotsParentDir(), - checkpointPrefix + SnapshotInfo.getCheckpointDirName(snapshotId)); + checkpointPrefix + SnapshotInfo.getCheckpointDirName(snapshotId, version)); } public static String getSnapshotPath(OzoneConfiguration conf, - SnapshotInfo snapshotInfo) { - return getSnapshotPath(conf, snapshotInfo.getCheckpointDirName()); + SnapshotInfo snapshotInfo, int version) { + return getSnapshotPath(conf, snapshotInfo.getCheckpointDirName(version)); } - public static String getSnapshotPath(OzoneConfiguration conf, - String checkpointDirName) { + private static String getSnapshotPath(OzoneConfiguration conf, String checkpointDirName) { return OMStorage.getOmDbDir(conf) + OM_KEY_PREFIX + OM_SNAPSHOT_CHECKPOINT_DIR + OM_KEY_PREFIX + OM_DB_NAME + checkpointDirName; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java index 87f6ff55bb71..0bc0a6cd0abf 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java @@ -23,6 +23,7 @@ import com.google.common.annotations.VisibleForTesting; import java.io.IOException; +import java.nio.file.Path; import java.util.Collections; import java.util.Iterator; import java.util.Optional; @@ -79,6 +80,7 @@ public class SnapshotDefragService extends BackgroundService private final AtomicBoolean 
running; private final MultiSnapshotLocks snapshotIdLocks; + private final OzoneConfiguration conf; private final BootstrapStateHandler.Lock lock = new BootstrapStateHandler.Lock(); @@ -90,6 +92,7 @@ public SnapshotDefragService(long interval, TimeUnit unit, long serviceTimeout, this.snapshotLimitPerTask = configuration .getLong(SNAPSHOT_DEFRAG_LIMIT_PER_TASK, SNAPSHOT_DEFRAG_LIMIT_PER_TASK_DEFAULT); + this.conf = configuration; snapshotsDefraggedCount = new AtomicLong(0); running = new AtomicBoolean(false); IOzoneManagerLock omLock = ozoneManager.getMetadataManager().getLock(); @@ -128,11 +131,14 @@ private boolean isRocksToolsNativeLibAvailable() { * Checks if a snapshot needs defragmentation by examining its YAML metadata. */ private boolean needsDefragmentation(SnapshotInfo snapshotInfo) { - String snapshotPath = OmSnapshotManager.getSnapshotPath( - ozoneManager.getConfiguration(), snapshotInfo); - + if (!SstFilteringService.isSstFiltered(conf, snapshotInfo)) { + return false; + } try (OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataProvider readableOmSnapshotLocalDataProvider = ozoneManager.getOmSnapshotManager().getSnapshotLocalDataManager().getOmSnapshotLocalData(snapshotInfo)) { + Path snapshotPath = OmSnapshotManager.getSnapshotPath( + ozoneManager.getMetadataManager(), snapshotInfo, + readableOmSnapshotLocalDataProvider.getSnapshotLocalData().getVersion()); // Read snapshot local metadata from YAML OmSnapshotLocalData snapshotLocalData = readableOmSnapshotLocalDataProvider.getSnapshotLocalData(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java index 522ea7df6de5..4b5002eb6c4a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java @@ -87,7 +87,7 @@ 
public class SstFilteringService extends BackgroundService public static boolean isSstFiltered(OzoneConfiguration ozoneConfiguration, SnapshotInfo snapshotInfo) { Path sstFilteredFile = Paths.get(OmSnapshotManager.getSnapshotPath(ozoneConfiguration, - snapshotInfo), SST_FILTERED_FILE); + snapshotInfo, 0), SST_FILTERED_FILE); return snapshotInfo.isSstFiltered() || sstFilteredFile.toFile().exists(); } @@ -138,7 +138,8 @@ private void markSSTFilteredFlagForSnapshot(SnapshotInfo snapshotInfo) throws IO .acquireReadLock(SNAPSHOT_DB_LOCK, snapshotInfo.getSnapshotId().toString()); boolean acquiredSnapshotLock = omLockDetails.isLockAcquired(); if (acquiredSnapshotLock) { - String snapshotDir = OmSnapshotManager.getSnapshotPath(ozoneManager.getConfiguration(), snapshotInfo); + // Ensure snapshot is sstFiltered before defrag. + String snapshotDir = OmSnapshotManager.getSnapshotPath(ozoneManager.getConfiguration(), snapshotInfo, 0); try { // mark the snapshot as filtered by creating a file. if (Files.exists(Paths.get(snapshotDir))) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java index 3797b3fcf2eb..407da697b1da 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java @@ -119,7 +119,7 @@ private void updateSnapInfo(OmMetadataManagerImpl metadataManager, * Deletes the checkpoint directory for a snapshot. 
*/ private void deleteCheckpointDirectory(OmSnapshotLocalDataManager snapshotLocalDataManager, - OMMetadataManager omMetadataManager, SnapshotInfo snapshotInfo) { + OMMetadataManager omMetadataManager, SnapshotInfo snapshotInfo) throws IOException { // Acquiring write lock to avoid race condition with sst filtering service which creates a sst filtered file // inside the snapshot directory. Any operation apart which doesn't create/delete files under this snapshot // directory can run in parallel along with this operation. @@ -127,14 +127,18 @@ private void deleteCheckpointDirectory(OmSnapshotLocalDataManager snapshotLocalD .acquireWriteLock(SNAPSHOT_DB_LOCK, snapshotInfo.getSnapshotId().toString()); boolean acquiredSnapshotLock = omLockDetails.isLockAcquired(); if (acquiredSnapshotLock) { - Path snapshotDirPath = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotInfo); - try { - FileUtils.deleteDirectory(snapshotDirPath.toFile()); - } catch (IOException ex) { - LOG.error("Failed to delete snapshot directory {} for snapshot {}", - snapshotDirPath, snapshotInfo.getTableKey(), ex); - } finally { - omMetadataManager.getLock().releaseWriteLock(SNAPSHOT_DB_LOCK, snapshotInfo.getSnapshotId().toString()); + try (OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataMetaProvider snapMetaProvider = + snapshotLocalDataManager.getOmSnapshotLocalDataMeta(snapshotInfo)) { + Path snapshotDirPath = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotInfo, + snapMetaProvider.getMeta().getVersion()); + try { + FileUtils.deleteDirectory(snapshotDirPath.toFile()); + } catch (IOException ex) { + LOG.error("Failed to delete snapshot directory {} for snapshot {}", + snapshotDirPath, snapshotInfo.getTableKey(), ex); + } finally { + omMetadataManager.getLock().releaseWriteLock(SNAPSHOT_DB_LOCK, snapshotInfo.getSnapshotId().toString()); + } } } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index bfe74822476d..54dfc0477827 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -127,7 +127,7 @@ public String getSnapshotLocalPropertyYamlPath(SnapshotInfo snapshotInfo) { } public String getSnapshotLocalPropertyYamlPath(UUID snapshotId) { - Path snapshotPath = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotId); + Path snapshotPath = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotId, 0); return getSnapshotLocalPropertyYamlPath(snapshotPath); } @@ -146,6 +146,14 @@ public void createNewOmSnapshotLocalDataFile(RDBStore snapshotStore, SnapshotInf } } + public ReadableOmSnapshotLocalDataMetaProvider getOmSnapshotLocalDataMeta(SnapshotInfo snapInfo) throws IOException { + return getOmSnapshotLocalDataMeta(snapInfo.getSnapshotId()); + } + + public ReadableOmSnapshotLocalDataMetaProvider getOmSnapshotLocalDataMeta(UUID snapshotId) throws IOException { + return new ReadableOmSnapshotLocalDataMetaProvider(snapshotId); + } + public ReadableOmSnapshotLocalDataProvider getOmSnapshotLocalData(SnapshotInfo snapshotInfo) throws IOException { return getOmSnapshotLocalData(snapshotInfo.getSnapshotId()); } @@ -307,6 +315,15 @@ public void close() { } } + private HierarchicalResourceLock acquireLock(UUID snapId, boolean readLock) throws IOException { + HierarchicalResourceLock acquiredLock = readLock ? 
locks.acquireReadLock(FlatResource.SNAPSHOT_LOCAL_DATA_LOCK, + snapId.toString()) : locks.acquireWriteLock(FlatResource.SNAPSHOT_LOCAL_DATA_LOCK, snapId.toString()); + if (!acquiredLock.isLockAcquired()) { + throw new IOException("Unable to acquire lock for snapshotId: " + snapId); + } + return acquiredLock; + } + private static final class LockDataProviderInitResult { private final OmSnapshotLocalData snapshotLocalData; private final HierarchicalResourceLock lock; @@ -338,6 +355,34 @@ private OmSnapshotLocalData getSnapshotLocalData() { } } + /** + * Provides LocalData's metadata stored in memory for a snapshot after acquiring a read lock on this. + */ + public final class ReadableOmSnapshotLocalDataMetaProvider implements AutoCloseable { + private final SnapshotVersionsMeta meta; + private final HierarchicalResourceLock lock; + private boolean closed; + + private ReadableOmSnapshotLocalDataMetaProvider(UUID snapshotId) throws IOException { + this.lock = acquireLock(snapshotId, true); + this.meta = versionNodeMap.get(snapshotId); + this.closed = false; + } + + public synchronized SnapshotVersionsMeta getMeta() throws IOException { + if (closed) { + throw new IOException("Resource has already been closed."); + } + return meta; + } + + @Override + public synchronized void close() throws IOException { + closed = true; + lock.close(); + } + } + /** * The ReadableOmSnapshotLocalDataProvider class is responsible for managing the * access and initialization of local snapshot data in a thread-safe manner. @@ -420,15 +465,6 @@ public synchronized OmSnapshotLocalData getPreviousSnapshotLocalData() throws IO return previousSnapshotLocalData; } - private HierarchicalResourceLock acquireLock(UUID snapId, boolean readLock) throws IOException { - HierarchicalResourceLock acquiredLock = readLock ? 
locks.acquireReadLock(FlatResource.SNAPSHOT_LOCAL_DATA_LOCK, - snapId.toString()) : locks.acquireWriteLock(FlatResource.SNAPSHOT_LOCAL_DATA_LOCK, snapId.toString()); - if (!acquiredLock.isLockAcquired()) { - throw new IOException("Unable to acquire lock for snapshotId: " + snapId); - } - return acquiredLock; - } - /** * Intializes the snapshot local data by acquiring the lock on the snapshot and also acquires a read lock on the * snapshotId to be resolved by iterating through the chain of previous snapshot ids. @@ -761,7 +797,10 @@ public String toString() { } } - static final class SnapshotVersionsMeta { + /** + * Class that encapsulates the metadata corresponding to a snapshot's local data. + */ + public static final class SnapshotVersionsMeta { private final UUID previousSnapshotId; private final Map snapshotVersions; private int version; @@ -783,16 +822,16 @@ private Map getVersionNodes(OmSnapshotLocalData s return versionNodes; } - UUID getPreviousSnapshotId() { + public UUID getPreviousSnapshotId() { return previousSnapshotId; } - int getVersion() { + public int getVersion() { return version; } - Map getSnapshotVersions() { - return snapshotVersions; + private Map getSnapshotVersions() { + return Collections.unmodifiableMap(snapshotVersions); } LocalDataVersionNode getVersionNode(int snapshotVersion) { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java index 6ec49935b356..f6b8df609d99 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java @@ -314,7 +314,7 @@ public void testCreateNewSnapshotLocalYaml() throws IOException { when(mockedDb.getLiveFilesMetaData()).thenReturn(mockedLiveFiles); Path snapshotYaml = 
Paths.get(snapshotLocalDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo)); - when(mockedStore.getDbLocation()).thenReturn(getSnapshotPath(omMetadataManager, snapshotInfo).toFile()); + when(mockedStore.getDbLocation()).thenReturn(getSnapshotPath(omMetadataManager, snapshotInfo, 0).toFile()); // Create an existing YAML file for the snapshot assertTrue(snapshotYaml.toFile().createNewFile()); assertEquals(0, Files.size(snapshotYaml)); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java index 2cafae138fd4..6bef4b84247b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotCreateResponse.java @@ -131,7 +131,7 @@ public void testAddToDBBatch(int numberOfKeys) throws Exception { omMetadataManager.getStore().commitBatchOperation(batchOperation); // Confirm snapshot directory was created - String snapshotDir = getSnapshotPath(ozoneConfiguration, snapshotInfo); + String snapshotDir = getSnapshotPath(ozoneConfiguration, snapshotInfo, 0); assertTrue((new File(snapshotDir)).exists()); // Confirm table has 1 entry diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java index 2d5d7b2870f7..bdb23b65f2c8 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/snapshot/TestOMSnapshotDeleteResponse.java @@ -117,7 +117,7 @@ public void 
testAddToDBBatch() throws Exception { // Confirm snapshot directory was created String snapshotDir = OmSnapshotManager.getSnapshotPath(ozoneConfiguration, - snapshotInfo); + snapshotInfo, 0); assertTrue((new File(snapshotDir)).exists()); // Confirm table has 1 entry diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index 947c1a4b7f47..43ebe6fbeb5b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -250,7 +250,7 @@ private List createSnapshotLocalData(OmSnapshotLocalDataManager snapshotLo private void mockSnapshotStore(UUID snapshotId, List sstFiles) throws RocksDatabaseException { // Setup snapshot store mock - File snapshotDbLocation = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotId).toFile(); + File snapshotDbLocation = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotId, 0).toFile(); assertTrue(snapshotDbLocation.exists() || snapshotDbLocation.mkdirs()); when(snapshotStore.getDbLocation()).thenReturn(snapshotDbLocation); @@ -580,7 +580,7 @@ public void testCreateNewOmSnapshotLocalDataFile() throws IOException { SnapshotInfo snapshotInfo = createMockSnapshotInfo(snapshotId, null); // Setup snapshot store mock - File snapshotDbLocation = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotId).toFile(); + File snapshotDbLocation = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotId, 0).toFile(); assertTrue(snapshotDbLocation.exists() || snapshotDbLocation.mkdirs()); List sstFiles = new ArrayList<>(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java index 3f53a66f4f95..e62b64893254 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotChain.java @@ -93,7 +93,6 @@ private SnapshotInfo createSnapshotInfo(UUID snapshotID, .setPathPreviousSnapshotId(pathPrevID) .setGlobalPreviousSnapshotId(globalPrevID) .setSnapshotPath(String.join("/", "vol1", "bucket1")) - .setCheckpointDir("checkpoint.testdir") .build(); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java index 0ea625a0e064..ec896cb3dda3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java @@ -289,7 +289,6 @@ public void init() throws RocksDBException, IOException, ExecutionException { .setBucketName(BUCKET_NAME) .setName(baseSnapshotName) .setSnapshotPath(snapshotPath) - .setCheckpointDir(snapshotCheckpointDir) .build(); for (JobStatus jobStatus : jobStatuses) { @@ -302,7 +301,6 @@ public void init() throws RocksDBException, IOException, ExecutionException { .setBucketName(BUCKET_NAME) .setName(targetSnapshotName) .setSnapshotPath(snapshotPath) - .setCheckpointDir(snapshotCheckpointDir) .build(); SnapshotDiffJob diffJob = new SnapshotDiffJob(System.currentTimeMillis(), @@ -1395,7 +1393,6 @@ public void testThreadPoolIsFull(String description, .setBucketName(BUCKET_NAME) .setName(snapshotName) .setSnapshotPath("fromSnapshotPath") - .setCheckpointDir("fromSnapshotCheckpointDir") .build(); snapshotInfos.add(snapInfo); diff --git 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotInfo.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotInfo.java index ca27d9bc8938..a39d907038fb 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotInfo.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotInfo.java @@ -75,7 +75,6 @@ private SnapshotInfo createSnapshotInfo() { .setPathPreviousSnapshotId(EXPECTED_PREVIOUS_SNAPSHOT_ID) .setGlobalPreviousSnapshotId(EXPECTED_PREVIOUS_SNAPSHOT_ID) .setSnapshotPath("test/path") - .setCheckpointDir("checkpoint.testdir") .build(); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotRequestAndResponse.java index d9e81693dd8d..e7c98e298b18 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotRequestAndResponse.java @@ -225,7 +225,7 @@ protected Path createSnapshotCheckpoint(String volume, String bucket, String sna RDBStore store = (RDBStore) omMetadataManager.getStore(); String checkpointPrefix = store.getDbLocation().getName(); Path snapshotDirPath = Paths.get(store.getSnapshotsParentDir(), - checkpointPrefix + snapshotInfo.getCheckpointDir()); + checkpointPrefix + SnapshotInfo.getCheckpointDirName(snapshotInfo.getSnapshotId(), 0)); // Check the DB is still there assertTrue(Files.exists(snapshotDirPath)); return snapshotDirPath; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSstFilteringService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSstFilteringService.java index 
e523f32ef7e2..108dd30c8222 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSstFilteringService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSstFilteringService.java @@ -219,7 +219,7 @@ public void testIrrelevantSstFileDeletion() .get(SnapshotInfo.getTableKey(volumeName, bucketName2, snapshotName1)); String snapshotDirName = - OmSnapshotManager.getSnapshotPath(conf, snapshotInfo); + OmSnapshotManager.getSnapshotPath(conf, snapshotInfo, 0); for (LiveFileMetaData file : allFiles) { //Skipping the previous files from this check even those also works. @@ -294,11 +294,11 @@ public void testActiveAndDeletedSnapshotCleanup() throws Exception { SnapshotInfo snapshot1Info = om.getMetadataManager().getSnapshotInfoTable() .get(SnapshotInfo.getTableKey(volumeName, bucketNames.get(0), "snap1")); File snapshot1Dir = - new File(OmSnapshotManager.getSnapshotPath(conf, snapshot1Info)); + new File(OmSnapshotManager.getSnapshotPath(conf, snapshot1Info, 0)); SnapshotInfo snapshot2Info = om.getMetadataManager().getSnapshotInfoTable() .get(SnapshotInfo.getTableKey(volumeName, bucketNames.get(0), "snap2")); File snapshot2Dir = - new File(OmSnapshotManager.getSnapshotPath(conf, snapshot2Info)); + new File(OmSnapshotManager.getSnapshotPath(conf, snapshot2Info, 0)); File snap1Current = new File(snapshot1Dir, "CURRENT"); File snap2Current = new File(snapshot2Dir, "CURRENT"); From 36b6fb357de7668aa946b516dde375eb6ab4aca5 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Tue, 28 Oct 2025 08:01:07 -0400 Subject: [PATCH 74/97] HDDS-13830. 
Add test Change-Id: Ica7f0f89f8e9f5c6531709c949b6e9fa6b7c4da1 --- .../ozone/om/TestOmSnapshotManager.java | 42 +++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java index f6b8df609d99..116d78019b75 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java @@ -42,7 +42,9 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockStatic; import static org.mockito.Mockito.timeout; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -95,6 +97,9 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.io.TempDir; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; +import org.mockito.MockedStatic; import org.rocksdb.LiveFileMetaData; import org.slf4j.event.Level; @@ -741,6 +746,43 @@ void testProcessFileWithDestDirParameter(@TempDir File testDir) throws IOExcepti destAddNonSstToCopiedFiles); } + @ParameterizedTest + @ValueSource(ints = {0, 1, 10, 100}) + public void testGetSnapshotPath(int version) { + OMMetadataManager metadataManager = mock(OMMetadataManager.class); + RDBStore store = mock(RDBStore.class); + when(metadataManager.getStore()).thenReturn(store); + File file = new File("test-db"); + when(store.getDbLocation()).thenReturn(file); + String path = "dir1/dir2"; + when(store.getSnapshotsParentDir()).thenReturn(path); + UUID 
snapshotId = UUID.randomUUID(); + String snapshotPath = OmSnapshotManager.getSnapshotPath(metadataManager, snapshotId, version).toString(); + String expectedPath = "dir1/dir2/test-db-" + snapshotId; + if (version != 0) { + expectedPath = expectedPath + "-" + version; + } + assertEquals(expectedPath, snapshotPath); + } + + @ParameterizedTest + @ValueSource(ints = {0, 1, 10, 100}) + public void testGetSnapshotPathFromConf(int version) { + try (MockedStatic mocked = mockStatic(OMStorage.class)) { + String omDir = "dir1/dir2"; + mocked.when(() -> OMStorage.getOmDbDir(any())).thenReturn(new File(omDir)); + OzoneConfiguration conf = mock(OzoneConfiguration.class); + SnapshotInfo snapshotInfo = createSnapshotInfo("volumeName", "bucketname"); + String snapshotPath = OmSnapshotManager.getSnapshotPath(conf, snapshotInfo, version); + String expectedPath = omDir + OM_KEY_PREFIX + OM_SNAPSHOT_CHECKPOINT_DIR + OM_KEY_PREFIX + + OM_DB_NAME + "-" + snapshotInfo.getSnapshotId(); + if (version != 0) { + expectedPath = expectedPath + "-" + version; + } + assertEquals(expectedPath, snapshotPath); + } + } + @Test public void testCreateSnapshotIdempotent() throws Exception { // set up db tables From 45963867aa8e516544f04dec14966448402e1a35 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 29 Oct 2025 01:07:29 -0400 Subject: [PATCH 75/97] HDDS-13783. 
Add comments for localDataGraph Change-Id: I1bb9832e7e8c40deeccb9d0868eaf5772f39b7f9 --- .../om/snapshot/OmSnapshotLocalDataManager.java | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index bfe74822476d..3e92eb6748ce 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -74,6 +74,23 @@ public class OmSnapshotLocalDataManager implements AutoCloseable { private static final Logger LOG = LoggerFactory.getLogger(OmSnapshotLocalDataManager.class); private final ObjectSerializer snapshotLocalDataSerializer; + // In-memory DAG of snapshot-version dependencies. Each node represents a + // specific (snapshotId, version) pair, and a directed edge points to the + // corresponding (previousSnapshotId, previousSnapshotVersion) it depends on. + // The durable state is stored in each snapshot's YAML (previousSnapshotId and + // VersionMeta.previousSnapshotVersion). This graph mirrors that persisted + // structure to validate adds/removes and to resolve versions across chains. + // This graph is maintained only in memory and is not persisted to disk. + // Example (linear chain, arrows point to previous): + // (S0, v1) <- (S1, v4) <- (S2, v5) <- (S3, v7) + // where each node is (snapshotId, version) and each arrow points to its + // corresponding (previousSnapshotId, previousSnapshotVersion) dependency. 
+ // + // Example (multiple versions for a single snapshotId S2): + // (S1, v4) <- (S2, v6) <- (S3, v8) + // (S1, v3) <- (S2, v5) + // Here S2 has two distinct versions (v6 and v5), each represented as its own + // node, and each version can depend on a different previousSnapshotVersion on S1. private final MutableGraph localDataGraph; private final Map versionNodeMap; private final OMMetadataManager omMetadataManager; From fd4bfdb2213f3372712cca212d346df6797fd450 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 30 Oct 2025 17:43:29 -0400 Subject: [PATCH 76/97] HDDS-13785. Add test for handling needs defrag Change-Id: I2af63982b9e5e9c42fbb54ac39366d2d66e563d2 --- .../ozone/om/SnapshotDefragService.java | 5 ++- .../snapshot/OmSnapshotLocalDataManager.java | 18 ++++++++-- .../TestOmSnapshotLocalDataManager.java | 33 +++++++++++++++++-- 3 files changed, 49 insertions(+), 7 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java index 87f6ff55bb71..799ac6a40773 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java @@ -137,9 +137,8 @@ private boolean needsDefragmentation(SnapshotInfo snapshotInfo) { OmSnapshotLocalData snapshotLocalData = readableOmSnapshotLocalDataProvider.getSnapshotLocalData(); // Check if snapshot needs compaction (defragmentation) - boolean needsDefrag = snapshotLocalData.getNeedsDefrag(); - LOG.debug("Snapshot {} needsDefragmentation field value: {}", - snapshotInfo.getName(), needsDefrag); + boolean needsDefrag = readableOmSnapshotLocalDataProvider.needsDefrag(); + LOG.debug("Snapshot {} needsDefragmentation field value: {}", snapshotInfo.getName(), needsDefrag); return needsDefrag; } catch (IOException e) { diff 
--git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index b081317cc57e..f38f1148ff76 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -170,8 +170,8 @@ public void createNewOmSnapshotLocalDataFile(RDBStore snapshotStore, SnapshotInf try (WritableOmSnapshotLocalDataProvider snapshotLocalData = new WritableOmSnapshotLocalDataProvider(snapshotInfo.getSnapshotId(), () -> Pair.of(new OmSnapshotLocalData(snapshotInfo.getSnapshotId(), - OmSnapshotManager.getSnapshotSSTFileList(snapshotStore), snapshotInfo.getPathPreviousSnapshotId(), - null), + OmSnapshotManager.getSnapshotSSTFileList(snapshotStore), + snapshotInfo.getPathPreviousSnapshotId(), null), null))) { snapshotLocalData.commit(); } @@ -674,6 +674,19 @@ private LockDataProviderInitResult initialize( } } + public boolean needsDefrag() { + if (snapshotLocalData.getNeedsDefrag()) { + return true; + } + if (resolvedPreviousSnapshotId != null) { + int snapshotVersion = snapshotLocalData.getVersion(); + int previousResolvedSnapshotVersion = snapshotLocalData.getVersionSstFileInfos().get(snapshotVersion) + .getPreviousSnapshotVersion(); + return previousResolvedSnapshotVersion < getVersionNodeMap().get(resolvedPreviousSnapshotId).getVersion(); + } + return false; + } + @Override public void close() throws IOException { if (previousLock != null) { @@ -756,6 +769,7 @@ public void addSnapshotVersion(RDBStore snapshotStore) throws IOException { OmSnapshotLocalData previousSnapshotLocalData = getPreviousSnapshotLocalData(); this.getSnapshotLocalData().addVersionSSTFileInfos(sstFiles, previousSnapshotLocalData == null ? 
0 : previousSnapshotLocalData.getVersion()); + this.getSnapshotLocalData().setNeedsDefrag(false); // Set Dirty if a version is added. setDirty(); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index d4233591be34..cd442b9cdd38 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -31,6 +31,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.CALLS_REAL_METHODS; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.mockStatic; import static org.mockito.Mockito.reset; @@ -124,7 +125,7 @@ public class TestOmSnapshotLocalDataManager { private AutoCloseable mocks; private File snapshotsDir; - private MockedStatic snapshotUtilMock; + private MockedStatic snapshotUtilMock; private static final String READ_LOCK_MESSAGE_ACQUIRE = "readLock acquire"; private static final String READ_LOCK_MESSAGE_UNLOCK = "readLock unlock"; @@ -169,7 +170,7 @@ public void setUp() throws IOException { when(rdbStore.getSnapshotsParentDir()).thenReturn(snapshotsDir.getAbsolutePath()); when(rdbStore.getDbLocation()).thenReturn(dbLocation); - this.snapshotUtilMock = mockStatic(SnapshotUtils.class); + this.snapshotUtilMock = mockStatic(OmSnapshotManager.class, CALLS_REAL_METHODS); purgedSnapshotIdMap.clear(); snapshotUtilMock.when(() -> OmSnapshotManager.isSnapshotPurged(any(), any(), any(), any())) .thenAnswer(i -> purgedSnapshotIdMap.getOrDefault(i.getArgument(2), false)); @@ -519,6 +520,32 @@ public void 
testOrphanVersionDeletionWithChainUpdate(boolean purgeSnapshot) thro } } + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testWriteWithChainUpdate(boolean previousSnapshotExisting) throws IOException { + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); + List snapshotIds = createSnapshotLocalData(localDataManager, 3 + (previousSnapshotExisting ? 1 : 0)); + int snapshotIdx = 1 + (previousSnapshotExisting ? 1 : 0); + for (UUID snapshotId : snapshotIds) { + addVersionsToLocalData(localDataManager, snapshotId, ImmutableMap.of(1, 1)); + } + + UUID snapshotId = snapshotIds.get(snapshotIdx); + UUID toUpdatePreviousSnapshotId = snapshotIdx - 2 >= 0 ? snapshotIds.get(snapshotIdx - 2) : null; + + try (WritableOmSnapshotLocalDataProvider snap = + localDataManager.getWritableOmSnapshotLocalData(snapshotId, toUpdatePreviousSnapshotId)) { + assertFalse(snap.needsDefrag()); + snap.commit(); + assertTrue(snap.needsDefrag()); + } + try (ReadableOmSnapshotLocalDataProvider snap = + localDataManager.getOmSnapshotLocalData(snapshotId)) { + assertEquals(toUpdatePreviousSnapshotId, snap.getSnapshotLocalData().getPreviousSnapshotId()); + assertTrue(snap.needsDefrag()); + } + } + /** * Validates write-time version propagation and removal rules when the previous * snapshot already has a concrete version recorded. @@ -717,6 +744,8 @@ public void testCreateNewOmSnapshotLocalDataFile() throws IOException { OmSnapshotLocalData.VersionMeta expectedVersionMeta = new OmSnapshotLocalData.VersionMeta(0, sstFileInfos); assertEquals(expectedVersionMeta, versionMeta); + // New Snapshot create needs to be defragged always. + assertTrue(snapshotLocalData.needsDefrag()); } } From 8a297369c4f9da73e6990662d3968034c03b6096 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 30 Oct 2025 17:46:11 -0400 Subject: [PATCH 77/97] HDDS-13833. 
Fix checkstyle Change-Id: I90e886ac5ca954cdf4fa7110b175168b2facac4b --- .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 3806fda7ed57..3411c4879ddc 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -158,8 +158,8 @@ public void createNewOmSnapshotLocalDataFile(RDBStore snapshotStore, SnapshotInf try (WritableOmSnapshotLocalDataProvider snapshotLocalData = new WritableOmSnapshotLocalDataProvider(snapshotInfo.getSnapshotId(), () -> Pair.of(new OmSnapshotLocalData(snapshotInfo.getSnapshotId(), - OmSnapshotManager.getSnapshotSSTFileList(snapshotStore), snapshotInfo.getPathPreviousSnapshotId(), - null), + OmSnapshotManager.getSnapshotSSTFileList(snapshotStore), + snapshotInfo.getPathPreviousSnapshotId(), null), null))) { snapshotLocalData.commit(); } From a810cc1b4b13636040b0bf248290382f5eccf2e1 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 30 Oct 2025 18:45:20 -0400 Subject: [PATCH 78/97] HDDS-13785. 
Fix findbugs Change-Id: Ic2aa9091a9c463bb238b5af6c347df3d080b28f9 --- .../java/org/apache/hadoop/ozone/om/SnapshotDefragService.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java index 799ac6a40773..212953cd874c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java @@ -134,8 +134,6 @@ private boolean needsDefragmentation(SnapshotInfo snapshotInfo) { try (OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataProvider readableOmSnapshotLocalDataProvider = ozoneManager.getOmSnapshotManager().getSnapshotLocalDataManager().getOmSnapshotLocalData(snapshotInfo)) { // Read snapshot local metadata from YAML - OmSnapshotLocalData snapshotLocalData = readableOmSnapshotLocalDataProvider.getSnapshotLocalData(); - // Check if snapshot needs compaction (defragmentation) boolean needsDefrag = readableOmSnapshotLocalDataProvider.needsDefrag(); LOG.debug("Snapshot {} needsDefragmentation field value: {}", snapshotInfo.getName(), needsDefrag); From 78c103629bd83b748767fcc210becc6b77b177be Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Thu, 30 Oct 2025 22:17:02 -0400 Subject: [PATCH 79/97] HDDS-13859. 
OmSnapshotLocalDataManager should handle needsDefrag flag in the yaml file Change-Id: Id4e0082b42ded19a5b05418555478c5a71ae2d1a --- .../ozone/om/SnapshotDefragService.java | 7 +-- .../snapshot/OmSnapshotLocalDataManager.java | 17 +++++++ .../TestOmSnapshotLocalDataManager.java | 49 +++++++++++++++++++ 3 files changed, 68 insertions(+), 5 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java index 87f6ff55bb71..212953cd874c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotDefragService.java @@ -134,12 +134,9 @@ private boolean needsDefragmentation(SnapshotInfo snapshotInfo) { try (OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataProvider readableOmSnapshotLocalDataProvider = ozoneManager.getOmSnapshotManager().getSnapshotLocalDataManager().getOmSnapshotLocalData(snapshotInfo)) { // Read snapshot local metadata from YAML - OmSnapshotLocalData snapshotLocalData = readableOmSnapshotLocalDataProvider.getSnapshotLocalData(); - // Check if snapshot needs compaction (defragmentation) - boolean needsDefrag = snapshotLocalData.getNeedsDefrag(); - LOG.debug("Snapshot {} needsDefragmentation field value: {}", - snapshotInfo.getName(), needsDefrag); + boolean needsDefrag = readableOmSnapshotLocalDataProvider.needsDefrag(); + LOG.debug("Snapshot {} needsDefragmentation field value: {}", snapshotInfo.getName(), needsDefrag); return needsDefrag; } catch (IOException e) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 3411c4879ddc..ef789aa8e1dd 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -573,6 +573,19 @@ private LockDataProviderInitResult initialize( } } + public boolean needsDefrag() { + if (snapshotLocalData.getNeedsDefrag()) { + return true; + } + if (resolvedPreviousSnapshotId != null) { + int snapshotVersion = snapshotLocalData.getVersion(); + int previousResolvedSnapshotVersion = snapshotLocalData.getVersionSstFileInfos().get(snapshotVersion) + .getPreviousSnapshotVersion(); + return previousResolvedSnapshotVersion < getVersionNodeMap().get(resolvedPreviousSnapshotId).getVersion(); + } + return false; + } + @Override public void close() throws IOException { if (previousLock != null) { @@ -640,6 +653,9 @@ private SnapshotVersionsMeta validateModification(OmSnapshotLocalData snapshotLo if (existingVersionsMeta == null || !Objects.equals(versionsToBeAdded.getPreviousSnapshotId(), existingVersionsMeta.getPreviousSnapshotId())) { setDirty(); + // Set the needsDefrag if the new previous snapshotId is different from the existing one or if this is a new + // snapshot yaml file. + snapshotLocalData.setNeedsDefrag(true); } return versionsToBeAdded; } finally { @@ -652,6 +668,7 @@ public void addSnapshotVersion(RDBStore snapshotStore) throws IOException { OmSnapshotLocalData previousSnapshotLocalData = getPreviousSnapshotLocalData(); this.getSnapshotLocalData().addVersionSSTFileInfos(sstFiles, previousSnapshotLocalData == null ? 0 : previousSnapshotLocalData.getVersion()); + this.getSnapshotLocalData().setNeedsDefrag(false); // Set Dirty if a version is added. 
setDirty(); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index bfaa48c04feb..869c76270c7c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -23,6 +23,7 @@ import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.FILE_TABLE; import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.KEY_TABLE; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -431,6 +432,32 @@ private void validateVersions(OmSnapshotLocalDataManager snapshotLocalDataManage } } + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testWriteWithChainUpdate(boolean previousSnapshotExisting) throws IOException { + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + List snapshotIds = createSnapshotLocalData(localDataManager, 3 + (previousSnapshotExisting ? 1 : 0)); + int snapshotIdx = 1 + (previousSnapshotExisting ? 1 : 0); + for (UUID snapshotId : snapshotIds) { + addVersionsToLocalData(localDataManager, snapshotId, ImmutableMap.of(1, 1)); + } + + UUID snapshotId = snapshotIds.get(snapshotIdx); + UUID toUpdatePreviousSnapshotId = snapshotIdx - 2 >= 0 ? 
snapshotIds.get(snapshotIdx - 2) : null; + + try (WritableOmSnapshotLocalDataProvider snap = + localDataManager.getWritableOmSnapshotLocalData(snapshotId, toUpdatePreviousSnapshotId)) { + assertFalse(snap.needsDefrag()); + snap.commit(); + assertTrue(snap.needsDefrag()); + } + try (ReadableOmSnapshotLocalDataProvider snap = + localDataManager.getOmSnapshotLocalData(snapshotId)) { + assertEquals(toUpdatePreviousSnapshotId, snap.getSnapshotLocalData().getPreviousSnapshotId()); + assertTrue(snap.needsDefrag()); + } + } + /** * Validates write-time version propagation and removal rules when the previous * snapshot already has a concrete version recorded. @@ -531,6 +558,26 @@ private void addVersionsToLocalData(OmSnapshotLocalDataManager snapshotLocalData } } + @ParameterizedTest + @ValueSource(ints = {1, 2, 3}) + public void testNeedsDefrag(int previousVersion) throws IOException { + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + List snapshotIds = createSnapshotLocalData(localDataManager, 2); + for (UUID snapshotId : snapshotIds) { + try (ReadableOmSnapshotLocalDataProvider snap = localDataManager.getOmSnapshotLocalData(snapshotId)) { + assertTrue(snap.needsDefrag()); + } + } + addVersionsToLocalData(localDataManager, snapshotIds.get(0), ImmutableMap.of(1, 1, 2, 2, 3, 3)); + try (ReadableOmSnapshotLocalDataProvider snap = localDataManager.getOmSnapshotLocalData(snapshotIds.get(0))) { + assertFalse(snap.needsDefrag()); + } + addVersionsToLocalData(localDataManager, snapshotIds.get(1), ImmutableMap.of(1, 3, 2, previousVersion)); + try (ReadableOmSnapshotLocalDataProvider snap = localDataManager.getOmSnapshotLocalData(snapshotIds.get(1))) { + assertEquals(previousVersion < snap.getPreviousSnapshotLocalData().getVersion(), snap.needsDefrag()); + } + } + @ParameterizedTest @ValueSource(booleans = {true, false}) public void testVersionResolution(boolean read) throws IOException { @@ -629,6 +676,8 @@ public void testCreateNewOmSnapshotLocalDataFile() 
throws IOException { OmSnapshotLocalData.VersionMeta expectedVersionMeta = new OmSnapshotLocalData.VersionMeta(0, sstFileInfos); assertEquals(expectedVersionMeta, versionMeta); + // New Snapshot create needs to be defragged always. + assertTrue(snapshotLocalData.needsDefrag()); } } From bf4746f1e3f4eb3ee6e28a8a41d0705127253e07 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 31 Oct 2025 07:33:35 -0400 Subject: [PATCH 80/97] HDDS-13859. Fix Test Change-Id: Ib1ab4c81e920f1465c3e9046092cdbf0727bcf7d --- .../java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java index 6ec49935b356..73f9e2863be3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java @@ -330,7 +330,7 @@ public void testCreateNewSnapshotLocalYaml() throws IOException { assertEquals(notDefraggedVersionMeta, localData.getVersionSstFileInfos().get(0)); assertFalse(localData.getSstFiltered()); assertEquals(0L, localData.getLastDefragTime()); - assertFalse(localData.getNeedsDefrag()); + assertTrue(localData.getNeedsDefrag()); assertEquals(1, localData.getVersionSstFileInfos().size()); // Cleanup From 09d955c199dfb7f646d2bd8697b6064a7f058b00 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 31 Oct 2025 07:49:08 -0400 Subject: [PATCH 81/97] HDDS-13859. 
Add comments Change-Id: Iaea152b0b69dcdd52a6f3806c713ee2b48d3875c --- .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 1 + 1 file changed, 1 insertion(+) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index ef789aa8e1dd..40e4bf678db0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -668,6 +668,7 @@ public void addSnapshotVersion(RDBStore snapshotStore) throws IOException { OmSnapshotLocalData previousSnapshotLocalData = getPreviousSnapshotLocalData(); this.getSnapshotLocalData().addVersionSSTFileInfos(sstFiles, previousSnapshotLocalData == null ? 0 : previousSnapshotLocalData.getVersion()); + // Adding a new snapshot version means it has been defragged thus the flag needs to be reset. this.getSnapshotLocalData().setNeedsDefrag(false); // Set Dirty if a version is added. setDirty(); From 2cf1bce8c8d2546b51ef06b15eb910370da46816 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 31 Oct 2025 08:48:08 -0400 Subject: [PATCH 82/97] HDDS-13859. 
Fix test after merge master Change-Id: I6527b98cd16850079b2f3855f4bfb5a5b59fa3ee --- .../ozone/om/snapshot/TestOmSnapshotLocalDataManager.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index cc39d263bdd2..8554d1684e26 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -691,7 +691,7 @@ public void testCreateNewSnapshotLocalYaml() throws IOException { assertEquals(notDefraggedVersionMeta, localData.getVersionSstFileInfos().get(0)); assertFalse(localData.getSstFiltered()); assertEquals(0L, localData.getLastDefragTime()); - assertFalse(localData.getNeedsDefrag()); + assertTrue(localData.getNeedsDefrag()); assertEquals(1, localData.getVersionSstFileInfos().size()); } From 5849dacbd5760f4166038f9b4aac2b6f5dfa0c0f Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 31 Oct 2025 08:56:52 -0400 Subject: [PATCH 83/97] HDDS-13785. 
Fix tests after merge Change-Id: Ia67b9db375bc694bacce4e4e87d9d04906c3b01c --- .../ozone/om/snapshot/TestOmSnapshotLocalDataManager.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index 5816306e7ea2..006c946abacf 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.om.snapshot; +import static org.apache.hadoop.hdds.StringUtils.bytes2String; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_SEPARATOR; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL; import static org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml.YAML_FILE_EXTENSION; @@ -651,7 +652,7 @@ private void addVersionsToLocalData(OmSnapshotLocalDataManager snapshotLocalData @ParameterizedTest @ValueSource(ints = {1, 2, 3}) public void testNeedsDefrag(int previousVersion) throws IOException { - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + localDataManager = new OmSnapshotLocalDataManager(omMetadataManager, null, conf); List snapshotIds = createSnapshotLocalData(localDataManager, 2); for (UUID snapshotId : snapshotIds) { try (ReadableOmSnapshotLocalDataProvider snap = localDataManager.getOmSnapshotLocalData(snapshotId)) { @@ -761,7 +762,7 @@ public void testCreateNewSnapshotLocalYaml() throws IOException { mockedLiveFiles.add(createMockLiveFileMetaData("ot2.sst", "otherTable", "k1", "k2")); mockSnapshotStore(snapshotId, mockedLiveFiles); - localDataManager = new OmSnapshotLocalDataManager(omMetadataManager); + localDataManager = 
new OmSnapshotLocalDataManager(omMetadataManager, null, conf); Path snapshotYaml = Paths.get(localDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo)); // Create an existing YAML file for the snapshot assertTrue(snapshotYaml.toFile().createNewFile()); From 519495ac08e1ac29cc50ad6a6cceb7a84ddcb0af Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 31 Oct 2025 18:29:28 -0400 Subject: [PATCH 84/97] HDDS-13785. Address review comments Change-Id: I1a672ce523de5c7d8317766d7ce8f49fee3d099e --- .../main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java | 4 ++-- .../ozone/om/snapshot/OmSnapshotLocalDataManager.java | 6 +++--- .../ozone/om/snapshot/TestOmSnapshotLocalDataManager.java | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java index 652a384b2cc9..469900aa8ea7 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java @@ -678,9 +678,9 @@ public final class OMConfigKeys { public static final String OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT = "ozone.om.hierarchical.resource.locks.hard.limit"; public static final int OZONE_OM_HIERARCHICAL_RESOURCE_LOCKS_HARD_LIMIT_DEFAULT = 10000; - public static final String OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL = + public static final String OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL = "ozone.om.snapshot.local.data.manager.service.interval"; - public static final String OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL_DEFAULT = "5m"; + public static final String OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL_DEFAULT = "5m"; /** * Never constructed. 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 4833da95c91f..9c3a9d0cdbbc 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -17,8 +17,8 @@ package org.apache.hadoop.ozone.om.snapshot; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL_DEFAULT; import static org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml.YAML_FILE_EXTENSION; import com.google.common.annotations.VisibleForTesting; @@ -315,7 +315,7 @@ private void init(OzoneConfiguration configuration, SnapshotChainManager chainMa increamentOrphanCheckCount(snapshotId); } long snapshotLocalDataManagerServiceInterval = configuration.getTimeDuration( - OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL, OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL_DEFAULT, + OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL, OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS); if (snapshotLocalDataManagerServiceInterval > 0) { this.scheduler = new Scheduler(LOCAL_DATA_MANAGER_SERVICE_NAME, true, 1); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index 
006c946abacf..912be56c4bd5 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.hdds.StringUtils.bytes2String; import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_SEPARATOR; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OmSnapshotLocalDataYaml.YAML_FILE_EXTENSION; import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.DIRECTORY_TABLE; import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.FILE_TABLE; @@ -177,7 +177,7 @@ public void setUp() throws IOException { purgedSnapshotIdMap.clear(); snapshotUtilMock.when(() -> OmSnapshotManager.isSnapshotPurged(any(), any(), any(), any())) .thenAnswer(i -> purgedSnapshotIdMap.getOrDefault(i.getArgument(2), false)); - conf.setInt(OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_INTERVAL, -1); + conf.setInt(OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL, -1); } @AfterEach From c125250bcf4e88ae6372d7d46b1824c6141d3561 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Fri, 31 Oct 2025 18:30:20 -0400 Subject: [PATCH 85/97] HDDS-13785. 
Address review comments Change-Id: I6e817c1d5a27c5c0d8a3cb4a296638057ed21951 --- .../om/snapshot/OmSnapshotLocalDataManager.java | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 9c3a9d0cdbbc..9af48367dde7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -265,13 +265,13 @@ void addVersionNodeWithDependents(OmSnapshotLocalData snapshotLocalData) throws } } - private void increamentOrphanCheckCount(UUID snapshotId) { + private void incrementOrphanCheckCount(UUID snapshotId) { if (snapshotId != null) { this.snapshotToBeCheckedForOrphans.compute(snapshotId, (k, v) -> v == null ? 
1 : (v + 1)); } } - private void decreamentOrphanCheckCount(UUID snapshotId, int decrementBy) { + private void decrementOrphanCheckCount(UUID snapshotId, int decrementBy) { this.snapshotToBeCheckedForOrphans.compute(snapshotId, (k, v) -> { if (v == null) { return null; @@ -312,7 +312,7 @@ private void init(OzoneConfiguration configuration, SnapshotChainManager chainMa addVersionNodeWithDependents(snapshotLocalData); } for (UUID snapshotId : versionNodeMap.keySet()) { - increamentOrphanCheckCount(snapshotId); + incrementOrphanCheckCount(snapshotId); } long snapshotLocalDataManagerServiceInterval = configuration.getTimeDuration( OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL, OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL_DEFAULT, @@ -337,7 +337,7 @@ private void checkOrphanSnapshotVersions(OMMetadataManager metadataManager, Snap UUID snapshotId = entry.getKey(); int countBeforeCheck = entry.getValue(); checkOrphanSnapshotVersions(metadataManager, chainManager, snapshotId); - decreamentOrphanCheckCount(snapshotId, countBeforeCheck); + decrementOrphanCheckCount(snapshotId, countBeforeCheck); } } @@ -855,14 +855,14 @@ private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions, // version removals) if (versionsRemoved || !Objects.equals(existingSnapVersions.getPreviousSnapshotId(), snapshotVersions.getPreviousSnapshotId())) { - increamentOrphanCheckCount(existingSnapVersions.getPreviousSnapshotId()); + incrementOrphanCheckCount(existingSnapVersions.getPreviousSnapshotId()); } // If the transactionInfo set this means the snapshot has been purged and the entire yaml file could have // become an orphan if the version is also updated it // could mean that there could be some orphan version present within the // same snapshot. 
if (transactionInfoSet || existingSnapVersions.getVersion() != snapshotVersions.getVersion()) { - increamentOrphanCheckCount(snapshotId); + incrementOrphanCheckCount(snapshotId); } } } finally { From ec59b893ecb6262f7f69b6c5551474c1c5e5e9fa Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sat, 1 Nov 2025 00:17:36 -0400 Subject: [PATCH 86/97] HDDS-13785. Address review comments Change-Id: I6870b5263d104e4179dbc13b24bd923238ff9171 --- .../om/snapshot/OmSnapshotLocalDataManager.java | 12 +++++++++--- .../om/snapshot/TestOmSnapshotLocalDataManager.java | 2 +- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 9af48367dde7..e6e940290987 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -284,6 +284,7 @@ private void decrementOrphanCheckCount(UUID snapshotId, int decrementBy) { }); } + @VisibleForTesting Map getSnapshotToBeCheckedForOrphans() { return snapshotToBeCheckedForOrphans; } @@ -315,7 +316,8 @@ private void init(OzoneConfiguration configuration, SnapshotChainManager chainMa incrementOrphanCheckCount(snapshotId); } long snapshotLocalDataManagerServiceInterval = configuration.getTimeDuration( - OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL, OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL_DEFAULT, + OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL, + OZONE_OM_SNAPSHOT_LOCAL_DATA_MANAGER_SERVICE_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS); if (snapshotLocalDataManagerServiceInterval > 0) { this.scheduler = new Scheduler(LOCAL_DATA_MANAGER_SERVICE_NAME, true, 1); @@ -344,6 +346,7 @@ private void 
checkOrphanSnapshotVersions(OMMetadataManager metadataManager, Snap @VisibleForTesting void checkOrphanSnapshotVersions(OMMetadataManager metadataManager, SnapshotChainManager chainManager, UUID snapshotId) throws IOException { + LOG.info("Checking orphan snapshot versions for snapshot {}", snapshotId); try (WritableOmSnapshotLocalDataProvider snapshotLocalDataProvider = new WritableOmSnapshotLocalDataProvider( snapshotId)) { OmSnapshotLocalData snapshotLocalData = snapshotLocalDataProvider.getSnapshotLocalData(); @@ -361,6 +364,9 @@ void checkOrphanSnapshotVersions(OMMetadataManager metadataManager, SnapshotChai && ((versionEntry.getVersion() != 0 && versionEntry.getVersion() != snapshotLocalData.getVersion()) || isSnapshotPurged); if (toRemove) { + LOG.info("Removing snapshot Id : {} version: {} from local data, snapshotLocalDataVersion : {}, " + + "snapshotPurged: {}, inDegree : {}", snapshotId, versionEntry.getVersion(), + snapshotLocalData.getVersion(), isSnapshotPurged, localDataGraph.inDegree(versionEntry)); snapshotLocalDataProvider.removeVersion(versionEntry.getVersion()); } } finally { @@ -857,8 +863,8 @@ private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions, snapshotVersions.getPreviousSnapshotId())) { incrementOrphanCheckCount(existingSnapVersions.getPreviousSnapshotId()); } - // If the transactionInfo set this means the snapshot has been purged and the entire yaml file could have - // become an orphan if the version is also updated it + // If the transactionInfo set, this means the snapshot has been purged and the entire YAML file could have + // become an orphan. Otherwise if the version is updated it // could mean that there could be some orphan version present within the // same snapshot. 
if (transactionInfoSet || existingSnapVersions.getVersion() != snapshotVersions.getVersion()) { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java index 912be56c4bd5..60f2cdac1801 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshotLocalDataManager.java @@ -512,7 +512,7 @@ public void testOrphanVersionDeletionWithChainUpdate(boolean purgeSnapshot) thro assertTrue(localDataManager.getSnapshotToBeCheckedForOrphans().containsKey(secondSnapId)); localDataManager.checkOrphanSnapshotVersions(omMetadataManager, null, secondSnapId); if (purgeSnapshot) { - NoSuchFileException e = assertThrows(NoSuchFileException.class, + assertThrows(NoSuchFileException.class, () -> localDataManager.getOmSnapshotLocalData(secondSnapId)); assertFalse(localDataManager.getVersionNodeMap().containsKey(secondSnapId)); } else { From b0b6d6a3a6b3eb00d253f4035934991326aa2ca9 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sat, 1 Nov 2025 00:31:51 -0400 Subject: [PATCH 87/97] HDDS-13785. 
Address review comments Change-Id: I5fb8bcc7beb9941e67df0d02a40b09e30a4c3880 --- .../ozone/om/snapshot/OmSnapshotLocalDataManager.java | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index e6e940290987..0ae6174974a8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -372,7 +372,11 @@ void checkOrphanSnapshotVersions(OMMetadataManager metadataManager, SnapshotChai } finally { internalLock.readLock().unlock(); } - + } + // If Snapshot is purged but not flushed completely to disk then this needs to wait for the next iteration + // which can be done by incrementing the orphan check count for the snapshotId. + if (!snapshotLocalData.getVersionSstFileInfos().isEmpty() && snapshotLocalData.getTransactionInfo() != null) { + incrementOrphanCheckCount(snapshotId); } snapshotLocalDataProvider.commit(); } From a7598071309691dd91c496999d91c59380d55cda Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sat, 1 Nov 2025 00:36:00 -0400 Subject: [PATCH 88/97] HDDS-13785. 
Change catch exception Change-Id: I30f586045a567b0e0e3200a449d7627d96f59dd5 --- .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 0ae6174974a8..e08ee2830947 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -325,7 +325,7 @@ private void init(OzoneConfiguration configuration, SnapshotChainManager chainMa () -> { try { checkOrphanSnapshotVersions(omMetadataManager, chainManager); - } catch (IOException e) { + } catch (Exception e) { LOG.error("Exception while checking orphan snapshot versions", e); } }, snapshotLocalDataManagerServiceInterval, snapshotLocalDataManagerServiceInterval, TimeUnit.MILLISECONDS); From 808b17484d985fc25c670a77eb0920a685afcd2b Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sat, 1 Nov 2025 00:38:09 -0400 Subject: [PATCH 89/97] HDDS-13785. 
Address review comments Change-Id: Ia13da4aa1fb25768b0dac64fd4203ed1ca596284 --- .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index e08ee2830947..33c43eda0234 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -820,7 +820,7 @@ public synchronized void commit() throws IOException { Files.move(tmpFile.toPath(), Paths.get(filePath), StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING); } else if (snapshotLocalDataFile.exists()) { - LOG.info("Deleting Yaml file corresponding to snapshotId: {} in path : {}", + LOG.info("Deleting YAML file corresponding to snapshotId: {} in path : {}", super.snapshotId, snapshotLocalDataFile.getAbsolutePath()); if (!snapshotLocalDataFile.delete()) { throw new IOException("Unable to delete file " + snapshotLocalDataFile.getAbsolutePath()); From c829a8be16375e8ca863bdae84278c4f31861507 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sat, 1 Nov 2025 01:04:59 -0400 Subject: [PATCH 90/97] HDDS-13830. 
Fix test Change-Id: I88f541c712d847515f6ef5b1d094cc5168936966 --- .../hadoop/ozone/client/OzoneSnapshot.java | 18 ++++++++++++++++++ .../hadoop/ozone/client/TestOzoneSnapshot.java | 4 +++- 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java index 95f05a50e064..360fd4cef6da 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshot.java @@ -222,4 +222,22 @@ public int hashCode() { return Objects.hash(volumeName, bucketName, name, creationTime, snapshotStatus, snapshotId, snapshotPath, checkpointDir, referencedSize, referencedReplicatedSize, exclusiveSize, exclusiveReplicatedSize); } + + @Override + public String toString() { + return "OzoneSnapshot{" + + "bucketName='" + bucketName + '\'' + + ", volumeName='" + volumeName + '\'' + + ", name='" + name + '\'' + + ", creationTime=" + creationTime + + ", snapshotStatus=" + snapshotStatus + + ", snapshotId=" + snapshotId + + ", snapshotPath='" + snapshotPath + '\'' + + ", checkpointDir='" + checkpointDir + '\'' + + ", referencedSize=" + referencedSize + + ", referencedReplicatedSize=" + referencedReplicatedSize + + ", exclusiveSize=" + exclusiveSize + + ", exclusiveReplicatedSize=" + exclusiveReplicatedSize + + '}'; + } } diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java index 16cf58ab5a2c..0ddacef84acf 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_SEPARATOR; 
import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.when; import java.util.UUID; @@ -41,6 +42,7 @@ private SnapshotInfo getMockedSnapshotInfo(UUID snapshotId) { when(snapshotInfo.getSnapshotStatus()).thenReturn(SNAPSHOT_ACTIVE); when(snapshotInfo.getSnapshotId()).thenReturn(snapshotId); when(snapshotInfo.getSnapshotPath()).thenReturn("volume/bucket"); + when(snapshotInfo.getCheckpointDirName(eq(0))).thenReturn("checkpointDir"); when(snapshotInfo.getReferencedSize()).thenReturn(1000L); when(snapshotInfo.getReferencedReplicatedSize()).thenReturn(3000L); when(snapshotInfo.getExclusiveSize()).thenReturn(4000L); @@ -57,7 +59,7 @@ public void testOzoneSnapshotFromSnapshotInfo() { OzoneSnapshot ozoneSnapshot = OzoneSnapshot.fromSnapshotInfo(snapshotInfo); OzoneSnapshot expectedOzoneSnapshot = new OzoneSnapshot( "volume", "bucket", "snap", 1000L, SNAPSHOT_ACTIVE, snapshotId, - "volume/bucket", OM_SNAPSHOT_SEPARATOR + snapshotId, 1000L, 3000L, 6000L, 18000L); + "volume/bucket", "checkpointDir", 1000L, 3000L, 6000L, 18000L); assertEquals(expectedOzoneSnapshot, ozoneSnapshot); } } From bfd341c692f86f765477f119c149ce267c7fcdd3 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sat, 1 Nov 2025 01:09:55 -0400 Subject: [PATCH 91/97] HDDS-13830. 
Fix test Change-Id: I5541c7f931bb0b60af238102b5afc8086be53a6c --- .../java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java index 0ddacef84acf..028e937a9c2e 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneSnapshot.java @@ -17,7 +17,6 @@ package org.apache.hadoop.ozone.client; -import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_SEPARATOR; import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.ArgumentMatchers.eq; From 4ccd3fcf3b37a919c946e37e78380ebbd74b7137 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sat, 1 Nov 2025 07:35:23 -0400 Subject: [PATCH 92/97] HDDS-13830. 
Fix test Change-Id: I7077460d4b9d87d460244e3df51d51f078a83970 --- .../org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java | 1 - .../interface-client/src/main/proto/OmClientProtocol.proto | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java index 7750b32e2e0a..64946e52e886 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java @@ -85,7 +85,6 @@ private OzoneManagerProtocolProtos.SnapshotInfo createSnapshotInfoProto() { .setPathPreviousSnapshotID(toProtobuf(PATH_PREVIOUS_SNAPSHOT_ID)) .setGlobalPreviousSnapshotID(toProtobuf(GLOBAL_PREVIOUS_SNAPSHOT_ID)) .setSnapshotPath(SNAPSHOT_PATH) - .setCheckpointDir(CHECKPOINT_DIR) .setDbTxSequenceNumber(DB_TX_SEQUENCE_NUMBER) .setDeepClean(false) .setSstFiltered(false) diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index 61a3c1d6792e..1e5675f612e6 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -880,7 +880,7 @@ message SnapshotInfo { optional hadoop.hdds.UUID pathPreviousSnapshotID = 8; optional hadoop.hdds.UUID globalPreviousSnapshotID = 9; optional string snapshotPath = 10; - optional string checkpointDir = 11; + optional string checkpointDir = 11 [deprecated = true]; optional int64 dbTxSequenceNumber = 12; optional bool deepClean = 13; optional bool sstFiltered = 14; From 41b7cfbcba3250090b6a3c3e0c54ea169f4705fa Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sat, 1 Nov 2025 17:22:32 -0400 Subject: [PATCH 93/97] HDDS-13830. 
Fix pmd Change-Id: If9cd8e82083f90997d7ce052408b33648d9b2c2a --- .../org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java index 64946e52e886..e7695debd619 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java @@ -45,7 +45,6 @@ public class TestOmSnapshotInfo { private static final UUID GLOBAL_PREVIOUS_SNAPSHOT_ID = PATH_PREVIOUS_SNAPSHOT_ID; private static final String SNAPSHOT_PATH = "test/path"; - private static final String CHECKPOINT_DIR = "checkpoint.testdir"; private static final long DB_TX_SEQUENCE_NUMBER = 12345L; private SnapshotInfo createSnapshotInfo() { From d0422aed82ef2a2b3189dbc3f7a7384b3f16ba95 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Sun, 2 Nov 2025 14:16:56 -0500 Subject: [PATCH 94/97] HDDS-13830. 
Fix mrge issue Change-Id: Ia02449ab5237ddb6021fa3a62ed66290c1dff83d --- .../om/TestOMDbCheckpointServletInodeBasedXfer.java | 4 ++-- .../om/OMDBCheckpointServletInodeBasedXfer.java | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServletInodeBasedXfer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServletInodeBasedXfer.java index f2b94182c809..a6ae3eaab21f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServletInodeBasedXfer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServletInodeBasedXfer.java @@ -240,7 +240,7 @@ public void write(int b) throws IOException { doCallRealMethod().when(omDbCheckpointServletMock) .transferSnapshotData(anySet(), any(), anySet(), any(), any(), anyMap()); doCallRealMethod().when(omDbCheckpointServletMock).createAndPrepareCheckpoint(anyBoolean()); - doCallRealMethod().when(omDbCheckpointServletMock).getSnapshotDirsFromDB(any()); + doCallRealMethod().when(omDbCheckpointServletMock).getSnapshotDirsFromDB(any(), any(), any()); } @ParameterizedTest @@ -748,7 +748,7 @@ private void setupClusterAndMocks(String volumeName, String bucketName, // Init the mock with the spyDbstore doCallRealMethod().when(omDbCheckpointServletMock).initialize(any(), any(), eq(false), any(), any(), eq(false)); - doCallRealMethod().when(omDbCheckpointServletMock).getSnapshotDirsFromDB(any()); + doCallRealMethod().when(omDbCheckpointServletMock).getSnapshotDirsFromDB(any(), any(), any()); omDbCheckpointServletMock.initialize(spyDbStore, om.getMetrics().getDBCheckpointMetrics(), false, om.getOmAdminUsernames(), om.getOmAdminGroups(), false); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java index 9d55e2203cf4..748329be83ae 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java @@ -71,6 +71,7 @@ import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.lock.BootstrapStateHandler; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager; import org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils; import org.apache.hadoop.security.UserGroupInformation; @@ -268,7 +269,8 @@ public void writeDbDataToStream(HttpServletRequest request, OutputStream destina // get the list of snapshots from the checkpoint try (OmMetadataManagerImpl checkpointMetadataManager = OmMetadataManagerImpl .createCheckpointMetadataManager(om.getConfiguration(), checkpoint)) { - snapshotPaths = getSnapshotDirsFromDB(checkpointMetadataManager); + snapshotPaths = getSnapshotDirsFromDB(omMetadataManager, checkpointMetadataManager, + snapshotLocalDataManager); } writeDBToArchive(sstFilesToExclude, getCompactionLogDir(), maxTotalSstSize, archiveOutputStream, tmpdir, hardLinkFileMap, false); @@ -402,11 +404,9 @@ Set getSnapshotDirsFromDB(OMMetadataManager activeOMMetadataManager, OMMet Table.KeyValue kv = iter.next(); SnapshotInfo snapshotInfo = kv.getValue(); try (OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataMetaProvider snapLocalMeta = - localDataManager.getOmSnapshotLocalDataMeta(snapInfo.getSnapshotId())) { - OmSnapshotManager.getSnapshotPath(getConf(), - snapshotInfo.getCheckpointDirName()); - Path snapshotDir = getSnapshotPath(activeOMMetadataManager, - snapInfo.getSnapshotId(), snapLocalMeta.getMeta().getVersion()); + 
localDataManager.getOmSnapshotLocalDataMeta(snapshotInfo.getSnapshotId())) { + Path snapshotDir = getSnapshotPath(activeOMMetadataManager, snapshotInfo.getSnapshotId(), + snapLocalMeta.getMeta().getVersion()); snapshotPaths.add(snapshotDir); } } From 018571c42d8ea7fb5f454f510aeb0b1a014f8bb7 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 5 Nov 2025 13:50:30 -0500 Subject: [PATCH 95/97] HDDS-13785. Address review comments Change-Id: Ied3c728c1f566bdd9327b6b5340892bed993cbf6 --- .../snapshot/OmSnapshotLocalDataManager.java | 54 ++++++++++++------- 1 file changed, 34 insertions(+), 20 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 21b79b0eb997..96f76e4b790c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -862,14 +862,44 @@ public synchronized void commit() throws IOException { throw new IOException("Unable to delete file " + snapshotLocalDataFile.getAbsolutePath()); } } - upsertNode(super.snapshotId, localDataVersionNodes, getSnapshotLocalData().getTransactionInfo() != null); + SnapshotVersionsMeta previousVersionMeta = upsertNode(super.snapshotId, localDataVersionNodes); + checkForOphanVersionsAndIncrementCount(super.snapshotId, previousVersionMeta, localDataVersionNodes, + getSnapshotLocalData().getTransactionInfo() != null); // Reset dirty bit resetDirty(); } } - private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions, - boolean transactionInfoSet) throws IOException { + private void checkForOphanVersionsAndIncrementCount(UUID snapshotId, SnapshotVersionsMeta previousVersionsMeta, + SnapshotVersionsMeta currentVersionMeta, boolean 
transactionInfoSet) { + internalLock.readLock().lock(); + try { + if (previousVersionsMeta != null) { + Map currentVersionNodeMap = currentVersionMeta.getSnapshotVersions(); + Map previousVersionNodeMap = previousVersionsMeta.getSnapshotVersions(); + boolean versionsRemoved = previousVersionNodeMap.keySet().stream() + .anyMatch(version -> !currentVersionNodeMap.containsKey(version)); + + // The previous snapshotId could have become an orphan entry or could have orphan versions.(In case of + // version removals) + if (versionsRemoved || !Objects.equals(previousVersionsMeta.getPreviousSnapshotId(), + currentVersionMeta.getPreviousSnapshotId())) { + incrementOrphanCheckCount(previousVersionsMeta.getPreviousSnapshotId()); + } + // If the transactionInfo set, this means the snapshot has been purged and the entire YAML file could have + // become an orphan. Otherwise if the version is updated it + // could mean that there could be some orphan version present within the + // same snapshot. + if (transactionInfoSet || previousVersionsMeta.getVersion() != currentVersionMeta.getVersion()) { + incrementOrphanCheckCount(snapshotId); + } + } + } finally { + internalLock.readLock().unlock(); + } + } + + private SnapshotVersionsMeta upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions) throws IOException { internalLock.writeLock().lock(); try { SnapshotVersionsMeta existingSnapVersions = getVersionNodeMap().remove(snapshotId); @@ -877,14 +907,12 @@ private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions, existingSnapVersions.getSnapshotVersions(); Map newVersions = snapshotVersions.getSnapshotVersions(); Map> predecessors = new HashMap<>(); - boolean versionsRemoved = false; // Track all predecessors of the existing versions and remove the node from the graph. 
for (Map.Entry existingVersion : existingVersions.entrySet()) { LocalDataVersionNode existingVersionNode = existingVersion.getValue(); // Create a copy of predecessors since the list of nodes returned would be a mutable set and it changes as the // nodes in the graph would change. predecessors.put(existingVersion.getKey(), new ArrayList<>(localDataGraph.predecessors(existingVersionNode))); - versionsRemoved = versionsRemoved || !newVersions.containsKey(existingVersion.getKey()); localDataGraph.removeNode(existingVersionNode); } @@ -896,21 +924,7 @@ private void upsertNode(UUID snapshotId, SnapshotVersionsMeta snapshotVersions, localDataGraph.putEdge(predecessor, entry.getValue()); } } - if (existingSnapVersions != null) { - // The previous snapshotId could have become an orphan entry or could have orphan versions.(In case of - // version removals) - if (versionsRemoved || !Objects.equals(existingSnapVersions.getPreviousSnapshotId(), - snapshotVersions.getPreviousSnapshotId())) { - incrementOrphanCheckCount(existingSnapVersions.getPreviousSnapshotId()); - } - // If the transactionInfo set, this means the snapshot has been purged and the entire YAML file could have - // become an orphan. Otherwise if the version is updated it - // could mean that there could be some orphan version present within the - // same snapshot. - if (transactionInfoSet || existingSnapVersions.getVersion() != snapshotVersions.getVersion()) { - incrementOrphanCheckCount(snapshotId); - } - } + return existingSnapVersions; } finally { internalLock.writeLock().unlock(); } From 6cd54dda954ba9088521bad5711c991297d9af70 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 5 Nov 2025 13:53:17 -0500 Subject: [PATCH 96/97] HDDS-13785. 
Address review comments Change-Id: I1105c70ad060e8a344d042eadc3fa65b1469b716 --- .../hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 96f76e4b790c..20cefc4f44b1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -871,7 +871,7 @@ public synchronized void commit() throws IOException { } private void checkForOphanVersionsAndIncrementCount(UUID snapshotId, SnapshotVersionsMeta previousVersionsMeta, - SnapshotVersionsMeta currentVersionMeta, boolean transactionInfoSet) { + SnapshotVersionsMeta currentVersionMeta, boolean isPurgeTransactionSet) { internalLock.readLock().lock(); try { if (previousVersionsMeta != null) { @@ -890,7 +890,7 @@ private void checkForOphanVersionsAndIncrementCount(UUID snapshotId, SnapshotVer // become an orphan. Otherwise if the version is updated it // could mean that there could be some orphan version present within the // same snapshot. - if (transactionInfoSet || previousVersionsMeta.getVersion() != currentVersionMeta.getVersion()) { + if (isPurgeTransactionSet || previousVersionsMeta.getVersion() != currentVersionMeta.getVersion()) { incrementOrphanCheckCount(snapshotId); } } From 261a6698e95bc794c8f8e1e2648e34bc69162140 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran Date: Wed, 5 Nov 2025 14:16:02 -0500 Subject: [PATCH 97/97] HDDS-13785. 
Remove unnecessary read lock Change-Id: Idc77f8a9a9a6cecb43b69d5532cf1bb2c679ce78 --- .../snapshot/OmSnapshotLocalDataManager.java | 41 ++++++++----------- 1 file changed, 18 insertions(+), 23 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java index 20cefc4f44b1..70955fa05783 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotLocalDataManager.java @@ -872,30 +872,25 @@ public synchronized void commit() throws IOException { private void checkForOphanVersionsAndIncrementCount(UUID snapshotId, SnapshotVersionsMeta previousVersionsMeta, SnapshotVersionsMeta currentVersionMeta, boolean isPurgeTransactionSet) { - internalLock.readLock().lock(); - try { - if (previousVersionsMeta != null) { - Map currentVersionNodeMap = currentVersionMeta.getSnapshotVersions(); - Map previousVersionNodeMap = previousVersionsMeta.getSnapshotVersions(); - boolean versionsRemoved = previousVersionNodeMap.keySet().stream() - .anyMatch(version -> !currentVersionNodeMap.containsKey(version)); - - // The previous snapshotId could have become an orphan entry or could have orphan versions.(In case of - // version removals) - if (versionsRemoved || !Objects.equals(previousVersionsMeta.getPreviousSnapshotId(), - currentVersionMeta.getPreviousSnapshotId())) { - incrementOrphanCheckCount(previousVersionsMeta.getPreviousSnapshotId()); - } - // If the transactionInfo set, this means the snapshot has been purged and the entire YAML file could have - // become an orphan. Otherwise if the version is updated it - // could mean that there could be some orphan version present within the - // same snapshot. 
- if (isPurgeTransactionSet || previousVersionsMeta.getVersion() != currentVersionMeta.getVersion()) { - incrementOrphanCheckCount(snapshotId); - } + if (previousVersionsMeta != null) { + Map currentVersionNodeMap = currentVersionMeta.getSnapshotVersions(); + Map previousVersionNodeMap = previousVersionsMeta.getSnapshotVersions(); + boolean versionsRemoved = previousVersionNodeMap.keySet().stream() + .anyMatch(version -> !currentVersionNodeMap.containsKey(version)); + + // The previous snapshotId could have become an orphan entry or could have orphan versions.(In case of + // version removals) + if (versionsRemoved || !Objects.equals(previousVersionsMeta.getPreviousSnapshotId(), + currentVersionMeta.getPreviousSnapshotId())) { + incrementOrphanCheckCount(previousVersionsMeta.getPreviousSnapshotId()); + } + // If the transactionInfo set, this means the snapshot has been purged and the entire YAML file could have + // become an orphan. Otherwise if the version is updated it + // could mean that there could be some orphan version present within the + // same snapshot. + if (isPurgeTransactionSet || previousVersionsMeta.getVersion() != currentVersionMeta.getVersion()) { + incrementOrphanCheckCount(snapshotId); } - } finally { - internalLock.readLock().unlock(); } }