+ for (Map.Entry<String, List<String>> entry : hardlinkMap.entrySet()) {
+ Collections.sort(entry.getValue());
+ }
+ }
+
+ private String getSnapshotDBPath(String checkPointDir) {
+ return OMStorage.getOmDbDir(cluster.getConf()) +
+ OM_KEY_PREFIX + OM_SNAPSHOT_CHECKPOINT_DIR + OM_KEY_PREFIX +
+ OM_DB_NAME + checkPointDir;
+ }
+
+ private static String getInode(String inodeAndMtime) {
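+ // fileId is "<inode>-<mtime>" as produced by
+ // OmSnapshotUtils.getFileInodeAndLastModifiedTimeString; keep only the inode part.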
+ return inodeAndMtime.split("-")[0];
+ }
+
+ private void writeData(String volumeName, String bucketName, boolean includeSnapshots) throws Exception {
+ OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName);
+ for (int i = 0; i < 10; i++) {
+ TestDataUtil.createKey(bucket, "key" + i,
+ ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.ONE),
+ "sample".getBytes(StandardCharsets.UTF_8));
+ om.getMetadataManager().getStore().flushDB();
+ }
+ if (includeSnapshots) {
+ TestDataUtil.createKey(bucket, "keysnap1",
+ ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.ONE),
+ "sample".getBytes(StandardCharsets.UTF_8));
+ TestDataUtil.createKey(bucket, "keysnap2",
+ ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, ReplicationFactor.ONE),
+ "sample".getBytes(StandardCharsets.UTF_8));
+ client.getObjectStore().createSnapshot(volumeName, bucketName, "snapshot10");
+ client.getObjectStore().createSnapshot(volumeName, bucketName, "snapshot20");
+ }
+ }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java
new file mode 100644
index 000000000000..3fe5aca7a919
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServletInodeBasedXfer.java
@@ -0,0 +1,436 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import static org.apache.hadoop.hdds.utils.Archiver.includeFile;
+import static org.apache.hadoop.hdds.utils.Archiver.linkAndIncludeFile;
+import static org.apache.hadoop.hdds.utils.Archiver.tar;
+import static org.apache.hadoop.hdds.utils.HddsServerUtil.includeRatisSnapshotCompleteFlag;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_CHECKPOINT_DIR;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_TO_EXCLUDE_SST;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_KEY;
+import static org.apache.hadoop.ozone.om.snapshot.OMDBCheckpointUtils.includeSnapshotData;
+import static org.apache.hadoop.ozone.om.snapshot.OMDBCheckpointUtils.logEstimatedTarballSize;
+import static org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils.DATA_PREFIX;
+import static org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils.DATA_SUFFIX;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.nio.file.StandardOpenOption;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.Stream;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import org.apache.commons.compress.archivers.ArchiveOutputStream;
+import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.recon.ReconConfig;
+import org.apache.hadoop.hdds.utils.DBCheckpointServlet;
+import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.lock.BootstrapStateHandler;
+import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
+import org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.Time;
+import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Specialized OMDBCheckpointServlet implementation that transfers Ozone Manager
+ * database checkpoints using inode-based deduplication.
+ *
+ * This servlet constructs checkpoint archives by examining file inodes,
+ * ensuring that files with the same inode (i.e., hardlinks or duplicates)
+ * are only transferred once. It maintains mappings from inode IDs to file
+ * paths, manages hardlink information, and enforces snapshot and SST file
+ * size constraints as needed.
+ *
+ * This approach optimizes checkpoint streaming by reducing redundant data
+ * transfer, especially in environments where RocksDB and snapshotting result
+ * in multiple hardlinks to the same physical data.
+ */
+public class OMDBCheckpointServletInodeBasedXfer extends DBCheckpointServlet {
+
+ protected static final Logger LOG =
+ LoggerFactory.getLogger(OMDBCheckpointServletInodeBasedXfer.class);
+ private static final long serialVersionUID = 1L;
+
+ @Override
+ public void init() throws ServletException {
+ OzoneManager om = (OzoneManager) getServletContext()
+ .getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE);
+
+ if (om == null) {
+ LOG.error("Unable to initialize OMDBCheckpointServlet. OM is null");
+ return;
+ }
+
+ OzoneConfiguration conf = getConf();
+ // Only Ozone Admins and Recon are allowed
+ Collection<String> allowedUsers =
+ new LinkedHashSet<>(om.getOmAdminUsernames());
+ Collection<String> allowedGroups = om.getOmAdminGroups();
+ ReconConfig reconConfig = conf.getObject(ReconConfig.class);
+ String reconPrincipal = reconConfig.getKerberosPrincipal();
+ if (!reconPrincipal.isEmpty()) {
+ UserGroupInformation ugi =
+ UserGroupInformation.createRemoteUser(reconPrincipal);
+ allowedUsers.add(ugi.getShortUserName());
+ }
+
+ initialize(om.getMetadataManager().getStore(),
+ om.getMetrics().getDBCheckpointMetrics(),
+ om.getAclsEnabled(),
+ allowedUsers,
+ allowedGroups,
+ om.isSpnegoEnabled());
+ }
+
+ @Override
+ public void processMetadataSnapshotRequest(HttpServletRequest request, HttpServletResponse response,
+ boolean isFormData, boolean flush) {
+ List<String> excludedSstList = new ArrayList<>();
+ String[] sstParam = isFormData ?
+ parseFormDataParameters(request) : request.getParameterValues(
+ OZONE_DB_CHECKPOINT_REQUEST_TO_EXCLUDE_SST);
+ Set<String> receivedSstFiles = extractSstFilesToExclude(sstParam);
+ Path tmpdir = null;
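+ // Hold the bootstrap state lock for the whole transfer so background services
+ // do not modify checkpoint-related files while they are being streamed.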
+ try (BootstrapStateHandler.Lock lock = getBootstrapStateLock().lock()) {
+ tmpdir = Files.createTempDirectory(getBootstrapTempData().toPath(),
+ "bootstrap-data-");
+ if (tmpdir == null) {
+ throw new IOException("tmp dir is null");
+ }
+ String tarName = "om.data-" + System.currentTimeMillis() + ".tar";
+ response.setContentType("application/x-tar");
+ response.setHeader("Content-Disposition", "attachment; filename=\"" + tarName + "\"");
+ Instant start = Instant.now();
+ writeDbDataToStream(request, response.getOutputStream(), receivedSstFiles, tmpdir);
+ Instant end = Instant.now();
+ long duration = Duration.between(start, end).toMillis();
+ LOG.info("Time taken to write the checkpoint to response output " +
+ "stream: {} milliseconds", duration);
+ logSstFileList(excludedSstList,
+ "Excluded {} SST files from the latest checkpoint{}: {}", 5);
+ } catch (Exception e) {
+ LOG.error("Unable to process metadata snapshot request.", e);
+ response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
+ } finally {
+ try {
+ if (tmpdir != null) {
+ FileUtils.deleteDirectory(tmpdir.toFile());
+ }
+ } catch (IOException e) {
+ LOG.error("unable to delete: " + tmpdir, e.toString());
+ }
+ }
+ }
+
+ Path getSstBackupDir() {
+ RocksDBCheckpointDiffer differ = getDbStore().getRocksDBCheckpointDiffer();
+ return new File(differ.getSSTBackupDir()).toPath();
+ }
+
+ Path getCompactionLogDir() {
+ RocksDBCheckpointDiffer differ = getDbStore().getRocksDBCheckpointDiffer();
+ return new File(differ.getCompactionLogDir()).toPath();
+ }
+
+ /**
+ * Streams the Ozone Manager database checkpoint and (optionally) snapshot-related data
+ * as a tar archive to the provided output stream. This method handles deduplication
+ * based on file inodes to avoid transferring duplicate files (such as hardlinks),
+ * supports excluding specific SST files, enforces maximum total SST file size limits,
+ * and manages temporary directories for processing.
+ *
+ * The method processes snapshot directories and backup/compaction logs (if requested),
+ * then finally the active OM database. It also writes a hardlink mapping file
+ * and includes a completion flag for Ratis snapshot streaming.
+ *
+ * @param request The HTTP servlet request containing parameters for the snapshot.
+ * @param destination The output stream to which the tar archive is written.
+ * @param sstFilesToExclude Set of SST file identifiers to exclude from the archive.
+ * @param tmpdir Temporary directory for staging files during archiving.
+ * @throws IOException if an I/O error occurs during processing or streaming.
+ */
+ public void writeDbDataToStream(HttpServletRequest request, OutputStream destination,
+ Set<String> sstFilesToExclude, Path tmpdir) throws IOException {
+ DBCheckpoint checkpoint = null;
+ OzoneManager om = (OzoneManager) getServletContext().getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE);
+ OMMetadataManager omMetadataManager = om.getMetadataManager();
+ boolean includeSnapshotData = includeSnapshotData(request);
+ AtomicLong maxTotalSstSize = new AtomicLong(getConf().getLong(OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_KEY,
+ OZONE_OM_RATIS_SNAPSHOT_MAX_TOTAL_SST_SIZE_DEFAULT));
+
+ Set<Path> snapshotPaths = Collections.emptySet();
+
+ if (!includeSnapshotData) {
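+ // Without snapshot data the checkpoint is sent in a single batch,
+ // so the SST size cap is disabled.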
+ maxTotalSstSize.set(Long.MAX_VALUE);
+ } else {
+ snapshotPaths = getSnapshotDirs(omMetadataManager);
+ }
+
+ if (sstFilesToExclude.isEmpty()) {
+ logEstimatedTarballSize(getDbStore().getDbLocation().toPath(), snapshotPaths);
+ }
+
+ boolean shouldContinue = true;
+
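+ // Maps absolute file path -> "<inode>-<mtime>" file id; consumed by
+ // writeHardlinkFile at the end of the transfer.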
+ Map<String, String> hardLinkFileMap = new HashMap<>();
+ try (ArchiveOutputStream<TarArchiveEntry> archiveOutputStream = tar(destination)) {
+ if (includeSnapshotData) {
+ // Process each snapshot db path and write it to archive
+ for (Path snapshotDbPath : snapshotPaths) {
+ if (!shouldContinue) {
+ break;
+ }
+ shouldContinue = writeDBToArchive(sstFilesToExclude, snapshotDbPath,
+ maxTotalSstSize, archiveOutputStream, tmpdir, hardLinkFileMap);
+ }
+
+ if (shouldContinue) {
+ shouldContinue = writeDBToArchive(sstFilesToExclude, getSstBackupDir(),
+ maxTotalSstSize, archiveOutputStream, tmpdir, hardLinkFileMap);
+ }
+
+ if (shouldContinue) {
+ shouldContinue = writeDBToArchive(sstFilesToExclude, getCompactionLogDir(),
+ maxTotalSstSize, archiveOutputStream, tmpdir, hardLinkFileMap);
+ }
+ }
+
+ if (shouldContinue) {
+ // We have finished transferring files from the snapshot DBs by now;
+ // this last step transfers the contents of the active om.db.
+ checkpoint = createAndPrepareCheckpoint(tmpdir, true);
+ // unlimited files as we want the Active DB contents to be transferred in a single batch
+ maxTotalSstSize.set(Long.MAX_VALUE);
+ Path checkpointDir = checkpoint.getCheckpointLocation();
+ writeDBToArchive(sstFilesToExclude, checkpointDir,
+ maxTotalSstSize, archiveOutputStream, tmpdir, hardLinkFileMap);
+ if (includeSnapshotData) {
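+ // Archive the tmp copies of the compaction log and SST backup dirs taken at
+ // checkpoint time; passing the real dirs as destDir records the entries
+ // under their original paths.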
+ Path tmpCompactionLogDir = tmpdir.resolve(getCompactionLogDir().getFileName());
+ Path tmpSstBackupDir = tmpdir.resolve(getSstBackupDir().getFileName());
+ writeDBToArchive(sstFilesToExclude, tmpCompactionLogDir, maxTotalSstSize, archiveOutputStream, tmpdir,
+ hardLinkFileMap, getCompactionLogDir());
+ writeDBToArchive(sstFilesToExclude, tmpSstBackupDir, maxTotalSstSize, archiveOutputStream, tmpdir,
+ hardLinkFileMap, getSstBackupDir());
+ }
+ writeHardlinkFile(getConf(), hardLinkFileMap, archiveOutputStream);
+ includeRatisSnapshotCompleteFlag(archiveOutputStream);
+ }
+
+ } catch (IOException ioe) {
+ LOG.error("got exception writing to archive " + ioe);
+ throw ioe;
+ } finally {
+ cleanupCheckpoint(checkpoint);
+ }
+ }
+
+ private boolean writeDBToArchive(Set<String> sstFilesToExclude, Path dir,
+ AtomicLong maxTotalSstSize, ArchiveOutputStream<TarArchiveEntry> archiveOutputStream,
+ Path tmpdir, Map<String, String> hardLinkFileMap) throws IOException {
+ return writeDBToArchive(sstFilesToExclude, dir, maxTotalSstSize,
+ archiveOutputStream, tmpdir, hardLinkFileMap, null);
+ }
+
+ private static void cleanupCheckpoint(DBCheckpoint checkpoint) {
+ if (checkpoint != null) {
+ try {
+ checkpoint.cleanupCheckpoint();
+ } catch (IOException e) {
+ LOG.error("Error trying to clean checkpoint at {} .",
+ checkpoint.getCheckpointLocation().toString());
+ }
+ }
+ }
+
+ /**
+ * Writes a hardlink mapping file to the archive, which maps file IDs to their
+ * relative paths. This method generates the mapping file based on the provided
+ * hardlink metadata and adds it to the archive output stream.
+ *
+ * @param conf Ozone configuration for the OM instance.
+ * @param hardlinkFileMap A map where the key is the absolute file path
+ * and the value is its corresponding file ID.
+ * @param archiveOutputStream The archive output stream to which the hardlink
+ * file should be written.
+ * @throws IOException If an I/O error occurs while creating or writing the
+ * hardlink file.
+ */
+ private static void writeHardlinkFile(OzoneConfiguration conf, Map<String, String> hardlinkFileMap,
+ ArchiveOutputStream<TarArchiveEntry> archiveOutputStream) throws IOException {
+ Path data = Files.createTempFile(DATA_PREFIX, DATA_SUFFIX);
+ Path metaDirPath = OMStorage.getOmDbDir(conf).toPath();
+ StringBuilder sb = new StringBuilder();
+
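+ // Each line of the hardlink file: "<path relative to the OM metadata dir>\t<inode>-<mtime>".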
+ for (Map.Entry<String, String> entry : hardlinkFileMap.entrySet()) {
+ Path p = Paths.get(entry.getKey());
+ String fileId = entry.getValue();
+ Path relativePath = metaDirPath.relativize(p);
+ // If the file is in the "om.db" directory, strip the "om.db" prefix from the
+ // path and keep only the file name, since on the follower the file is created
+ // at the top level of the untarred directory.
+ if (relativePath.startsWith(OM_DB_NAME)) {
+ relativePath = relativePath.getFileName();
+ }
+ sb.append(relativePath).append('\t').append(fileId).append('\n');
+ }
+ Files.write(data, sb.toString().getBytes(StandardCharsets.UTF_8), StandardOpenOption.TRUNCATE_EXISTING);
+ includeFile(data.toFile(), OmSnapshotManager.OM_HARDLINK_FILE, archiveOutputStream);
+ }
+
+ /**
+ * Gets the configuration from the OzoneManager context.
+ *
+ * @return OzoneConfiguration instance
+ */
+ private OzoneConfiguration getConf() {
+ return ((OzoneManager) getServletContext()
+ .getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE))
+ .getConfiguration();
+ }
+
+ /**
+ * Collects paths to all snapshot databases.
+ *
+ * @param omMetadataManager OMMetadataManager instance
+ * @return Set of paths to snapshot databases
+ * @throws IOException if an I/O error occurs
+ */
+ Set<Path> getSnapshotDirs(OMMetadataManager omMetadataManager) throws IOException {
+ Set<Path> snapshotPaths = new HashSet<>();
+ SnapshotChainManager snapshotChainManager = new SnapshotChainManager(omMetadataManager);
+ for (SnapshotChainInfo snapInfo : snapshotChainManager.getGlobalSnapshotChain().values()) {
+ String snapshotDir =
+ OmSnapshotManager.getSnapshotPath(getConf(), SnapshotInfo.getCheckpointDirName(snapInfo.getSnapshotId()));
+ Path path = Paths.get(snapshotDir);
+ snapshotPaths.add(path);
+ }
+ return snapshotPaths;
+ }
+
+ /**
+ * Writes database files to the archive, handling deduplication based on inode IDs.
+ * Here dbDir can be a snapshot DB directory, the active om.db checkpoint,
+ * the compaction log dir, or the SST backup dir.
+ *
+ * @param sstFilesToExclude Set of SST file IDs to exclude from the archive
+ * @param dbDir Directory containing database files to archive
+ * @param maxTotalSstSize Maximum total size of SST files to include
+ * @param archiveOutputStream Archive output stream
+ * @param tmpDir Temporary directory for processing
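+ * @param hardLinkFileMap Map populated with absolute-file-path to file-id entries
+ * @param destDir Real directory the entries belong to when dbDir is a temporary copy; null otherwise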
+ * @return true if processing should continue, false if size limit reached
+ * @throws IOException if an I/O error occurs
+ */
+ private boolean writeDBToArchive(Set<String> sstFilesToExclude, Path dbDir, AtomicLong maxTotalSstSize,
+ ArchiveOutputStream<TarArchiveEntry> archiveOutputStream, Path tmpDir,
+ Map<String, String> hardLinkFileMap, Path destDir) throws IOException {
+ if (!Files.exists(dbDir)) {
+ LOG.warn("DB directory {} does not exist. Skipping.", dbDir);
+ return true;
+ }
+ long bytesWritten = 0L;
+ int filesWritten = 0;
+ long lastLoggedTime = Time.monotonicNow();
+ try (Stream<Path> files = Files.list(dbDir)) {
+ Iterable<Path> iterable = files::iterator;
+ for (Path dbFile : iterable) {
+ if (!Files.isDirectory(dbFile)) {
+ String fileId = OmSnapshotUtils.getFileInodeAndLastModifiedTimeString(dbFile);
+ String path = dbFile.toFile().getAbsolutePath();
+ if (destDir != null) {
+ path = destDir.resolve(dbFile.getFileName()).toString();
+ }
+ // if the file is in the om checkpoint dir, then we need to change the path to point to the OM DB.
+ if (path.contains(OM_CHECKPOINT_DIR)) {
+ path = getDbStore().getDbLocation().toPath().resolve(dbFile.getFileName()).toAbsolutePath().toString();
+ }
+ hardLinkFileMap.put(path, fileId);
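+ // Stream the bytes only once per unique inode+mtime; duplicates are
+ // restored as hardlinks on the follower.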
+ if (!sstFilesToExclude.contains(fileId)) {
+ long fileSize = Files.size(dbFile);
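+ // Size budget exhausted: stop here; the follower re-requests, excluding
+ // the file ids it already received.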
+ if (maxTotalSstSize.get() - fileSize <= 0) {
+ return false;
+ }
+ bytesWritten += linkAndIncludeFile(dbFile.toFile(), fileId, archiveOutputStream, tmpDir);
+ filesWritten++;
+ maxTotalSstSize.addAndGet(-fileSize);
+ sstFilesToExclude.add(fileId);
+ if (Time.monotonicNow() - lastLoggedTime >= 30000) {
+ LOG.info("Transferred {} KB, #files {} to checkpoint tarball stream...",
+ bytesWritten / (1024), filesWritten);
+ lastLoggedTime = Time.monotonicNow();
+ }
+ }
+ }
+ }
+ }
+ return true;
+ }
+
+ /**
+ * Creates a database checkpoint and copies compaction log and SST backup files
+ * into the given temporary directory.
+ * The copy to the temporary directory for compaction log and SST backup files
+ * is done to maintain a consistent view of the files in these directories.
+ *
+ * @param tmpdir Temporary directory for storing checkpoint-related files.
+ * @param flush If true, flushes in-memory data to disk before checkpointing.
+ * @return The created database checkpoint.
+ * @throws IOException If an error occurs during checkpoint creation or file copying.
+ */
+ private DBCheckpoint createAndPrepareCheckpoint(Path tmpdir, boolean flush) throws IOException {
+ // make tmp directories to contain the copies
+ Path tmpCompactionLogDir = tmpdir.resolve(getCompactionLogDir().getFileName());
+ Path tmpSstBackupDir = tmpdir.resolve(getSstBackupDir().getFileName());
+
+ // Create checkpoint and then copy the files so that it has all the compaction entries and files.
+ DBCheckpoint dbCheckpoint = getDbStore().getCheckpoint(flush);
+ FileUtils.copyDirectory(getCompactionLogDir().toFile(), tmpCompactionLogDir.toFile());
+ OmSnapshotUtils.linkFiles(getSstBackupDir().toFile(), tmpSstBackupDir.toFile());
+
+ return dbCheckpoint;
+ }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
index 1de509e99394..93070fcbe052 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
@@ -758,10 +758,15 @@ public static Path getSnapshotPath(OMMetadataManager omMetadataManager, Snapshot
}
public static String getSnapshotPath(OzoneConfiguration conf,
- SnapshotInfo snapshotInfo) {
+ SnapshotInfo snapshotInfo) {
+ return getSnapshotPath(conf, snapshotInfo.getCheckpointDirName());
+ }
+
+ public static String getSnapshotPath(OzoneConfiguration conf,
+ String checkpointDirName) {
return OMStorage.getOmDbDir(conf) +
OM_KEY_PREFIX + OM_SNAPSHOT_CHECKPOINT_DIR + OM_KEY_PREFIX +
- OM_DB_NAME + snapshotInfo.getCheckpointDirName();
+ OM_DB_NAME + checkpointDirName;
}
public static boolean isSnapshotKey(String[] keyParts) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java
index f5805044b7f4..848384ce3e2e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OmSnapshotUtils.java
@@ -27,6 +27,7 @@
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.BasicFileAttributes;
+import java.nio.file.attribute.FileTime;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
@@ -65,6 +66,26 @@ public static Object getINode(Path file) throws IOException {
return Files.readAttributes(file, BasicFileAttributes.class).fileKey();
}
+ /**
+ * Returns a string combining the inode (fileKey) and the last modification time (mtime) of the given file.
+ * <p>
+ * The returned string is formatted as "{inode}-{mtime}", where:
+ * <ul>
+ * <li>{@code inode} is the unique file key obtained from the file system, typically representing
+ * the inode on POSIX systems</li>
+ * <li>{@code mtime} is the last modified time of the file in milliseconds since the epoch</li>
+ * </ul>
+ *
+ * @param file the {@link Path} to the file whose inode and modification time are to be retrieved
+ * @return a string in the format "{inode}-{mtime}"
+ * @throws IOException if an I/O error occurs
+ */
+ public static String getFileInodeAndLastModifiedTimeString(Path file) throws IOException {
+ Object inode = getINode(file);
+ FileTime mTime = Files.getLastModifiedTime(file);
+ return String.format("%s-%s", inode, mTime.toMillis());
+ }
+
/**
* Create file of links to add to tarball.
* Format of entries are either: