Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
88 commits
Select commit Hold shift + click to select a range
6e241f6
HDDS-13765. SnapshotLocalData yaml should also track snapshotId
swamirishi Oct 8, 2025
a869500
HDDS-13627. In memory Manager for Snapshot Local Data
swamirishi Oct 8, 2025
252d338
HDDS-13627. In memory Manager for Snapshot Local Data
swamirishi Oct 9, 2025
4099bc6
HDDS-13767. Refactor SnapshotLocalDataYaml related code into OmSnapsh…
swamirishi Oct 9, 2025
e02670c
HDDS-13767. Fix pmd
swamirishi Oct 9, 2025
5a66cfc
Merge remote-tracking branch 'origin/HDDS-13767' into HEAD
swamirishi Oct 9, 2025
79580e9
HDDS-13627. Fix checkstyle
swamirishi Oct 9, 2025
2a331ef
Merge remote-tracking branch 'apache/master' into HEAD
swamirishi Oct 9, 2025
c4f69e2
Merge remote-tracking branch 'apache/master' into HEAD
swamirishi Oct 10, 2025
afbc592
HDDS-13627. Add tests
swamirishi Oct 10, 2025
70ac2c7
HDDS-13783. Implement locks for OmSnapshotLocalDataManager
swamirishi Oct 12, 2025
b554cc7
HDDS-13783. Implement locks for OmSnapshotLocalDataManager
swamirishi Oct 12, 2025
49eccfa
HDDS-13783. Refactor inline variable
swamirishi Oct 12, 2025
51eda04
HDDS-13627. Refactor map data structure
swamirishi Oct 13, 2025
25f766c
Merge remote-tracking branch 'origin/HDDS-13627' into HEAD
swamirishi Oct 13, 2025
96689fa
HDDS-13783. Add more condition to upsert
swamirishi Oct 13, 2025
0674299
HDDS-13783. Add java doc comment
swamirishi Oct 13, 2025
5d9fc49
HDDS-13783. Add java doc comment
swamirishi Oct 13, 2025
2d88176
HDDS-13783. Implement full lock
swamirishi Oct 13, 2025
a3c4c69
HDDS-13783. Refactor and move modify method into WritableOmSnapshotLo…
swamirishi Oct 13, 2025
686d0c7
HDDS-13783. Make full lock non static
swamirishi Oct 13, 2025
491a54b
HDDS-13783. Fix remove
swamirishi Oct 13, 2025
5e69ee9
HDDS-13627. Fix findbugs
swamirishi Oct 13, 2025
81871b2
Merge remote-tracking branch 'apache/master' into HEAD
swamirishi Oct 14, 2025
a95604e
HDDS-13627. Fix tests
swamirishi Oct 14, 2025
5a90fcf
HDDS-13627. remove checksum interface
swamirishi Oct 14, 2025
20d7d6a
HDDS-13627. Fix test failures
swamirishi Oct 14, 2025
25fa6ae
Merge remote-tracking branch 'origin/HDDS-13627' into HEAD
swamirishi Oct 14, 2025
d419283
HDDS-13783. Fix findbugs
swamirishi Oct 14, 2025
8a44308
HDDS-13783. Fix pmd
swamirishi Oct 14, 2025
cb94c36
Merge remote-tracking branch 'apache/master' into HEAD
swamirishi Oct 14, 2025
e26052c
Merge remote-tracking branch 'origin/HDDS-13627' into HEAD
swamirishi Oct 14, 2025
4d272d1
HDDS-13783. Fix lock release
swamirishi Oct 14, 2025
2a38f59
HDDS-13627. address review comments
swamirishi Oct 14, 2025
ca098cf
HDDS-13783. Make graph updates synchronized
swamirishi Oct 15, 2025
67d4b3d
HDDS-13627. Make add version with dependents package private
swamirishi Oct 15, 2025
9838cda
Merge remote-tracking branch 'origin/HDDS-13627' into HEAD
swamirishi Oct 15, 2025
6a19dbb
Merge remote-tracking branch 'apache/master' into HEAD
swamirishi Oct 15, 2025
665f411
HDDS-13783. Fix checkstyle
swamirishi Oct 15, 2025
2894e40
HDDS-13783. Fix merge conflict
swamirishi Oct 15, 2025
ea0ab16
HDDS-13783. Add write version api
swamirishi Oct 15, 2025
915562b
HDDS-13797. Refactor OzoneManagerLock Resource class to handle handle…
swamirishi Oct 15, 2025
1c0d0ac
Merge remote-tracking branch 'apache/master' into HEAD
swamirishi Oct 15, 2025
24da3eb
HDDS-13797. Update interface
swamirishi Oct 15, 2025
8f3774a
HDDS-13798. Implement PoolBasedHierarchicalResourceLockManager for Hi…
swamirishi Oct 15, 2025
503cd4e
Merge remote-tracking branch 'origin/HDDS-13798' into HEAD
swamirishi Oct 15, 2025
6865fad
HDDS-13797. Revert move of Leveled Resource and Resource enum/interface
swamirishi Oct 15, 2025
903ecd1
Merge remote-tracking branch 'origin/HDDS-13797' into HEAD
swamirishi Oct 15, 2025
06d1e99
Merge remote-tracking branch 'origin/HDDS-13798' into HEAD
swamirishi Oct 15, 2025
60a7728
Merge remote-tracking branch 'apache/master' into HEAD
swamirishi Oct 15, 2025
4711517
HDDS-13798. Fix pmd findbugs
swamirishi Oct 15, 2025
af8754c
Merge remote-tracking branch 'origin/HDDS-13798' into HEAD
swamirishi Oct 15, 2025
655a724
HDDS-13798. Fix pmd findbugs
swamirishi Oct 15, 2025
6386c1b
Merge remote-tracking branch 'origin/HDDS-13798' into HEAD
swamirishi Oct 15, 2025
2bc6134
HDDS-13798. Fix ozone-default.xml
swamirishi Oct 16, 2025
0de7c62
Merge remote-tracking branch 'origin/HDDS-13798' into HEAD
swamirishi Oct 16, 2025
8e8c534
HDDS-13798. Stop lock data manager on metadata stop
swamirishi Oct 16, 2025
f148f24
HDDS-13798. Update tests
swamirishi Oct 16, 2025
da030c0
HDDS-13798. Rename class
swamirishi Oct 16, 2025
6af6498
Merge remote-tracking branch 'origin/HDDS-13798' into HEAD
swamirishi Oct 16, 2025
b281569
HDDS-13783. Add tests
swamirishi Oct 16, 2025
1ad24b4
HDDS-13783. Fix checkstyle
swamirishi Oct 16, 2025
d629911
HDDS-13783. Fix findbugs
swamirishi Oct 16, 2025
8eeb44b
HDDS-13783. Fix pmd
swamirishi Oct 16, 2025
d9301b3
Merge remote-tracking branch 'apache/master' into HEAD
swamirishi Oct 16, 2025
c73a355
HDDS-13783. Fix test
swamirishi Oct 16, 2025
52be3dd
HDDS-13783. Allow version resolution to null
swamirishi Oct 16, 2025
278605a
HDDS-13783. Add dirty bit
swamirishi Oct 17, 2025
cf19dce
HDDS-13783. Address review comments
swamirishi Oct 17, 2025
34097de
HDDS-13783. Address review comments
swamirishi Oct 17, 2025
c46ddc2
HDDS-13783. Address review comments
swamirishi Oct 17, 2025
99afc02
HDDS-13783. Address review comments
swamirishi Oct 17, 2025
48ec0bb
HDDS-13810. Fix Build Issue because of unused dependency
swamirishi Oct 18, 2025
f524cad
Merge remote-tracking branch 'origin/HDDS-13810' into HEAD
swamirishi Oct 18, 2025
cb31b7c
Revert "HDDS-13810. Fix Build Issue because of unused dependency"
swamirishi Oct 19, 2025
8b014dd
HDDS-13783. Add case for commit key in middle of chain
swamirishi Oct 20, 2025
57662c6
HDDS-13783. Convert set to list of predecessors
swamirishi Oct 20, 2025
79a46f4
Merge remote-tracking branch 'apache/master' into HEAD
swamirishi Oct 23, 2025
5f0bb91
HDDS-13833. Add transactionInfo field in SnapshotLocalData and update…
swamirishi Oct 24, 2025
aa6facf
Merge remote-tracking branch 'apache/master' into HEAD
swamirishi Oct 24, 2025
cc35056
HDDS-13783. Make local data graph synchronous
swamirishi Oct 25, 2025
95341dd
Merge remote-tracking branch 'apache/master' into HEAD
swamirishi Oct 27, 2025
616bef3
HDDS-13783. Fix NPE with concurrentHashMap
swamirishi Oct 27, 2025
4596386
HDDS-13783. Add comments for localDataGraph
swamirishi Oct 29, 2025
7af6521
Merge remote-tracking branch 'origin/HDDS-13783' into HEAD
swamirishi Oct 29, 2025
e19dae2
Merge remote-tracking branch 'apache/master' into HEAD
swamirishi Oct 29, 2025
613d106
Merge remote-tracking branch 'apache/master' into HEAD
swamirishi Oct 30, 2025
8a29736
HDDS-13833. Fix checkstyle
swamirishi Oct 30, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -221,6 +221,7 @@ public final class OzoneConsts {
public static final String OM_SST_FILE_INFO_START_KEY = "startKey";
public static final String OM_SST_FILE_INFO_END_KEY = "endKey";
public static final String OM_SST_FILE_INFO_COL_FAMILY = "columnFamily";
public static final String OM_SLD_TXN_INFO = "transactionInfo";

// YAML fields for .container files
public static final String CONTAINER_ID = "containerID";
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@
import java.util.UUID;
import java.util.stream.Collectors;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.hadoop.hdds.utils.TransactionInfo;
import org.apache.hadoop.hdds.utils.db.CopyObject;
import org.apache.hadoop.ozone.util.WithChecksum;
import org.apache.ozone.compaction.log.SstFileInfo;
Expand Down Expand Up @@ -63,6 +64,9 @@ public class OmSnapshotLocalData implements WithChecksum<OmSnapshotLocalData> {
// Previous snapshotId based on which the snapshot local data is built.
private UUID previousSnapshotId;

// Stores the transactionInfo corresponding to OM when the snapshot is purged.
private TransactionInfo transactionInfo;

// Map of version to VersionMeta, using linkedHashMap since the order of the map needs to be deterministic for
// checksum computation.
private final LinkedHashMap<Integer, VersionMeta> versionSstFileInfos;
Expand All @@ -73,7 +77,8 @@ public class OmSnapshotLocalData implements WithChecksum<OmSnapshotLocalData> {
/**
* Creates a OmSnapshotLocalData object with default values.
*/
public OmSnapshotLocalData(UUID snapshotId, List<LiveFileMetaData> notDefraggedSSTFileList, UUID previousSnapshotId) {
public OmSnapshotLocalData(UUID snapshotId, List<LiveFileMetaData> notDefraggedSSTFileList, UUID previousSnapshotId,
TransactionInfo transactionInfo) {
this.snapshotId = snapshotId;
this.isSSTFiltered = false;
this.lastDefragTime = 0L;
Expand All @@ -83,6 +88,7 @@ public OmSnapshotLocalData(UUID snapshotId, List<LiveFileMetaData> notDefraggedS
new VersionMeta(0, notDefraggedSSTFileList.stream().map(SstFileInfo::new).collect(Collectors.toList())));
this.version = 0;
this.previousSnapshotId = previousSnapshotId;
this.transactionInfo = transactionInfo;
setChecksumTo0ByteArray();
}

Expand All @@ -101,6 +107,15 @@ public OmSnapshotLocalData(OmSnapshotLocalData source) {
this.previousSnapshotId = source.previousSnapshotId;
this.versionSstFileInfos = new LinkedHashMap<>();
setVersionSstFileInfos(source.versionSstFileInfos);
this.transactionInfo = source.transactionInfo;
}

/**
 * Returns the OM transaction info recorded in this snapshot local data.
 * Per the field comment, this is set when the snapshot is purged; may be
 * {@code null} if it has never been set.
 */
public TransactionInfo getTransactionInfo() {
return transactionInfo;
}

/**
 * Sets the OM transaction info for this snapshot local data.
 * NOTE(review): this setter only mutates the in-memory object; persisting
 * the change to the YAML file appears to be the caller's responsibility —
 * confirm against OmSnapshotLocalDataManager usage.
 */
public void setTransactionInfo(TransactionInfo transactionInfo) {
this.transactionInfo = transactionInfo;
}

/**
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
import org.apache.commons.pool2.BasePooledObjectFactory;
import org.apache.commons.pool2.PooledObject;
import org.apache.commons.pool2.impl.DefaultPooledObject;
import org.apache.hadoop.hdds.utils.TransactionInfo;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.om.OmSnapshotLocalData.VersionMeta;
import org.apache.ozone.compaction.log.SstFileInfo;
Expand Down Expand Up @@ -71,6 +72,8 @@ private static class OmSnapshotLocalDataRepresenter extends Representer {
this.addClassTag(SstFileInfo.class, SST_FILE_INFO_TAG);
representers.put(SstFileInfo.class, new RepresentSstFileInfo());
representers.put(VersionMeta.class, new RepresentVersionMeta());
representers.put(TransactionInfo.class, data -> new ScalarNode(Tag.STR, data.toString(), null, null,
DumperOptions.ScalarStyle.PLAIN));
representers.put(UUID.class, data ->
new ScalarNode(Tag.STR, data.toString(), null, null, DumperOptions.ScalarStyle.PLAIN));
}
Expand Down Expand Up @@ -168,7 +171,10 @@ public Object construct(Node node) {
UUID snapId = UUID.fromString(snapIdStr);
final String prevSnapIdStr = (String) nodes.get(OzoneConsts.OM_SLD_PREV_SNAP_ID);
UUID prevSnapId = prevSnapIdStr != null ? UUID.fromString(prevSnapIdStr) : null;
OmSnapshotLocalData snapshotLocalData = new OmSnapshotLocalData(snapId, Collections.emptyList(), prevSnapId);
final String purgeTxInfoStr = (String) nodes.get(OzoneConsts.OM_SLD_TXN_INFO);
TransactionInfo transactionInfo = purgeTxInfoStr != null ? TransactionInfo.valueOf(purgeTxInfoStr) : null;
OmSnapshotLocalData snapshotLocalData = new OmSnapshotLocalData(snapId, Collections.emptyList(), prevSnapId,
transactionInfo);

// Set version from YAML
Integer version = (Integer) nodes.get(OzoneConsts.OM_SLD_VERSION);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut

List<String> snapshotDbKeys = snapshotPurgeRequest
.getSnapshotDBKeysList();
TransactionInfo transactionInfo = TransactionInfo.valueOf(context.getTermIndex());
try {

// Each snapshot purge operation does three things:
Expand Down Expand Up @@ -123,12 +124,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut
}
// Update the snapshotInfo lastTransactionInfo.
for (SnapshotInfo snapshotInfo : updatedSnapshotInfos.values()) {
snapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(context.getTermIndex()).toByteString());
snapshotInfo.setLastTransactionInfo(transactionInfo.toByteString());
omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(snapshotInfo.getTableKey()),
CacheValue.get(context.getIndex(), snapshotInfo));
}

omClientResponse = new OMSnapshotPurgeResponse(omResponse.build(), snapshotDbKeys, updatedSnapshotInfos);
omClientResponse = new OMSnapshotPurgeResponse(omResponse.build(), snapshotDbKeys, updatedSnapshotInfos,
transactionInfo);

omSnapshotIntMetrics.incNumSnapshotPurges();
LOG.info("Successfully executed snapshotPurgeRequest: {{}} along with updating snapshots:{}.",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
import java.util.List;
import java.util.Map;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.hdds.utils.TransactionInfo;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
Expand All @@ -36,6 +37,7 @@
import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager;
import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager.WritableOmSnapshotLocalDataProvider;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
Expand All @@ -49,15 +51,18 @@ public class OMSnapshotPurgeResponse extends OMClientResponse {
LoggerFactory.getLogger(OMSnapshotPurgeResponse.class);
private final List<String> snapshotDbKeys;
private final Map<String, SnapshotInfo> updatedSnapInfos;
private final TransactionInfo transactionInfo;

/**
 * Constructs a success response for a snapshot purge request.
 *
 * <p>Note: the scraped diff hunk contained both the old 3-parameter and the
 * new 4-parameter signatures interleaved (the old {@code updatedSnapInfos}
 * line had no trailing comma, making the span invalid Java); this is the
 * reconstructed post-change constructor.
 *
 * @param omResponse       the OM response this client response wraps
 * @param snapshotDbKeys   DB keys of the snapshots being purged
 * @param updatedSnapInfos snapshot infos whose lastTransactionInfo was updated
 * @param transactionInfo  transaction info of the purge operation; later
 *                         written into each purged snapshot's local data
 *                         (see updateLocalData)
 */
public OMSnapshotPurgeResponse(
    @Nonnull OMResponse omResponse,
    @Nonnull List<String> snapshotDbKeys,
    Map<String, SnapshotInfo> updatedSnapInfos,
    TransactionInfo transactionInfo
) {
  super(omResponse);
  this.snapshotDbKeys = snapshotDbKeys;
  this.updatedSnapInfos = updatedSnapInfos;
  this.transactionInfo = transactionInfo;
}

/**
Expand All @@ -69,6 +74,7 @@ public OMSnapshotPurgeResponse(@Nonnull OMResponse omResponse) {
checkStatusNotOK();
this.snapshotDbKeys = null;
this.updatedSnapInfos = null;
this.transactionInfo = null;
}

@Override
Expand Down Expand Up @@ -96,10 +102,14 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager,
// Remove the snapshot from snapshotId to snapshotTableKey map.
((OmMetadataManagerImpl) omMetadataManager).getSnapshotChainManager()
.removeFromSnapshotIdToTable(snapshotInfo.getSnapshotId());
// Delete Snapshot checkpoint directory.

OmSnapshotLocalDataManager snapshotLocalDataManager = ((OmMetadataManagerImpl) omMetadataManager)
.getOzoneManager().getOmSnapshotManager().getSnapshotLocalDataManager();
deleteCheckpointDirectory(snapshotLocalDataManager, omMetadataManager, snapshotInfo);
// Update snapshot local data to update purge transaction info. This would be used to check whether the
// snapshot purged txn is flushed to rocksdb.
updateLocalData(snapshotLocalDataManager, snapshotInfo);
// Delete Snapshot checkpoint directory.
deleteCheckpointDirectory(omMetadataManager, snapshotInfo);
// Delete snapshotInfo from the table.
omMetadataManager.getSnapshotInfoTable().deleteWithBatch(batchOperation, dbKey);
}
Expand All @@ -115,11 +125,18 @@ private void updateSnapInfo(OmMetadataManagerImpl metadataManager,
}
}

/**
 * Records the purge transaction info in the snapshot's local data and
 * commits the change through the writable provider.
 *
 * @param localDataManager manager supplying a writable view of the snapshot local data
 * @param snapshotInfo     the snapshot whose local data is updated
 * @throws IOException if the local data cannot be obtained or committed
 */
private void updateLocalData(OmSnapshotLocalDataManager localDataManager, SnapshotInfo snapshotInfo)
    throws IOException {
  try (WritableOmSnapshotLocalDataProvider writableLocalData =
           localDataManager.getWritableOmSnapshotLocalData(snapshotInfo)) {
    writableLocalData.setTransactionInfo(transactionInfo);
    writableLocalData.commit();
  }
}

/**
* Deletes the checkpoint directory for a snapshot.
*/
private void deleteCheckpointDirectory(OmSnapshotLocalDataManager snapshotLocalDataManager,
OMMetadataManager omMetadataManager, SnapshotInfo snapshotInfo) {
private void deleteCheckpointDirectory(OMMetadataManager omMetadataManager, SnapshotInfo snapshotInfo) {
// Acquiring write lock to avoid race condition with sst filtering service which creates a sst filtered file
// inside the snapshot directory. Any operation apart which doesn't create/delete files under this snapshot
// directory can run in parallel along with this operation.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,7 @@
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.stream.Collectors;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hdds.utils.TransactionInfo;
import org.apache.hadoop.hdds.utils.db.RDBStore;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OmSnapshotLocalData;
Expand Down Expand Up @@ -155,10 +156,11 @@ public String getSnapshotLocalPropertyYamlPath(UUID snapshotId) {
*/
public void createNewOmSnapshotLocalDataFile(RDBStore snapshotStore, SnapshotInfo snapshotInfo) throws IOException {
try (WritableOmSnapshotLocalDataProvider snapshotLocalData =
new WritableOmSnapshotLocalDataProvider(snapshotInfo.getSnapshotId(),
() -> Pair.of(new OmSnapshotLocalData(snapshotInfo.getSnapshotId(),
OmSnapshotManager.getSnapshotSSTFileList(snapshotStore), snapshotInfo.getPathPreviousSnapshotId()),
null))) {
new WritableOmSnapshotLocalDataProvider(snapshotInfo.getSnapshotId(),
() -> Pair.of(new OmSnapshotLocalData(snapshotInfo.getSnapshotId(),
OmSnapshotManager.getSnapshotSSTFileList(snapshotStore),
snapshotInfo.getPathPreviousSnapshotId(), null),
null))) {
snapshotLocalData.commit();
}
}
Expand Down Expand Up @@ -660,6 +662,12 @@ public void removeVersion(int version) {
setDirty();
}

/**
 * Updates the transaction info on the wrapped snapshot local data and marks
 * this provider dirty so the change is picked up by a subsequent commit.
 *
 * @param transactionInfo transaction info to record
 */
public void setTransactionInfo(TransactionInfo transactionInfo) {
  getSnapshotLocalData().setTransactionInfo(transactionInfo);
  // Flag the in-memory copy as modified.
  setDirty();
}

public synchronized void commit() throws IOException {
// Validate modification and commit the changes.
SnapshotVersionsMeta localDataVersionNodes = validateModification(super.snapshotLocalData);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,11 +37,13 @@
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.utils.TransactionInfo;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.om.OmSnapshotLocalData.VersionMeta;
import org.apache.hadoop.ozone.util.ObjectSerializer;
Expand Down Expand Up @@ -106,7 +108,8 @@ private LiveFileMetaData createLiveFileMetaData(String fileName, String table, S
/**
* Creates a snapshot local data YAML file.
*/
private Pair<File, UUID> writeToYaml(UUID snapshotId, String snapshotName) throws IOException {
private Pair<File, UUID> writeToYaml(UUID snapshotId, String snapshotName, TransactionInfo transactionInfo)
throws IOException {
String yamlFilePath = snapshotName + ".yaml";
UUID previousSnapshotId = UUID.randomUUID();
// Create snapshot data with not defragged SST files
Expand All @@ -115,7 +118,7 @@ private Pair<File, UUID> writeToYaml(UUID snapshotId, String snapshotName) throw
createLiveFileMetaData("sst2", "table1", "k3", "k4"),
createLiveFileMetaData("sst3", "table2", "k4", "k5"));
OmSnapshotLocalData dataYaml = new OmSnapshotLocalData(snapshotId, notDefraggedSSTFileList,
previousSnapshotId);
previousSnapshotId, transactionInfo);

// Set version
dataYaml.setVersion(42);
Expand Down Expand Up @@ -150,7 +153,9 @@ private Pair<File, UUID> writeToYaml(UUID snapshotId, String snapshotName) throw
@Test
public void testWriteToYaml() throws IOException {
UUID snapshotId = UUID.randomUUID();
Pair<File, UUID> yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot1");
TransactionInfo transactionInfo = TransactionInfo.valueOf(ThreadLocalRandom.current().nextLong(),
ThreadLocalRandom.current().nextLong());
Pair<File, UUID> yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot1", transactionInfo);
File yamlFile = yamlFilePrevIdPair.getLeft();
UUID prevSnapId = yamlFilePrevIdPair.getRight();

Expand All @@ -160,6 +165,7 @@ public void testWriteToYaml() throws IOException {
// Verify fields
assertEquals(44, snapshotData.getVersion());
assertTrue(snapshotData.getSstFiltered());
assertEquals(transactionInfo, snapshotData.getTransactionInfo());

VersionMeta notDefraggedSSTFiles = snapshotData.getVersionSstFileInfos().get(0);
assertEquals(new VersionMeta(0,
Expand Down Expand Up @@ -192,17 +198,19 @@ public void testWriteToYaml() throws IOException {
@Test
public void testUpdateSnapshotDataFile() throws IOException {
UUID snapshotId = UUID.randomUUID();
Pair<File, UUID> yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot2");
Pair<File, UUID> yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot2", null);
File yamlFile = yamlFilePrevIdPair.getLeft();
// Read from YAML file
OmSnapshotLocalData dataYaml =
omSnapshotLocalDataSerializer.load(yamlFile);

TransactionInfo transactionInfo = TransactionInfo.valueOf(ThreadLocalRandom.current().nextLong(),
ThreadLocalRandom.current().nextLong());
// Update snapshot data
dataYaml.setSstFiltered(false);
dataYaml.setNeedsDefrag(false);
dataYaml.addVersionSSTFileInfos(
singletonList(createLiveFileMetaData("defragged-sst4", "table3", "k5", "k6")), 5);
dataYaml.setTransactionInfo(transactionInfo);

// Write updated data back to file
omSnapshotLocalDataSerializer.save(yamlFile, dataYaml);
Expand All @@ -213,6 +221,7 @@ public void testUpdateSnapshotDataFile() throws IOException {
// Verify updated data
assertThat(dataYaml.getSstFiltered()).isFalse();
assertThat(dataYaml.getNeedsDefrag()).isFalse();
assertEquals(transactionInfo, dataYaml.getTransactionInfo());

Map<Integer, VersionMeta> defraggedFiles = dataYaml.getVersionSstFileInfos();
assertEquals(4, defraggedFiles.size());
Expand All @@ -234,7 +243,9 @@ public void testEmptyFile() throws IOException {
@Test
public void testChecksum() throws IOException {
UUID snapshotId = UUID.randomUUID();
Pair<File, UUID> yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot3");
TransactionInfo transactionInfo = TransactionInfo.valueOf(ThreadLocalRandom.current().nextLong(),
ThreadLocalRandom.current().nextLong());
Pair<File, UUID> yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot3", transactionInfo);
File yamlFile = yamlFilePrevIdPair.getLeft();
// Read from YAML file
OmSnapshotLocalData snapshotData = omSnapshotLocalDataSerializer.load(yamlFile);
Expand All @@ -251,7 +262,9 @@ public void testChecksum() throws IOException {
@Test
public void testYamlContainsAllFields() throws IOException {
UUID snapshotId = UUID.randomUUID();
Pair<File, UUID> yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot4");
TransactionInfo transactionInfo = TransactionInfo.valueOf(ThreadLocalRandom.current().nextLong(),
ThreadLocalRandom.current().nextLong());
Pair<File, UUID> yamlFilePrevIdPair = writeToYaml(snapshotId, "snapshot4", transactionInfo);
File yamlFile = yamlFilePrevIdPair.getLeft();
String content = FileUtils.readFileToString(yamlFile, Charset.defaultCharset());

Expand All @@ -264,5 +277,6 @@ public void testYamlContainsAllFields() throws IOException {
assertThat(content).contains(OzoneConsts.OM_SLD_VERSION_SST_FILE_INFO);
assertThat(content).contains(OzoneConsts.OM_SLD_SNAP_ID);
assertThat(content).contains(OzoneConsts.OM_SLD_PREV_SNAP_ID);
assertThat(content).contains(OzoneConsts.OM_SLD_TXN_INFO);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,7 @@
import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotPurgeResponse;
import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager;
import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager.ReadableOmSnapshotLocalDataProvider;
import org.apache.hadoop.ozone.om.snapshot.TestSnapshotRequestAndResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotPurgeRequest;
Expand Down Expand Up @@ -159,6 +160,10 @@ public void testValidateAndUpdateCache() throws Exception {

List<String> snapshotDbKeysToPurge = createSnapshots(10);
assertFalse(getOmMetadataManager().getSnapshotInfoTable().isEmpty());
List<SnapshotInfo> snapshotInfos = new ArrayList<>();
for (String snapshotKey : snapshotDbKeysToPurge) {
snapshotInfos.add(getOmMetadataManager().getSnapshotInfoTable().get(snapshotKey));
}

// Check if all the checkpoints are created.
for (Path checkpoint : checkpointPaths) {
Expand All @@ -171,9 +176,9 @@ public void testValidateAndUpdateCache() throws Exception {
snapshotDbKeysToPurge);

OMSnapshotPurgeRequest omSnapshotPurgeRequest = preExecute(snapshotPurgeRequest);

TransactionInfo transactionInfo = TransactionInfo.valueOf(TransactionInfo.getTermIndex(200L));
OMSnapshotPurgeResponse omSnapshotPurgeResponse = (OMSnapshotPurgeResponse)
omSnapshotPurgeRequest.validateAndUpdateCache(getOzoneManager(), 200L);
omSnapshotPurgeRequest.validateAndUpdateCache(getOzoneManager(), transactionInfo.getTransactionIndex());

for (String snapshotTableKey: snapshotDbKeysToPurge) {
assertNull(getOmMetadataManager().getSnapshotInfoTable().get(snapshotTableKey));
Expand All @@ -191,6 +196,15 @@ public void testValidateAndUpdateCache() throws Exception {
for (Path checkpoint : checkpointPaths) {
assertFalse(Files.exists(checkpoint));
}
OmSnapshotLocalDataManager snapshotLocalDataManager =
getOzoneManager().getOmSnapshotManager().getSnapshotLocalDataManager();
for (SnapshotInfo snapshotInfo : snapshotInfos) {
try (ReadableOmSnapshotLocalDataProvider snapProvider =
snapshotLocalDataManager.getOmSnapshotLocalData(snapshotInfo)) {
assertEquals(transactionInfo, snapProvider.getSnapshotLocalData().getTransactionInfo());
}
}

assertEquals(initialSnapshotPurgeCount + 1, getOmSnapshotIntMetrics().getNumSnapshotPurges());
assertEquals(initialSnapshotPurgeFailCount, getOmSnapshotIntMetrics().getNumSnapshotPurgeFails());
}
Expand Down
Loading