Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
import java.util.Objects;
import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.ozone.rocksdb.util.SstFileInfo;
import org.rocksdb.LiveFileMetaData;

/**
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,9 @@
* limitations under the License.
*/

package org.apache.ozone.compaction.log;
package org.apache.ozone.rocksdb.util;

import static org.apache.commons.io.FilenameUtils.getBaseName;

import java.util.Objects;
import org.apache.hadoop.hdds.StringUtils;
Expand All @@ -39,7 +41,7 @@ public SstFileInfo(String fileName, String startRange, String endRange, String c
}

public SstFileInfo(LiveFileMetaData fileMetaData) {
this(fileMetaData.fileName(), StringUtils.bytes2String(fileMetaData.smallestKey()),
this(getBaseName(fileMetaData.fileName()), StringUtils.bytes2String(fileMetaData.smallestKey()),
StringUtils.bytes2String(fileMetaData.largestKey()),
StringUtils.bytes2String(fileMetaData.columnFamilyName()));
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,34 +17,28 @@

package org.apache.ozone.rocksdiff;

import java.util.Objects;
import org.apache.ozone.compaction.log.CompactionFileInfo;
import org.apache.ozone.rocksdb.util.SstFileInfo;

/**
* Node in the compaction DAG that represents an SST file.
*/
public class CompactionNode {
// Name of the SST file
private final String fileName;
public class CompactionNode extends SstFileInfo {
private final long snapshotGeneration;
private final long totalNumberOfKeys;
private long cumulativeKeysReverseTraversal;
private final String startKey;
private final String endKey;
private final String columnFamily;

/**
* CompactionNode constructor.
* @param file SST file (filename without extension)
* @param seqNum Snapshot generation (sequence number)
*/
public CompactionNode(String file, long seqNum, String startKey, String endKey, String columnFamily) {
fileName = file;
super(file, startKey, endKey, columnFamily);
totalNumberOfKeys = 0L;
snapshotGeneration = seqNum;
cumulativeKeysReverseTraversal = 0L;
this.startKey = startKey;
this.endKey = endKey;
this.columnFamily = columnFamily;
}

public CompactionNode(CompactionFileInfo compactionFileInfo) {
Expand All @@ -54,11 +48,7 @@ public CompactionNode(CompactionFileInfo compactionFileInfo) {

@Override
public String toString() {
return String.format("Node{%s}", fileName);
}

public String getFileName() {
return fileName;
return String.format("Node{%s}", getFileName());
}

public long getSnapshotGeneration() {
Expand All @@ -73,18 +63,6 @@ public long getCumulativeKeysReverseTraversal() {
return cumulativeKeysReverseTraversal;
}

public String getStartKey() {
return startKey;
}

public String getEndKey() {
return endKey;
}

public String getColumnFamily() {
return columnFamily;
}

public void setCumulativeKeysReverseTraversal(
long cumulativeKeysReverseTraversal) {
this.cumulativeKeysReverseTraversal = cumulativeKeysReverseTraversal;
Expand All @@ -93,4 +71,16 @@ public void setCumulativeKeysReverseTraversal(
public void addCumulativeKeysReverseTraversal(long diff) {
this.cumulativeKeysReverseTraversal += diff;
}

// Identity-based equality, preserving the behaviour CompactionNode had before
// it extended SstFileInfo: two nodes are equal only if they are the very same
// object. Declared final so subclasses cannot weaken this contract.
@Override
public final boolean equals(Object o) {
return this == o;
}

// Hash only on the file name. This stays consistent with the identity-based
// equals() (equal references trivially share a hash); distinct nodes for the
// same file may collide in hash tables but remain unequal.
@Override
public int hashCode() {
return Objects.hash(getFileName());
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
import org.apache.ozone.compaction.log.CompactionFileInfo;
import org.apache.ozone.rocksdb.util.SstFileInfo;
import org.rocksdb.LiveFileMetaData;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
Expand Down Expand Up @@ -106,7 +107,7 @@ public static void filterRelevantSstFiles(Set<String> inputFiles,
}

@VisibleForTesting
static boolean shouldSkipNode(CompactionNode node,
static boolean shouldSkipNode(SstFileInfo node,
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

seems unnecessary change.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We would need this change later

Map<String, String> columnFamilyToPrefixMap) {
// This is for backward compatibility. Before the compaction log table
// migration, startKey, endKey and columnFamily information is not persisted
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.ozone.compaction.log;

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.hadoop.hdds.StringUtils;
import org.apache.ozone.rocksdb.util.SstFileInfo;
import org.junit.jupiter.api.Test;
import org.rocksdb.LiveFileMetaData;

/**
 * Unit test for the base {@code SstFileInfo} class.
 */
public class TestSstFileInfo {

  @Test
  public void testSstFileInfo() {
    // Raw key bounds and column-family name used to stub the RocksDB metadata.
    final String startKey = "/smallestKey/1";
    final String endKey = "/largestKey/2";
    final String family = "columnFamily/123";

    // Stub a LiveFileMetaData whose file name carries a path and extension;
    // the SstFileInfo(LiveFileMetaData) constructor is expected to strip both.
    final LiveFileMetaData metaData = mock(LiveFileMetaData.class);
    when(metaData.fileName()).thenReturn("/1.sst");
    when(metaData.smallestKey()).thenReturn(StringUtils.string2Bytes(startKey));
    when(metaData.largestKey()).thenReturn(StringUtils.string2Bytes(endKey));
    when(metaData.columnFamilyName()).thenReturn(StringUtils.string2Bytes(family));

    // The expected info holds only the base name ("1"), not "/1.sst".
    SstFileInfo expected = new SstFileInfo("1", startKey, endKey, family);
    assertEquals(expected, new SstFileInfo(metaData));
  }
}
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.hadoop.hdds.utils.db.CopyObject;
import org.apache.hadoop.ozone.util.WithChecksum;
import org.apache.ozone.compaction.log.SstFileInfo;
import org.apache.ozone.rocksdb.util.SstFileInfo;
import org.rocksdb.LiveFileMetaData;
import org.yaml.snakeyaml.Yaml;

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@
import org.apache.commons.pool2.impl.DefaultPooledObject;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.om.OmSnapshotLocalData.VersionMeta;
import org.apache.ozone.compaction.log.SstFileInfo;
import org.apache.ozone.rocksdb.util.SstFileInfo;
import org.yaml.snakeyaml.DumperOptions;
import org.yaml.snakeyaml.LoaderOptions;
import org.yaml.snakeyaml.TypeDescription;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -139,10 +139,12 @@ public static String getSnapshotLocalPropertyYamlPath(Path snapshotPath) {
* @param snapshotInfo snapshot metadata
* @return the path to the snapshot's local property YAML file
*/
@VisibleForTesting
public String getSnapshotLocalPropertyYamlPath(SnapshotInfo snapshotInfo) {
return getSnapshotLocalPropertyYamlPath(snapshotInfo.getSnapshotId());
}

@VisibleForTesting
public String getSnapshotLocalPropertyYamlPath(UUID snapshotId) {
Path snapshotPath = OmSnapshotManager.getSnapshotPath(omMetadataManager, snapshotId);
return getSnapshotLocalPropertyYamlPath(snapshotPath);
Expand Down Expand Up @@ -191,7 +193,7 @@ public WritableOmSnapshotLocalDataProvider getWritableOmSnapshotLocalData(UUID s
return new WritableOmSnapshotLocalDataProvider(snapshotId);
}

public OmSnapshotLocalData getOmSnapshotLocalData(File snapshotDataPath) throws IOException {
OmSnapshotLocalData getOmSnapshotLocalData(File snapshotDataPath) throws IOException {
return snapshotLocalDataSerializer.load(snapshotDataPath);
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@
import org.apache.hadoop.ozone.om.OmSnapshotLocalData.VersionMeta;
import org.apache.hadoop.ozone.util.ObjectSerializer;
import org.apache.hadoop.ozone.util.YamlSerializer;
import org.apache.ozone.compaction.log.SstFileInfo;
import org.apache.ozone.rocksdb.util.SstFileInfo;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
Expand Down
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@swamirishi Thanks for working on this,
Could you please make the required changes to testCreateNewSnapshotLocalYaml? It would otherwise fail, because the test still expects filenames with the .sst extension, causing a mismatch.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Done moved the test to TestOmSnapshotLocalDataManager since the code has been refactored in the jira in HDDS-13783

Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,6 @@
package org.apache.hadoop.ozone.om;

import static org.apache.commons.io.file.PathUtils.copyDirectory;
import static org.apache.hadoop.hdds.StringUtils.string2Bytes;
import static org.apache.hadoop.hdds.utils.HAUtils.getExistingFiles;
import static org.apache.hadoop.ozone.OzoneConsts.OM_CHECKPOINT_DIR;
import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME;
Expand All @@ -28,11 +27,7 @@
import static org.apache.hadoop.ozone.OzoneConsts.SNAPSHOT_INFO_TABLE;
import static org.apache.hadoop.ozone.om.OMDBCheckpointServlet.processFile;
import static org.apache.hadoop.ozone.om.OmSnapshotManager.OM_HARDLINK_FILE;
import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPath;
import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.BUCKET_TABLE;
import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.DIRECTORY_TABLE;
import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.FILE_TABLE;
import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.KEY_TABLE;
import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.VOLUME_TABLE;
import static org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils.getINode;
import static org.assertj.core.api.Assertions.assertThat;
Expand All @@ -48,34 +43,28 @@
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;
import java.util.UUID;
import java.util.concurrent.TimeoutException;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.RDBBatchOperation;
import org.apache.hadoop.hdds.utils.db.RDBStore;
import org.apache.hadoop.hdds.utils.db.RocksDatabase;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TypedTable;
import org.apache.hadoop.ozone.om.exceptions.OMException;
Expand All @@ -85,7 +74,6 @@
import org.apache.hadoop.ozone.om.snapshot.OmSnapshotLocalDataManager;
import org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils;
import org.apache.hadoop.util.Time;
import org.apache.ozone.compaction.log.SstFileInfo;
import org.apache.ozone.test.GenericTestUtils;
import org.apache.ozone.test.GenericTestUtils.LogCapturer;
import org.junit.jupiter.api.AfterAll;
Expand All @@ -95,7 +83,6 @@
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.io.TempDir;
import org.rocksdb.LiveFileMetaData;
import org.slf4j.event.Level;

/**
Expand Down Expand Up @@ -272,71 +259,6 @@ public void testCloseOnEviction() throws IOException,
}, 100, 30_000);
}

/**
 * Builds a Mockito mock of RocksDB's {@code LiveFileMetaData} for the given
 * column family and SST file name, with fixed key bounds "k1".."k2".
 *
 * @param cfname column family name returned (UTF-8 encoded) by the mock
 * @param fileName SST file name returned by the mock
 * @return the stubbed mock
 */
private LiveFileMetaData createMockLiveFileMetadata(String cfname, String fileName) {
LiveFileMetaData lfm = mock(LiveFileMetaData.class);
when(lfm.columnFamilyName()).thenReturn(cfname.getBytes(StandardCharsets.UTF_8));
when(lfm.fileName()).thenReturn(fileName);
when(lfm.smallestKey()).thenReturn(string2Bytes("k1"));
when(lfm.largestKey()).thenReturn(string2Bytes("k2"));
return lfm;
}

/**
 * Verifies that createNewOmSnapshotLocalDataFile overwrites any pre-existing
 * local-data YAML and records, for version 0, exactly the SST files of the
 * tracked column families (KEY_TABLE, FILE_TABLE, DIRECTORY_TABLE), while
 * files from other column families are ignored.
 */
@Test
public void testCreateNewSnapshotLocalYaml() throws IOException {
SnapshotInfo snapshotInfo = createSnapshotInfo("vol1", "buck1");

// Expected not-yet-defragged SST files, keyed by column family (TreeMap for
// a deterministic iteration order when building the mocked live-file list).
Map<String, List<String>> expNotDefraggedSSTFileList = new TreeMap<>();
OmSnapshotLocalData.VersionMeta notDefraggedVersionMeta = new OmSnapshotLocalData.VersionMeta(0,
ImmutableList.of(new SstFileInfo("dt1.sst", "k1", "k2", DIRECTORY_TABLE),
new SstFileInfo("dt2.sst", "k1", "k2", DIRECTORY_TABLE),
new SstFileInfo("ft1.sst", "k1", "k2", FILE_TABLE),
new SstFileInfo("ft2.sst", "k1", "k2", FILE_TABLE),
new SstFileInfo("kt1.sst", "k1", "k2", KEY_TABLE),
new SstFileInfo("kt2.sst", "k1", "k2", KEY_TABLE)));
expNotDefraggedSSTFileList.put(KEY_TABLE, Stream.of("kt1.sst", "kt2.sst").collect(Collectors.toList()));
expNotDefraggedSSTFileList.put(FILE_TABLE, Stream.of("ft1.sst", "ft2.sst").collect(Collectors.toList()));
expNotDefraggedSSTFileList.put(DIRECTORY_TABLE, Stream.of("dt1.sst", "dt2.sst").collect(Collectors.toList()));

// One mocked LiveFileMetaData per expected file, all with keys "k1".."k2".
List<LiveFileMetaData> mockedLiveFiles = new ArrayList<>();
for (Map.Entry<String, List<String>> entry : expNotDefraggedSSTFileList.entrySet()) {
String cfname = entry.getKey();
for (String fname : entry.getValue()) {
mockedLiveFiles.add(createMockLiveFileMetadata(cfname, fname));
}
}
// Add some other column families and files that should be ignored
mockedLiveFiles.add(createMockLiveFileMetadata("otherTable", "ot1.sst"));
mockedLiveFiles.add(createMockLiveFileMetadata("otherTable", "ot2.sst"));

// Mocked RocksDB store that reports the live files above.
RDBStore mockedStore = mock(RDBStore.class);
RocksDatabase mockedDb = mock(RocksDatabase.class);
when(mockedStore.getDb()).thenReturn(mockedDb);
when(mockedDb.getLiveFilesMetaData()).thenReturn(mockedLiveFiles);

Path snapshotYaml = Paths.get(snapshotLocalDataManager.getSnapshotLocalPropertyYamlPath(snapshotInfo));
when(mockedStore.getDbLocation()).thenReturn(getSnapshotPath(omMetadataManager, snapshotInfo).toFile());
// Create an existing YAML file for the snapshot
assertTrue(snapshotYaml.toFile().createNewFile());
assertEquals(0, Files.size(snapshotYaml));
// Create a new YAML file for the snapshot
snapshotLocalDataManager.createNewOmSnapshotLocalDataFile(mockedStore, snapshotInfo);
// Verify that previous file was overwritten
assertTrue(Files.exists(snapshotYaml));
assertTrue(Files.size(snapshotYaml) > 0);
// Verify the contents of the YAML file
OmSnapshotLocalData localData = snapshotLocalDataManager.getOmSnapshotLocalData(snapshotYaml.toFile());
assertNotNull(localData);
assertEquals(0, localData.getVersion());
assertEquals(notDefraggedVersionMeta, localData.getVersionSstFileInfos().get(0));
assertFalse(localData.getSstFiltered());
assertEquals(0L, localData.getLastDefragTime());
assertFalse(localData.getNeedsDefrag());
assertEquals(1, localData.getVersionSstFileInfos().size());

// Cleanup
Files.delete(snapshotYaml);
}

@Test
public void testValidateSnapshotLimit() throws IOException {
TypedTable<String, SnapshotInfo> snapshotInfoTable = mock(TypedTable.class);
Expand Down
Loading