Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
25 commits
Select commit Hold shift + click to select a range
7796d21
HDDS-8739. Snapdiff should return complete absolute path in Diff Entry
swamirishi Jun 2, 2023
b5f2f77
Merge remote-tracking branch 'apache/master' into HEAD
swamirishi Jun 14, 2023
4c53136
HDDS-8739. Fix Diff entry representation
swamirishi Jun 14, 2023
eddb5a1
HDDS-8739. Fix compilation issues
swamirishi Jun 14, 2023
87792cc
HDDS-8739. Fix bug
swamirishi Jun 14, 2023
971321f
HDDS-8739. fix bug to resolve abs path
swamirishi Jun 14, 2023
0feae64
HDDS-8739. Fix testcases
swamirishi Jun 16, 2023
007d6f6
Merge remote-tracking branch 'apache/master' into HEAD
swamirishi Jun 16, 2023
7756571
HDDS-8739. Fix testcases
swamirishi Jun 16, 2023
ccea92b
HDDS-8739. Fix checkstyle
swamirishi Jun 17, 2023
6297ed5
HDDS-8739. Address review comments
swamirishi Jun 19, 2023
c3ec1a7
HDDS-8739: Fix failures
swamirishi Jun 19, 2023
3f6510f
HDDS-8739: Fix failures
swamirishi Jun 19, 2023
ebfb66b
HDDS-8739: Fix failures
swamirishi Jun 20, 2023
227f6c1
HDDS-8739: Fix failures
swamirishi Jun 20, 2023
981cb79
HDDS-8739: Address review comments
swamirishi Jun 20, 2023
3bc5467
HDDS-8739: Fix failures
swamirishi Jun 20, 2023
9d35447
HDDS-8739: Address review comments
swamirishi Jun 21, 2023
258a36c
Merge remote-tracking branch 'apache/master' into HEAD
swamirishi Jun 21, 2023
d318ec2
HDDS-8739: Address review comments
swamirishi Jun 21, 2023
f48d2a1
HDDS-8739: Address review comments
swamirishi Jun 21, 2023
c69987e
HDDS-8739: Fix checkstyle
swamirishi Jun 21, 2023
f5b299a
Merge remote-tracking branch 'apache/master' into HEAD
swamirishi Jun 22, 2023
d224462
HDDS-8739: Resolve merge conflicts
swamirishi Jun 22, 2023
d03890d
HDDS-8739: Resolve test failure
swamirishi Jun 22, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ public static boolean doesSstFileContainKeyRange(String filepath,
TableProperties properties = sstFileReader.get().getTableProperties();
String tableName = new String(properties.getColumnFamilyName(), UTF_8);
if (tableToPrefixMap.containsKey(tableName)) {
String prefix = tableToPrefixMap.get(tableName) + OM_KEY_PREFIX;
String prefix = tableToPrefixMap.get(tableName);
try (ManagedSstFileReaderIterator iterator =
ManagedSstFileReaderIterator.managed(sstFileReader.get()
.newIterator(new ReadOptions()))) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,6 @@
import java.util.stream.Collectors;
import java.util.Comparator;

import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdds.conf.ConfigurationException;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
Expand All @@ -65,6 +64,7 @@
import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys;
import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_INDICATOR;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_BIND_HOST_DEFAULT;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DECOMMISSIONED_NODES_KEY;
Expand Down Expand Up @@ -675,7 +675,6 @@ public static String getOzoneManagerServiceId(OzoneConfiguration conf)
* does not preserve.
* @return normalized key name.
*/
@SuppressFBWarnings("DMI_HARDCODED_ABSOLUTE_FILENAME")
public static String normalizeKey(String keyName,
boolean preserveTrailingSlash) {
// For empty strings do nothing, just return the same.
Expand All @@ -692,8 +691,8 @@ public static String normalizeKey(String keyName,
LOG.debug("Normalized key {} to {} ", keyName,
normalizedKeyName.substring(1));
}
if (preserveTrailingSlash && keyName.endsWith("/")) {
return normalizedKeyName.substring(1) + "/";
if (preserveTrailingSlash && keyName.endsWith(OZONE_URI_DELIMITER)) {
return normalizedKeyName.substring(1) + OZONE_URI_DELIMITER;
}
return normalizedKeyName.substring(1);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,12 +24,15 @@
import org.apache.hadoop.hdds.utils.db.Codec;
import org.apache.hadoop.hdds.utils.db.DelegatedCodec;
import org.apache.hadoop.hdds.utils.db.Proto2Codec;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.ozone.OFSPath;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DiffReportEntryProto;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotDiffReportProto;

import java.nio.charset.StandardCharsets;
import java.nio.file.Paths;
import java.util.List;
import java.util.stream.Collectors;

Expand Down Expand Up @@ -163,7 +166,7 @@ public static DiffReportEntry fromProtobufDiffReportEntry(
return null;
}
DiffType type = fromProtobufDiffType(entry.getDiffType());
return type == null ? null : new DiffReportEntry(type,
return type == null ? null : new DiffReportEntryOzone(type,
entry.getSourcePath().getBytes(StandardCharsets.UTF_8),
entry.hasTargetPath() ?
entry.getTargetPath().getBytes(StandardCharsets.UTF_8) : null);
Expand Down Expand Up @@ -191,7 +194,7 @@ public static DiffReportEntry getDiffReportEntry(final DiffType type,

public static DiffReportEntry getDiffReportEntry(final DiffType type,
final String sourcePath, final String targetPath) {
return new DiffReportEntry(type,
return new DiffReportEntryOzone(type,
sourcePath.getBytes(StandardCharsets.UTF_8),
targetPath != null ? targetPath.getBytes(StandardCharsets.UTF_8) :
null);
Expand All @@ -206,5 +209,44 @@ public void aggregate(SnapshotDiffReportOzone diffReport) {
this.getDiffList().addAll(diffReport.getDiffList());
}

/**
* DiffReportEntry for ozone.
*/
public static class DiffReportEntryOzone extends DiffReportEntry {

public DiffReportEntryOzone(DiffType type, byte[] sourcePath) {
super(type, sourcePath);
}

public DiffReportEntryOzone(DiffType type, byte[][] sourcePathComponents) {
super(type, sourcePathComponents);
}

public DiffReportEntryOzone(DiffType type, byte[] sourcePath,
byte[] targetPath) {
super(type, sourcePath, targetPath);
}

public DiffReportEntryOzone(DiffType type, byte[][] sourcePathComponents,
byte[][] targetPathComponents) {
super(type, sourcePathComponents, targetPathComponents);
}

static String getPathString(byte[] path) {
String pathStr = DFSUtilClient.bytes2String(path);
return pathStr.isEmpty() ? "." : Paths.get(pathStr)
.toAbsolutePath().toString();
}

@Override
public String toString() {
String str = this.getType().getLabel() + "\t" +
getPathString(this.getSourcePath());
if (this.getType() == SnapshotDiffReport.DiffType.RENAME) {
str = str + " -> " + getPathString(this.getTargetPath());
}

return str;
}
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -2481,9 +2481,11 @@ public void testSnapshotDiff() throws Exception {
diff.getDiffList().get(0).getType());
Assert.assertEquals(SnapshotDiffReport.DiffType.CREATE,
diff.getDiffList().get(1).getType());
Assert.assertArrayEquals("key1".getBytes(StandardCharsets.UTF_8),
Assert.assertArrayEquals(
"/key1".getBytes(StandardCharsets.UTF_8),
diff.getDiffList().get(0).getSourcePath());
Assert.assertArrayEquals("key2".getBytes(StandardCharsets.UTF_8),
Assert.assertArrayEquals(
"/key2".getBytes(StandardCharsets.UTF_8),
diff.getDiffList().get(1).getSourcePath());

// test whether snapdiff returns aggregated response as
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@
import org.apache.hadoop.hdds.utils.db.DBProfile;
import org.apache.hadoop.hdds.utils.db.RDBStore;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.TestDataUtil;
Expand Down Expand Up @@ -81,6 +82,7 @@
import java.util.stream.Collectors;

import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
import static org.apache.hadoop.ozone.admin.scm.FinalizeUpgradeCommandUtil.isDone;
import static org.apache.hadoop.ozone.admin.scm.FinalizeUpgradeCommandUtil.isStarting;
import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
Expand Down Expand Up @@ -216,7 +218,7 @@ private void init() throws Exception {

// stop the deletion services so that keys can still be read
keyManager.stop();
preFinalizationChecks();
// preFinalizationChecks();
finalizeOMUpgrade();
counter = new AtomicInteger();
}
Expand Down Expand Up @@ -599,12 +601,14 @@ public void testSnapDiff() throws Exception {
SnapshotDiffReportOzone
diff2 = getSnapDiffReport(volume, bucket, snap2, snap3);
Assert.assertEquals(2, diff2.getDiffList().size());
Assert.assertTrue(diff2.getDiffList().contains(
SnapshotDiffReportOzone.getDiffReportEntry(
SnapshotDiffReportOzone.DiffType.CREATE, key2)));
Assert.assertTrue(diff2.getDiffList().contains(
SnapshotDiffReportOzone.getDiffReportEntry(
SnapshotDiffReportOzone.DiffType.DELETE, key1)));
Assert.assertEquals(
Arrays.asList(SnapshotDiffReportOzone.getDiffReportEntry(
SnapshotDiffReport.DiffType.DELETE,
OZONE_URI_DELIMITER + key1),
SnapshotDiffReportOzone.getDiffReportEntry(
SnapshotDiffReport.DiffType.CREATE,
OZONE_URI_DELIMITER + key2)),
diff2.getDiffList());

// Rename Key2
String key2Renamed = key2 + "_renamed";
Expand All @@ -617,7 +621,8 @@ public void testSnapDiff() throws Exception {
Assert.assertEquals(1, diff3.getDiffList().size());
Assert.assertTrue(diff3.getDiffList().contains(
SnapshotDiffReportOzone.getDiffReportEntry(
SnapshotDiffReportOzone.DiffType.RENAME, key2, key2Renamed)));
SnapshotDiffReportOzone.DiffType.RENAME, OZONE_URI_DELIMITER + key2,
OZONE_URI_DELIMITER + key2Renamed)));


// Create a directory
Expand All @@ -628,14 +633,10 @@ public void testSnapDiff() throws Exception {
SnapshotDiffReportOzone
diff4 = getSnapDiffReport(volume, bucket, snap4, snap5);
Assert.assertEquals(1, diff4.getDiffList().size());
// for non-fso, directories are a special type of key with "/" appended
// at the end.
if (!bucket1.getBucketLayout().isFileSystemOptimized()) {
dir1 = dir1 + OM_KEY_PREFIX;
}
Assert.assertTrue(diff4.getDiffList().contains(
SnapshotDiffReportOzone.getDiffReportEntry(
SnapshotDiffReportOzone.DiffType.CREATE, dir1)));
SnapshotDiffReportOzone.DiffType.CREATE,
OM_KEY_PREFIX + dir1)));

String key3 = createFileKeyWithPrefix(bucket1, "key-3-");
String snap6 = "snap" + counter.incrementAndGet();
Expand All @@ -648,17 +649,14 @@ public void testSnapDiff() throws Exception {
createSnapshot(volume, bucket, snap7);
SnapshotDiffReportOzone
diff5 = getSnapDiffReport(volume, bucket, snap6, snap7);
assertEquals(2, diff5.getDiffList().size());
assertEquals(SnapshotDiffReportOzone.DiffType.RENAME,
diff5.getDiffList().get(0).getType());
assertEquals(key3, org.apache.hadoop.hdds.StringUtils.bytes2String(
diff5.getDiffList().get(0).getSourcePath()));
assertEquals(renamedKey3, org.apache.hadoop.hdds.StringUtils.bytes2String(
diff5.getDiffList().get(0).getTargetPath()));
assertEquals(SnapshotDiffReportOzone.DiffType.MODIFY,
diff5.getDiffList().get(1).getType());
assertEquals(key3, org.apache.hadoop.hdds.StringUtils.bytes2String(
diff5.getDiffList().get(1).getSourcePath()));
List<SnapshotDiffReport.DiffReportEntry> expectedDiffList =
Arrays.asList(SnapshotDiffReportOzone.getDiffReportEntry(
SnapshotDiffReport.DiffType.RENAME, OZONE_URI_DELIMITER + key3,
OZONE_URI_DELIMITER + renamedKey3),
SnapshotDiffReportOzone.getDiffReportEntry(
SnapshotDiffReport.DiffType.MODIFY, OZONE_URI_DELIMITER + key3)
);
assertEquals(expectedDiffList, diff5.getDiffList());
}

@Test
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,109 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ozone.om.snapshot;

import com.google.common.collect.Sets;
import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;

import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;
import java.util.Optional;
import java.util.Queue;
import java.util.Set;

import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;

/**
 * Resolves bucket-absolute paths for FSO {@code OmDirectoryInfo} objects.
 *
 * Directory entries in an FSO bucket are keyed by
 * {@code /volumeId/bucketId/parentObjectId/dirName}, so absolute paths are
 * reconstructed by a breadth-first walk of the directory table starting at
 * the bucket root.
 */
public class FSODirectoryPathResolver implements ObjectPathResolver {

  private final String prefix;
  private final long bucketId;
  private final Table<String, OmDirectoryInfo> dirInfoTable;

  public FSODirectoryPathResolver(String prefix, long bucketId,
      Table<String, OmDirectoryInfo> dirInfoTable) {
    this.prefix = prefix;
    this.dirInfoTable = dirInfoTable;
    this.bucketId = bucketId;
  }

  // If the entry's object id is one of the requested ids, record its path
  // and mark the id as resolved by removing it from the pending set.
  private void addToPathMap(Pair<Long, Path> objectIDPath,
      Set<Long> dirObjIds, Map<Long, Path> pathMap) {
    Long objectId = objectIDPath.getKey();
    if (dirObjIds.remove(objectId)) {
      pathMap.put(objectId, objectIDPath.getValue());
    }
  }

  /**
   * Assuming all dirObjIds belong to a bucket this function resolves absolute
   * path for a given FSO bucket.
   * @param dirObjIds Object Ids corresponding to which absolute path is needed.
   * @return Map of Path corresponding to provided directory object IDs
   * @throws IOException if the directory table cannot be iterated.
   * @throws IllegalArgumentException if any requested id is not found in
   *         the bucket.
   */
  @SuppressFBWarnings("DMI_HARDCODED_ABSOLUTE_FILENAME")
  @Override
  public Map<Long, Path> getAbsolutePathForObjectIDs(
      Optional<Set<Long>> dirObjIds) throws IOException {
    // Root of a bucket would always have the
    // key as /volumeId/bucketId/bucketId/
    if (!dirObjIds.isPresent() || dirObjIds.get().isEmpty()) {
      return Collections.emptyMap();
    }
    // Work on a copy so the caller's set is never mutated.
    Set<Long> pending = Sets.newHashSet(dirObjIds.get());
    Map<Long, Path> resolved = new HashMap<>();
    Queue<Pair<Long, Path>> bfsQueue = new LinkedList<>();

    // The bucket itself is the BFS root, mapped to '/'.
    Pair<Long, Path> bucketRoot =
        Pair.of(bucketId, Paths.get(OZONE_URI_DELIMITER));
    bfsQueue.add(bucketRoot);
    addToPathMap(bucketRoot, pending, resolved);

    // Walk level by level; stop early once every requested id is resolved.
    while (!bfsQueue.isEmpty() && !pending.isEmpty()) {
      Pair<Long, Path> current = bfsQueue.poll();
      String seekPrefix = prefix + current.getKey() + OM_KEY_PREFIX;
      try (TableIterator<String,
          ? extends Table.KeyValue<String, OmDirectoryInfo>> children =
          dirInfoTable.iterator(seekPrefix)) {
        while (!pending.isEmpty() && children.hasNext()) {
          OmDirectoryInfo child = children.next().getValue();
          Pair<Long, Path> childEntry = Pair.of(child.getObjectID(),
              current.getValue().resolve(child.getName()));
          addToPathMap(childEntry, pending, resolved);
          bfsQueue.add(childEntry);
        }
      }
    }
    // Invalid directory objectId which does not exist in the given bucket.
    if (!pending.isEmpty()) {
      throw new IllegalArgumentException(
          "Dir object Ids required but not found in bucket: " + pending);
    }
    return resolved;
  }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.ozone.om.snapshot;

import java.io.IOException;
import java.nio.file.Path;
import java.util.Map;
import java.util.Optional;
import java.util.Set;

/**
 * Class to resolve paths of Objects.
 */
public interface ObjectPathResolver {

  /**
   * Resolves absolute paths for the given object IDs.
   *
   * @param objIds optional set of object IDs to resolve; implementations
   *               may return an empty map when absent or empty.
   * @return map from object ID to its resolved absolute {@link Path}.
   * @throws IOException if the underlying metadata store cannot be read.
   */
  Map<Long, Path> getAbsolutePathForObjectIDs(Optional<Set<Long>> objIds)
      throws IOException;
}
Loading