diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/PersistentList.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/PersistentList.java similarity index 96% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/PersistentList.java rename to hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/PersistentList.java index 56e544a75895..66a43b3047a7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/PersistentList.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/PersistentList.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.om.snapshot; +package org.apache.hadoop.ozone.util; import java.util.Iterator; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java index e1ee88114228..369c3bbf67d0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java @@ -671,6 +671,18 @@ public void writeLock() { this.lock.writeLock().lock(); } + @Override + public void readUnlock(String opName) { + // not used anywhere, implemented as part of interface. + this.lock.writeLock().lock(); + } + + @Override + public void writeUnlock(String opName) { + // not used anywhere, implemented as part of interface. + this.lock.writeLock().unlock(); + } + /** * Release write lock. */ diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml index b3ebf71fc016..33bea6cde169 100644 --- a/hadoop-ozone/common/pom.xml +++ b/hadoop-ozone/common/pom.xml @@ -107,6 +107,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.junit.jupiter junit-jupiter-params + + org.apache.hadoop + hadoop-hdfs-client + diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffReport.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffReport.java index e8e387099c4e..c9e9ea95d537 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffReport.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffReport.java @@ -18,216 +18,111 @@ package org.apache.hadoop.ozone.snapshot; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotDiffReportProto; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DiffReportEntryProto; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DiffReportEntryProto.DiffTypeProto; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.OFSPath; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.util.PersistentList; -import java.util.Collections; import java.util.List; import java.util.stream.Collectors; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; + /** - * Snapshot diff report. + * Snapshot Diff Report. 
*/ -public class SnapshotDiffReport { - - private static final String LINE_SEPARATOR = System.getProperty( - "line.separator", "\n"); - - /** - * Types of the difference, which include CREATE, MODIFY, DELETE, and RENAME. - * Each type has a label for representation: - * + CREATE - * M MODIFY - * - DELETE - * R RENAME - */ - public enum DiffType { - CREATE("+"), - MODIFY("M"), - DELETE("-"), - RENAME("R"); - - private final String label; - - DiffType(String label) { - this.label = label; - } - - public String getLabel() { - return label; - } - - public DiffTypeProto toProtobuf() { - return DiffTypeProto.valueOf(this.name()); - } - - public static DiffType fromProtobuf(final DiffTypeProto type) { - return DiffType.valueOf(type.name()); - } - } - - /** - * Snapshot diff report entry. - */ - public static final class DiffReportEntry { - - /** - * The type of diff. - */ - private final DiffType type; - - /** - * Source File/Object path. - */ - private final String sourcePath; - - /** - * Destination File/Object path, if this is a re-name operation. - */ - private final String targetPath; - - private DiffReportEntry(final DiffType type, final String sourcePath, - final String targetPath) { - this.type = type; - this.sourcePath = sourcePath; - this.targetPath = targetPath; - } - - public static DiffReportEntry of(final DiffType type, - final String sourcePath) { - return of(type, sourcePath, null); - } - - public static DiffReportEntry of(final DiffType type, - final String sourcePath, - final String targetPath) { - return new DiffReportEntry(type, sourcePath, targetPath); - - } - - @Override - public String toString() { - String str = type.getLabel() + "\t" + sourcePath; - if (type == DiffType.RENAME) { - str += " -> " + targetPath; - } - return str; - } - - public DiffType getType() { - return type; - } - - @Override - public boolean equals(Object other) { - if (this == other) { - return true; - } - if (other instanceof DiffReportEntry) { - DiffReportEntry entry = (DiffReportEntry) other; - return this.type.equals(entry.getType()) && this.sourcePath - .equals(entry.sourcePath) && (this.targetPath != null ? - this.targetPath.equals(entry.targetPath) : true); - } - return false; - } - - @Override - public int hashCode() { - return toString().hashCode(); - } +public class SnapshotDiffReport + extends org.apache.hadoop.hdfs.protocol.SnapshotDiffReport { - public DiffReportEntryProto toProtobuf() { - final DiffReportEntryProto.Builder builder = DiffReportEntryProto - .newBuilder(); - builder.setDiffType(type.toProtobuf()).setSourcePath(sourcePath); - if (targetPath != null) { - builder.setTargetPath(targetPath); - } - return builder.build(); - } - - public static DiffReportEntry fromProtobuf( - final DiffReportEntryProto entry) { - return of(DiffType.fromProtobuf(entry.getDiffType()), - entry.getSourcePath(), - entry.hasTargetPath() ? entry.getTargetPath() : null); - } + private static final String LINE_SEPARATOR = + System.getProperty("line.separator", "\n"); - } - - - /** - * Volume name to which the snapshot bucket belongs. - */ - private final String volumeName; - - /** - * Bucket name to which the snapshot belongs. - */ - private final String bucketName; - /** - * start point of the diff. - */ - private final String fromSnapshot; - - /** - * end point of the diff. - */ - private final String toSnapshot; - - /** - * list of diff. 
- */ - private final List diffList; - - public SnapshotDiffReport(final String volumeName, final String bucketName, - final String fromSnapshot, final String toSnapshot, - List entryList) { + public SnapshotDiffReport(String snapshotRoot, String fromSnapshot, + String toSnapshot, PersistentList entryList, String volumeName, + String bucketName) { + // TODO handle conversion from PersistentList to java.util.List + super(snapshotRoot, fromSnapshot, toSnapshot, entryList); this.volumeName = volumeName; this.bucketName = bucketName; - this.fromSnapshot = fromSnapshot; - this.toSnapshot = toSnapshot; - this.diffList = entryList != null ? entryList : Collections.emptyList(); } - public List getDiffList() { - return diffList; - } + private String volumeName; + + private String bucketName; @Override public String toString() { StringBuilder str = new StringBuilder(); - String from = "snapshot " + fromSnapshot; - String to = "snapshot " + toSnapshot; + String from = "snapshot " + getFromSnapshot(); + String to = "snapshot " + getToSnapshot(); str.append("Difference between ").append(from).append(" and ").append(to) - .append(":") - .append(LINE_SEPARATOR); - for (DiffReportEntry entry : diffList) { + .append(":").append(LINE_SEPARATOR); + for (DiffReportEntry entry : getDiffList()) { str.append(entry.toString()).append(LINE_SEPARATOR); } return str.toString(); } - public SnapshotDiffReportProto toProtobuf() { - final SnapshotDiffReportProto.Builder builder = SnapshotDiffReportProto - .newBuilder(); - builder.setVolumeName(volumeName) - .setBucketName(bucketName) - .setFromSnapshot(fromSnapshot) - .setToSnapshot(toSnapshot); - builder.addAllDiffList(diffList.stream().map(DiffReportEntry::toProtobuf) - .collect(Collectors.toList())); + public OzoneManagerProtocolProtos.SnapshotDiffReportProto toProtobuf() { + final OzoneManagerProtocolProtos.SnapshotDiffReportProto.Builder builder = + OzoneManagerProtocolProtos.SnapshotDiffReportProto.newBuilder(); + builder.setVolumeName(volumeName).setBucketName(bucketName) + .setFromSnapshot(getFromSnapshot()).setToSnapshot(getToSnapshot()); + builder.addAllDiffList( + getDiffList().stream().map(x -> toProtobufDiffReportEntry(x)) + .collect(Collectors.toList())); return builder.build(); } + public static OzoneManagerProtocolProtos + .DiffReportEntryProto toProtobufDiffReportEntry(DiffReportEntry entry) { + final OzoneManagerProtocolProtos.DiffReportEntryProto.Builder builder = + OzoneManagerProtocolProtos.DiffReportEntryProto.newBuilder(); + builder.setDiffType(toProtobufDiffType(entry.getType())) + .setSourcePath(new String(entry.getSourcePath())); + if (entry.getTargetPath() != null) { + String targetPath = new String(entry.getTargetPath()); + builder.setTargetPath(targetPath); + } + return builder.build(); + } + + public static OzoneManagerProtocolProtos.DiffReportEntryProto + .DiffTypeProto toProtobufDiffType(DiffType type) { + return OzoneManagerProtocolProtos.DiffReportEntryProto + .DiffTypeProto.valueOf(type.name()); + } + public static SnapshotDiffReport fromProtobuf( - final SnapshotDiffReportProto report) { - return new SnapshotDiffReport(report.getVolumeName(), - report.getBucketName(), report.getFromSnapshot(), + final OzoneManagerProtocolProtos.SnapshotDiffReportProto report) { + Path bucketPath = new Path( + OZONE_URI_DELIMITER + report.getVolumeName() + + OZONE_URI_DELIMITER + report.getBucketName()); + OFSPath path = new OFSPath(bucketPath, new OzoneConfiguration()); + + // TODO handle conversion from PersistentList to java.util.List + return new 
SnapshotDiffReport(path.toString(), report.getFromSnapshot(), report.getToSnapshot(), report.getDiffListList().stream() - .map(DiffReportEntry::fromProtobuf).collect(Collectors.toList())); + .map(SnapshotDiffReport::fromProtobufDiffReportEntry) + .collect(Collectors.toList()), report.getVolumeName(), + report.getBucketName()); + } + + public static DiffType fromProtobufDiffType( + final OzoneManagerProtocolProtos.DiffReportEntryProto + .DiffTypeProto type) { + return DiffType.valueOf(type.name()); } + public static DiffReportEntry fromProtobufDiffReportEntry( + final OzoneManagerProtocolProtos.DiffReportEntryProto entry) { + if (entry == null) { + return null; + } + DiffType type = fromProtobufDiffType(entry.getDiffType()); + return type == null ? null : + new DiffReportEntry(type, entry.getSourcePath().getBytes(), + entry.hasTargetPath() ? entry.getTargetPath().getBytes() : null); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDistcpWithSnapshots.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDistcpWithSnapshots.java new file mode 100644 index 000000000000..a36ec1033f07 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDistcpWithSnapshots.java @@ -0,0 +1,328 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.fs.ozone; + +import org.apache.commons.io.IOUtils; +import org.apache.commons.lang3.RandomStringUtils; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.contract.ContractTestUtils; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.mapreduce.Job; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.TestDataUtil; +import org.apache.hadoop.ozone.OFSPath; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OMStorage; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.tools.DistCp; +import org.apache.hadoop.tools.DistCpOptions; +import org.apache.hadoop.tools.DistCpSync; +import org.apache.hadoop.tools.mapred.CopyMapper; +import org.apache.ozone.test.GenericTestUtils; +import org.jetbrains.annotations.NotNull; +import org.junit.Rule; +import org.junit.AfterClass; +import org.junit.Test; +import org.junit.Assert; +import org.junit.rules.Timeout; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.concurrent.TimeoutException; + +import static org.apache.hadoop.ozone.OzoneConsts.OM_SNAPSHOT_DIR; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; +import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME; +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; + +/** + * Testing Distcp with -diff option that uses snapdiff. 
+ */ +@RunWith(Parameterized.class) +public class TestDistcpWithSnapshots { + + private static final Logger LOG = + LoggerFactory.getLogger(TestDistcpWithSnapshots.class); + + @Rule + public Timeout globalTimeout = Timeout.seconds(300); + private static OzoneConfiguration conf; + private static MiniOzoneCluster cluster = null; + private static FileSystem fs; + private static BucketLayout bucketLayout; + private static String rootPath; + private static boolean enableRatis; + + private static File metaDir; + + @Parameterized.Parameters + public static Collection data() { + return Arrays.asList( + new Object[] {true, BucketLayout.FILE_SYSTEM_OPTIMIZED}, + new Object[] {false, BucketLayout.LEGACY}); + } + + public TestDistcpWithSnapshots(boolean enableRatis, + BucketLayout bucketLayout) { + // do nothing + } + + @Parameterized.BeforeParam + public static void initParam(boolean ratisEnable, BucketLayout layout) + throws IOException, InterruptedException, TimeoutException { + // Initialize the cluster before EACH set of parameters + enableRatis = ratisEnable; + bucketLayout = layout; + initClusterAndEnv(); + } + + @Parameterized.AfterParam + public static void teardownParam() { + // Tear down the cluster after EACH set of parameters + if (cluster != null) { + cluster.shutdown(); + } + IOUtils.closeQuietly(fs); + } + + public static void initClusterAndEnv() + throws IOException, InterruptedException, TimeoutException { + conf = new OzoneConfiguration(); + conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, enableRatis); + bucketLayout = BucketLayout.FILE_SYSTEM_OPTIMIZED; + conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, bucketLayout.name()); + cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build(); + cluster.waitForClusterToBeReady(); + + rootPath = String.format("%s://%s/", OzoneConsts.OZONE_OFS_URI_SCHEME, + conf.get(OZONE_OM_ADDRESS_KEY)); + + // Set the fs.defaultFS and start the filesystem + conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); + conf.setClass("distcp.sync.class", OzoneDistcpSync.class, + DistCpSync.class); + fs = FileSystem.get(conf); + metaDir = OMStorage.getOmDbDir(conf); + } + + @AfterClass + public static void shutdown() { + if (cluster != null) { + cluster.shutdown(); + } + } + + @Test + public void testDistcpWithSnapDiff() throws Exception { + Path srcBucketPath = createAndGetBucketPath(); + Path insideSrcBucket = new Path(srcBucketPath, "*"); + Path dstBucketPath = createAndGetBucketPath(); + // create 2 files on source + createFiles(srcBucketPath, 2); + // Create target directory/bucket + fs.mkdirs(dstBucketPath); + + // perform normal distcp + final DistCpOptions options = + new DistCpOptions.Builder(Collections.singletonList(insideSrcBucket), + dstBucketPath).build(); + options.appendToConf(conf); + Job distcpJob = new DistCp(conf, options).execute(); + verifyCopy(dstBucketPath, distcpJob, 2, 2); + + // take Snapshot snap1 on both source and target + createSnapshot(srcBucketPath, "snap1"); + createSnapshot(dstBucketPath, "snap1"); + + + + // create another file on src and take snapshot snap2 on source + createFiles(srcBucketPath, 1); + Assert.assertEquals(3, fs.listStatus(srcBucketPath).length); + createSnapshot(srcBucketPath, "snap2"); + + // perform distcp providing snapshot snap1.snap2 as arguments to + // copy only diff b/w them. 
+ final DistCpOptions options2 = + new DistCpOptions.Builder(Collections.singletonList(srcBucketPath), + dstBucketPath).withUseDiff("snap1", "snap2").withSyncFolder(true) + .build(); + + distcpJob = new DistCp(conf, options2).execute(); + verifyCopy(dstBucketPath, distcpJob, 1, 3); + } + + @NotNull + private static Path createAndGetBucketPath() throws IOException { + OzoneBucket bucket = + TestDataUtil.createVolumeAndBucket(cluster, bucketLayout); + Path volumePath = + new Path(OZONE_URI_DELIMITER, bucket.getVolumeName()); + Path bucketPath = new Path(volumePath, bucket.getName()); + return bucketPath; + } + + @Test + public void testDistcpWithSnapDiff2() throws Exception { + Path srcBucketPath = createAndGetBucketPath(); + Path insideSrcBucket = new Path(srcBucketPath, "*"); + Path dstBucketPath = createAndGetBucketPath(); + // create 2 files on source + createFiles(srcBucketPath, 2); + // Create target directory/bucket + fs.mkdirs(dstBucketPath); + + // perform normal distcp + final DistCpOptions options = + new DistCpOptions.Builder(Collections.singletonList(insideSrcBucket), + dstBucketPath).build(); + options.appendToConf(conf); + Job distcpJob = new DistCp(conf, options).execute(); + verifyCopy(dstBucketPath, distcpJob, 2, 2); + + // take Snapshot snap1 on both source and target + createSnapshot(srcBucketPath, "snap1"); + createSnapshot(dstBucketPath, "snap1"); + + // delete 1 file on source + deleteFiles(srcBucketPath, 1); + Assert.assertEquals(1, fs.listStatus(srcBucketPath).length); + createSnapshot(srcBucketPath, "snap2"); + + // perform distcp providing snapshot snap1.snap2 as arguments to + // copy only diff b/w them. + final DistCpOptions options2 = + new DistCpOptions.Builder(Collections.singletonList(srcBucketPath), + dstBucketPath).withUseDiff("snap1", "snap2").withSyncFolder(true) + .build(); + + distcpJob = new DistCp(conf, options2).execute(); + verifyCopy(dstBucketPath, distcpJob, 0, 1); + } + + @Test + public void testDistcpWithSnapDiff3() throws Exception { + Path srcBucketPath = createAndGetBucketPath(); + Path insideSrcBucket = new Path(srcBucketPath, "*"); + Path dstBucketPath = createAndGetBucketPath(); + // create 2 files on source + createFiles(srcBucketPath, 2); + // Create target directory/bucket + fs.mkdirs(dstBucketPath); + + // perform normal distcp + final DistCpOptions options = + new DistCpOptions.Builder(Collections.singletonList(insideSrcBucket), + dstBucketPath).build(); + options.appendToConf(conf); + Job distcpJob = new DistCp(conf, options).execute(); + verifyCopy(dstBucketPath, distcpJob, 2, 2); + + // take Snapshot snap1 on both source and target + createSnapshot(srcBucketPath, "snap1"); + createSnapshot(dstBucketPath, "snap1"); + + // delete 1 file on source + renameFiles(srcBucketPath, 1); + Assert.assertEquals(2, fs.listStatus(srcBucketPath).length); + createSnapshot(srcBucketPath, "snap2"); + + // perform distcp providing snapshot snap1.snap2 as arguments to + // copy only diff b/w them. 
+ final DistCpOptions options2 = + new DistCpOptions.Builder(Collections.singletonList(srcBucketPath), + dstBucketPath).withUseDiff("snap1", "snap2").withSyncFolder(true) + .build(); + + distcpJob = new DistCp(conf, options2).execute(); + verifyCopy(dstBucketPath, distcpJob, 0, 2); + } + + private void deleteFiles(Path path, int fileCount) throws IOException { + FileStatus[] filesInPath = fs.listStatus(path); + Assert.assertTrue("Cannot delete more files than what is existing", + fileCount <= filesInPath.length); + for (int i = 0; i < fileCount; i++) { + Path toDelete = filesInPath[i].getPath(); + fs.delete(toDelete, false); + } + } + + private void renameFiles(Path path, int fileCount) throws IOException { + FileStatus[] filesInPath = fs.listStatus(path); + Assert.assertTrue("Cannot delete more files than what is existing", + fileCount <= filesInPath.length); + for (int i = 0; i < fileCount; i++) { + Path from = filesInPath[i].getPath(); + Path to = new Path(from + "_renamed"); + fs.rename(from, to); + } + } + + private static void verifyCopy(Path dstBucketPath, Job distcpJob, + long expectedFilesToBeCopied, long expectedTotalFilesInDest) + throws IOException { + long filesCopied = + distcpJob.getCounters().findCounter(CopyMapper.Counter.COPY).getValue(); + FileStatus[] destinationFileStatus = fs.listStatus(dstBucketPath); + Assert.assertEquals(expectedTotalFilesInDest, destinationFileStatus.length); + Assert.assertEquals(expectedFilesToBeCopied, filesCopied); + } + + private static void createFiles(Path srcBucketPath, int fileCount) + throws IOException { + for (int i = 1; i <= fileCount; i++) { + String keyName = "key" + RandomStringUtils.randomNumeric(5); + Path file = + new Path(srcBucketPath, keyName); + ContractTestUtils.touch(fs, file); + } + } + + private Path createSnapshot(Path path, String snapshotName) + throws IOException, InterruptedException, TimeoutException { + OFSPath ofsPath = new OFSPath(path, conf); + String volume = ofsPath.getVolumeName(); + String bucket = ofsPath.getBucketName(); + Path snapPath = fs.createSnapshot(path, snapshotName); + SnapshotInfo snapshotInfo = + cluster.getOzoneManager().getMetadataManager().getSnapshotInfoTable() + .get(SnapshotInfo.getTableKey(volume, bucket, snapshotName)); + String snapshotDirName = + metaDir + OM_KEY_PREFIX + OM_SNAPSHOT_DIR + OM_KEY_PREFIX + + OM_DB_NAME + snapshotInfo.getCheckpointDirName() + OM_KEY_PREFIX + + "CURRENT"; + GenericTestUtils.waitFor(() -> new File(snapshotDirName).exists(), 1000, + 120000); + return snapPath; + } + +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java index c8983df3fe5d..1ac5d0c7b4e5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java @@ -465,12 +465,16 @@ public void testSnapDiff() throws Exception { // Diff should have 2 entries SnapshotDiffReport diff2 = store.snapshotDiff(volume, bucket, snap2, snap3); Assert.assertEquals(2, diff2.getDiffList().size()); - Assert.assertTrue(diff2.getDiffList().contains( - SnapshotDiffReport.DiffReportEntry - .of(SnapshotDiffReport.DiffType.CREATE, key2))); - Assert.assertTrue(diff2.getDiffList().contains( - SnapshotDiffReport.DiffReportEntry - .of(SnapshotDiffReport.DiffType.DELETE, key1))); + org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry 
del = + new org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry( + SnapshotDiffReport.DiffType.DELETE, key1.getBytes()); + org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry creat = + new org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry( + SnapshotDiffReport.DiffType.CREATE, key2.getBytes()); + + Assert.assertTrue(diff2.getDiffList().contains(creat)); + + Assert.assertTrue(diff2.getDiffList().contains(del)); // Rename Key2 String key2Renamed = key2 + "_renamed"; @@ -480,8 +484,9 @@ public void testSnapDiff() throws Exception { SnapshotDiffReport diff3 = store.snapshotDiff(volume, bucket, snap3, snap4); Assert.assertEquals(1, diff3.getDiffList().size()); Assert.assertTrue(diff3.getDiffList().contains( - SnapshotDiffReport.DiffReportEntry - .of(SnapshotDiffReport.DiffType.RENAME, key2, key2Renamed))); + new org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry( + SnapshotDiffReport.DiffType.RENAME, key2.getBytes(), + key2Renamed.getBytes()))); // Create a directory @@ -497,8 +502,8 @@ public void testSnapDiff() throws Exception { dir1 = dir1 + OM_KEY_PREFIX; } Assert.assertTrue(diff4.getDiffList().contains( - SnapshotDiffReport.DiffReportEntry - .of(SnapshotDiffReport.DiffType.CREATE, dir1))); + new org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry( + SnapshotDiffReport.DiffType.CREATE, dir1.getBytes()))); } diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmDBDiffReportEntryCodec.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmDBDiffReportEntryCodec.java index 06ac8452a3a1..723bbc2c32fa 100644 --- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmDBDiffReportEntryCodec.java +++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmDBDiffReportEntryCodec.java @@ -19,34 +19,39 @@ import java.io.IOException; import org.apache.hadoop.hdds.utils.db.Codec; +import org.apache.hadoop.ozone.snapshot.SnapshotDiffReport; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.snapshot.SnapshotDiffReport.DiffReportEntry; import static com.google.common.base.Preconditions.checkNotNull; /** * Codec to encode DiffReportEntry as byte array. 
*/ -public class OmDBDiffReportEntryCodec implements Codec { +public class OmDBDiffReportEntryCodec implements Codec { @Override - public byte[] toPersistedFormat(DiffReportEntry object) + public byte[] toPersistedFormat(org.apache.hadoop.hdfs.protocol.SnapshotDiffReport + .DiffReportEntry object) throws IOException { checkNotNull(object, "Null object can't be converted to byte array."); - return object.toProtobuf().toByteArray(); + return SnapshotDiffReport.toProtobufDiffReportEntry(object).toByteArray(); } @Override - public DiffReportEntry fromPersistedFormat(byte[] rawData) + public org.apache.hadoop.hdfs.protocol.SnapshotDiffReport + .DiffReportEntry fromPersistedFormat(byte[] rawData) throws IOException { checkNotNull(rawData, "Null byte array can't be converted to " + "real object."); - return DiffReportEntry.fromProtobuf( + return SnapshotDiffReport.fromProtobufDiffReportEntry( OzoneManagerProtocolProtos.DiffReportEntryProto.parseFrom(rawData)); } @Override - public DiffReportEntry copyObject(DiffReportEntry object) { + public org.apache.hadoop.hdfs.protocol.SnapshotDiffReport + .DiffReportEntry copyObject(org.apache.hadoop.hdfs.protocol.SnapshotDiffReport + .DiffReportEntry object) { // Note: Not really a "copy". from OmDBDiffReportEntryCodec return object; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/RocksDbPersistentList.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/RocksDbPersistentList.java index e7b9f4954c9c..93d6a457a94a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/RocksDbPersistentList.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/RocksDbPersistentList.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.utils.db.CodecRegistry; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator; +import org.apache.hadoop.ozone.util.PersistentList; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.RocksDBException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java index 06c601a6480c..a85936b752f1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java @@ -18,6 +18,8 @@ package org.apache.hadoop.ozone.om.snapshot; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import java.util.Iterator; import java.util.UUID; import org.apache.hadoop.hdds.utils.db.CodecRegistry; @@ -35,9 +37,9 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.WithObjectID; import org.apache.hadoop.ozone.snapshot.SnapshotDiffReport; -import org.apache.hadoop.ozone.snapshot.SnapshotDiffReport.DiffType; -import org.apache.hadoop.ozone.snapshot.SnapshotDiffReport.DiffReportEntry; +import org.apache.hadoop.ozone.OFSPath; +import org.apache.hadoop.ozone.util.PersistentList; import org.apache.ozone.rocksdb.util.ManagedSstFileReader; import org.apache.ozone.rocksdb.util.RdbUtil; import org.apache.ozone.rocksdiff.DifferSnapshotInfo; @@ -60,6 +62,7 @@ import java.util.stream.Stream; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import 
static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; /** * Class to generate snapshot diff. @@ -82,7 +85,8 @@ public SnapshotDiffManager(ManagedRocksDB db, this.codecRegistry.addCodec(Integer.class, new IntegerCodec()); // Need for Diff Report - this.codecRegistry.addCodec(DiffReportEntry.class, + this.codecRegistry.addCodec( + org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry.class, new OmDBDiffReportEntryCodec()); } @@ -191,11 +195,13 @@ public SnapshotDiffReport getSnapshotDiffReport(final String volume, Long.class); // Final diff report. - final PersistentList diffReport = + final PersistentList diffReport = new RocksDbPersistentList<>(db, diffReportColumnFamily, codecRegistry, - DiffReportEntry.class); + org.apache.hadoop.hdfs.protocol.SnapshotDiffReport + .DiffReportEntry.class); final Table fsKeyTable = fromSnapshot .getMetadataManager().getKeyTable(bucketLayout); @@ -242,14 +248,16 @@ public SnapshotDiffReport getSnapshotDiffReport(final String volume, // TODO: Need to change it to pagination. // https://issues.apache.org/jira/browse/HDDS-7548 - List diffReportList = new ArrayList<>(); + List diffReportList = new ArrayList<>(); diffReport.iterator().forEachRemaining(diffReportList::add); - return new SnapshotDiffReport(volume, - bucket, - fromSnapshot.getName(), + Path bucketPath = new Path( + OZONE_URI_DELIMITER + volume + OZONE_URI_DELIMITER + bucket); + OFSPath path = new OFSPath(bucketPath, new OzoneConfiguration()); + return new SnapshotDiffReport(path.toString(), fromSnapshot.getName(), toSnapshot.getName(), - diffReportList); + diffReport, volume, bucket); } finally { // Clean up: drop the intermediate column family and close them. @@ -395,7 +403,8 @@ private void generateDiffReport( final PersistentSet objectIDsToCheck, final PersistentMap oldObjIdToKeyMap, final PersistentMap newObjIdToKeyMap, - final PersistentList diffReport + final PersistentList diffReport ) throws RocksDBException, IOException { ColumnFamilyHandle deleteDiffColumnFamily = null; @@ -423,13 +432,17 @@ private void generateDiffReport( codecRegistry.asRawData(requestId + "-modifyDiff"), new ManagedColumnFamilyOptions())); - final PersistentList deleteDiffs = + final PersistentList deleteDiffs = createDiffReportPersistentList(deleteDiffColumnFamily); - final PersistentList renameDiffs = + final PersistentList renameDiffs = createDiffReportPersistentList(renameDiffColumnFamily); - final PersistentList createDiffs = + final PersistentList createDiffs = createDiffReportPersistentList(createDiffColumnFamily); - final PersistentList modifyDiffs = + final PersistentList modifyDiffs = createDiffReportPersistentList(modifyDiffColumnFamily); Iterator objectIdsIterator = objectIDsToCheck.iterator(); @@ -456,14 +469,31 @@ private void generateDiffReport( // This cannot happen. throw new IllegalStateException("Old and new key name both are null"); } else if (oldKeyName == null) { // Key Created. - createDiffs.add(DiffReportEntry.of(DiffType.CREATE, newKeyName)); + createDiffs.add( + new org.apache.hadoop.hdfs.protocol.SnapshotDiffReport + .DiffReportEntry(org.apache.hadoop.hdfs.protocol + .SnapshotDiffReport.DiffType.CREATE, + newKeyName.getBytes())); } else if (newKeyName == null) { // Key Deleted. 
- deleteDiffs.add(DiffReportEntry.of(DiffType.DELETE, oldKeyName)); + deleteDiffs.add( + new org.apache.hadoop.hdfs.protocol.SnapshotDiffReport + .DiffReportEntry(org.apache.hadoop.hdfs.protocol + .SnapshotDiffReport.DiffType.DELETE, + oldKeyName.getBytes())); } else if (oldKeyName.equals(newKeyName)) { // Key modified. - modifyDiffs.add(DiffReportEntry.of(DiffType.MODIFY, newKeyName)); + modifyDiffs.add( + new org.apache.hadoop.hdfs.protocol.SnapshotDiffReport + .DiffReportEntry( + org.apache.hadoop.hdfs.protocol + .SnapshotDiffReport.DiffType.MODIFY, + newKeyName.getBytes())); } else { // Key Renamed. renameDiffs.add( - DiffReportEntry.of(DiffType.RENAME, oldKeyName, newKeyName)); + new org.apache.hadoop.hdfs.protocol.SnapshotDiffReport + .DiffReportEntry( + org.apache.hadoop.hdfs.protocol.SnapshotDiffReport + .DiffType.RENAME, + oldKeyName.getBytes(), newKeyName.getBytes())); } } @@ -527,13 +557,12 @@ private void generateDiffReport( } } - private PersistentList createDiffReportPersistentList( - ColumnFamilyHandle columnFamilyHandle - ) { - return new RocksDbPersistentList<>(db, - columnFamilyHandle, - codecRegistry, - DiffReportEntry.class); + private PersistentList createDiffReportPersistentList( + ColumnFamilyHandle columnFamilyHandle) { + return new RocksDbPersistentList<>(db, columnFamilyHandle, codecRegistry, + org.apache.hadoop.hdfs.protocol + .SnapshotDiffReport.DiffReportEntry.class); } private BucketLayout getBucketLayout(final String volume, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestRocksDbPersistentList.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestRocksDbPersistentList.java index 9674acdc10b9..58640a7373ef 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestRocksDbPersistentList.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestRocksDbPersistentList.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; +import org.apache.hadoop.ozone.util.PersistentList; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.params.ParameterizedTest; diff --git a/hadoop-ozone/ozonefs-common/pom.xml b/hadoop-ozone/ozonefs-common/pom.xml index de50a4f7cf52..b4aed46ced58 100644 --- a/hadoop-ozone/ozonefs-common/pom.xml +++ b/hadoop-ozone/ozonefs-common/pom.xml @@ -110,5 +110,10 @@ powermock-api-mockito test + + org.apache.hadoop + hadoop-distcp + provided + diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java index f36dad9a9603..d8d5658ed3a6 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java @@ -63,6 +63,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; +import org.apache.hadoop.ozone.snapshot.SnapshotDiffReport; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenRenewer; import 
org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; @@ -646,4 +647,12 @@ public String createSnapshot(String pathStr, String snapshotName) ofsPath.getBucketName(), snapshotName); } + + @Override + public SnapshotDiffReport getSnapshotDiffReport(Path snapshotDir, + String fromSnapshot, String toSnapshot) throws IOException { + OFSPath ofsPath = new OFSPath(snapshotDir, config); + return objectStore.snapshotDiff(ofsPath.getVolumeName(), + ofsPath.getBucketName(), fromSnapshot, toSnapshot); + } } diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java index 0374b9ee2718..1ff5efa64703 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java @@ -74,6 +74,7 @@ import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; +import org.apache.hadoop.ozone.snapshot.SnapshotDiffReport; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenRenewer; @@ -1282,4 +1283,12 @@ public String createSnapshot(String pathStr, String snapshotName) ofsPath.getBucketName(), snapshotName); } + + @Override + public SnapshotDiffReport getSnapshotDiffReport(Path snapshotDir, + String fromSnapshot, String toSnapshot) throws IOException { + OFSPath ofsPath = new OFSPath(snapshotDir, config); + return proxy.snapshotDiff(ofsPath.getVolumeName(), ofsPath.getBucketName(), + fromSnapshot, toSnapshot); + } } diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java index c7665b5668d3..2d15632e66fe 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; +import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.ozone.OFSPath; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.client.OzoneBucket; @@ -1417,4 +1418,13 @@ public ContentSummary getContentSummary(Path f) throws IOException { spaceConsumed(summary[1]).build(); } + public SnapshotDiffReport getSnapshotDiffReport(final Path snapshotDir, + final String fromSnapshot, final String toSnapshot) throws IOException { + OFSPath ofsPath = + new OFSPath(snapshotDir, OzoneConfiguration.of(getConf())); + Preconditions.checkArgument(ofsPath.isBucket(), + "Unsupported : Path is not a bucket"); + return adapter.getSnapshotDiffReport(snapshotDir, fromSnapshot, toSnapshot); + } + } diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java index 0b4ff1546920..04b43873096d 100644 --- 
a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java @@ -27,6 +27,7 @@ import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.Path; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; +import org.apache.hadoop.ozone.snapshot.SnapshotDiffReport; import org.apache.hadoop.security.token.Token; /** @@ -86,4 +87,7 @@ FileStatusAdapter getFileStatus(String key, URI uri, FileChecksum getFileChecksum(String keyName, long length) throws IOException; String createSnapshot(String pathStr, String snapshotName) throws IOException; + + SnapshotDiffReport getSnapshotDiffReport(Path snapshotDir, + String fromSnapshot, String toSnapshot) throws IOException; } diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneDistcpSync.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneDistcpSync.java new file mode 100644 index 000000000000..e3a84000ab48 --- /dev/null +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneDistcpSync.java @@ -0,0 +1,78 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.hadoop.fs.ozone;
+
+
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+import org.apache.hadoop.tools.DistCpSync;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+
+import java.io.IOException;
+
+/**
+ * Ozone's DistCpSync implementation.
+ */
+public class OzoneDistcpSync extends DistCpSync {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(OzoneDistcpSync.class);
+
+  @Override
+  public void checkFilesystemSupport(FileSystem srcFs, FileSystem tgtFs) {
+    if (!(srcFs instanceof BasicRootedOzoneFileSystem)) {
+      throw new IllegalArgumentException(
+          "Unsupported source file system: " + srcFs.getScheme() + "://. "
+              + "Supported file systems: ofs. If the scheme is hdfs/webhdfs, "
+              + "use the respective distcp.sync.class in the configuration.");
+    }
+    if (!(tgtFs instanceof BasicRootedOzoneFileSystem)) {
+      throw new IllegalArgumentException(
+          "Unsupported target file system: " + tgtFs.getScheme() + "://. "
+              + "Supported file systems: ofs. If the scheme is hdfs/webhdfs, "
+              + "use the respective distcp.sync.class in the configuration.");
+    }
+  }
+
+  @Override
+  protected SnapshotDiffReport getSnapshotDiffReport(Path ssDir, FileSystem fs,
+      String from, String to) throws IOException {
+    if (fs instanceof BasicRootedOzoneFileSystem) {
+      BasicRootedOzoneFileSystem ofs = (BasicRootedOzoneFileSystem) fs;
+      return ofs.getSnapshotDiffReport(ssDir, from, to);
+    } else {
+      throw new IllegalArgumentException(
+          "Unsupported file system: " + fs.getScheme() + "://. "
+              + "Supported file systems: ofs. If the scheme is hdfs/webhdfs, "
+              + "use the respective distcp.sync.class in the configuration.");
+    }
+  }
+
+  @Override
+  protected boolean checkNoChange(FileSystem fs, Path path) {
+    // TODO fix HDDS-7905.
+    return true;
+  }
+}
diff --git a/pom.xml b/pom.xml
index 1f76c46baaec..73d523d01790 100644
--- a/pom.xml
+++ b/pom.xml
@@ -63,7 +63,8 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
     2.7.3
-    3.3.4
+
+    3.4.0-SNAPSHOT
     ${ozone.version}
@@ -177,6 +178,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
     3.4.2
     1.2.22
+    2.0.2
     1.6.21
     1.8
     4.6.1
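
Note: the new sync hook is wired in through DistCp's pluggable sync class, the same way TestDistcpWithSnapshots does it in this patch. A minimal driver sketch follows, assuming snapshots snap1/snap2 already exist on the source bucket and snap1 on the target; the ofs service id and volume/bucket names are hypothetical placeholders, not values from this change.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.tools.DistCp;
import org.apache.hadoop.tools.DistCpOptions;
import org.apache.hadoop.tools.DistCpSync;

import java.util.Collections;

public final class OzoneSnapDiffDistcpExample {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Route DistCp's -diff handling to the Ozone-aware sync implementation
    // added by this patch.
    conf.setClass("distcp.sync.class",
        org.apache.hadoop.fs.ozone.OzoneDistcpSync.class, DistCpSync.class);

    // Hypothetical source and target buckets (rooted ofs paths).
    Path src = new Path("ofs://ozone1/vol1/bucket1");
    Path dst = new Path("ofs://ozone1/vol1/bucket2");

    // Copy only the difference between snap1 and snap2 of the source bucket.
    DistCpOptions options = new DistCpOptions.Builder(
        Collections.singletonList(src), dst)
        .withUseDiff("snap1", "snap2")
        .withSyncFolder(true)
        .build();
    options.appendToConf(conf);

    Job job = new DistCp(conf, options).execute();
    System.out.println("DistCp finished: " + job.isSuccessful());
  }
}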