diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d63064e89851..34f95e6d37b1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -153,7 +153,7 @@ jobs: run: hadoop-ozone/dev-support/checks/build.sh -Dskip.npx -Dskip.installnpx -Djavac.version=${{ matrix.java }} env: OZONE_WITH_COVERAGE: false - CANCEL_NATIVE_VERSION_CHECK: true + SKIP_NATIVE_VERSION_CHECK: true - name: Delete temporary build artifacts before caching run: | #Never cache local artifacts @@ -173,12 +173,12 @@ jobs: steps: - name: Checkout project uses: actions/checkout@v3 - if: matrix.check != 'bats' + if: matrix.check != 'bats' && matrix.check != 'unit' - name: Checkout project with history uses: actions/checkout@v3 with: fetch-depth: 0 - if: matrix.check == 'bats' + if: matrix.check == 'bats' || matrix.check == 'unit' - name: Cache for maven dependencies uses: actions/cache@v3 with: @@ -337,8 +337,10 @@ jobs: - flaky fail-fast: false steps: - - name: Checkout project + - name: Checkout project with history uses: actions/checkout@v3 + with: + fetch-depth: 0 - name: Cache for maven dependencies uses: actions/cache@v3 with: diff --git a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java index da0252a09009..2ab264a6f695 100644 --- a/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java +++ b/hadoop-hdds/rocks-native/src/main/java/org/apache/hadoop/hdds/utils/NativeLibraryLoader.java @@ -40,6 +40,8 @@ public class NativeLibraryLoader { LoggerFactory.getLogger(NativeLibraryLoader.class); public static final int LIBRARY_SHUTDOWN_HOOK_PRIORITY = 1; private static final String OS = System.getProperty("os.name").toLowerCase(); + + public static final String NATIVE_LIB_TMP_DIR = "native.lib.tmp.dir"; private Map librariesLoaded; private static volatile NativeLibraryLoader instance; @@ -136,7 +138,8 @@ private Optional copyResourceFromJarToTemp(final String libraryName) // create a temporary file to copy the library to final File temp = File.createTempFile(libraryName, getLibOsSuffix(), - new File("")); + new File(Optional.ofNullable(System.getProperty(NATIVE_LIB_TMP_DIR)) + .orElse(""))); if (!temp.exists()) { return Optional.empty(); } else { diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml index c6314d8ce663..1c3df3a9e303 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml +++ b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml @@ -82,6 +82,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds-rocks-native + + org.mockito + mockito-core + @@ -201,5 +205,24 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + native-testing + + + rocks_tools_native + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + ${maven-surefire-plugin.argLine} @{argLine} -Djava.library.path=${project.parent.basedir}/rocks-native/target/native/rocksdb + + + + + diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/ManagedSstFileReader.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/ManagedSstFileReader.java index 774c2f637c83..72d2a8b79f97 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/ManagedSstFileReader.java +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdb/util/ManagedSstFileReader.java @@ 
-88,8 +88,7 @@ public long getEstimatedTotalKeys() throws RocksDBException { return estimatedTotalKeys; } - public Stream getKeyStream() throws RocksDBException, - NativeLibraryNotLoadedException, IOException { + public Stream getKeyStream() throws RocksDBException { // TODO: [SNAPSHOT] Check if default Options and ReadOptions is enough. final MultipleSstFileIterator itr = new MultipleSstFileIterator(sstFiles) { diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestManagedSstFileReader.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestManagedSstFileReader.java new file mode 100644 index 000000000000..5c37eca22a83 --- /dev/null +++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdb/util/TestManagedSstFileReader.java @@ -0,0 +1,158 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ozone.rocksdb.util; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.StringUtils; +import org.apache.hadoop.hdds.utils.NativeLibraryLoader; +import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; +import org.apache.hadoop.hdds.utils.db.managed.ManagedEnvOptions; +import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions; +import org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpTool; +import org.apache.hadoop.hdds.utils.db.managed.ManagedSstFileWriter; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; +import org.rocksdb.RocksDBException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.TreeMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.SynchronousQueue; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import static org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME; + +/** + * ManagedSstFileReader tests. + */ +public class TestManagedSstFileReader { + + private static final Logger LOG = + LoggerFactory.getLogger(TestManagedSstFileReader.class); + + // Key prefix containing all characters, to check if all characters can be + // written & read from rocksdb through SSTDumptool + private static final String KEY_PREFIX = IntStream.range(0, 256).boxed() + .map(i -> String.format("%c", i)) + .collect(Collectors.joining("")); + + private String createRandomSSTFile(TreeMap keys) + throws IOException, RocksDBException { + File file = File.createTempFile("tmp_sst_file", ".sst"); + file.deleteOnExit(); + + try (ManagedOptions managedOptions = new ManagedOptions(); + ManagedEnvOptions managedEnvOptions = new ManagedEnvOptions(); + ManagedSstFileWriter sstFileWriter = new ManagedSstFileWriter( + managedEnvOptions, managedOptions)) { + sstFileWriter.open(file.getAbsolutePath()); + for (Map.Entry entry : keys.entrySet()) { + byte[] keyByte = StringUtils.string2Bytes(entry.getKey()); + if (entry.getValue() == 0) { + sstFileWriter.delete(keyByte); + } else { + sstFileWriter.put(keyByte, keyByte); + } + } + sstFileWriter.finish(); + } + return file.getAbsolutePath(); + } + + private Map createKeys(int startRange, int endRange) { + return IntStream.range(startRange, endRange).boxed() + .collect(Collectors.toMap(i -> KEY_PREFIX + i, + i -> i % 2)); + } + + private Pair, List> createDummyData( + int numberOfFiles) throws RocksDBException, IOException { + List files = new ArrayList<>(); + int numberOfKeysPerFile = 1000; + Map keys = new HashMap<>(); + int cnt = 0; + for (int i = 0; i < numberOfFiles; i++) { + TreeMap fileKeys = new TreeMap<>(createKeys(cnt, + cnt + numberOfKeysPerFile)); + cnt += fileKeys.size(); + String tmpSSTFile = createRandomSSTFile(fileKeys); + 
files.add(tmpSSTFile); + keys.putAll(fileKeys); + } + return Pair.of(keys, files); + } + + @ParameterizedTest + @ValueSource(ints = {0, 1, 2, 3, 7, 10}) + public void testGetKeyStream(int numberOfFiles) + throws RocksDBException, IOException { + Pair, List> data = + createDummyData(numberOfFiles); + List files = data.getRight(); + Map keys = data.getLeft(); + try (Stream keyStream = + new ManagedSstFileReader(files).getKeyStream()) { + keyStream.forEach(key -> { + Assertions.assertEquals(keys.get(key), 1); + keys.remove(key); + }); + keys.values().forEach(val -> Assertions.assertEquals(0, val)); + } + } + + @ParameterizedTest + @ValueSource(ints = {0, 1, 2, 3, 7, 10}) + public void testGetKeyStreamWithTombstone(int numberOfFiles) + throws RocksDBException, IOException, NativeLibraryNotLoadedException { + NativeLibraryLoader.getInstance() + .loadLibrary(ROCKS_TOOLS_NATIVE_LIBRARY_NAME); + Pair, List> data = + createDummyData(numberOfFiles); + List files = data.getRight(); + Map keys = data.getLeft(); + ExecutorService executorService = new ThreadPoolExecutor(0, + 1, 60, TimeUnit.SECONDS, + new SynchronousQueue<>(), new ThreadFactoryBuilder() + .setNameFormat("snapshot-diff-manager-sst-dump-tool-TID-%d") + .build(), new ThreadPoolExecutor.DiscardPolicy()); + ManagedSSTDumpTool sstDumpTool = + new ManagedSSTDumpTool(executorService, 256); + + try (Stream keyStream = new ManagedSstFileReader(files) + .getKeyStreamWithTombstone(sstDumpTool)) { + keyStream.forEach(keys::remove); + Assertions.assertEquals(0, keys.size()); + } finally { + executorService.shutdown(); + } + + } +} diff --git a/hadoop-ozone/dev-support/checks/build.sh b/hadoop-ozone/dev-support/checks/build.sh index d55837b1d88c..dbdb14a2c086 100755 --- a/hadoop-ozone/dev-support/checks/build.sh +++ b/hadoop-ozone/dev-support/checks/build.sh @@ -26,7 +26,7 @@ else MAVEN_OPTIONS="${MAVEN_OPTIONS} -Djacoco.skip" fi -if [[ "${CANCEL_NATIVE_VERSION_CHECK}" != "true" ]]; then +if [[ "${SKIP_NATIVE_VERSION_CHECK}" != "true" ]]; then NATIVE_MAVEN_OPTIONS="-Drocks_tools_native" . "$DIR/native_check.sh" init_native_maven_opts diff --git a/hadoop-ozone/dev-support/checks/junit.sh b/hadoop-ozone/dev-support/checks/junit.sh index c34e6925a126..471ad8681efc 100644 --- a/hadoop-ozone/dev-support/checks/junit.sh +++ b/hadoop-ozone/dev-support/checks/junit.sh @@ -19,6 +19,7 @@ set -u -o pipefail DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" cd "$DIR/../../.." || exit 1 +: ${SKIP_NATIVE_VERSION_CHECK:="false"} : ${CHECK:="unit"} : ${ITERATIONS:="1"} : ${OZONE_WITH_COVERAGE:="false"} @@ -29,7 +30,7 @@ if [[ ${ITERATIONS} -le 0 ]]; then fi export MAVEN_OPTS="-Xmx4096m $MAVEN_OPTS" -MAVEN_OPTIONS='-B -Dskip.npx -Dskip.installnpx --no-transfer-progress' +MAVEN_OPTIONS='-B -Dskip.npx -Dskip.installnpx -Dnative.lib.tmp.dir=/tmp --no-transfer-progress' if [[ "${OZONE_WITH_COVERAGE}" != "true" ]]; then MAVEN_OPTIONS="${MAVEN_OPTIONS} -Djacoco.skip" @@ -41,6 +42,13 @@ else MAVEN_OPTIONS="${MAVEN_OPTIONS} --fail-at-end" fi +if [[ "${SKIP_NATIVE_VERSION_CHECK}" == "false" ]]; then + NATIVE_MAVEN_OPTIONS="-Drocks_tools_native" + . 
"$DIR/native_check.sh" + init_native_maven_opts + MAVEN_OPTIONS="${MAVEN_OPTIONS} ${NATIVE_MAVEN_OPTIONS}" +fi + if [[ "${CHECK}" == "integration" ]] || [[ ${ITERATIONS} -gt 1 ]]; then mvn ${MAVEN_OPTIONS} -DskipTests clean install fi diff --git a/hadoop-ozone/dist/src/main/smoketest/snapshot/upgrade-snapshot-check.robot b/hadoop-ozone/dist/src/main/smoketest/snapshot/upgrade-snapshot-check.robot index 7ec7d3a31281..ee2856499a74 100644 --- a/hadoop-ozone/dist/src/main/smoketest/snapshot/upgrade-snapshot-check.robot +++ b/hadoop-ozone/dist/src/main/smoketest/snapshot/upgrade-snapshot-check.robot @@ -74,13 +74,13 @@ Attempt to snapshotDiff when snapshot feature is disabled ${output} = Execute and checkrc ozone sh snapshot snapshotDiff /snapvolume-2/snapbucket-1 snapshot1 snapshot2 255 Should contain ${output} NOT_SUPPORTED_OPERATION -# HDDS-8732 -#Delete snapshot -# [Tags] finalized-snapshot-tests -# ${output} = Execute ozone sh snapshot delete /snapvolume-1/snapbucket-1 snapshot1 -# Should not contain ${output} Failed -# ${output} = Execute ozone sh snapshot ls /snapvolume-1/snapbucket-1 -# Should contain ${output} SNAPSHOT_DELETED +Delete snapshot + [Tags] finalized-snapshot-tests + ${output} = Execute ozone sh snapshot delete /snapvolume-1/snapbucket-1 snapshot1 + Should not contain ${output} Failed + + ${output} = Execute ozone sh snapshot ls /snapvolume-1/snapbucket-1 | jq '[.[] | select(.name == "snapshot1") | .snapshotStatus] | if length > 0 then .[] else "SNAPSHOT_DELETED" end' + Should contain ${output} SNAPSHOT_DELETED Attempt to delete when snapshot feature is disabled [Tags] pre-finalized-snapshot-tests diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java index a052e4bb161b..422407d927f8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java @@ -76,6 +76,7 @@ import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE; @@ -127,6 +128,8 @@ public class TestOmSnapshot { private static final Duration POLL_INTERVAL_DURATION = Duration.ofMillis(500); private static final Duration POLL_MAX_DURATION = Duration.ofSeconds(10); + private static AtomicInteger counter; + @Rule public Timeout timeout = new Timeout(180, TimeUnit.SECONDS); @@ -207,6 +210,7 @@ private void init() throws Exception { keyManager.stop(); preFinalizationChecks(); finalizeOMUpgrade(); + counter = new AtomicInteger(); } private static void expectFailurePreFinalization(LambdaTestUtils. 
@@ -282,10 +286,10 @@ public static void tearDown() throws Exception { // based on TestOzoneRpcClientAbstract:testListKey public void testListKey() throws IOException, InterruptedException, TimeoutException { - String volumeA = "vol-a-" + RandomStringUtils.randomNumeric(5); - String volumeB = "vol-b-" + RandomStringUtils.randomNumeric(5); - String bucketA = "buc-a-" + RandomStringUtils.randomNumeric(5); - String bucketB = "buc-b-" + RandomStringUtils.randomNumeric(5); + String volumeA = "vol-a-" + counter.incrementAndGet(); + String volumeB = "vol-b-" + counter.incrementAndGet(); + String bucketA = "buc-a-" + counter.incrementAndGet(); + String bucketB = "buc-b-" + counter.incrementAndGet(); store.createVolume(volumeA); store.createVolume(volumeB); OzoneVolume volA = store.getVolume(volumeA); @@ -368,8 +372,8 @@ public void testListKey() // based on TestOzoneRpcClientAbstract:testListKeyOnEmptyBucket public void testListKeyOnEmptyBucket() throws IOException, InterruptedException, TimeoutException { - String volume = "vol-" + RandomStringUtils.randomNumeric(5); - String bucket = "buc-" + RandomStringUtils.randomNumeric(5); + String volume = "vol-" + counter.incrementAndGet(); + String bucket = "buc-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume vol = store.getVolume(volume); vol.createBucket(bucket); @@ -434,8 +438,8 @@ public void checkKey() throws Exception { @Test public void testListDeleteKey() throws IOException, InterruptedException, TimeoutException { - String volume = "vol-" + RandomStringUtils.randomNumeric(5); - String bucket = "buc-" + RandomStringUtils.randomNumeric(5); + String volume = "vol-" + counter.incrementAndGet(); + String bucket = "buc-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume vol = store.getVolume(volume); vol.createBucket(bucket); @@ -460,8 +464,8 @@ public void testListDeleteKey() @Test public void testListAddNewKey() throws IOException, InterruptedException, TimeoutException { - String volume = "vol-" + RandomStringUtils.randomNumeric(5); - String bucket = "buc-" + RandomStringUtils.randomNumeric(5); + String volume = "vol-" + counter.incrementAndGet(); + String bucket = "buc-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume vol = store.getVolume(volume); vol.createBucket(bucket); @@ -498,8 +502,8 @@ private int keyCount(OzoneBucket bucket, String keyPrefix) @Test public void testNonExistentBucket() throws Exception { - String volume = "vol-" + RandomStringUtils.randomNumeric(5); - String bucket = "buc-" + RandomStringUtils.randomNumeric(5); + String volume = "vol-" + counter.incrementAndGet(); + String bucket = "buc-" + counter.incrementAndGet(); //create volume but not bucket store.createVolume(volume); @@ -510,8 +514,8 @@ public void testNonExistentBucket() throws Exception { @Test public void testCreateSnapshotMissingMandatoryParams() throws Exception { - String volume = "vol-" + RandomStringUtils.randomNumeric(5); - String bucket = "buck-" + RandomStringUtils.randomNumeric(5); + String volume = "vol-" + counter.incrementAndGet(); + String bucket = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); volume1.createBucket(bucket); @@ -519,7 +523,7 @@ public void testCreateSnapshotMissingMandatoryParams() throws Exception { // Create Key1 and take snapshot String key1 = "key-1-"; createFileKey(bucket1, key1); - String snap1 = "snap" + RandomStringUtils.randomNumeric(5); + String snap1 = "snap" + counter.incrementAndGet(); 
createSnapshot(volume, bucket, snap1); String nullstr = ""; @@ -533,9 +537,9 @@ public void testCreateSnapshotMissingMandatoryParams() throws Exception { @Test public void testBucketDeleteIfSnapshotExists() throws Exception { - String volume1 = "vol-" + RandomStringUtils.randomNumeric(5); - String bucket1 = "buc-" + RandomStringUtils.randomNumeric(5); - String bucket2 = "buc-" + RandomStringUtils.randomNumeric(5); + String volume1 = "vol-" + counter.incrementAndGet(); + String bucket1 = "buc-" + counter.incrementAndGet(); + String bucket2 = "buc-" + counter.incrementAndGet(); store.createVolume(volume1); OzoneVolume volume = store.getVolume(volume1); volume.createBucket(bucket1); @@ -558,8 +562,8 @@ public void testBucketDeleteIfSnapshotExists() throws Exception { @Test public void testSnapDiff() throws Exception { - String volume = "vol-" + RandomStringUtils.randomNumeric(5); - String bucket = "buck-" + RandomStringUtils.randomNumeric(5); + String volume = "vol-" + counter.incrementAndGet(); + String bucket = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); volume1.createBucket(bucket); @@ -567,10 +571,10 @@ public void testSnapDiff() throws Exception { // Create Key1 and take snapshot String key1 = "key-1-"; key1 = createFileKey(bucket1, key1); - String snap1 = "snap" + RandomStringUtils.randomNumeric(5); + String snap1 = "snap" + counter.incrementAndGet(); createSnapshot(volume, bucket, snap1); // Do nothing, take another snapshot - String snap2 = "snap" + RandomStringUtils.randomNumeric(5); + String snap2 = "snap" + counter.incrementAndGet(); createSnapshot(volume, bucket, snap2); SnapshotDiffReportOzone @@ -580,7 +584,7 @@ public void testSnapDiff() throws Exception { String key2 = "key-2-"; key2 = createFileKey(bucket1, key2); bucket1.deleteKey(key1); - String snap3 = "snap" + RandomStringUtils.randomNumeric(5); + String snap3 = "snap" + counter.incrementAndGet(); createSnapshot(volume, bucket, snap3); // Diff should have 2 entries @@ -597,7 +601,7 @@ public void testSnapDiff() throws Exception { // Rename Key2 String key2Renamed = key2 + "_renamed"; bucket1.renameKey(key2, key2Renamed); - String snap4 = "snap" + RandomStringUtils.randomNumeric(5); + String snap4 = "snap" + counter.incrementAndGet(); createSnapshot(volume, bucket, snap4); SnapshotDiffReportOzone @@ -609,9 +613,9 @@ public void testSnapDiff() throws Exception { // Create a directory - String dir1 = "dir-1" + RandomStringUtils.randomNumeric(5); + String dir1 = "dir-1" + counter.incrementAndGet(); bucket1.createDirectory(dir1); - String snap5 = "snap" + RandomStringUtils.randomNumeric(5); + String snap5 = "snap" + counter.incrementAndGet(); createSnapshot(volume, bucket, snap5); SnapshotDiffReportOzone diff4 = getSnapDiffReport(volume, bucket, snap4, snap5); @@ -644,8 +648,8 @@ private SnapshotDiffReportOzone getSnapDiffReport(String volume, @Test public void testSnapDiffNoSnapshot() throws Exception { - String volume = "vol-" + RandomStringUtils.randomNumeric(5); - String bucket = "buck-" + RandomStringUtils.randomNumeric(5); + String volume = "vol-" + counter.incrementAndGet(); + String bucket = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); volume1.createBucket(bucket); @@ -653,9 +657,9 @@ public void testSnapDiffNoSnapshot() throws Exception { // Create Key1 and take snapshot String key1 = "key-1-"; createFileKey(bucket1, key1); - String snap1 = "snap" + RandomStringUtils.randomNumeric(5); 
+ String snap1 = "snap" + counter.incrementAndGet(); createSnapshot(volume, bucket, snap1); - String snap2 = "snap" + RandomStringUtils.randomNumeric(5); + String snap2 = "snap" + counter.incrementAndGet(); // Destination snapshot is invalid LambdaTestUtils.intercept(OMException.class, "KEY_NOT_FOUND", @@ -671,11 +675,11 @@ public void testSnapDiffNoSnapshot() throws Exception { @Test public void testSnapDiffNonExistentUrl() throws Exception { // Valid volume bucket - String volumea = "vol-" + RandomStringUtils.randomNumeric(5); - String bucketa = "buck-" + RandomStringUtils.randomNumeric(5); + String volumea = "vol-" + counter.incrementAndGet(); + String bucketa = "buck-" + counter.incrementAndGet(); // Dummy volume bucket - String volumeb = "vol-" + RandomStringUtils.randomNumeric(5); - String bucketb = "buck-" + RandomStringUtils.randomNumeric(5); + String volumeb = "vol-" + counter.incrementAndGet(); + String bucketb = "buck-" + counter.incrementAndGet(); store.createVolume(volumea); OzoneVolume volume1 = store.getVolume(volumea); volume1.createBucket(bucketa); @@ -683,9 +687,9 @@ public void testSnapDiffNonExistentUrl() throws Exception { // Create Key1 and take 2 snapshots String key1 = "key-1-"; createFileKey(bucket1, key1); - String snap1 = "snap" + RandomStringUtils.randomNumeric(5); + String snap1 = "snap" + counter.incrementAndGet(); createSnapshot(volumea, bucketa, snap1); - String snap2 = "snap" + RandomStringUtils.randomNumeric(5); + String snap2 = "snap" + counter.incrementAndGet(); createSnapshot(volumea, bucketa, snap2); // Bucket is nonexistent LambdaTestUtils.intercept(OMException.class, @@ -706,8 +710,8 @@ public void testSnapDiffNonExistentUrl() throws Exception { @Test public void testSnapDiffMissingMandatoryParams() throws Exception { - String volume = "vol-" + RandomStringUtils.randomNumeric(5); - String bucket = "buck-" + RandomStringUtils.randomNumeric(5); + String volume = "vol-" + counter.incrementAndGet(); + String bucket = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); volume1.createBucket(bucket); @@ -715,9 +719,9 @@ public void testSnapDiffMissingMandatoryParams() throws Exception { // Create Key1 and take snapshot String key1 = "key-1-"; createFileKey(bucket1, key1); - String snap1 = "snap" + RandomStringUtils.randomNumeric(5); + String snap1 = "snap" + counter.incrementAndGet(); createSnapshot(volume, bucket, snap1); - String snap2 = "snap" + RandomStringUtils.randomNumeric(5); + String snap2 = "snap" + counter.incrementAndGet(); createSnapshot(volume, bucket, snap2); String nullstr = ""; // Destination snapshot is empty @@ -742,9 +746,9 @@ public void testSnapDiffMissingMandatoryParams() throws Exception { @Test public void testSnapDiffMultipleBuckets() throws Exception { - String volume = "vol-" + RandomStringUtils.randomNumeric(5); - String bucketName1 = "buck-" + RandomStringUtils.randomNumeric(5); - String bucketName2 = "buck-" + RandomStringUtils.randomNumeric(5); + String volume = "vol-" + counter.incrementAndGet(); + String bucketName1 = "buck-" + counter.incrementAndGet(); + String bucketName2 = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); volume1.createBucket(bucketName1); @@ -754,13 +758,13 @@ public void testSnapDiffMultipleBuckets() throws Exception { // Create Key1 and take snapshot String key1 = "key-1-"; key1 = createFileKey(bucket1, key1); - String snap1 = "snap" + RandomStringUtils.randomNumeric(5); + String 
snap1 = "snap" + counter.incrementAndGet(); createSnapshot(volume, bucketName1, snap1); // Create key in bucket2 and bucket1 and calculate diff // Diff should not contain bucket2's key createFileKey(bucket1, key1); createFileKey(bucket2, key1); - String snap2 = "snap" + RandomStringUtils.randomNumeric(5); + String snap2 = "snap" + counter.incrementAndGet(); createSnapshot(volume, bucketName1, snap2); SnapshotDiffReportOzone diff1 = getSnapDiffReport(volume, bucketName1, snap1, snap2); @@ -778,7 +782,7 @@ public void testSnapDiffMultipleBuckets() throws Exception { public void testSnapDiffWithMultipleSSTs() throws IOException, InterruptedException, TimeoutException { // Create a volume and 2 buckets - String volumeName1 = "vol-" + RandomStringUtils.randomNumeric(5); + String volumeName1 = "vol-" + counter.incrementAndGet(); String bucketName1 = "buck1"; String bucketName2 = "buck2"; store.createVolume(volumeName1); @@ -790,7 +794,7 @@ public void testSnapDiffWithMultipleSSTs() String keyPrefix = "key-"; // add file to bucket1 and take snapshot createFileKey(bucket1, keyPrefix); - String snap1 = "snap" + RandomStringUtils.randomNumeric(5); + String snap1 = "snap" + counter.incrementAndGet(); createSnapshot(volumeName1, bucketName1, snap1); // 1.sst Assert.assertEquals(1, getKeyTableSstFiles().size()); // add files to bucket2 and flush twice to create 2 sst files @@ -806,7 +810,7 @@ public void testSnapDiffWithMultipleSSTs() Assert.assertEquals(3, getKeyTableSstFiles().size()); // add a file to bucket1 and take second snapshot createFileKey(bucket1, keyPrefix); - String snap2 = "snap" + RandomStringUtils.randomNumeric(5); + String snap2 = "snap" + counter.incrementAndGet(); createSnapshot(volumeName1, bucketName1, snap2); // 1.sst 2.sst 3.sst 4.sst Assert.assertEquals(4, getKeyTableSstFiles().size()); SnapshotDiffReportOzone diff1 = @@ -818,8 +822,8 @@ public void testSnapDiffWithMultipleSSTs() @Test public void testDeleteSnapshotTwice() throws Exception { - String volume = "vol-" + RandomStringUtils.randomNumeric(5); - String bucket = "buck-" + RandomStringUtils.randomNumeric(5); + String volume = "vol-" + counter.incrementAndGet(); + String bucket = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); volume1.createBucket(bucket); @@ -827,7 +831,7 @@ public void testDeleteSnapshotTwice() throws Exception { // Create Key1 and take snapshot String key1 = "key-1-"; createFileKey(bucket1, key1); - String snap1 = "snap" + RandomStringUtils.randomNumeric(5); + String snap1 = "snap" + counter.incrementAndGet(); createSnapshot(volume, bucket, snap1); store.deleteSnapshot(volume, bucket, snap1); @@ -839,8 +843,8 @@ public void testDeleteSnapshotTwice() throws Exception { @Test public void testDeleteSnapshotFailure() throws Exception { - String volume = "vol-" + RandomStringUtils.randomNumeric(5); - String bucket = "buck-" + RandomStringUtils.randomNumeric(5); + String volume = "vol-" + counter.incrementAndGet(); + String bucket = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); volume1.createBucket(bucket); @@ -848,7 +852,7 @@ public void testDeleteSnapshotFailure() throws Exception { // Create Key1 and take snapshot String key1 = "key-1-"; createFileKey(bucket1, key1); - String snap1 = "snap" + RandomStringUtils.randomNumeric(5); + String snap1 = "snap" + counter.incrementAndGet(); createSnapshot(volume, bucket, snap1); // Delete non-existent snapshot @@ -864,8 +868,8 @@ public 
void testDeleteSnapshotFailure() throws Exception { @Test public void testDeleteSnapshotMissingMandatoryParams() throws Exception { - String volume = "vol-" + RandomStringUtils.randomNumeric(5); - String bucket = "buck-" + RandomStringUtils.randomNumeric(5); + String volume = "vol-" + counter.incrementAndGet(); + String bucket = "buck-" + counter.incrementAndGet(); store.createVolume(volume); OzoneVolume volume1 = store.getVolume(volume); volume1.createBucket(bucket); @@ -873,7 +877,7 @@ public void testDeleteSnapshotMissingMandatoryParams() throws Exception { // Create Key1 and take snapshot String key1 = "key-1-"; createFileKey(bucket1, key1); - String snap1 = "snap" + RandomStringUtils.randomNumeric(5); + String snap1 = "snap" + counter.incrementAndGet(); createSnapshot(volume, bucket, snap1); String nullstr = ""; // Snapshot is empty @@ -940,7 +944,7 @@ private void deleteKeys(OzoneBucket bucket) throws IOException { private String createFileKey(OzoneBucket bucket, String keyPrefix) throws IOException { byte[] value = RandomStringUtils.randomAscii(10240).getBytes(UTF_8); - String key = keyPrefix + RandomStringUtils.randomNumeric(5); + String key = keyPrefix + counter.incrementAndGet(); OzoneOutputStream fileKey = bucket.createKey(key, value.length); fileKey.write(value); fileKey.close(); diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml index a2b8b284d05d..42551f99a130 100644 --- a/hadoop-ozone/ozone-manager/pom.xml +++ b/hadoop-ozone/ozone-manager/pom.xml @@ -258,6 +258,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds-rocks-native + + org.mockito + mockito-junit-jupiter + diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java index 69a4adcee842..9861ac0ac473 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java @@ -18,7 +18,9 @@ package org.apache.hadoop.ozone.om.snapshot; +import com.google.common.annotations.VisibleForTesting; import com.google.common.cache.LoadingCache; +import com.google.common.util.concurrent.ThreadFactoryBuilder; import java.io.BufferedWriter; import java.io.File; @@ -39,11 +41,8 @@ import java.util.Set; import java.util.UUID; -import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; -import org.apache.hadoop.hdds.utils.NativeConstants; -import org.apache.hadoop.hdds.utils.NativeLibraryLoader; import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.ExecutionException; @@ -52,7 +51,6 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.stream.Stream; - import org.apache.commons.io.file.PathUtils; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.utils.db.CodecRegistry; @@ -153,7 +151,6 @@ public class SnapshotDiffManager implements AutoCloseable { */ private final PersistentMap snapDiffJobTable; private final ExecutorService snapDiffExecutor; - private ExecutorService sstDumpToolExecutor; /** * Directory to keep hardlinks of SST files for a snapDiff job temporarily. 
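Note on the SnapshotDiffManager hunks below: the nullable `sstDumpToolExecutor` field becomes an `Optional<ExecutorService>`, so the failure path of `initSSTDumpTool` and `close()` share a single `ifPresent`-based shutdown. A minimal sketch of that lifecycle, assuming the `ManagedSSTDumpTool(ExecutorService, int)` constructor and exception type used elsewhere in this patch (`SstDumpToolHolder` and its method names are hypothetical):

```java
import java.util.Optional;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException;
import org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpTool;

/** Sketch of the Optional-based SST dump tool lifecycle. */
public class SstDumpToolHolder {
  // Empty until initialization succeeds, so close() and the failure
  // path can share one ifPresent-based shutdown.
  private Optional<ExecutorService> execService = Optional.empty();

  Optional<ManagedSSTDumpTool> init(int poolSize, int bufferSize) {
    try {
      ExecutorService exec = new ThreadPoolExecutor(0, poolSize,
          60, TimeUnit.SECONDS, new SynchronousQueue<>(),
          new ThreadFactoryBuilder()
              .setNameFormat("sst-dump-tool-TID-%d").build(),
          new ThreadPoolExecutor.DiscardPolicy());
      execService = Optional.of(exec);
      return Optional.of(new ManagedSSTDumpTool(exec, bufferSize));
    } catch (NativeLibraryNotLoadedException e) {
      // Native rocks-tools library missing: release the executor
      // instead of leaking an idle pool, and report "no tool".
      execService.ifPresent(ExecutorService::shutdown);
      execService = Optional.empty();
      return Optional.empty();
    }
  }

  void close() {
    execService.ifPresent(ExecutorService::shutdown);
  }

  public static void main(String[] args) {
    SstDumpToolHolder holder = new SstDumpToolHolder();
    // Without the native library loaded this prints false (Optional.empty).
    System.out.println(holder.init(1, 256).isPresent());
    holder.close();
  }
}
```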
@@ -166,6 +163,8 @@ public class SnapshotDiffManager implements AutoCloseable { private final boolean snapshotForceFullDiff; private final Optional sstDumpTool; + private Optional sstDumptoolExecService; + @SuppressWarnings("parameternumber") public SnapshotDiffManager(ManagedRocksDB db, RocksDBCheckpointDiffer differ, @@ -250,11 +249,6 @@ public SnapshotDiffManager(ManagedRocksDB db, private Optional initSSTDumpTool( final OzoneConfiguration conf) { - if (!NativeLibraryLoader.getInstance() - .loadLibrary(NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME)) { - return Optional.empty(); - } - try { int threadPoolSize = conf.getInt( OMConfigKeys.OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_POOL_SIZE, @@ -265,17 +259,19 @@ private Optional initSSTDumpTool( OMConfigKeys .OZONE_OM_SNAPSHOT_SST_DUMPTOOL_EXECUTOR_BUFFER_SIZE_DEFAULT, StorageUnit.BYTES); - sstDumpToolExecutor = new ThreadPoolExecutor(0, + this.sstDumptoolExecService = Optional.of(new ThreadPoolExecutor(0, threadPoolSize, 60, TimeUnit.SECONDS, new SynchronousQueue<>(), new ThreadFactoryBuilder() .setNameFormat("snapshot-diff-manager-sst-dump-tool-TID-%d") .build(), - new ThreadPoolExecutor.DiscardPolicy()); - return Optional.of(new ManagedSSTDumpTool(sstDumpToolExecutor, + new ThreadPoolExecutor.DiscardPolicy())); + return Optional.of(new ManagedSSTDumpTool(sstDumptoolExecService.get(), bufferSize)); } catch (NativeLibraryNotLoadedException e) { - return Optional.empty(); + this.sstDumptoolExecService.ifPresent(exec -> + closeExecutorService(exec, "SstDumpToolExecutor")); } + return Optional.empty(); } /** @@ -435,15 +431,17 @@ private static OFSPath getSnapshotRootPath(String volume, String bucket) { return new OFSPath(bucketPath, new OzoneConfiguration()); } - private SnapshotDiffReportOzone createPageResponse( - final SnapshotDiffJob snapDiffJob, - final String volumeName, - final String bucketName, - final String fromSnapshotName, - final String toSnapshotName, - final int index, - final int pageSize - ) throws IOException { + @VisibleForTesting + SnapshotDiffReportOzone createPageResponse(final SnapshotDiffJob snapDiffJob, + final String volumeName, final String bucketName, + final String fromSnapshotName, final String toSnapshotName, + final int index, final int pageSize) throws IOException { + if (index < 0 || pageSize <= 0) { + throw new IllegalArgumentException(String.format( + "Index should be a number >= 0. Given index %d. Page size " + + "should be a positive number > 0. Given page size is %d", + index, pageSize)); + } List diffReportList = new ArrayList<>(); OFSPath path = getSnapshotRootPath(volumeName, bucketName); @@ -464,9 +462,7 @@ private SnapshotDiffReportOzone createPageResponse( String tokenString = hasMoreEntries ? String.valueOf(idx) : null; - if (!hasMoreEntries) { - checkReportsIntegrity(snapDiffJob, idx); - } + checkReportsIntegrity(snapDiffJob, index, diffReportList.size()); return new SnapshotDiffReportOzone(path.toString(), volumeName, bucketName, fromSnapshotName, toSnapshotName, diffReportList, tokenString); @@ -479,13 +475,16 @@ private SnapshotDiffReportOzone createPageResponse( * service and throws the exception to client. 
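The rewritten integrity check below flags two inconsistent states: a page that returns entries although it starts at or past the recorded total, and a page that starts inside the range yet comes back empty. A standalone sketch of just that predicate, with illustrative values (`ReportIntegrity` is a made-up wrapper; the real method also marks the job FAILED and throws IOException):

```java
/** Sketch of the page-integrity predicate in checkReportsIntegrity. */
public final class ReportIntegrity {
  private ReportIntegrity() {
  }

  // True when the page contradicts the recorded totalDiffEntries.
  static boolean isInconsistent(long totalDiffEntries,
      int pageStartIdx, int numberOfEntriesInPage) {
    return (pageStartIdx >= totalDiffEntries && numberOfEntriesInPage != 0)
        || (pageStartIdx < totalDiffEntries && numberOfEntriesInPage == 0);
  }

  public static void main(String[] args) {
    // A job with 100 entries: an in-range, non-empty page is consistent.
    System.out.println(isInconsistent(100, 0, 10));  // false
    // Entries past the recorded end mean the report table is corrupt.
    System.out.println(isInconsistent(100, 100, 5)); // true
    // An empty page that starts inside the range is equally suspect.
    System.out.println(isInconsistent(100, 50, 0));  // true
  }
}
```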
*/ private void checkReportsIntegrity(final SnapshotDiffJob diffJob, - final int totalDiffEntries) + final int pageStartIdx, + final int numberOfEntriesInPage) throws IOException { - if (diffJob.getTotalDiffEntries() != totalDiffEntries) { - LOG.error("Expected TotalDiffEntries: {} but found only " + + if ((pageStartIdx >= diffJob.getTotalDiffEntries() && + numberOfEntriesInPage != 0) || (pageStartIdx < + diffJob.getTotalDiffEntries() && numberOfEntriesInPage == 0)) { + LOG.error("Expected TotalDiffEntries: {} but found " + "TotalDiffEntries: {}", diffJob.getTotalDiffEntries(), - totalDiffEntries); + pageStartIdx + numberOfEntriesInPage); updateJobStatus(diffJob.getJobId(), DONE, FAILED); throw new IOException("Report integrity check failed. Retry after: " + ozoneManager.getOmSnapshotManager().getDiffCleanupServiceInterval()); @@ -785,7 +784,7 @@ private void getDeltaFilesAndDiffKeysToObjectIdToKeyMap( final PersistentMap oldObjIdToKeyMap, final PersistentMap newObjIdToKeyMap, final PersistentSet objectIDsToCheck, - final String diffDir + final String diffDir ) throws IOException, RocksDBException { List tablesToLookUp = Collections.singletonList(fsTable.getName()); @@ -833,20 +832,17 @@ private void getDeltaFilesAndDiffKeysToObjectIdToKeyMap( } @SuppressWarnings("checkstyle:ParameterNumber") - private void addToObjectIdMap(Table fsTable, - Table tsTable, - Set deltaFiles, - boolean nativeRocksToolsLoaded, - PersistentMap oldObjIdToKeyMap, - PersistentMap newObjIdToKeyMap, - PersistentSet objectIDsToCheck, - Map tablePrefixes) - throws IOException, NativeLibraryNotLoadedException, RocksDBException { - + void addToObjectIdMap(Table fsTable, + Table tsTable, + Set deltaFiles, boolean nativeRocksToolsLoaded, + PersistentMap oldObjIdToKeyMap, + PersistentMap newObjIdToKeyMap, + PersistentSet objectIDsToCheck, + Map tablePrefixes) throws IOException, + NativeLibraryNotLoadedException, RocksDBException { if (deltaFiles.isEmpty()) { return; } - boolean isDirectoryTable = fsTable.getName().equals(OmMetadataManagerImpl.DIRECTORY_TABLE); ManagedSstFileReader sstFileReader = new ManagedSstFileReader(deltaFiles); @@ -901,17 +897,12 @@ private String getKeyOrDirectoryName(boolean isDirectory, } @SuppressWarnings("checkstyle:ParameterNumber") - private Set getDeltaFiles(OmSnapshot fromSnapshot, - OmSnapshot toSnapshot, - List tablesToLookUp, - SnapshotInfo fsInfo, - SnapshotInfo tsInfo, - boolean useFullDiff, - Map tablePrefixes, - String diffDir) + Set getDeltaFiles(OmSnapshot fromSnapshot, OmSnapshot toSnapshot, + List tablesToLookUp, SnapshotInfo fsInfo, + SnapshotInfo tsInfo, boolean useFullDiff, + Map tablePrefixes, String diffDir) throws RocksDBException, IOException { // TODO: [SNAPSHOT] Refactor the parameter list - final Set deltaFiles = new HashSet<>(); // Check if compaction DAG is available, use that if so @@ -972,13 +963,12 @@ private void validateEstimatedKeyChangesAreInLimits( } } - private long generateDiffReport( - final String jobId, - final PersistentSet objectIDsToCheck, - final PersistentMap oldObjIdToKeyMap, - final PersistentMap newObjIdToKeyMap - ) { + long generateDiffReport(final String jobId, + final PersistentSet objectIDsToCheck, + final PersistentMap oldObjIdToKeyMap, + final PersistentMap + newObjIdToKeyMap) { LOG.debug("Starting diff report generation for jobId: {}.", jobId); ColumnFamilyHandle deleteDiffColumnFamily = null; ColumnFamilyHandle renameDiffColumnFamily = null; @@ -1206,8 +1196,8 @@ private boolean areKeysEqual(WithObjectID oldKey, WithObjectID newKey) { /** 
* check if the given key is in the bucket specified by tablePrefix map. */ - private boolean isKeyInBucket(String key, Map tablePrefixes, - String tableName) { + boolean isKeyInBucket(String key, Map tablePrefixes, + String tableName) { String volumeBucketDbPrefix; // In case of FSO - either File/Directory table // the key Prefix would be volumeId/bucketId and @@ -1267,9 +1257,8 @@ public void close() { if (snapDiffExecutor != null) { closeExecutorService(snapDiffExecutor, "SnapDiffExecutor"); } - if (sstDumpToolExecutor != null) { - closeExecutorService(sstDumpToolExecutor, "SstDumpToolExecutor"); - } + this.sstDumptoolExecService.ifPresent(exec -> + closeExecutorService(exec, "SstDumpToolExecutor")); } private void closeExecutorService(ExecutorService executorService, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/SnapshotTestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/SnapshotTestUtils.java new file mode 100644 index 000000000000..ed368dc3882a --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/SnapshotTestUtils.java @@ -0,0 +1,175 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.om.snapshot; + +import org.apache.hadoop.util.ClosableIterator; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Comparator; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.TreeSet; + +/** + * Util classes for Snapshot Persistent DataStructures for tests. + */ +public class SnapshotTestUtils { + + private static String getStringKey(K key) { + if (key.getClass().isArray()) { + Class componentType = key.getClass().getComponentType(); + if (componentType == byte.class) { + return Arrays.toString((byte[])key); + } else if (componentType == int.class) { + return Arrays.toString((int[])key); + } else if (componentType == long.class) { + return Arrays.toString((long[])key); + } else if (componentType == float.class) { + return Arrays.toString((float[])key); + } else if (componentType == double.class) { + return Arrays.toString((double[])key); + } else if (componentType == char.class) { + return Arrays.toString((char[])key); + } else { + return Arrays.toString((Object[])key); + } + } + return key.toString(); + } + + /** + * Stubbed implementation of CloseableIterator containing iterators. + */ + private static class StubbedCloseableIterator implements + ClosableIterator { + private final Iterator iterator; + + StubbedCloseableIterator(Iterator iterator) { + this.iterator = iterator; + } + + @Override + public void close() { + + } + + @Override + public boolean hasNext() { + return this.iterator.hasNext(); + } + + @Override + public T next() { + return this.iterator.next(); + } + } + + /** + * Stubbed implementation Persistent Map for testing. + */ + public static class StubbedPersistentMap implements + PersistentMap { + + private final Map map; + + public StubbedPersistentMap(Map map) { + this(); + map.entrySet().iterator().forEachRemaining(i -> + this.put(i.getKey(), i.getValue())); + } + + public StubbedPersistentMap() { + this.map = new TreeMap<>( + Comparator.comparing(SnapshotTestUtils::getStringKey)); + } + + @Override + public V get(K key) { + return this.map.get(key); + } + + @Override + public void put(K key, V value) { + this.map.put(key, value); + } + + @Override + public void remove(K key) { + this.map.remove(key); + } + + @Override + public ClosableIterator> iterator() { + return new StubbedCloseableIterator<>( + this.map.entrySet().stream().iterator()); + } + } + + /** + * Stubbed implementation of Persistent Set for testing. + */ + public static class StubbedPersistentSet implements PersistentSet { + private final Set set; + + public StubbedPersistentSet(Set map) { + this(); + map.iterator().forEachRemaining(this::add); + } + + public StubbedPersistentSet() { + this.set = new TreeSet<>( + Comparator.comparing(SnapshotTestUtils::getStringKey)); + } + + @Override + public void add(K entry) { + set.add(entry); + } + + @Override + public ClosableIterator iterator() { + return new StubbedCloseableIterator<>(set.stream().iterator()); + } + } + + /** + * Stubbed implementation of Persistent List for testing. 
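Aside on `getStringKey` above: keys in these stubs are often raw `byte[]`, whose identity-based `equals`/`hashCode` would make a plain `HashMap` treat equal contents as different keys. Comparing the `Arrays.toString` rendering instead, as the `TreeMap` comparator here does, restores content-based lookup. A quick demonstration:

```java
import java.util.Arrays;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;

/** Why byte[] keys need a content-based comparator. */
public class ByteArrayKeyDemo {
  public static void main(String[] args) {
    byte[] k1 = {1, 2, 3};
    byte[] k2 = {1, 2, 3}; // same content, different identity

    Map<byte[], String> hash = new HashMap<>();
    hash.put(k1, "a");
    System.out.println(hash.get(k2)); // null: identity hashCode misses

    // Same idea as SnapshotTestUtils.getStringKey: order by the
    // Arrays.toString rendering of the key, not the reference.
    Map<byte[], String> tree =
        new TreeMap<>(Comparator.comparing((byte[] k) -> Arrays.toString(k)));
    tree.put(k1, "a");
    System.out.println(tree.get(k2)); // a: content-based lookup works
  }
}
```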
+ */ + public static class ArrayPersistentList extends ArrayList + implements PersistentList { + + @Override + public boolean addAll(PersistentList from) { + boolean ret = true; + Iterator iterator = from.iterator(); + while (iterator.hasNext()) { + ret = ret && this.add(iterator.next()); + } + return ret; + } + + @Override + public ClosableIterator iterator() { + return new StubbedCloseableIterator<>(this.stream().iterator()); + } + } + +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java new file mode 100644 index 000000000000..2033a81887d5 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java @@ -0,0 +1,613 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.om.snapshot; + +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; +import com.google.common.collect.Sets; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import org.apache.commons.lang3.RandomStringUtils; +import org.apache.hadoop.hdds.client.ECReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.NativeLibraryNotLoadedException; +import org.apache.hadoop.hdds.utils.db.CodecRegistry; +import org.apache.hadoop.hdds.utils.db.DBStore; +import org.apache.hadoop.hdds.utils.db.RDBStore; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; +import org.apache.hadoop.hdds.utils.db.managed.ManagedSSTDumpTool; +import org.apache.hadoop.hdfs.DFSUtilClient; +import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.helpers.WithObjectID; +import org.apache.hadoop.ozone.snapshot.SnapshotDiffReportOzone; +import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse; +import org.apache.ozone.rocksdb.util.ManagedSstFileReader; +import org.apache.ozone.rocksdb.util.RdbUtil; +import org.apache.ozone.rocksdiff.DifferSnapshotInfo; +import org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer; +import org.apache.ozone.rocksdiff.RocksDiffUtils; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.ValueSource; +import org.mockito.Matchers; +import org.mockito.Mock; +import org.mockito.MockedConstruction; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.mockito.junit.jupiter.MockitoExtension; +import org.mockito.junit.jupiter.MockitoSettings; +import org.mockito.quality.Strictness; +import org.mockito.stubbing.Answer; +import org.rocksdb.ColumnFamilyHandle; +import org.rocksdb.RocksDB; +import org.rocksdb.RocksDBException; +import org.rocksdb.RocksIterator; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedHashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import 
java.util.stream.LongStream; + +import static org.apache.hadoop.ozone.om.OmSnapshotManager.DELIMITER; + +/** + * Test class for SnapshotDiffManager Class. + */ +@ExtendWith(MockitoExtension.class) +@MockitoSettings(strictness = Strictness.LENIENT) +public class TestSnapshotDiffManager { + + @Mock + private ManagedRocksDB snapdiffDB; + + @Mock + private RocksDBCheckpointDiffer differ; + + @Mock + private OzoneManager ozoneManager; + + private LoadingCache snapshotCache; + + @Mock + private ColumnFamilyHandle snapdiffJobCFH; + + @Mock + private ColumnFamilyHandle snapdiffReportCFH; + + @Mock + private ManagedColumnFamilyOptions columnFamilyOptions; + + @Mock + private RocksDB rocksDB; + + @Mock + private RocksIterator jobTableIterator; + + private static CodecRegistry codecRegistry; + + @BeforeAll + public static void initCodecRegistry() { + // Integers are used for indexing persistent list. + codecRegistry = CodecRegistry.newBuilder() + .addCodec(SnapshotDiffReportOzone.DiffReportEntry.class, + SnapshotDiffReportOzone.getDiffReportEntryCodec()) + .addCodec(SnapshotDiffJob.class, SnapshotDiffJob.getCodec()).build(); + } + + private DBStore getMockedDBStore(String dbStorePath) { + DBStore dbStore = Mockito.mock(DBStore.class); + Mockito.when(dbStore.getDbLocation()).thenReturn(new File(dbStorePath)); + return dbStore; + } + + private OmSnapshot getMockedOmSnapshot(String snapshot) { + OmSnapshot omSnapshot = Mockito.mock(OmSnapshot.class); + OMMetadataManager metadataManager = Mockito.mock(OMMetadataManager.class); + DBStore dbStore = getMockedDBStore(snapshot); + Mockito.when(omSnapshot.getName()).thenReturn(snapshot); + Mockito.when(omSnapshot.getMetadataManager()).thenReturn(metadataManager); + Mockito.when(metadataManager.getStore()).thenReturn(dbStore); + return omSnapshot; + } + + private SnapshotDiffManager getMockedSnapshotDiffManager(int cacheSize) { + + Mockito.when(snapdiffDB.get()).thenReturn(rocksDB); + Mockito.when(rocksDB.newIterator(snapdiffJobCFH)) + .thenReturn(jobTableIterator); + CacheLoader loader = + new CacheLoader() { + @Override + public OmSnapshot load(String key) { + return getMockedOmSnapshot(key); + } + }; + snapshotCache = CacheBuilder.newBuilder() + .maximumSize(cacheSize) + .build(loader); + Mockito.when(ozoneManager.getConfiguration()) + .thenReturn(new OzoneConfiguration()); + OMMetadataManager mockedMetadataManager = + Mockito.mock(OMMetadataManager.class); + RDBStore mockedRDBStore = Mockito.mock(RDBStore.class); + Mockito.when(mockedMetadataManager.getStore()).thenReturn(mockedRDBStore); + Mockito.when(ozoneManager.getMetadataManager()) + .thenReturn(mockedMetadataManager); + SnapshotDiffManager snapshotDiffManager = Mockito.spy( + new SnapshotDiffManager(snapdiffDB, differ, ozoneManager, snapshotCache, + snapdiffJobCFH, snapdiffReportCFH, columnFamilyOptions, + codecRegistry)); + return snapshotDiffManager; + } + + private SnapshotInfo getMockedSnapshotInfo(String snapshot) { + SnapshotInfo snapshotInfo = Mockito.mock(SnapshotInfo.class); + Mockito.when(snapshotInfo.getSnapshotID()).thenReturn(snapshot); + return snapshotInfo; + } + + @ParameterizedTest + @ValueSource(ints = {1, 2, 5, 10, 100, 1000, 10000}) + public void testGetDeltaFilesWithDag(int numberOfFiles) + throws ExecutionException, RocksDBException, IOException { + + SnapshotDiffManager snapshotDiffManager = getMockedSnapshotDiffManager(10); + String snap1 = "snap1"; + String snap2 = "snap2"; + + String diffDir = Files.createTempDirectory("snapdiff_dir").toString(); + Set randomStrings = 
IntStream.range(0, numberOfFiles) + .mapToObj(i -> RandomStringUtils.randomAlphabetic(10)) + .collect(Collectors.toSet()); + Mockito.when(differ.getSSTDiffListWithFullPath(Mockito.any(), + Mockito.any(), Mockito.eq(diffDir))) + .thenReturn(Lists.newArrayList(randomStrings)); + SnapshotInfo fromSnapshotInfo = getMockedSnapshotInfo(snap1); + SnapshotInfo toSnapshotInfo = getMockedSnapshotInfo(snap1); + Mockito.when(jobTableIterator.isValid()).thenReturn(false); + Set deltaFiles = snapshotDiffManager.getDeltaFiles( + snapshotCache.get(snap1), snapshotCache.get(snap2), + Arrays.asList("cf1", "cf2"), fromSnapshotInfo, toSnapshotInfo, false, + Collections.EMPTY_MAP, diffDir); + Assertions.assertEquals(randomStrings, deltaFiles); + } + + @ParameterizedTest + @CsvSource({"0,true", "1,true", "2,true", "5,true", "10,true", "100,true", + "1000,true", "10000,true", "0,false", "1,false", "2,false", "5,false", + "10,false", "100,false", "1000,false", "10000,false"}) + public void testGetDeltaFilesWithFullDiff(int numberOfFiles, + boolean useFullDiff) + throws ExecutionException, RocksDBException, IOException { + try (MockedStatic mockedRdbUtil = + Mockito.mockStatic(RdbUtil.class); + MockedStatic mockedRocksDiffUtils = + Mockito.mockStatic(RocksDiffUtils.class)) { + Set deltaStrings = new HashSet<>(); + mockedRdbUtil.when( + () -> RdbUtil.getSSTFilesForComparison(Matchers.anyString(), + Matchers.anyList())) + .thenAnswer((Answer>) invocation -> { + Set retVal = IntStream.range(0, numberOfFiles) + .mapToObj(i -> RandomStringUtils.randomAlphabetic(10)) + .collect(Collectors.toSet()); + deltaStrings.addAll(retVal); + return retVal; + }); + mockedRocksDiffUtils.when(() -> RocksDiffUtils.filterRelevantSstFiles( + Matchers.anySet(), Matchers.anyMap())) + .thenAnswer((Answer) invocationOnMock -> { + invocationOnMock.getArgument(0, Set.class).stream() + .findAny().ifPresent(val -> { + Assertions.assertTrue(deltaStrings.contains(val)); + invocationOnMock.getArgument(0, Set.class).remove(val); + deltaStrings.remove(val); + }); + return null; + }); + SnapshotDiffManager snapshotDiffManager = + getMockedSnapshotDiffManager(10); + String snap1 = "snap1"; + String snap2 = "snap2"; + if (!useFullDiff) { + Set randomStrings = Collections.emptySet(); + Mockito.when(differ.getSSTDiffListWithFullPath( + Mockito.any(DifferSnapshotInfo.class), + Mockito.any(DifferSnapshotInfo.class), + Matchers.anyString())) + .thenReturn(Lists.newArrayList(randomStrings)); + } + SnapshotInfo fromSnapshotInfo = getMockedSnapshotInfo(snap1); + SnapshotInfo toSnapshotInfo = getMockedSnapshotInfo(snap1); + Mockito.when(jobTableIterator.isValid()).thenReturn(false); + Set deltaFiles = snapshotDiffManager.getDeltaFiles( + snapshotCache.get(snap1), snapshotCache.get(snap2), + Arrays.asList("cf1", "cf2"), fromSnapshotInfo, toSnapshotInfo, false, + Collections.EMPTY_MAP, Files.createTempDirectory("snapdiff_dir") + .toAbsolutePath().toString()); + Assertions.assertEquals(deltaStrings, deltaFiles); + } + } + + private Table getMockedTable( + Map map, String tableName) + throws IOException { + Table mocked = Mockito.mock(Table.class); + Mockito.when(mocked.get(Matchers.any())) + .thenAnswer(invocation -> map.get(invocation.getArgument(0))); + Mockito.when(mocked.getName()).thenReturn(tableName); + return mocked; + } + + private WithObjectID getObjectID(int objectId, int updateId, + String snapshotTableName) { + String name = "key" + objectId; + if (snapshotTableName.equals(OmMetadataManagerImpl.DIRECTORY_TABLE)) { + return OmDirectoryInfo.newBuilder() 
+ .setObjectID(objectId).setName(name).build(); + } + return new OmKeyInfo.Builder().setObjectID(objectId) + .setVolumeName("vol").setBucketName("bucket").setUpdateID(updateId) + .setReplicationConfig(new ECReplicationConfig(3, 2)) + .setKeyName(name).build(); + } + + /** + * Test mocks the SSTFileReader to return object Ids from 0-50 + * when not reading tombstones & Object Ids 0-100 when reading tombstones. + * Creating a mock snapshot table where the from Snapshot Table contains + * Object Ids in the range 0-25 & 50-100 and to Snaphshot Table contains data + * with object Ids in the range 0-50. + * Function should return 25-50 in the new Persistent map. + * In the case of reading tombstones old Snapshot Persistent map should have + * object Ids in the range 50-100 & should be empty otherwise + * + * @param nativeLibraryLoaded + * @param snapshotTableName + * @throws NativeLibraryNotLoadedException + * @throws IOException + */ + @SuppressFBWarnings({"DLS_DEAD_LOCAL_STORE", + "RV_RETURN_VALUE_IGNORED_NO_SIDE_EFFECT"}) + @ParameterizedTest + @CsvSource({"false," + OmMetadataManagerImpl.DIRECTORY_TABLE, + "true," + OmMetadataManagerImpl.DIRECTORY_TABLE, + "false," + OmMetadataManagerImpl.FILE_TABLE, + "true," + OmMetadataManagerImpl.FILE_TABLE, + "false," + OmMetadataManagerImpl.KEY_TABLE, + "true," + OmMetadataManagerImpl.KEY_TABLE}) + public void testObjectIdMapWithTombstoneEntries(boolean nativeLibraryLoaded, + String snapshotTableName) + throws NativeLibraryNotLoadedException, IOException, RocksDBException { + // Mocking SST file with keys in SST file including tombstones + Set keysWithTombstones = IntStream.range(0, 100) + .boxed().map(i -> "key" + i).collect(Collectors.toSet()); + // Mocking SST file with keys in SST file excluding tombstones + Set keys = IntStream.range(0, 50).boxed() + .map(i -> "key" + i).collect(Collectors.toSet()); + // Mocking SSTFileReader functions to return the above keys list. + try (MockedConstruction mockedSSTFileReader = + Mockito.mockConstruction(ManagedSstFileReader.class, + (mock, context) -> { + Mockito.when(mock.getKeyStreamWithTombstone(Matchers.any())) + .thenReturn(keysWithTombstones.stream()); + Mockito.when(mock.getKeyStream()) + .thenReturn(keys.stream()); + }); + MockedConstruction mockedSSTDumpTool = + Mockito.mockConstruction(ManagedSSTDumpTool.class, + (mock, context) -> { + }) + ) { + // + Map toSnapshotTableMap = + IntStream.concat(IntStream.range(0, 25), IntStream.range(50, 100)) + .boxed().collect(Collectors.toMap(i -> "key" + i, + i -> getObjectID(i, i, snapshotTableName))); + // Mocking To snapshot table containing list of keys b/w 0-25, 50-100 + Table toSnapshotTable = + getMockedTable(toSnapshotTableMap, snapshotTableName); + // Mocking To snapshot table containing list of keys b/w 0-50 + Map fromSnapshotTableMap = + IntStream.range(0, 50) + .boxed().collect(Collectors.toMap(i -> "key" + i, + i -> getObjectID(i, i, snapshotTableName))); + // Expected Diff 25-50 are newly created keys & keys b/w are deleted, + // when reding keys with tombstones the keys would be added to + // objectIdsToBeChecked otherwise it wouldn't be added + Table fromSnapshotTable = + getMockedTable(fromSnapshotTableMap, snapshotTableName); + SnapshotDiffManager snapshotDiffManager = + getMockedSnapshotDiffManager(10); + // Mocking to filter even keys in bucket. + // Odd keys should be filtered out in the diff. 
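The stubbing that follows uses Mockito's `doAnswer(...).when(spy)` form so the real `addToObjectIdMap` still executes while `isKeyInBucket` is swapped for a parity filter. A self-contained illustration of that spy pattern (`KeyFilter` and its methods are invented for the sketch):

```java
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.spy;

/** Stubbing one method of a spy while the rest runs for real. */
public class SpyFilterExample {
  static class KeyFilter {
    boolean isKeyInBucket(String key) {
      return true; // real code would consult table prefixes
    }

    long countMatching(String... keys) {
      long n = 0;
      for (String k : keys) {
        if (isKeyInBucket(k)) { // virtual dispatch hits the stub on a spy
          n++;
        }
      }
      return n;
    }
  }

  public static void main(String[] args) {
    KeyFilter filter = spy(new KeyFilter());
    // Replace only isKeyInBucket: keep keys whose numeric suffix is even,
    // mirroring the filter in the test below.
    doAnswer(inv -> Integer.parseInt(
        inv.getArgument(0, String.class).substring(3)) % 2 == 0)
        .when(filter).isKeyInBucket(anyString());
    // countMatching runs for real but sees the stubbed filter: prints 2.
    System.out.println(filter.countMatching("key1", "key2", "key3", "key4"));
  }
}
```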
+      Mockito.doAnswer((Answer<Boolean>) invocationOnMock ->
+          Integer.parseInt(invocationOnMock.getArgument(0, String.class)
+              .substring(3)) % 2 == 0).when(snapshotDiffManager)
+          .isKeyInBucket(ArgumentMatchers.anyString(),
+              ArgumentMatchers.anyMap(), ArgumentMatchers.anyString());
+      PersistentMap<byte[], byte[]> oldObjectIdKeyMap =
+          new SnapshotTestUtils.StubbedPersistentMap<>();
+      PersistentMap<byte[], byte[]> newObjectIdKeyMap =
+          new SnapshotTestUtils.StubbedPersistentMap<>();
+      PersistentSet<byte[]> objectIdsToCheck =
+          new SnapshotTestUtils.StubbedPersistentSet<>();
+      snapshotDiffManager.addToObjectIdMap(toSnapshotTable,
+          fromSnapshotTable, Sets.newHashSet("dummy.sst"),
+          nativeLibraryLoaded, oldObjectIdKeyMap, newObjectIdKeyMap,
+          objectIdsToCheck, Maps.newHashMap());
+
+      Iterator<Map.Entry<byte[], byte[]>> oldObjectIdIter =
+          oldObjectIdKeyMap.iterator();
+      int oldObjectIdCnt = 0;
+      while (oldObjectIdIter.hasNext()) {
+        Map.Entry<byte[], byte[]> v = oldObjectIdIter.next();
+        long objectId = this.codecRegistry.asObject(v.getKey(), Long.class);
+        Assertions.assertTrue(objectId % 2 == 0);
+        Assertions.assertTrue(objectId >= 50);
+        Assertions.assertTrue(objectId < 100);
+        oldObjectIdCnt += 1;
+      }
+      Assertions.assertEquals(nativeLibraryLoaded ? 25 : 0, oldObjectIdCnt);
+      Iterator<Map.Entry<byte[], byte[]>> newObjectIdIter =
+          newObjectIdKeyMap.iterator();
+      int newObjectIdCnt = 0;
+      while (newObjectIdIter.hasNext()) {
+        Map.Entry<byte[], byte[]> v = newObjectIdIter.next();
+        long objectId = this.codecRegistry.asObject(v.getKey(), Long.class);
+        Assertions.assertTrue(objectId % 2 == 0);
+        Assertions.assertTrue(objectId >= 26);
+        Assertions.assertTrue(objectId < 50);
+        newObjectIdCnt += 1;
+      }
+      Assertions.assertEquals(12, newObjectIdCnt);
+
+      Iterator<byte[]> objectIdsToCheckIter = objectIdsToCheck.iterator();
+      int objectIdCnt = 0;
+      while (objectIdsToCheckIter.hasNext()) {
+        byte[] v = objectIdsToCheckIter.next();
+        long objectId = this.codecRegistry.asObject(v, Long.class);
+        Assertions.assertTrue(objectId % 2 == 0);
+        Assertions.assertTrue(objectId >= 26);
+        Assertions.assertTrue(objectId < (nativeLibraryLoaded ? 100 : 50));
+        objectIdCnt += 1;
+      }
+      Assertions.assertEquals(nativeLibraryLoaded ? 37 : 12, objectIdCnt);
+    }
+  }
+
+  /**
+   * Tests the generateDiffReport function by providing a PersistentMap of
+   * the object Ids of the diff keys to be checked, along with their
+   * corresponding key names.
+   */
+  @Test
+  public void testGenerateDiffReport() throws IOException {
+    // Mocking RocksDbPersistentMap constructor to use stubbed
+    // implementation instead.
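+    // Construction interception means no real RocksDB column family is
+    // needed: every RocksDbPersistentMap (and RocksDbPersistentList) the
+    // manager creates below is served by an in-memory stub.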
+    try (MockedConstruction<RocksDbPersistentMap> mockedRocksDbPersistentMap =
+             Mockito.mockConstruction(RocksDbPersistentMap.class,
+                 (mock, context) -> {
+                   PersistentMap<byte[], byte[]> obj =
+                       new SnapshotTestUtils.StubbedPersistentMap<>();
+                   Mockito.when(mock.iterator()).thenReturn(obj.iterator());
+                   Mockito.when(mock.get(ArgumentMatchers.any()))
+                       .thenAnswer(i -> obj.get(i.getArgument(0)));
+                   Mockito.doAnswer((Answer<Void>) i -> {
+                     obj.put(i.getArgument(0), i.getArgument(1));
+                     return null;
+                   }).when(mock).put(ArgumentMatchers.any(),
+                       ArgumentMatchers.any());
+                 });
+         MockedConstruction<RocksDbPersistentList> mockedPersistentList =
+             Mockito.mockConstruction(
+                 RocksDbPersistentList.class, (mock, context) -> {
+                   PersistentList<byte[]> obj =
+                       new SnapshotTestUtils.ArrayPersistentList<>();
+                   Mockito.when(mock.add(ArgumentMatchers.any()))
+                       .thenAnswer(i -> obj.add(i.getArgument(0)));
+                   Mockito.when(mock.get(ArgumentMatchers.anyInt()))
+                       .thenAnswer(i -> obj.get(i.getArgument(0)));
+                   Mockito.when(mock.addAll(
+                       ArgumentMatchers.any(PersistentList.class)))
+                       .thenAnswer(i -> obj.addAll(i.getArgument(0)));
+                   Mockito.when(mock.iterator())
+                       .thenAnswer(i -> obj.iterator());
+                 })) {
+      PersistentMap<byte[], byte[]> oldObjectIdKeyMap =
+          new SnapshotTestUtils.StubbedPersistentMap<>();
+      PersistentMap<byte[], byte[]> newObjectIdKeyMap =
+          new SnapshotTestUtils.StubbedPersistentMap<>();
+      PersistentSet<byte[]> objectIdsToCheck =
+          new SnapshotTestUtils.StubbedPersistentSet<>();
+      Map<Long, SnapshotDiffReport.DiffType> diffMap = new HashMap<>();
+      LongStream.range(0, 100).forEach(objectId -> {
+        try {
+          byte[] objectIdVal = codecRegistry.asRawData(objectId);
+          byte[] key = codecRegistry.asRawData("key" + objectId);
+          if (objectId >= 0 && objectId <= 25 ||
+              objectId >= 50 && objectId <= 100) {
+            oldObjectIdKeyMap.put(objectIdVal, key);
+          }
+          if (objectId >= 0 && objectId <= 25 && objectId % 4 == 0 ||
+              objectId > 25 && objectId < 50) {
+            newObjectIdKeyMap.put(objectIdVal, key);
+          }
+          if (objectId >= 0 && objectId <= 25 && objectId % 4 == 1) {
+            byte[] keyVal = codecRegistry.asRawData("renamed-key" + objectId);
+            newObjectIdKeyMap.put(objectIdVal, keyVal);
+            diffMap.put(objectId, SnapshotDiffReport.DiffType.RENAME);
+          }
+          objectIdsToCheck.add(objectIdVal);
+          if (objectId >= 50 && objectId <= 100 ||
+              objectId >= 0 && objectId <= 25 && objectId % 4 > 1) {
+            diffMap.put(objectId, SnapshotDiffReport.DiffType.DELETE);
+          }
+          if (objectId >= 0 && objectId <= 25 && objectId % 4 == 0) {
+            diffMap.put(objectId, SnapshotDiffReport.DiffType.MODIFY);
+          }
+          if (objectId > 25 && objectId < 50) {
+            diffMap.put(objectId, SnapshotDiffReport.DiffType.CREATE);
+          }
+        } catch (IOException e) {
+          throw new RuntimeException(e);
+        }
+      });
+      SnapshotDiffManager snapshotDiffManager =
+          getMockedSnapshotDiffManager(10);
+      snapshotDiffManager.generateDiffReport("jobId",
+          objectIdsToCheck, oldObjectIdKeyMap, newObjectIdKeyMap);
+      SnapshotDiffJob snapshotDiffJob = new SnapshotDiffJob(0, "jobId",
+          SnapshotDiffResponse.JobStatus.DONE, "vol", "buck", "fs", "ts",
+          true, diffMap.size());
+      SnapshotDiffReportOzone snapshotDiffReportOzone =
+          snapshotDiffManager.createPageResponse(snapshotDiffJob, "vol",
+              "buck", "fs", "ts",
+              0, Integer.MAX_VALUE);
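+      // The full report (page size Integer.MAX_VALUE) is expected to group
+      // entries by diff type in the fixed order DELETE, RENAME, CREATE,
+      // MODIFY, which the LinkedHashSet comparison below verifies.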
+      Set<SnapshotDiffReport.DiffType> expectedOrder = new LinkedHashSet<>();
+      expectedOrder.add(SnapshotDiffReport.DiffType.DELETE);
+      expectedOrder.add(SnapshotDiffReport.DiffType.RENAME);
+      expectedOrder.add(SnapshotDiffReport.DiffType.CREATE);
+      expectedOrder.add(SnapshotDiffReport.DiffType.MODIFY);
+
+      Set<SnapshotDiffReport.DiffType> actualOrder = new LinkedHashSet<>();
+      for (SnapshotDiffReport.DiffReportEntry entry :
+          snapshotDiffReportOzone.getDiffList()) {
+        actualOrder.add(entry.getType());
+
+        long objectId = Long.parseLong(
+            DFSUtilClient.bytes2String(entry.getSourcePath()).substring(3));
+        Assertions.assertEquals(diffMap.get(objectId), entry.getType());
+      }
+      Assertions.assertEquals(expectedOrder, actualOrder);
+    }
+  }
+
+  private SnapshotDiffReport.DiffReportEntry getTestDiffEntry(String jobId,
+      int idx) throws IOException {
+    return new SnapshotDiffReport.DiffReportEntry(
+        SnapshotDiffReport.DiffType.values()[idx %
+            SnapshotDiffReport.DiffType.values().length],
+        codecRegistry.asRawData(jobId + DELIMITER + idx));
+  }
+
+  /**
+   * Tests the createPageResponse function by persisting diff report entries
+   * for two jobs and verifying that paging returns the expected slice of
+   * records, and that invalid page parameters are rejected.
+   */
+  @ParameterizedTest
+  @CsvSource({"0,10,1000", "1,10,8", "1000,1000,10", "-1,1000,10000",
+      "1,0,1000", "1,-1,1000"})
+  public void testCreatePageResponse(int startIdx, int pageSize,
+      int totalNumberOfRecords) throws IOException {
+    // Mocking RocksDbPersistentMap constructor to use stubbed
+    // implementation instead.
+    Map<ColumnFamilyHandle, RocksDbPersistentMap>
+        cfHandleRocksDbPersistentMap = new HashMap<>();
+    try (MockedConstruction<RocksDbPersistentMap> mockedRocksDbPersistentMap =
+             Mockito.mockConstruction(RocksDbPersistentMap.class,
+                 (mock, context) -> {
+                   ColumnFamilyHandle cf =
+                       (ColumnFamilyHandle) context.arguments().stream()
+                           .filter(arg -> arg instanceof ColumnFamilyHandle)
+                           .findFirst().get();
+                   cfHandleRocksDbPersistentMap.put(cf, mock);
+                   PersistentMap<byte[], Object> obj =
+                       new SnapshotTestUtils.StubbedPersistentMap<>();
+                   Mockito.when(mock.iterator()).thenReturn(obj.iterator());
+                   Mockito.when(mock.get(ArgumentMatchers.any()))
+                       .thenAnswer(i -> obj.get(i.getArgument(0)));
+                   Mockito.doAnswer((Answer<Void>) i -> {
+                     obj.put(i.getArgument(0), i.getArgument(1));
+                     return null;
+                   }).when(mock).put(ArgumentMatchers.any(),
+                       ArgumentMatchers.any());
+                 })) {
+      String testJobId = "jobId";
+      String testJobId2 = "jobId2";
+      SnapshotDiffManager snapshotDiffManager =
+          getMockedSnapshotDiffManager(10);
+      IntStream.range(0, totalNumberOfRecords).boxed().forEach(idx -> {
+        try {
+          cfHandleRocksDbPersistentMap.get(snapdiffReportCFH)
+              .put(codecRegistry.asRawData(testJobId + DELIMITER + idx),
+                  codecRegistry.asRawData(getTestDiffEntry(testJobId, idx)));
+          cfHandleRocksDbPersistentMap.get(snapdiffReportCFH)
+              .put(codecRegistry.asRawData(testJobId2 + DELIMITER + idx),
+                  codecRegistry.asRawData(getTestDiffEntry(testJobId2, idx)));
+        } catch (IOException e) {
+          throw new RuntimeException(e);
+        }
+      });
+      SnapshotDiffJob snapshotDiffJob = new SnapshotDiffJob(0, testJobId,
+          SnapshotDiffResponse.JobStatus.DONE, "vol", "buck", "fs", "ts",
+          true, totalNumberOfRecords);
+      SnapshotDiffJob snapshotDiffJob2 = new SnapshotDiffJob(0, testJobId2,
+          SnapshotDiffResponse.JobStatus.DONE, "vol", "buck", "fs", "ts",
+          true, totalNumberOfRecords);
+      cfHandleRocksDbPersistentMap.get(snapdiffJobCFH)
+          .put(codecRegistry.asRawData(testJobId), snapshotDiffJob);
+      cfHandleRocksDbPersistentMap.get(snapdiffJobCFH)
+          .put(codecRegistry.asRawData(testJobId2), snapshotDiffJob2);
+      if (pageSize <= 0 || startIdx < 0) {
+        Assertions.assertThrows(IllegalArgumentException.class,
+            () -> snapshotDiffManager.createPageResponse(snapshotDiffJob,
+                "vol", "buck", "fs", "ts", startIdx, pageSize));
+        return;
+      }
+      SnapshotDiffReportOzone snapshotDiffReportOzone =
+          snapshotDiffManager.createPageResponse(snapshotDiffJob, "vol",
+              "buck", "fs", "ts",
+              startIdx, pageSize);
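+      // A page holds at most pageSize entries starting at startIdx; e.g.
+      // startIdx=1, pageSize=10 over 8 records yields min(10, 8 - 1) = 7
+      // entries, and Math.max(..., 0) covers a startIdx past the end.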
+      int expectedTotalNumberOfRecords =
+          Math.max(Math.min(pageSize, totalNumberOfRecords - startIdx), 0);
+      Assertions.assertEquals(expectedTotalNumberOfRecords,
+          snapshotDiffReportOzone.getDiffList().size());
+
+      int idx = startIdx;
+      for (SnapshotDiffReport.DiffReportEntry entry :
+          snapshotDiffReportOzone.getDiffList()) {
+        Assertions.assertEquals(getTestDiffEntry(testJobId, idx), entry);
+        idx++;
+      }
+    }
+  }
+
+}
diff --git a/pom.xml b/pom.xml
index 2b83f05a464e..5320055ec59e 100644
--- a/pom.xml
+++ b/pom.xml
@@ -217,7 +217,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
     2.7.5
     2.8.0
     1.10.19
-    2.28.2
+    3.5.9
     1.3
     1.6.5
     2.0.4
@@ -253,7 +253,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
     [3.3.0,)

-    -Xmx4096m -XX:+HeapDumpOnOutOfMemoryError
+    -Xmx4096m -XX:+HeapDumpOnOutOfMemoryError -Dnative.lib.tmp.dir=${native.lib.tmp.dir}
     3.0.0-M5
     ${maven-surefire-plugin.version}
     ${maven-surefire-plugin.version}
@@ -306,6 +306,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
     1.4.9
     5.1.0
+