diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 9817d877eb55..867da0865472 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -226,6 +226,7 @@ private OzoneConsts() {
   public static final String CHUNKS_PATH = "chunksPath";
   public static final String CONTAINER_DB_TYPE = "containerDBType";
   public static final String CHECKSUM = "checksum";
+  public static final String DATA_SCAN_TIMESTAMP = "dataScanTimestamp";
   public static final String ORIGIN_PIPELINE_ID = "originPipelineId";
   public static final String ORIGIN_NODE_ID = "originNodeId";
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
index 85738e240974..a6cef899d468 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
@@ -21,6 +21,8 @@
 import com.google.common.collect.Lists;
 import java.io.IOException;
 import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
+import java.time.Instant;
 import java.util.List;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
@@ -32,13 +34,17 @@
 import java.util.Collections;
 import java.util.Map;
+import java.util.Optional;
 import java.util.TreeMap;
 import java.util.concurrent.atomic.AtomicLong;
 import org.yaml.snakeyaml.Yaml;
 
+import javax.annotation.Nullable;
+
 import static org.apache.hadoop.ozone.OzoneConsts.CHECKSUM;
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ID;
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_TYPE;
+import static org.apache.hadoop.ozone.OzoneConsts.DATA_SCAN_TIMESTAMP;
 import static org.apache.hadoop.ozone.OzoneConsts.LAYOUTVERSION;
 import static org.apache.hadoop.ozone.OzoneConsts.MAX_SIZE;
 import static org.apache.hadoop.ozone.OzoneConsts.METADATA;
@@ -89,7 +95,14 @@ public abstract class ContainerData {
   private HddsVolume volume;
 
   private String checksum;
-  public static final Charset CHARSET_ENCODING = Charset.forName("UTF-8");
+
+  /** Timestamp of last data scan (milliseconds since Unix Epoch).
+   * {@code null} if not yet scanned (or timestamp not recorded,
+   * e.g. in prior versions). */
+  private Long dataScanTimestamp; // for serialization
+  private transient Optional<Instant> lastDataScanTime = Optional.empty();
+
+  public static final Charset CHARSET_ENCODING = StandardCharsets.UTF_8;
   private static final String DUMMY_CHECKSUM = new String(new byte[64],
       CHARSET_ENCODING);
@@ -103,6 +116,7 @@ public abstract class ContainerData {
       METADATA,
       MAX_SIZE,
       CHECKSUM,
+      DATA_SCAN_TIMESTAMP,
       ORIGIN_PIPELINE_ID,
       ORIGIN_NODE_ID));
@@ -506,6 +520,30 @@ public String getChecksum() {
     return this.checksum;
   }
 
+  /**
+   * @return {@code Optional<Instant>} with the timestamp of last data scan.
+   *         {@code absent} if not yet scanned or timestamp was not recorded.
+   */
+  public Optional<Instant> lastDataScanTime() {
+    return lastDataScanTime;
+  }
+
+  public void updateDataScanTime(@Nullable Instant time) {
+    lastDataScanTime = Optional.ofNullable(time);
+    dataScanTimestamp = time != null ? time.toEpochMilli() : null;
+  }
+
+  // for deserialization
+  public void setDataScanTimestamp(Long timestamp) {
+    dataScanTimestamp = timestamp;
+    lastDataScanTime = timestamp != null
+        ? Optional.of(Instant.ofEpochMilli(timestamp))
+        : Optional.empty();
+  }
+
+  public Long getDataScanTimestamp() {
+    return dataScanTimestamp;
+  }
   /**
    * Returns the origin pipeline Id of this container.
@@ -557,4 +595,5 @@ public void computeAndSetChecksum(Yaml yaml) throws IOException {
    * Returns the blockCommitSequenceId.
    */
   public abstract long getBlockCommitSequenceId();
+
 }
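Note: the ContainerData change keeps two views of the same value, a transient `Optional<Instant>` for callers and a nullable epoch-millis `Long` that the YAML layer serializes. A stripped-down, standalone model of the same pattern (class and method names here are illustrative, not part of the patch) shows the round-trip, including the sub-millisecond truncation that the serialized form implies:

```java
import java.time.Instant;
import java.util.Optional;

// Illustrative model of ContainerData's timestamp handling, not patch code.
public class ScanTimeHolder {
  private Long dataScanTimestamp;                  // persisted (epoch millis)
  private transient Optional<Instant> lastScan = Optional.empty();

  public void updateDataScanTime(Instant time) {   // writer-side API
    lastScan = Optional.ofNullable(time);
    dataScanTimestamp = time != null ? time.toEpochMilli() : null;
  }

  public void setDataScanTimestamp(Long millis) {  // deserialization hook
    dataScanTimestamp = millis;
    lastScan = millis != null
        ? Optional.of(Instant.ofEpochMilli(millis))
        : Optional.empty();
  }

  public Optional<Instant> lastDataScanTime() {
    return lastScan;
  }

  public static void main(String[] args) {
    ScanTimeHolder holder = new ScanTimeHolder();
    holder.updateDataScanTime(Instant.parse("2019-08-01T10:15:30.123456Z"));
    // Simulate a save/load cycle: only the millis survive.
    holder.setDataScanTimestamp(holder.dataScanTimestamp);
    System.out.println(holder.lastDataScanTime());
    // Optional[2019-08-01T10:15:30.123Z]  <- nanos truncated to millis
  }
}
```

This truncation is also why the TestContainerDataYaml change further down should pin its expected timestamp to millisecond precision before comparing round-tripped values.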
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataScanOrder.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataScanOrder.java
new file mode 100644
index 000000000000..f26f1bbc2e11
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataScanOrder.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.container.common.impl;
+
+import org.apache.hadoop.ozone.container.common.interfaces.Container;
+
+import java.time.Instant;
+import java.util.Comparator;
+import java.util.Optional;
+
+/**
+ * Orders containers:
+ * 1. containers not yet scanned first,
+ * 2. then least recently scanned first,
+ * 3. ties are broken by containerID.
+ */
+public class ContainerDataScanOrder implements Comparator<Container<?>> {
+
+  public static final Comparator<Container<?>> INSTANCE =
+      new ContainerDataScanOrder();
+
+  @Override
+  public int compare(Container<?> o1, Container<?> o2) {
+    ContainerData d1 = o1.getContainerData();
+    ContainerData d2 = o2.getContainerData();
+
+    Optional<Instant> scan1 = d1.lastDataScanTime();
+    boolean scanned1 = scan1.isPresent();
+    Optional<Instant> scan2 = d2.lastDataScanTime();
+    boolean scanned2 = scan2.isPresent();
+
+    int result = Boolean.compare(scanned1, scanned2);
+    if (0 == result && scanned1 && scanned2) {
+      result = scan1.get().compareTo(scan2.get());
+    }
+    if (0 == result) {
+      result = Long.compare(d1.getContainerID(), d2.getContainerID());
+    }
+
+    return result;
+  }
+}
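To make the three-level ordering concrete, the same contract can be reproduced with `Comparator` combinators. The standalone sketch below (the `ScanRecord` class and sample data are illustrative stand-ins for `Container`/`ContainerData`, not part of the patch) sorts sample records the same way:

```java
import java.time.Instant;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.Optional;

// Illustrative stand-in for Container/ContainerData, to show the sort order.
final class ScanRecord {
  final long id;
  final Optional<Instant> lastScan;

  ScanRecord(long id, Instant lastScan) {
    this.id = id;
    this.lastScan = Optional.ofNullable(lastScan);
  }

  @Override
  public String toString() {
    return "#" + id + lastScan.map(t -> "@" + t.toEpochMilli()).orElse("@never");
  }
}

public class ScanOrderDemo {
  // Same three levels as ContainerDataScanOrder: never-scanned first
  // (false < true), then oldest scan first, ties broken by id.
  static final Comparator<ScanRecord> SCAN_ORDER = Comparator
      .comparing((ScanRecord r) -> r.lastScan.isPresent())
      .thenComparing(r -> r.lastScan.orElse(Instant.MIN))
      .thenComparingLong(r -> r.id);

  public static void main(String[] args) {
    List<ScanRecord> records = Arrays.asList(
        new ScanRecord(3, Instant.ofEpochMilli(2000)),
        new ScanRecord(2, null),
        new ScanRecord(1, Instant.ofEpochMilli(1000)),
        new ScanRecord(4, Instant.ofEpochMilli(1000)));
    records.sort(SCAN_ORDER);
    System.out.println(records); // [#2@never, #1@1000, #4@1000, #3@2000]
  }
}
```

Putting never-scanned containers first means new (and pre-upgrade) containers get verified before anything is rescanned; the ID tiebreak makes the order deterministic.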
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
index 1f9966c1a76c..a16d8aafac72 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
@@ -53,6 +53,7 @@
 import org.yaml.snakeyaml.introspector.PropertyUtils;
 import org.yaml.snakeyaml.nodes.MappingNode;
 import org.yaml.snakeyaml.nodes.Node;
+import org.yaml.snakeyaml.nodes.NodeTuple;
 import org.yaml.snakeyaml.nodes.ScalarNode;
 import org.yaml.snakeyaml.nodes.Tag;
 import org.yaml.snakeyaml.representer.Representer;
@@ -92,7 +93,6 @@ public static void createContainerFile(ContainerType containerType,
           containerFile);
       writer = new OutputStreamWriter(out, "UTF-8");
       yaml.dump(containerData, writer);
-
     } finally {
       try {
         if (writer != null) {
@@ -217,6 +217,17 @@ protected Set<Property> getProperties(Class<? extends Object> type)
       }
       return filtered;
     }
+
+    /**
+     * Omit properties with null value.
+     */
+    @Override
+    protected NodeTuple representJavaBeanProperty(
+        Object bean, Property property, Object value, Tag tag) {
+      return value == null
+          ? null
+          : super.representJavaBeanProperty(bean, property, value, tag);
+    }
   }
 
   /**
@@ -260,6 +271,8 @@ public Object construct(Node node) {
       Map<String, String> meta = (Map) nodes.get(OzoneConsts.METADATA);
       kvData.setMetadata(meta);
      kvData.setChecksum((String) nodes.get(OzoneConsts.CHECKSUM));
+      Long timestamp = (Long) nodes.get(OzoneConsts.DATA_SCAN_TIMESTAMP);
+      kvData.setDataScanTimestamp(timestamp);
       String state = (String) nodes.get(OzoneConsts.STATE);
       kvData
           .setState(ContainerProtos.ContainerDataProto.State.valueOf(state));
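The `representJavaBeanProperty` override is what keeps `dataScanTimestamp` out of `.container` files for containers that have never been scanned. The behavior can be seen in isolation with the sketch below (a hypothetical bean, not patch code; assumes SnakeYAML 1.x, where `Representer` has a no-argument constructor):

```java
import org.yaml.snakeyaml.Yaml;
import org.yaml.snakeyaml.introspector.Property;
import org.yaml.snakeyaml.nodes.NodeTuple;
import org.yaml.snakeyaml.nodes.Tag;
import org.yaml.snakeyaml.representer.Representer;

public class NullSkippingYamlDemo {
  // Hypothetical bean mimicking the shape of the serialized container data.
  public static class Bean {
    private Long dataScanTimestamp;               // null until first scan
    private String checksum = "abc";
    public Long getDataScanTimestamp() { return dataScanTimestamp; }
    public void setDataScanTimestamp(Long t) { dataScanTimestamp = t; }
    public String getChecksum() { return checksum; }
    public void setChecksum(String c) { checksum = c; }
  }

  public static void main(String[] args) {
    Representer representer = new Representer() {
      @Override
      protected NodeTuple representJavaBeanProperty(
          Object bean, Property property, Object value, Tag tag) {
        // Returning null drops the property from the output entirely.
        return value == null
            ? null
            : super.representJavaBeanProperty(bean, property, value, tag);
      }
    };
    Yaml yaml = new Yaml(representer);
    System.out.println(yaml.dumpAsMap(new Bean()));
    // Prints only "checksum: abc"; dataScanTimestamp is omitted.
  }
}
```

On load, the constructor side treats the missing key as a `null` timestamp, which `setDataScanTimestamp` maps back to an empty `Optional`, so old container files keep working unchanged.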
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
index 41415ebe0ac3..ae0d3ea13b0a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java
@@ -132,6 +132,7 @@ public Iterator<Container<?>> getContainerIterator() {
 
   /**
    * Return an iterator of containers associated with the specified volume.
+   * Not-yet-scanned containers come first, then least recently scanned.
    *
    * @param volume the HDDS volume which should be used to filter containers
    * @return {@literal Iterator<Container<?>>}
    */
   @VisibleForTesting
   public Iterator<Container<?>> getContainerIterator(HddsVolume volume) {
@@ -143,6 +144,7 @@ public Iterator<Container<?>> getContainerIterator(HddsVolume volume) {
     return containerMap.values().stream()
         .filter(x -> volumeUuid.equals(x.getContainerData().getVolume()
             .getStorageID()))
+        .sorted(ContainerDataScanOrder.INSTANCE)
         .iterator();
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
index 7f7deaf92063..9d90aceaf5ae 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Container.java
@@ -22,6 +22,7 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.time.Instant;
 import java.util.Map;
 
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -66,6 +67,12 @@ void create(VolumeSet volumeSet, VolumeChoosingPolicy volumeChoosingPolicy,
   void update(Map<String, String> metaData, boolean forceUpdate)
       throws StorageContainerException;
 
+  /**
+   * Updates the time of last data scan.
+   */
+  void updateDataScanTimestamp(Instant timestamp)
+      throws StorageContainerException;
+
   /**
    * Get metadata about the container.
    *
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index a6e914b90b83..bfad2c6b0375 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -24,6 +24,7 @@
 import java.io.OutputStream;
 import java.nio.file.Files;
 import java.nio.file.StandardCopyOption;
+import java.time.Instant;
 import java.util.Map;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
@@ -337,6 +338,17 @@ public void close() throws StorageContainerException {
         containerData.getBlockCommitSequenceId());
   }
 
+  @Override
+  public void updateDataScanTimestamp(Instant time)
+      throws StorageContainerException {
+    writeLock();
+    try {
+      updateContainerData(() -> containerData.updateDataScanTime(time));
+    } finally {
+      writeUnlock();
+    }
+  }
+
   /**
    *
    * Must be invoked with the writeLock held.
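KeyValueContainer performs the update under its container write lock via `updateContainerData`, whose body is outside this patch. For readers unfamiliar with the idiom, a self-contained sketch (illustrative names; `persist()` stands in for rewriting the `.container` file):

```java
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Sketch of the lock-then-update idiom used above; not patch code.
public class GuardedScanTime {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private Long scanTimeMillis;                 // null means "never scanned"

  public void update(long millis) {
    lock.writeLock().lock();                   // cf. writeLock()
    try {
      scanTimeMillis = millis;                 // in-memory state first
      persist();                               // then write-through to disk
    } finally {
      lock.writeLock().unlock();               // cf. writeUnlock()
    }
  }

  public Long read() {
    lock.readLock().lock();                    // readers do not block readers
    try {
      return scanTimeMillis;
    } finally {
      lock.readLock().unlock();
    }
  }

  private void persist() {
    // Placeholder: the real implementation rewrites the container file so
    // the timestamp survives a datanode restart.
  }
}
```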
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
index 8bbdec96695e..6c8a25a3275f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java
@@ -32,6 +32,7 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.time.Instant;
 import java.util.Iterator;
 import java.util.Map;
 
@@ -174,4 +175,10 @@ public Iterator<Container<?>> getContainers(HddsVolume volume) {
     return containerSet.getContainerIterator(volume);
   }
 
+  void updateDataScanTimestamp(long containerId, Instant timestamp)
+      throws IOException {
+    Container container = containerSet.getContainer(containerId);
+    container.updateDataScanTimestamp(timestamp);
+  }
+
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScanner.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScanner.java
index 1141951dcc00..8f733d576112 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScanner.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScanner.java
@@ -18,12 +18,15 @@
 package org.apache.hadoop.ozone.container.ozoneimpl;
 
 import java.io.IOException;
+import java.time.Instant;
 import java.util.Iterator;
+import java.util.Optional;
 import java.util.concurrent.TimeUnit;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hdfs.util.Canceler;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
+import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.slf4j.Logger;
@@ -95,14 +98,19 @@ public void runIteration() {
     while (!stopping && itr.hasNext()) {
       Container c = itr.next();
       if (c.shouldScanData()) {
+        ContainerData containerData = c.getContainerData();
+        long containerId = containerData.getContainerID();
         try {
+          logScanStart(containerData);
           if (!c.scanData(throttler, canceler)) {
             metrics.incNumUnHealthyContainers();
-            controller.markContainerUnhealthy(
-                c.getContainerData().getContainerID());
+            controller.markContainerUnhealthy(containerId);
+          } else {
+            Instant now = Instant.now();
+            logScanCompleted(containerData, now);
+            controller.updateDataScanTimestamp(containerId, now);
           }
         } catch (IOException ex) {
-          long containerId = c.getContainerData().getContainerID();
           LOG.warn("Unexpected exception while scanning container "
               + containerId, ex);
         } finally {
@@ -135,6 +143,23 @@ public void runIteration() {
     }
   }
 
+  private static void logScanStart(ContainerData containerData) {
+    if (LOG.isDebugEnabled()) {
+      Optional<Instant> scanTimestamp = containerData.lastDataScanTime();
+      Object lastScanTime = scanTimestamp.map(ts -> "at " + ts).orElse("never");
+      LOG.debug("Scanning container {}, last scanned {}",
+          containerData.getContainerID(), lastScanTime);
+    }
+  }
+
+  private static void logScanCompleted(
+      ContainerData containerData, Instant timestamp) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Completed scan of container {} at {}",
+          containerData.getContainerID(), timestamp);
+    }
+  }
+
   public synchronized void shutdown() {
     this.stopping = true;
     this.canceler.cancel("ContainerDataScanner("+volume+") is shutting down");
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
index c611ccb28e7e..97d0206e695b 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
@@ -29,6 +29,7 @@
 import java.io.File;
 import java.io.IOException;
+import java.time.Instant;
 import java.util.UUID;
 
 import static org.junit.Assert.assertEquals;
@@ -45,6 +46,9 @@ public class TestContainerDataYaml {
   private static String testRoot = new FileSystemTestHelper().getTestRootDir();
 
   private static final long MAXSIZE = (long) StorageUnit.GB.toBytes(5);
+  // Millis precision: finer units would not survive the YAML round-trip.
+  private static final Instant SCAN_TIME =
+      Instant.ofEpochMilli(System.currentTimeMillis());
 
   /**
    * Creates a .container file. cleanup() should be called at the end of the
@@ -61,6 +65,7 @@ private File createContainerFile(long containerID) throws IOException {
     keyValueContainerData.setContainerDBType("RocksDB");
     keyValueContainerData.setMetadataPath(testRoot);
     keyValueContainerData.setChunksPath(testRoot);
+    keyValueContainerData.updateDataScanTime(SCAN_TIME);
 
     File containerFile = new File(testRoot, containerPath);
 
@@ -98,6 +103,10 @@ public void testCreateContainerFile() throws IOException {
     assertEquals(1, kvData.getLayOutVersion());
     assertEquals(0, kvData.getMetadata().size());
     assertEquals(MAXSIZE, kvData.getMaxSize());
+    assertTrue(kvData.lastDataScanTime().isPresent());
+    assertEquals(SCAN_TIME, kvData.lastDataScanTime().get());
+    assertEquals(SCAN_TIME.toEpochMilli(),
+        kvData.getDataScanTimestamp().longValue());
 
     // Update ContainerData.
     kvData.addMetadata("VOLUME", "hdfs");
@@ -126,6 +135,10 @@ public void testCreateContainerFile() throws IOException {
     assertEquals("hdfs", kvData.getMetadata().get("VOLUME"));
     assertEquals("ozone", kvData.getMetadata().get("OWNER"));
     assertEquals(MAXSIZE, kvData.getMaxSize());
+    assertTrue(kvData.lastDataScanTime().isPresent());
+    assertEquals(SCAN_TIME, kvData.lastDataScanTime().get());
+    assertEquals(SCAN_TIME.toEpochMilli(),
+        kvData.getDataScanTimestamp().longValue());
   }
 
   @Test
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java
index e1e7119727b3..233dca7faaff 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with this
  * work for additional information regarding copyright ownership.  The ASF
@@ -33,10 +33,13 @@
 import org.mockito.Mockito;
 
 import java.io.IOException;
+import java.time.Instant;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
+import java.util.Random;
 import java.util.UUID;
 
 import static org.junit.Assert.assertEquals;
@@ -68,7 +71,7 @@ public void testAddGetRemoveContainer() throws StorageContainerException {
     boolean result = containerSet.addContainer(keyValueContainer);
     assertTrue(result);
     try {
-      result = containerSet.addContainer(keyValueContainer);
+      containerSet.addContainer(keyValueContainer);
       fail("Adding same container ID twice should fail.");
     } catch (StorageContainerException ex) {
       GenericTestUtils.assertExceptionContains("Container already exists with" +
@@ -78,7 +81,7 @@ public void testAddGetRemoveContainer() throws StorageContainerException {
     //getContainer
     KeyValueContainer container = (KeyValueContainer) containerSet
         .getContainer(containerId);
-    KeyValueContainerData keyValueContainerData = (KeyValueContainerData)
+    KeyValueContainerData keyValueContainerData =
         container.getContainerData();
     assertEquals(containerId, keyValueContainerData.getContainerID());
     assertEquals(state, keyValueContainerData.getState());
@@ -178,6 +181,56 @@ public void testIteratorPerVolume() throws StorageContainerException {
     assertEquals(5, count2);
   }
 
+  @Test
+  public void iteratorIsOrderedByScanTime() throws StorageContainerException {
+    HddsVolume vol = Mockito.mock(HddsVolume.class);
+    Mockito.when(vol.getStorageID()).thenReturn("uuid-1");
+    Random random = new Random();
+    ContainerSet containerSet = new ContainerSet();
+    int containerCount = 50;
+    for (int i = 0; i < containerCount; i++) {
+      KeyValueContainerData kvData = new KeyValueContainerData(i,
+          (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(),
+          UUID.randomUUID().toString());
+      if (random.nextBoolean()) {
+        Instant scanTime = Instant.ofEpochMilli(Math.abs(random.nextLong()));
+        kvData.updateDataScanTime(scanTime);
+      }
+      kvData.setVolume(vol);
+      kvData.setState(ContainerProtos.ContainerDataProto.State.CLOSED);
+      KeyValueContainer kv = new KeyValueContainer(kvData, new
+          OzoneConfiguration());
+      containerSet.addContainer(kv);
+    }
+
+    int containersToBeScanned = 0;
+    Optional<Instant> prevScanTime = Optional.empty();
+    long prevContainerID = Long.MIN_VALUE;
+    for (Iterator<Container<?>> iter =
+        containerSet.getContainerIterator(vol); iter.hasNext();) {
+      ContainerData data = iter.next().getContainerData();
+      Optional<Instant> scanTime = data.lastDataScanTime();
+      if (prevScanTime.isPresent()) {
+        if (scanTime.isPresent()) {
+          int result = scanTime.get().compareTo(prevScanTime.get());
+          assertTrue(result >= 0);
+          if (result == 0) {
+            assertTrue(prevContainerID < data.getContainerID());
+          }
+        } else {
+          fail("Containers not yet scanned should be sorted before "
+              + "already scanned ones");
+        }
+      }
+
+      prevScanTime = scanTime;
+      prevContainerID = data.getContainerID();
+      containersToBeScanned++;
+    }
+
+    assertEquals(containerCount, containersToBeScanned);
+  }
+
   @Test
   public void testGetContainerReport() throws IOException {
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberMetrics.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberMetrics.java
index b9b1beabdbd1..fe6a9296a16d 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberMetrics.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerScrubberMetrics.java
@@ -23,88 +23,106 @@
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
-import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
-import org.mockito.Mockito;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.runners.MockitoJUnitRunner;
 
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 /**
  * This test verifies the container scrubber metrics functionality.
  */
+@RunWith(MockitoJUnitRunner.class)
 public class TestContainerScrubberMetrics {
+
+  private final AtomicLong containerIdSeq = new AtomicLong(100);
+
+  @Mock
+  private Container<ContainerData> healthy;
+
+  @Mock
+  private Container<ContainerData> corruptMetadata;
+
+  @Mock
+  private Container<ContainerData> corruptData;
+
+  @Mock
+  private HddsVolume vol;
+
+  private ContainerScrubberConfiguration conf;
+  private ContainerController controller;
+
+  @Before
+  public void setup() {
+    conf = new OzoneConfiguration()
+        .getObject(ContainerScrubberConfiguration.class);
+    conf.setMetadataScanInterval(0);
+    conf.setDataScanInterval(0);
+    controller = mockContainerController();
+  }
+
   @Test
   public void testContainerMetaDataScrubberMetrics() {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    ContainerScrubberConfiguration c = conf.getObject(
-        ContainerScrubberConfiguration.class);
-    c.setMetadataScanInterval(0);
-    HddsVolume vol = Mockito.mock(HddsVolume.class);
-    ContainerController cntrl = mockContainerController(vol);
-
-    ContainerMetadataScanner mc = new ContainerMetadataScanner(c, cntrl);
-    mc.runIteration();
-
-    Assert.assertEquals(1, mc.getMetrics().getNumScanIterations());
-    Assert.assertEquals(3, mc.getMetrics().getNumContainersScanned());
-    Assert.assertEquals(1, mc.getMetrics().getNumUnHealthyContainers());
+    ContainerMetadataScanner subject =
+        new ContainerMetadataScanner(conf, controller);
+    subject.runIteration();
+
+    ContainerMetadataScrubberMetrics metrics = subject.getMetrics();
+    assertEquals(1, metrics.getNumScanIterations());
+    assertEquals(3, metrics.getNumContainersScanned());
+    assertEquals(1, metrics.getNumUnHealthyContainers());
   }
 
   @Test
   public void testContainerDataScrubberMetrics() {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    ContainerScrubberConfiguration c = conf.getObject(
-        ContainerScrubberConfiguration.class);
-    c.setDataScanInterval(0);
-    HddsVolume vol = Mockito.mock(HddsVolume.class);
-    ContainerController cntrl = mockContainerController(vol);
-
-    ContainerDataScanner sc = new ContainerDataScanner(c, cntrl, vol);
-    sc.runIteration();
-
-    ContainerDataScrubberMetrics m = sc.getMetrics();
-    Assert.assertEquals(1, m.getNumScanIterations());
-    Assert.assertEquals(2, m.getNumContainersScanned());
-    Assert.assertEquals(1, m.getNumUnHealthyContainers());
+    ContainerDataScanner subject =
+        new ContainerDataScanner(conf, controller, vol);
+    subject.runIteration();
+
+    ContainerDataScrubberMetrics metrics = subject.getMetrics();
+    assertEquals(1, metrics.getNumScanIterations());
+    assertEquals(2, metrics.getNumContainersScanned());
+    assertEquals(1, metrics.getNumUnHealthyContainers());
   }
 
-  private ContainerController mockContainerController(HddsVolume vol) {
+  private ContainerController mockContainerController() {
     // healthy container
-    Container c1 = Mockito.mock(Container.class);
-    Mockito.when(c1.shouldScanData()).thenReturn(true);
-    Mockito.when(c1.scanMetaData()).thenReturn(true);
-    Mockito.when(c1.scanData(
-        Mockito.any(DataTransferThrottler.class),
-        Mockito.any(Canceler.class))).thenReturn(true);
+    setupMockContainer(healthy, true, true, true);
 
     // unhealthy container (corrupt data)
-    ContainerData c2d = Mockito.mock(ContainerData.class);
-    Mockito.when(c2d.getContainerID()).thenReturn(101L);
-    Container c2 = Mockito.mock(Container.class);
-    Mockito.when(c2.scanMetaData()).thenReturn(true);
-    Mockito.when(c2.shouldScanData()).thenReturn(true);
-    Mockito.when(c2.scanData(
-        Mockito.any(DataTransferThrottler.class),
-        Mockito.any(Canceler.class))).thenReturn(false);
-    Mockito.when(c2.getContainerData()).thenReturn(c2d);
+    setupMockContainer(corruptData, true, true, false);
 
     // unhealthy container (corrupt metadata)
-    ContainerData c3d = Mockito.mock(ContainerData.class);
-    Mockito.when(c3d.getContainerID()).thenReturn(102L);
-    Container c3 = Mockito.mock(Container.class);
-    Mockito.when(c3.shouldScanData()).thenReturn(false);
-    Mockito.when(c3.scanMetaData()).thenReturn(false);
-    Mockito.when(c3.getContainerData()).thenReturn(c3d);
-
-    Collection<Container<?>> containers = Arrays.asList(c1, c2, c3);
-    ContainerController cntrl = Mockito.mock(ContainerController.class);
-    Mockito.when(cntrl.getContainers(vol))
-        .thenReturn(containers.iterator());
-    Mockito.when(cntrl.getContainers())
-        .thenReturn(containers.iterator());
-
-    return cntrl;
+    setupMockContainer(corruptMetadata, false, false, false);
+
+    Collection<Container<?>> containers = Arrays.asList(
+        healthy, corruptData, corruptMetadata);
+    ContainerController mock = mock(ContainerController.class);
+    when(mock.getContainers(vol)).thenReturn(containers.iterator());
+    when(mock.getContainers()).thenReturn(containers.iterator());
+
+    return mock;
+  }
+
+  private void setupMockContainer(
+      Container<ContainerData> c, boolean shouldScanData,
+      boolean scanMetaDataSuccess, boolean scanDataSuccess) {
+    ContainerData data = mock(ContainerData.class);
+    when(data.getContainerID()).thenReturn(containerIdSeq.getAndIncrement());
+    when(c.getContainerData()).thenReturn(data);
+    when(c.shouldScanData()).thenReturn(shouldScanData);
+    when(c.scanMetaData()).thenReturn(scanMetaDataSuccess);
+    when(c.scanData(any(DataTransferThrottler.class), any(Canceler.class)))
+        .thenReturn(scanDataSuccess);
  }
}
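As a recap of how the pieces fit together: a data-scanner pass now walks each volume's containers in scan order and persists a timestamp after every successful scan, so the order is maintained across iterations and datanode restarts. The sketch below is not part of the patch; it compiles against the classes above (placed in the ozoneimpl package because `updateDataScanTimestamp` is package-private), with error handling and metrics omitted:

```java
package org.apache.hadoop.ozone.container.ozoneimpl;

import java.io.IOException;
import java.time.Instant;
import java.util.Iterator;

import org.apache.hadoop.hdfs.util.Canceler;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.ozone.container.common.interfaces.Container;
import org.apache.hadoop.ozone.container.common.volume.HddsVolume;

/**
 * Illustrative recap of one scanner pass over a volume in scan order.
 */
public final class ScanPassSketch {
  private ScanPassSketch() {
  }

  static void scanVolume(ContainerController controller, HddsVolume volume,
      DataTransferThrottler throttler, Canceler canceler) throws IOException {
    // Never-scanned containers come back first, then least recently scanned.
    Iterator<Container<?>> itr = controller.getContainers(volume);
    while (itr.hasNext()) {
      Container<?> c = itr.next();
      if (!c.shouldScanData()) {
        continue;
      }
      if (c.scanData(throttler, canceler)) {
        // Persisting the timestamp pushes the container to the back of the
        // scan order, so an interrupted datanode resumes where it left off.
        controller.updateDataScanTimestamp(
            c.getContainerData().getContainerID(), Instant.now());
      }
      // The real scanner also marks failed containers unhealthy and
      // updates metrics; omitted here.
    }
  }
}
```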