diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java index 2124bd9c4700..22454e60a98c 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerSet.java @@ -25,12 +25,12 @@ import com.google.protobuf.Message; import java.io.IOException; import java.time.Clock; -import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.concurrent.ConcurrentNavigableMap; import java.util.concurrent.ConcurrentSkipListMap; @@ -41,7 +41,6 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.hdds.utils.db.InMemoryTestTable; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; @@ -57,43 +56,39 @@ public class ContainerSet implements Iterable> { private static final Logger LOG = LoggerFactory.getLogger(ContainerSet.class); + public static ContainerSet newReadOnlyContainerSet(long recoveringTimeout) { + return new ContainerSet(null, recoveringTimeout); + } + + public static ContainerSet newRwContainerSet(Table containerIdsTable, long recoveringTimeout) { + Objects.requireNonNull(containerIdsTable, "containerIdsTable == null"); + return new ContainerSet(containerIdsTable, recoveringTimeout); + } + private final ConcurrentSkipListMap> containerMap = new ConcurrentSkipListMap<>(); private final ConcurrentSkipListSet missingContainerSet = new ConcurrentSkipListSet<>(); private final ConcurrentSkipListMap recoveringContainerMap = new ConcurrentSkipListMap<>(); - private Clock clock; + private final Clock clock; private long recoveringTimeout; private final Table containerIdsTable; - @VisibleForTesting - public ContainerSet(long recoveringTimeout) { - this(new InMemoryTestTable<>(), recoveringTimeout); + private ContainerSet(Table continerIdsTable, long recoveringTimeout) { + this(continerIdsTable, recoveringTimeout, null); } - public ContainerSet(Table continerIdsTable, long recoveringTimeout) { - this(continerIdsTable, recoveringTimeout, false); - } - - public ContainerSet(Table continerIdsTable, long recoveringTimeout, boolean readOnly) { - this.clock = Clock.system(ZoneOffset.UTC); + ContainerSet(Table continerIdsTable, long recoveringTimeout, Clock clock) { + this.clock = clock != null ? 
clock : Clock.systemUTC(); this.containerIdsTable = continerIdsTable; this.recoveringTimeout = recoveringTimeout; - if (!readOnly && containerIdsTable == null) { - throw new IllegalArgumentException("Container table cannot be null when container set is not read only"); - } } public long getCurrentTime() { return clock.millis(); } - @VisibleForTesting - public void setClock(Clock clock) { - this.clock = clock; - } - @VisibleForTesting public void setRecoveringTimeout(long recoveringTimeout) { this.recoveringTimeout = recoveringTimeout; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java index b2ca06699cb9..b3fa5133823e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java @@ -188,7 +188,8 @@ public OzoneContainer(HddsDatanodeService hddsDatanodeService, OZONE_RECOVERING_CONTAINER_TIMEOUT, OZONE_RECOVERING_CONTAINER_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS); this.witnessedContainerMetadataStore = WitnessedContainerMetadataStoreImpl.get(conf); - containerSet = new ContainerSet(witnessedContainerMetadataStore.getContainerIdsTable(), recoveringContainerTimeout); + containerSet = ContainerSet.newRwContainerSet(witnessedContainerMetadataStore.getContainerIdsTable(), + recoveringContainerTimeout); metadataScanner = null; metrics = ContainerMetrics.create(conf); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java index d75aa4cfd8cb..e33a4c4a3ce5 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java @@ -49,8 +49,8 @@ import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.common.impl.ContainerData; +import org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils; import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.interfaces.Container.ScanResult; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; @@ -335,7 +335,7 @@ public static ContainerDispatcher getNoopContainerDispatcher() { } private static final ContainerController EMPTY_CONTAINER_CONTROLLER - = new ContainerController(new ContainerSet(1000), Collections.emptyMap()); + = new ContainerController(ContainerImplTestUtils.newContainerSet(), Collections.emptyMap()); public static ContainerController getEmptyContainerController() { return EMPTY_CONTAINER_CONTROLLER; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java index 60117d25609e..4e8ad609f2b5 100644 --- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java @@ -17,8 +17,6 @@ package org.apache.hadoop.ozone.container.common; -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.commons.lang3.RandomStringUtils.randomAlphanumeric; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V1; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2; @@ -26,6 +24,7 @@ import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.COMMIT_STAGE; import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.WRITE_STAGE; import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.createDbInstancesForTestIfNeeded; +import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet; import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_BLOCK; import static org.apache.hadoop.ozone.container.common.states.endpoint.VersionEndpointTask.LOG; import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSameSchemaVersion; @@ -50,6 +49,7 @@ import java.util.Map; import java.util.Set; import java.util.UUID; +import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import org.apache.commons.lang3.StringUtils; @@ -168,7 +168,8 @@ private KeyValueContainerData createToDeleteBlocks(ContainerSet containerSet, } else { chunkManager = new FilePerChunkStrategy(true, null); } - byte[] arr = randomAlphanumeric(1048576).getBytes(UTF_8); + byte[] arr = new byte[1048576]; + ThreadLocalRandom.current().nextBytes(arr); ChunkBuffer buffer = ChunkBuffer.wrap(ByteBuffer.wrap(arr)); int txnID = 0; long containerID = ContainerTestHelper.getTestContainerID(); @@ -425,7 +426,7 @@ public void testPendingDeleteBlockReset(ContainerTestVersionInfo versionInfo) dnConf.setBlockDeletionLimit(blockDeleteLimit); this.blockLimitPerInterval = dnConf.getBlockDeletionLimit(); conf.setFromObject(dnConf); - ContainerSet containerSet = new ContainerSet(1000); + ContainerSet containerSet = newContainerSet(); // Create one container with no actual pending delete blocks, but an // incorrect metadata value indicating it has enough pending deletes to @@ -533,7 +534,7 @@ public void testBlockDeletion(ContainerTestVersionInfo versionInfo) dnConf.setBlockDeletionLimit(2); this.blockLimitPerInterval = dnConf.getBlockDeletionLimit(); conf.setFromObject(dnConf); - ContainerSet containerSet = new ContainerSet(1000); + ContainerSet containerSet = newContainerSet(); createToDeleteBlocks(containerSet, 1, 3, 1); ContainerMetrics metrics = ContainerMetrics.create(conf); KeyValueHandler keyValueHandler = @@ -659,7 +660,7 @@ public void testWithUnrecordedBlocks(ContainerTestVersionInfo versionInfo) dnConf.setBlockDeletionLimit(2); this.blockLimitPerInterval = dnConf.getBlockDeletionLimit(); conf.setFromObject(dnConf); - ContainerSet containerSet = new ContainerSet(1000); + ContainerSet containerSet = newContainerSet(); createToDeleteBlocks(containerSet, numOfContainers, numOfBlocksPerContainer, numOfChunksPerBlock); @@ -767,7 +768,7 @@ public void testShutdownService(ContainerTestVersionInfo versionInfo) 
conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 500, TimeUnit.MILLISECONDS); - ContainerSet containerSet = new ContainerSet(1000); + ContainerSet containerSet = newContainerSet(); // Create 1 container with 100 blocks createToDeleteBlocks(containerSet, 1, 100, 1); ContainerMetrics metrics = ContainerMetrics.create(conf); @@ -798,7 +799,7 @@ public void testBlockDeletionTimeout(ContainerTestVersionInfo versionInfo) blockLimitPerInterval = dnConf.getBlockDeletionLimit(); conf.setFromObject(dnConf); - ContainerSet containerSet = new ContainerSet(1000); + ContainerSet containerSet = newContainerSet(); createToDeleteBlocks(containerSet, 1, 3, 1); ContainerMetrics metrics = ContainerMetrics.create(conf); KeyValueHandler keyValueHandler = @@ -900,7 +901,7 @@ public void testContainerThrottle(ContainerTestVersionInfo versionInfo) dnConf.setBlockDeletionLimit(1); this.blockLimitPerInterval = dnConf.getBlockDeletionLimit(); conf.setFromObject(dnConf); - ContainerSet containerSet = new ContainerSet(1000); + ContainerSet containerSet = newContainerSet(); int containerCount = 2; int chunksPerBlock = 10; @@ -960,7 +961,7 @@ public void testContainerMaxLockHoldingTime( dnConf.setBlockDeletingMaxLockHoldingTime(Duration.ofMillis(-1)); dnConf.setBlockDeletionLimit(3); conf.setFromObject(dnConf); - ContainerSet containerSet = new ContainerSet(1000); + ContainerSet containerSet = newContainerSet(); int containerCount = 1; int chunksPerBlock = 10; @@ -1024,7 +1025,7 @@ public void testBlockThrottle(ContainerTestVersionInfo versionInfo) dnConf.setBlockDeletionLimit(10); this.blockLimitPerInterval = dnConf.getBlockDeletionLimit(); conf.setFromObject(dnConf); - ContainerSet containerSet = new ContainerSet(1000); + ContainerSet containerSet = newContainerSet(); ContainerMetrics metrics = ContainerMetrics.create(conf); KeyValueHandler keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java index aa3ec32280fc..f92c89480a70 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.container.common; +import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNull; @@ -564,7 +565,7 @@ private void runBlockDeletingService(KeyValueHandler keyValueHandler) } private ContainerSet makeContainerSet() throws Exception { - ContainerSet containerSet = new ContainerSet(1000); + ContainerSet containerSet = newContainerSet(); KeyValueContainer container = new KeyValueContainer(newKvData(), conf); containerSet.addContainer(container); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java index 3d8932010e1d..1dcc3be6c337 100644 --- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaTwoBackwardsCompatibility.java @@ -24,6 +24,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.PENDING_DELETE_BLOCK_COUNT; import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.COMMIT_STAGE; import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.WRITE_STAGE; +import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; @@ -131,7 +132,7 @@ public void setup() throws Exception { blockManager = new BlockManagerImpl(conf); chunkManager = new FilePerBlockStrategy(true, blockManager); - containerSet = new ContainerSet(1000); + containerSet = newContainerSet(); keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, ContainerMetrics.create(conf), c -> { }); ozoneContainer = mock(OzoneContainer.class); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestStaleRecoveringContainerScrubbingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestStaleRecoveringContainerScrubbingService.java index 8e9ccf488160..59e662db01bc 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestStaleRecoveringContainerScrubbingService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestStaleRecoveringContainerScrubbingService.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.CLOSED; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.RECOVERING; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.UNHEALTHY; +import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.Mockito.anyList; import static org.mockito.Mockito.anyLong; @@ -145,8 +146,7 @@ private List createTestContainers( public void testScrubbingStaleRecoveringContainers( ContainerTestVersionInfo versionInfo) throws Exception { initVersionInfo(versionInfo); - ContainerSet containerSet = new ContainerSet(10); - containerSet.setClock(testClock); + ContainerSet containerSet = newContainerSet(10, testClock); StaleRecoveringContainerScrubbingService srcss = new StaleRecoveringContainerScrubbingService( 50, TimeUnit.MILLISECONDS, 10, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/ContainerImplTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/ContainerImplTestUtils.java new file mode 100644 index 000000000000..1e27e748d69c --- /dev/null +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/ContainerImplTestUtils.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.container.common.impl; + +import java.time.Clock; +import org.apache.hadoop.hdds.utils.db.InMemoryTestTable; + +/** + * Helper utility to test container impl. + */ +public final class ContainerImplTestUtils { + + private ContainerImplTestUtils() { + } + + public static ContainerSet newContainerSet() { + return newContainerSet(1000); + } + + public static ContainerSet newContainerSet(long recoveringTimeout) { + return ContainerSet.newRwContainerSet(new InMemoryTestTable<>(), recoveringTimeout); + } + + public static ContainerSet newContainerSet(long recoveringTimeout, Clock clock) { + return new ContainerSet(new InMemoryTestTable<>(), recoveringTimeout, clock); + } +} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java index e1b216a2796d..bb6aadae84dc 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.container.common.impl; +import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -81,7 +82,7 @@ public void testRandomChoosingPolicy(ContainerLayoutVersion layout) conf.set( ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY, RandomContainerDeletionChoosingPolicy.class.getName()); - containerSet = new ContainerSet(1000); + containerSet = newContainerSet(); int numContainers = 10; for (int i = 0; i < numContainers; i++) { @@ -142,7 +143,7 @@ public void testTopNOrderedChoosingPolicy(ContainerLayoutVersion layout) conf.set( ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY, TopNOrderedContainerDeletionChoosingPolicy.class.getName()); - containerSet = new ContainerSet(1000); + containerSet = newContainerSet(); int numContainers = 10; Random random = new Random(); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java index 3189a343987b..0a31b9746d57 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java @@ -22,6 +22,7 @@ import static 
org.apache.hadoop.ozone.container.ContainerTestHelper.getChunk; import static org.apache.hadoop.ozone.container.ContainerTestHelper.getData; import static org.apache.hadoop.ozone.container.ContainerTestHelper.setDataChecksum; +import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet; import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSameSchemaVersion; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -151,7 +152,7 @@ public static void shutdown() throws IOException { @BeforeEach public void setupPaths() throws IOException { - containerSet = new ContainerSet(1000); + containerSet = newContainerSet(); volumeSet = new MutableVolumeSet(DATANODE_UUID, conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); // Initialize volume directories. diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java index 84d44534525f..6d69f3db64bd 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.container.common.impl; +import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -66,7 +67,7 @@ private void setLayoutVersion(ContainerLayoutVersion layoutVersion) { public void testAddGetRemoveContainer(ContainerLayoutVersion layout) throws StorageContainerException { setLayoutVersion(layout); - ContainerSet containerSet = new ContainerSet(1000); + ContainerSet containerSet = newContainerSet(); long containerId = 100L; ContainerProtos.ContainerDataProto.State state = ContainerProtos .ContainerDataProto.State.CLOSED; @@ -155,7 +156,7 @@ public void testIteratorPerVolume(ContainerLayoutVersion layout) HddsVolume vol2 = mock(HddsVolume.class); when(vol2.getStorageID()).thenReturn("uuid-2"); - ContainerSet containerSet = new ContainerSet(1000); + ContainerSet containerSet = newContainerSet(); for (int i = 0; i < 10; i++) { KeyValueContainerData kvData = new KeyValueContainerData(i, layout, @@ -198,7 +199,7 @@ public void iteratorIsOrderedByScanTime(ContainerLayoutVersion layout) HddsVolume vol = mock(HddsVolume.class); when(vol.getStorageID()).thenReturn("uuid-1"); Random random = new Random(); - ContainerSet containerSet = new ContainerSet(1000); + ContainerSet containerSet = newContainerSet(); int containerCount = 50; for (int i = 0; i < containerCount; i++) { KeyValueContainerData kvData = new KeyValueContainerData(i, @@ -296,7 +297,7 @@ private static void assertContainerIds(int startId, int count, } private ContainerSet createContainerSet() throws StorageContainerException { - ContainerSet containerSet = new ContainerSet(1000); + ContainerSet containerSet = newContainerSet(); for (int i = FIRST_ID; i < FIRST_ID + 10; i++) { KeyValueContainerData kvData = new KeyValueContainerData(i, layoutVersion, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java index be568a075708..257c104ce79c 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java @@ -22,6 +22,7 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getContainerCommandResponse; import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.COMMIT_STAGE; +import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -129,7 +130,7 @@ public void testContainerCloseActionWhenFull( try { UUID scmId = UUID.randomUUID(); - ContainerSet containerSet = new ContainerSet(1000); + ContainerSet containerSet = newContainerSet(); StateContext context = ContainerTestUtils.getMockContext(dd, conf); KeyValueContainerData containerData = new KeyValueContainerData(1L, layout, @@ -264,7 +265,7 @@ public void testContainerCloseActionWhenVolumeFull( .thenReturn(Collections.singletonList(volumeBuilder.build())); try { UUID scmId = UUID.randomUUID(); - ContainerSet containerSet = new ContainerSet(1000); + ContainerSet containerSet = newContainerSet(); StateContext context = ContainerTestUtils.getMockContext(dd, conf); // create a 50 byte container KeyValueContainerData containerData = new KeyValueContainerData(1L, @@ -514,7 +515,7 @@ static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, OzoneConfiguration conf, TokenVerifier tokenVerifier) throws IOException { - ContainerSet containerSet = new ContainerSet(1000); + ContainerSet containerSet = newContainerSet(); VolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); volumeSet.getVolumesList().stream().forEach(v -> { diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java index 7b99928fa13b..6b0d05afe493 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java @@ -19,6 +19,7 @@ import static java.util.Collections.singletonMap; import static org.apache.hadoop.ozone.OzoneConsts.GB; +import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -89,7 +90,7 @@ private void init() throws Exception { pipelineID.getId().toString(), null); container = new KeyValueContainer(data, conf); - containerSet = new ContainerSet(1000); + containerSet = 
newContainerSet(); containerSet.addContainer(container); containerHandler = mock(Handler.class); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java index 8f84eb57516d..fe3872e2dbde 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteBlocksCommandHandler.java @@ -21,6 +21,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V1; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3; +import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.BLOCK_DELETE_COMMAND_WORKER_INTERVAL; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.BLOCK_DELETE_COMMAND_WORKER_INTERVAL_DEFAULT; import static org.apache.hadoop.ozone.container.common.statemachine.commandhandler.DeleteBlocksCommandHandler.DeleteBlockTransactionExecutionResult; @@ -110,7 +111,7 @@ private void setup() throws Exception { conf = new OzoneConfiguration(); layout = ContainerLayoutVersion.FILE_PER_BLOCK; ozoneContainer = mock(OzoneContainer.class); - containerSet = new ContainerSet(1000); + containerSet = newContainerSet(); volume1 = mock(HddsVolume.class); when(volume1.getStorageID()).thenReturn("uuid-1"); for (int i = 0; i <= 10; i++) { diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java index 60eea0ac3fb5..2c77b00bd56b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.container.common.volume; import static org.apache.commons.lang3.RandomStringUtils.randomAlphanumeric; +import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -264,7 +265,7 @@ public void testVolumeFailure() throws IOException { new DummyChecker(conf, new Timer(), 0); OzoneContainer ozoneContainer = mock(OzoneContainer.class); - ContainerSet conSet = new ContainerSet(20); + ContainerSet conSet = newContainerSet(20); when(ozoneContainer.getContainerSet()).thenReturn(conSet); String path = dir.getPath(); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java index 27e5087624c9..f55a515a5b72 100644 --- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java @@ -21,6 +21,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY; +import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -274,7 +275,7 @@ public void testVolumeSetInKeyValueHandler() throws Exception { volumeSet = new MutableVolumeSet(UUID.randomUUID().toString(), conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); try { - ContainerSet cset = new ContainerSet(1000); + ContainerSet cset = newContainerSet(); int[] interval = new int[1]; interval[0] = 2; ContainerMetrics metrics = new ContainerMetrics(interval); @@ -367,7 +368,7 @@ public void testDeleteContainer() throws IOException { final String clusterId = UUID.randomUUID().toString(); final String datanodeId = UUID.randomUUID().toString(); final ConfigurationSource conf = new OzoneConfiguration(); - final ContainerSet containerSet = new ContainerSet(1000); + final ContainerSet containerSet = newContainerSet(); final MutableVolumeSet volumeSet = mock(MutableVolumeSet.class); HddsVolume hddsVolume = new HddsVolume.Builder(testDir).conf(conf) @@ -461,7 +462,7 @@ public void testDeleteContainerTimeout() throws IOException { final String clusterId = UUID.randomUUID().toString(); final String datanodeId = UUID.randomUUID().toString(); final ConfigurationSource conf = new OzoneConfiguration(); - final ContainerSet containerSet = new ContainerSet(1000); + final ContainerSet containerSet = newContainerSet(); final MutableVolumeSet volumeSet = mock(MutableVolumeSet.class); final Clock clock = mock(Clock.class); long startTime = System.currentTimeMillis(); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java index 6e488ae2bcc2..e315ced9f48d 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java @@ -21,6 +21,7 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.RECOVERING; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State.UNHEALTHY; import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.createDbInstancesForTestIfNeeded; +import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -102,7 +103,7 @@ private void setup(ContainerTestVersionInfo versionInfo) throws Exception { Files.createDirectory(tempDir.resolve("volumeDir")).toFile(); this.conf = new OzoneConfiguration(); volumeSet = 
mock(MutableVolumeSet.class); - containerSet = new ContainerSet(1000); + containerSet = newContainerSet(); datanodeId = UUID.randomUUID(); hddsVolume = new HddsVolume.Builder(volumeDir @@ -266,7 +267,7 @@ public void testContainerReaderWithLoadException( setup(versionInfo); MutableVolumeSet volumeSet1; HddsVolume hddsVolume1; - ContainerSet containerSet1 = new ContainerSet(1000); + ContainerSet containerSet1 = newContainerSet(); File volumeDir1 = Files.createDirectory(tempDir.resolve("volumeDir" + 1)).toFile(); RoundRobinVolumeChoosingPolicy volumeChoosingPolicy1; @@ -316,7 +317,7 @@ public void testContainerReaderWithInvalidDbPath( setup(versionInfo); MutableVolumeSet volumeSet1; HddsVolume hddsVolume1; - ContainerSet containerSet1 = new ContainerSet(1000); + ContainerSet containerSet1 = newContainerSet(); File volumeDir1 = Files.createDirectory(tempDir.resolve("volumeDirDbDelete")).toFile(); RoundRobinVolumeChoosingPolicy volumeChoosingPolicy1; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java index dc5e91207367..c283d5a9840b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerImporter.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.container.replication; +import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet; import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.NO_COMPRESSION; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -87,7 +88,7 @@ void importSameContainerWhenAlreadyImport() throws Exception { KeyValueContainer container = new KeyValueContainer(containerData, conf); ContainerController controllerMock = mock(ContainerController.class); // create containerImporter object - ContainerSet containerSet = new ContainerSet(0); + ContainerSet containerSet = newContainerSet(0); containerSet.addContainer(container); MutableVolumeSet volumeSet = new MutableVolumeSet("test", conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); @@ -118,7 +119,7 @@ void importSameContainerWhenFirstInProgress() throws Exception { return container; }); // create containerImporter object - ContainerSet containerSet = new ContainerSet(0); + ContainerSet containerSet = newContainerSet(0); MutableVolumeSet volumeSet = new MutableVolumeSet("test", conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); ContainerImporter containerImporter = new ContainerImporter(conf, @@ -157,7 +158,7 @@ public void testInconsistentChecksumContainerShouldThrowError() throws Exception doNothing().when(containerData).setChecksumTo0ByteArray(); // create containerImporter object ContainerController controllerMock = mock(ContainerController.class); - ContainerSet containerSet = new ContainerSet(0); + ContainerSet containerSet = newContainerSet(0); MutableVolumeSet volumeSet = new MutableVolumeSet("test", conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); ContainerImporter containerImporter = spy(new ContainerImporter(conf, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java index 4adf9edc4f26..9822c07068d5 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcReplicationService.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.container.replication; import static org.apache.hadoop.ozone.OzoneConsts.GB; +import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet; import static org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand.toTarget; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -93,7 +94,7 @@ public void init() throws Exception { SecurityConfig secConf = new SecurityConfig(conf); - ContainerSet containerSet = new ContainerSet(1000); + ContainerSet containerSet = newContainerSet(); DatanodeDetails.Builder dn = DatanodeDetails.newBuilder().setUuid(UUID.randomUUID()) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java index df3462d644b7..b6517f4fead9 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java @@ -25,6 +25,7 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicationCommandPriority.LOW; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicationCommandPriority.NORMAL; +import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet; import static org.apache.hadoop.ozone.container.replication.AbstractReplicationTask.Status.DONE; import static org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand.fromSources; import static org.assertj.core.api.Assertions.assertThat; @@ -125,7 +126,7 @@ public class TestReplicationSupervisor { @BeforeEach public void setUp() throws Exception { clock = new TestClock(Instant.now(), ZoneId.systemDefault()); - set = new ContainerSet(1000); + set = newContainerSet(); DatanodeStateMachine stateMachine = mock(DatanodeStateMachine.class); context = new StateContext( new OzoneConfiguration(), diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java index be2c315198b7..226cb07c0cb2 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestSendContainerRequestHandler.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.container.replication; +import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet; import static 
org.apache.hadoop.ozone.container.replication.CopyContainerCompression.NO_COMPRESSION; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertInstanceOf; @@ -62,11 +63,11 @@ void setup() { void testReceiveDataForExistingContainer() throws Exception { long containerId = 1; // create containerImporter - ContainerSet containerSet = new ContainerSet(0); + ContainerSet containerSet = newContainerSet(0); MutableVolumeSet volumeSet = new MutableVolumeSet("test", conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); ContainerImporter containerImporter = new ContainerImporter(conf, - new ContainerSet(0), mock(ContainerController.class), volumeSet); + newContainerSet(0), mock(ContainerController.class), volumeSet); KeyValueContainerData containerData = new KeyValueContainerData(containerId, ContainerLayoutVersion.FILE_PER_BLOCK, 100, "test", "test"); // add container to container set diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java similarity index 97% rename from hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java rename to hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java index c60339a1a0fd..0432c7290635 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/InMemoryTestTable.java @@ -108,8 +108,7 @@ public List> getRangeKVs(KEY startKey, int count, @Override public List> getSequentialRangeKVs(KEY startKey, int count, KEY prefix, - MetadataKeyFilters.MetadataKeyFilter... filters) - throws IOException, IllegalArgumentException { + MetadataKeyFilters.MetadataKeyFilter... 
filters) { throw new UnsupportedOperationException(); } diff --git a/hadoop-hdds/tools/pom.xml b/hadoop-hdds/tools/pom.xml index 07ffa537bb04..a38886357d99 100644 --- a/hadoop-hdds/tools/pom.xml +++ b/hadoop-hdds/tools/pom.xml @@ -140,6 +140,12 @@ test-jar test + + org.apache.ozone + hdds-server-framework + test-jar + test + org.apache.ozone hdds-test-utils diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java index 30f426823806..4d9501887678 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/upgrade/TestUpgradeManager.java @@ -21,6 +21,7 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.COMMIT_STAGE; import static org.apache.hadoop.ozone.container.common.ContainerTestUtils.WRITE_STAGE; +import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet; import static org.apache.hadoop.ozone.container.common.states.endpoint.VersionEndpointTask.LOG; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -132,7 +133,7 @@ public void setup() throws Exception { return volumes.get(ii); }); - containerSet = new ContainerSet(1000); + containerSet = newContainerSet(); blockManager = new BlockManagerImpl(CONF); chunkManager = new FilePerBlockStrategy(true, blockManager); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java index 387f0db17cc8..662858a99553 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.container.common.transport.server.ratis; +import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet; import static org.apache.ozone.test.MetricsAsserts.assertCounter; import static org.apache.ozone.test.MetricsAsserts.getDoubleGauge; import static org.apache.ozone.test.MetricsAsserts.getMetrics; @@ -45,7 +46,6 @@ import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.RatisTestHelper; import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.Handler; import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi; @@ -182,7 +182,7 @@ static XceiverServerRatis newXceiverServerRatis( final ContainerDispatcher dispatcher = new TestContainerDispatcher(); return XceiverServerRatis.newXceiverServerRatis(null, dn, conf, dispatcher, - new ContainerController(new ContainerSet(1000), Maps.newHashMap()), + new ContainerController(newContainerSet(), Maps.newHashMap()), null, null); } diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java index ebb4cca97ef1..a5fea3dfd7a0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.container.metrics; +import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet; import static org.apache.ozone.test.MetricsAsserts.assertCounter; import static org.apache.ozone.test.MetricsAsserts.assertQuantileGauges; import static org.apache.ozone.test.MetricsAsserts.getMetrics; @@ -132,7 +133,7 @@ private static MutableVolumeSet createVolumeSet(DatanodeDetails dn, String path) } private HddsDispatcher createDispatcher(DatanodeDetails dd, VolumeSet volumeSet) { - ContainerSet containerSet = new ContainerSet(1000); + ContainerSet containerSet = newContainerSet(); StateContext context = ContainerTestUtils.getMockContext( dd, CONF); ContainerMetrics metrics = ContainerMetrics.create(CONF); @@ -241,7 +242,7 @@ private XceiverServerSpi newXceiverServerRatis(DatanodeDetails dn, MutableVolume final ContainerDispatcher dispatcher = createDispatcher(dn, volumeSet); return XceiverServerRatis.newXceiverServerRatis(null, dn, CONF, dispatcher, - new ContainerController(new ContainerSet(1000), Maps.newHashMap()), + new ContainerController(newContainerSet(), Maps.newHashMap()), null, null); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java index 9cc46eeb542b..fe83aa0881ca 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; +import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet; import static org.apache.ratis.rpc.SupportedRpcType.GRPC; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -129,7 +130,7 @@ static XceiverServerRatis newXceiverServerRatis( final ContainerDispatcher dispatcher = new TestContainerDispatcher(); return XceiverServerRatis.newXceiverServerRatis(null, dn, conf, dispatcher, - new ContainerController(new ContainerSet(1000), Maps.newHashMap()), + new ContainerController(newContainerSet(), Maps.newHashMap()), caClient, null); } @@ -186,7 +187,7 @@ static void runTestClientServer( private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, OzoneConfiguration conf) throws IOException { - ContainerSet containerSet = new ContainerSet(1000); + ContainerSet containerSet = newContainerSet(); conf.set(HDDS_DATANODE_DIR_KEY, Paths.get(testDir.toString(), "dfs", "data", "hdds", RandomStringUtils.randomAlphabetic(4)).toString()); diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java index b0c198df3b79..f78971af4f50 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java @@ -32,6 +32,7 @@ import static org.apache.hadoop.ozone.container.ContainerTestHelper.newPutBlockRequestBuilder; import static org.apache.hadoop.ozone.container.ContainerTestHelper.newReadChunkRequestBuilder; import static org.apache.hadoop.ozone.container.ContainerTestHelper.newWriteChunkRequestBuilder; +import static org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils.newContainerSet; import static org.apache.ratis.rpc.SupportedRpcType.GRPC; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -161,7 +162,7 @@ public void testClientServer() throws Exception { private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, OzoneConfiguration conf) throws IOException { - ContainerSet containerSet = new ContainerSet(1000); + ContainerSet containerSet = newContainerSet(); conf.set(HDDS_DATANODE_DIR_KEY, Paths.get(testDir.toString(), "dfs", "data", "hdds", RandomStringUtils.randomAlphabetic(4)).toString()); @@ -207,7 +208,7 @@ XceiverServerRatis newXceiverServerRatis( final ContainerDispatcher dispatcher = createDispatcher(dn, UUID.randomUUID(), conf); return XceiverServerRatis.newXceiverServerRatis(null, dn, conf, dispatcher, - new ContainerController(new ContainerSet(1000), Maps.newHashMap()), + new ContainerController(newContainerSet(), Maps.newHashMap()), caClient, null); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/ContainerCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/ContainerCommands.java index f8c7ceecbf30..7945c3bbfbf9 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/ContainerCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/ContainerCommands.java @@ -84,7 +84,7 @@ public class ContainerCommands extends AbstractSubcommand { public void loadContainersFromVolumes() throws IOException { OzoneConfiguration conf = getOzoneConf(); - ContainerSet containerSet = new ContainerSet(null, 1000, true); + ContainerSet containerSet = ContainerSet.newReadOnlyContainerSet(1000); ContainerMetrics metrics = ContainerMetrics.create(conf); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java index 452165f1c0f8..8d4a4e99af57 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java @@ -194,7 +194,7 @@ private void initializeReplicationSupervisor( WitnessedContainerMetadataStore referenceCountedDS = WitnessedContainerMetadataStoreImpl.get(conf); this.witnessedContainerMetadataStore = referenceCountedDS; - ContainerSet containerSet = new ContainerSet(referenceCountedDS.getContainerIdsTable(), 1000); + 
ContainerSet containerSet = ContainerSet.newRwContainerSet(referenceCountedDS.getContainerIdsTable(), 1000); ContainerMetrics metrics = ContainerMetrics.create(conf);
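
Reviewer note (illustrative, not part of the patch): after this change ContainerSet is no longer constructed directly; callers pick one of the new named factory methods, and tests go through the new ContainerImplTestUtils helper instead of the removed setClock() / readOnly-flag constructors. The sketch below summarizes the three call patterns. It is a minimal sketch only: the Table type parameters are written as a raw type because they are not visible in the hunks above, and ContainerImplTestUtils is test-scoped.

    import java.time.Clock;
    import org.apache.hadoop.hdds.utils.db.Table;
    import org.apache.hadoop.ozone.container.common.impl.ContainerImplTestUtils;
    import org.apache.hadoop.ozone.container.common.impl.ContainerSet;

    /** Sketch only: how callers obtain a ContainerSet after this patch. */
    final class ContainerSetUsageSketch {
      private ContainerSetUsageSketch() { }

      /** Datanode path (OzoneContainer): the table must be non-null, enforced by Objects.requireNonNull. */
      static ContainerSet forDatanode(Table containerIdsTable, long recoveringTimeoutMillis) {
        return ContainerSet.newRwContainerSet(containerIdsTable, recoveringTimeoutMillis);
      }

      /** Offline tooling (ozone debug container): no backing table, read-only view of the volumes. */
      static ContainerSet forOfflineInspection(long recoveringTimeoutMillis) {
        return ContainerSet.newReadOnlyContainerSet(recoveringTimeoutMillis);
      }

      /** Tests: in-memory table; pass a fixed Clock instead of the removed setClock(). */
      static ContainerSet forTest(Clock testClock) {
        return testClock == null
            ? ContainerImplTestUtils.newContainerSet()               // default 1000 ms recovering timeout
            : ContainerImplTestUtils.newContainerSet(10, testClock); // e.g. the stale-recovering scrubbing test
      }
    }

With the boolean readOnly flag and the mutable clock setter gone, the read-only case is explicit at the call site, the non-null table requirement for the read-write case is checked in the factory, and the clock is fixed at construction time.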