diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
index 68d2899f9feb..1178127e9e0d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
@@ -54,6 +54,7 @@
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.IO_EXCEPTION;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_ALGORITHM;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNABLE_TO_FIND_CHUNK;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNSUPPORTED_REQUEST;
 import static org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil.onFailure;
 
 import org.slf4j.Logger;
@@ -339,6 +340,17 @@ private static void checkSize(String of, long expected, long actual,
     }
   }
 
+  public static void limitReadSize(long len)
+      throws StorageContainerException {
+    if (len > OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE) {
+      String err = String.format(
+          "Oversize read. max: %d, actual: %d",
+          OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE, len);
+      LOG.error(err);
+      throw new StorageContainerException(err, UNSUPPORTED_REQUEST);
+    }
+  }
+
   private static StorageContainerException wrapInStorageContainerException(
       IOException e) {
     ContainerProtos.Result result = translate(e);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDummyImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDummyImpl.java
index 4cf0834dc487..8ea989946aec 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDummyImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDummyImpl.java
@@ -34,6 +34,8 @@
 
 import java.nio.ByteBuffer;
 
+import static org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils.limitReadSize;
+
 /**
  * Implementation of ChunkManager built for running performance tests.
 * Chunks are not written to disk, Reads are returned with zero-filled buffers
@@ -72,8 +74,10 @@ public void writeChunk(Container container, BlockID blockID, ChunkInfo info,
    */
   @Override
   public ChunkBuffer readChunk(Container container, BlockID blockID,
-      ChunkInfo info, DispatcherContext dispatcherContext) {
+      ChunkInfo info, DispatcherContext dispatcherContext)
+      throws StorageContainerException {
 
+    limitReadSize(info.getLen());
     // stats are handled in ChunkManagerImpl
     return ChunkBuffer.wrap(ByteBuffer.allocate((int) info.getLen()));
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java
index 338c59706fcd..715ce4c7ddab 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java
@@ -59,6 +59,7 @@
 import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_BLOCK;
 import static org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext.WriteChunkStage.COMMIT_DATA;
 import static org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil.onFailure;
+import static org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils.limitReadSize;
 import static org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils.validateChunkForOverwrite;
 import static org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils.verifyChunkFileExists;
 
@@ -152,6 +153,8 @@ public ChunkBuffer readChunk(Container container, BlockID blockID,
       return ChunkBuffer.wrap(ByteBuffer.wrap(new byte[0]));
     }
 
+    limitReadSize(info.getLen());
+
     KeyValueContainerData containerData = (KeyValueContainerData) container
         .getContainerData();
 
@@ -159,7 +162,7 @@ public ChunkBuffer readChunk(Container container, BlockID blockID,
 
     File chunkFile = getChunkFile(container, blockID, info);
 
-    long len = info.getLen();
+    int len = (int) info.getLen();
     long offset = info.getOffset();
     long bufferCapacity = ChunkManager.getBufferCapacityForChunkRead(info,
         defaultReadBufferCapacity);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java
index 5eefbda720c9..f2109cb745da 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java
@@ -56,6 +56,7 @@
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNSUPPORTED_REQUEST;
 import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_CHUNK;
+import static org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils.limitReadSize;
 
 /**
  * This class is for performing chunk related operations.
  */
@@ -208,6 +209,7 @@ public ChunkBuffer readChunk(Container container, BlockID blockID,
       throws StorageContainerException {
 
     checkLayoutVersion(container);
+    limitReadSize(info.getLen());
 
     KeyValueContainer kvContainer = (KeyValueContainer) container;
     KeyValueContainerData containerData = kvContainer.getContainerData();
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java
index ca9c7b83fd63..eb2e63c902c7 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.ozone.container.keyvalue.impl;
 
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
@@ -28,9 +30,14 @@
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
 
+import java.io.File;
+import java.io.IOException;
 import java.nio.ByteBuffer;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThrows;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -65,6 +72,32 @@ public void testWriteChunkIncorrectLength() {
     }
   }
 
+  @Test
+  public void testReadOversizeChunk() throws IOException {
+    // GIVEN
+    ChunkManager chunkManager = createTestSubject();
+    DispatcherContext dispatcherContext = getDispatcherContext();
+    KeyValueContainer container = getKeyValueContainer();
+    int tooLarge = OZONE_SCM_CHUNK_MAX_SIZE + 1;
+    byte[] array = RandomStringUtils.randomAscii(tooLarge).getBytes(UTF_8);
+    assertTrue(array.length >= tooLarge);
+
+    BlockID blockID = getBlockID();
+    ChunkInfo chunkInfo = new ChunkInfo(
+        String.format("%d.data.%d", blockID.getLocalID(), 0),
+        0, array.length);
+
+    // write chunk bypassing size limit
+    File chunkFile = getStrategy().getLayout()
+        .getChunkFile(getKeyValueContainerData(), blockID, chunkInfo);
+    FileUtils.writeByteArrayToFile(chunkFile, array);
+
+    // WHEN+THEN
+    assertThrows(StorageContainerException.class, () ->
+        chunkManager.readChunk(container, blockID, chunkInfo, dispatcherContext)
+    );
+  }
+
   @Test
   public void testWriteChunkStageCombinedData() throws Exception {
     // GIVEN
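
Reviewer note: a minimal sketch of how the new guard behaves in isolation. ChunkUtils.limitReadSize, OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE, UNSUPPORTED_REQUEST, and StorageContainerException.getResult() come from the patch or existing Ozone code; the demo class itself is hypothetical and not part of this change.

    import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
    import org.apache.hadoop.ozone.OzoneConsts;
    import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils;

    // Hypothetical demo, not part of the patch: exercises the guard directly.
    public class LimitReadSizeDemo {
      public static void main(String[] args) {
        try {
          // A read of exactly the max chunk size is still allowed.
          ChunkUtils.limitReadSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE);
          // One byte over the limit is rejected.
          ChunkUtils.limitReadSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE + 1L);
        } catch (StorageContainerException e) {
          // Expected: UNSUPPORTED_REQUEST with the "Oversize read" message.
          System.out.println(e.getResult() + ": " + e.getMessage());
        }
      }
    }

The guard runs before any buffer is allocated in each readChunk implementation, so a malformed or malicious read request is rejected instead of triggering an oversized allocation on the datanode.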