@@ -54,6 +54,7 @@
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.IO_EXCEPTION;
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_ALGORITHM;
import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNABLE_TO_FIND_CHUNK;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNSUPPORTED_REQUEST;
import static org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil.onFailure;

import org.slf4j.Logger;
@@ -339,6 +340,17 @@ private static void checkSize(String of, long expected, long actual,
}
}

+  public static void limitReadSize(long len)
+      throws StorageContainerException {
+    if (len > OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE) {
+      String err = String.format(
+          "Oversize read. max: %d, actual: %d",
+          OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE, len);
+      LOG.error(err);
+      throw new StorageContainerException(err, UNSUPPORTED_REQUEST);
+    }
+  }
+
private static StorageContainerException wrapInStorageContainerException(
IOException e) {
ContainerProtos.Result result = translate(e);
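Note for reviewers: the new ChunkUtils.limitReadSize helper rejects any read whose declared length exceeds the chunk-size cap, before any buffer is sized from that length. Below is a minimal, self-contained sketch of the behavior; CHUNK_MAX_SIZE and OversizeReadException are stand-ins for OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE and StorageContainerException with the UNSUPPORTED_REQUEST result, and the 32 MB value is an assumption, not taken from this patch.

    // Sketch only: stand-in types and an assumed 32 MB cap.
    public class LimitReadSizeSketch {

      static final long CHUNK_MAX_SIZE = 32L * 1024 * 1024;

      static class OversizeReadException extends Exception {
        OversizeReadException(String msg) { super(msg); }
      }

      static void limitReadSize(long len) throws OversizeReadException {
        if (len > CHUNK_MAX_SIZE) {
          throw new OversizeReadException(String.format(
              "Oversize read. max: %d, actual: %d", CHUNK_MAX_SIZE, len));
        }
      }

      public static void main(String[] args) throws Exception {
        limitReadSize(CHUNK_MAX_SIZE);        // at the cap: allowed
        try {
          limitReadSize(CHUNK_MAX_SIZE + 1);  // above the cap: rejected early
        } catch (OversizeReadException e) {
          System.out.println("rejected: " + e.getMessage());
        }
      }
    }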
@@ -34,6 +34,8 @@

import java.nio.ByteBuffer;

+import static org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils.limitReadSize;
+
/**
* Implementation of ChunkManager built for running performance tests.
* Chunks are not written to disk, Reads are returned with zero-filled buffers
@@ -72,8 +74,10 @@ public void writeChunk(Container container, BlockID blockID, ChunkInfo info,
*/
@Override
public ChunkBuffer readChunk(Container container, BlockID blockID,
-      ChunkInfo info, DispatcherContext dispatcherContext) {
+      ChunkInfo info, DispatcherContext dispatcherContext)
+      throws StorageContainerException {

+    limitReadSize(info.getLen());
// stats are handled in ChunkManagerImpl
return ChunkBuffer.wrap(ByteBuffer.allocate((int) info.getLen()));
}
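Note for reviewers: this dummy implementation sizes its zero-filled buffer directly from info.getLen(), so the guard matters even on the test-only path: an unchecked length drives an arbitrarily large allocation, and a value above Integer.MAX_VALUE silently truncates in the (int) cast. A hypothetical demonstration of that narrowing, independent of Ozone:

    public class NarrowingCastDemo {
      public static void main(String[] args) {
        long declaredLen = (4L << 30) + 16; // 4 GiB + 16 bytes, request-supplied
        int truncated = (int) declaredLen;  // keeps only the low 32 bits
        System.out.println(truncated);      // prints 16, not 4294967312
        // ByteBuffer.allocate(truncated) would build a 16-byte buffer for a
        // 4 GiB read; limitReadSize rejects such lengths before this point.
      }
    }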
@@ -59,6 +59,7 @@
import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_BLOCK;
import static org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext.WriteChunkStage.COMMIT_DATA;
import static org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil.onFailure;
+import static org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils.limitReadSize;
import static org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils.validateChunkForOverwrite;
import static org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils.verifyChunkFileExists;

@@ -152,14 +153,16 @@ public ChunkBuffer readChunk(Container container, BlockID blockID,
return ChunkBuffer.wrap(ByteBuffer.wrap(new byte[0]));
}

+    limitReadSize(info.getLen());
+
KeyValueContainerData containerData = (KeyValueContainerData) container
.getContainerData();

HddsVolume volume = containerData.getVolume();

File chunkFile = getChunkFile(container, blockID, info);

-    long len = info.getLen();
+    int len = (int) info.getLen();
long offset = info.getOffset();
long bufferCapacity = ChunkManager.getBufferCapacityForChunkRead(info,
defaultReadBufferCapacity);
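Ordering note: in this hunk the narrowing cast int len = (int) info.getLen() is safe only because limitReadSize(info.getLen()) has already bounded the value by OZONE_SCM_CHUNK_MAX_SIZE, which fits in an int. If the guard were moved below the cast, the truncation shown in the sketch above would reappear. A defensive alternative (hypothetical, not what the patch does) would be Math.toIntExact(info.getLen()), which throws ArithmeticException on overflow instead of truncating.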
@@ -56,6 +56,7 @@

import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNSUPPORTED_REQUEST;
import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_CHUNK;
+import static org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils.limitReadSize;

/**
* This class is for performing chunk related operations.
@@ -208,6 +209,7 @@ public ChunkBuffer readChunk(Container container, BlockID blockID,
throws StorageContainerException {

checkLayoutVersion(container);
+    limitReadSize(info.getLen());

KeyValueContainer kvContainer = (KeyValueContainer) container;
KeyValueContainerData containerData = kvContainer.getContainerData();
@@ -17,6 +17,8 @@
*/
package org.apache.hadoop.ozone.container.keyvalue.impl;

+import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
@@ -28,9 +30,14 @@
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;

+import java.io.File;
+import java.io.IOException;
import java.nio.ByteBuffer;

+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThrows;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

@@ -65,6 +72,32 @@ public void testWriteChunkIncorrectLength() {
}
}

+  @Test
+  public void testReadOversizeChunk() throws IOException {
+    // GIVEN
+    ChunkManager chunkManager = createTestSubject();
+    DispatcherContext dispatcherContext = getDispatcherContext();
+    KeyValueContainer container = getKeyValueContainer();
+    int tooLarge = OZONE_SCM_CHUNK_MAX_SIZE + 1;
+    byte[] array = RandomStringUtils.randomAscii(tooLarge).getBytes(UTF_8);
+    assertTrue(array.length >= tooLarge);
+
+    BlockID blockID = getBlockID();
+    ChunkInfo chunkInfo = new ChunkInfo(
+        String.format("%d.data.%d", blockID.getLocalID(), 0),
+        0, array.length);
+
+    // write chunk bypassing size limit
+    File chunkFile = getStrategy().getLayout()
+        .getChunkFile(getKeyValueContainerData(), blockID, chunkInfo);
+    FileUtils.writeByteArrayToFile(chunkFile, array);
+
+    // WHEN+THEN
+    assertThrows(StorageContainerException.class, () ->
+        chunkManager.readChunk(container, blockID, chunkInfo, dispatcherContext)
+    );
+  }
+
@Test
public void testWriteChunkStageCombinedData() throws Exception {
// GIVEN
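Note on the new test: it deliberately writes the oversize chunk straight to disk with FileUtils.writeByteArrayToFile, bypassing the write path and its own size check, so the read-path guard is exercised in isolation. A possible tightening (hypothetical; assumes StorageContainerException exposes getResult()) would assert the error code as well as the exception type:

    StorageContainerException e = assertThrows(StorageContainerException.class,
        () -> chunkManager.readChunk(container, blockID, chunkInfo,
            dispatcherContext));
    assertEquals(ContainerProtos.Result.UNSUPPORTED_REQUEST, e.getResult());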