Merged
17 commits
9939e0c
HDDS-10231. ContainerStateManager should not finalize OPEN containers…
nandakumar131 Jan 30, 2024
8068518
HDDS-10367. Fix possible NPE in listKeysLight, listStatus, listStatus…
ivanzlenko Feb 28, 2024
44a075f
HDDS-10472. Audit log should include EC replication config (#6338)
adoroszlai Mar 6, 2024
2f10325
HDDS-10503. Bump jgrapht to 1.4.0 (#6364)
ivanzlenko Mar 11, 2024
7001cbb
HDDS-10526. Bump follow-redirects to 1.15.6 (#6382)
dependabot[bot] Mar 15, 2024
ac9ace6
HDDS-10554. Bump Zookeeper to 3.8.4 (#6407)
adoroszlai Mar 20, 2024
8d9b19e
HDDS-10566. Bump commons-configuration2 to 2.10.1 (#6417)
dependabot[bot] Mar 22, 2024
f921ad8
HDDS-10562. Fix infinite loop in ozone fs -ls /volume (#6416)
symious Mar 24, 2024
030a43a
HDDS-10583. Thread name prefix in ReplicationSupervisor is null (#6430)
adoroszlai Mar 25, 2024
b2107b3
HDDS-10547. Fix shared buffer for datanode checksum calculation (#6402)
Cyrill Mar 25, 2024
f7430a5
HDDS-10588. Bump hadoop-shaded-guava to 1.2.0 (#6440)
vtutrinov Mar 27, 2024
d58acad
HDDS-10594. Update file encryption info when overwriting key (#6445)
ChenSammi Mar 28, 2024
48567dd
HDDS-10600. Bump nimbus-jose-jwt to 9.37.2 (#6454)
vtutrinov Mar 28, 2024
de8d43c
HDDS-10524. [Snapshot] Invalidate the cache entry from snapshotInfoTa…
hemantk-12 Mar 27, 2024
42e2de4
HDDS-10408. NPE causes OM crash in Snapshot Purge request (#6250)
aswinshakil Feb 22, 2024
27b2aaa
HDDS-10475. Refine audit logging for bucket creation (#6366)
tanvipenumudy Mar 12, 2024
8fdba8e
HDDS-10460. Refine audit logging for bucket property update operation…
tanvipenumudy Mar 6, 2024
@@ -154,6 +154,14 @@ public String getReplication() {
+ chunkKB();
}

/** Similar to {@link #getReplication()}, but operates on the proto structure without any validation. */
public static String toString(HddsProtos.ECReplicationConfig proto) {
return proto.getCodec() + EC_REPLICATION_PARAMS_DELIMITER
+ proto.getData() + EC_REPLICATION_PARAMS_DELIMITER
+ proto.getParity() + EC_REPLICATION_PARAMS_DELIMITER
+ proto.getEcChunkSize();
}

public HddsProtos.ECReplicationConfig toProto() {
return HddsProtos.ECReplicationConfig.newBuilder()
.setData(data)
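Note on the new helper above: the static toString(HddsProtos.ECReplicationConfig) renders the proto fields in the same codec/data/parity/chunk-size order as getReplication(), but skips validation, so it is safe to call from audit logging (HDDS-10472). A minimal illustrative sketch, assuming an RS 3+2 config with a 1 MiB EC chunk size and a "-" delimiter; the concrete values are hypothetical and the builder setters follow the proto definition shown in toProto() below:

HddsProtos.ECReplicationConfig proto = HddsProtos.ECReplicationConfig.newBuilder()
    .setCodec("rs")
    .setData(3)
    .setParity(2)
    .setEcChunkSize(1024 * 1024)
    .build();

// With EC_REPLICATION_PARAMS_DELIMITER equal to "-", this yields "rs-3-2-1048576".
String audited = ECReplicationConfig.toString(proto);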
@@ -74,7 +74,10 @@ public enum HealthState {
"OpenUnhealthyContainers"),
QUASI_CLOSED_STUCK(
"Containers QuasiClosed with insufficient datanode origins",
"StuckQuasiClosedContainers");
"StuckQuasiClosedContainers"),
OPEN_WITHOUT_PIPELINE(
"Containers in OPEN state without any healthy Pipeline",
"OpenContainersWithoutPipeline");

private String description;
private String metricName;
@@ -112,6 +112,7 @@ public void testJsonOutput() throws IOException {
assertEquals(0, stats.get("EMPTY").longValue());
assertEquals(0, stats.get("OPEN_UNHEALTHY").longValue());
assertEquals(0, stats.get("QUASI_CLOSED_STUCK").longValue());
assertEquals(0, stats.get("OPEN_WITHOUT_PIPELINE").longValue());

JsonNode samples = json.get("samples");
assertEquals(ARRAY, samples.get("UNDER_REPLICATED").getNodeType());
@@ -777,7 +777,7 @@ private void validateChunkChecksumData(ChunkBuffer data, ChunkInfo info)
throws StorageContainerException {
if (validateChunkChecksumData) {
try {
Checksum.verifyChecksum(data, info.getChecksumData(), 0);
Checksum.verifyChecksum(data.duplicate(data.position(), data.limit()), info.getChecksumData(), 0);
} catch (OzoneChecksumException ex) {
throw ChunkUtils.wrapInStorageContainerException(ex);
}
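Background for the change above: verifying a checksum consumes bytes from the buffer, and because the same buffer is later handed to the chunk writer (HDDS-10547), reading it in place corrupted the subsequent write. Duplicating the buffer gives the checksum code an independent position and limit. A small self-contained sketch of the java.nio behaviour this relies on (plain ByteBuffer here; ChunkBuffer#duplicate(position, limit) is assumed to behave analogously):

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class SharedBufferSketch {
  public static void main(String[] args) {
    ByteBuffer shared = ByteBuffer.wrap("chunk-bytes".getBytes(StandardCharsets.UTF_8));

    // The duplicate shares content with "shared" but keeps its own position/limit.
    ByteBuffer checksumView = shared.duplicate();
    while (checksumView.hasRemaining()) {
      checksumView.get();                         // e.g. feed a checksum calculator
    }

    // The original buffer is untouched, so the full chunk can still be written out.
    System.out.println(shared.remaining());       // 11
    System.out.println(checksumView.remaining()); // 0
  }
}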
@@ -877,9 +877,9 @@ ContainerCommandResponseProto handlePutSmallFile(

// chunks will be committed as a part of handling putSmallFile
// here. There is no need to maintain this info in openContainerBlockMap.
validateChunkChecksumData(data, chunkInfo);
chunkManager
.writeChunk(kvContainer, blockID, chunkInfo, data, dispatcherContext);
validateChunkChecksumData(data, chunkInfo);
chunkManager.finishWriteChunks(kvContainer, blockData);

List<ContainerProtos.ChunkInfo> chunks = new LinkedList<>();
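On the reordering in handlePutSmallFile above: validating the checksum before chunkManager.writeChunk means a corrupted PutSmallFile payload is rejected before any bytes reach disk, rather than after the chunk has already been persisted. A hedged sketch of the resulting flow (identifiers taken from the diff; error handling elided):

// 1. Fail fast on a bad checksum: nothing has been written yet.
validateChunkChecksumData(data, chunkInfo);
// 2. Only then persist the chunk and finalize the block.
chunkManager.writeChunk(kvContainer, blockID, chunkInfo, data, dispatcherContext);
chunkManager.finishWriteChunks(kvContainer, blockData);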
@@ -106,7 +106,6 @@ public static class Builder {
private Clock clock;
private IntConsumer executorThreadUpdater = threadCount -> {
};
private String threadNamePrefix;

public Builder clock(Clock newClock) {
clock = newClock;
@@ -138,11 +137,6 @@ public Builder executorThreadUpdater(IntConsumer newUpdater) {
return this;
}

public Builder threadNamePrefix(String threadPrefix) {
this.threadNamePrefix = threadPrefix;
return this;
}

public ReplicationSupervisor build() {
if (replicationConfig == null || datanodeConfig == null) {
ConfigurationSource conf = new OzoneConfiguration();
@@ -162,6 +156,7 @@ public ReplicationSupervisor build() {
if (executor == null) {
LOG.info("Initializing replication supervisor with thread count = {}",
replicationConfig.getReplicationMaxStreams());
String threadNamePrefix = context != null ? context.getThreadNamePrefix() : "";
ThreadFactory threadFactory = new ThreadFactoryBuilder()
.setDaemon(true)
.setNameFormat(threadNamePrefix + "ContainerReplicationThread-%d")
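Context for the Builder change above: the thread-name prefix is no longer a builder field that callers can forget to set; build() now derives it from the StateContext and falls back to an empty string, which avoids the literal "null" prefix reported in HDDS-10583. A small runnable sketch of the naming behaviour, using Guava's ThreadFactoryBuilder as in the production code (the contextPrefix value is illustrative):

import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.util.concurrent.ThreadFactory;

public class ReplicationThreadNameSketch {
  public static void main(String[] args) {
    String contextPrefix = null;  // e.g. no StateContext available in a unit test
    String threadNamePrefix = contextPrefix != null ? contextPrefix : "";

    ThreadFactory factory = new ThreadFactoryBuilder()
        .setDaemon(true)
        .setNameFormat(threadNamePrefix + "ContainerReplicationThread-%d")
        .build();

    // Prints "ContainerReplicationThread-0"; without the fallback the name would
    // have been "nullContainerReplicationThread-0".
    System.out.println(factory.newThread(() -> { }).getName());
  }
}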
@@ -21,6 +21,7 @@
import com.google.common.collect.Maps;
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.client.BlockID;
@@ -40,13 +41,16 @@
import org.apache.hadoop.hdds.security.token.TokenVerifier;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.common.Checksum;
import org.apache.hadoop.ozone.common.ChecksumData;
import org.apache.hadoop.ozone.common.OzoneChecksumException;
import org.apache.hadoop.ozone.common.utils.BufferUtils;
import org.apache.hadoop.ozone.container.ContainerTestHelper;
import org.apache.hadoop.ozone.container.common.ContainerTestUtils;
import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
import org.apache.hadoop.ozone.container.common.interfaces.Container;
import org.apache.hadoop.ozone.container.common.interfaces.Handler;
import org.apache.hadoop.ozone.container.common.report.IncrementalReportSender;
import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext.Op;
@@ -170,6 +174,76 @@ public void testContainerCloseActionWhenFull() throws IOException {
}
}

@Test
public void testSmallFileChecksum() throws IOException {
String testDirPath = GenericTestUtils.getTempPath(
TestHddsDispatcher.class.getSimpleName());
try {
UUID scmId = UUID.randomUUID();
OzoneConfiguration conf = new OzoneConfiguration();
conf.set(HDDS_DATANODE_DIR_KEY, testDirPath);
conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDirPath);
DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class);
dnConf.setChunkDataValidationCheck(true);
conf.setFromObject(dnConf);
DatanodeDetails dd = randomDatanodeDetails();
HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf);

ContainerCommandResponseProto smallFileResponse =
hddsDispatcher.dispatch(newPutSmallFile(1L, 1L), null);

assertEquals(ContainerProtos.Result.SUCCESS, smallFileResponse.getResult());
} finally {
ContainerMetrics.remove();
FileUtils.deleteDirectory(new File(testDirPath));
}
}

@Test
public void testWriteChunkChecksum() throws IOException {
String testDirPath = GenericTestUtils.getTempPath(
TestHddsDispatcher.class.getSimpleName());
try {
UUID scmId = UUID.randomUUID();
OzoneConfiguration conf = new OzoneConfiguration();
conf.set(HDDS_DATANODE_DIR_KEY, testDirPath);
conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDirPath);
DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class);
dnConf.setChunkDataValidationCheck(true);
conf.setFromObject(dnConf);
DatanodeDetails dd = randomDatanodeDetails();
HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf);
//Send a few WriteChunkRequests
ContainerCommandResponseProto response;
ContainerCommandRequestProto writeChunkRequest0 = getWriteChunkRequest0(dd.getUuidString(), 1L, 1L, 0);
hddsDispatcher.dispatch(writeChunkRequest0, null);
hddsDispatcher.dispatch(getWriteChunkRequest0(dd.getUuidString(), 1L, 1L, 1), null);
response = hddsDispatcher.dispatch(getWriteChunkRequest0(dd.getUuidString(), 1L, 1L, 2), null);

assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
// Send Read Chunk request for written chunk.
response =
hddsDispatcher.dispatch(getReadChunkRequest(writeChunkRequest0), null);
assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());

ByteString responseData = BufferUtils.concatByteStrings(
response.getReadChunk().getDataBuffers().getBuffersList());
assertEquals(writeChunkRequest0.getWriteChunk().getData(),
responseData);

// Test checksum on Read:
final DispatcherContext context = DispatcherContext
.newBuilder(DispatcherContext.Op.READ_STATE_MACHINE_DATA)
.build();
response =
hddsDispatcher.dispatch(getReadChunkRequest(writeChunkRequest0), context);
assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
} finally {
ContainerMetrics.remove();
FileUtils.deleteDirectory(new File(testDirPath));
}
}

@Test
public void testContainerCloseActionWhenVolumeFull() throws Exception {
String testDir = GenericTestUtils.getTempPath(
@@ -527,6 +601,84 @@ private ContainerCommandRequestProto getWriteChunkRequest(
.build();
}

static ChecksumData checksum(ByteString data) {
try {
return new Checksum(ContainerProtos.ChecksumType.CRC32, 256)
.computeChecksum(data.asReadOnlyByteBuffer());
} catch (OzoneChecksumException e) {
throw new IllegalStateException(e);
}
}

private ContainerCommandRequestProto getWriteChunkRequest0(
String datanodeId, Long containerId, Long localId, int chunkNum) {
final int lenOfBytes = 32;
ByteString chunkData = ByteString.copyFrom(RandomUtils.nextBytes(lenOfBytes));

ContainerProtos.ChunkInfo chunk = ContainerProtos.ChunkInfo
.newBuilder()
.setChunkName(
DigestUtils.md5Hex("dummy-key") + "_stream_"
+ containerId + "_chunk_" + localId)
.setOffset((long) chunkNum * lenOfBytes)
.setLen(lenOfBytes)
.setChecksumData(checksum(chunkData).getProtoBufMessage())
.build();

WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto
.newBuilder()
.setBlockID(new BlockID(containerId, localId)
.getDatanodeBlockIDProtobuf())
.setChunkData(chunk)
.setData(chunkData);

return ContainerCommandRequestProto
.newBuilder()
.setContainerID(containerId)
.setCmdType(ContainerProtos.Type.WriteChunk)
.setDatanodeUuid(datanodeId)
.setWriteChunk(writeChunkRequest)
.build();
}

static ContainerCommandRequestProto newPutSmallFile(Long containerId, Long localId) {
ByteString chunkData = ByteString.copyFrom(RandomUtils.nextBytes(32));
return newPutSmallFile(new BlockID(containerId, localId), chunkData);
}

static ContainerCommandRequestProto newPutSmallFile(
BlockID blockID, ByteString data) {
final ContainerProtos.BlockData.Builder blockData
= ContainerProtos.BlockData.newBuilder()
.setBlockID(blockID.getDatanodeBlockIDProtobuf());
final ContainerProtos.PutBlockRequestProto.Builder putBlockRequest
= ContainerProtos.PutBlockRequestProto.newBuilder()
.setBlockData(blockData);
final ContainerProtos.KeyValue keyValue = ContainerProtos.KeyValue.newBuilder()
.setKey("OverWriteRequested")
.setValue("true")
.build();
final ContainerProtos.ChunkInfo chunk = ContainerProtos.ChunkInfo.newBuilder()
.setChunkName(blockID.getLocalID() + "_chunk")
.setOffset(0)
.setLen(data.size())
.addMetadata(keyValue)
.setChecksumData(checksum(data).getProtoBufMessage())
.build();
final ContainerProtos.PutSmallFileRequestProto putSmallFileRequest
= ContainerProtos.PutSmallFileRequestProto.newBuilder()
.setChunkInfo(chunk)
.setBlock(putBlockRequest)
.setData(data)
.build();
return ContainerCommandRequestProto.newBuilder()
.setCmdType(ContainerProtos.Type.PutSmallFile)
.setContainerID(blockID.getContainerID())
.setDatanodeUuid(UUID.randomUUID().toString())
.setPutSmallFile(putSmallFileRequest)
.build();
}

/**
* Creates container read chunk request using input container write chunk
* request.
20 changes: 20 additions & 0 deletions hadoop-hdds/hadoop-dependency-client/pom.xml
@@ -39,10 +39,18 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>hadoop-common</artifactId>
<version>${hadoop.version}</version>
<exclusions>
<exclusion>
<groupId>com.nimbusds</groupId>
<artifactId>nimbus-jose-jwt</artifactId>
</exclusion>
<exclusion>
<groupId>org.xerial.snappy</groupId>
<artifactId>snappy-java</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.hadoop.thirdparty</groupId>
<artifactId>hadoop-shaded-guava</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-annotations</artifactId>
@@ -194,12 +202,24 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
</exclusions>

</dependency>
<dependency>
<groupId>org.apache.hadoop.thirdparty</groupId>
<artifactId>hadoop-shaded-guava</artifactId>
</dependency>
<dependency>
<groupId>com.nimbusds</groupId>
<artifactId>nimbus-jose-jwt</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<version>${hadoop.version}</version>
<scope>compile</scope>
<exclusions>
<exclusion>
<groupId>org.apache.hadoop.thirdparty</groupId>
<artifactId>hadoop-shaded-guava</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
20 changes: 20 additions & 0 deletions hadoop-hdds/hadoop-dependency-server/pom.xml
@@ -39,10 +39,18 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>hadoop-common</artifactId>
<version>${hadoop.version}</version>
<exclusions>
<exclusion>
<groupId>com.nimbusds</groupId>
<artifactId>nimbus-jose-jwt</artifactId>
</exclusion>
<exclusion>
<groupId>org.xerial.snappy</groupId>
<artifactId>snappy-java</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.hadoop.thirdparty</groupId>
<artifactId>hadoop-shaded-guava</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.curator</groupId>
<artifactId>*</artifactId>
@@ -101,6 +109,14 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop.thirdparty</groupId>
<artifactId>hadoop-shaded-guava</artifactId>
</dependency>
<dependency>
<groupId>com.nimbusds</groupId>
<artifactId>nimbus-jose-jwt</artifactId>
</dependency>
<dependency>
<!-- commons-cli is required by DFSUtil.addPBProtocol -->
<groupId>commons-cli</groupId>
@@ -112,6 +128,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<version>${hadoop.version}</version>
<scope>compile</scope>
<exclusions>
<exclusion>
<groupId>org.apache.hadoop.thirdparty</groupId>
<artifactId>hadoop-shaded-guava</artifactId>
</exclusion>
<exclusion>
<groupId>io.netty</groupId>
<artifactId>*</artifactId>
5 changes: 5 additions & 0 deletions hadoop-hdds/rocksdb-checkpoint-differ/pom.xml
@@ -77,6 +77,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
<artifactId>mockito-core</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-junit-jupiter</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.ozone</groupId>
<artifactId>hdds-rocks-native</artifactId>