Merged
Changes from all commits
17 commits
163dc8b
HDDS-10780. NullPointerException in watchForCommit (#6627)
duongkame May 3, 2024
9d1ad1a
HDDS-10777. S3 Gateway error when parsing XML concurrently (#6609)
guohao-rosicky May 8, 2024
995aacd
HDDS-10918. NPE in OM when leader transfers (#6735)
ChenSammi Jun 1, 2024
dd7d1ff
HDDS-11136. Some containers affected by HDDS-8129 may still be in the…
siddhantsangwan Jul 26, 2024
e04bcdf
HDDS-11223. Fix iteration over ChunkBufferImplWithByteBufferList (#6999)
Cyrill Jul 28, 2024
d2bce4d
HDDS-11436. Minor update in Recon API handling. (#7178)
ArafatKhan2198 Sep 10, 2024
39c175b
HDDS-11472. Avoid recreating external access authorizer on OM state r…
devabhishekpal Sep 30, 2024
6bb1405
HDDS-11504. Update Ratis to 3.1.1. (#7257)
jojochuang Oct 1, 2024
a65d1da
HDDS-11536. Bump macOS runner version to macos-13 (#7279)
adoroszlai Oct 6, 2024
5c250a6
HDDS-10480. Avoid proto2 ByteString.toByteArray() calls. (#6342)
szetszwo Mar 8, 2024
06a3d1f
HDDS-10465. Change ozone.client.bytes.per.checksum default to 16KB (#…
ChenSammi May 6, 2024
9655a7b
HDDS-11482. EC Checksum throws IllegalArgumentException because the b…
aswinshakil Oct 10, 2024
1822d2a
HDDS-11498. Improve SCM deletion efficiency. (#7249)
slfan1989 Oct 17, 2024
67a869e
HDDS-11570. Fix HDDS Docs build failure with Hugo v0.135.0 (#7337)
scolley31 Oct 20, 2024
6425679
HDDS-11414. Key listing for FSO buckets fails with forward client (#7…
tanvipenumudy Sep 9, 2024
adcc577
HDDS-10168. Add Ozone 1.4.0 to compatibility acceptance tests (#6040)
adoroszlai Jan 21, 2024
b28b416
HDDS-11333. Avoid hard-coded current version in upgrade/xcompat tests…
adoroszlai Aug 23, 2024
2 changes: 1 addition & 1 deletion .github/workflows/ci.yml
@@ -143,7 +143,7 @@ jobs:
        include:
          - os: ubuntu-20.04
          - java: 8
-           os: macos-12
+           os: macos-13
      fail-fast: false
    runs-on: ${{ matrix.os }}
    steps:
@@ -170,13 +170,13 @@ public enum ChecksumCombineMode {
  private String checksumType = ChecksumType.CRC32.name();

  @Config(key = "bytes.per.checksum",
-     defaultValue = "1MB",
+     defaultValue = "16KB",
      type = ConfigType.SIZE,
      description = "Checksum will be computed for every bytes per checksum "
          + "number of bytes and stored sequentially. The minimum value for "
-         + "this config is 16KB.",
+         + "this config is 8KB.",
      tags = ConfigTag.CLIENT)
-  private int bytesPerChecksum = 1024 * 1024;
+  private int bytesPerChecksum = 16 * 1024;

  @Config(key = "verify.checksum",
      defaultValue = "true",
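
A hedged sketch of overriding the new default from client code (the demo class is hypothetical; the config key comes from HDDS-10465):

  import org.apache.hadoop.hdds.conf.OzoneConfiguration;

  public class ChecksumConfigDemo {
    public static void main(String[] args) {
      OzoneConfiguration conf = new OzoneConfiguration();
      // Restore the pre-change 1MB value for workloads that prefer fewer,
      // larger checksums; per the updated description, 8KB is now the floor.
      conf.set("ozone.client.bytes.per.checksum", "1MB");
      System.out.println(conf.get("ozone.client.bytes.per.checksum"));
    }
  }
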
@@ -149,7 +149,8 @@ public long updateCommitInfosMap(
    } else {
      stream = commitInfoProtos.stream().map(proto -> commitInfoMap
          .computeIfPresent(RatisHelper.toDatanodeId(proto.getServer()),
-             (address, index) -> proto.getCommitIndex()));
+             (address, index) -> proto.getCommitIndex()))
+         .filter(Objects::nonNull);
    }
    return stream.mapToLong(Long::longValue).min().orElse(0);
  }
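
The NPE fixed by HDDS-10780 stems from Map.computeIfPresent returning null for absent keys, which the new filter screens out before unboxing. A minimal standalone sketch of the failure mode (datanode ids hypothetical):

  import java.util.HashMap;
  import java.util.Map;
  import java.util.Objects;
  import java.util.stream.Stream;

  public class CommitIndexDemo {
    public static void main(String[] args) {
      Map<String, Long> commitIndexes = new HashMap<>();
      commitIndexes.put("dn-1", 42L);
      long min = Stream.of("dn-1", "dn-2")  // "dn-2" is unknown to the map
          .map(id -> commitIndexes.computeIfPresent(id, (k, v) -> v + 1))
          .filter(Objects::nonNull)         // without this: NPE when unboxing
          .mapToLong(Long::longValue)
          .min().orElse(0);
      System.out.println(min);              // prints 43
    }
  }
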
@@ -458,7 +458,7 @@ public final class OzoneConfigKeys {
      "hdds.datanode.replication.work.dir";


-  public static final int OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE = 16 * 1024;
+  public static final int OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE = 8 * 1024;

  public static final String OZONE_CLIENT_READ_TIMEOUT
      = "ozone.client.read.timeout";
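
A hedged sketch of how such a floor is typically applied (the helper below is hypothetical; the client's real validation lives elsewhere):

  public class MinChecksumSizeDemo {
    // Mirrors OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE after this change.
    static final int MIN_BYTES_PER_CHECKSUM = 8 * 1024;

    static int effectiveBytesPerChecksum(int configured) {
      // Clamp undersized values up to the supported minimum.
      return Math.max(configured, MIN_BYTES_PER_CHECKSUM);
    }

    public static void main(String[] args) {
      System.out.println(effectiveBytesPerChecksum(4 * 1024)); // prints 8192
    }
  }
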
@@ -61,6 +61,7 @@ public class ChunkBufferImplWithByteBufferList implements ChunkBuffer {

  private void findCurrent() {
    boolean found = false;
+   limitPrecedingCurrent = 0;
    for (int i = 0; i < buffers.size(); i++) {
      final ByteBuffer buf = buffers.get(i);
      final int pos = buf.position();
@@ -185,6 +186,8 @@ public ChunkBuffer duplicate(int newPosition, int newLimit) {
   */
  @Override
  public Iterable<ByteBuffer> iterate(int bufferSize) {
+   Preconditions.checkArgument(bufferSize > 0);
+
    return () -> new Iterator<ByteBuffer>() {
      @Override
      public boolean hasNext() {
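
With the new precondition, a non-positive size fails fast. A test-style sketch (JUnit harness and imports assumed, matching the tests further below):

  ChunkBuffer subject = ChunkBuffer.wrap(ImmutableList.of(ByteBuffer.allocate(8)));
  assertThrows(IllegalArgumentException.class, () -> subject.iterate(0));
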
@@ -198,10 +201,40 @@ public ByteBuffer next() {
        }
        findCurrent();
        ByteBuffer current = buffers.get(currentIndex);
-       final ByteBuffer duplicated = current.duplicate();
-       duplicated.limit(current.limit());
-       current.position(current.limit());
-       return duplicated;
+
+       // If current buffer has enough space or it's the last one, return it.
+       if (current.remaining() >= bufferSize || currentIndex == buffers.size() - 1) {
+         final ByteBuffer duplicated = current.duplicate();
+         int duplicatedLimit = Math.min(current.position() + bufferSize, current.limit());
+         duplicated.limit(duplicatedLimit);
+         duplicated.position(current.position());
+
+         current.position(duplicatedLimit);
+         return duplicated;
+       }
+
+       // Otherwise, create a new buffer.
+       int newBufferSize = Math.min(bufferSize, remaining());
+       ByteBuffer allocated = ByteBuffer.allocate(newBufferSize);
+       int remainingToFill = allocated.remaining();
+
+       while (remainingToFill > 0) {
+         final ByteBuffer b = current();
+         int bytes = Math.min(b.remaining(), remainingToFill);
+         b.limit(b.position() + bytes);
+         allocated.put(b);
+         remainingToFill -= bytes;
+         advanceCurrent();
+       }
+
+       allocated.flip();
+
+       // Up-to-date current.
+       current = buffers.get(currentIndex);
+       // Reset
+       current.limit(current.capacity());
+
+       return allocated;
      }
    };
  }
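
A usage sketch of the fixed behavior (test-style, harness assumed): iterating two 3-byte buffers with bufferSize 4 should now yield one coalesced 4-byte buffer and then the 2-byte remainder, instead of cutting each slice at the first buffer's boundary:

  ChunkBuffer chunk = ChunkBuffer.wrap(ImmutableList.of(
      ByteBuffer.allocate(3), ByteBuffer.allocate(3)));
  for (ByteBuffer b : chunk.iterate(4)) {
    System.out.println(b.remaining());  // prints 4, then 2
  }
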
@@ -67,6 +67,76 @@ public void rejectsMultipleCurrentBuffers() {
    assertThrows(IllegalArgumentException.class, () -> ChunkBuffer.wrap(list));
  }

+  @Test
+  public void testIterateSmallerOverSingleChunk() {
+    ChunkBuffer subject = ChunkBuffer.wrap(ImmutableList.of(ByteBuffer.allocate(100)));
+
+    assertEquals(0, subject.position());
+    assertEquals(100, subject.remaining());
+    assertEquals(100, subject.limit());
+
+    subject.iterate(25).forEach(buffer -> assertEquals(25, buffer.remaining()));
+
+    assertEquals(100, subject.position());
+    assertEquals(0, subject.remaining());
+    assertEquals(100, subject.limit());
+  }
+
+  @Test
+  public void testIterateOverMultipleChunksFitChunkSize() {
+    ByteBuffer b1 = ByteBuffer.allocate(100);
+    ByteBuffer b2 = ByteBuffer.allocate(100);
+    ByteBuffer b3 = ByteBuffer.allocate(100);
+    ChunkBuffer subject = ChunkBuffer.wrap(ImmutableList.of(b1, b2, b3));
+
+    assertEquals(0, subject.position());
+    assertEquals(300, subject.remaining());
+    assertEquals(300, subject.limit());
+
+    subject.iterate(100).forEach(buffer -> assertEquals(100, buffer.remaining()));
+
+    assertEquals(300, subject.position());
+    assertEquals(0, subject.remaining());
+    assertEquals(300, subject.limit());
+  }
+
+  @Test
+  public void testIterateOverMultipleChunksSmallerChunks() {
+    ByteBuffer b1 = ByteBuffer.allocate(100);
+    ByteBuffer b2 = ByteBuffer.allocate(100);
+    ByteBuffer b3 = ByteBuffer.allocate(100);
+    ChunkBuffer subject = ChunkBuffer.wrap(ImmutableList.of(b1, b2, b3));
+
+    assertEquals(0, subject.position());
+    assertEquals(300, subject.remaining());
+    assertEquals(300, subject.limit());
+
+    subject.iterate(50).forEach(buffer -> assertEquals(50, buffer.remaining()));
+
+    assertEquals(300, subject.position());
+    assertEquals(0, subject.remaining());
+    assertEquals(300, subject.limit());
+  }
+
+  @Test
+  public void testIterateOverMultipleChunksBiggerChunks() {
+    ByteBuffer b1 = ByteBuffer.allocate(100);
+    ByteBuffer b2 = ByteBuffer.allocate(100);
+    ByteBuffer b3 = ByteBuffer.allocate(100);
+    ByteBuffer b4 = ByteBuffer.allocate(100);
+    ChunkBuffer subject = ChunkBuffer.wrap(ImmutableList.of(b1, b2, b3, b4));
+
+    assertEquals(0, subject.position());
+    assertEquals(400, subject.remaining());
+    assertEquals(400, subject.limit());
+
+    subject.iterate(200).forEach(buffer -> assertEquals(200, buffer.remaining()));
+
+    assertEquals(400, subject.position());
+    assertEquals(0, subject.remaining());
+    assertEquals(400, subject.limit());
+  }
+
  private static void assertEmpty(ChunkBuffer subject) {
    assertEquals(0, subject.position());
    assertEquals(0, subject.remaining());
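
A further case one might add (hypothetical test in the same style): a bufferSize that does not divide the chunk boundaries exercises both the coalescing path and the trailing remainder:

  @Test
  public void testIterateNonAlignedChunks() {
    ChunkBuffer subject = ChunkBuffer.wrap(
        ImmutableList.of(ByteBuffer.allocate(100), ByteBuffer.allocate(100)));
    int total = 0;
    for (ByteBuffer buffer : subject.iterate(60)) {
      total += buffer.remaining();  // yields 60, 60, 60, then 20
    }
    assertEquals(200, total);
  }
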
@@ -354,10 +354,11 @@ private void processCmd(DeleteCmdInfo cmd) {
    DeletedContainerBlocksSummary summary =
        DeletedContainerBlocksSummary.getFrom(containerBlocks);
    LOG.info("Summary of deleting container blocks, numOfTransactions={}, "
-       + "numOfContainers={}, numOfBlocks={}",
+       + "numOfContainers={}, numOfBlocks={}, commandId={}.",
        summary.getNumOfTxs(),
        summary.getNumOfContainers(),
-       summary.getNumOfBlocks());
+       summary.getNumOfBlocks(),
+       cmd.getCmd().getId());
    if (LOG.isDebugEnabled()) {
      LOG.debug("Start to delete container blocks, TXIDs={}",
          summary.getTxIDSummary());
@@ -384,7 +385,8 @@ private void processCmd(DeleteCmdInfo cmd) {
      LOG.debug("Sending following block deletion ACK to SCM");
      for (DeleteBlockTransactionResult result : blockDeletionACK
          .getResultsList()) {
-       LOG.debug("{} : {}", result.getTxID(), result.getSuccess());
+       LOG.debug("TxId = {} : ContainerId = {} : {}",
+           result.getTxID(), result.getContainerID(), result.getSuccess());
      }
    }
  }
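
For illustration, the enriched messages render along these lines (values hypothetical); a runnable SLF4J sketch mirroring the two formats above:

  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  public class DeletionLogDemo {
    private static final Logger LOG = LoggerFactory.getLogger(DeletionLogDemo.class);

    public static void main(String[] args) {
      LOG.info("Summary of deleting container blocks, numOfTransactions={}, "
          + "numOfContainers={}, numOfBlocks={}, commandId={}.", 3, 2, 10, 42L);
      LOG.debug("TxId = {} : ContainerId = {} : {}", 101L, 7L, true);
    }
  }
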
@@ -19,6 +19,7 @@
package org.apache.hadoop.ozone.container.keyvalue;

import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdfs.util.Canceler;
@@ -43,7 +44,6 @@
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
-import java.util.Arrays;

import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
import org.slf4j.Logger;
@@ -404,8 +404,8 @@ private static ScanResult verifyChecksum(BlockData block,
        " for block %s",
        ChunkInfo.getFromProtoBuf(chunk),
        i,
-       Arrays.toString(expected.toByteArray()),
-       Arrays.toString(actual.toByteArray()),
+       StringUtils.bytes2Hex(expected.asReadOnlyByteBuffer()),
+       StringUtils.bytes2Hex(actual.asReadOnlyByteBuffer()),
        block.getBlockID());
    return ScanResult.unhealthy(
        ScanResult.FailureType.CORRUPT_CHUNK, chunkFile,
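
This follows HDDS-10480's aim of avoiding proto2 ByteString.toByteArray() calls, which copy the underlying bytes. A hedged standalone sketch of the pattern (imports as in the diff; the demo class is hypothetical):

  import org.apache.hadoop.hdds.StringUtils;
  import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;

  public class ChecksumHexDemo {
    public static void main(String[] args) {
      ByteString checksum = ByteString.copyFrom(new byte[] {0x0A, (byte) 0xFF});
      // asReadOnlyByteBuffer() wraps the existing bytes; toByteArray()
      // would allocate and copy a fresh array just to format it.
      System.out.println(StringUtils.bytes2Hex(checksum.asReadOnlyByteBuffer()));
    }
  }
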
@@ -126,7 +126,7 @@ static int getBufferCapacityForChunkRead(ChunkInfo chunkInfo,
    } else {
      // Set buffer capacity to checksum boundary size so that each buffer
      // corresponds to one checksum. If checksum is NONE, then set buffer
-     // capacity to default (OZONE_CHUNK_READ_BUFFER_DEFAULT_SIZE_KEY = 64KB).
+     // capacity to default (OZONE_CHUNK_READ_BUFFER_DEFAULT_SIZE_KEY = 1MB).
      ChecksumData checksumData = chunkInfo.getChecksumData();

      if (checksumData != null) {
2 changes: 1 addition & 1 deletion hadoop-hdds/docs/content/feature/Quota.md
@@ -1,6 +1,6 @@
---
title: "Quota in Ozone"
-date: "2020-October-22"
+date: "2020-10-22"
weight: 4
summary: Quota in Ozone
icon: user
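
Every front-matter fix below follows the same pattern: Hugo v0.135.0 (HDDS-11570) rejects the old pseudo-dates, which were never valid ISO-8601. An illustrative java.time analogy (hypothetical standalone demo):

  import java.time.LocalDate;

  public class FrontMatterDateDemo {
    public static void main(String[] args) {
      System.out.println(LocalDate.parse("2020-10-22"));  // parses fine
      LocalDate.parse("2020-October-22");                 // throws DateTimeParseException
    }
  }
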
2 changes: 1 addition & 1 deletion hadoop-hdds/docs/content/feature/Quota.zh.md
@@ -1,6 +1,6 @@
---
title: "Ozone 中的配额"
-date: "2020-October-22"
+date: "2020-10-22"
weight: 4
summary: Ozone中的配额
icon: user
2 changes: 1 addition & 1 deletion hadoop-hdds/docs/content/security/GDPR.md
@@ -1,6 +1,6 @@
---
title: "GDPR in Ozone"
-date: "2019-September-17"
+date: "2019-09-17"
weight: 3
icon: user
menu:
2 changes: 1 addition & 1 deletion hadoop-hdds/docs/content/security/GDPR.zh.md
@@ -1,6 +1,6 @@
---
title: "Ozone 中的 GDPR"
-date: "2019-September-17"
+date: "2019-09-17"
weight: 3
summary: Ozone 中的 GDPR
menu:
2 changes: 1 addition & 1 deletion hadoop-hdds/docs/content/security/SecureOzone.md
@@ -1,6 +1,6 @@
---
title: "Securing Ozone"
-date: "2019-April-03"
+date: "2019-04-03"
summary: Overview of Ozone security concepts and steps to secure Ozone Manager and SCM.
weight: 1
menu:
2 changes: 1 addition & 1 deletion hadoop-hdds/docs/content/security/SecureOzone.zh.md
@@ -1,6 +1,6 @@
---
title: "安全化 Ozone"
-date: "2019-April-03"
+date: "2019-04-03"
summary: 简要介绍 Ozone 中的安全概念以及安全化 OM 和 SCM 的步骤。
weight: 1
menu:
2 changes: 1 addition & 1 deletion hadoop-hdds/docs/content/security/SecuringDatanodes.md
@@ -1,6 +1,6 @@
---
title: "Securing Datanodes"
-date: "2019-April-03"
+date: "2019-04-03"
weight: 3
menu:
  main:
2 changes: 1 addition & 1 deletion hadoop-hdds/docs/content/security/SecuringDatanodes.zh.md
@@ -1,6 +1,6 @@
---
title: "安全化 Datanode"
-date: "2019-April-03"
+date: "2019-04-03"
weight: 3
menu:
  main:
2 changes: 1 addition & 1 deletion hadoop-hdds/docs/content/security/SecuringOzoneHTTP.md
@@ -1,6 +1,6 @@
---
title: "Securing HTTP"
-date: "2020-June-17"
+date: "2020-06-17"
summary: Secure HTTP web-consoles for Ozone services
weight: 4
menu:
2 changes: 1 addition & 1 deletion hadoop-hdds/docs/content/security/SecuringS3.md
@@ -1,6 +1,6 @@
---
title: "Securing S3"
-date: "2019-April-03"
+date: "2019-04-03"
summary: Ozone supports S3 protocol, and uses AWS Signature Version 4 protocol which allows a seamless S3 experience.
weight: 5
menu:
2 changes: 1 addition & 1 deletion hadoop-hdds/docs/content/security/SecuringS3.zh.md
@@ -1,6 +1,6 @@
---
title: "安全化 S3"
-date: "2019-April-03"
+date: "2019-04-03"
summary: Ozone 支持 S3 协议,并使用 AWS Signature Version 4 protocol which allows a seamless S3
  experience.
weight: 5
2 changes: 1 addition & 1 deletion hadoop-hdds/docs/content/security/SecuringTDE.md
@@ -1,6 +1,6 @@
---
title: "Transparent Data Encryption"
-date: "2019-April-03"
+date: "2019-04-03"
summary: TDE allows data on the disks to be encrypted-at-rest and automatically decrypted during access.
weight: 2
menu:
2 changes: 1 addition & 1 deletion hadoop-hdds/docs/content/security/SecuringTDE.zh.md
@@ -1,6 +1,6 @@
---
title: "透明数据加密"
-date: "2019-April-03"
+date: "2019-04-03"
summary: 透明数据加密(Transparent Data Encryption,TDE)以密文形式在磁盘上保存数据,但可以在用户访问的时候自动进行解密。
weight: 2
menu:
2 changes: 1 addition & 1 deletion hadoop-hdds/docs/content/security/SecurityAcls.md
@@ -1,6 +1,6 @@
---
title: "Ozone ACLs"
-date: "2019-April-03"
+date: "2019-04-03"
weight: 6
menu:
  main:
2 changes: 1 addition & 1 deletion hadoop-hdds/docs/content/security/SecurityAcls.zh.md
@@ -1,6 +1,6 @@
---
title: "Ozone 访问控制列表"
-date: "2019-April-03"
+date: "2019-04-03"
weight: 6
menu:
  main:
2 changes: 1 addition & 1 deletion hadoop-hdds/docs/content/security/SecurityWithRanger.md
@@ -1,6 +1,6 @@
---
title: "Apache Ranger"
-date: "2019-April-03"
+date: "2019-04-03"
weight: 7
menu:
  main:
2 changes: 1 addition & 1 deletion hadoop-hdds/docs/content/security/SecurityWithRanger.zh.md
@@ -1,6 +1,6 @@
---
title: "Apache Ranger"
-date: "2019-April-03"
+date: "2019-04-03"
weight: 7
menu:
  main:
@@ -47,7 +47,8 @@ void addTransactionToDN(UUID dnID, DeletedBlocksTransaction tx) {
    blocksDeleted += tx.getLocalIDCount();
    if (SCMBlockDeletingService.LOG.isDebugEnabled()) {
      SCMBlockDeletingService.LOG
-         .debug("Transaction added: {} <- TX({})", dnID, tx.getTxID());
+         .debug("Transaction added: {} <- TX({}), DN {} <- blocksDeleted Add {}.",
+             dnID, tx.getTxID(), dnID, tx.getLocalIDCount());
    }
  }
