Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
79 commits
Select commit Hold shift + click to select a range
5b5f10c
HDDS-9400. Introduce DatanodeID to avoid passing UUID/String object i…
nandakumar131 Jan 4, 2025
5d6908e
JSON Conversion support added
nandakumar131 Jan 4, 2025
ce386ef
Merge branch 'master' into HDDS-9400
nandakumar131 Jan 24, 2025
1608faa
HDDS-12130. Improve assertion compatibility with old Hadoop (#7738)
adoroszlai Jan 24, 2025
99c3467
HDDS-12131. NPE in OM when overwriting empty file using multipart upl…
k5342 Jan 24, 2025
82d452c
HDDS-12089. Move execute_debug_tests out of testlib.sh (#7744)
adoroszlai Jan 24, 2025
c76b039
HDDS-12114. Prevent delete commands running after a long lock wait an…
sodonnel Jan 24, 2025
f9a4aa7
HDDS-12081. TestKeyInputStream repeats tests with default container l…
adoroszlai Jan 24, 2025
081fd26
HDDS-12138. Bump assertj-core to 3.27.3 (#7751)
dependabot[bot] Jan 25, 2025
1a2af7f
HDDS-12098. Bump Hugo to 0.141.0 (#7731)
adoroszlai Jan 25, 2025
df1690e
HDDS-12122. Add unit test for SnapshotChainRepair (#7741)
peterxcli Jan 25, 2025
696408a
HDDS-11798. Move SafeModeRule names to respective rules (#7742)
nandakumar131 Jan 25, 2025
78297ea
HDDS-12115. RM selects replicas to delete non-deterministically if no…
sodonnel Jan 25, 2025
8feb62e
HDDS-11892. Remove config from SCM for disabling Ratis. (#7711)
nandakumar131 Jan 26, 2025
93d7db0
HDDS-12135. Set RM default deadline to 12 minutes and datanode offset…
sodonnel Jan 26, 2025
3c28b19
HDDS-12099. Generate kubernetes Robot report in container (#7754)
chiacyu Jan 27, 2025
1315a6a
HDDS-12117. Create endpoint builders for S3G tests (#7753)
peterxcli Jan 27, 2025
2a43909
HDDS-12084. Persist currently selected UI type (new/old) between refr…
devabhishekpal Jan 27, 2025
5e2bbdd
HDDS-11837. Support executing multiple commands in Ozone CLI (#7727)
adoroszlai Jan 27, 2025
bdc0e8e
HDDS-12032. Remove DefaultConfigManager from SCM. (#7757)
nandakumar131 Jan 28, 2025
5a35e0d
HDDS-12139. Refactor TestSnapshotChainRepair. (#7752)
adoroszlai Jan 28, 2025
57e3759
HDDS-12127. RM should not expire pending deletes, but retry until del…
sodonnel Jan 28, 2025
c4346c9
HDDS-12141. Replace direct dependency on hadoop-hdfs-client (#7762)
adoroszlai Jan 28, 2025
9773cc5
HDDS-12140. Replace leftover rebot in k8s/examples/test-all.sh (#7756)
adoroszlai Jan 28, 2025
3327ef2
HDDS-11125. Do not log user-controlled data in HddsConfServlet (#7767)
len548 Jan 28, 2025
d55c151
HDDS-12147. Remove server dependencies from hdds-tools (#7771)
adoroszlai Jan 29, 2025
2a46b2c
HDDS-12116. Customizable prefix for shaded protobuf in ozonefs-hadoop…
apotheque Jan 29, 2025
3e7357e
HDDS-12010. Block ozone repair if service is running (#7758)
adoroszlai Jan 30, 2025
82b5d05
HDDS-12155. Create new submodule for ozone shell (#7775)
adoroszlai Jan 30, 2025
3b67c44
HDDS-12162. Log available space of HddsVolume and DbVolume upon Datan…
smengcl Jan 30, 2025
e495928
HDDS-12143. Generate list of integration check splits dynamically (#7…
adoroszlai Jan 30, 2025
ee2e950
HDDS-12040. `ozone freon cr` fails with NPE in ReplicationSupervisor …
len548 Jan 30, 2025
7400016
HDDS-12144. Remove unsupported replication types from config descript…
adoroszlai Jan 31, 2025
d8c771c
HDDS-11277. Remove dependency on hadoop-hdfs in Ozone client (#7781)
adoroszlai Jan 31, 2025
0044566
HDDS-12132. Parameterize testUpdateTransactionInfoTable for SCM (#7768)
sarvekshayr Jan 31, 2025
54270c0
HDDS-12085. Add manual refresh button for DU page (#7780)
devabhishekpal Jan 31, 2025
4292fd4
HDDS-12165. Refactor VolumeInfoMetrics to use getCurrentUsage (#7784)
adoroszlai Feb 1, 2025
e866507
HDDS-12181. Bump jline to 3.29.0 (#7789)
dependabot[bot] Feb 1, 2025
75c6bb5
HDDS-12176. Trivial dependency cleanup.(#7787)
adoroszlai Feb 1, 2025
8380fad
HDDS-12163. Reduce number of individual getCapacity/getAvailable/getU…
adoroszlai Feb 2, 2025
21b21c1
HDDS-12142. Save logs from build check (#7782)
adoroszlai Feb 3, 2025
51ffefd
HDDS-12073. Don't show Source Bucket and Volume if null in DU metadat…
devabhishekpal Feb 3, 2025
0fa552f
HDDS-11508. Decouple delete batch limits from Ratis request size for …
sadanand48 Feb 3, 2025
a05539c
HDDS-12186. Avoid array allocation for table iterator. (#7797)
sadanand48 Feb 3, 2025
50869b9
HDDS-12186. (addendum) Avoid array allocation for table iterator (#7799)
adoroszlai Feb 4, 2025
f0bc4fa
HDDS-11714. resetDeletedBlockRetryCount with --all may fail and can c…
aryangupta1998 Feb 4, 2025
c20e4eb
HDDS-12183. Reuse cluster across safe test classes (#7793)
adoroszlai Feb 4, 2025
a94f111
HDDS-12203. Initialize block length before skip (#7809)
oneonestar Feb 4, 2025
bf86fe3
HDDS-12202. OpsCreate and OpsAppend metrics not incremented (#7811)
peterxcli Feb 5, 2025
30543e6
HDDS-12200. Fix grammar in OM HA, EC and Snapshot doc (#7806)
sreejasahithi Feb 5, 2025
f7deae1
HDDS-12195. Implement skip() in OzoneFSInputStream (#7801)
oneonestar Feb 5, 2025
abcf385
HDDS-12212. Fix grammar in decommissioning and observability document…
Gargi-jais11 Feb 5, 2025
e7b38b6
HDDS-12112. Fix interval used for Chunk Read/Write Dashboard (#7724)
kerneltime Feb 5, 2025
6d819de
HDDS-11442. Add dashboard for memory consumption metrics (#7198)
len548 Feb 6, 2025
4dcec6e
HDDS-12159. Remove redundant seek for rocksDBs (#7794)
Tejaskriya Feb 6, 2025
adb1446
HDDS-7003. Make read-replicas tool compatible with EC replication typ…
len548 Feb 6, 2025
33f7c65
HDDS-10607. Remove unused config property ozone.block.deleting.contai…
sreejasahithi Feb 6, 2025
c5958fe
HDDS-12044. Fix heatmap calendar closing on skipping years/months (#7…
devabhishekpal Feb 6, 2025
10d87cd
HDDS-12221. Remove unused config property ozone.block.deleting.limit.…
Gargi-jais11 Feb 6, 2025
0bc2d20
HDDS-12217. Remove reference to FileUtil in hdds-common. (#7818)
adoroszlai Feb 6, 2025
9d39cd8
HDDS-12231. Logging in Container Balancer is too verbose. (#7826)
siddhantsangwan Feb 6, 2025
4fa1d76
HDDS-12033. ScmHAUnfinalizedStateValidationAction can be remove as it…
jojochuang Feb 6, 2025
0b0994b
HDDS-11866. Remove code paths for non-Ratis OM (#7778)
adoroszlai Feb 6, 2025
d8b6278
HDDS-12218. Add more to integration test with shared cluster (#7821)
adoroszlai Feb 7, 2025
85d64fd
HDDS-12227. Avoid Clutter in Recon Logs by Reducing Log Level of Cont…
ArafatKhan2198 Feb 7, 2025
c229d5c
HDDS-12228. Fix Duplicate Key Violation Condition in FileSizeCountTas…
ArafatKhan2198 Feb 7, 2025
5d26991
HDDS-12180. Store snapshot in CachingSpaceUsageSource (#7798)
adoroszlai Feb 7, 2025
e0b98a4
HDDS-12230. Improve error message in `ozone sh key put` when file not…
sreejasahithi Feb 7, 2025
92ccfd5
HDDS-12232. Move container from QUASI_CLODED to CLOSED only when SCM …
sodonnel Feb 7, 2025
209a609
HDDS-12149. Do not require dependency-convergence. (#7772)
adoroszlai Feb 8, 2025
46c901d
HDDS-11784. Allow aborting FSO multipart uploads with missing parent …
sokui Feb 8, 2025
7f69eb2
HDDS-12205. Reduce log level in TestHSync (#7838)
chiacyu Feb 8, 2025
3a1672c
HDDS-12287. Bump sqlite-jdbc to 3.49.0.0 (#7839)
dependabot[bot] Feb 8, 2025
fc22b97
HDDS-12248. Make allowListAllVolumes reconfigurable in OM (#7837)
adoroszlai Feb 8, 2025
d5dab3e
HDDS-12292. Change log level in SCMNodeManager#getNodesByAddress to d…
nandakumar131 Feb 10, 2025
5fa5353
Fixed review comments.
nandakumar131 Feb 10, 2025
7d84d44
Fixed typo in hdds.proto
nandakumar131 Feb 10, 2025
4dd0c39
Merge remote-tracking branch 'upstream' into HDDS-9400
nandakumar131 Feb 10, 2025
9536a98
Use StringWithByteString to store UUID String.
nandakumar131 Feb 11, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -86,8 +86,7 @@ public static Codec<DatanodeDetails> getCodec() {
/**
* DataNode's unique identifier in the cluster.
*/
private final UUID uuid;
private final StringWithByteString uuidString;
private final DatanodeID id;
private final String threadNamePrefix;
private StringWithByteString ipAddress;
private StringWithByteString hostName;
Expand All @@ -103,9 +102,8 @@ public static Codec<DatanodeDetails> getCodec() {

private DatanodeDetails(Builder b) {
super(b.hostName, b.networkLocation, NetConstants.NODE_COST_DEFAULT);
uuid = b.id;
uuidString = StringWithByteString.valueOf(uuid.toString());
threadNamePrefix = HddsUtils.threadNamePrefix(uuidString);
id = b.id;
threadNamePrefix = HddsUtils.threadNamePrefix(id.toString());
ipAddress = b.ipAddress;
hostName = b.hostName;
ports = b.ports;
Expand All @@ -129,9 +127,8 @@ public DatanodeDetails(DatanodeDetails datanodeDetails) {
super(datanodeDetails.getHostNameAsByteString(), datanodeDetails.getNetworkLocationAsByteString(),
datanodeDetails.getParent(), datanodeDetails.getLevel(),
datanodeDetails.getCost());
this.uuid = datanodeDetails.uuid;
this.uuidString = datanodeDetails.uuidString;
threadNamePrefix = HddsUtils.threadNamePrefix(uuidString);
this.id = datanodeDetails.id;
threadNamePrefix = HddsUtils.threadNamePrefix(id.toString());
this.ipAddress = datanodeDetails.ipAddress;
this.hostName = datanodeDetails.hostName;
this.ports = datanodeDetails.ports;
Expand All @@ -148,13 +145,19 @@ public DatanodeDetails(DatanodeDetails datanodeDetails) {
this.currentVersion = datanodeDetails.getCurrentVersion();
}

public DatanodeID getID() {
return id;
}

/**
* Returns the DataNode UUID.
*
* @return UUID of DataNode
*/
// TODO: Remove this in follow-up Jira (HDDS-12015)
@Deprecated
public UUID getUuid() {
return uuid;
return id.getUuid();
}

/**
Expand All @@ -163,7 +166,7 @@ public UUID getUuid() {
* @return UUID of DataNode
*/
public String getUuidString() {
return uuidString.getString();
return id.toString();
}

/**
Expand Down Expand Up @@ -392,11 +395,16 @@ public Port getStandalonePort() {
public static DatanodeDetails.Builder newBuilder(
HddsProtos.DatanodeDetailsProto datanodeDetailsProto) {
DatanodeDetails.Builder builder = newBuilder();
if (datanodeDetailsProto.hasUuid128()) {

if (datanodeDetailsProto.hasId()) {
builder.setID(DatanodeID.fromProto(datanodeDetailsProto.getId()));
// The else parts are for backward compatibility.
} else if (datanodeDetailsProto.hasUuid128()) {
HddsProtos.UUID uuid = datanodeDetailsProto.getUuid128();
builder.setUuid(new UUID(uuid.getMostSigBits(), uuid.getLeastSigBits()));
builder.setID(DatanodeID.of(new UUID(
uuid.getMostSigBits(), uuid.getLeastSigBits())));
} else if (datanodeDetailsProto.hasUuid()) {
builder.setUuid(UUID.fromString(datanodeDetailsProto.getUuid()));
builder.setID(DatanodeID.fromUuidString(datanodeDetailsProto.getUuid()));
}

if (datanodeDetailsProto.hasIpAddress()) {
Expand Down Expand Up @@ -504,20 +512,17 @@ public HddsProtos.DatanodeDetailsProto toProto(int clientVersion, Set<Port.Name>
* If empty, all available ports will be included.
* @return A {@link HddsProtos.DatanodeDetailsProto.Builder} Object.
*/

public HddsProtos.DatanodeDetailsProto.Builder toProtoBuilder(
int clientVersion, Set<Port.Name> filterPorts) {

HddsProtos.UUID uuid128 = HddsProtos.UUID.newBuilder()
.setMostSigBits(uuid.getMostSignificantBits())
.setLeastSigBits(uuid.getLeastSignificantBits())
.build();
final HddsProtos.DatanodeIDProto idProto = id.toProto();
final HddsProtos.DatanodeDetailsProto.Builder builder =
HddsProtos.DatanodeDetailsProto.newBuilder();

HddsProtos.DatanodeDetailsProto.Builder builder =
HddsProtos.DatanodeDetailsProto.newBuilder()
.setUuid128(uuid128);

builder.setUuidBytes(uuidString.getBytes());
builder.setId(idProto);
// Both are deprecated.
builder.setUuid128(idProto.getUuid());
builder.setUuidBytes(id.getByteString());

if (ipAddress != null) {
builder.setIpAddressBytes(ipAddress.getBytes());
Expand Down Expand Up @@ -619,11 +624,11 @@ public void setCurrentVersion(int currentVersion) {

@Override
public String toString() {
return uuidString + "(" + hostName + "/" + ipAddress + ")";
return id + "(" + hostName + "/" + ipAddress + ")";
}

public String toDebugString() {
return uuid.toString() + "{" +
return id + "{" +
"ip: " +
ipAddress +
", host: " +
Expand All @@ -639,13 +644,13 @@ public String toDebugString() {

@Override
public int compareTo(DatanodeDetails that) {
return this.getUuid().compareTo(that.getUuid());
return this.id.compareTo(that.id);
}

@Override
public boolean equals(Object obj) {
return obj instanceof DatanodeDetails &&
uuid.equals(((DatanodeDetails) obj).uuid);
id.equals(((DatanodeDetails) obj).id);
}


Expand All @@ -664,7 +669,7 @@ public boolean compareNodeValues(DatanodeDetails datanodeDetails) {

@Override
public int hashCode() {
return uuid.hashCode();
return id.hashCode();
}

/**
Expand All @@ -685,7 +690,7 @@ public String threadNamePrefix() {
* Builder class for building DatanodeDetails.
*/
public static final class Builder {
private UUID id;
private DatanodeID id;
private StringWithByteString ipAddress;
private StringWithByteString hostName;
private StringWithByteString networkName;
Expand Down Expand Up @@ -716,7 +721,7 @@ private Builder() {
* @return DatanodeDetails.Builder
*/
public Builder setDatanodeDetails(DatanodeDetails details) {
this.id = details.getUuid();
this.id = details.id;
this.ipAddress = details.getIpAddressAsByteString();
this.hostName = details.getHostNameAsByteString();
this.networkName = details.getHostNameAsByteString();
Expand All @@ -740,7 +745,12 @@ public Builder setDatanodeDetails(DatanodeDetails details) {
* @return DatanodeDetails.Builder
*/
public Builder setUuid(UUID uuid) {
this.id = uuid;
this.id = DatanodeID.of(uuid);
return this;
}

public Builder setID(DatanodeID dnId) {
this.id = dnId;
return this;
}

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,121 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hdds.protocol;

import com.google.protobuf.ByteString;
import java.util.UUID;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeIDProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.ozone.util.StringWithByteString;

/**
 * DatanodeID is the primary identifier of the Datanode.
 * They are unique for every Datanode in the cluster.
 * <p>
 * This class is immutable and thread safe.
 */
public final class DatanodeID implements Comparable<DatanodeID> {

  // Interning cache: at most one DatanodeID instance per UUID, so repeated
  // lookups of the same datanode share a single object.
  private static final ConcurrentMap<UUID, DatanodeID> CACHE = new ConcurrentHashMap<>();

  private final UUID uuid;
  // String form of the UUID cached together with its ByteString encoding,
  // so proto conversion does not re-encode on every call.
  private final StringWithByteString uuidString;

  private DatanodeID(final UUID uuid) {
    this.uuid = uuid;
    this.uuidString = StringWithByteString.valueOf(uuid.toString());
  }

  /**
   * Returns the interned DatanodeID for the given UUID.
   */
  public static DatanodeID of(final UUID id) {
    return CACHE.computeIfAbsent(id, DatanodeID::new);
  }

  /**
   * Parses the given UUID string and returns the interned DatanodeID.
   */
  public static DatanodeID fromUuidString(final String id) {
    return of(UUID.fromString(id));
  }

  /**
   * Returns the interned DatanodeID for the given proto message.
   */
  public static DatanodeID fromProto(final DatanodeIDProto proto) {
    return of(fromProto(proto.getUuid()));
  }

  /**
   * Returns a random DatanodeID.
   */
  public static DatanodeID randomID() {
    // We don't want to add Random ID to cache.
    return new DatanodeID(UUID.randomUUID());
  }

  // Mainly used for JSON conversion
  public String getID() {
    return toString();
  }

  public DatanodeIDProto toProto() {
    return DatanodeIDProto.newBuilder().setUuid(toProto(uuid)).build();
  }

  /**
   * This will be removed once the proto structure is refactored
   * to remove deprecated fields.
   */
  @Deprecated
  public ByteString getByteString() {
    return uuidString.getBytes();
  }

  @Override
  public String toString() {
    return uuidString.getString();
  }

  @Override
  public int compareTo(final DatanodeID that) {
    return uuid.compareTo(that.uuid);
  }

  @Override
  public boolean equals(final Object obj) {
    if (!(obj instanceof DatanodeID)) {
      return false;
    }
    return uuid.equals(((DatanodeID) obj).uuid);
  }

  @Override
  public int hashCode() {
    return uuid.hashCode();
  }

  private static HddsProtos.UUID toProto(final UUID id) {
    return HddsProtos.UUID.newBuilder()
        .setMostSigBits(id.getMostSignificantBits())
        .setLeastSigBits(id.getLeastSignificantBits())
        .build();
  }

  private static UUID fromProto(final HddsProtos.UUID id) {
    return new UUID(id.getMostSigBits(), id.getLeastSigBits());
  }

  // TODO: Remove this in follow-up Jira. (HDDS-12015)
  UUID getUuid() {
    return uuid;
  }
}
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,6 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
Expand All @@ -39,6 +38,7 @@
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.ReconfigurationHandler;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.DatanodeID;
import org.apache.hadoop.hdds.protocol.SecretKeyProtocol;
import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.scm.ha.SCMHAUtils;
Expand Down Expand Up @@ -439,7 +439,7 @@ private DatanodeDetails initializeDatanodeDetails()
} else {
// There is no datanode.id file, this might be the first time datanode
// is started.
details = DatanodeDetails.newBuilder().setUuid(UUID.randomUUID()).build();
details = DatanodeDetails.newBuilder().setID(DatanodeID.randomID()).build();
details.setInitialVersion(getInitialVersion());
}
// Current version is always overridden to the latest
Expand Down
17 changes: 15 additions & 2 deletions hadoop-hdds/interface-client/src/main/proto/hdds.proto
Original file line number Diff line number Diff line change
Expand Up @@ -33,8 +33,12 @@ message UUID {
required int64 leastSigBits = 2;
}

// Primary identifier of a Datanode; wraps its 128-bit UUID.
// NOTE(review): `required` matches this file's existing proto2 style, but it
// can never be relaxed without a wire-compat break — confirm this is intended.
message DatanodeIDProto {
required UUID uuid = 1;
}

message DatanodeDetailsProto {
// deprecated, please use uuid128 instead
// deprecated, please use DatanodeIDProto instead
optional string uuid = 1; // UUID assigned to the Datanode.
required string ipAddress = 2; // IP address
required string hostName = 3; // hostname
Expand All @@ -49,6 +53,8 @@ message DatanodeDetailsProto {
// TODO(runzhiwang): when uuid is gone, specify 1 as the index of uuid128 and mark as required
optional UUID uuid128 = 100; // UUID with 128 bits assigned to the Datanode.
optional uint32 level = 101;
// TODO: Replace UUID with DatanodeID and make it required
optional DatanodeIDProto id = 102;
}

/**
Expand Down Expand Up @@ -123,14 +129,20 @@ message Pipeline {
optional ReplicationType type = 3 [default = STAND_ALONE];
optional ReplicationFactor factor = 4 [default = ONE];
required PipelineID id = 5;
// TODO: Deprecate this and replace with leaderDatanodeID
optional string leaderID = 6;
repeated uint32 memberOrders = 7;
optional uint64 creationTimeStamp = 8;
// TODO: Deprecate this and replace with suggestedLeaderDatanodeID
optional UUID suggestedLeaderID = 9;
repeated uint32 memberReplicaIndexes = 10;
optional ECReplicationConfig ecReplicationConfig = 11;
// TODO(runzhiwang): when leaderID is gone, specify 6 as the index of leaderID128

// TODO: Replace UUID with DatanodeIDProto
optional UUID leaderID128 = 100;

optional DatanodeIDProto leaderDatanodeID = 101;
optional DatanodeIDProto suggestedLeaderDatanodeID = 102;
}

message KeyValue {
Expand Down Expand Up @@ -324,6 +336,7 @@ enum ScmOps {
}

message ExcludeListProto {
// TODO: Replace with DatanodeID
repeated string datanodes = 1;
// Replace int64 with ContainerID message
repeated int64 containerIds = 2;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -150,6 +150,7 @@ message CommandQueueReportProto {
* A group of commands for the datanode to execute
*/
message SCMHeartbeatResponseProto {
// TODO: change this to DatanodeID
required string datanodeUUID = 1;
repeated SCMCommandProto commands = 2;

Expand Down
Loading