@@ -20,9 +20,9 @@
import java.io.IOException;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto;
import org.apache.hadoop.ozone.container.common.interfaces.StorageLocationReportMXBean;
import org.apache.hadoop.ozone.container.common.volume.VolumeUsage;

@@ -107,52 +107,52 @@ public StorageType getStorageType() {
return storageType;
}

private StorageTypeProto getStorageTypeProto() throws IOException {
private HddsProtos.StorageType getStorageTypeProto() throws IOException {
return getStorageTypeProto(getStorageType());
}

public static StorageTypeProto getStorageTypeProto(StorageType type)
public static HddsProtos.StorageType getStorageTypeProto(StorageType type)
throws IOException {
StorageTypeProto storageTypeProto;
HddsProtos.StorageType storageType;
switch (type) {
case SSD:
storageTypeProto = StorageTypeProto.SSD;
storageType = HddsProtos.StorageType.SSD_TYPE;
break;
case DISK:
storageTypeProto = StorageTypeProto.DISK;
storageType = HddsProtos.StorageType.DISK_TYPE;
break;
case ARCHIVE:
storageTypeProto = StorageTypeProto.ARCHIVE;
storageType = HddsProtos.StorageType.ARCHIVE_TYPE;
break;
case PROVIDED:
storageTypeProto = StorageTypeProto.PROVIDED;
storageType = HddsProtos.StorageType.PROVIDED_TYPE;
break;
case RAM_DISK:
storageTypeProto = StorageTypeProto.RAM_DISK;
storageType = HddsProtos.StorageType.RAM_DISK_TYPE;
break;
default:
throw new IOException("Illegal Storage Type specified");
}
return storageTypeProto;
return storageType;
}

private static StorageType getStorageType(StorageTypeProto proto) throws
private static StorageType getStorageType(HddsProtos.StorageType proto) throws
IOException {
StorageType storageType;
switch (proto) {
case SSD:
case SSD_TYPE:
storageType = StorageType.SSD;
break;
case DISK:
case DISK_TYPE:
storageType = StorageType.DISK;
break;
case ARCHIVE:
case ARCHIVE_TYPE:
storageType = StorageType.ARCHIVE;
break;
case PROVIDED:
case PROVIDED_TYPE:
storageType = StorageType.PROVIDED;
break;
case RAM_DISK:
case RAM_DISK_TYPE:
storageType = StorageType.RAM_DISK;
break;
default:
@@ -179,7 +179,7 @@ public StorageReportProto getProtoBufMessage(ConfigurationSource conf)
.setScmUsed(getScmUsed())
.setRemaining(getRemaining())
.setCommitted(getCommitted())
.setStorageType(getStorageTypeProto())
.setStorageTypeProto(getStorageTypeProto())
.setStorageLocation(getStorageLocation())
.setFailed(isFailed())
.setFreeSpaceToSpare(conf != null ?
@@ -200,7 +200,7 @@ public MetadataStorageReportProto getMetadataProtoBufMessage()
return srb.setCapacity(getCapacity())
.setScmUsed(getScmUsed())
.setRemaining(getRemaining())
.setStorageType(getStorageTypeProto())
.setStorageTypeProto(getStorageTypeProto())
.setStorageLocation(getStorageLocation())
.setFailed(isFailed())
.build();
@@ -224,8 +224,8 @@ public static StorageLocationReport getFromProtobuf(StorageReportProto report)
if (report.hasScmUsed()) {
builder.setScmUsed(report.getScmUsed());
}
if (report.hasStorageType()) {
builder.setStorageType(getStorageType(report.getStorageType()));
if (report.hasStorageTypeProto()) {
builder.setStorageType(getStorageType(report.getStorageTypeProto()));
}
if (report.hasRemaining()) {
builder.setRemaining(report.getRemaining());
@@ -254,8 +254,8 @@ public static StorageLocationReport getMetadataFromProtobuf(
if (report.hasScmUsed()) {
builder.setScmUsed(report.getScmUsed());
}
if (report.hasStorageType()) {
builder.setStorageType(getStorageType(report.getStorageType()));
if (report.hasStorageTypeProto()) {
builder.setStorageType(getStorageType(report.getStorageTypeProto()));
}
if (report.hasRemaining()) {
builder.setRemaining(report.getRemaining());
8 changes: 8 additions & 0 deletions hadoop-hdds/interface-client/src/main/proto/hdds.proto
@@ -307,6 +307,14 @@ enum ReplicationFactor {
ZERO = 0; // Invalid Factor
}

enum StorageType {
Contributor:
To avoid confusion with org.apache.hadoop.hdds.protocol.StorageType, let's use StorageTypeProto?

Contributor Author:
Both hdds.proto and ScmServerDatanodeHeartbeatProtocol.proto are in package hadoop.hdds, so we cannot name it StorageTypeProto.

Contributor (@szetszwo, Mar 22, 2025):
Since they are in the same package, we could safely move StorageTypeProto from ScmServerDatanodeHeartbeatProtocol.proto to hdds.proto. The only difference is the java_outer_classname. Fortunately, ScmServerDatanodeHeartbeatProtocol.proto is a non-user-facing internal protocol and the change is wire compatible, so there are no compatibility issues. See https://issues.apache.org/jira/secure/attachment/13075616/7109_review.patch

BTW, let's rename ScmServerDatanodeHeartbeatProtocol.proto to StorageContainerDatanodeProtocol.proto, i.e. make it consistent with the java_outer_classname.

DISK_TYPE = 1;
SSD_TYPE = 2;
ARCHIVE_TYPE = 3;
RAM_DISK_TYPE = 4;
PROVIDED_TYPE = 5;
Comment on lines +311 to +315

Contributor:
I don't recall who, but back in HDFS land after the storage policy feature was implemented, someone (Arpit or Anu?) regrettably said it was a mistake to have these types consecutive, because it wasn't possible to add any storage types in between. It might be a good idea to space them out.

@szetszwo do you recall?

Contributor (@szetszwo, Apr 1, 2025):
I also don't recall.

"... it was impossible to insert any new storage types in between. ..."

Not really -- we may use 1.1 if there is a need.

Also, why would it have to be inserted in between? We may put the new policy at the end. I don't think storage policy has a total ordering.

}
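
A minimal sketch of the spacing idea raised in the thread above; the numbers below are hypothetical and not part of this patch:

// Hypothetical only: leaving gaps between enum numbers so future storage
// types could be added between existing ones without renumbering.
enum StorageType {
  DISK_TYPE = 10;
  SSD_TYPE = 20;
  ARCHIVE_TYPE = 30;
  RAM_DISK_TYPE = 40;
  PROVIDED_TYPE = 50;
}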

message ECReplicationConfig {
required int32 data = 1;
required int32 parity = 2;
@@ -178,19 +178,21 @@ message StorageReportProto {
optional uint64 capacity = 3 [default = 0];
optional uint64 scmUsed = 4 [default = 0];
optional uint64 remaining = 5 [default = 0];
optional StorageTypeProto storageType = 6 [default = DISK];
optional StorageTypeProto storageType = 6 [default = DISK, deprecated = true];
Contributor:
It seems to be safe to just replace StorageTypeProto with Hdds.StorageTypeProto.

We should test it.

Contributor Author:
Thanks for your suggestion. I think this is compatible, but the Maven plugin proto-backwards-compatibility will report an error saying "field CONFLICT".

And the plugin proto-backwards-compatibility does not support ignoring a specific field.

Contributor Author:
This will also cause proto-backwards-compatibility to report an error. It seems that it is difficult for us to change the existing proto message.

[INFO] --- proto-backwards-compatibility:1.0.7:backwards-compatibility-check (default) @ hdds-interface-server ---
[INFO] protolock cmd line: /Users/xichen/community/ozone/hadoop-hdds/interface-server/target/protolock-bin/protolock status --lockdir=/Users/xichen/community/ozone/hadoop-hdds/interface-server/target/classes --protoroot=/Users/xichen/community/ozone/hadoop-hdds/interface-server/target/classes
[INFO] CONFLICT: "StorageTypeProto" field: "ARCHIVE" has been removed, but is not reserved [ScmServerDatanodeHeartbeatProtocol.proto]
[INFO] CONFLICT: "StorageTypeProto" field: "DISK" has been removed, but is not reserved [ScmServerDatanodeHeartbeatProtocol.proto]
[INFO] CONFLICT: "StorageTypeProto" field: "PROVIDED" has been removed, but is not reserved [ScmServerDatanodeHeartbeatProtocol.proto]
[INFO] CONFLICT: "StorageTypeProto" field: "RAM_DISK" has been removed, but is not reserved [ScmServerDatanodeHeartbeatProtocol.proto]
[INFO] CONFLICT: "StorageTypeProto" field: "SSD" has been removed, but is not reserved [ScmServerDatanodeHeartbeatProtocol.proto]
[INFO] CONFLICT: "StorageTypeProto" integer: "1" has been removed, but is not reserved [ScmServerDatanodeHeartbeatProtocol.proto]
[INFO] CONFLICT: "StorageTypeProto" integer: "2" has been removed, but is not reserved [ScmServerDatanodeHeartbeatProtocol.proto]
[INFO] CONFLICT: "StorageTypeProto" integer: "3" has been removed, but is not reserved [ScmServerDatanodeHeartbeatProtocol.proto]
[INFO] CONFLICT: "StorageTypeProto" integer: "4" has been removed, but is not reserved [ScmServerDatanodeHeartbeatProtocol.proto]
[INFO] CONFLICT: "StorageTypeProto" integer: "5" has been removed, but is not reserved [ScmServerDatanodeHeartbeatProtocol.proto]

optional bool failed = 7 [default = false];
optional uint64 committed = 8 [default = 0];
optional uint64 freeSpaceToSpare = 9 [default = 0];
optional StorageType storageTypeProto = 10 [default = DISK_TYPE];
}

message MetadataStorageReportProto {
required string storageLocation = 1;
optional StorageTypeProto storageType = 2 [default = DISK];
optional StorageTypeProto storageType = 2 [default = DISK, deprecated = true];
optional uint64 capacity = 3 [default = 0];
optional uint64 scmUsed = 4 [default = 0];
optional uint64 remaining = 5 [default = 0];
optional bool failed = 6 [default = false];
optional StorageType storageTypeProto = 7 [default = DISK_TYPE];
}

/**
@@ -54,9 +54,9 @@
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.DatanodeID;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandQueueReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
@@ -1098,16 +1098,16 @@ public Map<String, Long> getNodeInfo() {
}
List<StorageReportProto> storageReportProtos = node.getStorageReports();
for (StorageReportProto reportProto : storageReportProtos) {
if (reportProto.getStorageType() ==
StorageContainerDatanodeProtocolProtos.StorageTypeProto.DISK) {
if (reportProto.getStorageTypeProto() ==
HddsProtos.StorageType.DISK_TYPE) {
nodeInfo.compute(keyPrefix + UsageMetrics.DiskCapacity.name(),
(k, v) -> v + reportProto.getCapacity());
nodeInfo.compute(keyPrefix + UsageMetrics.DiskRemaining.name(),
(k, v) -> v + reportProto.getRemaining());
nodeInfo.compute(keyPrefix + UsageMetrics.DiskUsed.name(),
(k, v) -> v + reportProto.getScmUsed());
} else if (reportProto.getStorageType() ==
StorageContainerDatanodeProtocolProtos.StorageTypeProto.SSD) {
} else if (reportProto.getStorageTypeProto() ==
HddsProtos.StorageType.SSD_TYPE) {
nodeInfo.compute(keyPrefix + UsageMetrics.SSDCapacity.name(),
(k, v) -> v + reportProto.getCapacity());
nodeInfo.compute(keyPrefix + UsageMetrics.SSDRemaining.name(),
@@ -39,6 +39,7 @@
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageType;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ClosePipelineInfo;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
@@ -51,7 +52,6 @@
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
@@ -223,8 +223,8 @@ public static StorageReportProto getRandomStorageReport(UUID nodeId,
return createStorageReport(nodeId, path,
random.nextInt(1000),
random.nextInt(500),
random.nextInt(500),
StorageTypeProto.DISK);
random.nextInt(500),
StorageType.DISK_TYPE);
}

/**
Expand All @@ -240,7 +240,7 @@ public static MetadataStorageReportProto getRandomMetadataStorageReport(
random.nextInt(1000),
random.nextInt(500),
random.nextInt(500),
StorageTypeProto.DISK);
StorageType.DISK_TYPE);
}

public static StorageReportProto createStorageReport(UUID nodeId, String path,
Expand All @@ -249,11 +249,11 @@ public static StorageReportProto createStorageReport(UUID nodeId, String path,
capacity,
0,
capacity,
StorageTypeProto.DISK);
StorageType.DISK_TYPE);
}

public static StorageReportProto createStorageReport(UUID nodeId, String path,
long capacity, long used, long remaining, StorageTypeProto type) {
long capacity, long used, long remaining, StorageType type) {
return createStorageReport(nodeId, path, capacity, used, remaining,
type, false);
}
@@ -270,7 +270,7 @@ public static StorageReportProto createStorageReport(UUID nodeId, String path,
* @return StorageReportProto
*/
public static StorageReportProto createStorageReport(UUID nodeId, String path,
long capacity, long used, long remaining, StorageTypeProto type,
long capacity, long used, long remaining, StorageType type,
boolean failed) {
Preconditions.checkNotNull(nodeId);
Preconditions.checkNotNull(path);
@@ -281,9 +281,9 @@ public static StorageReportProto createStorageReport(UUID nodeId, String path,
.setScmUsed(used)
.setFailed(failed)
.setRemaining(remaining);
StorageTypeProto storageTypeProto =
type == null ? StorageTypeProto.DISK : type;
srb.setStorageType(storageTypeProto);
StorageType storageType =
type == null ? StorageType.DISK_TYPE : type;
srb.setStorageTypeProto(storageType);
return srb.build();
}

@@ -293,12 +293,12 @@ public static MetadataStorageReportProto createMetadataStorageReport(
capacity,
0,
capacity,
StorageTypeProto.DISK, false);
StorageType.DISK_TYPE, false);
}

public static MetadataStorageReportProto createMetadataStorageReport(
String path, long capacity, long used, long remaining,
StorageTypeProto type) {
StorageType type) {
return createMetadataStorageReport(path, capacity, used, remaining,
type, false);
}
@@ -316,7 +316,7 @@ public static MetadataStorageReportProto createMetadataStorageReport(
*/
public static MetadataStorageReportProto createMetadataStorageReport(
String path, long capacity, long used, long remaining,
StorageTypeProto type, boolean failed) {
StorageType type, boolean failed) {
Preconditions.checkNotNull(path);
MetadataStorageReportProto.Builder srb = MetadataStorageReportProto
.newBuilder();
@@ -325,9 +325,9 @@ public static MetadataStorageReportProto createMetadataStorageReport(
.setScmUsed(used)
.setFailed(failed)
.setRemaining(remaining);
StorageTypeProto storageTypeProto =
type == null ? StorageTypeProto.DISK : type;
srb.setStorageType(storageTypeProto);
StorageType storageType =
type == null ? StorageType.DISK_TYPE : type;
srb.setStorageTypeProto(storageType);
return srb.build();
}

@@ -17,8 +17,8 @@

package org.apache.hadoop.hdds.scm;

import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageType.DISK_TYPE;
import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.CLOSED;
import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto.DISK;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
@@ -482,14 +482,14 @@ public void testDatanodeIsInvalidInCaseOfIncreasingCommittedBytes() {
// capacity = 200000, used = 90000, remaining = 101000, committed = 500
StorageContainerDatanodeProtocolProtos.StorageReportProto storageReport1 =
HddsTestUtils.createStorageReport(UUID.randomUUID(), "/data/hdds",
200000, 90000, 101000, DISK).toBuilder()
200000, 90000, 101000, DISK_TYPE).toBuilder()
.setCommitted(500)
.setFreeSpaceToSpare(10000)
.build();
// capacity = 200000, used = 90000, remaining = 101000, committed = 1000
StorageContainerDatanodeProtocolProtos.StorageReportProto storageReport2 =
HddsTestUtils.createStorageReport(UUID.randomUUID(), "/data/hdds",
200000, 90000, 101000, DISK).toBuilder()
200000, 90000, 101000, DISK_TYPE).toBuilder()
.setCommitted(1000)
.setFreeSpaceToSpare(100000)
.build();
@@ -22,6 +22,7 @@
/**
* Ozone specific storage types.
*/
@Deprecated
Contributor:
Is it really deprecated? If so, what replaces it?

public enum StorageType {
RAM_DISK,
SSD,
@@ -754,7 +754,7 @@ message BucketInfo {
required string bucketName = 2;
repeated OzoneAclInfo acls = 3;
required bool isVersionEnabled = 4 [default = false];
required StorageTypeProto storageType = 5 [default = DISK];
optional StorageTypeProto storageType = 5 [default = DISK, deprecated = true];
Contributor:
Is this change from required to optional backward compatible?
Imagine a new OM server and an old client. The new server omits this field while the client expects it. Wouldn't the client fail to parse?
optional uint64 creationTime = 6;
repeated hadoop.hdds.KeyValue metadata = 7;
optional BucketEncryptionInfoProto beinfo = 8;
@@ -843,7 +843,7 @@ message BucketArgs {
required string volumeName = 1;
required string bucketName = 2;
optional bool isVersionEnabled = 5;
optional StorageTypeProto storageType = 6;
optional StorageTypeProto storageType = 6 [deprecated = true];
repeated hadoop.hdds.KeyValue metadata = 7;
optional uint64 quotaInBytes = 8;
optional uint64 quotaInNamespace = 9;