diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
index e30da44e8b00..889ce0a3769d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java
@@ -20,9 +20,9 @@
 import java.io.IOException;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto;
 import org.apache.hadoop.ozone.container.common.interfaces.StorageLocationReportMXBean;
 import org.apache.hadoop.ozone.container.common.volume.VolumeUsage;
 
@@ -107,52 +107,52 @@ public StorageType getStorageType() {
     return storageType;
   }
 
-  private StorageTypeProto getStorageTypeProto() throws IOException {
+  private HddsProtos.StorageType getStorageTypeProto() throws IOException {
     return getStorageTypeProto(getStorageType());
   }
 
-  public static StorageTypeProto getStorageTypeProto(StorageType type)
+  public static HddsProtos.StorageType getStorageTypeProto(StorageType type)
       throws IOException {
-    StorageTypeProto storageTypeProto;
+    HddsProtos.StorageType storageType;
     switch (type) {
     case SSD:
-      storageTypeProto = StorageTypeProto.SSD;
+      storageType = HddsProtos.StorageType.SSD_TYPE;
       break;
     case DISK:
-      storageTypeProto = StorageTypeProto.DISK;
+      storageType = HddsProtos.StorageType.DISK_TYPE;
      break;
     case ARCHIVE:
-      storageTypeProto = StorageTypeProto.ARCHIVE;
+      storageType = HddsProtos.StorageType.ARCHIVE_TYPE;
       break;
     case PROVIDED:
-      storageTypeProto = StorageTypeProto.PROVIDED;
+      storageType = HddsProtos.StorageType.PROVIDED_TYPE;
       break;
     case RAM_DISK:
-      storageTypeProto = StorageTypeProto.RAM_DISK;
+      storageType = HddsProtos.StorageType.RAM_DISK_TYPE;
       break;
     default:
       throw new IOException("Illegal Storage Type specified");
     }
-    return storageTypeProto;
+    return storageType;
   }
 
-  private static StorageType getStorageType(StorageTypeProto proto) throws
+  private static StorageType getStorageType(HddsProtos.StorageType proto) throws
       IOException {
     StorageType storageType;
     switch (proto) {
-    case SSD:
+    case SSD_TYPE:
       storageType = StorageType.SSD;
       break;
-    case DISK:
+    case DISK_TYPE:
       storageType = StorageType.DISK;
       break;
-    case ARCHIVE:
+    case ARCHIVE_TYPE:
       storageType = StorageType.ARCHIVE;
       break;
-    case PROVIDED:
+    case PROVIDED_TYPE:
       storageType = StorageType.PROVIDED;
       break;
-    case RAM_DISK:
+    case RAM_DISK_TYPE:
       storageType = StorageType.RAM_DISK;
       break;
     default:
@@ -179,7 +179,7 @@ public StorageReportProto getProtoBufMessage(ConfigurationSource conf)
         .setScmUsed(getScmUsed())
         .setRemaining(getRemaining())
         .setCommitted(getCommitted())
-        .setStorageType(getStorageTypeProto())
+        .setStorageTypeProto(getStorageTypeProto())
         .setStorageLocation(getStorageLocation())
         .setFailed(isFailed())
         .setFreeSpaceToSpare(conf != null ?
@@ -200,7 +200,7 @@ public MetadataStorageReportProto getMetadataProtoBufMessage()
     return srb.setCapacity(getCapacity())
         .setScmUsed(getScmUsed())
         .setRemaining(getRemaining())
-        .setStorageType(getStorageTypeProto())
+        .setStorageTypeProto(getStorageTypeProto())
         .setStorageLocation(getStorageLocation())
         .setFailed(isFailed())
         .build();
@@ -224,8 +224,8 @@ public static StorageLocationReport getFromProtobuf(StorageReportProto report)
     if (report.hasScmUsed()) {
       builder.setScmUsed(report.getScmUsed());
     }
-    if (report.hasStorageType()) {
-      builder.setStorageType(getStorageType(report.getStorageType()));
+    if (report.hasStorageTypeProto()) {
+      builder.setStorageType(getStorageType(report.getStorageTypeProto()));
     }
     if (report.hasRemaining()) {
       builder.setRemaining(report.getRemaining());
@@ -254,8 +254,8 @@ public static StorageLocationReport getMetadataFromProtobuf(
     if (report.hasScmUsed()) {
       builder.setScmUsed(report.getScmUsed());
     }
-    if (report.hasStorageType()) {
-      builder.setStorageType(getStorageType(report.getStorageType()));
+    if (report.hasStorageTypeProto()) {
+      builder.setStorageType(getStorageType(report.getStorageTypeProto()));
     }
     if (report.hasRemaining()) {
       builder.setRemaining(report.getRemaining());
diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
index 6cf58d3b1e70..757380b06341 100644
--- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto
+++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
@@ -307,6 +307,14 @@ enum ReplicationFactor {
     ZERO = 0; // Invalid Factor
 }
 
+enum StorageType {
+    DISK_TYPE = 1;
+    SSD_TYPE = 2;
+    ARCHIVE_TYPE = 3;
+    RAM_DISK_TYPE = 4;
+    PROVIDED_TYPE = 5;
+}
+
 message ECReplicationConfig {
     required int32 data = 1;
     required int32 parity = 2;
diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
index 648cf77883dd..c9a00924ec0f 100644
--- a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
+++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto
@@ -178,19 +178,21 @@ message StorageReportProto {
   optional uint64 capacity = 3 [default = 0];
   optional uint64 scmUsed = 4 [default = 0];
   optional uint64 remaining = 5 [default = 0];
-  optional StorageTypeProto storageType = 6 [default = DISK];
+  optional StorageTypeProto storageType = 6 [default = DISK, deprecated = true];
   optional bool failed = 7 [default = false];
   optional uint64 committed = 8 [default = 0];
   optional uint64 freeSpaceToSpare = 9 [default = 0];
+  optional StorageType storageTypeProto = 10 [default = DISK_TYPE];
 }
 
 message MetadataStorageReportProto {
   required string storageLocation = 1;
-  optional StorageTypeProto storageType = 2 [default = DISK];
+  optional StorageTypeProto storageType = 2 [default = DISK, deprecated = true];
   optional uint64 capacity = 3 [default = 0];
   optional uint64 scmUsed = 4 [default = 0];
   optional uint64 remaining = 5 [default = 0];
   optional bool failed = 6 [default = false];
+  optional StorageType storageTypeProto = 7 [default = DISK_TYPE];
 }
 
 /**
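Compatibility note on the two proto changes above: `storageTypeProto` is a new `optional` field, so a pre-upgrade reader simply ignores it and keeps seeing the deprecated `storageType` field's `DISK` default. In the opposite direction there is a gap: the patched `getFromProtobuf` reads only the new field, so a report sent by a not-yet-upgraded datanode carries no storage type at all. If mixed-version clusters matter, a reader-side fallback along the lines below would bridge the two fields. This is a sketch only; `StorageTypeCompat` is a hypothetical helper, not part of this change.

```java
import java.io.IOException;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;

/**
 * Hypothetical compatibility shim: prefer the new HddsProtos.StorageType
 * field, and fall back to the deprecated StorageTypeProto for reports sent
 * by datanodes that predate this change.
 */
public final class StorageTypeCompat {

  private StorageTypeCompat() { }

  @SuppressWarnings("deprecation") // reads the deprecated field on purpose
  static HddsProtos.StorageType resolve(StorageReportProto report)
      throws IOException {
    if (report.hasStorageTypeProto()) {
      return report.getStorageTypeProto();
    }
    // Old writers only set storageType; map it onto the new enum.
    switch (report.getStorageType()) {
    case SSD:
      return HddsProtos.StorageType.SSD_TYPE;
    case DISK:
      return HddsProtos.StorageType.DISK_TYPE;
    case ARCHIVE:
      return HddsProtos.StorageType.ARCHIVE_TYPE;
    case PROVIDED:
      return HddsProtos.StorageType.PROVIDED_TYPE;
    case RAM_DISK:
      return HddsProtos.StorageType.RAM_DISK_TYPE;
    default:
      throw new IOException("Illegal Storage Type specified");
    }
  }
}
```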
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index b3b02cb4c54c..8789896c4d7d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -54,9 +54,9 @@
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.DatanodeID;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandQueueReportProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
@@ -1098,16 +1098,16 @@ public Map<String, Long> getNodeInfo() {
       }
       List<StorageReportProto> storageReportProtos = node.getStorageReports();
       for (StorageReportProto reportProto : storageReportProtos) {
-        if (reportProto.getStorageType() ==
-            StorageContainerDatanodeProtocolProtos.StorageTypeProto.DISK) {
+        if (reportProto.getStorageTypeProto() ==
+            HddsProtos.StorageType.DISK_TYPE) {
           nodeInfo.compute(keyPrefix + UsageMetrics.DiskCapacity.name(),
               (k, v) -> v + reportProto.getCapacity());
           nodeInfo.compute(keyPrefix + UsageMetrics.DiskRemaining.name(),
               (k, v) -> v + reportProto.getRemaining());
           nodeInfo.compute(keyPrefix + UsageMetrics.DiskUsed.name(),
               (k, v) -> v + reportProto.getScmUsed());
-        } else if (reportProto.getStorageType() ==
-            StorageContainerDatanodeProtocolProtos.StorageTypeProto.SSD) {
+        } else if (reportProto.getStorageTypeProto() ==
+            HddsProtos.StorageType.SSD_TYPE) {
           nodeInfo.compute(keyPrefix + UsageMetrics.SSDCapacity.name(),
               (k, v) -> v + reportProto.getCapacity());
           nodeInfo.compute(keyPrefix + UsageMetrics.SSDRemaining.name(),
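One detail worth keeping in mind about the `getNodeInfo()` hunk above: `Map.compute` with `(k, v) -> v + …` assumes each `UsageMetrics` key was seeded into `nodeInfo` before the loop; if a key were missing, `v` would be `null` and the lambda would throw. A minimal, self-contained sketch of the same aggregation pattern, with illustrative class, method, and key names:

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;

final class UsageAggregator {

  private UsageAggregator() { }

  /** Sum capacity per storage type; keys are seeded so compute() never sees null. */
  static Map<String, Long> aggregate(List<StorageReportProto> reports) {
    Map<String, Long> usage = new HashMap<>();
    usage.put("DiskCapacity", 0L);
    usage.put("SSDCapacity", 0L);
    for (StorageReportProto report : reports) {
      if (report.getStorageTypeProto() == HddsProtos.StorageType.DISK_TYPE) {
        usage.compute("DiskCapacity", (k, v) -> v + report.getCapacity());
      } else if (report.getStorageTypeProto() == HddsProtos.StorageType.SSD_TYPE) {
        usage.compute("SSDCapacity", (k, v) -> v + report.getCapacity());
      }
    }
    return usage;
  }
}
```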
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
index 9551ca63b4f4..8d2b87303b5e 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
@@ -39,6 +39,7 @@
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageType;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ClosePipelineInfo;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
@@ -51,7 +52,6 @@
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
@@ -223,8 +223,8 @@ public static StorageReportProto getRandomStorageReport(UUID nodeId,
     return createStorageReport(nodeId, path,
         random.nextInt(1000),
         random.nextInt(500),
-        random.nextInt(500),
-        StorageTypeProto.DISK);
+        random.nextInt(500),
+        StorageType.DISK_TYPE);
   }
 
   /**
@@ -240,7 +240,7 @@ public static MetadataStorageReportProto getRandomMetadataStorageReport(
         random.nextInt(1000),
         random.nextInt(500),
         random.nextInt(500),
-        StorageTypeProto.DISK);
+        StorageType.DISK_TYPE);
   }
 
   public static StorageReportProto createStorageReport(UUID nodeId, String path,
@@ -249,11 +249,11 @@ public static StorageReportProto createStorageReport(UUID nodeId, String path,
         capacity,
         0,
         capacity,
-        StorageTypeProto.DISK);
+        StorageType.DISK_TYPE);
   }
 
   public static StorageReportProto createStorageReport(UUID nodeId, String path,
-      long capacity, long used, long remaining, StorageTypeProto type) {
+      long capacity, long used, long remaining, StorageType type) {
     return createStorageReport(nodeId, path, capacity, used, remaining, type,
         false);
   }
@@ -270,7 +270,7 @@ public static StorageReportProto createStorageReport(UUID nodeId, String path,
    * @return StorageReportProto
    */
   public static StorageReportProto createStorageReport(UUID nodeId, String path,
-      long capacity, long used, long remaining, StorageTypeProto type,
+      long capacity, long used, long remaining, StorageType type,
       boolean failed) {
     Preconditions.checkNotNull(nodeId);
     Preconditions.checkNotNull(path);
@@ -281,9 +281,9 @@ public static StorageReportProto createStorageReport(UUID nodeId, String path,
         .setScmUsed(used)
         .setFailed(failed)
         .setRemaining(remaining);
-    StorageTypeProto storageTypeProto =
-        type == null ? StorageTypeProto.DISK : type;
-    srb.setStorageType(storageTypeProto);
+    StorageType storageType =
+        type == null ? StorageType.DISK_TYPE : type;
+    srb.setStorageTypeProto(storageType);
     return srb.build();
   }
 
@@ -293,12 +293,12 @@ public static MetadataStorageReportProto createMetadataStorageReport(
         capacity,
         0,
         capacity,
-        StorageTypeProto.DISK, false);
+        StorageType.DISK_TYPE, false);
   }
 
   public static MetadataStorageReportProto createMetadataStorageReport(
       String path, long capacity, long used, long remaining,
-      StorageTypeProto type) {
+      StorageType type) {
     return createMetadataStorageReport(path, capacity, used, remaining, type,
         false);
   }
@@ -316,7 +316,7 @@ public static MetadataStorageReportProto createMetadataStorageReport(
    */
   public static MetadataStorageReportProto createMetadataStorageReport(
       String path, long capacity, long used, long remaining,
-      StorageTypeProto type, boolean failed) {
+      StorageType type, boolean failed) {
     Preconditions.checkNotNull(path);
     MetadataStorageReportProto.Builder srb = MetadataStorageReportProto
         .newBuilder();
@@ -325,9 +325,9 @@ public static MetadataStorageReportProto createMetadataStorageReport(
         .setScmUsed(used)
         .setFailed(failed)
         .setRemaining(remaining);
-    StorageTypeProto storageTypeProto =
-        type == null ? StorageTypeProto.DISK : type;
-    srb.setStorageType(storageTypeProto);
+    StorageType storageType =
+        type == null ? StorageType.DISK_TYPE : type;
+    srb.setStorageTypeProto(storageType);
     return srb.build();
   }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java
index 8b9dfe873e80..da22dc4d146a 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java
@@ -17,8 +17,8 @@
 
 package org.apache.hadoop.hdds.scm;
 
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageType.DISK_TYPE;
 import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.CLOSED;
-import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto.DISK;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
@@ -482,14 +482,14 @@ public void testDatanodeIsInvalidInCaseOfIncreasingCommittedBytes() {
     // capacity = 200000, used = 90000, remaining = 101000, committed = 500
     StorageContainerDatanodeProtocolProtos.StorageReportProto storageReport1 =
         HddsTestUtils.createStorageReport(UUID.randomUUID(), "/data/hdds",
-            200000, 90000, 101000, DISK).toBuilder()
+            200000, 90000, 101000, DISK_TYPE).toBuilder()
             .setCommitted(500)
             .setFreeSpaceToSpare(10000)
             .build();
     // capacity = 200000, used = 90000, remaining = 101000, committed = 1000
     StorageContainerDatanodeProtocolProtos.StorageReportProto storageReport2 =
         HddsTestUtils.createStorageReport(UUID.randomUUID(), "/data/hdds",
-            200000, 90000, 101000, DISK).toBuilder()
+            200000, 90000, 101000, DISK_TYPE).toBuilder()
             .setCommitted(1000)
             .setFreeSpaceToSpare(100000)
             .build();
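With the helpers above migrated to `HddsProtos.StorageType`, building a report in a test looks like this. A small sketch assuming the patched `HddsTestUtils`; the values and the `/data/ssd1` path are arbitrary:

```java
import java.util.UUID;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageType;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdds.scm.HddsTestUtils;

class StorageReportExample {

  static StorageReportProto newSsdReport() {
    // A healthy SSD volume expressed with the new enum; committed bytes
    // are layered on via toBuilder(), as TestSCMCommonPlacementPolicy does.
    return HddsTestUtils
        .createStorageReport(UUID.randomUUID(), "/data/ssd1",
            1_000_000, 200_000, 800_000, StorageType.SSD_TYPE)
        .toBuilder()
        .setCommitted(1_000)
        .build();
  }
}
```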
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/hdds/protocol/StorageType.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/hdds/protocol/StorageType.java
index 1180c33ccc8c..a22b78873ab9 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/hdds/protocol/StorageType.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/hdds/protocol/StorageType.java
@@ -22,6 +22,7 @@
 /**
  * Ozone specific storage types.
  */
+@Deprecated
 public enum StorageType {
   RAM_DISK,
   SSD,
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index df97028a0f31..445ec2644b20 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -754,7 +754,7 @@ message BucketInfo {
     required string bucketName = 2;
     repeated OzoneAclInfo acls = 3;
     required bool isVersionEnabled = 4 [default = false];
-    required StorageTypeProto storageType = 5 [default = DISK];
+    optional StorageTypeProto storageType = 5 [default = DISK, deprecated = true];
     optional uint64 creationTime = 6;
     repeated hadoop.hdds.KeyValue metadata = 7;
     optional BucketEncryptionInfoProto beinfo = 8;
@@ -843,7 +843,7 @@ message BucketArgs {
     required string volumeName = 1;
     required string bucketName = 2;
     optional bool isVersionEnabled = 5;
-    optional StorageTypeProto storageType = 6;
+    optional StorageTypeProto storageType = 6 [deprecated = true];
     repeated hadoop.hdds.KeyValue metadata = 7;
     optional uint64 quotaInBytes = 8;
     optional uint64 quotaInNamespace = 9;
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
index eb09cf3ca705..a548b6f01f7b 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
@@ -86,7 +86,6 @@
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
@@ -382,13 +381,13 @@ public void setUp() throws Exception {
             .setRevision("3346f493fa1690358add7bb9f3e5b52545993f36")
             .build();
     StorageReportProto storageReportProto1 =
-        StorageReportProto.newBuilder().setStorageType(StorageTypeProto.DISK)
+        StorageReportProto.newBuilder().setStorageTypeProto(HddsProtos.StorageType.DISK_TYPE)
             .setStorageLocation("/disk1").setScmUsed(10000).setRemaining(5400)
             .setCapacity(25000)
             .setStorageUuid(UUID.randomUUID().toString())
             .setFailed(false).build();
     StorageReportProto storageReportProto2 =
-        StorageReportProto.newBuilder().setStorageType(StorageTypeProto.DISK)
+        StorageReportProto.newBuilder().setStorageTypeProto(HddsProtos.StorageType.DISK_TYPE)
             .setStorageLocation("/disk2").setScmUsed(25000).setRemaining(10000)
             .setCapacity(50000)
             .setStorageUuid(UUID.randomUUID().toString())
@@ -412,13 +411,13 @@ public void setUp() throws Exception {
             .setRevision("3346f493fa1690358add7bb9f3e5b52545993f36")
             .build();
     StorageReportProto storageReportProto3 =
-        StorageReportProto.newBuilder().setStorageType(StorageTypeProto.DISK)
+        StorageReportProto.newBuilder().setStorageTypeProto(HddsProtos.StorageType.DISK_TYPE)
             .setStorageLocation("/disk1").setScmUsed(20000).setRemaining(7800)
             .setCapacity(50000)
             .setStorageUuid(UUID.randomUUID().toString())
             .setFailed(false).build();
     StorageReportProto storageReportProto4 =
-        StorageReportProto.newBuilder().setStorageType(StorageTypeProto.DISK)
+        StorageReportProto.newBuilder().setStorageTypeProto(HddsProtos.StorageType.DISK_TYPE)
             .setStorageLocation("/disk2").setScmUsed(60000).setRemaining(10000)
             .setCapacity(80000)
             .setStorageUuid(UUID.randomUUID().toString())
@@ -443,13 +442,13 @@ public void setUp() throws Exception {
             .setRevision("3346f493fa1690358add7bb9f3e5b52545993f36")
             .build();
     StorageReportProto storageReportProto5 =
-        StorageReportProto.newBuilder().setStorageType(StorageTypeProto.DISK)
+        StorageReportProto.newBuilder().setStorageTypeProto(HddsProtos.StorageType.DISK_TYPE)
             .setStorageLocation("/disk1").setScmUsed(20000).setRemaining(7800)
             .setCapacity(50000)
             .setStorageUuid(UUID.randomUUID().toString())
             .setFailed(false).build();
     StorageReportProto storageReportProto6 =
-        StorageReportProto.newBuilder().setStorageType(StorageTypeProto.DISK)
+        StorageReportProto.newBuilder().setStorageTypeProto(HddsProtos.StorageType.DISK_TYPE)
             .setStorageLocation("/disk2").setScmUsed(60000).setRemaining(10000)
             .setCapacity(80000)
             .setStorageUuid(UUID.randomUUID().toString())
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java
index 5537d6b5cee6..840fafb1165a 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java
@@ -57,7 +57,6 @@
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
@@ -302,7 +301,7 @@ public void setUp() throws Exception {
 
     StorageReportProto storageReportProto1 =
         StorageReportProto.newBuilder()
-            .setStorageType(StorageTypeProto.DISK)
+            .setStorageTypeProto(HddsProtos.StorageType.DISK_TYPE)
             .setStorageLocation("/disk1")
             .setScmUsed(10 * OzoneConsts.GB)
             .setRemaining(90 * OzoneConsts.GB)
@@ -312,7 +311,7 @@ public void setUp() throws Exception {
 
     StorageReportProto storageReportProto2 =
         StorageReportProto.newBuilder()
-            .setStorageType(StorageTypeProto.DISK)
+            .setStorageTypeProto(HddsProtos.StorageType.DISK_TYPE)
             .setStorageLocation("/disk2")
             .setScmUsed(10 * OzoneConsts.GB)
             .setRemaining(90 * OzoneConsts.GB)
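Relaxing `BucketInfo.storageType` from `required` to `optional` means proto2 readers can no longer assume the field was explicitly sent, only that `getStorageType()` falls back to the `DISK` default. A defensive read could look like the sketch below, assuming the generated `OzoneManagerProtocolProtos` classes; the helper class itself is hypothetical:

```java
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.StorageTypeProto;

final class BucketStorageTypeReader {

  private BucketStorageTypeReader() { }

  @SuppressWarnings("deprecation") // the field is deprecated but still readable
  static StorageTypeProto storageTypeOf(BucketInfo info) {
    // Now that the field is optional, hasStorageType() distinguishes
    // "never sent" from an explicit value; the getter's default is DISK.
    return info.hasStorageType() ? info.getStorageType() : StorageTypeProto.DISK;
  }
}
```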
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java
index 455413d10e1b..5a4b52c15f65 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java
@@ -49,6 +49,7 @@
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
@@ -59,7 +60,6 @@
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
@@ -898,7 +898,7 @@ private static StorageReportProto createStorageReport(UUID nodeId) {
         .setScmUsed(0)
         .setFailed(false)
         .setRemaining(100 * OzoneConsts.TB)
-        .setStorageType(StorageTypeProto.DISK);
+        .setStorageTypeProto(HddsProtos.StorageType.DISK_TYPE);
     return srb.build();
   }
 
@@ -910,7 +910,7 @@ private static MetadataStorageReportProto createMetadataStorageReport() {
         .setScmUsed(0)
         .setFailed(false)
         .setRemaining(100 * OzoneConsts.GB)
-        .setStorageType(StorageTypeProto.DISK);
+        .setStorageTypeProto(HddsProtos.StorageType.DISK_TYPE);
     return mrb.build();
   }
diff --git a/pom.xml b/pom.xml
index 47fa1b83ba86..f1d3b59e5005 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1700,6 +1700,18 @@
             ${project.build.directory}/generated-sources/protobuf/java
+
+          true
+
+            ${basedir}/target/generated-sources/java/
+            ${basedir}/target/generated-sources/protobuf/java/
+
+          true
+          Deprecated StorageTypeProto
+
+            org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto
+
+
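Finally, an end-to-end round trip through `StorageLocationReport` exercises both directions of the new field. This is an untested sketch; it assumes the builder accepts exactly the fields shown and relies on the `conf != null` fallback visible in the patched `getProtoBufMessage`:

```java
import java.io.IOException;
import java.util.UUID;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;

final class StorageReportRoundTrip {

  private StorageReportRoundTrip() { }

  static void check() throws IOException {
    StorageReportProto proto = StorageReportProto.newBuilder()
        .setStorageUuid(UUID.randomUUID().toString())
        .setStorageLocation("/data/disk1")
        .setCapacity(100)
        .setScmUsed(40)
        .setRemaining(60)
        .setStorageTypeProto(HddsProtos.StorageType.DISK_TYPE)
        .build();

    // Proto -> report: getFromProtobuf now keys off hasStorageTypeProto().
    StorageLocationReport report = StorageLocationReport.getFromProtobuf(proto);

    // Report -> proto: a null conf takes the freeSpaceToSpare fallback branch.
    StorageReportProto echoed = report.getProtoBufMessage(null);
    assert echoed.getStorageTypeProto() == HddsProtos.StorageType.DISK_TYPE;
  }
}
```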