@@ -20,9 +20,9 @@
import java.io.IOException;
import net.jcip.annotations.Immutable;
import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto;
import org.apache.hadoop.ozone.container.common.interfaces.StorageLocationReportMXBean;

/**
@@ -227,36 +227,6 @@ public static StorageLocationReport getFromProtobuf(StorageReportProto report)
return builder.build();
}

-/**
-* Returns the StorageLocationReport from the protoBuf message.
-* @param report MetadataStorageReportProto
-* @return StorageLocationReport
-* @throws IOException in case of invalid storage type
-*/
-
-public static StorageLocationReport getMetadataFromProtobuf(
-MetadataStorageReportProto report) throws IOException {
-StorageLocationReport.Builder builder = StorageLocationReport.newBuilder();
-builder.setStorageLocation(report.getStorageLocation());
-if (report.hasCapacity()) {
-builder.setCapacity(report.getCapacity());
-}
-if (report.hasScmUsed()) {
-builder.setScmUsed(report.getScmUsed());
-}
-if (report.hasStorageType()) {
-builder.setStorageType(getStorageType(report.getStorageType()));
-}
-if (report.hasRemaining()) {
-builder.setRemaining(report.getRemaining());
-}
-
-if (report.hasFailed()) {
-builder.setFailed(report.getFailed());
-}
-return builder.build();
-}
-
/**
* Returns StorageLocation.Builder instance.
*
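Note: the surviving getFromProtobuf path converts the wire enum to org.apache.hadoop.fs.StorageType through a helper (see the getStorageType call kept above), which is why relocating the enum into HddsProtos only touches imports. A minimal sketch of such a mapping, assuming a switch-based helper; the class and method names below are illustrative, not the actual Ozone code:

```java
import java.io.IOException;

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto;

// Illustrative sketch only: a switch-based proto-to-Java mapping like the
// getStorageType helper used by getFromProtobuf. Names are assumptions.
final class StorageTypeMapper {
  private StorageTypeMapper() {
  }

  static StorageType toStorageType(StorageTypeProto proto) throws IOException {
    switch (proto) {
    case DISK:
      return StorageType.DISK;
    case SSD:
      return StorageType.SSD;
    case ARCHIVE:
      return StorageType.ARCHIVE;
    case RAM_DISK:
      return StorageType.RAM_DISK;
    case PROVIDED:
      return StorageType.PROVIDED;
    default:
      // Mirrors the javadoc contract: invalid storage type => IOException.
      throw new IOException("Unknown storage type: " + proto);
    }
  }
}
```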
11 changes: 11 additions & 0 deletions hadoop-hdds/interface-client/src/main/proto/hdds.proto
@@ -150,6 +150,17 @@ message KeyValue {
optional string value = 2;
}

+/**
+* Types of storage media.
+*/
+enum StorageTypeProto {
+DISK = 1;
+SSD = 2;
+ARCHIVE = 3;
+RAM_DISK = 4;
+PROVIDED = 5;
+}
+
/**
* Type of the node.
*/
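Note: once the enum lives in hdds.proto, every module compiles against the single generated type HddsProtos.StorageTypeProto. A rough usage sketch against MetadataStorageReportProto, whose fields all appear elsewhere in this diff; the wrapper class and the literal values are made up for illustration:

```java
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto;

// Rough sketch: populate a metadata storage report with the relocated enum.
// The wrapper class and literal values are illustrative only.
public final class MetadataReportExample {
  public static MetadataStorageReportProto buildReport() {
    return MetadataStorageReportProto.newBuilder()
        .setStorageLocation("/data/metadata")     // assumed sample path
        .setCapacity(100L * 1024 * 1024 * 1024)   // 100 GiB
        .setScmUsed(25L * 1024 * 1024 * 1024)
        .setRemaining(75L * 1024 * 1024 * 1024)
        .setStorageType(StorageTypeProto.SSD)
        .setFailed(false)
        .build();
  }
}
```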
25 changes: 25 additions & 0 deletions hadoop-hdds/interface-client/src/main/resources/proto.lock
@@ -1827,6 +1827,31 @@
}
]
},
+{
+"name": "StorageTypeProto",
+"enum_fields": [
+{
+"name": "DISK",
+"integer": 1
+},
+{
+"name": "SSD",
+"integer": 2
+},
+{
+"name": "ARCHIVE",
+"integer": 3
+},
+{
+"name": "RAM_DISK",
+"integer": 4
+},
+{
+"name": "PROVIDED",
+"integer": 5
+}
+]
+},
{
"name": "NodeType",
"enum_fields": [
@@ -193,16 +193,6 @@ message MetadataStorageReportProto {
optional bool failed = 6 [default = false];
}

-/**
-* Types of recognized storage media.
-*/
-enum StorageTypeProto {
-DISK = 1;
-SSD = 2;
-ARCHIVE = 3;
-RAM_DISK = 4;
-PROVIDED = 5;
-}

message ContainerReportsProto {
repeated ContainerReplicaProto reports = 1;
25 changes: 0 additions & 25 deletions hadoop-hdds/interface-server/src/main/resources/proto.lock
@@ -805,31 +805,6 @@
}
]
},
-{
-"name": "StorageTypeProto",
-"enum_fields": [
-{
-"name": "DISK",
-"integer": 1
-},
-{
-"name": "SSD",
-"integer": 2
-},
-{
-"name": "ARCHIVE",
-"integer": 3
-},
-{
-"name": "RAM_DISK",
-"integer": 4
-},
-{
-"name": "PROVIDED",
-"integer": 5
-}
-]
-},
{
"name": "ContainerReplicaProto.State",
"enum_fields": [
@@ -56,7 +56,7 @@
import org.apache.hadoop.hdds.protocol.DatanodeID;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandQueueReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
@@ -1098,16 +1098,14 @@ public Map<String, Long> getNodeInfo() {
}
List<StorageReportProto> storageReportProtos = node.getStorageReports();
for (StorageReportProto reportProto : storageReportProtos) {
-if (reportProto.getStorageType() ==
-StorageContainerDatanodeProtocolProtos.StorageTypeProto.DISK) {
+if (reportProto.getStorageType() == StorageTypeProto.DISK) {
nodeInfo.compute(keyPrefix + UsageMetrics.DiskCapacity.name(),
(k, v) -> v + reportProto.getCapacity());
nodeInfo.compute(keyPrefix + UsageMetrics.DiskRemaining.name(),
(k, v) -> v + reportProto.getRemaining());
nodeInfo.compute(keyPrefix + UsageMetrics.DiskUsed.name(),
(k, v) -> v + reportProto.getScmUsed());
-} else if (reportProto.getStorageType() ==
-StorageContainerDatanodeProtocolProtos.StorageTypeProto.SSD) {
+} else if (reportProto.getStorageType() == StorageTypeProto.SSD) {
nodeInfo.compute(keyPrefix + UsageMetrics.SSDCapacity.name(),
(k, v) -> v + reportProto.getCapacity());
nodeInfo.compute(keyPrefix + UsageMetrics.SSDRemaining.name(),
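Note: the Map.compute accumulation above dereferences v inside the (k, v) -> v + … lambda, so it relies on every keyPrefix + UsageMetrics.* key being seeded before the loop. A self-contained sketch of the same pattern; the key names and values here are illustrative, not the real metric names:

```java
import java.util.HashMap;
import java.util.Map;

// Self-contained sketch of the compute-based accumulation in getNodeInfo.
public final class UsageAggregationSketch {
  public static void main(String[] args) {
    Map<String, Long> nodeInfo = new HashMap<>();
    // Pre-seed every key so v is never null inside the remapping lambda.
    nodeInfo.put("InServiceDiskCapacity", 0L);
    nodeInfo.put("InServiceDiskRemaining", 0L);

    long[][] reports = {{100L, 40L}, {250L, 90L}}; // {capacity, remaining}
    for (long[] report : reports) {
      long capacity = report[0];
      long remaining = report[1];
      nodeInfo.compute("InServiceDiskCapacity", (k, v) -> v + capacity);
      nodeInfo.compute("InServiceDiskRemaining", (k, v) -> v + remaining);
    }
    // Prints 350 and 130 respectively.
    System.out.println(nodeInfo.get("InServiceDiskCapacity"));
    System.out.println(nodeInfo.get("InServiceDiskRemaining"));
  }
}
```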
@@ -39,6 +39,7 @@
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ClosePipelineInfo;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
@@ -51,7 +52,6 @@
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
@@ -17,8 +17,8 @@

package org.apache.hadoop.hdds.scm;

+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto.DISK;
import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.CLOSED;
-import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto.DISK;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
@@ -17,7 +17,7 @@

package org.apache.hadoop.hdds.protocol;

-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.StorageTypeProto;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto;

/**
* Ozone specific storage types.
@@ -21,6 +21,7 @@
import static org.junit.jupiter.api.Assertions.assertNotNull;

import java.util.UUID;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto;
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.ozone.ClientVersion;
import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -88,7 +89,7 @@ public void testCreateBucketWithOlderClient() throws Exception {
OzoneManagerProtocolProtos.BucketInfo.newBuilder()
.setVolumeName(volumeName).setBucketName(buckName)
.setIsVersionEnabled(false).setStorageType(
-OzoneManagerProtocolProtos.StorageTypeProto.DISK)
+StorageTypeProto.DISK)
.build())
.build()).build();
createBucketReq = createBucketReq.toBuilder()
@@ -754,7 +754,7 @@ message BucketInfo {
required string bucketName = 2;
repeated OzoneAclInfo acls = 3;
required bool isVersionEnabled = 4 [default = false];
-required StorageTypeProto storageType = 5 [default = DISK];
+required hadoop.hdds.StorageTypeProto storageType = 5 [default = DISK];
optional uint64 creationTime = 6;
repeated hadoop.hdds.KeyValue metadata = 7;
optional BucketEncryptionInfoProto beinfo = 8;
@@ -772,13 +772,6 @@ message BucketInfo {
optional hadoop.hdds.DefaultReplicationConfig defaultReplicationConfig = 20;
}

-enum StorageTypeProto {
-DISK = 1;
-SSD = 2;
-ARCHIVE = 3;
-RAM_DISK = 4;
-}
-
enum BucketLayoutProto {
LEGACY = 1;
FILE_SYSTEM_OPTIMIZED = 2;
@@ -843,7 +836,7 @@ message BucketArgs {
required string volumeName = 1;
required string bucketName = 2;
optional bool isVersionEnabled = 5;
-optional StorageTypeProto storageType = 6;
+optional hadoop.hdds.StorageTypeProto storageType = 6;
repeated hadoop.hdds.KeyValue metadata = 7;
optional uint64 quotaInBytes = 8;
optional uint64 quotaInNamespace = 9;
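Note: the OM-local enum removed above stopped at RAM_DISK = 4; the shared hadoop.hdds.StorageTypeProto keeps those tag numbers and adds PROVIDED = 5, so previously written BucketInfo records decode unchanged while OM buckets gain the PROVIDED type. A small sanity-check sketch, assuming the standard protobuf-generated getNumber()/forNumber() accessors:

```java
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto;

// Sanity-check sketch: the shared values keep their tag numbers, so old
// BucketInfo bytes decode to the same constants under the moved enum.
public final class StorageTypeWireCompat {
  public static void main(String[] args) {
    if (StorageTypeProto.DISK.getNumber() != 1
        || StorageTypeProto.SSD.getNumber() != 2
        || StorageTypeProto.ARCHIVE.getNumber() != 3
        || StorageTypeProto.RAM_DISK.getNumber() != 4) {
      throw new AssertionError("tag numbers changed");
    }
    // PROVIDED = 5 becomes visible to OM only after this consolidation.
    System.out.println(StorageTypeProto.forNumber(5)); // PROVIDED
  }
}
```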
27 changes: 3 additions & 24 deletions hadoop-ozone/interface-client/src/main/resources/proto.lock
@@ -982,27 +982,6 @@
}
]
},
-{
-"name": "StorageTypeProto",
-"enum_fields": [
-{
-"name": "DISK",
-"integer": 1
-},
-{
-"name": "SSD",
-"integer": 2
-},
-{
-"name": "ARCHIVE",
-"integer": 3
-},
-{
-"name": "RAM_DISK",
-"integer": 4
-}
-]
-},
{
"name": "BucketLayoutProto",
"enum_fields": [
@@ -3071,7 +3050,7 @@
{
"id": 5,
"name": "storageType",
"type": "StorageTypeProto",
"type": "hadoop.hdds.StorageTypeProto",
"required": true,
"options": [
{
@@ -3330,7 +3309,7 @@
{
"id": 6,
"name": "storageType",
"type": "StorageTypeProto",
"type": "hadoop.hdds.StorageTypeProto",
"optional": true
},
{
@@ -8330,4 +8309,4 @@
}
}
]
-}
+}
@@ -38,6 +38,7 @@
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto;
import org.apache.hadoop.hdds.utils.TransactionInfo;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.OzoneConsts;
@@ -468,7 +469,7 @@ private OMBucketCreateResponse createBucket(String volumeName,

BucketInfo.Builder bucketInfo =
newBucketInfoBuilder(bucketName, volumeName)
-.setStorageType(OzoneManagerProtocolProtos.StorageTypeProto.DISK);
+.setStorageType(StorageTypeProto.DISK);
OzoneManagerProtocolProtos.OMRequest omRequest =
OMRequestTestUtils.newCreateBucketRequest(bucketInfo).build();

@@ -741,7 +741,7 @@ public static BucketInfo.Builder newBucketInfoBuilder(
return BucketInfo.newBuilder()
.setBucketName(bucketName)
.setVolumeName(volumeName)
-.setStorageType(OzoneManagerProtocolProtos.StorageTypeProto.SSD)
+.setStorageType(HddsProtos.StorageTypeProto.SSD)
.setIsVersionEnabled(false)
.addAllMetadata(getMetadataList());
}
@@ -33,6 +33,7 @@
import java.nio.file.Path;
import java.util.UUID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMMetadataManager;
@@ -95,7 +96,7 @@ public void testUserInfoInCaseOfHadoopTransport() throws Exception {
BucketInfo.Builder bucketInfo =
newBucketInfoBuilder(bucketName, volumeName)
.setIsVersionEnabled(true)
-.setStorageType(OzoneManagerProtocolProtos.StorageTypeProto.DISK);
+.setStorageType(StorageTypeProto.DISK);
OMRequest omRequest = newCreateBucketRequest(bucketInfo).build();

OMBucketCreateRequest omBucketCreateRequest =
@@ -30,6 +30,7 @@
import java.util.UUID;
import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.exceptions.OMException;
@@ -41,7 +42,6 @@
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.StorageTypeProto;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Time;
import org.junit.jupiter.api.Test;
@@ -78,6 +78,7 @@
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.PipelineID;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
@@ -86,7 +87,6 @@
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;