From 73464d90bc1a0f51c23ad86ffaf1b2b49f9e1527 Mon Sep 17 00:00:00 2001 From: xBis7 Date: Fri, 2 Jun 2023 19:54:24 +0300 Subject: [PATCH 01/15] List snapdiff job --- .../hadoop/ozone/client/ObjectStore.java | 16 + .../ozone/client/protocol/ClientProtocol.java | 14 + .../hadoop/ozone/client/rpc/RpcClient.java | 15 + .../java/org/apache/hadoop/ozone/OmUtils.java | 1 + .../ozone/om/helpers}/SnapshotDiffJob.java | 31 +- .../om/protocol/OzoneManagerProtocol.java | 17 + ...ManagerProtocolClientSideTranslatorPB.java | 28 ++ .../ozone/snapshot/SnapshotDiffResponse.java | 9 + .../main/smoketest/snapshot/snapshot-sh.robot | 7 + .../src/main/proto/OmClientProtocol.proto | 27 ++ .../hadoop/ozone/om/OmSnapshotManager.java | 14 +- .../apache/hadoop/ozone/om/OzoneManager.java | 8 + .../service/SnapshotDiffCleanupService.java | 2 +- .../om/snapshot/SnapshotDiffManager.java | 35 ++ .../OzoneManagerRequestHandler.java | 23 ++ .../TestSnapshotDiffCleanupService.java | 2 +- .../om/snapshot/TestSnapshotDiffManager.java | 323 ++++++++++++++++++ .../ozone/client/ClientProtocolStub.java | 7 + .../snapshot/ListSnapshotDiffHandler.java | 78 +++++ .../shell/snapshot/SnapshotCommands.java | 3 +- 20 files changed, 655 insertions(+), 5 deletions(-) rename hadoop-ozone/{ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot => common/src/main/java/org/apache/hadoop/ozone/om/helpers}/SnapshotDiffJob.java (85%) create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/ListSnapshotDiffHandler.java diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java index 592fb252794e..c3c57fe6507d 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java @@ -40,6 +40,7 @@ import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; import org.apache.hadoop.ozone.om.helpers.S3VolumeContext; +import org.apache.hadoop.ozone.om.helpers.SnapshotDiffJob; import org.apache.hadoop.ozone.om.helpers.TenantStateList; import org.apache.hadoop.ozone.om.helpers.TenantUserInfoValue; import org.apache.hadoop.ozone.om.helpers.TenantUserList; @@ -611,4 +612,19 @@ public SnapshotDiffResponse snapshotDiff(String volumeName, return proxy.snapshotDiff(volumeName, bucketName, fromSnapshot, toSnapshot, token, pageSize, forceFullDiff); } + + /** + * Get a list of the SnapshotDiff jobs for a bucket based on the JobStatus. + * @param volumeName Name of the volume to which the snapshotted bucket belong + * @param bucketName Name of the bucket to which the snapshots belong + * @param jobStatus JobStatus to be used to filter the snapshot diff jobs + * @return a list of SnapshotDiffJob objects + * @throws IOException in case there is a failure while getting a response. 
+ */ + public List listSnapshotDiffJobs(String volumeName, + String bucketName, + String jobStatus) + throws IOException { + return proxy.listSnapshotDiffJobs(volumeName, bucketName, jobStatus); + } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java index ceb3fcad1c54..9434b5ddca9c 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java @@ -55,6 +55,7 @@ import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; import org.apache.hadoop.ozone.om.helpers.S3VolumeContext; +import org.apache.hadoop.ozone.om.helpers.SnapshotDiffJob; import org.apache.hadoop.ozone.om.helpers.TenantStateList; import org.apache.hadoop.ozone.om.helpers.TenantUserInfoValue; import org.apache.hadoop.ozone.om.helpers.TenantUserList; @@ -1072,6 +1073,19 @@ SnapshotDiffResponse snapshotDiff(String volumeName, String bucketName, boolean forceFullDiff) throws IOException; + /** + * Get a list of the SnapshotDiff jobs for a bucket based on the JobStatus. + * @param volumeName Name of the volume to which the snapshotted bucket belong + * @param bucketName Name of the bucket to which the snapshots belong + * @param jobStatus JobStatus to be used to filter the snapshot diff jobs + * @return a list of SnapshotDiffJob objects + * @throws IOException in case there is a failure while getting a response. + */ + List listSnapshotDiffJobs(String volumeName, + String bucketName, + String jobStatus) + throws IOException; + /** * Time to be set for given Ozone object. This operations updates modification * time and access time for the given key. diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 81d10381a911..d184fdc8dee6 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -134,6 +134,7 @@ import org.apache.hadoop.ozone.om.helpers.S3VolumeContext; import org.apache.hadoop.ozone.om.helpers.ServiceInfo; import org.apache.hadoop.ozone.om.helpers.ServiceInfoEx; +import org.apache.hadoop.ozone.om.helpers.SnapshotDiffJob; import org.apache.hadoop.ozone.om.helpers.TenantStateList; import org.apache.hadoop.ozone.om.helpers.TenantUserInfoValue; import org.apache.hadoop.ozone.om.helpers.TenantUserList; @@ -996,6 +997,20 @@ public SnapshotDiffResponse snapshotDiff(String volumeName, fromSnapshot, toSnapshot, token, pageSize, forceFullDiff); } + @Override + public List listSnapshotDiffJobs(String volumeName, + String bucketName, + String jobStatus) + throws IOException { + Preconditions.checkArgument(Strings.isNotBlank(volumeName), + "volume can't be null or empty."); + Preconditions.checkArgument(Strings.isNotBlank(bucketName), + "bucket can't be null or empty."); + + return ozoneManagerClient.listSnapshotDiffJobs( + volumeName, bucketName, jobStatus); + } + /** * List snapshots in a volume/bucket. 
* @param volumeName volume name diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index f366ae18758a..16ac60dda39e 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -265,6 +265,7 @@ public static boolean isReadOnly( // operation SetRangerServiceVersion. case GetKeyInfo: case SnapshotDiff: + case ListSnapshotDiffJob: case TransferLeadership: return true; case CreateVolume: diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffJob.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotDiffJob.java similarity index 85% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffJob.java rename to hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotDiffJob.java index 50e968a75c8e..380483f15a13 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffJob.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotDiffJob.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.om.snapshot; +package org.apache.hadoop.ozone.om.helpers; import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.databind.DeserializationFeature; @@ -23,6 +23,7 @@ import java.io.IOException; import java.util.Objects; import org.apache.hadoop.hdds.utils.db.Codec; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotDiffJobProto; import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus; /** @@ -185,6 +186,34 @@ public int hashCode() { fromSnapshot, toSnapshot, forceFullDiff, totalDiffEntries); } + public SnapshotDiffJobProto toProtoBuf() { + return SnapshotDiffJobProto.newBuilder() + .setCreationTime(creationTime) + .setJobId(jobId) + .setStatus(status.toProtobuf()) + .setVolume(volume) + .setBucket(bucket) + .setFromSnapshot(fromSnapshot) + .setToSnapshot(toSnapshot) + .setForceFullDiff(forceFullDiff) + .setTotalDiffEntries(totalDiffEntries) + .build(); + } + + public static SnapshotDiffJob getFromProtoBuf( + SnapshotDiffJobProto diffJobProto) { + return new SnapshotDiffJob( + diffJobProto.getCreationTime(), + diffJobProto.getJobId(), + JobStatus.fromProtobuf(diffJobProto.getStatus()), + diffJobProto.getVolume(), + diffJobProto.getBucket(), + diffJobProto.getFromSnapshot(), + diffJobProto.getToSnapshot(), + diffJobProto.getForceFullDiff(), + diffJobProto.getTotalDiffEntries()); + } + /** * Codec to encode SnapshotDiffJob as byte array. 
*/ diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java index 1a9f3c6a0b34..e1df456896fc 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java @@ -52,6 +52,7 @@ import org.apache.hadoop.ozone.om.helpers.S3VolumeContext; import org.apache.hadoop.ozone.om.helpers.ServiceInfo; import org.apache.hadoop.ozone.om.helpers.ServiceInfoEx; +import org.apache.hadoop.ozone.om.helpers.SnapshotDiffJob; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.TenantStateList; import org.apache.hadoop.ozone.om.helpers.TenantUserInfoValue; @@ -724,6 +725,22 @@ default SnapshotDiffResponse snapshotDiff(String volumeName, "this to be implemented"); } + /** + * Get a list of the SnapshotDiff jobs for a bucket based on the JobStatus. + * @param volumeName Name of the volume to which the snapshotted bucket belong + * @param bucketName Name of the bucket to which the snapshots belong + * @param jobStatus JobStatus to be used to filter the snapshot diff jobs + * @return a list of SnapshotDiffJob objects + * @throws IOException in case there is a failure while getting a response. + */ + default List listSnapshotDiffJobs(String volumeName, + String bucketName, + String jobStatus) + throws IOException { + throw new UnsupportedOperationException("OzoneManager does not require " + + "this to be implemented"); + } + /** * Assign admin role to a user identified by an accessId in a tenant. * @param accessId access ID. diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index 39206e509477..6677a822e5dd 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -64,6 +64,7 @@ import org.apache.hadoop.ozone.om.helpers.S3VolumeContext; import org.apache.hadoop.ozone.om.helpers.ServiceInfo; import org.apache.hadoop.ozone.om.helpers.ServiceInfoEx; +import org.apache.hadoop.ozone.om.helpers.SnapshotDiffJob; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.TenantStateList; import org.apache.hadoop.ozone.om.helpers.TenantUserInfoValue; @@ -1231,6 +1232,33 @@ public SnapshotDiffResponse snapshotDiff(String volumeName, diffResponse.getWaitTimeInMs()); } + /** + * {@inheritDoc} + */ + @Override + public List listSnapshotDiffJobs(String volumeName, + String bucketName, + String jobStatus) + throws IOException { + final OzoneManagerProtocolProtos + .ListSnapshotDiffJobRequest.Builder requestBuilder = + OzoneManagerProtocolProtos + .ListSnapshotDiffJobRequest.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setJobStatus(jobStatus); + + final OMRequest omRequest = createOMRequest(Type.ListSnapshotDiffJob) + .setListSnapshotDiffJobRequest(requestBuilder) + .build(); + final OMResponse omResponse = submitRequest(omRequest); + handleError(omResponse); + return omResponse.getListSnapshotDiffJobResponse() + 
.getSnapshotDiffJobList().stream() + .map(SnapshotDiffJob::getFromProtoBuf) + .collect(Collectors.toList()); + } + /** * {@inheritDoc} */ diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffResponse.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffResponse.java index 6cd9bff03922..c5fdac14362f 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffResponse.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffResponse.java @@ -41,6 +41,15 @@ public JobStatusProto toProtobuf() { public static JobStatus fromProtobuf(JobStatusProto jobStatusProto) { return JobStatus.valueOf(jobStatusProto.name()); } + + public static JobStatus getJobStatusFromString(String jobStatus) { + for (JobStatus status : JobStatus.values()) { + if (status.toString().equalsIgnoreCase(jobStatus)) { + return status; + } + } + return null; + } } private final SnapshotDiffReportOzone snapshotDiffReport; diff --git a/hadoop-ozone/dist/src/main/smoketest/snapshot/snapshot-sh.robot b/hadoop-ozone/dist/src/main/smoketest/snapshot/snapshot-sh.robot index e76ce5dc2290..f563e57b2e11 100644 --- a/hadoop-ozone/dist/src/main/smoketest/snapshot/snapshot-sh.robot +++ b/hadoop-ozone/dist/src/main/smoketest/snapshot/snapshot-sh.robot @@ -53,6 +53,13 @@ Snapshot Diff Should contain ${result} + ${KEY_TWO} Should contain ${result} + ${KEY_THREE} +List Snapshot Diff Jobs + ${result} = Execute ozone sh snapshot listSnapshotDiff /${VOLUME}/${BUCKET} all + Should contain ${result} ${VOLUME} + Should contain ${result} ${BUCKET} + Should contain ${result} ${SNAPSHOT_ONE} + Should contain ${result} ${SNAPSHOT_TWO} + Read Snapshot Key Should Match Local File /${VOLUME}/${BUCKET}/${SNAPSHOT_INDICATOR}/${SNAPSHOT_ONE}/${KEY_ONE} /etc/hosts Key Should Match Local File /${VOLUME}/${BUCKET}/${SNAPSHOT_INDICATOR}/${SNAPSHOT_TWO}/${KEY_TWO} /etc/passwd diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index 869713bf3c70..9567e3de6040 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -135,6 +135,7 @@ enum Type { SnapshotPurge = 118; RecoverLease = 119; SetTimes = 120; + ListSnapshotDiffJob = 121; } message OMRequest { @@ -254,6 +255,8 @@ message OMRequest { optional RecoverLeaseRequest RecoverLeaseRequest = 119; optional SetTimesRequest SetTimesRequest = 120; + + optional ListSnapshotDiffJobRequest ListSnapshotDiffJobRequest = 121; } message OMResponse { @@ -365,6 +368,8 @@ message OMResponse { optional SnapshotPurgeResponse SnapshotPurgeResponse = 118; optional RecoverLeaseResponse RecoverLeaseResponse = 119; optional SetTimesResponse SetTimesResponse = 120; + + optional ListSnapshotDiffJobResponse ListSnapshotDiffJobResponse = 121; } enum Status { @@ -789,6 +794,18 @@ message SnapshotInfo { optional int64 dbTxSequenceNumber = 12; } +message SnapshotDiffJobProto { + optional uint64 creationTime = 1; + optional string jobId = 2; + optional SnapshotDiffResponse.JobStatusProto status = 3; + optional string volume = 4; + optional string bucket = 5; + optional string fromSnapshot = 6; + optional string toSnapshot = 7; + optional bool forceFullDiff = 8; + optional uint64 totalDiffEntries = 9; +} + message OzoneObj { enum ObjectType { VOLUME = 1; @@ -1709,6 +1726,12 @@ message SnapshotDiffRequest { optional 
bool forceFullDiff = 7; } +message ListSnapshotDiffJobRequest { + optional string volumeName = 1; + optional string bucketName = 2; + optional string jobStatus = 3; +} + message DeleteSnapshotRequest { optional string volumeName = 1; optional string bucketName = 2; @@ -1791,6 +1814,10 @@ message SnapshotDiffResponse { optional int64 waitTimeInMs = 3; } +message ListSnapshotDiffJobResponse { + repeated SnapshotDiffJobProto snapshotDiffJob = 1; +} + message DeleteSnapshotResponse { } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index 537e9c712f64..f59c10f8e0a8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -57,7 +57,7 @@ import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.service.SnapshotDiffCleanupService; -import org.apache.hadoop.ozone.om.snapshot.SnapshotDiffJob; +import org.apache.hadoop.ozone.om.helpers.SnapshotDiffJob; import org.apache.hadoop.ozone.om.snapshot.SnapshotDiffManager; import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.hadoop.ozone.snapshot.SnapshotDiffReportOzone; @@ -487,6 +487,11 @@ public static String getOzonePathKeyWithVolumeBucketNames( return OM_KEY_PREFIX + volumeId + OM_KEY_PREFIX + bucketId + OM_KEY_PREFIX; } + @VisibleForTesting + public SnapshotDiffManager getSnapshotDiffManager() { + return snapshotDiffManager; + } + /** * Helper method to locate the end key with the given prefix and iterator. 
* @param keyIter TableIterator @@ -681,6 +686,13 @@ public SnapshotDiffResponse getSnapshotDiffReport(final String volume, return snapshotDiffReport; } + public List getSnapshotDiffList(final String volumeName, + final String bucketName, + final String jobStatus) { + return snapshotDiffManager.getSnapshotDiffJobList( + volumeName, bucketName, jobStatus); + } + private void validateSnapshotsExistAndActive(final String volumeName, final String bucketName, final String fromSnapshotName, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index aa31bacad632..acfbf4b15603 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -87,6 +87,7 @@ import org.apache.hadoop.hdds.utils.db.Table.KeyValue; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.OzoneManagerVersion; +import org.apache.hadoop.ozone.om.helpers.SnapshotDiffJob; import org.apache.hadoop.ozone.om.ratis_snapshot.OmRatisSnapshotProvider; import org.apache.hadoop.ozone.om.ha.OMHAMetrics; import org.apache.hadoop.ozone.om.helpers.KeyInfoWithVolumeContext; @@ -4531,6 +4532,13 @@ public SnapshotDiffResponse snapshotDiff(String volume, fromSnapshot, toSnapshot, token, pageSize, forceFullDiff); } + public List listSnapshotDiffJobs(String volume, + String bucket, + String jobStatus) { + return omSnapshotManager.getSnapshotDiffList(volume, + bucket, jobStatus); + } + @Override // ReconfigureProtocol public String getServerName() { return "OM"; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDiffCleanupService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDiffCleanupService.java index dd8a3ad4ef5c..ba2a4e64d95f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDiffCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDiffCleanupService.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hdds.utils.db.managed.ManagedWriteBatch; import org.apache.hadoop.hdds.utils.db.managed.ManagedWriteOptions; import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.snapshot.SnapshotDiffJob; +import org.apache.hadoop.ozone.om.helpers.SnapshotDiffJob; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.RocksDBException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java index 69a4adcee842..2be84dd23ee1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om.snapshot; +import com.google.common.annotations.VisibleForTesting; import com.google.common.cache.LoadingCache; import java.io.BufferedWriter; @@ -70,6 +71,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotDiffJob; import 
org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.WithObjectID; import org.apache.hadoop.ozone.snapshot.SnapshotDiffReportOzone; @@ -248,6 +250,11 @@ public SnapshotDiffManager(ManagedRocksDB db, this.loadJobsOnStartUp(); } + @VisibleForTesting + public PersistentMap getSnapDiffJobTable() { + return snapDiffJobTable; + } + private Optional initSSTDumpTool( final OzoneConfiguration conf) { if (!NativeLibraryLoader.getInstance() @@ -371,6 +378,34 @@ private Set getSSTFileListForSnapshot(OmSnapshot snapshot, .getPath(), tablesToLookUp); } + public List getSnapshotDiffJobList( + String volumeName, String bucketName, String jobStatus) { + List jobList = new ArrayList<>(); + + try (ClosableIterator> iterator = + snapDiffJobTable.iterator()) { + while (iterator.hasNext()) { + SnapshotDiffJob snapshotDiffJob = iterator.next().getValue(); + if (snapshotDiffJob.getVolume().equals(volumeName) && + snapshotDiffJob.getBucket().equals(bucketName)) { + if (jobStatus.equalsIgnoreCase("all")) { + jobList.add(snapshotDiffJob); + continue; + } + + // If provided job status is invalid, + // then all jobs on the table will be ignored. + // No need to check if getJobStatusFromString doesn't return null. + if (snapshotDiffJob.getStatus().equals( + JobStatus.getJobStatusFromString(jobStatus))) { + jobList.add(snapshotDiffJob); + } + } + } + } + return jobList; + } + public SnapshotDiffResponse getSnapshotDiffReport( final String volumeName, final String bucketName, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index 6dbe04e3644b..f732d7b7fe8a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -53,6 +53,7 @@ import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.ServiceInfo; import org.apache.hadoop.ozone.om.helpers.ServiceInfoEx; +import org.apache.hadoop.ozone.om.helpers.SnapshotDiffJob; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.TenantStateList; import org.apache.hadoop.ozone.om.helpers.TenantUserInfoValue; @@ -68,6 +69,8 @@ import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.upgrade.DisallowedUntilLayoutVersion; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListSnapshotDiffJobRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ListSnapshotDiffJobResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CheckVolumeAccessRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CheckVolumeAccessResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.EchoRPCRequest; @@ -302,6 +305,11 @@ public OMResponse handleReadRequest(OMRequest request) { request.getSnapshotDiffRequest()); responseBuilder.setSnapshotDiffResponse(snapshotDiffReport); break; + case ListSnapshotDiffJob: + ListSnapshotDiffJobResponse listSnapDiffResponse = + listSnapshotDiffJobs(request.getListSnapshotDiffJobRequest()); + responseBuilder.setListSnapshotDiffJobResponse(listSnapDiffResponse); + 
break; case EchoRPC: EchoRPCResponse echoRPCResponse = echoRPC(request.getEchoRPCRequest()); @@ -1245,6 +1253,21 @@ private SnapshotDiffResponse snapshotDiff( return builder.build(); } + private ListSnapshotDiffJobResponse listSnapshotDiffJobs( + ListSnapshotDiffJobRequest listSnapshotDiffJobRequest) { + List snapshotDiffJobs = + impl.listSnapshotDiffJobs( + listSnapshotDiffJobRequest.getVolumeName(), + listSnapshotDiffJobRequest.getBucketName(), + listSnapshotDiffJobRequest.getJobStatus()); + ListSnapshotDiffJobResponse.Builder builder = + ListSnapshotDiffJobResponse.newBuilder(); + for (SnapshotDiffJob diffJob : snapshotDiffJobs) { + builder.addSnapshotDiffJob(diffJob.toProtoBuf()); + } + return builder.build(); + } + public OzoneManager getOzoneManager() { return impl; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDiffCleanupService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDiffCleanupService.java index 9cbddb4e5bd1..a2d6b6885b46 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDiffCleanupService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestSnapshotDiffCleanupService.java @@ -27,7 +27,7 @@ import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator; import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.snapshot.SnapshotDiffJob; +import org.apache.hadoop.ozone.om.helpers.SnapshotDiffJob; import org.apache.hadoop.ozone.snapshot.SnapshotDiffReportOzone; import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus; import org.apache.ozone.test.GenericTestUtils; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java new file mode 100644 index 000000000000..6078cbfcd7f1 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java @@ -0,0 +1,323 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.om.snapshot; + +import org.apache.commons.lang3.RandomStringUtils; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmTestManagers; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.SnapshotDiffJob; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse; +import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.client.AuthenticationException; +import org.apache.ozone.test.GenericTestUtils; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.rocksdb.RocksDBException; + +import java.io.File; +import java.io.IOException; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ThreadLocalRandom; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; +import static org.apache.hadoop.ozone.om.OmSnapshotManager.DELIMITER; + +/** + * Tests for {@link SnapshotDiffManager}. 
+ */ +public class TestSnapshotDiffManager { + + private static final String VOLUME = "vol"; + private static final String BUCKET = "bucket"; + + private static File metaDir; + private static OzoneManager ozoneManager; + private static OMMetadataManager omMetadataManager; + private static SnapshotDiffManager snapshotDiffManager; + private static PersistentMap snapDiffJobTable; + + @BeforeAll + public static void init() throws AuthenticationException, + IOException, RocksDBException { + metaDir = GenericTestUtils.getRandomizedTestDir(); + if (!metaDir.exists()) { + Assertions.assertTrue(metaDir.mkdirs()); + } + OzoneConfiguration conf = new OzoneConfiguration(); + conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, + metaDir.getAbsolutePath()); + + OmTestManagers omTestManagers = new OmTestManagers(conf); + ozoneManager = omTestManagers.getOzoneManager(); + omMetadataManager = omTestManagers.getMetadataManager(); + + snapshotDiffManager = ozoneManager + .getOmSnapshotManager().getSnapshotDiffManager(); + snapDiffJobTable = snapshotDiffManager.getSnapDiffJobTable(); + + createVolumeAndBucket(); + } + + @AfterAll + public static void cleanUp() { + FileUtil.fullyDelete(metaDir); + } + + @Test + public void testListSnapshotDiffJobs() + throws IOException { + String fromSnapshotName = "snap-" + RandomStringUtils.randomNumeric(5); + String toSnapshotName = "snap-" + RandomStringUtils.randomNumeric(5); + String fromSnapshotId = UUID.randomUUID().toString(); + String toSnapshotId = UUID.randomUUID().toString(); + String diffJobKey = fromSnapshotId + DELIMITER + toSnapshotId; + + setUpKeysAndSnapshots(fromSnapshotName, toSnapshotName, + fromSnapshotId, toSnapshotId); + + SnapshotDiffJob diffJob = snapDiffJobTable.get(diffJobKey); + Assertions.assertNull(diffJob); + + // There are no jobs in the table, therefore + // the response list should be empty. + List jobList = snapshotDiffManager + .getSnapshotDiffJobList(VOLUME, BUCKET, "queued"); + Assertions.assertTrue(jobList.isEmpty()); + + jobList = snapshotDiffManager + .getSnapshotDiffJobList(VOLUME, BUCKET, "done"); + Assertions.assertTrue(jobList.isEmpty()); + + jobList = snapshotDiffManager + .getSnapshotDiffJobList(VOLUME, BUCKET, "in_progress"); + Assertions.assertTrue(jobList.isEmpty()); + + jobList = snapshotDiffManager + .getSnapshotDiffJobList(VOLUME, BUCKET, "all"); + Assertions.assertTrue(jobList.isEmpty()); + + // Submit a job. + SnapshotDiffResponse snapshotDiffResponse = snapshotDiffManager + .getSnapshotDiffReport(VOLUME, BUCKET, + fromSnapshotName, toSnapshotName, + 0, 0, false); + + // Response should be IN_PROGRESS. + Assertions.assertEquals(JobStatus.IN_PROGRESS, + snapshotDiffResponse.getJobStatus()); + + // Check snapDiffJobTable. + diffJob = snapDiffJobTable.get(diffJobKey); + Assertions.assertNotNull(diffJob); + // Status stored in the table should be IN_PROGRESS. + Assertions.assertEquals(JobStatus.IN_PROGRESS, + diffJob.getStatus()); + + // Response list for 'queued' and 'done' should be empty. + jobList = snapshotDiffManager + .getSnapshotDiffJobList(VOLUME, BUCKET, "queued"); + Assertions.assertTrue(jobList.isEmpty()); + + jobList = snapshotDiffManager + .getSnapshotDiffJobList(VOLUME, BUCKET, "done"); + Assertions.assertTrue(jobList.isEmpty()); + + // SnapshotDiffJob in the response list should be the same + // as the one we got from the table. 
+ jobList = snapshotDiffManager + .getSnapshotDiffJobList(VOLUME, BUCKET, "in_progress"); + Assertions.assertTrue(jobList.contains(diffJob)); + + jobList = snapshotDiffManager + .getSnapshotDiffJobList(VOLUME, BUCKET, "all"); + Assertions.assertTrue(jobList.contains(diffJob)); + + // Providing an invalid jobStatus results in an empty list. + jobList = snapshotDiffManager + .getSnapshotDiffJobList(VOLUME, BUCKET, "invalid"); + Assertions.assertTrue(jobList.isEmpty()); + + // Set up new snapshots to submit a second snapshot diff job. + String fromSnapshotName2 = "snap-" + RandomStringUtils.randomNumeric(5); + String toSnapshotName2 = "snap-" + RandomStringUtils.randomNumeric(5); + String fromSnapshotId2 = UUID.randomUUID().toString(); + String toSnapshotId2 = UUID.randomUUID().toString(); + String diffJobKey2 = fromSnapshotId2 + DELIMITER + toSnapshotId2; + + setUpKeysAndSnapshots(fromSnapshotName2, toSnapshotName2, + fromSnapshotId2, toSnapshotId2); + + // Submit a second job. + snapshotDiffManager.getSnapshotDiffReport(VOLUME, BUCKET, + fromSnapshotName2, toSnapshotName2, 0, 0, false); + SnapshotDiffJob diffJob2 = snapDiffJobTable.get(diffJobKey2); + + jobList = snapshotDiffManager + .getSnapshotDiffJobList(VOLUME, BUCKET, "all"); + + Assertions.assertEquals(2, jobList.size()); + Assertions.assertTrue(jobList.contains(diffJob2)); + } + + private void setUpKeysAndSnapshots(String fromSnapshotName, + String toSnapshotName, + String fromSnapshotId, + String toSnapshotId) + throws IOException { + // Get IDs. + long volumeId = omMetadataManager + .getVolumeId(VOLUME); + long bucketId = omMetadataManager + .getBucketId(VOLUME, BUCKET); + + // Create 5 keys. + for (int i = 0; i < 5; i++) { + OmKeyInfo omKeyInfo = createOmKeyInfo(bucketId); + String tableKey = omMetadataManager.getOzonePathKey(volumeId, + bucketId, bucketId, omKeyInfo.getFileName()); + omMetadataManager.getFileTable() + .addCacheEntry(new CacheKey<>(tableKey), + CacheValue.get(1, omKeyInfo)); + omMetadataManager.getFileTable().put(tableKey, omKeyInfo); + } + + // Create 1st snapshot and put it in SnapshotTable. + SnapshotInfo fromSnapshotInfo = SnapshotInfo + .newInstance(VOLUME, BUCKET, + fromSnapshotName, fromSnapshotId, + System.currentTimeMillis()); + fromSnapshotInfo.setSnapshotStatus(SnapshotInfo + .SnapshotStatus.SNAPSHOT_ACTIVE); + + String fromSnapKey = fromSnapshotInfo.getTableKey(); + + OmSnapshot omSnapshotFrom = new OmSnapshot( + ozoneManager.getKeyManager(), ozoneManager.getPrefixManager(), + ozoneManager, VOLUME, BUCKET, fromSnapshotName); + + ozoneManager.getOmSnapshotManager().getSnapshotCache() + .put(fromSnapKey, omSnapshotFrom); + + omMetadataManager.getSnapshotInfoTable() + .addCacheEntry(new CacheKey<>(fromSnapKey), + CacheValue.get(1, fromSnapshotInfo)); + omMetadataManager + .getSnapshotInfoTable().put(fromSnapKey, fromSnapshotInfo); + + // Create 20 keys. + for (int i = 0; i < 20; i++) { + OmKeyInfo omKeyInfo = createOmKeyInfo(bucketId); + String tableKey = omMetadataManager.getOzonePathKey(volumeId, + bucketId, bucketId, omKeyInfo.getFileName()); + omMetadataManager.getFileTable() + .addCacheEntry(new CacheKey<>(tableKey), + CacheValue.get(1, omKeyInfo)); + omMetadataManager.getFileTable().put(tableKey, omKeyInfo); + } + + // Create 2nd snapshot and put it in SnapshotTable. 
+ SnapshotInfo toSnapshotInfo = SnapshotInfo + .newInstance(VOLUME, BUCKET, + toSnapshotName, toSnapshotId, + System.currentTimeMillis()); + toSnapshotInfo.setSnapshotStatus(SnapshotInfo + .SnapshotStatus.SNAPSHOT_ACTIVE); + + String toSnapKey = toSnapshotInfo.getTableKey(); + + OmSnapshot omSnapshotTo = new OmSnapshot( + ozoneManager.getKeyManager(), ozoneManager.getPrefixManager(), + ozoneManager, VOLUME, BUCKET, toSnapshotName); + + ozoneManager.getOmSnapshotManager().getSnapshotCache() + .put(toSnapKey, omSnapshotTo); + + omMetadataManager.getSnapshotInfoTable() + .addCacheEntry(new CacheKey<>(toSnapKey), + CacheValue.get(1, toSnapshotInfo)); + omMetadataManager + .getSnapshotInfoTable().put(toSnapKey, toSnapshotInfo); + } + + private static void createVolumeAndBucket() throws IOException { + UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); + // Create volume and put it in VolumeTable. + OmVolumeArgs volumeArgs = OmVolumeArgs.newBuilder() + .setVolume(VOLUME) + .setAdminName(ugi.getShortUserName()) + .setOwnerName(ugi.getShortUserName()) + .build(); + String volumeKey = omMetadataManager + .getVolumeKey(VOLUME); + omMetadataManager.getVolumeTable() + .addCacheEntry(new CacheKey<>(volumeKey), + CacheValue.get(1, volumeArgs)); + omMetadataManager.getVolumeTable() + .put(volumeKey, volumeArgs); + + // Create bucket and put it in BucketTable. + OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() + .setVolumeName(VOLUME) + .setBucketName(BUCKET) + .setBucketLayout(BucketLayout.FILE_SYSTEM_OPTIMIZED) + .setOwner(ugi.getShortUserName()) + .build(); + String bucketKey = omMetadataManager + .getBucketKey(VOLUME, BUCKET); + + omMetadataManager.getBucketTable() + .addCacheEntry(new CacheKey<>(bucketKey), + CacheValue.get(1, bucketInfo)); + omMetadataManager.getBucketTable() + .put(bucketKey, bucketInfo); + } + + private OmKeyInfo createOmKeyInfo(long parentObjectId) { + String keyName = "key-" + RandomStringUtils.randomNumeric(5); + long objectId = ThreadLocalRandom.current().nextLong(100); + + return new OmKeyInfo.Builder() + .setVolumeName(VOLUME) + .setBucketName(BUCKET) + .setKeyName(keyName) + .setFileName(keyName) + .setReplicationConfig( + StandaloneReplicationConfig.getInstance(ONE)) + .setObjectID(objectId) + .setParentObjectID(parentObjectId) + .setDataSize(500L) + .setCreationTime(System.currentTimeMillis()) + .build(); + } +} diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java index 69f8a2f499a1..0add7e2d3247 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java @@ -40,6 +40,7 @@ import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; import org.apache.hadoop.ozone.om.helpers.S3VolumeContext; +import org.apache.hadoop.ozone.om.helpers.SnapshotDiffJob; import org.apache.hadoop.ozone.om.helpers.TenantStateList; import org.apache.hadoop.ozone.om.helpers.TenantUserInfoValue; import org.apache.hadoop.ozone.om.helpers.TenantUserList; @@ -642,6 +643,12 @@ public SnapshotDiffResponse snapshotDiff(String volumeName, return null; } + @Override + public List listSnapshotDiffJobs( + String volumeName, String bucketName, String jobStatus) { + return null; + } + @Override public void setTimes(OzoneObj obj, String keyName, long
mtime, long atime) throws IOException { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/ListSnapshotDiffHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/ListSnapshotDiffHandler.java new file mode 100644 index 000000000000..71dd7be5edfc --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/ListSnapshotDiffHandler.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.shell.snapshot; + +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.om.helpers.SnapshotDiffJob; +import org.apache.hadoop.ozone.shell.Handler; +import org.apache.hadoop.ozone.shell.OzoneAddress; +import org.apache.hadoop.ozone.shell.bucket.BucketUri; +import picocli.CommandLine; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; + +/** + * ozone sh snapshot listSnapshotDiff. + */ +@CommandLine.Command(name = "listSnapshotDiff", + description = "List snapshotDiff jobs for a bucket.") +public class ListSnapshotDiffHandler extends Handler { + + @CommandLine.Mixin + private BucketUri snapshotPath; + + @CommandLine.Parameters(description = "List jobs based on status.\n" + + "Accepted values are: queued, in_progress, done, failed, rejected, all", + defaultValue = "in_progress") + private String jobStatus; + + private static final String[] STATUS_VALUES = + {"queued", "in_progress", "done", "failed", "rejected", "all"}; + + @Override + protected OzoneAddress getAddress() { + return snapshotPath.getValue(); + } + + @Override + protected void execute(OzoneClient client, OzoneAddress address) + throws IOException { + + String volumeName = snapshotPath.getValue().getVolumeName(); + String bucketName = snapshotPath.getValue().getBucketName(); + + if (Arrays.asList(STATUS_VALUES) + .contains(jobStatus)) { + List jobList = + client.getObjectStore().listSnapshotDiffJobs( + volumeName, bucketName, jobStatus); + + int counter = printAsJsonArray(jobList.iterator(), + jobList.size()); + if (isVerbose()) { + System.out.printf("Found : %d snapshot diff jobs for o3://%s/ %s ", + counter, volumeName, bucketName); + } + } else { + System.out.println("Invalid job status, accepted values are: " + + Arrays.toString(STATUS_VALUES)); + } + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java index a2169e04dca7..0b3daab6a3f5 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java @@ 
-41,7 +41,8 @@ CreateSnapshotHandler.class, DeleteSnapshotHandler.class, ListSnapshotHandler.class, - SnapshotDiffHandler.class + SnapshotDiffHandler.class, + ListSnapshotDiffHandler.class }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) From 1a0807601c6b9a5c998e18f9f8866562e1f8b7d6 Mon Sep 17 00:00:00 2001 From: xBis7 Date: Fri, 2 Jun 2023 20:39:51 +0300 Subject: [PATCH 02/15] listAll option --- .../hadoop/ozone/client/ObjectStore.java | 6 +++-- .../ozone/client/protocol/ClientProtocol.java | 3 ++- .../hadoop/ozone/client/rpc/RpcClient.java | 5 ++-- .../om/protocol/OzoneManagerProtocol.java | 3 ++- ...ManagerProtocolClientSideTranslatorPB.java | 6 +++-- .../main/smoketest/snapshot/snapshot-sh.robot | 2 +- .../src/main/proto/OmClientProtocol.proto | 1 + .../hadoop/ozone/om/OmSnapshotManager.java | 5 ++-- .../apache/hadoop/ozone/om/OzoneManager.java | 7 +++--- .../om/snapshot/SnapshotDiffManager.java | 5 ++-- .../OzoneManagerRequestHandler.java | 3 ++- .../om/snapshot/TestSnapshotDiffManager.java | 25 +++++++++++-------- .../ozone/client/ClientProtocolStub.java | 3 ++- .../snapshot/ListSnapshotDiffHandler.java | 14 ++++++++--- 14 files changed, 56 insertions(+), 32 deletions(-) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java index c3c57fe6507d..89425ac53ed6 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java @@ -623,8 +623,10 @@ public SnapshotDiffResponse snapshotDiff(String volumeName, */ public List listSnapshotDiffJobs(String volumeName, String bucketName, - String jobStatus) + String jobStatus, + boolean listAll) throws IOException { - return proxy.listSnapshotDiffJobs(volumeName, bucketName, jobStatus); + return proxy.listSnapshotDiffJobs(volumeName, + bucketName, jobStatus, listAll); } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java index 9434b5ddca9c..1cd35b6e34dc 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java @@ -1083,7 +1083,8 @@ SnapshotDiffResponse snapshotDiff(String volumeName, String bucketName, */ List listSnapshotDiffJobs(String volumeName, String bucketName, - String jobStatus) + String jobStatus, + boolean listAll) throws IOException; /** diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index d184fdc8dee6..83decde3bde1 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -1000,7 +1000,8 @@ public SnapshotDiffResponse snapshotDiff(String volumeName, @Override public List listSnapshotDiffJobs(String volumeName, String bucketName, - String jobStatus) + String jobStatus, + boolean listAll) throws IOException { Preconditions.checkArgument(Strings.isNotBlank(volumeName), "volume can't be null or empty."); @@ -1008,7 +1009,7 @@ public List listSnapshotDiffJobs(String volumeName, "bucket can't be null or empty."); 
return ozoneManagerClient.listSnapshotDiffJobs( - volumeName, bucketName, jobStatus); + volumeName, bucketName, jobStatus, listAll); } /** diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java index e1df456896fc..1fcf6b994bef 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java @@ -735,7 +735,8 @@ default SnapshotDiffResponse snapshotDiff(String volumeName, */ default List listSnapshotDiffJobs(String volumeName, String bucketName, - String jobStatus) + String jobStatus, + boolean listAll) throws IOException { throw new UnsupportedOperationException("OzoneManager does not require " + "this to be implemented"); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index 6677a822e5dd..da2f905f8909 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -1238,7 +1238,8 @@ public SnapshotDiffResponse snapshotDiff(String volumeName, @Override public List listSnapshotDiffJobs(String volumeName, String bucketName, - String jobStatus) + String jobStatus, + boolean listAll) throws IOException { final OzoneManagerProtocolProtos .ListSnapshotDiffJobRequest.Builder requestBuilder = @@ -1246,7 +1247,8 @@ public List listSnapshotDiffJobs(String volumeName, .ListSnapshotDiffJobRequest.newBuilder() .setVolumeName(volumeName) .setBucketName(bucketName) - .setJobStatus(jobStatus); + .setJobStatus(jobStatus) + .setListAll(listAll); final OMRequest omRequest = createOMRequest(Type.ListSnapshotDiffJob) .setListSnapshotDiffJobRequest(requestBuilder) diff --git a/hadoop-ozone/dist/src/main/smoketest/snapshot/snapshot-sh.robot b/hadoop-ozone/dist/src/main/smoketest/snapshot/snapshot-sh.robot index f563e57b2e11..1cbdc33dc6ce 100644 --- a/hadoop-ozone/dist/src/main/smoketest/snapshot/snapshot-sh.robot +++ b/hadoop-ozone/dist/src/main/smoketest/snapshot/snapshot-sh.robot @@ -54,7 +54,7 @@ Snapshot Diff Should contain ${result} + ${KEY_THREE} List Snapshot Diff Jobs - ${result} = Execute ozone sh snapshot listSnapshotDiff /${VOLUME}/${BUCKET} all + ${result} = Execute ozone sh snapshot listSnapshotDiff /${VOLUME}/${BUCKET} --all Should contain ${result} ${VOLUME} Should contain ${result} ${BUCKET} Should contain ${result} ${SNAPSHOT_ONE} diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index 9567e3de6040..f1127d96b5fb 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -1730,6 +1730,7 @@ message ListSnapshotDiffJobRequest { optional string volumeName = 1; optional string bucketName = 2; optional string jobStatus = 3; + optional bool listAll = 4; } message DeleteSnapshotRequest { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index f59c10f8e0a8..00bfa9240f9b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -688,9 +688,10 @@ public SnapshotDiffResponse getSnapshotDiffReport(final String volume, public List getSnapshotDiffList(final String volumeName, final String bucketName, - final String jobStatus) { + final String jobStatus, + final boolean listAll) { return snapshotDiffManager.getSnapshotDiffJobList( - volumeName, bucketName, jobStatus); + volumeName, bucketName, jobStatus, listAll); } private void validateSnapshotsExistAndActive(final String volumeName, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index acfbf4b15603..fee10a19a0e4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -4533,10 +4533,11 @@ public SnapshotDiffResponse snapshotDiff(String volume, } public List listSnapshotDiffJobs(String volume, - String bucket, - String jobStatus) { + String bucket, + String jobStatus, + boolean listAll) { return omSnapshotManager.getSnapshotDiffList(volume, - bucket, jobStatus); + bucket, jobStatus, listAll); } @Override // ReconfigureProtocol diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java index 2be84dd23ee1..a696420d051a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java @@ -379,7 +379,8 @@ private Set getSSTFileListForSnapshot(OmSnapshot snapshot, } public List getSnapshotDiffJobList( - String volumeName, String bucketName, String jobStatus) { + String volumeName, String bucketName, + String jobStatus, boolean listAll) { List jobList = new ArrayList<>(); try (ClosableIterator> iterator = @@ -388,7 +389,7 @@ public List getSnapshotDiffJobList( SnapshotDiffJob snapshotDiffJob = iterator.next().getValue(); if (snapshotDiffJob.getVolume().equals(volumeName) && snapshotDiffJob.getBucket().equals(bucketName)) { - if (jobStatus.equalsIgnoreCase("all")) { + if (listAll) { jobList.add(snapshotDiffJob); continue; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index f732d7b7fe8a..cac9d0359687 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -1259,7 +1259,8 @@ private ListSnapshotDiffJobResponse listSnapshotDiffJobs( impl.listSnapshotDiffJobs( listSnapshotDiffJobRequest.getVolumeName(), listSnapshotDiffJobRequest.getBucketName(), - listSnapshotDiffJobRequest.getJobStatus()); + listSnapshotDiffJobRequest.getJobStatus(), + listSnapshotDiffJobRequest.getListAll()); 
ListSnapshotDiffJobResponse.Builder builder = ListSnapshotDiffJobResponse.newBuilder(); for (SnapshotDiffJob diffJob : snapshotDiffJobs) { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java index 6078cbfcd7f1..95b446bfd232 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java @@ -112,19 +112,19 @@ public void testListSnapshotDiffJobs() // There are no jobs in the table, therefore // the response list should be empty. List jobList = snapshotDiffManager - .getSnapshotDiffJobList(VOLUME, BUCKET, "queued"); + .getSnapshotDiffJobList(VOLUME, BUCKET, "queued", false); Assertions.assertTrue(jobList.isEmpty()); jobList = snapshotDiffManager - .getSnapshotDiffJobList(VOLUME, BUCKET, "done"); + .getSnapshotDiffJobList(VOLUME, BUCKET, "done", false); Assertions.assertTrue(jobList.isEmpty()); jobList = snapshotDiffManager - .getSnapshotDiffJobList(VOLUME, BUCKET, "in_progress"); + .getSnapshotDiffJobList(VOLUME, BUCKET, "in_progress", false); Assertions.assertTrue(jobList.isEmpty()); jobList = snapshotDiffManager - .getSnapshotDiffJobList(VOLUME, BUCKET, "all"); + .getSnapshotDiffJobList(VOLUME, BUCKET, "", true); Assertions.assertTrue(jobList.isEmpty()); // Submit a job. @@ -146,28 +146,33 @@ public void testListSnapshotDiffJobs() // Response list for 'queued' and 'done' should be empty. jobList = snapshotDiffManager - .getSnapshotDiffJobList(VOLUME, BUCKET, "queued"); + .getSnapshotDiffJobList(VOLUME, BUCKET, "queued", false); Assertions.assertTrue(jobList.isEmpty()); jobList = snapshotDiffManager - .getSnapshotDiffJobList(VOLUME, BUCKET, "done"); + .getSnapshotDiffJobList(VOLUME, BUCKET, "done", false); Assertions.assertTrue(jobList.isEmpty()); // SnapshotDiffJob in the response list should be the same // as the one we got from the table. jobList = snapshotDiffManager - .getSnapshotDiffJobList(VOLUME, BUCKET, "in_progress"); + .getSnapshotDiffJobList(VOLUME, BUCKET, "in_progress", false); Assertions.assertTrue(jobList.contains(diffJob)); jobList = snapshotDiffManager - .getSnapshotDiffJobList(VOLUME, BUCKET, "all"); + .getSnapshotDiffJobList(VOLUME, BUCKET, "", true); Assertions.assertTrue(jobList.contains(diffJob)); // Providing an invalid jobStatus results in an empty list. jobList = snapshotDiffManager - .getSnapshotDiffJobList(VOLUME, BUCKET, "invalid"); + .getSnapshotDiffJobList(VOLUME, BUCKET, "invalid", false); Assertions.assertTrue(jobList.isEmpty()); + // If listAll is true, jobStatus will be ignored even if it's invalid. + jobList = snapshotDiffManager + .getSnapshotDiffJobList(VOLUME, BUCKET, "invalid", true); + Assertions.assertEquals(1, jobList.size()); + // Set up new snapshots to submit a second snapshot diff job. 
String fromSnapshotName2 = "snap-" + RandomStringUtils.randomNumeric(5); String toSnapshotName2 = "snap-" + RandomStringUtils.randomNumeric(5); @@ -184,7 +189,7 @@ public void testListSnapshotDiffJobs() SnapshotDiffJob diffJob2 = snapDiffJobTable.get(diffJobKey2); jobList = snapshotDiffManager - .getSnapshotDiffJobList(VOLUME, BUCKET, "all"); + .getSnapshotDiffJobList(VOLUME, BUCKET, "", true); Assertions.assertEquals(2, jobList.size()); Assertions.assertTrue(jobList.contains(diffJob2)); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java index 0add7e2d3247..90b5476a9502 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java @@ -645,7 +645,8 @@ public SnapshotDiffResponse snapshotDiff(String volumeName, @Override public List listSnapshotDiffJobs( - String volumeName, String bucketName, String jobStatus) { + String volumeName, String bucketName, + String jobStatus, boolean listAll) { return null; } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/ListSnapshotDiffHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/ListSnapshotDiffHandler.java index 71dd7be5edfc..e5bbb1a82515 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/ListSnapshotDiffHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/ListSnapshotDiffHandler.java @@ -38,13 +38,19 @@ public class ListSnapshotDiffHandler extends Handler { @CommandLine.Mixin private BucketUri snapshotPath; - @CommandLine.Parameters(description = "List jobs based on status.\n" + - "Accepted values are: queued, in_progress, done, failed, rejected, all", + @CommandLine.Option(names = {"-s, --status"}, + description = "List jobs based on status.\n" + + "Accepted values are: queued, in_progress, done, failed, rejected", defaultValue = "in_progress") private String jobStatus; + @CommandLine.Option(names = {"-a, --all"}, + description = "List all jobs regardless of status.", + defaultValue = "false") + private boolean listAll; + private static final String[] STATUS_VALUES = - {"queued", "in_progress", "done", "failed", "rejected", "all"}; + {"queued", "in_progress", "done", "failed", "rejected"}; @Override protected OzoneAddress getAddress() { @@ -62,7 +68,7 @@ protected void execute(OzoneClient client, OzoneAddress address) .contains(jobStatus)) { List jobList = client.getObjectStore().listSnapshotDiffJobs( - volumeName, bucketName, jobStatus); + volumeName, bucketName, jobStatus, listAll); int counter = printAsJsonArray(jobList.iterator(), jobList.size()); From 03e375419a3f91e4cee57636c6e8e5d7a2b13586 Mon Sep 17 00:00:00 2001 From: xBis7 Date: Thu, 8 Jun 2023 21:13:41 +0300 Subject: [PATCH 03/15] code improvements - cleanup --- .../hadoop/ozone/client/ObjectStore.java | 1 + .../ozone/client/protocol/ClientProtocol.java | 1 + .../java/org/apache/hadoop/ozone/OmUtils.java | 2 +- .../ozone/om/helpers/SnapshotDiffJob.java | 3 -- ...ManagerProtocolClientSideTranslatorPB.java | 2 +- .../ozone/snapshot/SnapshotDiffResponse.java | 9 ------ .../src/main/proto/OmClientProtocol.proto | 6 ++-- .../hadoop/ozone/om/OmSnapshotManager.java | 16 +++++++++- .../apache/hadoop/ozone/om/OzoneManager.java | 3 +- 
.../om/snapshot/SnapshotDiffManager.java | 24 ++++++++++----- .../OzoneManagerRequestHandler.java | 5 ++-- .../snapshot/ListSnapshotDiffHandler.java | 30 +++++++------------ 12 files changed, 54 insertions(+), 48 deletions(-) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java index 89425ac53ed6..0b55b62ca380 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java @@ -618,6 +618,7 @@ public SnapshotDiffResponse snapshotDiff(String volumeName, * @param volumeName Name of the volume to which the snapshotted bucket belong * @param bucketName Name of the bucket to which the snapshots belong * @param jobStatus JobStatus to be used to filter the snapshot diff jobs + * @param listAll Option to specify whether to list all jobs or not * @return a list of SnapshotDiffJob objects * @throws IOException in case there is a failure while getting a response. */ diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java index 1cd35b6e34dc..e56b40827b12 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java @@ -1078,6 +1078,7 @@ SnapshotDiffResponse snapshotDiff(String volumeName, String bucketName, * @param volumeName Name of the volume to which the snapshotted bucket belong * @param bucketName Name of the bucket to which the snapshots belong * @param jobStatus JobStatus to be used to filter the snapshot diff jobs + * @param listAll Option to specify whether to list all jobs or not * @return a list of SnapshotDiffJob objects * @throws IOException in case there is a failure while getting a response. */ diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index 16ac60dda39e..3ad0b6f33d58 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -265,7 +265,7 @@ public static boolean isReadOnly( // operation SetRangerServiceVersion. 
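// Listing snapshot diff jobs only reads the diff job table, so it is
// handled as a read-only request and does not go through the Ratis
// write path.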
case GetKeyInfo: case SnapshotDiff: - case ListSnapshotDiffJob: + case ListSnapshotDiffJobs: case TransferLeadership: return true; case CreateVolume: diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotDiffJob.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotDiffJob.java index 380483f15a13..eeeaf6e517a9 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotDiffJob.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotDiffJob.java @@ -189,14 +189,11 @@ public int hashCode() { public SnapshotDiffJobProto toProtoBuf() { return SnapshotDiffJobProto.newBuilder() .setCreationTime(creationTime) - .setJobId(jobId) .setStatus(status.toProtobuf()) .setVolume(volume) .setBucket(bucket) .setFromSnapshot(fromSnapshot) .setToSnapshot(toSnapshot) - .setForceFullDiff(forceFullDiff) - .setTotalDiffEntries(totalDiffEntries) .build(); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index da2f905f8909..0b3eb6119e26 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -1250,7 +1250,7 @@ public List listSnapshotDiffJobs(String volumeName, .setJobStatus(jobStatus) .setListAll(listAll); - final OMRequest omRequest = createOMRequest(Type.ListSnapshotDiffJob) + final OMRequest omRequest = createOMRequest(Type.ListSnapshotDiffJobs) .setListSnapshotDiffJobRequest(requestBuilder) .build(); final OMResponse omResponse = submitRequest(omRequest); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffResponse.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffResponse.java index c5fdac14362f..6cd9bff03922 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffResponse.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/snapshot/SnapshotDiffResponse.java @@ -41,15 +41,6 @@ public JobStatusProto toProtobuf() { public static JobStatus fromProtobuf(JobStatusProto jobStatusProto) { return JobStatus.valueOf(jobStatusProto.name()); } - - public static JobStatus getJobStatusFromString(String jobStatus) { - for (JobStatus status : JobStatus.values()) { - if (status.toString().equalsIgnoreCase(jobStatus)) { - return status; - } - } - return null; - } } private final SnapshotDiffReportOzone snapshotDiffReport; diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index f1127d96b5fb..a7a6d4cb77a2 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -135,7 +135,7 @@ enum Type { SnapshotPurge = 118; RecoverLease = 119; SetTimes = 120; - ListSnapshotDiffJob = 121; + ListSnapshotDiffJobs = 121; } message OMRequest { @@ -1727,8 +1727,8 @@ message SnapshotDiffRequest { } message ListSnapshotDiffJobRequest { - optional string volumeName = 1; - optional string bucketName = 2; + required string volumeName = 1; + required string bucketName = 2; optional string jobStatus 
= 3; optional bool listAll = 4; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java index 00bfa9240f9b..0b460b535617 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java @@ -689,7 +689,21 @@ public SnapshotDiffResponse getSnapshotDiffReport(final String volume, public List getSnapshotDiffList(final String volumeName, final String bucketName, final String jobStatus, - final boolean listAll) { + final boolean listAll) + throws IOException { + String volumeKey = ozoneManager.getMetadataManager() + .getVolumeKey(volumeName); + String bucketKey = ozoneManager.getMetadataManager() + .getBucketKey(volumeName, bucketName); + + if (ozoneManager.getMetadataManager() + .getVolumeTable().getIfExist(volumeKey) == null || + ozoneManager.getMetadataManager() + .getBucketTable().getIfExist(bucketKey) == null) { + throw new IOException("Provided volume name " + volumeName + + " or bucket name " + bucketName + " doesn't exist"); + } + return snapshotDiffManager.getSnapshotDiffJobList( volumeName, bucketName, jobStatus, listAll); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 55db22b44657..785272f7203e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -4520,7 +4520,8 @@ public SnapshotDiffResponse snapshotDiff(String volume, public List listSnapshotDiffJobs(String volume, String bucket, String jobStatus, - boolean listAll) { + boolean listAll) + throws IOException { return omSnapshotManager.getSnapshotDiffList(volume, bucket, jobStatus, listAll); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java index a696420d051a..48b3c1d742d9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java @@ -387,18 +387,18 @@ public List getSnapshotDiffJobList( snapDiffJobTable.iterator()) { while (iterator.hasNext()) { SnapshotDiffJob snapshotDiffJob = iterator.next().getValue(); - if (snapshotDiffJob.getVolume().equals(volumeName) && - snapshotDiffJob.getBucket().equals(bucketName)) { + if (Objects.equals(snapshotDiffJob.getVolume(), volumeName) && + Objects.equals(snapshotDiffJob.getBucket(), bucketName)) { if (listAll) { jobList.add(snapshotDiffJob); continue; } - // If provided job status is invalid, - // then all jobs on the table will be ignored. - // No need to check if getJobStatusFromString doesn't return null. - if (snapshotDiffJob.getStatus().equals( - JobStatus.getJobStatusFromString(jobStatus))) { + // First check if the provided JobStatus is valid, + // then check for matches with the provided JobStatus. 
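+ // (e.g. a stored IN_PROGRESS job matches jobStatus "in_progress", since
+ // the comparison upper-cases the input; an unrecognized value such as
+ // "invalid" fails the validity check, so every job is skipped and the
+ // returned list is empty.)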
+ if (validateStringJobStatusExists(jobStatus) && + Objects.equals(snapshotDiffJob.getStatus(), + JobStatus.valueOf(jobStatus.toUpperCase()))) { jobList.add(snapshotDiffJob); } } @@ -407,6 +407,16 @@ public List getSnapshotDiffJobList( return jobList; } + private boolean validateStringJobStatusExists(String jobStatus) { + for (JobStatus status : JobStatus.values()) { + if (Objects.equals(status.name(), + jobStatus.toUpperCase())) { + return true; + } + } + return false; + } + public SnapshotDiffResponse getSnapshotDiffReport( final String volumeName, final String bucketName, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index cac9d0359687..c673c4726b52 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -305,7 +305,7 @@ public OMResponse handleReadRequest(OMRequest request) { request.getSnapshotDiffRequest()); responseBuilder.setSnapshotDiffResponse(snapshotDiffReport); break; - case ListSnapshotDiffJob: + case ListSnapshotDiffJobs: ListSnapshotDiffJobResponse listSnapDiffResponse = listSnapshotDiffJobs(request.getListSnapshotDiffJobRequest()); responseBuilder.setListSnapshotDiffJobResponse(listSnapDiffResponse); @@ -1254,7 +1254,8 @@ private SnapshotDiffResponse snapshotDiff( } private ListSnapshotDiffJobResponse listSnapshotDiffJobs( - ListSnapshotDiffJobRequest listSnapshotDiffJobRequest) { + ListSnapshotDiffJobRequest listSnapshotDiffJobRequest) + throws IOException { List snapshotDiffJobs = impl.listSnapshotDiffJobs( listSnapshotDiffJobRequest.getVolumeName(), diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/ListSnapshotDiffHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/ListSnapshotDiffHandler.java index e5bbb1a82515..4707209b8e7c 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/ListSnapshotDiffHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/ListSnapshotDiffHandler.java @@ -25,7 +25,6 @@ import picocli.CommandLine; import java.io.IOException; -import java.util.Arrays; import java.util.List; /** @@ -38,20 +37,17 @@ public class ListSnapshotDiffHandler extends Handler { @CommandLine.Mixin private BucketUri snapshotPath; - @CommandLine.Option(names = {"-s, --status"}, + @CommandLine.Option(names = {"-s", "--status"}, description = "List jobs based on status.\n" + "Accepted values are: queued, in_progress, done, failed, rejected", defaultValue = "in_progress") private String jobStatus; - @CommandLine.Option(names = {"-a, --all"}, + @CommandLine.Option(names = {"-a", "--all"}, description = "List all jobs regardless of status.", defaultValue = "false") private boolean listAll; - private static final String[] STATUS_VALUES = - {"queued", "in_progress", "done", "failed", "rejected"}; - @Override protected OzoneAddress getAddress() { return snapshotPath.getValue(); @@ -64,21 +60,15 @@ protected void execute(OzoneClient client, OzoneAddress address) String volumeName = snapshotPath.getValue().getVolumeName(); String bucketName = snapshotPath.getValue().getBucketName(); - if (Arrays.asList(STATUS_VALUES) - .contains(jobStatus)) { - List jobList = - 
client.getObjectStore().listSnapshotDiffJobs( - volumeName, bucketName, jobStatus, listAll); + List jobList = + client.getObjectStore().listSnapshotDiffJobs( + volumeName, bucketName, jobStatus, listAll); - int counter = printAsJsonArray(jobList.iterator(), - jobList.size()); - if (isVerbose()) { - System.out.printf("Found : %d snapshot diff jobs for o3://%s/ %s ", - counter, volumeName, bucketName); - } - } else { - System.out.println("Invalid job status, accepted values are: " + - Arrays.toString(STATUS_VALUES)); + int counter = printAsJsonArray(jobList.iterator(), + jobList.size()); + if (isVerbose()) { + System.out.printf("Found : %d snapshot diff jobs for o3://%s/ %s ", + counter, volumeName, bucketName); } } } From c8ddff7bd22388d24cfe44cc6d7955652ff53860 Mon Sep 17 00:00:00 2001 From: xBis7 Date: Thu, 8 Jun 2023 21:44:57 +0300 Subject: [PATCH 04/15] parameterized test --- .../ozone/om/helpers/SnapshotDiffJob.java | 3 + .../om/snapshot/TestSnapshotDiffManager.java | 69 ++++++++++--------- 2 files changed, 40 insertions(+), 32 deletions(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotDiffJob.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotDiffJob.java index eeeaf6e517a9..380483f15a13 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotDiffJob.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotDiffJob.java @@ -189,11 +189,14 @@ public int hashCode() { public SnapshotDiffJobProto toProtoBuf() { return SnapshotDiffJobProto.newBuilder() .setCreationTime(creationTime) + .setJobId(jobId) .setStatus(status.toProtobuf()) .setVolume(volume) .setBucket(bucket) .setFromSnapshot(fromSnapshot) .setToSnapshot(toSnapshot) + .setForceFullDiff(forceFullDiff) + .setTotalDiffEntries(totalDiffEntries) .build(); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java index 95b446bfd232..2c1c67eb4b90 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java @@ -37,16 +37,21 @@ import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.client.AuthenticationException; +import org.apache.hadoop.util.ClosableIterator; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; import org.rocksdb.RocksDBException; import java.io.File; import java.io.IOException; import java.util.List; +import java.util.Map; +import java.util.Objects; import java.util.UUID; import java.util.concurrent.ThreadLocalRandom; @@ -94,8 +99,22 @@ public static void cleanUp() { FileUtil.fullyDelete(metaDir); } - @Test - public void testListSnapshotDiffJobs() + /** + * Clear the SnapshotDiffTable before every test run. 
+ */ + @BeforeEach + public void setUp() { + ClosableIterator> + iterator = snapDiffJobTable.iterator(); + while (iterator.hasNext()) { + String key = iterator.next().getKey(); + snapDiffJobTable.remove(key); + } + } + + @ParameterizedTest + @ValueSource(strings = {"queued", "done", "in_progress", ""}) + public void testListSnapshotDiffJobs(String jobStatus) throws IOException { String fromSnapshotName = "snap-" + RandomStringUtils.randomNumeric(5); String toSnapshotName = "snap-" + RandomStringUtils.randomNumeric(5); @@ -112,19 +131,7 @@ public void testListSnapshotDiffJobs() // There are no jobs in the table, therefore // the response list should be empty. List jobList = snapshotDiffManager - .getSnapshotDiffJobList(VOLUME, BUCKET, "queued", false); - Assertions.assertTrue(jobList.isEmpty()); - - jobList = snapshotDiffManager - .getSnapshotDiffJobList(VOLUME, BUCKET, "done", false); - Assertions.assertTrue(jobList.isEmpty()); - - jobList = snapshotDiffManager - .getSnapshotDiffJobList(VOLUME, BUCKET, "in_progress", false); - Assertions.assertTrue(jobList.isEmpty()); - - jobList = snapshotDiffManager - .getSnapshotDiffJobList(VOLUME, BUCKET, "", true); + .getSnapshotDiffJobList(VOLUME, BUCKET, jobStatus, false); Assertions.assertTrue(jobList.isEmpty()); // Submit a job. @@ -144,23 +151,20 @@ public void testListSnapshotDiffJobs() Assertions.assertEquals(JobStatus.IN_PROGRESS, diffJob.getStatus()); - // Response list for 'queued' and 'done' should be empty. - jobList = snapshotDiffManager - .getSnapshotDiffJobList(VOLUME, BUCKET, "queued", false); - Assertions.assertTrue(jobList.isEmpty()); - + // If the job is IN_PROGRESS, there should be a response. + // Otherwise, response list should be empty. jobList = snapshotDiffManager - .getSnapshotDiffJobList(VOLUME, BUCKET, "done", false); - Assertions.assertTrue(jobList.isEmpty()); + .getSnapshotDiffJobList(VOLUME, BUCKET, jobStatus, false); - // SnapshotDiffJob in the response list should be the same - // as the one we got from the table. - jobList = snapshotDiffManager - .getSnapshotDiffJobList(VOLUME, BUCKET, "in_progress", false); - Assertions.assertTrue(jobList.contains(diffJob)); + if (Objects.equals(jobStatus, "in_progress")) { + Assertions.assertTrue(jobList.contains(diffJob)); + } else { + Assertions.assertTrue(jobList.isEmpty()); + } + // If listAll is true, jobStatus is ignored. jobList = snapshotDiffManager - .getSnapshotDiffJobList(VOLUME, BUCKET, "", true); + .getSnapshotDiffJobList(VOLUME, BUCKET, jobStatus, true); Assertions.assertTrue(jobList.contains(diffJob)); // Providing an invalid jobStatus results in an empty list. @@ -171,7 +175,7 @@ public void testListSnapshotDiffJobs() // If listAll is true, jobStatus will be ignored even if it's invalid. jobList = snapshotDiffManager .getSnapshotDiffJobList(VOLUME, BUCKET, "invalid", true); - Assertions.assertEquals(1, jobList.size()); + Assertions.assertTrue(jobList.size() > 0); // Set up new snapshots to submit a second snapshot diff job. 
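// (Each @ValueSource status value re-runs this whole flow, so the
// assertions below must hold for every parameter, including the empty
// string.)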
String fromSnapshotName2 = "snap-" + RandomStringUtils.randomNumeric(5); @@ -189,10 +193,11 @@ public void testListSnapshotDiffJobs() SnapshotDiffJob diffJob2 = snapDiffJobTable.get(diffJobKey2); jobList = snapshotDiffManager - .getSnapshotDiffJobList(VOLUME, BUCKET, "", true); + .getSnapshotDiffJobList(VOLUME, BUCKET, jobStatus, true); - Assertions.assertEquals(2, jobList.size()); + Assertions.assertTrue(jobList.contains(diffJob)); Assertions.assertTrue(jobList.contains(diffJob2)); + Assertions.assertEquals(2, jobList.size()); } private void setUpKeysAndSnapshots(String fromSnapshotName, From bdb148280524212ac21717138a30650e8cf95edd Mon Sep 17 00:00:00 2001 From: xBis7 Date: Fri, 9 Jun 2023 12:33:47 +0300 Subject: [PATCH 05/15] resolving conflicts and merging with master --- .github/workflows/ci.yml | 1 + .../apache/hadoop/hdds/HddsConfigKeys.java | 34 ++ .../apache/hadoop/hdds/client/OzoneQuota.java | 7 +- .../hadoop/hdds/recon/ReconConfigKeys.java | 3 + .../exception/SCMSecretKeyException.java | 47 +++ .../token/ContainerTokenIdentifier.java | 17 +- .../token/OzoneBlockTokenIdentifier.java | 26 +- .../token/ShortLivedTokenIdentifier.java | 24 +- .../hdds/security/x509/SecurityConfig.java | 4 + .../hadoop/ozone/OzoneSecurityUtil.java | 1 + .../org/apache/hadoop/util/ProtobufUtils.java | 41 ++ .../src/main/resources/ozone-default.xml | 114 +++++ .../token/TestOzoneBlockTokenIdentifier.java | 306 -------------- .../hadoop/hdds/utils/TestProtobufUtils.java | 48 +++ .../hadoop/ozone/HddsDatanodeService.java | 23 +- .../statemachine/DatanodeStateMachine.java | 12 +- .../ECReconstructionCoordinator.java | 10 +- .../ec/reconstruction/TokenHelper.java | 47 +-- .../container/ozoneimpl/OzoneContainer.java | 17 +- .../container/common/ContainerTestUtils.java | 2 +- .../common/TestDatanodeStateMachine.java | 11 +- .../ozoneimpl/TestOzoneContainer.java | 6 +- .../TestDataNodeStartupSlvLessThanMlv.java | 3 +- .../TestDatanodeUpgradeToSchemaV3.java | 10 +- .../upgrade/TestDatanodeUpgradeToScmHA.java | 8 +- hadoop-hdds/docs/content/feature/Quota.md | 6 + hadoop-hdds/docs/content/feature/Quota.zh.md | 5 + .../hdds/protocol/SCMSecurityProtocol.java | 1 + .../hdds/protocol/SecretKeyProtocol.java | 55 +++ .../protocol/SecretKeyProtocolDatanode.java | 34 ++ .../hdds/protocol/SecretKeyProtocolOm.java | 32 ++ .../hdds/protocol/SecretKeyProtocolScm.java | 31 ++ ...cretKeyProtocolClientSideTranslatorPB.java | 165 ++++++++ .../SecretKeyProtocolDatanodePB.java | 40 ++ .../protocolPB/SecretKeyProtocolOmPB.java | 39 ++ .../protocolPB/SecretKeyProtocolScmPB.java | 38 ++ ...ecretKeyProtocolFailoverProxyProvider.java | 303 ++++++++++++++ .../SingleSecretKeyProtocolProxyProvider.java | 56 +++ .../symmetric/DefaultSecretKeyClient.java | 72 ++++ .../DefaultSecretKeySignerClient.java | 131 ++++++ .../DefaultSecretKeyVerifierClient.java | 105 +++++ .../symmetric/LocalSecretKeyStore.java | 199 +++++++++ .../security/symmetric/ManagedSecretKey.java | 154 +++++++ .../security/symmetric/SecretKeyClient.java | 26 ++ .../security/symmetric/SecretKeyConfig.java | 109 +++++ .../security/symmetric/SecretKeyManager.java | 173 ++++++++ .../symmetric/SecretKeySignerClient.java | 46 ++ .../security/symmetric/SecretKeyState.java | 60 +++ .../symmetric/SecretKeyStateImpl.java | 139 ++++++ .../security/symmetric/SecretKeyStore.java | 35 ++ .../symmetric/SecretKeyVerifierClient.java | 32 ++ .../hdds/security/symmetric/package-info.java | 63 +++ .../security/token/BlockTokenVerifier.java | 7 +- .../token/ContainerTokenSecretManager.java | 14 
+- .../token/ContainerTokenVerifier.java | 6 +- .../token/OzoneBlockTokenSecretManager.java | 66 +-- .../token/ShortLivedTokenSecretManager.java | 75 ++-- .../token/ShortLivedTokenVerifier.java | 66 ++- .../hdds/security/token/TokenVerifier.java | 9 +- .../hadoop/hdds/utils/HddsServerUtil.java | 58 +++ .../symmetric/LocalSecretKeyStoreTest.java | 188 +++++++++ .../symmetric/ManagedSecretKeyTest.java | 75 ++++ .../symmetric/SecretKeyManagerTest.java | 206 +++++++++ .../security/symmetric/SecretKeyTestUtil.java | 52 +++ .../token/TestBlockTokenVerifier.java | 18 +- .../token/TestContainerTokenVerifier.java | 12 +- .../token/TestOzoneBlockTokenIdentifier.java | 103 +++++ .../TestOzoneBlockTokenSecretManager.java | 245 +++-------- .../security/token/TokenVerifierTests.java | 168 ++++---- .../org.mockito.plugins.MockMaker | 16 + .../src/main/proto/ScmAdminProtocol.proto | 3 +- .../src/main/proto/hdds.proto | 3 +- .../src/main/proto/SCMRatisProtocol.proto | 1 + .../src/main/proto/ScmSecretKeyProtocol.proto | 111 +++++ .../proto/ScmServerSecurityProtocol.proto | 3 +- .../hadoop/hdds/scm/ha/SCMHAManager.java | 8 + .../hadoop/hdds/scm/ha/SCMHAManagerImpl.java | 26 ++ .../hadoop/hdds/scm/ha/SCMHAManagerStub.java | 6 + .../hadoop/hdds/scm/ha/SCMStateMachine.java | 28 +- .../hadoop/hdds/scm/ha/io/CodecFactory.java | 2 + .../hadoop/hdds/scm/ha/io/ListCodec.java | 6 +- .../hdds/scm/ha/io/ManagedSecretKeyCodec.java | 44 ++ ...cretKeyProtocolServerSideTranslatorPB.java | 165 ++++++++ .../security/ScmSecretKeyStateBuilder.java | 60 +++ .../scm/security/SecretKeyManagerService.java | 163 ++++++++ .../hdds/scm/security/package-info.java | 22 + .../hdds/scm/server/SCMPolicyProvider.java | 15 + .../scm/server/SCMSecurityProtocolServer.java | 76 +++- .../scm/server/StorageContainerManager.java | 74 ++-- .../hdds/scm/ha/TestSCMHAManagerImpl.java | 4 +- .../server/TestSCMSecurityProtocolServer.java | 2 +- .../ozone/container/common/TestEndPoint.java | 12 +- .../ozone/client/io/KeyInputStream.java | 17 +- .../java/org/apache/hadoop/ozone/OmUtils.java | 1 + .../apache/hadoop/ozone/om/OMConfigKeys.java | 13 + .../om/protocol/OzoneManagerProtocol.java | 3 + ...ManagerProtocolClientSideTranslatorPB.java | 17 + .../dist/src/main/compose/ozone/test-ec.sh | 43 ++ .../dist/src/main/compose/ozone/test.sh | 16 +- .../main/compose/ozonesecure-ha/docker-config | 5 + .../main/smoketest/admincli/datanode.robot | 13 +- .../smoketest/basic/ozone-shell-lib.robot | 24 +- .../hdds/scm/TestSCMInstallSnapshot.java | 2 +- .../scm/storage/TestContainerCommandsEC.java | 32 +- .../hdds/upgrade/TestScmHAFinalization.java | 4 +- .../apache/hadoop/ozone/MiniOzoneCluster.java | 7 + .../hadoop/ozone/MiniOzoneClusterImpl.java | 11 + .../hadoop/ozone/MiniOzoneHAClusterImpl.java | 11 + .../apache/hadoop/ozone/TestBlockTokens.java | 394 ++++++++++++++++++ .../hadoop/ozone/TestBlockTokensCLI.java | 237 +++++++++++ .../hadoop/ozone/TestMiniOzoneCluster.java | 8 +- .../hadoop/ozone/TestSecretKeysApi.java | 366 ++++++++++++++++ .../hadoop/ozone/TestSecureOzoneCluster.java | 263 ------------ .../ozone/client/SecretKeyTestClient.java | 73 ++++ .../client/rpc/TestContainerStateMachine.java | 2 + .../TestContainerStateMachineFlushDelay.java | 2 + .../client/rpc/TestOzoneAtRestEncryption.java | 2 + .../client/rpc/TestSecureOzoneRpcClient.java | 8 +- .../ozoneimpl/TestOzoneContainer.java | 5 +- .../ozoneimpl/TestOzoneContainerWithTLS.java | 20 +- .../ozoneimpl/TestSecureOzoneContainer.java | 18 +- .../server/TestSecureContainerServer.java | 17 +- 
.../scm/TestSCMInstallSnapshotWithHA.java | 4 +- .../ozone/scm/TestSecretKeySnapshot.java | 289 +++++++++++++ .../src/main/proto/OmClientProtocol.proto | 17 +- .../ozone/om/GrpcOzoneManagerServer.java | 54 +++ .../apache/hadoop/ozone/om/OzoneManager.java | 77 ++-- .../OzoneManagerRequestHandler.java | 14 + .../security/TestOzoneManagerBlockToken.java | 251 ----------- .../recon/api/types/FeatureProvider.java | 22 +- .../ozone/recon/api/TestFeaturesEndPoint.java | 32 +- .../ozone/admin/om/FetchKeySubCommand.java | 56 +++ .../apache/hadoop/ozone/admin/om/OMAdmin.java | 3 +- 133 files changed, 6049 insertions(+), 1593 deletions(-) create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/SCMSecretKeyException.java create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/util/ProtobufUtils.java delete mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenIdentifier.java create mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestProtobufUtils.java create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SecretKeyProtocol.java create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SecretKeyProtocolDatanode.java create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SecretKeyProtocolOm.java create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SecretKeyProtocolScm.java create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SecretKeyProtocolClientSideTranslatorPB.java create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SecretKeyProtocolDatanodePB.java create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SecretKeyProtocolOmPB.java create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SecretKeyProtocolScmPB.java create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SecretKeyProtocolFailoverProxyProvider.java create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SingleSecretKeyProtocolProxyProvider.java create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/DefaultSecretKeyClient.java create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/DefaultSecretKeySignerClient.java create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/DefaultSecretKeyVerifierClient.java create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/LocalSecretKeyStore.java create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/ManagedSecretKey.java create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyClient.java create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyConfig.java create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyManager.java create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeySignerClient.java create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyState.java create mode 100644 
hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyStateImpl.java create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyStore.java create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyVerifierClient.java create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/package-info.java create mode 100644 hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/LocalSecretKeyStoreTest.java create mode 100644 hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/ManagedSecretKeyTest.java create mode 100644 hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyManagerTest.java create mode 100644 hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyTestUtil.java create mode 100644 hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenIdentifier.java create mode 100644 hadoop-hdds/framework/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker create mode 100644 hadoop-hdds/interface-server/src/main/proto/ScmSecretKeyProtocol.proto create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/ManagedSecretKeyCodec.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/SecretKeyProtocolServerSideTranslatorPB.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/ScmSecretKeyStateBuilder.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/SecretKeyManagerService.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/package-info.java create mode 100644 hadoop-ozone/dist/src/main/compose/ozone/test-ec.sh create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecretKeysApi.java create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/SecretKeyTestClient.java create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSecretKeySnapshot.java delete mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneManagerBlockToken.java create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/FetchKeySubCommand.java diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d63064e89851..ff4d386aec10 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -254,6 +254,7 @@ jobs: - secure - unsecure - compat + - EC - HA-secure - HA-unsecure - MR diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java index b68422074734..4c0339c35d81 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java @@ -228,6 +228,28 @@ public final class HddsConfigKeys { public static final String HDDS_X509_ROOTCA_PRIVATE_KEY_FILE_DEFAULT = ""; + public static final String 
HDDS_SECRET_KEY_FILE = + "hdds.secret.key.file.name"; + public static final String HDDS_SECRET_KEY_FILE_DEFAULT = "secret_keys.json"; + + public static final String HDDS_SECRET_KEY_EXPIRY_DURATION = + "hdds.secret.key.expiry.duration"; + public static final String HDDS_SECRET_KEY_EXPIRY_DURATION_DEFAULT = "7d"; + + public static final String HDDS_SECRET_KEY_ROTATE_DURATION = + "hdds.secret.key.rotate.duration"; + public static final String HDDS_SECRET_KEY_ROTATE_DURATION_DEFAULT = "1d"; + + public static final String HDDS_SECRET_KEY_ALGORITHM = + "hdds.secret.key.algorithm"; + public static final String HDDS_SECRET_KEY_ALGORITHM_DEFAULT = + "HmacSHA256"; + + public static final String HDDS_SECRET_KEY_ROTATE_CHECK_DURATION = + "hdds.secret.key.rotate.check.duration"; + public static final String HDDS_SECRET_KEY_ROTATE_CHECK_DURATION_DEFAULT + = "10m"; + /** * Do not instantiate. */ @@ -277,6 +299,18 @@ private HddsConfigKeys() { public static final String HDDS_SECURITY_CLIENT_SCM_CERTIFICATE_PROTOCOL_ACL = "hdds.security.client.scm.certificate.protocol.acl"; + public static final String + HDDS_SECURITY_CLIENT_SCM_SECRET_KEY_OM_PROTOCOL_ACL = + "hdds.security.client.scm.secretkey.om.protocol.acl"; + + public static final String + HDDS_SECURITY_CLIENT_SCM_SECRET_KEY_SCM_PROTOCOL_ACL = + "hdds.security.client.scm.secretkey.scm.protocol.acl"; + + public static final String + HDDS_SECURITY_CLIENT_SCM_SECRET_KEY_DATANODE_PROTOCOL_ACL = + "hdds.security.client.scm.secretkey.datanode.protocol.acl"; + // Determines if the Container Chunk Manager will write user data to disk // Set to false only for specific performance tests public static final String HDDS_CONTAINER_PERSISTDATA = diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java index ec337baa6589..04996f10b43a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java @@ -191,9 +191,10 @@ public static OzoneQuota parseSpaceQuota(String quotaInBytes) { nSize = Long.parseLong(size); } catch (NumberFormatException e) { throw new IllegalArgumentException("Invalid values for quota, to ensure" + - " that the Quota format is legal(supported values are B," + - " KB, MB, GB and TB). And the quota value cannot be greater than " + - "Long.MAX_VALUE BYTES"); + " that the Quota format is legal(supported values are B," + + " KB, MB, GB and TB with positive long values)." + + " And the quota value cannot be greater than " + + "Long.MAX_VALUE BYTES"); } if (nSize <= 0) { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfigKeys.java index 47d3b00dd565..3571d39bc8a2 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfigKeys.java @@ -44,6 +44,9 @@ private ReconConfigKeys() { // Fully qualified heatmap provider implementation class name key. 
public static final String OZONE_RECON_HEATMAP_PROVIDER_KEY = "ozone.recon.heatmap.provider"; + public static final String OZONE_RECON_HEATMAP_ENABLE_KEY = + "ozone.recon.heatmap.enable"; + public static final boolean OZONE_RECON_HEATMAP_ENABLE_DEFAULT = false; public static final String OZONE_RECON_ADDRESS_DEFAULT = "0.0.0.0:9891"; public static final String OZONE_RECON_HTTP_ADDRESS_KEY = diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/SCMSecretKeyException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/SCMSecretKeyException.java new file mode 100644 index 000000000000..2b2b2518690e --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/exception/SCMSecretKeyException.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.security.exception; + +import java.io.IOException; + +/** + * Exception for all secret key related errors. + */ +public class SCMSecretKeyException extends IOException { + private final ErrorCode errorCode; + + public SCMSecretKeyException(String message, ErrorCode errorCode) { + super(message); + this.errorCode = errorCode; + } + + public ErrorCode getErrorCode() { + return errorCode; + } + + /** + * Error codes to make it easy to decode these exceptions. + */ + public enum ErrorCode { + OK, + INTERNAL_ERROR, + SECRET_KEY_NOT_ENABLED, + SECRET_KEY_NOT_INITIALIZED + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/ContainerTokenIdentifier.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/ContainerTokenIdentifier.java index ebb0709d4b3c..7cd8c279ba26 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/ContainerTokenIdentifier.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/ContainerTokenIdentifier.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerTokenSecretProto; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.io.Text; +import org.apache.hadoop.util.ProtobufUtils; import java.io.DataInput; import java.io.DataInputStream; @@ -28,6 +29,7 @@ import java.io.IOException; import java.time.Instant; import java.util.Objects; +import java.util.UUID; /** * Token identifier for container operations, similar to block token. 
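The two imports above are the whole wire-format story for secret key ids: a java.util.UUID is split into two longs and rebuilt on the way back. A minimal round-trip sketch, assuming this patch is applied (both ProtobufUtils and the HddsProtos.UUID message are introduced by it):

    import java.util.UUID;

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.util.ProtobufUtils;

    public class SecretKeyIdRoundTrip {
      public static void main(String[] args) {
        // On the Java side a secret key id is a plain java.util.UUID.
        UUID secretKeyId = UUID.randomUUID();
        // On the wire it travels as two longs (most/least significant bits).
        HddsProtos.UUID proto = ProtobufUtils.toProtobuf(secretKeyId);
        UUID restored = ProtobufUtils.fromProtobuf(proto);
        System.out.println(secretKeyId.equals(restored)); // prints true
      }
    }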
@@ -43,11 +45,18 @@ public ContainerTokenIdentifier() { } public ContainerTokenIdentifier(String ownerId, ContainerID containerID, - String certSerialId, Instant expiryDate) { - super(ownerId, expiryDate, certSerialId); + Instant expiryDate) { + super(ownerId, expiryDate); this.containerID = containerID; } + public ContainerTokenIdentifier(String ownerId, ContainerID containerID, + UUID secretKeyId, + Instant expiryDate) { + this(ownerId, containerID, expiryDate); + setSecretKeyId(secretKeyId); + } + @Override public Text getKind() { return KIND; @@ -58,7 +67,7 @@ public void write(DataOutput out) throws IOException { ContainerTokenSecretProto.Builder builder = ContainerTokenSecretProto .newBuilder() .setOwnerId(getOwnerId()) - .setCertSerialId(getCertSerialId()) + .setSecretKeyId(ProtobufUtils.toProtobuf(getSecretKeyId())) .setExpiryDate(getExpiry().toEpochMilli()) .setContainerId(containerID.getProtobuf()); out.write(builder.build().toByteArray()); @@ -72,7 +81,7 @@ public void readFields(DataInput in) throws IOException { } ContainerTokenSecretProto proto = ContainerTokenSecretProto.parseFrom((DataInputStream) in); - setCertSerialId(proto.getCertSerialId()); + setSecretKeyId(ProtobufUtils.fromProtobuf(proto.getSecretKeyId())); setExpiry(Instant.ofEpochMilli(proto.getExpiryDate())); setOwnerId(proto.getOwnerId()); this.containerID = ContainerID.getFromProtobuf(proto.getContainerId()); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenIdentifier.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenIdentifier.java index dcd75d6334ca..be18f90a07c6 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenIdentifier.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenIdentifier.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.token.Token.TrivialRenewer; +import org.apache.hadoop.util.ProtobufUtils; import java.io.DataInput; import java.io.DataInputStream; @@ -59,16 +60,14 @@ public OzoneBlockTokenIdentifier() { } public OzoneBlockTokenIdentifier(String ownerId, BlockID blockId, - Set modes, long expiryDate, String omCertSerialId, - long maxLength) { - this(ownerId, getTokenService(blockId), modes, expiryDate, omCertSerialId, + Set modes, long expiryDate, long maxLength) { + this(ownerId, getTokenService(blockId), modes, expiryDate, maxLength); } public OzoneBlockTokenIdentifier(String ownerId, String blockId, - Set modes, long expiryDate, String omCertSerialId, - long maxLength) { - super(ownerId, Instant.ofEpochMilli(expiryDate), omCertSerialId); + Set modes, long expiryDate, long maxLength) { + super(ownerId, Instant.ofEpochMilli(expiryDate)); this.blockId = blockId; this.modes = modes == null ? 
EnumSet.noneOf(AccessModeProto.class) : EnumSet.copyOf(modes); @@ -136,7 +135,7 @@ public void readFields(DataInput in) throws IOException { BlockTokenSecretProto.parseFrom((DataInputStream) in); setOwnerId(token.getOwnerId()); setExpiry(Instant.ofEpochMilli(token.getExpiryDate())); - setCertSerialId(token.getOmCertSerialId()); + setSecretKeyId(ProtobufUtils.fromProtobuf(token.getSecretKeyId())); this.blockId = token.getBlockId(); this.modes = EnumSet.copyOf(token.getModesList()); this.maxLength = token.getMaxLength(); @@ -147,10 +146,13 @@ public static OzoneBlockTokenIdentifier readFieldsProtobuf(DataInput in) throws IOException { BlockTokenSecretProto token = BlockTokenSecretProto.parseFrom((DataInputStream) in); - return new OzoneBlockTokenIdentifier(token.getOwnerId(), - token.getBlockId(), EnumSet.copyOf(token.getModesList()), - token.getExpiryDate(), token.getOmCertSerialId(), - token.getMaxLength()); + OzoneBlockTokenIdentifier tokenId = + new OzoneBlockTokenIdentifier(token.getOwnerId(), + token.getBlockId(), EnumSet.copyOf(token.getModesList()), + token.getExpiryDate(), + token.getMaxLength()); + tokenId.setSecretKeyId(ProtobufUtils.fromProtobuf(token.getSecretKeyId())); + return tokenId; } @Override @@ -158,7 +160,7 @@ public void write(DataOutput out) throws IOException { BlockTokenSecretProto.Builder builder = BlockTokenSecretProto.newBuilder() .setBlockId(blockId) .setOwnerId(getOwnerId()) - .setOmCertSerialId(getCertSerialId()) + .setSecretKeyId(ProtobufUtils.toProtobuf(getSecretKeyId())) .setExpiryDate(getExpiryDate()) .setMaxLength(maxLength); // Add access mode allowed diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/ShortLivedTokenIdentifier.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/ShortLivedTokenIdentifier.java index 7475fa50423f..dbd168f96a33 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/ShortLivedTokenIdentifier.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/token/ShortLivedTokenIdentifier.java @@ -24,6 +24,7 @@ import java.time.Instant; import java.util.Objects; +import java.util.UUID; /** * Base class for short-lived tokens (block, container). 
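As a usage sketch, the new secret-key-based constructor from the ContainerTokenIdentifier hunk above can be exercised as follows; the owner name, ContainerID.valueOf(1L) and the one-hour expiry are illustrative assumptions, not values taken from this patch:

    import java.time.Instant;
    import java.util.UUID;

    import org.apache.hadoop.hdds.scm.container.ContainerID;
    import org.apache.hadoop.hdds.security.token.ContainerTokenIdentifier;

    public class ContainerTokenIdSketch {
      public static void main(String[] args) {
        // In practice this would be the id of SCM's currently valid secret key.
        UUID secretKeyId = UUID.randomUUID();
        ContainerTokenIdentifier tokenId = new ContainerTokenIdentifier(
            "testUser",                        // owner
            ContainerID.valueOf(1L),           // container the token is scoped to
            secretKeyId,                       // replaces the old certSerialId
            Instant.now().plusSeconds(3600));  // expiry, one hour from now
        // toString() reports ownerId, expiry and secretKeyId (see below).
        System.out.println(tokenId);
      }
    }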
@@ -33,18 +34,16 @@ public abstract class ShortLivedTokenIdentifier extends TokenIdentifier { private String ownerId; private Instant expiry; - private String certSerialId; + private UUID secretKeyId; public abstract String getService(); protected ShortLivedTokenIdentifier() { } - protected ShortLivedTokenIdentifier(String ownerId, Instant expiry, - String certSerialId) { + protected ShortLivedTokenIdentifier(String ownerId, Instant expiry) { this.ownerId = ownerId; this.expiry = expiry; - this.certSerialId = certSerialId; } @Override @@ -67,22 +66,23 @@ protected void setExpiry(Instant expiry) { this.expiry = expiry; } - protected void setCertSerialId(String certSerialId) { - this.certSerialId = certSerialId; + public void setSecretKeyId(UUID secretKeyId) { + this.secretKeyId = secretKeyId; } public Instant getExpiry() { return expiry; } - public String getCertSerialId() { - return certSerialId; - } public String getOwnerId() { return ownerId; } + public UUID getSecretKeyId() { + return secretKeyId; + } + @Override public boolean equals(Object o) { if (this == o) { @@ -95,18 +95,18 @@ public boolean equals(Object o) { ShortLivedTokenIdentifier that = (ShortLivedTokenIdentifier) o; return Objects.equals(ownerId, that.ownerId) && Objects.equals(expiry, that.expiry) && - Objects.equals(certSerialId, that.certSerialId); + Objects.equals(secretKeyId, that.secretKeyId); } @Override public int hashCode() { - return Objects.hash(ownerId, expiry, certSerialId); + return Objects.hash(ownerId, expiry, secretKeyId); } @Override public String toString() { return "ownerId=" + ownerId + ", expiry=" + expiry + - ", certSerialId=" + certSerialId; + ", secretKeyId=" + secretKeyId; } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java index 6a8a91e7b2d9..12a4de1c684a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java @@ -487,4 +487,8 @@ public long getS3AuthInfoMaxDate() { OzoneConfigKeys.OZONE_S3_AUTHINFO_MAX_LIFETIME_KEY_DEFAULT, TimeUnit.MICROSECONDS); } + + public boolean isTokenEnabled() { + return blockTokenEnabled || containerTokenEnabled; + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneSecurityUtil.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneSecurityUtil.java index 9a5a1fd563e0..407a967682b2 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneSecurityUtil.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneSecurityUtil.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.commons.validator.routines.InetAddressValidator; + import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_HTTP_SECURITY_ENABLED_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_HTTP_SECURITY_ENABLED_KEY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_DEFAULT; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/ProtobufUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/ProtobufUtils.java new file mode 100644 index 000000000000..428157981e3a --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/ProtobufUtils.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under 
one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.util; + +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + +import java.util.UUID; + +/** + * Contains utilities to ease common protobuf to java object conversions. + */ +public final class ProtobufUtils { + private ProtobufUtils() { + } + + public static HddsProtos.UUID toProtobuf(UUID uuid) { + return HddsProtos.UUID.newBuilder() + .setMostSigBits(uuid.getMostSignificantBits()) + .setLeastSigBits(uuid.getLeastSignificantBits()) + .build(); + } + + public static UUID fromProtobuf(HddsProtos.UUID proto) { + return new UUID(proto.getMostSigBits(), proto.getLeastSigBits()); + } +} diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index f90fea4908b3..87be7a509a20 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -2529,6 +2529,33 @@ client scm container protocol. + + hdds.security.client.scm.secretkey.om.protocol.acl + * + SECURITY + + Comma separated list of users and groups allowed to access + client scm secret key protocol for om. + + + + hdds.security.client.scm.secretkey.scm.protocol.acl + * + SECURITY + + Comma separated list of users and groups allowed to access + client scm secret key protocol for scm. + + + + hdds.security.client.scm.secretkey.datanode.protocol.acl + * + SECURITY + + Comma separated list of users and groups allowed to access + client scm secret key protocol for datanodes. + + ozone.om.security.client.protocol.acl * @@ -3525,6 +3552,34 @@ OM/S3GATEWAY OMRequest, OMResponse over grpc max message length (bytes). + + + ozone.om.grpc.read.thread.num + 32 + OZONE, OM, S3GATEWAY + + OM grpc server read thread pool core thread size. + + + + + ozone.om.grpc.bossgroup.size + 8 + OZONE, OM, S3GATEWAY + + OM grpc server netty boss event group size. + + + + + ozone.om.grpc.workergroup.size + 32 + OZONE, OM, S3GATEWAY + + OM grpc server netty worker event group size. + + + ozone.default.bucket.layout @@ -3735,6 +3790,17 @@ + + ozone.recon.heatmap.enable + false + OZONE, RECON + + To enable/disable the recon heatmap feature. Along with this config, the user must also provide an implementation + of the "org.apache.hadoop.ozone.recon.heatmap.IHeatMapProvider" interface and configure it in the + "ozone.recon.heatmap.provider" configuration. + + + ozone.fs.datastream.enabled false @@ -3930,4 +3996,52 @@ Max numbers of keys changed allowed for a snapshot diff job. + + + hdds.secret.key.file.name + secret_keys.json + SCM, SECURITY + + Name of the file which stores symmetric secret keys for token signatures. + + + + hdds.secret.key.expiry.duration + 7d + SCM, SECURITY + + The duration for which symmetric secret keys issued by SCM are valid.
+ This default value, in combination with hdds.secret.key.rotate.duration=1d, results in 7 secret keys (for the + last 7 days) being kept valid at any point in time. + + + + hdds.secret.key.rotate.duration + 1d + SCM, SECURITY + + The interval at which SCM periodically generates a new symmetric secret key. + + + + hdds.secret.key.rotate.check.duration + 10m + SCM, SECURITY + + The interval at which SCM periodically checks whether it's time to generate new symmetric secret keys. + This config affects the practical accuracy of the secret key expiry and rotation periods. For example, + if hdds.secret.key.rotate.duration=1d and hdds.secret.key.rotate.check.duration=10m, the actual key rotation + will happen every 1d +/- 10m. + + + + hdds.secret.key.algorithm + HmacSHA256 + SCM, SECURITY + + The algorithm that SCM uses to generate symmetric secret keys. + A valid algorithm is any one supported by KeyGenerator, as described at + https://docs.oracle.com/javase/8/docs/technotes/guides/security/StandardNames.html#KeyGenerator. + + diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenIdentifier.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenIdentifier.java deleted file mode 100644 index c834e8c89c59..000000000000 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenIdentifier.java +++ /dev/null @@ -1,306 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ -package org.apache.hadoop.hdds.security.token; - -import java.io.ByteArrayInputStream; -import java.io.DataInputStream; -import java.io.File; -import java.io.IOException; -import java.security.GeneralSecurityException; -import java.security.InvalidKeyException; -import java.security.KeyPair; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; -import java.security.PrivateKey; -import java.security.Signature; -import java.security.SignatureException; -import java.security.cert.Certificate; -import java.security.cert.CertificateEncodingException; -import java.security.cert.X509Certificate; -import java.util.ArrayList; -import java.util.Collections; -import java.util.EnumSet; -import java.util.List; -import java.util.Map; -import javax.crypto.KeyGenerator; -import javax.crypto.Mac; -import javax.crypto.SecretKey; - -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.security.ssl.KeyStoreTestUtil; -import org.apache.hadoop.security.token.Token; -import org.apache.ozone.test.GenericTestUtils; -import org.apache.hadoop.util.Time; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Assumptions; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Test class for {@link OzoneBlockTokenIdentifier}. - */ -class TestOzoneBlockTokenIdentifier { - - private static final Logger LOG = LoggerFactory - .getLogger(TestOzoneBlockTokenIdentifier.class); - private static final String BASEDIR = GenericTestUtils - .getTempPath(TestOzoneBlockTokenIdentifier.class.getSimpleName()); - private static final String KEYSTORES_DIR = - new File(BASEDIR).getAbsolutePath(); - private static long expiryTime; - private static KeyPair keyPair; - private static X509Certificate cert; - - @BeforeAll - static void setUp() throws Exception { - File base = new File(BASEDIR); - FileUtil.fullyDelete(base); - Assumptions.assumeTrue(base.mkdirs()); - expiryTime = Time.monotonicNow() + 60 * 60 * 24; - - // Create Ozone Master key pair. - keyPair = KeyStoreTestUtil.generateKeyPair("RSA"); - // Create Ozone Master certificate (SCM CA issued cert) and key store. 
- cert = KeyStoreTestUtil - .generateCertificate("CN=OzoneMaster", keyPair, 30, "SHA256withRSA"); - } - - @AfterEach - void cleanUp() { - // KeyStoreTestUtil.cleanupSSLConfig(KEYSTORES_DIR, sslConfsDir); - } - - @Test - void testSignToken() throws GeneralSecurityException, IOException { - String keystore = new File(KEYSTORES_DIR, "keystore.jks") - .getAbsolutePath(); - String truststore = new File(KEYSTORES_DIR, "truststore.jks") - .getAbsolutePath(); - String trustPassword = "trustPass"; - String keyStorePassword = "keyStorePass"; - String keyPassword = "keyPass"; - - - KeyStoreTestUtil.createKeyStore(keystore, keyStorePassword, keyPassword, - "OzoneMaster", keyPair.getPrivate(), cert); - - // Create trust store and put the certificate in the trust store - Map certs = Collections.singletonMap("server", - cert); - KeyStoreTestUtil.createTrustStore(truststore, trustPassword, certs); - - // Sign the OzoneMaster Token with Ozone Master private key - PrivateKey privateKey = keyPair.getPrivate(); - OzoneBlockTokenIdentifier tokenId = new OzoneBlockTokenIdentifier( - "testUser", "84940", - EnumSet.allOf(HddsProtos.BlockTokenSecretProto.AccessModeProto.class), - expiryTime, cert.getSerialNumber().toString(), 128L); - byte[] signedToken = signTokenAsymmetric(tokenId, privateKey); - - // Verify a valid signed OzoneMaster Token with Ozone Master - // public key(certificate) - boolean isValidToken = verifyTokenAsymmetric(tokenId, signedToken, cert); - LOG.info("{} is {}", tokenId, isValidToken ? "valid." : "invalid."); - - // Verify an invalid signed OzoneMaster Token with Ozone Master - // public key(certificate) - tokenId = new OzoneBlockTokenIdentifier("", "", - EnumSet.allOf(HddsProtos.BlockTokenSecretProto.AccessModeProto.class), - expiryTime, cert.getSerialNumber().toString(), 128L); - LOG.info("Unsigned token {} is {}", tokenId, - verifyTokenAsymmetric(tokenId, RandomUtils.nextBytes(128), cert)); - - } - - @Test - void testTokenSerialization() throws GeneralSecurityException, - IOException { - String keystore = new File(KEYSTORES_DIR, "keystore.jks") - .getAbsolutePath(); - String truststore = new File(KEYSTORES_DIR, "truststore.jks") - .getAbsolutePath(); - String trustPassword = "trustPass"; - String keyStorePassword = "keyStorePass"; - String keyPassword = "keyPass"; - long maxLength = 128L; - - KeyStoreTestUtil.createKeyStore(keystore, keyStorePassword, keyPassword, - "OzoneMaster", keyPair.getPrivate(), cert); - - // Create trust store and put the certificate in the trust store - Map certs = Collections.singletonMap("server", - cert); - KeyStoreTestUtil.createTrustStore(truststore, trustPassword, certs); - - // Sign the OzoneMaster Token with Ozone Master private key - PrivateKey privateKey = keyPair.getPrivate(); - OzoneBlockTokenIdentifier tokenId = new OzoneBlockTokenIdentifier( - "testUser", "84940", - EnumSet.allOf(HddsProtos.BlockTokenSecretProto.AccessModeProto.class), - expiryTime, cert.getSerialNumber().toString(), maxLength); - byte[] signedToken = signTokenAsymmetric(tokenId, privateKey); - - - Token token = new Token<>(tokenId.getBytes(), - signedToken, tokenId.getKind(), new Text("host:port")); - - String encodeToUrlString = token.encodeToUrlString(); - - TokendecodedToken = new Token<>(); - decodedToken.decodeFromUrlString(encodeToUrlString); - - OzoneBlockTokenIdentifier decodedTokenId = new OzoneBlockTokenIdentifier(); - decodedTokenId.readFields(new DataInputStream( - new ByteArrayInputStream(decodedToken.getIdentifier()))); - - Assertions.assertEquals(tokenId, 
decodedTokenId); - Assertions.assertEquals(maxLength, decodedTokenId.getMaxLength()); - - // Verify a decoded signed Token with public key(certificate) - boolean isValidToken = verifyTokenAsymmetric(decodedTokenId, decodedToken - .getPassword(), cert); - LOG.info("{} is {}", tokenId, isValidToken ? "valid." : "invalid."); - } - - - private byte[] signTokenAsymmetric(OzoneBlockTokenIdentifier tokenId, - PrivateKey privateKey) throws NoSuchAlgorithmException, - InvalidKeyException, SignatureException { - Signature rsaSignature = Signature.getInstance("SHA256withRSA"); - rsaSignature.initSign(privateKey); - rsaSignature.update(tokenId.getBytes()); - return rsaSignature.sign(); - } - - private boolean verifyTokenAsymmetric(OzoneBlockTokenIdentifier tokenId, - byte[] signature, Certificate certificate) throws InvalidKeyException, - NoSuchAlgorithmException, SignatureException { - Signature rsaSignature = Signature.getInstance("SHA256withRSA"); - rsaSignature.initVerify(certificate); - rsaSignature.update(tokenId.getBytes()); - return rsaSignature.verify(signature); - } - - private byte[] signTokenSymmetric(OzoneBlockTokenIdentifier identifier, - Mac mac, SecretKey key) { - try { - mac.init(key); - } catch (InvalidKeyException ike) { - throw new IllegalArgumentException("Invalid key to HMAC computation", - ike); - } - return mac.doFinal(identifier.getBytes()); - } - - private OzoneBlockTokenIdentifier generateTestToken() { - return new OzoneBlockTokenIdentifier(RandomStringUtils.randomAlphabetic(6), - RandomStringUtils.randomAlphabetic(5), - EnumSet.allOf(HddsProtos.BlockTokenSecretProto.AccessModeProto.class), - expiryTime, cert.getSerialNumber().toString(), 1024768L); - } - - @Test - void testAsymmetricTokenPerf() throws NoSuchAlgorithmException, - CertificateEncodingException, NoSuchProviderException, - InvalidKeyException, SignatureException { - final int testTokenCount = 1000; - List tokenIds = new ArrayList<>(); - List tokenPasswordAsym = new ArrayList<>(); - for (int i = 0; i < testTokenCount; i++) { - tokenIds.add(generateTestToken()); - } - - KeyPair kp = KeyStoreTestUtil.generateKeyPair("RSA"); - - // Create Ozone Master certificate (SCM CA issued cert) and key store - X509Certificate certificate; - certificate = KeyStoreTestUtil.generateCertificate("CN=OzoneMaster", - kp, 30, "SHA256withRSA"); - - long startTime = Time.monotonicNowNanos(); - for (int i = 0; i < testTokenCount; i++) { - tokenPasswordAsym.add( - signTokenAsymmetric(tokenIds.get(i), kp.getPrivate())); - } - long duration = Time.monotonicNowNanos() - startTime; - LOG.info("Average token sign time with HmacSha256(RSA/1024 key) is {} ns", - duration / testTokenCount); - - startTime = Time.monotonicNowNanos(); - for (int i = 0; i < testTokenCount; i++) { - verifyTokenAsymmetric(tokenIds.get(i), tokenPasswordAsym.get(i), - certificate); - } - duration = Time.monotonicNowNanos() - startTime; - LOG.info("Average token verify time with HmacSha256(RSA/1024 key) " - + "is {} ns", duration / testTokenCount); - } - - @Test - void testSymmetricTokenPerf() { - String hmacSHA1 = "HmacSHA1"; - String hmacSHA256 = "HmacSHA256"; - - testSymmetricTokenPerfHelper(hmacSHA1, 64); - testSymmetricTokenPerfHelper(hmacSHA256, 1024); - } - - private void testSymmetricTokenPerfHelper(String hmacAlgorithm, int keyLen) { - final int testTokenCount = 1000; - List tokenIds = new ArrayList<>(); - List tokenPasswordSym = new ArrayList<>(); - for (int i = 0; i < testTokenCount; i++) { - tokenIds.add(generateTestToken()); - } - - KeyGenerator keyGen; - try 
{ - keyGen = KeyGenerator.getInstance(hmacAlgorithm); - keyGen.init(keyLen); - } catch (NoSuchAlgorithmException nsa) { - throw new IllegalArgumentException("Can't find " + hmacAlgorithm + - " algorithm."); - } - - Mac mac; - try { - mac = Mac.getInstance(hmacAlgorithm); - } catch (NoSuchAlgorithmException nsa) { - throw new IllegalArgumentException("Can't find " + hmacAlgorithm + - " algorithm."); - } - - SecretKey secretKey = keyGen.generateKey(); - - long startTime = Time.monotonicNowNanos(); - for (int i = 0; i < testTokenCount; i++) { - tokenPasswordSym.add( - signTokenSymmetric(tokenIds.get(i), mac, secretKey)); - } - long duration = Time.monotonicNowNanos() - startTime; - LOG.info("Average token sign time with {}({} symmetric key) is {} ns", - hmacAlgorithm, keyLen, duration / tokenPasswordSym.size()); - } -} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestProtobufUtils.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestProtobufUtils.java new file mode 100644 index 000000000000..fe6a57846c2c --- /dev/null +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestProtobufUtils.java @@ -0,0 +1,48 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.utils; + +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.junit.jupiter.api.Test; + +import java.util.UUID; + +import static org.apache.hadoop.util.ProtobufUtils.fromProtobuf; +import static org.apache.hadoop.util.ProtobufUtils.toProtobuf; +import static org.junit.jupiter.api.Assertions.assertEquals; + +/** + * Test-cases for {@link org.apache.hadoop.util.ProtobufUtils}. + */ +public class TestProtobufUtils { + @Test + public void testUuidToProtobuf() { + UUID object = UUID.randomUUID(); + HddsProtos.UUID protobuf = toProtobuf(object); + assertEquals(object.getLeastSignificantBits(), protobuf.getLeastSigBits()); + assertEquals(object.getMostSignificantBits(), protobuf.getMostSigBits()); + } + + @Test + public void testUuidConversion() { + UUID original = UUID.randomUUID(); + HddsProtos.UUID protobuf = toProtobuf(original); + UUID deserialized = fromProtobuf(protobuf); + assertEquals(original, deserialized); + } +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java index c438779af501..01391abccafa 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java @@ -41,6 +41,9 @@ import org.apache.hadoop.hdds.datanode.metadata.DatanodeCRLStore; import org.apache.hadoop.hdds.datanode.metadata.DatanodeCRLStoreImpl; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.SecretKeyProtocol; +import org.apache.hadoop.hdds.security.symmetric.DefaultSecretKeyClient; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyClient; import org.apache.hadoop.hdds.security.x509.SecurityConfig; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.hdds.security.x509.certificate.client.DNCertificateClient; @@ -102,6 +105,7 @@ public class HddsDatanodeService extends GenericCli implements ServicePlugin { private DatanodeStateMachine datanodeStateMachine; private List plugins; private CertificateClient dnCertClient; + private SecretKeyClient secretKeyClient; private String component; private HddsDatanodeHttpServer httpServer; private boolean printBanner; @@ -299,9 +303,17 @@ public void start() { if (OzoneSecurityUtil.isSecurityEnabled(conf)) { dnCertClient = initializeCertificateClient(dnCertClient); + + if (secConf.isTokenEnabled()) { + SecretKeyProtocol secretKeyProtocol = + HddsServerUtil.getSecretKeyClientForDatanode(conf); + secretKeyClient = DefaultSecretKeyClient.create(conf, + secretKeyProtocol); + secretKeyClient.start(conf); + } } datanodeStateMachine = new DatanodeStateMachine(datanodeDetails, conf, - dnCertClient, this::terminateDatanode, dnCRLStore); + dnCertClient, secretKeyClient, this::terminateDatanode, dnCRLStore); try { httpServer = new HddsDatanodeHttpServer(conf); httpServer.start(); @@ -597,6 +609,10 @@ public void stop() { LOG.error("Datanode CRL store stop failed", ex); } RatisDropwizardExports.clear(ratisMetricsMap, 
ratisReporterList); + + if (secretKeyClient != null) { + secretKeyClient.stop(); + } } } @@ -639,6 +655,11 @@ public void setCertificateClient(CertificateClient client) dnCertClient = client; } + @VisibleForTesting + public void setSecretKeyClient(SecretKeyClient client) { + this.secretKeyClient = client; + } + @Override public void printError(Throwable error) { LOG.error("Exception in HddsDatanodeService.", error); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java index 14c6cb992587..c988b21867cf 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyClient; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager; import org.apache.hadoop.hdds.utils.IOUtils; @@ -137,6 +138,7 @@ public class DatanodeStateMachine implements Closeable { public DatanodeStateMachine(DatanodeDetails datanodeDetails, ConfigurationSource conf, CertificateClient certClient, + SecretKeyClient secretKeyClient, HddsDatanodeStopService hddsDatanodeStopService, DatanodeCRLStore crlStore) throws IOException { DatanodeConfiguration dnConf = @@ -169,7 +171,7 @@ public DatanodeStateMachine(DatanodeDetails datanodeDetails, constructionLock.writeLock().lock(); try { container = new OzoneContainer(this.datanodeDetails, - conf, context, certClient); + conf, context, certClient, secretKeyClient); } finally { constructionLock.writeLock().unlock(); } @@ -206,7 +208,7 @@ public DatanodeStateMachine(DatanodeDetails datanodeDetails, ecReconstructionMetrics = ECReconstructionMetrics.create(); ecReconstructionCoordinator = new ECReconstructionCoordinator( - conf, certClient, context, ecReconstructionMetrics); + conf, certClient, secretKeyClient, context, ecReconstructionMetrics); // This is created as an instance variable as Mockito needs to access it in // a test. The test mocks it in a running mini-cluster. 
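
The hunks above replace certificate-based token signing with SCM-managed symmetric secret keys (HmacSHA256 by default, per hdds.secret.key.algorithm). As a minimal, self-contained sketch of what that signing scheme amounts to, using only JDK crypto — this illustrates the mechanism, not the patch's actual SecretKeyClient API, and the token-identifier string is a made-up stand-in:

```java
import java.security.MessageDigest;
import javax.crypto.KeyGenerator;
import javax.crypto.Mac;
import javax.crypto.SecretKey;

public final class SymmetricTokenSketch {
  public static void main(String[] args) throws Exception {
    // In the real system, SCM generates this key and rotates it every
    // hdds.secret.key.rotate.duration (1d by default).
    SecretKey key = KeyGenerator.getInstance("HmacSHA256").generateKey();

    // Stand-in for the serialized token identifier bytes.
    byte[] tokenId = "testUser:84940:READ,WRITE".getBytes("UTF-8");

    // Sign: the token password is the HMAC of the identifier bytes.
    Mac mac = Mac.getInstance("HmacSHA256");
    mac.init(key);
    byte[] password = mac.doFinal(tokenId);

    // Verify: any holder of the shared key recomputes and compares.
    mac.init(key);
    boolean valid = MessageDigest.isEqual(password, mac.doFinal(tokenId));
    System.out.println("token valid: " + valid);
  }
}
```

Because verification needs only the shared key (fetched from SCM over the new SecretKeyProtocol) rather than a certificate chain, the datanode and EC reconstruction paths can drop their certClient dependency, which is what the constructor changes above and below reflect.
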
@@ -256,6 +258,12 @@ public DatanodeStateMachine(DatanodeDetails datanodeDetails, queueMetrics = DatanodeQueueMetrics.create(this); } + @VisibleForTesting + public DatanodeStateMachine(DatanodeDetails datanodeDetails, + ConfigurationSource conf) throws IOException { + this(datanodeDetails, conf, null, null, null, null); + } + private int getEndPointTaskThreadPoolSize() { // TODO(runzhiwang): current only support one recon, if support multiple // recon in future reconServerCount should be the real number of recon diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java index 2f44f90326d6..f454e202761d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionCoordinator.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; import org.apache.hadoop.hdds.scm.storage.BufferPool; import org.apache.hadoop.hdds.scm.storage.ECBlockOutputStream; +import org.apache.hadoop.hdds.security.symmetric.SecretKeySignerClient; import org.apache.hadoop.hdds.security.token.ContainerTokenIdentifier; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.hdds.utils.IOUtils; @@ -109,9 +110,9 @@ public class ECReconstructionCoordinator implements Closeable { private final ECReconstructionMetrics metrics; private final StateContext context; - public ECReconstructionCoordinator(ConfigurationSource conf, - CertificateClient certificateClient, - StateContext context, + public ECReconstructionCoordinator( + ConfigurationSource conf, CertificateClient certificateClient, + SecretKeySignerClient secretKeyClient, StateContext context, ECReconstructionMetrics metrics) throws IOException { this.context = context; this.containerOperationClient = new ECContainerOperationClient(conf, @@ -127,7 +128,7 @@ public ECReconstructionCoordinator(ConfigurationSource conf, new ThreadPoolExecutor.CallerRunsPolicy()); this.blockInputStreamFactory = BlockInputStreamFactoryImpl .getInstance(byteBufferPool, () -> ecReconstructExecutor); - tokenHelper = new TokenHelper(conf, certificateClient); + tokenHelper = new TokenHelper(conf, secretKeyClient); this.clientMetrics = ContainerClientMetrics.acquire(); this.metrics = metrics; } @@ -386,7 +387,6 @@ public void close() throws IOException { if (containerOperationClient != null) { containerOperationClient.close(); } - tokenHelper.stop(); } private Pipeline rebuildInputPipeline(ECReplicationConfig repConfig, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/TokenHelper.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/TokenHelper.java index b5f7871d45cb..682b9dc14766 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/TokenHelper.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/TokenHelper.java @@ -22,17 +22,16 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto; import 
org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.security.symmetric.SecretKeySignerClient; import org.apache.hadoop.hdds.security.token.ContainerTokenIdentifier; import org.apache.hadoop.hdds.security.token.ContainerTokenSecretManager; import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSecretManager; import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import java.io.IOException; -import java.time.Duration; import java.util.EnumSet; import java.util.Set; import java.util.concurrent.TimeUnit; @@ -52,7 +51,7 @@ class TokenHelper { private static final Set MODES = EnumSet.of(READ, WRITE, DELETE); - TokenHelper(ConfigurationSource conf, CertificateClient certClient) + TokenHelper(ConfigurationSource conf, SecretKeySignerClient secretKeyClient) throws IOException { SecurityConfig securityConfig = new SecurityConfig(conf); @@ -61,7 +60,7 @@ class TokenHelper { // checking certClient != null instead of securityConfig.isSecurityEnabled() // to allow integration test without full kerberos etc. setup - boolean securityEnabled = certClient != null; + boolean securityEnabled = secretKeyClient != null; if (securityEnabled && (blockTokenEnabled || containerTokenEnabled)) { user = UserGroupInformation.getCurrentUser().getShortUserName(); @@ -70,32 +69,17 @@ class TokenHelper { HddsConfigKeys.HDDS_BLOCK_TOKEN_EXPIRY_TIME, HddsConfigKeys.HDDS_BLOCK_TOKEN_EXPIRY_TIME_DEFAULT, TimeUnit.MILLISECONDS); - long certificateGracePeriod = Duration.parse( - conf.get(HddsConfigKeys.HDDS_X509_RENEW_GRACE_DURATION, - HddsConfigKeys.HDDS_X509_RENEW_GRACE_DURATION_DEFAULT)) - .toMillis(); - boolean tokenSanityChecksEnabled = conf.getBoolean( - HddsConfigKeys.HDDS_X509_GRACE_DURATION_TOKEN_CHECKS_ENABLED, - HddsConfigKeys.HDDS_X509_GRACE_DURATION_TOKEN_CHECKS_ENABLED_DEFAULT); - if (tokenSanityChecksEnabled && expiryTime > certificateGracePeriod) { - throw new IllegalArgumentException("Certificate grace period " + - HddsConfigKeys.HDDS_X509_RENEW_GRACE_DURATION + - " should be greater than maximum block/container token lifetime " + - HddsConfigKeys.HDDS_BLOCK_TOKEN_EXPIRY_TIME); - } if (blockTokenEnabled) { - blockTokenMgr = new OzoneBlockTokenSecretManager( - securityConfig, expiryTime); - blockTokenMgr.start(certClient); + blockTokenMgr = new OzoneBlockTokenSecretManager(expiryTime, + secretKeyClient); } else { blockTokenMgr = null; } if (containerTokenEnabled) { - containerTokenMgr = new ContainerTokenSecretManager( - securityConfig, expiryTime); - containerTokenMgr.start(certClient); + containerTokenMgr = new ContainerTokenSecretManager(expiryTime, + secretKeyClient); } else { containerTokenMgr = null; } @@ -106,23 +90,6 @@ class TokenHelper { } } - void stop() { - if (blockTokenMgr != null) { - try { - blockTokenMgr.stop(); - } catch (IOException ignored) { - // no threads involved, cannot really happen - } - } - if (containerTokenMgr != null) { - try { - containerTokenMgr.stop(); - } catch (IOException ignored) { - // no threads involved, cannot really happen - } - } - } - Token getBlockToken(BlockID blockID, long length) { return blockTokenMgr != null ? 
blockTokenMgr.generateToken(user, blockID, MODES, length) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java index 4e52d25e4793..dbb3832b9450 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyVerifierClient; import org.apache.hadoop.hdds.security.token.TokenVerifier; import org.apache.hadoop.hdds.security.x509.SecurityConfig; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; @@ -135,7 +136,8 @@ enum InitializingStatus { */ public OzoneContainer( DatanodeDetails datanodeDetails, ConfigurationSource conf, - StateContext context, CertificateClient certClient) throws IOException { + StateContext context, CertificateClient certClient, + SecretKeyVerifierClient secretKeyClient) throws IOException { config = conf; this.datanodeDetails = datanodeDetails; this.context = context; @@ -189,7 +191,8 @@ public OzoneContainer( SecurityConfig secConf = new SecurityConfig(conf); hddsDispatcher = new HddsDispatcher(config, containerSet, volumeSet, - handlers, context, metrics, TokenVerifier.create(secConf, certClient)); + handlers, context, metrics, + TokenVerifier.create(secConf, secretKeyClient)); /* * ContainerController is the control plane @@ -260,6 +263,16 @@ public OzoneContainer( new AtomicReference<>(InitializingStatus.UNINITIALIZED); } + /** + * Shorthand constructor used for testing in non-secure context. 
+ */ + @VisibleForTesting + public OzoneContainer( + DatanodeDetails datanodeDetails, ConfigurationSource conf, + StateContext context) throws IOException { + this(datanodeDetails, conf, context, null, null); + } + public GrpcTlsConfig getTlsClientConfig() { return tlsClientConfig; } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java index 77714c0eb744..afc6c3ca64e1 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java @@ -106,7 +106,7 @@ public static OzoneContainer getOzoneContainer( StateContext context = Mockito.mock(StateContext.class); Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(datanodeDetails); Mockito.when(context.getParent()).thenReturn(stateMachine); - return new OzoneContainer(datanodeDetails, conf, context, null); + return new OzoneContainer(datanodeDetails, conf, context); } public static DatanodeDetails createDatanodeDetails() { diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java index 1fc56b29639f..97903fb13b75 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java @@ -157,8 +157,7 @@ public void tearDown() throws Exception { public void testStartStopDatanodeStateMachine() throws IOException, InterruptedException, TimeoutException { try (DatanodeStateMachine stateMachine = - new DatanodeStateMachine(getNewDatanodeDetails(), conf, null, null, - null)) { + new DatanodeStateMachine(getNewDatanodeDetails(), conf)) { stateMachine.startDaemon(); SCMConnectionManager connectionManager = stateMachine.getConnectionManager(); @@ -220,8 +219,7 @@ public void testDatanodeStateContext() throws IOException, datanodeDetails.setPort(port); ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, idPath, conf); try (DatanodeStateMachine stateMachine = - new DatanodeStateMachine(datanodeDetails, conf, null, null, - null)) { + new DatanodeStateMachine(datanodeDetails, conf)) { DatanodeStateMachine.DatanodeStates currentState = stateMachine.getContext().getState(); Assertions.assertEquals(DatanodeStateMachine.DatanodeStates.INIT, @@ -343,8 +341,7 @@ public void testDatanodeStateMachineWithIdWriteFail() throws Exception { datanodeDetails.setPort(port); try (DatanodeStateMachine stateMachine = - new DatanodeStateMachine(datanodeDetails, conf, null, null, - null)) { + new DatanodeStateMachine(datanodeDetails, conf)) { DatanodeStateMachine.DatanodeStates currentState = stateMachine.getContext().getState(); Assertions.assertEquals(DatanodeStateMachine.DatanodeStates.INIT, @@ -402,7 +399,7 @@ public void testDatanodeStateMachineWithInvalidConfiguration() perTestConf.setStrings(entry.getKey(), entry.getValue()); LOG.info("Test with {} = {}", entry.getKey(), entry.getValue()); try (DatanodeStateMachine stateMachine = new DatanodeStateMachine( - getNewDatanodeDetails(), perTestConf, null, null, null)) { + getNewDatanodeDetails(), perTestConf)) { 
DatanodeStateMachine.DatanodeStates currentState = stateMachine.getContext().getState(); Assertions.assertEquals(DatanodeStateMachine.DatanodeStates.INIT, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index cceed793902f..f06eea2ec132 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -178,7 +178,7 @@ public void testBuildContainerMap() throws Exception { // loaded into the containerSet. // Also expected to initialize committed space for each volume. OzoneContainer ozoneContainer = new - OzoneContainer(datanodeDetails, conf, context, null); + OzoneContainer(datanodeDetails, conf, context); ContainerSet containerset = ozoneContainer.getContainerSet(); assertEquals(numTestContainers, containerset.containerCount()); @@ -213,7 +213,7 @@ public void testBuildNodeReport() throws Exception { // loaded into the containerSet. // Also expected to initialize committed space for each volume. OzoneContainer ozoneContainer = new - OzoneContainer(datanodeDetails, conf, context, null); + OzoneContainer(datanodeDetails, conf, context); Assert.assertEquals(volumeSet.getVolumesList().size(), ozoneContainer.getNodeReport().getStorageReportList().size()); Assert.assertEquals(3, @@ -234,7 +234,7 @@ public void testBuildNodeReportWithDefaultRatisLogDir() throws Exception { // loaded into the containerSet. // Also expected to initialize committed space for each volume. OzoneContainer ozoneContainer = new - OzoneContainer(datanodeDetails, conf, context, null); + OzoneContainer(datanodeDetails, conf, context); Assert.assertEquals(volumeSet.getVolumesList().size(), ozoneContainer.getNodeReport().getStorageReportList().size()); Assert.assertEquals(1, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java index 306cdb77b329..82a9db037787 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java @@ -65,8 +65,7 @@ public void testStartupSlvLessThanMlv() throws Exception { HddsProtos.NodeType.DATANODE, mlv); try { - new DatanodeStateMachine(getNewDatanodeDetails(), conf, null, - null, null); + new DatanodeStateMachine(getNewDatanodeDetails(), conf); Assert.fail("Expected IOException due to incorrect MLV on DataNode " + "creation."); } catch (IOException e) { diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java index 1670c1ebc076..7669a577cf7e 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java @@ -222,7 +222,7 @@ public 
void testDBCreatedInFinalize() throws Exception { HDDSLayoutFeature.ERASURE_CODED_STORAGE_SUPPORT.layoutVersion()); layoutStorage.initialize(); dsm = new DatanodeStateMachine( - ContainerTestUtils.createDatanodeDetails(), conf, null, null, null); + ContainerTestUtils.createDatanodeDetails(), conf); HddsVolume dataVolume = ( HddsVolume) dsm.getContainer().getVolumeSet().getVolumesList().get(0); // Format HddsVolume to mimic the real cluster upgrade situation @@ -493,7 +493,7 @@ public void testFinalizeFailure() throws Exception { HDDSLayoutFeature.ERASURE_CODED_STORAGE_SUPPORT.layoutVersion()); layoutStorage.initialize(); dsm = new DatanodeStateMachine( - ContainerTestUtils.createDatanodeDetails(), conf, null, null, null); + ContainerTestUtils.createDatanodeDetails(), conf); HddsVolume dataVolume = ( HddsVolume) dsm.getContainer().getVolumeSet().getVolumesList().get(0); // Format HddsVolume to mimic the real cluster upgrade situation @@ -592,8 +592,7 @@ public void startPreFinalizedDatanode() throws Exception { // Build and start the datanode. DatanodeDetails dd = ContainerTestUtils.createDatanodeDetails(); - DatanodeStateMachine newDsm = new DatanodeStateMachine(dd, - conf, null, null, null); + DatanodeStateMachine newDsm = new DatanodeStateMachine(dd, conf); int actualMlv = newDsm.getLayoutVersionManager().getMetadataLayoutVersion(); Assert.assertEquals( HDDSLayoutFeature.ERASURE_CODED_STORAGE_SUPPORT.layoutVersion(), @@ -613,8 +612,7 @@ public void restartDatanode(int expectedMlv, boolean exactMatch) dsm.close(); // Start new datanode with the same configuration. - dsm = new DatanodeStateMachine(dd, - conf, null, null, null); + dsm = new DatanodeStateMachine(dd, conf); int mlv = dsm.getLayoutVersionManager().getMetadataLayoutVersion(); if (exactMatch) { Assert.assertEquals(expectedMlv, mlv); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java index 3ff347fbdf93..79f55001c79c 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java @@ -516,9 +516,7 @@ public void startPreFinalizedDatanode() throws Exception { // Build and start the datanode. DatanodeDetails dd = ContainerTestUtils.createDatanodeDetails(); - DatanodeStateMachine newDsm = new DatanodeStateMachine(dd, - conf, null, null, - null); + DatanodeStateMachine newDsm = new DatanodeStateMachine(dd, conf); int actualMlv = newDsm.getLayoutVersionManager().getMetadataLayoutVersion(); Assert.assertEquals(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion(), actualMlv); @@ -534,9 +532,7 @@ public void restartDatanode(int expectedMlv, boolean exactMatch) dsm.close(); // Start new datanode with the same configuration. - dsm = new DatanodeStateMachine(dd, - conf, null, null, - null); + dsm = new DatanodeStateMachine(dd, conf); int mlv = dsm.getLayoutVersionManager().getMetadataLayoutVersion(); if (exactMatch) { Assert.assertEquals(expectedMlv, mlv); diff --git a/hadoop-hdds/docs/content/feature/Quota.md b/hadoop-hdds/docs/content/feature/Quota.md index 9f243f499215..afa68b98e1f3 100644 --- a/hadoop-hdds/docs/content/feature/Quota.md +++ b/hadoop-hdds/docs/content/feature/Quota.md @@ -70,6 +70,12 @@ f. 
If the cluster is upgraded from old version less than 1.1.0, use of quota on ### Storage Space level quota Storage space level quotas allow the use of units B, KB, MB, GB and TB. Represents how much storage Spaces will be used. +#### Note: + +- Decimals are not supported while setting quota for volume and bucket. For example, 1.5 TB. + +- Ensure that the minimum storage quota is default block size * replication factor. If you set the value lesser than the default block size * replication factor, while writing the data (key put) operation, an operation error is displayed. + #### Volume Storage Space level quota ```shell bin/ozone sh volume create --space-quota 5MB /volume1 diff --git a/hadoop-hdds/docs/content/feature/Quota.zh.md b/hadoop-hdds/docs/content/feature/Quota.zh.md index f00b3f646494..7d1c7307f5f6 100644 --- a/hadoop-hdds/docs/content/feature/Quota.zh.md +++ b/hadoop-hdds/docs/content/feature/Quota.zh.md @@ -67,6 +67,11 @@ menu: ## 客户端用法 ### Storage space级别配额 Storage space级别配额允许使用 B, KB ,MB ,GB ,TB 等单位。表示将使用多少个存储空间。 + +#### 注意: +- Volume 和 Bucket 不支持设置带小数点的配额值,例如 1.5 TB. +- 最小的有效空间配额,是一个数据块需要的存储空间,即默认块大小 * 副本数. 请确保设置的空间配额不小于这个数值,不然对象/文件写入操作,会失败。 + #### Volume Space quota用法 ```shell bin/ozone sh volume create --space-quota 5MB /volume1 diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java index 26107d54acc3..2354a9b8d85c 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SCMSecurityProtocol.java @@ -170,4 +170,5 @@ long revokeCertificates(List certIds, int reason, long revocationTime) */ String getCertificate(NodeDetailsProto nodeDetails, String certSignReq) throws IOException; + } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SecretKeyProtocol.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SecretKeyProtocol.java new file mode 100644 index 000000000000..ec12efab532a --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SecretKeyProtocol.java @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.hdds.protocol; + +import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.hadoop.hdds.scm.ScmConfig; +import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey; +import org.apache.hadoop.security.KerberosInfo; + +import java.io.IOException; +import java.util.List; +import java.util.UUID; + +/** + * The protocol used to expose secret keys in SCM. 
+ */ +@KerberosInfo( + serverPrincipal = ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_PRINCIPAL_KEY) +@InterfaceAudience.Private +public interface SecretKeyProtocol { + + /** + * Get the current SecretKey that is used for signing tokens. + * @return ManagedSecretKey + */ + ManagedSecretKey getCurrentSecretKey() throws IOException; + + /** + * Get a particular SecretKey by ID. + * + * @param id the id to get SecretKey. + * @return ManagedSecretKey. + */ + ManagedSecretKey getSecretKey(UUID id) throws IOException; + + /** + * Get all the non-expired SecretKey managed by SCM. + * @return list of ManagedSecretKey. + */ + List getAllSecretKeys() throws IOException; +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SecretKeyProtocolDatanode.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SecretKeyProtocolDatanode.java new file mode 100644 index 000000000000..433c27bd8a8a --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SecretKeyProtocolDatanode.java @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.hdds.protocol; + +import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.hadoop.security.KerberosInfo; + +import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY; + +/** + * The client protocol to access secret key from Datanode. + */ +@KerberosInfo( + serverPrincipal = HDDS_SCM_KERBEROS_PRINCIPAL_KEY, + clientPrincipal = DFS_DATANODE_KERBEROS_PRINCIPAL_KEY +) +@InterfaceAudience.Private +public interface SecretKeyProtocolDatanode extends SecretKeyProtocol { +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SecretKeyProtocolOm.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SecretKeyProtocolOm.java new file mode 100644 index 000000000000..9eebaecfe336 --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SecretKeyProtocolOm.java @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.hdds.protocol; + +import org.apache.hadoop.security.KerberosInfo; + +import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_PRINCIPAL_KEY; + +/** + * The client protocol to access secret key from OM. + */ +@KerberosInfo( + serverPrincipal = HDDS_SCM_KERBEROS_PRINCIPAL_KEY, + // TODO: move OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY to hdds-common. + clientPrincipal = "ozone.om.kerberos.principal" +) +public interface SecretKeyProtocolOm extends SecretKeyProtocol { +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SecretKeyProtocolScm.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SecretKeyProtocolScm.java new file mode 100644 index 000000000000..9439c2ede030 --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocol/SecretKeyProtocolScm.java @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.hdds.protocol; + +import org.apache.hadoop.security.KerberosInfo; + +import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_PRINCIPAL_KEY; + +/** + * The client protocol to access secret key from SCM. + */ +@KerberosInfo( + serverPrincipal = HDDS_SCM_KERBEROS_PRINCIPAL_KEY, + clientPrincipal = HDDS_SCM_KERBEROS_PRINCIPAL_KEY +) +public interface SecretKeyProtocolScm extends SecretKeyProtocol { +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SecretKeyProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SecretKeyProtocolClientSideTranslatorPB.java new file mode 100644 index 000000000000..a0206555e397 --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SecretKeyProtocolClientSideTranslatorPB.java @@ -0,0 +1,165 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.hdds.protocolPB; + +import com.google.common.base.Preconditions; +import com.google.protobuf.RpcController; +import com.google.protobuf.ServiceException; +import org.apache.hadoop.hdds.protocol.SecretKeyProtocol; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.SCMSecretKeyProtocolProtos; +import org.apache.hadoop.hdds.protocol.proto.SCMSecretKeyProtocolProtos.SCMGetSecretKeyRequest; +import org.apache.hadoop.hdds.protocol.proto.SCMSecretKeyProtocolProtos.SCMGetSecretKeyResponse; +import org.apache.hadoop.hdds.protocol.proto.SCMSecretKeyProtocolProtos.SCMSecretKeyProtocolService.BlockingInterface; +import org.apache.hadoop.hdds.protocol.proto.SCMSecretKeyProtocolProtos.SCMSecretKeyRequest; +import org.apache.hadoop.hdds.protocol.proto.SCMSecretKeyProtocolProtos.SCMSecretKeyRequest.Builder; +import org.apache.hadoop.hdds.protocol.proto.SCMSecretKeyProtocolProtos.SCMSecretKeyResponse; +import org.apache.hadoop.hdds.protocol.proto.SCMSecretKeyProtocolProtos.Type; +import org.apache.hadoop.hdds.scm.proxy.SecretKeyProtocolFailoverProxyProvider; +import org.apache.hadoop.hdds.security.exception.SCMSecretKeyException; +import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey; +import org.apache.hadoop.hdds.tracing.TracingUtil; +import org.apache.hadoop.io.retry.RetryProxy; +import org.apache.hadoop.ipc.ProtobufHelper; +import org.apache.hadoop.ipc.ProtocolTranslator; +import org.apache.hadoop.ipc.RPC; + +import java.io.Closeable; +import java.io.IOException; +import java.util.List; +import java.util.UUID; +import java.util.function.Consumer; +import java.util.stream.Collectors; + +/** + * This class is the client-side translator that forwards requests for + * {@link SecretKeyProtocol} to the server proxy. + */ +public class SecretKeyProtocolClientSideTranslatorPB implements + SecretKeyProtocol, ProtocolTranslator, Closeable { + + /** + * RpcController is not used and hence is set to null. + */ + private static final RpcController NULL_RPC_CONTROLLER = null; + private final BlockingInterface rpcProxy; + private SecretKeyProtocolFailoverProxyProvider failoverProxyProvider; + + public SecretKeyProtocolClientSideTranslatorPB( + SecretKeyProtocolFailoverProxyProvider + proxyProvider, Class proxyClazz) { + Preconditions.checkState(proxyProvider != null); + this.failoverProxyProvider = proxyProvider; + this.rpcProxy = (BlockingInterface) RetryProxy.create( + proxyClazz, failoverProxyProvider, + failoverProxyProvider.getRetryPolicy()); + } + + /** + * Helper method to wrap the request and send the message. 
+ */ + private SCMSecretKeyResponse submitRequest( + Type type, + Consumer builderConsumer) throws IOException { + final SCMSecretKeyResponse response; + try { + + Builder builder = SCMSecretKeyRequest.newBuilder() + .setCmdType(type) + .setTraceID(TracingUtil.exportCurrentSpan()); + builderConsumer.accept(builder); + SCMSecretKeyRequest wrapper = builder.build(); + + response = rpcProxy.submitRequest(NULL_RPC_CONTROLLER, wrapper); + + handleError(response); + + } catch (ServiceException ex) { + throw ProtobufHelper.getRemoteException(ex); + } + return response; + } + + private SCMSecretKeyResponse handleError(SCMSecretKeyResponse resp) + throws SCMSecretKeyException { + if (resp.getStatus() != SCMSecretKeyProtocolProtos.Status.OK) { + throw new SCMSecretKeyException(resp.getMessage(), + SCMSecretKeyException.ErrorCode.values()[resp.getStatus().ordinal()]); + } + return resp; + } + + /** + * Closes this stream and releases any system resources associated + * with it. If the stream is already closed then invoking this + * method has no effect. + * + *
As noted in {@link AutoCloseable#close()}, cases where the + * close may fail require careful attention. It is strongly advised + * to relinquish the underlying resources and to internally + * mark the {@code Closeable} as closed, prior to throwing + * the {@code IOException}. + * + * @throws IOException if an I/O error occurs + */ + @Override + public void close() throws IOException { + RPC.stopProxy(rpcProxy); + } + + @Override + public ManagedSecretKey getCurrentSecretKey() throws IOException { + SCMSecretKeyProtocolProtos.ManagedSecretKey secretKeyProto = + submitRequest(Type.GetCurrentSecretKey, builder -> { + }).getCurrentSecretKeyResponseProto().getSecretKey(); + return ManagedSecretKey.fromProtobuf(secretKeyProto); + } + + @Override + public ManagedSecretKey getSecretKey(UUID id) throws IOException { + SCMGetSecretKeyRequest request = SCMGetSecretKeyRequest.newBuilder() + .setSecretKeyId(HddsProtos.UUID.newBuilder() + .setMostSigBits(id.getMostSignificantBits()) + .setLeastSigBits(id.getLeastSignificantBits())).build(); + SCMGetSecretKeyResponse response = submitRequest(Type.GetSecretKey, + builder -> builder.setGetSecretKeyRequest(request)) + .getGetSecretKeyResponseProto(); + + return response.hasSecretKey() ? + ManagedSecretKey.fromProtobuf(response.getSecretKey()) : null; + } + + @Override + public List getAllSecretKeys() throws IOException { + List secretKeysList = + submitRequest(Type.GetAllSecretKeys, builder -> { + }).getSecretKeysListResponseProto().getSecretKeysList(); + return secretKeysList.stream() + .map(ManagedSecretKey::fromProtobuf) + .collect(Collectors.toList()); + } + + /** + * Return the proxy object underlying this protocol translator. + * + * @return the proxy object underlying this protocol translator. + */ + @Override + public Object getUnderlyingProxyObject() { + return rpcProxy; + } +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SecretKeyProtocolDatanodePB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SecretKeyProtocolDatanodePB.java new file mode 100644 index 000000000000..57f53df87925 --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SecretKeyProtocolDatanodePB.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.hdds.protocolPB; + +import org.apache.hadoop.hdds.protocol.proto.SCMSecretKeyProtocolProtos.SCMSecretKeyProtocolService; +import org.apache.hadoop.ipc.ProtocolInfo; +import org.apache.hadoop.security.KerberosInfo; + +import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY; + +/** + * Protocol for secret key related operations, to be used by datanode + * service role. + */ +@ProtocolInfo(protocolName = + "org.apache.hadoop.hdds.protocol.SecretKeyProtocolDatanode", + protocolVersion = 1) +@KerberosInfo( + serverPrincipal = HDDS_SCM_KERBEROS_PRINCIPAL_KEY, + clientPrincipal = DFS_DATANODE_KERBEROS_PRINCIPAL_KEY +) +public interface SecretKeyProtocolDatanodePB extends + SCMSecretKeyProtocolService.BlockingInterface { + +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SecretKeyProtocolOmPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SecretKeyProtocolOmPB.java new file mode 100644 index 000000000000..5865e67e4615 --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SecretKeyProtocolOmPB.java @@ -0,0 +1,39 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.hdds.protocolPB; + +import org.apache.hadoop.hdds.protocol.proto.SCMSecretKeyProtocolProtos.SCMSecretKeyProtocolService; +import org.apache.hadoop.ipc.ProtocolInfo; +import org.apache.hadoop.security.KerberosInfo; + +import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_PRINCIPAL_KEY; + +/** + * Protocol for secret key related operations, to be used by OM service role. + */ +@ProtocolInfo(protocolName = + "org.apache.hadoop.hdds.protocol.SecretKeyProtocolOm", + protocolVersion = 1) +@KerberosInfo( + serverPrincipal = HDDS_SCM_KERBEROS_PRINCIPAL_KEY, + // TODO: move OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY to hdds-common. + clientPrincipal = "ozone.om.kerberos.principal" +) +public interface SecretKeyProtocolOmPB extends + SCMSecretKeyProtocolService.BlockingInterface { + +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SecretKeyProtocolScmPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SecretKeyProtocolScmPB.java new file mode 100644 index 000000000000..42a1b15683ca --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SecretKeyProtocolScmPB.java @@ -0,0 +1,38 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.hdds.protocolPB; + +import org.apache.hadoop.hdds.protocol.proto.SCMSecretKeyProtocolProtos.SCMSecretKeyProtocolService; +import org.apache.hadoop.ipc.ProtocolInfo; +import org.apache.hadoop.security.KerberosInfo; + +import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_PRINCIPAL_KEY; + +/** + * Protocol for secret key related operations, to be used by SCM service role. + */ +@ProtocolInfo(protocolName = + "org.apache.hadoop.hdds.protocol.SecretKeyProtocolScm", + protocolVersion = 1) +@KerberosInfo( + serverPrincipal = HDDS_SCM_KERBEROS_PRINCIPAL_KEY, + clientPrincipal = HDDS_SCM_KERBEROS_PRINCIPAL_KEY +) +public interface SecretKeyProtocolScmPB extends + SCMSecretKeyProtocolService.BlockingInterface { + +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SecretKeyProtocolFailoverProxyProvider.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SecretKeyProtocolFailoverProxyProvider.java new file mode 100644 index 000000000000..9e985e942724 --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SecretKeyProtocolFailoverProxyProvider.java @@ -0,0 +1,303 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.proxy; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationException; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.protocol.proto.SCMSecretKeyProtocolProtos.SCMSecretKeyProtocolService; +import org.apache.hadoop.hdds.ratis.ServerNotLeaderException; +import org.apache.hadoop.hdds.scm.ha.SCMHAUtils; +import org.apache.hadoop.hdds.scm.ha.SCMNodeInfo; +import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; +import org.apache.hadoop.io.retry.FailoverProxyProvider; +import org.apache.hadoop.io.retry.RetryPolicies; +import org.apache.hadoop.io.retry.RetryPolicy; +import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.UserGroupInformation; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.Closeable; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; + +/** + * Failover proxy provider for SCMSecretKeyProtocolService server. + */ +public class SecretKeyProtocolFailoverProxyProvider + implements + FailoverProxyProvider, Closeable { + + public static final Logger LOG = + LoggerFactory.getLogger(SecretKeyProtocolFailoverProxyProvider.class); + + // scmNodeId -> ProxyInfo + private final Map> scmProxies; + + // scmNodeId -> SCMProxyInfo + private final Map scmProxyInfoMap; + + private List scmNodeIds; + + // As SCM Client is shared across threads, performFailOver() + // updates the currentProxySCMNodeId based on the updateLeaderNodeId which is + // updated in shouldRetry(). When 2 or more threads run in parallel, the + // RetryInvocationHandler will check the expectedFailOverCount + // and not execute performFailOver() for one of them. So the other thread(s) + // shall not call performFailOver(), it will call getProxy() which uses + // currentProxySCMNodeId and returns the proxy. + private volatile String currentProxySCMNodeId; + private volatile int currentProxyIndex; + + + private final ConfigurationSource conf; + private final SCMClientConfig scmClientConfig; + private final long scmVersion; + + private String scmServiceId; + + private final int maxRetryCount; + private final long retryInterval; + + private final UserGroupInformation ugi; + private final Class proxyClazz; + + private String updatedLeaderNodeID = null; + + /** + * Construct fail-over proxy provider for SCMSecurityProtocol Server. 
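+   * Proxies are created lazily and cached per SCM node; fail-over rotates
+   * round-robin through the configured SCM nodes unless a suggested
+   * leader is known from a ServerNotLeaderException.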
+ * @param conf + * @param userGroupInformation + */ + public SecretKeyProtocolFailoverProxyProvider(ConfigurationSource conf, + UserGroupInformation userGroupInformation, Class proxyClazz) { + Preconditions.checkNotNull(userGroupInformation); + this.ugi = userGroupInformation; + this.conf = conf; + this.proxyClazz = proxyClazz; + this.scmVersion = RPC.getProtocolVersion(proxyClazz); + + this.scmProxies = new HashMap<>(); + this.scmProxyInfoMap = new HashMap<>(); + loadConfigs(); + + this.currentProxyIndex = 0; + currentProxySCMNodeId = scmNodeIds.get(currentProxyIndex); + scmClientConfig = conf.getObject(SCMClientConfig.class); + this.maxRetryCount = scmClientConfig.getRetryCount(); + this.retryInterval = scmClientConfig.getRetryInterval(); + } + + protected synchronized void loadConfigs() { + List scmNodeInfoList = SCMNodeInfo.buildNodeInfo(conf); + scmNodeIds = new ArrayList<>(); + + for (SCMNodeInfo scmNodeInfo : scmNodeInfoList) { + if (scmNodeInfo.getScmSecurityAddress() == null) { + throw new ConfigurationException("SCM Client Address could not " + + "be obtained from config. Config is not properly defined"); + } else { + InetSocketAddress scmSecurityAddress = + NetUtils.createSocketAddr(scmNodeInfo.getScmSecurityAddress()); + + scmServiceId = scmNodeInfo.getServiceId(); + String scmNodeId = scmNodeInfo.getNodeId(); + + scmNodeIds.add(scmNodeId); + SCMProxyInfo scmProxyInfo = new SCMProxyInfo(scmServiceId, scmNodeId, + scmSecurityAddress); + scmProxyInfoMap.put(scmNodeId, scmProxyInfo); + } + } + } + + @Override + public synchronized ProxyInfo getProxy() { + ProxyInfo currentProxyInfo = scmProxies.get(getCurrentProxySCMNodeId()); + if (currentProxyInfo == null) { + currentProxyInfo = createSCMProxy(getCurrentProxySCMNodeId()); + } + return currentProxyInfo; + } + + /** + * Creates proxy object. + */ + private ProxyInfo createSCMProxy(String nodeId) { + ProxyInfo proxyInfo; + SCMProxyInfo scmProxyInfo = scmProxyInfoMap.get(nodeId); + InetSocketAddress address = scmProxyInfo.getAddress(); + try { + T scmProxy = createSCMProxy(address); + // Create proxyInfo here, to make it work with all Hadoop versions. + proxyInfo = new ProxyInfo(scmProxy, scmProxyInfo.toString()); + scmProxies.put(nodeId, proxyInfo); + return proxyInfo; + } catch (IOException ioe) { + LOG.error("{} Failed to create RPC proxy to SCM at {}", + this.getClass().getSimpleName(), address, ioe); + throw new RuntimeException(ioe); + } + } + + private T createSCMProxy(InetSocketAddress scmAddress) + throws IOException { + Configuration hadoopConf = + LegacyHadoopConfigurationSource.asHadoopConfiguration(conf); + RPC.setProtocolEngine(hadoopConf, proxyClazz, + ProtobufRpcEngine.class); + + // FailoverOnNetworkException ensures that the IPC layer does not attempt + // retries on the same SCM in case of connection exception. This retry + // policy essentially results in TRY_ONCE_THEN_FAIL. + + RetryPolicy connectionRetryPolicy = RetryPolicies + .failoverOnNetworkException(0); + + return RPC.getProtocolProxy(proxyClazz, + scmVersion, scmAddress, ugi, + hadoopConf, NetUtils.getDefaultSocketFactory(hadoopConf), + (int)scmClientConfig.getRpcTimeOut(), connectionRetryPolicy).getProxy(); + } + + + @Override + public synchronized void performFailover(T currentProxy) { + if (updatedLeaderNodeID != null) { + currentProxySCMNodeId = updatedLeaderNodeID; + } else { + nextProxyIndex(); + } + LOG.debug("Failing over to next proxy. 
{}", getCurrentProxySCMNodeId()); + } + + public synchronized void performFailoverToAssignedLeader(String newLeader, + Exception e) { + ServerNotLeaderException snle = + (ServerNotLeaderException) SCMHAUtils.getServerNotLeaderException(e); + if (snle != null && snle.getSuggestedLeader() != null) { + Optional< SCMProxyInfo > matchedProxyInfo = + scmProxyInfoMap.values().stream().filter( + proxyInfo -> NetUtils.getHostPortString(proxyInfo.getAddress()) + .equals(snle.getSuggestedLeader())).findFirst(); + if (matchedProxyInfo.isPresent()) { + newLeader = matchedProxyInfo.get().getNodeId(); + LOG.debug("Performing failover to suggested leader {}, nodeId {}", + snle.getSuggestedLeader(), newLeader); + } else { + if (LOG.isDebugEnabled()) { + LOG.debug("Suggested leader {} does not match with any of the " + + "proxyInfo adress {}", snle.getSuggestedLeader(), + Arrays.toString(scmProxyInfoMap.values().toArray())); + } + } + } + assignLeaderToNode(newLeader); + } + + + private synchronized void assignLeaderToNode(String newLeaderNodeId) { + if (!currentProxySCMNodeId.equals(newLeaderNodeId)) { + if (scmProxyInfoMap.containsKey(newLeaderNodeId)) { + updatedLeaderNodeID = newLeaderNodeId; + LOG.debug("Updated LeaderNodeID {}", updatedLeaderNodeID); + } else { + updatedLeaderNodeID = null; + } + } + } + + /** + * Update the proxy index to the next proxy in the list. + * @return the new proxy index + */ + private synchronized void nextProxyIndex() { + // round robin the next proxy + currentProxyIndex = (getCurrentProxyIndex() + 1) % scmProxyInfoMap.size(); + currentProxySCMNodeId = scmNodeIds.get(currentProxyIndex); + } + + public RetryPolicy getRetryPolicy() { + // Client will attempt up to maxFailovers number of failovers between + // available SCMs before throwing exception. 
+ + return (exception, retries, failovers, isIdempotentOrAtMostOnce) -> { + + if (LOG.isDebugEnabled()) { + if (exception.getCause() != null) { + LOG.debug("RetryProxy: SCM Security Server {}: {}: {}", + getCurrentProxySCMNodeId(), + exception.getCause().getClass().getSimpleName(), + exception.getCause().getMessage()); + } else { + LOG.debug("RetryProxy: SCM {}: {}", getCurrentProxySCMNodeId(), + exception.getMessage()); + } + } + + if (SCMHAUtils.checkRetriableWithNoFailoverException(exception)) { + setUpdatedLeaderNodeID(); + } else { + performFailoverToAssignedLeader(null, exception); + } + return SCMHAUtils + .getRetryAction(failovers, retries, exception, maxRetryCount, + getRetryInterval()); + }; + } + + public synchronized void setUpdatedLeaderNodeID() { + this.updatedLeaderNodeID = getCurrentProxySCMNodeId(); + } + + @Override + public Class getInterface() { + return proxyClazz; + } + + @Override + public synchronized void close() throws IOException { + for (ProxyInfo proxyInfo : scmProxies.values()) { + if (proxyInfo.proxy != null) { + RPC.stopProxy(proxyInfo.proxy); + } + } + } + + public synchronized String getCurrentProxySCMNodeId() { + return currentProxySCMNodeId; + } + + public synchronized int getCurrentProxyIndex() { + return currentProxyIndex; + } + + private long getRetryInterval() { + return retryInterval; + } +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SingleSecretKeyProtocolProxyProvider.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SingleSecretKeyProtocolProxyProvider.java new file mode 100644 index 000000000000..f50f57a7a060 --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SingleSecretKeyProtocolProxyProvider.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.hdds.scm.proxy; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.protocol.proto.SCMSecretKeyProtocolProtos.SCMSecretKeyProtocolService; +import org.apache.hadoop.security.UserGroupInformation; + +/** + * Proxy provider for SCMSecretKeyProtocolService against a + * single SCM node (no fail-over). + */ +public class SingleSecretKeyProtocolProxyProvider + + extends SecretKeyProtocolFailoverProxyProvider { + private final String scmNodeId; + + public SingleSecretKeyProtocolProxyProvider( + ConfigurationSource conf, + UserGroupInformation userGroupInformation, + Class clazz, + String scmNodeId) { + super(conf, userGroupInformation, clazz); + this.scmNodeId = scmNodeId; + } + + @Override + public synchronized String getCurrentProxySCMNodeId() { + return scmNodeId; + } + + @Override + public synchronized void performFailover(T currentProxy) { + // do nothing. + } + + @Override + public synchronized void performFailoverToAssignedLeader(String newLeader, + Exception e) { + // do nothing. + } +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/DefaultSecretKeyClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/DefaultSecretKeyClient.java new file mode 100644 index 000000000000..030b0c7b6842 --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/DefaultSecretKeyClient.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.security.symmetric;
+
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.SecretKeyProtocol;
+import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
+
+import java.io.IOException;
+import java.util.UUID;
+
+/**
+ * A composition of {@link DefaultSecretKeySignerClient} and
+ * {@link DefaultSecretKeyVerifierClient} for components that need both APIs.
+ */
+public class DefaultSecretKeyClient implements SecretKeyClient {
+  private final SecretKeySignerClient signerClientDelegate;
+  private final SecretKeyVerifierClient verifierClientDelegate;
+
+  DefaultSecretKeyClient(SecretKeySignerClient signerClientDelegate,
+      SecretKeyVerifierClient verifierClientDelegate) {
+    this.signerClientDelegate = signerClientDelegate;
+    this.verifierClientDelegate = verifierClientDelegate;
+  }
+
+  @Override
+  public ManagedSecretKey getCurrentSecretKey() {
+    return signerClientDelegate.getCurrentSecretKey();
+  }
+
+  @Override
+  public void start(ConfigurationSource conf) throws IOException {
+    signerClientDelegate.start(conf);
+  }
+
+  @Override
+  public void stop() {
+    signerClientDelegate.stop();
+  }
+
+  @Override
+  public ManagedSecretKey getSecretKey(UUID id) throws SCMSecurityException {
+    return verifierClientDelegate.getSecretKey(id);
+  }
+
+  public static SecretKeyClient create(ConfigurationSource conf,
+      SecretKeyProtocol secretKeyProtocol)
+      throws IOException {
+    SecretKeySignerClient signerClient =
+        new DefaultSecretKeySignerClient(secretKeyProtocol);
+    SecretKeyVerifierClient verifierClient =
+        new DefaultSecretKeyVerifierClient(secretKeyProtocol, conf);
+    return new DefaultSecretKeyClient(signerClient, verifierClient);
+  }
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/DefaultSecretKeySignerClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/DefaultSecretKeySignerClient.java
new file mode 100644
index 000000000000..d5e12689f9c2
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/DefaultSecretKeySignerClient.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.security.symmetric;
+
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.SecretKeyProtocol;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static java.util.Objects.requireNonNull;
+
+/**
+ * Default implementation of {@link SecretKeySignerClient} that fetches
+ * secret keys from SCM. This client runs a background thread that
+ * periodically checks for and fetches the latest current secret key
+ * from SCM.
+ */
+public class DefaultSecretKeySignerClient implements SecretKeySignerClient {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DefaultSecretKeySignerClient.class);
+
+  private final SecretKeyProtocol secretKeyProtocol;
+  private final AtomicReference<ManagedSecretKey> cache =
+      new AtomicReference<>();
+  private ScheduledExecutorService executorService;
+
+  public DefaultSecretKeySignerClient(
+      SecretKeyProtocol secretKeyProtocol) {
+    this.secretKeyProtocol = secretKeyProtocol;
+  }
+
+  @Override
+  public ManagedSecretKey getCurrentSecretKey() {
+    return requireNonNull(cache.get(),
+        "SecretKey client must have been initialized already.");
+  }
+
+  @Override
+  public void refetchSecretKey() {
+    // Pass duration as ZERO to force a refresh.
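+    // checkAndRefresh treats the given duration as the maximum age of the
+    // cached key, so ZERO always triggers a fetch from SCM.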
+    checkAndRefresh(Duration.ZERO);
+  }
+
+  @Override
+  public void start(ConfigurationSource conf) throws IOException {
+    final ManagedSecretKey initialKey =
+        secretKeyProtocol.getCurrentSecretKey();
+    LOG.info("Initial secret key fetched from SCM: {}.", initialKey);
+    cache.set(initialKey);
+    scheduleSecretKeyPoller(conf, initialKey.getCreationTime());
+  }
+
+  @Override
+  public void stop() {
+    if (executorService != null) {
+      executorService.shutdown();
+      try {
+        // Force an immediate shutdown if the poller doesn't terminate
+        // within the grace period.
+        if (!executorService.awaitTermination(1, TimeUnit.MINUTES)) {
+          executorService.shutdownNow();
+        }
+      } catch (InterruptedException e) {
+        LOG.error("Interrupted while shutting down executor service.", e);
+        Thread.currentThread().interrupt();
+      }
+    }
+  }
+
+  private void scheduleSecretKeyPoller(ConfigurationSource conf,
+      Instant initialCreation) {
+    Duration rotateDuration = SecretKeyConfig.parseRotateDuration(conf);
+    Instant nextRotate = initialCreation.plus(rotateDuration);
+    ThreadFactory threadFactory = new ThreadFactoryBuilder()
+        .setNameFormat("SecretKeyPoller")
+        .setDaemon(true)
+        .build();
+    executorService = Executors.newScheduledThreadPool(1, threadFactory);
+    Duration interval = SecretKeyConfig.parseRotateCheckDuration(conf);
+    Duration initialDelay = Duration.between(Instant.now(), nextRotate);
+
+    LOG.info("Scheduling SecretKeyPoller with initial delay of {} "
+        + "and interval of {}", initialDelay, interval);
+    executorService.scheduleAtFixedRate(() -> checkAndRefresh(rotateDuration),
+        initialDelay.toMillis(), interval.toMillis(),
+        TimeUnit.MILLISECONDS);
+  }
+
+  private synchronized void checkAndRefresh(Duration rotateDuration) {
+    ManagedSecretKey current = cache.get();
+    Instant nextRotate = current.getCreationTime().plus(rotateDuration);
+    // When the current key passes the rotation cycle, fetch the next one
+    // from SCM.
+    if (nextRotate.isBefore(Instant.now())) {
+      try {
+        ManagedSecretKey newKey = secretKeyProtocol.getCurrentSecretKey();
+        if (!newKey.equals(current)) {
+          cache.set(newKey);
+          LOG.info("New secret key fetched from SCM: {}.", newKey);
+        }
+      } catch (IOException e) {
+        // TODO: emit failure metrics.
+        throw new UncheckedIOException(
+            "Error fetching current key from SCM", e);
+      }
+    }
+  }
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/DefaultSecretKeyVerifierClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/DefaultSecretKeyVerifierClient.java
new file mode 100644
index 000000000000..8a223cc16f1c
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/DefaultSecretKeyVerifierClient.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.security.symmetric;
+
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.SecretKeyProtocol;
+import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.time.Duration;
+import java.util.Optional;
+import java.util.UUID;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.hadoop.hdds.security.symmetric.SecretKeyConfig.parseExpiryDuration;
+import static org.apache.hadoop.hdds.security.symmetric.SecretKeyConfig.parseRotateDuration;
+
+/**
+ * Default implementation of {@link SecretKeyVerifierClient} that fetches
+ * SecretKeys remotely via {@link SecretKeyProtocol} and caches them locally.
+ */
+public class DefaultSecretKeyVerifierClient implements SecretKeyVerifierClient {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DefaultSecretKeyVerifierClient.class);
+
+  private final LoadingCache<UUID, Optional<ManagedSecretKey>> cache;
+
+  DefaultSecretKeyVerifierClient(SecretKeyProtocol secretKeyProtocol,
+      ConfigurationSource conf) {
+    Duration expiryDuration = parseExpiryDuration(conf);
+    Duration rotateDuration = parseRotateDuration(conf);
+
+    // If rotation happens every 1d and each key is valid for 7d before
+    // expiring, the expected number of valid keys at any time is 7.
+    final long expectedValidKeys =
+        expiryDuration.toMillis() / rotateDuration.toMillis() + 1;
+    // However, we want to cache some expired keys as well, to avoid asking
+    // SCM for recently expired secret keys, so the cache size is doubled
+    // (e.g. 7 valid and 7 recently expired keys).
+    final int secretKeyCacheMultiplier = 2;
+    long cacheSize = expectedValidKeys * secretKeyCacheMultiplier;
+    Duration cacheExpiry = expiryDuration.multipliedBy(
+        secretKeyCacheMultiplier);
+
+    CacheLoader<UUID, Optional<ManagedSecretKey>> loader =
+        new CacheLoader<UUID, Optional<ManagedSecretKey>>() {
+          @Override
+          public Optional<ManagedSecretKey> load(UUID id) throws Exception {
+            ManagedSecretKey secretKey = secretKeyProtocol.getSecretKey(id);
+            LOG.info("Secret key fetched from SCM: {}", secretKey);
+            return Optional.ofNullable(secretKey);
+          }
+        };
+
+    LOG.info("Initializing secret key cache with size {}, TTL {}",
+        cacheSize, expiryDuration);
+    cache = CacheBuilder.newBuilder()
+        .maximumSize(cacheSize)
+        .expireAfterWrite(cacheExpiry.toMillis(), TimeUnit.MILLISECONDS)
+        .recordStats()
+        .build(loader);
+  }
+
+  @Override
+  public ManagedSecretKey getSecretKey(UUID id) throws SCMSecurityException {
+    try {
+      return cache.get(id).orElse(null);
+    } catch (ExecutionException e) {
+      // Handle cache load exceptions.
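+      // Guava wraps loader failures in an ExecutionException; unwrap it
+      // and rethrow IOExceptions as SCMSecurityException where possible.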
+ if (e.getCause() instanceof IOException) { + IOException cause = (IOException) e.getCause(); + if (cause instanceof SCMSecurityException) { + throw (SCMSecurityException) cause; + } else { + throw new SCMSecurityException( + "Error fetching secret key " + id + " from SCM", cause); + } + } + throw new IllegalStateException("Unexpected exception fetching secret " + + "key " + id + " from SCM", e.getCause()); + } + } +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/LocalSecretKeyStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/LocalSecretKeyStore.java new file mode 100644 index 000000000000..48cc633b67b9 --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/LocalSecretKeyStore.java @@ -0,0 +1,199 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.security.symmetric; + +import com.fasterxml.jackson.databind.MappingIterator; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.ObjectReader; +import com.fasterxml.jackson.databind.SequenceWriter; +import com.fasterxml.jackson.databind.SerializationFeature; +import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.crypto.SecretKey; +import javax.crypto.spec.SecretKeySpec; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.attribute.PosixFilePermission; +import java.time.Instant; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.UUID; + +import static com.google.common.collect.Sets.newHashSet; +import static java.nio.file.Files.createDirectories; +import static java.nio.file.Files.createFile; +import static java.nio.file.Files.exists; +import static java.nio.file.attribute.PosixFilePermission.OWNER_READ; +import static java.nio.file.attribute.PosixFilePermission.OWNER_WRITE; +import static java.util.Objects.requireNonNull; +import static java.util.stream.Collectors.toList; + +/** + * A {@link SecretKeyStore} that saves and loads SecretKeys from/to a + * JSON file on local file system. + */ +public class LocalSecretKeyStore implements SecretKeyStore { + private static final Set SECRET_KEYS_PERMISSIONS = + newHashSet(OWNER_READ, OWNER_WRITE); + private static final Logger LOG = + LoggerFactory.getLogger(LocalSecretKeyStore.class); + + private final Path secretKeysFile; + private final ObjectMapper mapper; + + public LocalSecretKeyStore(Path secretKeysFile) { + this.secretKeysFile = requireNonNull(secretKeysFile); + this.mapper = new ObjectMapper() + .registerModule(new JavaTimeModule()) + .configure(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS, false); + } + + @Override + public synchronized List load() { + if (!secretKeysFile.toFile().exists()) { + return Collections.emptyList(); + } + + ObjectReader reader = mapper.readerFor(ManagedSecretKeyDto.class); + try (MappingIterator iterator = + reader.readValues(secretKeysFile.toFile())) { + List dtos = iterator.readAll(); + List result = dtos.stream() + .map(ManagedSecretKeyDto::toObject) + .collect(toList()); + LOG.info("Loaded {} from {}", result, secretKeysFile); + return result; + } catch (IOException e) { + throw new IllegalStateException("Error reading SecretKeys from " + + secretKeysFile, e); + } + } + + @Override + public synchronized void save(Collection secretKeys) { + createSecretKeyFiles(); + + List dtos = secretKeys.stream() + .map(ManagedSecretKeyDto::new) + .collect(toList()); + + try (SequenceWriter writer = + mapper.writer().writeValues(secretKeysFile.toFile())) { + writer.init(true); + writer.writeAll(dtos); + } catch (IOException e) { + throw new IllegalStateException("Error saving SecretKeys to file " + + secretKeysFile, e); + } + LOG.info("Saved {} to file {}", secretKeys, secretKeysFile); + } + + private void createSecretKeyFiles() { + try { + if (!exists(secretKeysFile)) { + Path parent = 
secretKeysFile.getParent(); + if (parent != null && !exists(parent)) { + createDirectories(parent); + } + createFile(secretKeysFile); + } + Files.setPosixFilePermissions(secretKeysFile, SECRET_KEYS_PERMISSIONS); + } catch (IOException e) { + throw new IllegalStateException("Error setting secret keys file" + + " permission: " + secretKeysFile, e); + } + } + + /** + * Just a simple DTO that allows serializing/deserializing the immutable + * {@link ManagedSecretKey} objects. + */ + private static class ManagedSecretKeyDto { + private UUID id; + private Instant creationTime; + private Instant expiryTime; + private String algorithm; + private byte[] encoded; + + /** + * Used by Jackson when deserializing. + */ + ManagedSecretKeyDto() { + } + + ManagedSecretKeyDto(ManagedSecretKey object) { + id = object.getId(); + creationTime = object.getCreationTime(); + expiryTime = object.getExpiryTime(); + algorithm = object.getSecretKey().getAlgorithm(); + encoded = object.getSecretKey().getEncoded(); + } + + public ManagedSecretKey toObject() { + SecretKey secretKey = new SecretKeySpec(this.encoded, this.algorithm); + return new ManagedSecretKey(id, creationTime, + expiryTime, secretKey); + } + + public UUID getId() { + return id; + } + + public void setId(UUID id) { + this.id = id; + } + + public Instant getCreationTime() { + return creationTime; + } + + public void setCreationTime(Instant creationTime) { + this.creationTime = creationTime; + } + + public Instant getExpiryTime() { + return expiryTime; + } + + public void setExpiryTime(Instant expiryTime) { + this.expiryTime = expiryTime; + } + + public String getAlgorithm() { + return algorithm; + } + + public void setAlgorithm(String algorithm) { + this.algorithm = algorithm; + } + + public byte[] getEncoded() { + return encoded; + } + + public void setEncoded(byte[] encoded) { + this.encoded = encoded; + } + } +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/ManagedSecretKey.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/ManagedSecretKey.java new file mode 100644 index 000000000000..2ff44daf9bec --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/ManagedSecretKey.java @@ -0,0 +1,154 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.security.symmetric;
+
+import com.google.protobuf.ByteString;
+import org.apache.hadoop.hdds.protocol.proto.SCMSecretKeyProtocolProtos;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.util.ProtobufUtils;
+
+import javax.crypto.Mac;
+import javax.crypto.SecretKey;
+import javax.crypto.spec.SecretKeySpec;
+import java.security.InvalidKeyException;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+import java.time.Instant;
+import java.util.UUID;
+
+/**
+ * Encloses a symmetric {@link SecretKey} with additional data for life-cycle
+ * management.
+ */
+public final class ManagedSecretKey {
+  private final UUID id;
+  private final Instant creationTime;
+  private final Instant expiryTime;
+  private final SecretKey secretKey;
+  private final ThreadLocal<Mac> macInstances;
+
+  public ManagedSecretKey(UUID id,
+      Instant creationTime,
+      Instant expiryTime,
+      SecretKey secretKey) {
+    this.id = id;
+    this.creationTime = creationTime;
+    this.expiryTime = expiryTime;
+    this.secretKey = secretKey;
+
+    // This helps reuse Mac instances within the same thread.
+    macInstances = ThreadLocal.withInitial(() -> {
+      try {
+        return Mac.getInstance(secretKey.getAlgorithm());
+      } catch (NoSuchAlgorithmException e) {
+        throw new IllegalArgumentException(
+            "Invalid algorithm " + secretKey.getAlgorithm(), e);
+      }
+    });
+  }
+
+  public boolean isExpired() {
+    return expiryTime.isBefore(Instant.now());
+  }
+
+  public UUID getId() {
+    return id;
+  }
+
+  public SecretKey getSecretKey() {
+    return secretKey;
+  }
+
+  public Instant getCreationTime() {
+    return creationTime;
+  }
+
+  public Instant getExpiryTime() {
+    return expiryTime;
+  }
+
+  @Override
+  public int hashCode() {
+    return id.hashCode();
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (!(obj instanceof ManagedSecretKey)) {
+      return false;
+    }
+    ManagedSecretKey that = (ManagedSecretKey) obj;
+    return this.id.equals(that.id);
+  }
+
+  @Override
+  public String toString() {
+    return "SecretKey(id = " + id + ", created at: "
+        + creationTime + ", expires at: " + expiryTime + ")";
+  }
+
+  public byte[] sign(byte[] data) {
+    try {
+      Mac mac = macInstances.get();
+      mac.init(secretKey);
+      return mac.doFinal(data);
+    } catch (InvalidKeyException e) {
+      throw new IllegalArgumentException("Invalid key for HMAC computation", e);
+    }
+  }
+
+  public byte[] sign(TokenIdentifier tokenId) {
+    return sign(tokenId.getBytes());
+  }
+
+  public boolean isValidSignature(byte[] data, byte[] signature) {
+    byte[] expectedSignature = sign(data);
+    return MessageDigest.isEqual(expectedSignature, signature);
+  }
+
+  public boolean isValidSignature(TokenIdentifier tokenId, byte[] signature) {
+    return isValidSignature(tokenId.getBytes(), signature);
+  }
+
+  /**
+   * @return the protobuf message representing this object;
+   *         {@link #fromProtobuf} performs the reverse conversion.
+ */ + public SCMSecretKeyProtocolProtos.ManagedSecretKey toProtobuf() { + return SCMSecretKeyProtocolProtos.ManagedSecretKey.newBuilder() + .setId(ProtobufUtils.toProtobuf(id)) + .setCreationTime(this.creationTime.toEpochMilli()) + .setExpiryTime(this.expiryTime.toEpochMilli()) + .setAlgorithm(this.secretKey.getAlgorithm()) + .setEncoded(ByteString.copyFrom(this.secretKey.getEncoded())) + .build(); + } + + /** + * Create a {@link ManagedSecretKey} from a given protobuf message. + */ + public static ManagedSecretKey fromProtobuf( + SCMSecretKeyProtocolProtos.ManagedSecretKey message) { + UUID id = ProtobufUtils.fromProtobuf(message.getId()); + Instant creationTime = Instant.ofEpochMilli(message.getCreationTime()); + Instant expiryTime = Instant.ofEpochMilli(message.getExpiryTime()); + SecretKey secretKey = new SecretKeySpec(message.getEncoded().toByteArray(), + message.getAlgorithm()); + return new ManagedSecretKey(id, creationTime, expiryTime, secretKey); + } +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyClient.java new file mode 100644 index 000000000000..a71b14dc3f28 --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyClient.java @@ -0,0 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.security.symmetric; + +/** + * Composite client for those components that need to perform both signing + * and verifying. + */ +public interface SecretKeyClient extends SecretKeySignerClient, + SecretKeyVerifierClient { +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyConfig.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyConfig.java new file mode 100644 index 000000000000..a833ba0137aa --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyConfig.java @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.security.symmetric; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; + +import java.nio.file.Path; +import java.nio.file.Paths; +import java.time.Duration; +import java.util.concurrent.TimeUnit; + +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KEY_DIR_NAME; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KEY_DIR_NAME_DEFAULT; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECRET_KEY_ALGORITHM; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECRET_KEY_ALGORITHM_DEFAULT; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECRET_KEY_EXPIRY_DURATION; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECRET_KEY_EXPIRY_DURATION_DEFAULT; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECRET_KEY_FILE; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECRET_KEY_FILE_DEFAULT; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECRET_KEY_ROTATE_CHECK_DURATION; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECRET_KEY_ROTATE_CHECK_DURATION_DEFAULT; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECRET_KEY_ROTATE_DURATION; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECRET_KEY_ROTATE_DURATION_DEFAULT; +import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; + +/** + * Configurations related to SecretKeys lifecycle management. 
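+ * <p>
+ * Rotation, expiry and rotation-check intervals are read from the
+ * HDDS_SECRET_KEY_* settings in HddsConfigKeys; the local key file
+ * location is derived from the metadata dir, key dir and file name
+ * settings. A minimal usage sketch (the "scm" component name is just an
+ * example):
+ * <pre>
+ *   SecretKeyConfig config = new SecretKeyConfig(conf, "scm");
+ *   Duration rotate = config.getRotateDuration();
+ *   Duration expiry = config.getExpiryDuration();
+ * </pre>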
+ */ +public class SecretKeyConfig { + private final Path localSecretKeyFile; + private final Duration rotateDuration; + private final Duration expiryDuration; + private final String algorithm; + private final Duration rotationCheckDuration; + + public SecretKeyConfig(ConfigurationSource conf, String component) { + String metadataDir = conf.get(HDDS_METADATA_DIR_NAME, + conf.get(OZONE_METADATA_DIRS)); + String keyDir = conf.get(HDDS_KEY_DIR_NAME, HDDS_KEY_DIR_NAME_DEFAULT); + String fileName = conf.get(HDDS_SECRET_KEY_FILE, + HDDS_SECRET_KEY_FILE_DEFAULT); + localSecretKeyFile = Paths.get(metadataDir, component, keyDir, fileName); + + this.rotateDuration = parseRotateDuration(conf); + this.expiryDuration = parseExpiryDuration(conf); + this.rotationCheckDuration = parseRotateCheckDuration(conf); + + this.algorithm = conf.get(HDDS_SECRET_KEY_ALGORITHM, + HDDS_SECRET_KEY_ALGORITHM_DEFAULT); + } + + public static Duration parseExpiryDuration(ConfigurationSource conf) { + long expiryDurationInMs = conf.getTimeDuration( + HDDS_SECRET_KEY_EXPIRY_DURATION, + HDDS_SECRET_KEY_EXPIRY_DURATION_DEFAULT, TimeUnit.MILLISECONDS); + return Duration.ofMillis(expiryDurationInMs); + } + + public static Duration parseRotateDuration(ConfigurationSource conf) { + long rotateDurationInMs = conf.getTimeDuration( + HDDS_SECRET_KEY_ROTATE_DURATION, + HDDS_SECRET_KEY_ROTATE_DURATION_DEFAULT, TimeUnit.MILLISECONDS); + return Duration.ofMillis(rotateDurationInMs); + } + + public static Duration parseRotateCheckDuration(ConfigurationSource conf) { + long rotationCheckInMs = conf.getTimeDuration( + HDDS_SECRET_KEY_ROTATE_CHECK_DURATION, + HDDS_SECRET_KEY_ROTATE_CHECK_DURATION_DEFAULT, TimeUnit.MILLISECONDS); + return Duration.ofMillis(rotationCheckInMs); + } + + public Path getLocalSecretKeyFile() { + return localSecretKeyFile; + } + + public Duration getRotateDuration() { + return rotateDuration; + } + + public Duration getExpiryDuration() { + return expiryDuration; + } + + public String getAlgorithm() { + return algorithm; + } + + public Duration getRotationCheckDuration() { + return rotationCheckDuration; + } +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyManager.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyManager.java new file mode 100644 index 000000000000..8685a7fb5236 --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyManager.java @@ -0,0 +1,173 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.security.symmetric;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.crypto.KeyGenerator;
+import java.security.NoSuchAlgorithmException;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.List;
+import java.util.UUID;
+import java.util.concurrent.TimeoutException;
+
+import static java.time.Duration.between;
+import static java.util.Objects.requireNonNull;
+import static java.util.stream.Collectors.toList;
+
+/**
+ * This component manages the symmetric SecretKey life-cycle, including
+ * generation, rotation and destruction.
+ */
+public class SecretKeyManager implements SecretKeyClient {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SecretKeyManager.class);
+
+  private final SecretKeyState state;
+  private final Duration rotationDuration;
+  private final Duration validityDuration;
+  private final SecretKeyStore keyStore;
+
+  private final KeyGenerator keyGenerator;
+
+  public SecretKeyManager(SecretKeyState state,
+      SecretKeyStore keyStore,
+      Duration rotationDuration,
+      Duration validityDuration,
+      String algorithm) {
+    this.state = requireNonNull(state);
+    this.rotationDuration = requireNonNull(rotationDuration);
+    this.validityDuration = requireNonNull(validityDuration);
+    this.keyStore = requireNonNull(keyStore);
+    this.keyGenerator = createKeyGenerator(algorithm);
+  }
+
+  public SecretKeyManager(SecretKeyState state,
+      SecretKeyStore keyStore,
+      SecretKeyConfig config) {
+    this(state, keyStore, config.getRotateDuration(),
+        config.getExpiryDuration(), config.getAlgorithm());
+  }
+
+  /**
+   * If the SecretKey state is not initialized, initialize it by loading
+   * SecretKeys from the local file, or generate new keys if the file
+   * doesn't exist.
+   */
+  public synchronized void checkAndInitialize() throws TimeoutException {
+    if (isInitialized()) {
+      return;
+    }
+
+    LOG.info("Initializing SecretKeys.");
+
+    // Load the stored keys and filter out the expired ones.
+    List<ManagedSecretKey> allKeys = keyStore.load()
+        .stream()
+        .filter(x -> !x.isExpired())
+        .collect(toList());
+
+    if (allKeys.isEmpty()) {
+      // If no valid key is present, generate a new key as the current key.
+      // This happens at first start or restart after being down for
+      // a significant time.
+      ManagedSecretKey newKey = generateSecretKey();
+      allKeys.add(newKey);
+      LOG.info("No valid key has been loaded. "
+          + "A new key is generated: {}", newKey);
+    } else {
+      LOG.info("Keys reloaded: {}", allKeys);
+    }
+
+    state.updateKeys(allKeys);
+  }
+
+  public boolean isInitialized() {
+    return state.getCurrentKey() != null;
+  }
+
+  /**
+   * Check and rotate the keys.
+   *
+   * @return true if rotation actually happens, false if it doesn't.
+   */
+  public synchronized boolean checkAndRotate() throws TimeoutException {
+    // Initialize the state if it's not initialized already.
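+    // (checkAndInitialize is a no-op once a current key exists.)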
+ checkAndInitialize(); + + ManagedSecretKey currentKey = state.getCurrentKey(); + if (shouldRotate(currentKey)) { + ManagedSecretKey newCurrentKey = generateSecretKey(); + List updatedKeys = state.getSortedKeys() + .stream().filter(x -> !x.isExpired()) + .collect(toList()); + updatedKeys.add(newCurrentKey); + + LOG.info("SecretKey rotation is happening, new key generated {}", + newCurrentKey); + state.updateKeys(updatedKeys); + return true; + } + return false; + } + + @Override + public ManagedSecretKey getCurrentSecretKey() { + return state.getCurrentKey(); + } + + @Override + public ManagedSecretKey getSecretKey(UUID id) { + return state.getKey(id); + } + + public List getSortedKeys() { + return state.getSortedKeys(); + } + + public void reinitialize(List secretKeys) { + state.reinitialize(secretKeys); + } + + private boolean shouldRotate(ManagedSecretKey currentKey) { + Duration established = between(currentKey.getCreationTime(), Instant.now()); + return established.compareTo(rotationDuration) >= 0; + } + + private ManagedSecretKey generateSecretKey() { + Instant now = Instant.now(); + return new ManagedSecretKey( + UUID.randomUUID(), + now, + now.plus(validityDuration), + keyGenerator.generateKey() + ); + } + + private KeyGenerator createKeyGenerator(String algorithm) { + try { + return KeyGenerator.getInstance(algorithm); + } catch (NoSuchAlgorithmException e) { + throw new IllegalArgumentException("Error creating KeyGenerator for " + + "algorithm " + algorithm, e); + } + } +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeySignerClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeySignerClient.java new file mode 100644 index 000000000000..0ae010545f74 --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeySignerClient.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.security.symmetric;
+
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+
+import java.io.IOException;
+
+/**
+ * Defines the client-side API that token signers (like OM) use to retrieve
+ * the secret key for signing data.
+ */
+public interface SecretKeySignerClient {
+  ManagedSecretKey getCurrentSecretKey();
+
+  /**
+   * This is where the actual implementation can prefetch the current
+   * secret key or initialize any necessary resources, e.g. caches or
+   * executors.
+   */
+  default void start(ConfigurationSource conf) throws IOException {
+  }
+
+  /**
+   * Gives the implementation a chance to clean up acquired resources.
+   */
+  default void stop() {
+  }
+
+  default void refetchSecretKey() {
+  }
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyState.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyState.java
new file mode 100644
index 000000000000..43518b901a78
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyState.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.security.symmetric;
+
+import org.apache.hadoop.hdds.scm.metadata.Replicate;
+
+import java.util.List;
+import java.util.UUID;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * This component holds the state of managed SecretKeys, including the
+ * current key and all active keys.
+ */
+public interface SecretKeyState {
+  /**
+   * Get the current active key, which is used for signing tokens. This is
+   * also the latest key managed by this state.
+   *
+   * @return the current active key, or null if the state is not initialized.
+   */
+  ManagedSecretKey getCurrentKey();
+
+  ManagedSecretKey getKey(UUID id);
+
+  /**
+   * Get the keys managed by this state, sorted by creation time from
+   * latest to oldest.
+   */
+  List<ManagedSecretKey> getSortedKeys();
+
+  /**
+   * Update the SecretKeys.
+   * This method replicates the SecretKeys across all SCM instances.
+   */
+  @Replicate
+  void updateKeys(List<ManagedSecretKey> newKeys) throws TimeoutException;
+
+  /**
+   * Update SecretKeys from a snapshot received from the SCM leader.
+   */
+  void reinitialize(List<ManagedSecretKey> secretKeys);
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyStateImpl.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyStateImpl.java
new file mode 100644
index 000000000000..b3f0ae55d997
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyStateImpl.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.security.symmetric;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.function.Function;
+
+import static java.util.Comparator.comparing;
+import static java.util.Objects.requireNonNull;
+import static java.util.stream.Collectors.toList;
+import static java.util.stream.Collectors.toMap;
+
+/**
+ * Default implementation of {@link SecretKeyState}.
+ */
+public final class SecretKeyStateImpl implements SecretKeyState {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SecretKeyStateImpl.class);
+
+  private final ReadWriteLock lock = new ReentrantReadWriteLock();
+
+  private List<ManagedSecretKey> sortedKeys;
+  private ManagedSecretKey currentKey;
+  private Map<UUID, ManagedSecretKey> keyById;
+
+  private final SecretKeyStore keyStore;
+
+  /**
+   * Instantiate a state with no keys. This state object needs to be backed
+   * by a proper replication proxy so that the @Replicate method works.
+   */
+  public SecretKeyStateImpl(SecretKeyStore keyStore) {
+    this.keyStore = requireNonNull(keyStore);
+  }
+
+  /**
+   * Get the current active key, which is used for signing tokens. This is
+   * also the latest key managed by this state.
+   */
+  @Override
+  public ManagedSecretKey getCurrentKey() {
+    lock.readLock().lock();
+    try {
+      return currentKey;
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  @Override
+  public ManagedSecretKey getKey(UUID id) {
+    lock.readLock().lock();
+    try {
+      // Return null if not initialized yet.
+      if (keyById == null) {
+        return null;
+      }
+      return keyById.get(id);
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  /**
+   * Get the keys managed by this state, sorted by creation time from
+   * latest to oldest.
+   */
+  @Override
+  public List<ManagedSecretKey> getSortedKeys() {
+    lock.readLock().lock();
+    try {
+      return sortedKeys;
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  /**
+   * Update the SecretKeys.
+   * This method replicates the SecretKeys across all SCM instances.
+   */
+  @Override
+  public void updateKeys(List<ManagedSecretKey> newKeys) {
+    updateKeysInternal(newKeys);
+  }
+
+  private void updateKeysInternal(List<ManagedSecretKey> newKeys) {
+    LOG.info("Updating keys with {}", newKeys);
+    lock.writeLock().lock();
+    try {
+      // Store the keys sorted from latest to oldest and make the list
+      // immutable so that it can be used to answer queries directly.
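+      // getSortedKeys() then returns this list without defensive copies.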
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyStateImpl.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyStateImpl.java
new file mode 100644
index 000000000000..b3f0ae55d997
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyStateImpl.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.security.symmetric;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.function.Function;
+
+import static java.util.Comparator.comparing;
+import static java.util.Objects.requireNonNull;
+import static java.util.stream.Collectors.toList;
+import static java.util.stream.Collectors.toMap;
+
+/**
+ * Default implementation of {@link SecretKeyState}.
+ */
+public final class SecretKeyStateImpl implements SecretKeyState {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SecretKeyStateImpl.class);
+
+  private final ReadWriteLock lock = new ReentrantReadWriteLock();
+
+  private List<ManagedSecretKey> sortedKeys;
+  private ManagedSecretKey currentKey;
+  private Map<UUID, ManagedSecretKey> keyById;
+
+  private final SecretKeyStore keyStore;
+
+  /**
+   * Instantiate a state with no keys. This state object needs to be backed by
+   * a proper replication proxy so that the @Replicate method works.
+   */
+  public SecretKeyStateImpl(SecretKeyStore keyStore) {
+    this.keyStore = requireNonNull(keyStore);
+  }
+
+  /**
+   * Get the current active key, which is used for signing tokens. This is
+   * also the latest key managed by this state.
+   */
+  @Override
+  public ManagedSecretKey getCurrentKey() {
+    lock.readLock().lock();
+    try {
+      return currentKey;
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  @Override
+  public ManagedSecretKey getKey(UUID id) {
+    lock.readLock().lock();
+    try {
+      // Return null if not initialized yet.
+      if (keyById == null) {
+        return null;
+      }
+      return keyById.get(id);
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  /**
+   * Get the keys that are managed by this manager.
+   * The returned keys are sorted by creation time, from latest to oldest.
+   */
+  @Override
+  public List<ManagedSecretKey> getSortedKeys() {
+    lock.readLock().lock();
+    try {
+      return sortedKeys;
+    } finally {
+      lock.readLock().unlock();
+    }
+  }
+
+  /**
+   * Update the SecretKeys.
+   * This method replicates SecretKeys across all SCM instances.
+   */
+  @Override
+  public void updateKeys(List<ManagedSecretKey> newKeys) {
+    updateKeysInternal(newKeys);
+  }
+
+  private void updateKeysInternal(List<ManagedSecretKey> newKeys) {
+    LOG.info("Updating keys with {}", newKeys);
+    lock.writeLock().lock();
+    try {
+      // Store sorted keys in order of latest to oldest and make it
+      // immutable so that it can be used to answer queries directly.
+      sortedKeys = Collections.unmodifiableList(
+          newKeys.stream()
+              .sorted(comparing(ManagedSecretKey::getCreationTime).reversed())
+              .collect(toList())
+      );
+      currentKey = sortedKeys.get(0);
+      keyById = newKeys.stream().collect(toMap(
+          ManagedSecretKey::getId,
+          Function.identity()
+      ));
+      LOG.info("Current key updated {}", currentKey);
+      keyStore.save(sortedKeys);
+    } finally {
+      lock.writeLock().unlock();
+    }
+  }
+
+  @Override
+  public void reinitialize(List<ManagedSecretKey> secretKeys) {
+    updateKeysInternal(secretKeys);
+  }
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyStore.java
new file mode 100644
index 000000000000..c851c3683d33
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyStore.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.security.symmetric;
+
+import java.util.Collection;
+import java.util.List;
+
+/**
+ * Interface for the SecretKey storage component, which is responsible for
+ * persisting the SecretKey state so that it is not lost during restarts.
+ *
+ * This interface allows new persistent storage to be plugged in easily.
+ */
+public interface SecretKeyStore {
+  List<ManagedSecretKey> load();
+
+  void save(Collection<ManagedSecretKey> secretKeys);
+}
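A usage sketch of the store contract (assuming the file-backed LocalSecretKeyStore added later in this patch, whose constructor takes the backing Path, as the test below shows): each save() replaces the previous file content wholesale, so load() returns only the latest snapshot. The class below is illustrative only.

import java.nio.file.Files;
import java.nio.file.Path;
import java.time.Duration;
import java.time.Instant;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.UUID;
import javax.crypto.KeyGenerator;

public final class SecretKeyStoreSketch {
  public static void main(String[] args) throws Exception {
    Path file = Files.createTempFile("secret-key-sketch", ".json");
    SecretKeyStore store = new LocalSecretKeyStore(file);

    ManagedSecretKey first = newKey();
    ManagedSecretKey second = newKey();

    store.save(Collections.singletonList(first));
    store.save(Arrays.asList(first, second)); // replaces the previous content

    List<ManagedSecretKey> loaded = store.load();
    assert loaded.size() == 2; // only the latest save() is visible
  }

  private static ManagedSecretKey newKey() throws Exception {
    Instant now = Instant.now();
    return new ManagedSecretKey(UUID.randomUUID(), now,
        now.plus(Duration.ofDays(1)),
        KeyGenerator.getInstance("HmacSHA256").generateKey());
  }
}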
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyVerifierClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyVerifierClient.java
new file mode 100644
index 000000000000..08ed39d7f4bd
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyVerifierClient.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.security.symmetric;
+
+import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
+
+import javax.annotation.Nullable;
+import java.util.UUID;
+
+/**
+ * Defines the client-side API that token verifiers (i.e. datanodes) use to
+ * retrieve the secret key relevant for validating a token.
+ */
+public interface SecretKeyVerifierClient {
+  @Nullable
+  ManagedSecretKey getSecretKey(UUID id) throws SCMSecurityException;
+}
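A sketch of wiring a SecretKeyVerifierClient into datanode-side token verification. The map-backed client here is a stand-in for the real implementation that fetches keys from SCM; the TokenVerifier.create and SecurityConfig usages match the signatures changed later in this patch, while everything else is assumed for illustration.

import java.util.HashMap;
import java.util.Map;
import java.util.UUID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.security.token.TokenVerifier;
import org.apache.hadoop.hdds.security.x509.SecurityConfig;

public final class VerifierClientSketch {
  public static TokenVerifier wire(OzoneConfiguration conf,
      ManagedSecretKey key) throws Exception {
    // Map-backed stand-in: serves only the keys it was primed with.
    Map<UUID, ManagedSecretKey> known = new HashMap<>();
    known.put(key.getId(), key);
    SecretKeyVerifierClient client = known::get;

    // Composes block and container token verifiers over the same client.
    return TokenVerifier.create(new SecurityConfig(conf), client);
  }
}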
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/package-info.java
new file mode 100644
index 000000000000..2997fe0a26bb
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/symmetric/package-info.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * In secure mode, Ozone uses a symmetric-key algorithm to sign all its issued
+ * tokens, such as block or container tokens. These tokens are then verified
+ * by datanodes to ensure their authenticity and integrity.
+ *

+ *
+ * That process requires a symmetric {@link javax.crypto.SecretKey} to be
+ * generated, managed, and distributed to different Ozone components.
+ * For example, the token signer (Ozone Manager and SCM) and the
+ * verifier (datanode) need to use the same SecretKey.
+ *

+ *
+ * This package encloses the logic to manage the symmetric secret key
+ * lifecycle. In detail, it consists of the following components:
+ *

+ * <ul>
+ *   <li>
+ *     The definition of the managed secret key, which is shared between SCM,
+ *     OM and datanodes, see
+ *     {@link org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey}.
+ *   </li>
+ *
+ *   <li>
+ *     The definition of the secret key state, which is designed to be
+ *     replicated across all SCM instances, see
+ *     {@link org.apache.hadoop.hdds.security.symmetric.SecretKeyState}.
+ *   </li>
+ *
+ *   <li>
+ *     The definition and implementation of secret key persistent storage, to
+ *     help retain SecretKeys after restarts, see
+ *     {@link org.apache.hadoop.hdds.security.symmetric.SecretKeyStore} and
+ *     {@link org.apache.hadoop.hdds.security.symmetric.LocalSecretKeyStore}.
+ *   </li>
+ *
+ *   <li>
+ *     The basic logic to manage the secret key lifecycle, see
+ *     {@link org.apache.hadoop.hdds.security.symmetric.SecretKeyManager}.
+ *   </li>
+ * </ul>
+ *

+ * The original overall design can be found at + * HDDS-7733. + */ +package org.apache.hadoop.hdds.security.symmetric; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenVerifier.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenVerifier.java index 996ed7ae68c6..c9999d253bc6 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenVerifier.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/BlockTokenVerifier.java @@ -26,8 +26,8 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProtoOrBuilder; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.security.exception.SCMSecurityException; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyVerifierClient; import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -54,8 +54,9 @@ public static String getTokenService(ContainerBlockID blockID) { return String.valueOf(blockID); } - public BlockTokenVerifier(SecurityConfig conf, CertificateClient caClient) { - super(conf, caClient); + public BlockTokenVerifier(SecurityConfig conf, + SecretKeyVerifierClient secretKeyClient) { + super(conf, secretKeyClient); } @Override diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/ContainerTokenSecretManager.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/ContainerTokenSecretManager.java index e9f37f3de120..4cee87696ff4 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/ContainerTokenSecretManager.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/ContainerTokenSecretManager.java @@ -20,11 +20,9 @@ import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; +import org.apache.hadoop.hdds.security.symmetric.SecretKeySignerClient; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import java.io.IOException; import java.io.UncheckedIOException; @@ -38,17 +36,15 @@ public class ContainerTokenSecretManager extends ShortLivedTokenSecretManager implements ContainerTokenGenerator { - private static final Logger LOG = - LoggerFactory.getLogger(ContainerTokenSecretManager.class); - - public ContainerTokenSecretManager(SecurityConfig conf, long tokenLifetime) { - super(conf, tokenLifetime, LOG); + public ContainerTokenSecretManager(long tokenLifetime, + SecretKeySignerClient secretKeyClient) { + super(tokenLifetime, secretKeyClient); } public ContainerTokenIdentifier createIdentifier(String user, ContainerID containerID) { return new ContainerTokenIdentifier(user, containerID, - getCertSerialId(), getTokenExpiryTime()); + getTokenExpiryTime()); } @Override diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/ContainerTokenVerifier.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/ContainerTokenVerifier.java index 941160a042d5..7e4d186c3223 100644 --- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/ContainerTokenVerifier.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/ContainerTokenVerifier.java @@ -21,16 +21,16 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProtoOrBuilder; import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyVerifierClient; import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; /** Verifier for container tokens. */ public class ContainerTokenVerifier extends ShortLivedTokenVerifier { public ContainerTokenVerifier(SecurityConfig conf, - CertificateClient caClient) { - super(conf, caClient); + SecretKeyVerifierClient secretKeyClient) { + super(conf, secretKeyClient); } @Override diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenSecretManager.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenSecretManager.java index 3dc7a395a1b4..1192377e1910 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenSecretManager.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/OzoneBlockTokenSecretManager.java @@ -21,11 +21,10 @@ import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; +import org.apache.hadoop.hdds.security.symmetric.SecretKeySignerClient; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.util.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -40,24 +39,19 @@ @InterfaceStability.Unstable public class OzoneBlockTokenSecretManager extends ShortLivedTokenSecretManager { - private static final Logger LOG = LoggerFactory .getLogger(OzoneBlockTokenSecretManager.class); - public OzoneBlockTokenSecretManager(SecurityConfig conf, long tokenLifetime) { - super(conf, tokenLifetime, LOG); - } - - @Override - public OzoneBlockTokenIdentifier createIdentifier() { - throw new SecurityException("Ozone block token can't be created " - + "without owner and access mode information."); + public OzoneBlockTokenSecretManager(long tokenLifetime, + SecretKeySignerClient passwordManager) { + super(tokenLifetime, passwordManager); } public OzoneBlockTokenIdentifier createIdentifier(String owner, BlockID blockID, Set modes, long maxLength) { return new OzoneBlockTokenIdentifier(owner, blockID, modes, - getTokenExpiryTime().toEpochMilli(), getCertSerialId(), maxLength); + getTokenExpiryTime().toEpochMilli(), + maxLength); } /** @@ -73,8 +67,9 @@ public Token generateToken(String user, LOG.info("Issued delegation token -> expiryTime:{}, tokenId:{}", Instant.ofEpochMilli(expiryTime), tokenIdentifier); } + byte[] password = createPassword(tokenIdentifier); return new Token<>(tokenIdentifier.getBytes(), - createPassword(tokenIdentifier), tokenIdentifier.getKind(), + password, tokenIdentifier.getKind(), new Text(tokenIdentifier.getService())); } @@ -88,49 +83,4 @@ public Token generateToken(BlockID blockId, return 
generateToken(userID, blockId, modes, maxLength); } - @Override - public byte[] retrievePassword(OzoneBlockTokenIdentifier identifier) - throws InvalidToken { - validateToken(identifier); - return createPassword(identifier); - } - - @Override - public long renewToken(Token token, - String renewer) { - throw new UnsupportedOperationException("Renew token operation is not " + - "supported for ozone block tokens."); - } - - @Override - public OzoneBlockTokenIdentifier cancelToken(Token - token, String canceller) { - throw new UnsupportedOperationException("Cancel token operation is not " + - "supported for ozone block tokens."); - } - - /** - * Find the OzoneBlockTokenInfo for the given token id, and verify that if the - * token is not expired. - */ - @Override - public boolean validateToken(OzoneBlockTokenIdentifier identifier) - throws InvalidToken { - long now = Time.now(); - if (identifier.getExpiryDate() < now) { - throw new InvalidToken("token " + formatTokenId(identifier) + " is " + - "expired, current time: " + Time.formatTime(now) + - " expiry time: " + identifier.getExpiryDate()); - } - return true; - } - - /** - * Validates if given hash is valid. - */ - public boolean verifySignature(OzoneBlockTokenIdentifier identifier, - byte[] password) { - throw new UnsupportedOperationException("This operation is not " + - "supported for block tokens."); - } } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/ShortLivedTokenSecretManager.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/ShortLivedTokenSecretManager.java index 966cf1cf5f39..981ea77a416d 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/ShortLivedTokenSecretManager.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/ShortLivedTokenSecretManager.java @@ -19,11 +19,10 @@ import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; -import org.apache.hadoop.hdds.security.OzoneSecretManager; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; +import org.apache.hadoop.hdds.security.symmetric.SecretKeySignerClient; +import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.token.Token; -import org.slf4j.Logger; import java.time.Instant; @@ -33,54 +32,21 @@ */ @InterfaceAudience.Private @InterfaceStability.Unstable -public abstract class - ShortLivedTokenSecretManager - extends OzoneSecretManager { +public abstract class ShortLivedTokenSecretManager + { + private final long tokenMaxLifetime; + private SecretKeySignerClient secretKeyClient; - private static final Text SERVICE = new Text("HDDS_SERVICE"); - - protected ShortLivedTokenSecretManager(SecurityConfig conf, - long tokenLifetime, Logger logger) { - super(conf, tokenLifetime, tokenLifetime, SERVICE, logger); - } - - @Override - public T createIdentifier() { - throw new SecurityException("Short-lived token requires additional " + - "information (owner, etc.)."); - } - - @Override - public long renewToken(Token token, String renewer) { - throw new UnsupportedOperationException("Renew token operation is not " + - "supported for short-lived tokens."); - } - - @Override - public T cancelToken(Token token, String canceller) { - throw new UnsupportedOperationException("Cancel token operation is not " + - "supported for short-lived tokens."); - } - - @Override - public byte[] retrievePassword(T identifier) 
throws InvalidToken {
-    validateToken(identifier);
-    return createPassword(identifier);
+  protected ShortLivedTokenSecretManager(
+      long tokenLifetime, SecretKeySignerClient secretKeyClient) {
+    this.tokenMaxLifetime = tokenLifetime;
+    this.secretKeyClient = secretKeyClient;
   }
 
-  /**
-   * Find the OzoneBlockTokenInfo for the given token id, and verify that
-   * the token is not expired.
-   */
-  protected boolean validateToken(T identifier) throws InvalidToken {
-    Instant now = Instant.now();
-    if (identifier.isExpired(now)) {
-      throw new InvalidToken("token " + formatTokenId(identifier) + " is " +
-          "expired, current time: " + now +
-          " expiry time: " + identifier.getExpiry());
-    }
-
-    return true;
+  protected byte[] createPassword(T tokenId) {
+    ManagedSecretKey secretKey = secretKeyClient.getCurrentSecretKey();
+    tokenId.setSecretKeyId(secretKey.getId());
+    return secretKey.sign(tokenId);
   }
 
   /**
@@ -89,12 +55,21 @@ protected boolean validateToken(T identifier) throws InvalidToken {
    * @return Expiry time.
    */
   protected Instant getTokenExpiryTime() {
-    return Instant.now().plusMillis(getTokenMaxLifetime());
+    return Instant.now().plusMillis(tokenMaxLifetime);
   }
 
   public Token<T> generateToken(T tokenIdentifier) {
+    byte[] password = createPassword(tokenIdentifier);
     return new Token<>(tokenIdentifier.getBytes(),
-        createPassword(tokenIdentifier), tokenIdentifier.getKind(),
+        password, tokenIdentifier.getKind(),
         new Text(tokenIdentifier.getService()));
   }
+
+  /**
+   * Allows integration tests to inject a custom implementation of
+   * SecretKeyClient, to test without fully setting up a working secure
+   * cluster.
+   */
+  public void setSecretKeyClient(SecretKeySignerClient client) {
+    this.secretKeyClient = client;
+  }
 }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/ShortLivedTokenVerifier.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/ShortLivedTokenVerifier.java
index 92b643e28592..ae18305f9ead 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/ShortLivedTokenVerifier.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/ShortLivedTokenVerifier.java
@@ -20,17 +20,15 @@
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProtoOrBuilder;
 import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
+import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey;
+import org.apache.hadoop.hdds.security.symmetric.SecretKeyVerifierClient;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
-import java.security.cert.CertificateExpiredException;
-import java.security.cert.CertificateNotYetValidException;
-import java.security.cert.X509Certificate;
 import java.time.Instant;
 import java.util.Objects;
 
@@ -42,13 +40,13 @@ ShortLivedTokenVerifier<T extends ShortLivedTokenIdentifier> implements TokenVerifier {
 
-  private final CertificateClient caClient;
   private final SecurityConfig conf;
+  private final SecretKeyVerifierClient secretKeyClient;
 
   protected ShortLivedTokenVerifier(SecurityConfig conf,
-      CertificateClient caClient) {
+      SecretKeyVerifierClient secretKeyClient) {
     this.conf = conf;
-    this.caClient =
caClient; + this.secretKeyClient = secretKeyClient; } /** Whether the specific kind of token is required for {@code cmdType}. */ @@ -75,11 +73,6 @@ public void verify(String user, Token token, return; } - if (caClient == null) { - throw new SCMSecurityException("Certificate client not available " + - "to validate token"); - } - T tokenId = createTokenIdentifier(); try { tokenId.readFields(new DataInputStream(new ByteArrayInputStream( @@ -88,31 +81,9 @@ public void verify(String user, Token token, throw new BlockTokenException("Failed to decode token : " + token); } - UserGroupInformation tokenUser = tokenId.getUser(); - X509Certificate signerCert = - caClient.getCertificate(tokenId.getCertSerialId()); - - if (signerCert == null) { - throw new BlockTokenException("Can't find signer certificate " + - "(CertSerialId: " + tokenId.getCertSerialId() + - ") of the token for user: " + tokenUser); - } - - try { - signerCert.checkValidity(); - } catch (CertificateExpiredException exExp) { - throw new BlockTokenException("Token can't be verified due to " + - "expired certificate " + tokenId.getCertSerialId()); - } catch (CertificateNotYetValidException exNyv) { - throw new BlockTokenException("Token can't be verified due to " + - "not yet valid certificate " + tokenId.getCertSerialId()); - } - - if (!caClient.verifySignature(tokenId.getBytes(), token.getPassword(), - signerCert)) { - throw new BlockTokenException("Invalid token for user: " + tokenUser); - } + verifyTokenPassword(tokenId, token.getPassword()); + UserGroupInformation tokenUser = tokenId.getUser(); // check expiration if (tokenId.isExpired(Instant.now())) { throw new BlockTokenException("Expired token for user: " + tokenUser); @@ -132,4 +103,27 @@ public void verify(String user, Token token, protected SecurityConfig getConf() { return conf; } + + private void verifyTokenPassword( + ShortLivedTokenIdentifier tokenId, byte[] password) + throws SCMSecurityException { + + ManagedSecretKey secretKey = secretKeyClient.getSecretKey( + tokenId.getSecretKeyId()); + if (secretKey == null) { + throw new BlockTokenException("Can't find the signing secret key " + + tokenId.getSecretKeyId() + " of the token for user: " + + tokenId.getUser()); + } + + if (secretKey.isExpired()) { + throw new BlockTokenException("Token can't be verified due to " + + "expired secret key " + tokenId.getSecretKeyId()); + } + + if (!secretKey.isValidSignature(tokenId, password)) { + throw new BlockTokenException("Invalid token for user: " + + tokenId.getUser()); + } + } } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java index dbf79d5482c3..3301b68fccad 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/token/TokenVerifier.java @@ -23,8 +23,8 @@ import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProtoOrBuilder; import org.apache.hadoop.hdds.security.exception.SCMSecurityException; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyVerifierClient; import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.security.token.Token; import java.io.IOException; @@ -73,15 +73,14 @@ default 
void verify(ContainerCommandRequestProtoOrBuilder cmd, String user,
 
   /** Create appropriate token verifier based on the configuration. */
   static TokenVerifier create(SecurityConfig conf,
-      CertificateClient certClient) {
-
+      SecretKeyVerifierClient secretKeyClient) throws IOException {
     if (!conf.isBlockTokenEnabled() && !conf.isContainerTokenEnabled()) {
       return new NoopTokenVerifier();
     }
 
     List<TokenVerifier> list = new LinkedList<>();
-    list.add(new BlockTokenVerifier(conf, certClient));
-    list.add(new ContainerTokenVerifier(conf, certClient));
+    list.add(new BlockTokenVerifier(conf, secretKeyClient));
+    list.add(new ContainerTokenVerifier(conf, secretKeyClient));
     return new CompositeTokenVerifier(list);
   }
 }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
index 7e9cf31cd483..e87e262b9ed3 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
@@ -44,13 +44,19 @@
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
+import org.apache.hadoop.hdds.protocolPB.SecretKeyProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdds.protocolPB.SecretKeyProtocolDatanodePB;
+import org.apache.hadoop.hdds.protocolPB.SecretKeyProtocolOmPB;
 import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdds.protocolPB.SecretKeyProtocolScmPB;
 import org.apache.hadoop.hdds.ratis.ServerNotLeaderException;
 import org.apache.hadoop.hdds.recon.ReconConfigKeys;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
 import org.apache.hadoop.hdds.scm.proxy.SCMClientConfig;
+import org.apache.hadoop.hdds.scm.proxy.SecretKeyProtocolFailoverProxyProvider;
 import org.apache.hadoop.hdds.scm.proxy.SCMSecurityProtocolFailoverProxyProvider;
+import org.apache.hadoop.hdds.scm.proxy.SingleSecretKeyProtocolProxyProvider;
 import org.apache.hadoop.hdds.server.ServerUtils;
 import org.apache.hadoop.hdds.tracing.TracingUtil;
 import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
@@ -506,6 +512,58 @@ public static SCMSecurityProtocolClientSideTranslatorPB getScmSecurityClient(
         SCMSecurityProtocolClientSideTranslatorPB.class, conf);
   }
 
+  /**
+   * Create a {@link org.apache.hadoop.hdds.protocol.SecretKeyProtocol} for
+   * the datanode service; should be used only if the caller has the
+   * Datanode identity.
+   */
+  public static SecretKeyProtocolClientSideTranslatorPB
+      getSecretKeyClientForDatanode(ConfigurationSource conf)
+      throws IOException {
+    return new SecretKeyProtocolClientSideTranslatorPB(
+        new SecretKeyProtocolFailoverProxyProvider(conf,
+            UserGroupInformation.getCurrentUser(),
+            SecretKeyProtocolDatanodePB.class),
+        SecretKeyProtocolDatanodePB.class);
+  }
+
+  /**
+   * Create a {@link org.apache.hadoop.hdds.protocol.SecretKeyProtocol} for
+   * the OM service; should be used only if the caller has the OM identity.
+ */ + public static SecretKeyProtocolClientSideTranslatorPB + getSecretKeyClientForOm(ConfigurationSource conf) throws IOException { + return new SecretKeyProtocolClientSideTranslatorPB( + new SecretKeyProtocolFailoverProxyProvider(conf, + UserGroupInformation.getCurrentUser(), + SecretKeyProtocolOmPB.class), + SecretKeyProtocolOmPB.class); + } + + public static SecretKeyProtocolClientSideTranslatorPB + getSecretKeyClientForDatanode(ConfigurationSource conf, + UserGroupInformation ugi) { + return new SecretKeyProtocolClientSideTranslatorPB( + new SecretKeyProtocolFailoverProxyProvider(conf, ugi, + SecretKeyProtocolDatanodePB.class), + SecretKeyProtocolDatanodePB.class); + } + + /** + * Create a {@link org.apache.hadoop.hdds.protocol.SecretKeyProtocol} for + * SCM service, should be use only if user is the Datanode identity. + * + * The protocol returned by this method only target a single destination + * SCM node. + */ + public static SecretKeyProtocolClientSideTranslatorPB + getSecretKeyClientForScm(ConfigurationSource conf, + String scmNodeId, UserGroupInformation ugi) { + return new SecretKeyProtocolClientSideTranslatorPB( + new SingleSecretKeyProtocolProxyProvider(conf, ugi, + SecretKeyProtocolScmPB.class, scmNodeId), + SecretKeyProtocolScmPB.class); + } + /** * Initialize hadoop metrics system for Ozone servers. * @param configuration OzoneConfiguration to use. diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/LocalSecretKeyStoreTest.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/LocalSecretKeyStoreTest.java new file mode 100644 index 000000000000..c406ce2b08f6 --- /dev/null +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/LocalSecretKeyStoreTest.java @@ -0,0 +1,188 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
   /**
    * Initialize hadoop metrics system for Ozone servers.
    * @param configuration OzoneConfiguration to use.
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/LocalSecretKeyStoreTest.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/LocalSecretKeyStoreTest.java
new file mode 100644
index 000000000000..c406ce2b08f6
--- /dev/null
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/LocalSecretKeyStoreTest.java
@@ -0,0 +1,188 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.security.symmetric;
+
+import com.google.common.collect.ImmutableList;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+
+import javax.crypto.KeyGenerator;
+import javax.crypto.SecretKey;
+import javax.crypto.spec.SecretKeySpec;
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
+import java.nio.file.attribute.PosixFilePermission;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.Base64;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+import java.util.UUID;
+import java.util.stream.Stream;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static com.google.common.collect.Sets.newHashSet;
+import static java.nio.file.attribute.PosixFilePermission.OWNER_READ;
+import static java.nio.file.attribute.PosixFilePermission.OWNER_WRITE;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+/**
+ * Test cases for {@link LocalSecretKeyStore}.
+ */
+public class LocalSecretKeyStoreTest {
+  private SecretKeyStore secretKeyStore;
+  private Path testSecretFile;
+
+  @BeforeEach
+  void setup() throws Exception {
+    testSecretFile = Files.createTempFile("key-store-test", ".json");
+    secretKeyStore = new LocalSecretKeyStore(testSecretFile);
+  }
+
+  public static Stream<Arguments> saveAndLoadTestCases() throws Exception {
+    return Stream.of(
+        // empty
+        Arguments.of(ImmutableList.of()),
+        // a single secret key.
+        Arguments.of(newArrayList(
+            generateKey("HmacSHA256")
+        )),
+        // multiple secret keys.
+        Arguments.of(newArrayList(
+            generateKey("HmacSHA1"),
+            generateKey("HmacSHA256")
+        ))
+    );
+  }
+
+  @ParameterizedTest
+  @MethodSource("saveAndLoadTestCases")
+  public void testSaveAndLoad(List<ManagedSecretKey> keys) throws IOException {
+    secretKeyStore.save(keys);
+
+    // Ensure the intended file exists and is readable and writable
+    // by the file owner only.
+    File file = testSecretFile.toFile();
+    assertTrue(file.exists());
+    Set<PosixFilePermission> permissions =
+        Files.getPosixFilePermissions(file.toPath());
+    assertEquals(newHashSet(OWNER_READ, OWNER_WRITE), permissions);
+
+    List<ManagedSecretKey> reloadedKeys = secretKeyStore.load();
+    assertEqualKeys(keys, reloadedKeys);
+  }
+
+  /**
+   * Verifies that secret keys are overwritten by subsequent writes.
+   */
+  @Test
+  public void testOverwrite() throws Exception {
+    List<ManagedSecretKey> initialKeys =
+        newArrayList(generateKey("HmacSHA256"));
+    secretKeyStore.save(initialKeys);
+
+    List<ManagedSecretKey> updatedKeys = newArrayList(
+        generateKey("HmacSHA1"),
+        generateKey("HmacSHA256")
+    );
+    secretKeyStore.save(updatedKeys);
+
+    assertEqualKeys(updatedKeys, secretKeyStore.load());
+  }
+
+  /**
+   * This scenario verifies that an existing secret key file can be loaded.
+   * The intention is to ensure that a previously saved file can still be
+   * loaded after future changes to the {@link ManagedSecretKey} schema.
+   *
+   * Please don't just change the test JSON content if this test fails;
+   * instead, analyse the backward compatibility of the change.
+   */
+  @Test
+  public void testLoadExistingFile() throws Exception {
+    // copy test file content to the backing file.
+    String testJson = "[\n"
+        + "  {\n"
+        + "    \"id\":\"78864cfb-793b-4157-8ad6-714c9f950a16\",\n"
+        + "    \"creationTime\":\"2007-12-03T10:15:30Z\",\n"
+        + "    \"expiryTime\":\"2007-12-03T11:15:30Z\",\n"
+        + "    \"algorithm\":\"HmacSHA256\",\n"
+        + "    \"encoded\":\"YSeCdJRB4RclxoeE69ENmTe2Cv8ybyKhHP3mq4M1r8o=\"\n"
+        + "  }\n"
+        + "]";
+    Files.write(testSecretFile, Collections.singletonList(testJson),
+        StandardOpenOption.WRITE);
+
+    Instant date = Instant.parse("2007-12-03T10:15:30.00Z");
+    ManagedSecretKey secretKey = new ManagedSecretKey(
+        UUID.fromString("78864cfb-793b-4157-8ad6-714c9f950a16"),
+        date,
+        date.plus(Duration.ofHours(1)),
+        new SecretKeySpec(
+            Base64.getDecoder().decode(
+                "YSeCdJRB4RclxoeE69ENmTe2Cv8ybyKhHP3mq4M1r8o="),
+            "HmacSHA256"
+        ));
+
+    List<ManagedSecretKey> expectedKeys = newArrayList(secretKey);
+    assertEqualKeys(expectedKeys, secretKeyStore.load());
+  }
+
+  private void assertEqualKeys(List<ManagedSecretKey> expected,
+      List<ManagedSecretKey> actual) {
+    assertEquals(expected.size(), actual.size());
+    for (int i = 0; i < expected.size(); i++) {
+      ManagedSecretKey expectedKey = expected.get(i);
+      ManagedSecretKey actualKey = actual.get(i);
+
+      assertEquals(expectedKey.getId(), actualKey.getId());
+      assertEquals(expectedKey.getCreationTime().toEpochMilli(),
+          actualKey.getCreationTime().toEpochMilli());
+      assertEquals(expectedKey.getExpiryTime(),
+          actualKey.getExpiryTime());
+      assertEquals(expectedKey.getSecretKey(), actualKey.getSecretKey());
+    }
+  }
+
+  private static ManagedSecretKey generateKey(String algorithm)
+      throws Exception {
+    return generateKey(algorithm, Instant.now());
+  }
+
+  private static ManagedSecretKey generateKey(String algorithm,
+      Instant creationTime)
+      throws Exception {
+    KeyGenerator keyGen = KeyGenerator.getInstance(algorithm);
+    SecretKey secretKey = keyGen.generateKey();
+    return new ManagedSecretKey(
+        UUID.randomUUID(),
+        creationTime,
+        creationTime.plus(Duration.ofHours(1)),
+        secretKey
+    );
+  }
+}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/ManagedSecretKeyTest.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/ManagedSecretKeyTest.java
new file mode 100644
index 000000000000..6db83186ac84
--- /dev/null
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/ManagedSecretKeyTest.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.security.symmetric;
+
+import org.apache.commons.lang3.RandomUtils;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto;
+import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
+import org.junit.jupiter.api.Test;
+
+import static com.google.common.collect.ImmutableSet.of;
+import static java.time.Duration.ofDays;
+import static java.time.Instant.now;
+import static org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey.fromProtobuf;
+import static org.apache.hadoop.hdds.security.symmetric.SecretKeyTestUtil.generateHmac;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+/**
+ * Simple test cases for {@link ManagedSecretKey}.
+ */
+public class ManagedSecretKeyTest {
+
+  @Test
+  public void testSignAndVerifySuccess() throws Exception {
+    // Data can be signed and verified by the same key.
+    byte[] data = RandomUtils.nextBytes(100);
+    ManagedSecretKey secretKey = generateHmac(now(), ofDays(1));
+    byte[] signature = secretKey.sign(data);
+    assertTrue(secretKey.isValidSignature(data, signature));
+
+    // Data can be signed and verified by the same key transferred via
+    // network.
+    ManagedSecretKey transferredKey = fromProtobuf(secretKey.toProtobuf());
+    assertTrue(transferredKey.isValidSignature(data, signature));
+
+    // A token can be signed and verified by the same key.
+    OzoneBlockTokenIdentifier tokenId = new OzoneBlockTokenIdentifier("owner",
+        new BlockID(1L, 1L), of(AccessModeProto.READ), 0L, 1L);
+    tokenId.setSecretKeyId(secretKey.getId());
+
+    signature = secretKey.sign(tokenId);
+    assertTrue(secretKey.isValidSignature(tokenId, signature));
+
+    // A token can be signed and verified by the same key transferred via
+    // network.
+    assertTrue(transferredKey.isValidSignature(tokenId, signature));
+  }
+
+  @Test
+  public void testVerifyFailure() throws Exception {
+    byte[] data = RandomUtils.nextBytes(100);
+    ManagedSecretKey secretKey = generateHmac(now(), ofDays(1));
+    // A random signature is not valid.
+    assertFalse(secretKey.isValidSignature(data, RandomUtils.nextBytes(100)));
+
+    // Data signed by one key can't be verified by another key.
+    byte[] signature = secretKey.sign(data);
+    ManagedSecretKey secretKey1 = generateHmac(now(), ofDays(1));
+    assertFalse(secretKey1.isValidSignature(data, signature));
+  }
+}
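Distilling the wire-transfer scenario from the test above into a standalone sketch (not part of the patch): a key serialized with toProtobuf() on the signer side can be reconstructed with fromProtobuf() on the verifier side and still validates the same signature. Only methods exercised in the test are assumed.

import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.time.Instant;
import java.util.UUID;
import javax.crypto.KeyGenerator;

public final class KeyDistributionSketch {
  public static void main(String[] args) throws Exception {
    Instant now = Instant.now();
    ManagedSecretKey signerSide = new ManagedSecretKey(UUID.randomUUID(), now,
        now.plus(Duration.ofDays(1)),
        KeyGenerator.getInstance("HmacSHA256").generateKey());

    byte[] data = "some token bytes".getBytes(StandardCharsets.UTF_8);
    byte[] signature = signerSide.sign(data);

    // Simulate distribution over the wire: serialize, then reconstruct on
    // the verifier side. The reconstructed key validates the same signature.
    ManagedSecretKey verifierSide =
        ManagedSecretKey.fromProtobuf(signerSide.toProtobuf());
    assert verifierSide.isValidSignature(data, signature);
  }
}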
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyManagerTest.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyManagerTest.java
new file mode 100644
index 000000000000..e7fd24082cbc
--- /dev/null
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyManagerTest.java
@@ -0,0 +1,206 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.security.symmetric;
+
+import com.google.common.collect.ImmutableList;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Mockito;
+
+import java.time.Duration;
+import java.time.Instant;
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.TimeoutException;
+import java.util.stream.Stream;
+
+import static com.google.common.collect.Lists.newArrayList;
+import static java.time.Instant.now;
+import static java.time.temporal.ChronoUnit.DAYS;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.params.provider.Arguments.of;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+/**
+ * Test cases for the {@link SecretKeyManager} implementation.
+ */
+public class SecretKeyManagerTest {
+  private static final Duration VALIDITY_DURATION = Duration.ofDays(3);
+  private static final Duration ROTATION_DURATION = Duration.ofDays(1);
+  private static final String ALGORITHM = "HmacSHA256";
+
+  private SecretKeyStore mockedKeyStore;
+
+  @BeforeEach
+  void setup() {
+    mockedKeyStore = Mockito.mock(SecretKeyStore.class);
+  }
+
+  public static Stream<Arguments> loadSecretKeysTestCases() throws Exception {
+    ManagedSecretKey k0 = generateKey(now());
+    ManagedSecretKey k1 = generateKey(now().minus(1, DAYS));
+    ManagedSecretKey k2 = generateKey(now().minus(2, DAYS));
+    ManagedSecretKey k3 = generateKey(now().minus(3, DAYS));
+    ManagedSecretKey k4 = generateKey(now().minus(4, DAYS));
+    ManagedSecretKey k5 = generateKey(now().minus(5, DAYS));
+    return Stream.of(
+        // first start
+        of(ImmutableList.of(), null, null),
+
+        // restart => nothing is filtered
+        of(newArrayList(k0, k1, k2), k0, newArrayList(k0, k1, k2)),
+
+        // stopped for 1 day, then restarted
+        of(newArrayList(k1, k2, k3), k1, newArrayList(k1, k2)),
+
+        // stopped for 2 days => expired keys are filtered
+        of(newArrayList(k2, k3, k4), k2, newArrayList(k2)),
+
+        // stopped for 3 days => all saved keys are filtered
+        of(newArrayList(k3, k4, k5), null, null)
+    );
+  }
+
+  /**
+   * Verify how SecretKeyManager initializes its keys under different
+   * scenarios, e.g. with or without the presence of saved keys.
+ */ + @ParameterizedTest + @MethodSource("loadSecretKeysTestCases") + public void testLoadSecretKeys(List savedSecretKey, + ManagedSecretKey expectedCurrentKey, + List expectedLoadedKeys) + throws Exception { + SecretKeyState state = new SecretKeyStateImpl(mockedKeyStore); + SecretKeyManager lifeCycleManager = + new SecretKeyManager(state, mockedKeyStore, + ROTATION_DURATION, VALIDITY_DURATION, ALGORITHM); + + when(mockedKeyStore.load()).thenReturn(savedSecretKey); + lifeCycleManager.checkAndInitialize(); + + if (expectedCurrentKey != null) { + assertEquals(state.getCurrentKey(), expectedCurrentKey); + List allKeys = state.getSortedKeys(); + assertSameKeys(expectedLoadedKeys, allKeys); + } else { + // expect the current key is newly generated. + assertFalse(savedSecretKey.contains(state.getCurrentKey())); + assertEquals(1, state.getSortedKeys().size()); + assertTrue(state.getSortedKeys().contains( + state.getCurrentKey())); + } + } + + private static void assertSameKeys(Collection expected, + Collection actual) { + assertEquals(expected.size(), actual.size()); + for (ManagedSecretKey expectedKey : expected) { + assertTrue(actual.contains(expectedKey)); + } + } + + public static Stream rotationTestCases() throws Exception { + ManagedSecretKey k0 = generateKey(now()); + ManagedSecretKey k1 = generateKey(now().minus(1, DAYS)); + ManagedSecretKey k2 = generateKey(now().minus(2, DAYS)); + ManagedSecretKey k3 = generateKey(now().minus(3, DAYS)); + ManagedSecretKey k4 = generateKey(now().minus(4, DAYS)); + return Stream.of( + + // Currentkey is new, not rotate. + of(newArrayList(k0, k1, k2), false, null), + + // Current key just exceeds the rotation period. + of(newArrayList(k1, k2, k3), true, newArrayList(k1, k2)), + + // Current key exceeds the rotation period for a significant time (2d). + of(newArrayList(k2, k3, k4), true, newArrayList(k2)) + ); + } + + /** + * Verify rotation behavior under different scenarios. + */ + @ParameterizedTest + @MethodSource("rotationTestCases") + public void testRotate(List initialKeys, + boolean expectRotate, + List expectedRetainedKeys) + throws TimeoutException { + + SecretKeyState state = new SecretKeyStateImpl(mockedKeyStore); + + SecretKeyManager lifeCycleManager = + new SecretKeyManager(state, mockedKeyStore, + ROTATION_DURATION, VALIDITY_DURATION, ALGORITHM); + + // Set the initial state. + state.updateKeys(initialKeys); + ManagedSecretKey initialCurrentKey = state.getCurrentKey(); + Mockito.reset(mockedKeyStore); + + assertEquals(expectRotate, lifeCycleManager.checkAndRotate()); + + if (expectRotate) { + // Verify rotation behavior. + + // 1. A new key is generated as current key. + ManagedSecretKey currentKey = state.getCurrentKey(); + assertNotEquals(initialCurrentKey, currentKey); + assertFalse(initialKeys.contains(currentKey)); + + // 2. keys are correctly rotated, expired ones are excluded. + List expectedAllKeys = expectedRetainedKeys; + expectedAllKeys.add(currentKey); + assertSameKeys(expectedAllKeys, state.getSortedKeys()); + + // 3. All keys are stored. + ArgumentCaptor> storedKeyCaptor = + ArgumentCaptor.forClass(Collection.class); + verify(mockedKeyStore).save(storedKeyCaptor.capture()); + assertSameKeys(expectedAllKeys, storedKeyCaptor.getValue()); + + // 4. The new generated key has correct data. 
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyTestUtil.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyTestUtil.java
new file mode 100644
index 000000000000..272bedad546c
--- /dev/null
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/SecretKeyTestUtil.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.security.symmetric; + +import javax.crypto.KeyGenerator; +import javax.crypto.SecretKey; +import java.security.NoSuchAlgorithmException; +import java.time.Duration; +import java.time.Instant; +import java.util.UUID; + +/** + * Contains utility to test secret key logic. + */ +public final class SecretKeyTestUtil { + private SecretKeyTestUtil() { + } + + public static ManagedSecretKey generateKey( + String algorithm, Instant creationTime, Duration validDuration) + throws NoSuchAlgorithmException { + KeyGenerator keyGen = KeyGenerator.getInstance(algorithm); + SecretKey secretKey = keyGen.generateKey(); + return new ManagedSecretKey( + UUID.randomUUID(), + creationTime, + creationTime.plus(validDuration), + secretKey + ); + } + + public static ManagedSecretKey generateHmac( + Instant creationTime, Duration validDuration) + throws NoSuchAlgorithmException { + return generateKey("HmacSHA256", creationTime, validDuration); + } +} diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestBlockTokenVerifier.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestBlockTokenVerifier.java index c108a2670d01..91825b09cc14 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestBlockTokenVerifier.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestBlockTokenVerifier.java @@ -23,8 +23,8 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto; import org.apache.hadoop.hdds.scm.pipeline.MockPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyVerifierClient; import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.ozone.container.ContainerTestHelper; import java.io.IOException; @@ -46,8 +46,8 @@ protected String tokenEnabledConfigKey() { @Override protected TokenVerifier newTestSubject(SecurityConfig secConf, - CertificateClient caClient) { - return new BlockTokenVerifier(secConf, caClient); + SecretKeyVerifierClient secretKeyClient) { + return new BlockTokenVerifier(secConf, secretKeyClient); } @Override @@ -67,10 +67,12 @@ protected ContainerCommandRequestProto verifiedRequest( @Override protected OzoneBlockTokenIdentifier newTokenId() { - return new OzoneBlockTokenIdentifier("any user", - new BlockID(1, 0), - EnumSet.allOf(AccessModeProto.class), - Instant.now().plusSeconds(3600).toEpochMilli(), - CERT_ID, 100); + OzoneBlockTokenIdentifier tokenId = + new OzoneBlockTokenIdentifier("any user", + new BlockID(1, 0), + EnumSet.allOf(AccessModeProto.class), + Instant.now().plusSeconds(3600).toEpochMilli(), 100); + tokenId.setSecretKeyId(SECRET_KEY_ID); + return tokenId; } } diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestContainerTokenVerifier.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestContainerTokenVerifier.java index 25616a62ecda..1704226e5a9f 100644 --- 
a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestContainerTokenVerifier.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestContainerTokenVerifier.java @@ -23,8 +23,8 @@ import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.pipeline.MockPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyVerifierClient; import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import java.io.IOException; import java.time.Instant; @@ -48,8 +48,8 @@ protected String tokenEnabledConfigKey() { @Override protected TokenVerifier newTestSubject(SecurityConfig secConf, - CertificateClient caClient) { - return new ContainerTokenVerifier(secConf, caClient); + SecretKeyVerifierClient secretKeyClient) { + return new ContainerTokenVerifier(secConf, secretKeyClient); } @Override @@ -69,8 +69,10 @@ protected ContainerCommandRequestProto verifiedRequest( @Override protected ContainerTokenIdentifier newTokenId() { - return new ContainerTokenIdentifier("any user", - ContainerID.valueOf(CONTAINER_ID.incrementAndGet()), "123", + ContainerTokenIdentifier tokenId = new ContainerTokenIdentifier("any user", + ContainerID.valueOf(CONTAINER_ID.incrementAndGet()), Instant.now().plusSeconds(3600)); + tokenId.setSecretKeyId(SECRET_KEY_ID); + return tokenId; } } diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenIdentifier.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenIdentifier.java new file mode 100644 index 000000000000..fbedf2de1a67 --- /dev/null +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenIdentifier.java @@ -0,0 +1,103 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdds.security.token; + +import org.apache.commons.lang3.RandomUtils; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto; +import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyTestUtil; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.util.Time; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.io.ByteArrayInputStream; +import java.io.DataInputStream; +import java.io.IOException; +import java.util.EnumSet; + +import static java.time.Duration.ofDays; +import static java.time.Instant.now; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +/** + * Test class for {@link OzoneBlockTokenIdentifier}. + */ +public class TestOzoneBlockTokenIdentifier { + private long expiryTime; + private ManagedSecretKey secretKey; + + @BeforeEach + public void setUp() throws Exception { + expiryTime = Time.monotonicNow() + 60 * 60 * 24; + + secretKey = SecretKeyTestUtil.generateHmac(now(), ofDays(1)); + } + + @Test + public void testSignToken() { + OzoneBlockTokenIdentifier tokenId = new OzoneBlockTokenIdentifier( + "testUser", "84940", + EnumSet.allOf(AccessModeProto.class), + expiryTime, 128L); + tokenId.setSecretKeyId(secretKey.getId()); + byte[] signedToken = secretKey.sign(tokenId); + + // Verify a valid signed OzoneMaster Token with Ozone Master. + assertTrue(secretKey.isValidSignature(tokenId.getBytes(), signedToken)); + + // Verify an invalid signed OzoneMaster Token with Ozone Master. + assertFalse(secretKey.isValidSignature(tokenId.getBytes(), + RandomUtils.nextBytes(128))); + } + + @Test + public void testTokenSerialization() throws + IOException { + long maxLength = 128L; + + OzoneBlockTokenIdentifier tokenId = new OzoneBlockTokenIdentifier( + "testUser", "84940", + EnumSet.allOf(AccessModeProto.class), + expiryTime, maxLength); + tokenId.setSecretKeyId(secretKey.getId()); + byte[] signedToken = secretKey.sign(tokenId); + + Token token = new Token<>(tokenId.getBytes(), + signedToken, tokenId.getKind(), new Text("host:port")); + + String encodeToUrlString = token.encodeToUrlString(); + + TokendecodedToken = new Token<>(); + decodedToken.decodeFromUrlString(encodeToUrlString); + + OzoneBlockTokenIdentifier decodedTokenId = new OzoneBlockTokenIdentifier(); + decodedTokenId.readFields(new DataInputStream( + new ByteArrayInputStream(decodedToken.getIdentifier()))); + + Assertions.assertEquals(tokenId, decodedTokenId); + Assertions.assertEquals(maxLength, decodedTokenId.getMaxLength()); + + // Verify a decoded signed Token + assertTrue(secretKey.isValidSignature(decodedTokenId, + decodedToken.getPassword())); + } +} diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenSecretManager.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenSecretManager.java index 1c93dcc18083..d8c22713235a 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenSecretManager.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenSecretManager.java @@ -18,19 +18,6 @@ package org.apache.hadoop.hdds.security.token; -import static 
org.apache.hadoop.ozone.container.ContainerTestHelper.getBlockRequest; -import static org.apache.hadoop.ozone.container.ContainerTestHelper.getReadChunkRequest; -import static org.apache.hadoop.ozone.container.ContainerTestHelper.newPutBlockRequestBuilder; -import static org.apache.hadoop.ozone.container.ContainerTestHelper.newWriteChunkRequestBuilder; -import static org.junit.Assert.assertThrows; -import static org.junit.Assert.assertTrue; -import static org.mockito.AdditionalAnswers.delegatesTo; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -38,48 +25,36 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto; import org.apache.hadoop.hdds.scm.pipeline.MockPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.security.symmetric.SecretKeySignerClient; +import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyVerifierClient; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyTestUtil; import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.hdds.security.x509.certificate.client.DefaultCertificateClient; -import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.security.token.Token; import org.apache.ozone.test.GenericTestUtils; -import org.apache.ozone.test.LambdaTestUtils; -import org.apache.hadoop.util.Time; -import org.bouncycastle.asn1.x500.X500Name; -import org.bouncycastle.asn1.x509.AlgorithmIdentifier; -import org.bouncycastle.asn1.x509.SubjectPublicKeyInfo; -import org.bouncycastle.cert.X509CertificateHolder; -import org.bouncycastle.cert.X509v1CertificateBuilder; -import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter; -import org.bouncycastle.crypto.util.PrivateKeyFactory; -import org.bouncycastle.operator.ContentSigner; -import org.bouncycastle.operator.DefaultDigestAlgorithmIdentifierFinder; -import org.bouncycastle.operator.DefaultSignatureAlgorithmIdentifierFinder; -import org.bouncycastle.operator.OperatorCreationException; -import org.bouncycastle.operator.bc.BcRSAContentSignerBuilder; -import org.bouncycastle.pkcs.PKCS10CertificationRequest; -import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; -import org.slf4j.Logger; +import org.mockito.Mockito; import java.io.ByteArrayInputStream; import java.io.DataInputStream; -import java.io.IOException; -import java.math.BigInteger; -import java.nio.file.Path; -import java.security.KeyPair; -import java.security.SecureRandom; -import java.security.Signature; -import java.security.cert.CertificateException; -import java.security.cert.X509Certificate; -import java.time.Instant; -import java.util.Date; +import java.security.NoSuchAlgorithmException; import java.util.EnumSet; +import java.util.UUID; import java.util.concurrent.TimeUnit; +import static java.time.Duration.ofDays; +import static java.time.Instant.now; +import static org.apache.hadoop.ozone.container.ContainerTestHelper.getBlockRequest; +import static 
org.apache.hadoop.ozone.container.ContainerTestHelper.getReadChunkRequest; +import static org.apache.hadoop.ozone.container.ContainerTestHelper.newPutBlockRequestBuilder; +import static org.apache.hadoop.ozone.container.ContainerTestHelper.newWriteChunkRequestBuilder; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; + /** * Test class for {@link OzoneBlockTokenSecretManager}. */ @@ -87,14 +62,15 @@ public class TestOzoneBlockTokenSecretManager { private static final String BASEDIR = GenericTestUtils .getTempPath(TestOzoneBlockTokenSecretManager.class.getSimpleName()); - private static final String ALGORITHM = "SHA256withRSA"; + private static final String ALGORITHM = "HmacSHA256"; private OzoneBlockTokenSecretManager secretManager; - private KeyPair keyPair; - private String omCertSerialId; - private CertificateClient client; + private UUID secretKeyId; + private SecretKeyVerifierClient secretKeyClient; + private SecretKeySignerClient secretKeySignerClient; private TokenVerifier tokenVerifier; private Pipeline pipeline; + private ManagedSecretKey secretKey; @Before public void setUp() throws Exception { @@ -103,40 +79,19 @@ public void setUp() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, BASEDIR); conf.setBoolean(HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED, true); - // Create Ozone Master key pair. - keyPair = KeyStoreTestUtil.generateKeyPair("RSA"); - // Create Ozone Master certificate (SCM CA issued cert) and key store. SecurityConfig securityConfig = new SecurityConfig(conf); - X509Certificate x509Certificate = KeyStoreTestUtil - .generateCertificate("CN=OzoneMaster", keyPair, 30, ALGORITHM); - omCertSerialId = x509Certificate.getSerialNumber().toString(); - secretManager = new OzoneBlockTokenSecretManager(securityConfig, - TimeUnit.HOURS.toMillis(1)); - Logger log = mock(Logger.class); - DefaultCertificateClient toStub = - new DefaultCertificateClient( - securityConfig, log, null, "test", null, null) { - @Override - protected String signAndStoreCertificate( - PKCS10CertificationRequest request, Path certificatePath) { - return null; - } - }; - client = mock(DefaultCertificateClient.class, delegatesTo(toStub)); - doReturn(x509Certificate).when(client).getCertificate(); - doReturn(x509Certificate).when(client).getCertificate(anyString()); - doReturn(keyPair.getPublic()).when(client).getPublicKey(); - doReturn(keyPair.getPrivate()).when(client).getPrivateKey(); - doReturn(null).when(client).signData(any(byte[].class)); - - secretManager.start(client); - tokenVerifier = new BlockTokenVerifier(securityConfig, client); - } - @After - public void tearDown() throws IOException { - secretManager = null; - client.close(); + secretKey = generateValidSecretKey(); + secretKeyId = secretKey.getId(); + + secretKeyClient = Mockito.mock(SecretKeyVerifierClient.class); + secretKeySignerClient = Mockito.mock(SecretKeySignerClient.class); + when(secretKeySignerClient.getCurrentSecretKey()).thenReturn(secretKey); + when(secretKeyClient.getSecretKey(secretKeyId)).thenReturn(secretKey); + + secretManager = new OzoneBlockTokenSecretManager( + TimeUnit.HOURS.toMillis(1), secretKeySignerClient); + tokenVerifier = new BlockTokenVerifier(securityConfig, secretKeyClient); } @Test @@ -153,7 +108,7 @@ public void testGenerateToken() throws Exception { identifier.getService()); 
Assert.assertEquals(EnumSet.allOf(AccessModeProto.class), identifier.getAccessModes()); - Assert.assertEquals(omCertSerialId, identifier.getCertSerialId()); + Assert.assertEquals(secretKeyId, identifier.getSecretKeyId()); validateHash(token.getPassword(), token.getIdentifier()); } @@ -170,9 +125,8 @@ public void testCreateIdentifierSuccess() throws Exception { btIdentifier.getService()); Assert.assertEquals(EnumSet.allOf(AccessModeProto.class), btIdentifier.getAccessModes()); - Assert.assertEquals(omCertSerialId, btIdentifier.getCertSerialId()); - byte[] hash = secretManager.createPassword(btIdentifier); + Assert.assertEquals(secretKeyId, btIdentifier.getSecretKeyId()); validateHash(hash, btIdentifier.getBytes()); } @@ -224,57 +178,8 @@ public void tokenCannotBeUsedForOtherBlock() throws Exception { OzoneBlockTokenIdentifier.getTokenService(otherBlockID))); } - /** - * Validate hash using public key of KeyPair. - * */ private void validateHash(byte[] hash, byte[] identifier) throws Exception { - Signature rsaSignature = - Signature.getInstance(secretManager.getDefaultSignatureAlgorithm()); - rsaSignature.initVerify(client.getPublicKey()); - rsaSignature.update(identifier); - assertTrue(rsaSignature.verify(hash)); - } - - @Test - @SuppressWarnings("java:S2699") - public void testCreateIdentifierFailure() throws Exception { - LambdaTestUtils.intercept(SecurityException.class, - "Ozone block token can't be created without owner and access mode " - + "information.", () -> { - secretManager.createIdentifier(); - }); - } - - @Test - @SuppressWarnings("java:S2699") - public void testRenewToken() throws Exception { - LambdaTestUtils.intercept(UnsupportedOperationException.class, - "Renew token operation is not supported for ozone block" + - " tokens.", () -> { - secretManager.renewToken(null, null); - }); - } - - @Test - @SuppressWarnings("java:S2699") - public void testCancelToken() throws Exception { - LambdaTestUtils.intercept(UnsupportedOperationException.class, - "Cancel token operation is not supported for ozone block" + - " tokens.", () -> { - secretManager.cancelToken(null, null); - }); - } - - @Test - @SuppressWarnings("java:S2699") - public void testVerifySignatureFailure() throws Exception { - OzoneBlockTokenIdentifier id = new OzoneBlockTokenIdentifier( - "testUser", "123", EnumSet.allOf(AccessModeProto.class), - Time.now() + 60 * 60 * 24, "123444", 1024); - LambdaTestUtils.intercept(UnsupportedOperationException.class, "operation" + - " is not supported for block tokens", - () -> secretManager.verifySignature(id, - client.signData(id.getBytes()))); + assertTrue(secretKey.isValidSignature(identifier, hash)); } @Test @@ -328,7 +233,7 @@ public void testBlockTokenWriteAccessMode() throws Exception { } @Test - public void testExpiredCertificate() throws Exception { + public void testExpiredSecretKey() throws Exception { String user = "testUser2"; BlockID blockID = new BlockID(102, 0); Token token = @@ -342,79 +247,23 @@ public void testExpiredCertificate() throws Exception { tokenVerifier.verify("testUser", token, writeChunkRequest); // Mock client with an expired cert - X509Certificate expiredCert = generateExpiredCert( - "CN=OzoneMaster", keyPair, ALGORITHM); - when(client.getCertificate(anyString())).thenReturn(expiredCert); + ManagedSecretKey expiredSecretKey = generateExpiredSecretKey(); + when(secretKeyClient.getSecretKey(any())).thenReturn(expiredSecretKey); BlockTokenException e = assertThrows(BlockTokenException.class, () -> tokenVerifier.verify(user, token, writeChunkRequest)); 
String msg = e.getMessage(); assertTrue(msg, msg.contains("Token can't be verified due to" + - " expired certificate")); - } - - @Test - public void testNetYetValidCertificate() throws Exception { - String user = "testUser2"; - BlockID blockID = new BlockID(102, 0); - Token token = - secretManager.generateToken(user, blockID, - EnumSet.allOf(AccessModeProto.class), 100); - ContainerCommandRequestProto writeChunkRequest = - newWriteChunkRequestBuilder(pipeline, blockID, 100) - .setEncodedToken(token.encodeToUrlString()) - .build(); - - tokenVerifier.verify(user, token, writeChunkRequest); - - // Mock client with an expired cert - X509Certificate netYetValidCert = generateNotValidYetCert( - "CN=OzoneMaster", keyPair, ALGORITHM); - when(client.getCertificate(anyString())). - thenReturn(netYetValidCert); - - BlockTokenException e = assertThrows(BlockTokenException.class, - () -> tokenVerifier.verify(user, token, writeChunkRequest)); - String msg = e.getMessage(); - assertTrue(msg, msg.contains("Token can't be verified due to not" + - " yet valid certificate")); - } - - private X509Certificate generateExpiredCert(String dn, - KeyPair pair, String algorithm) throws CertificateException, - IllegalStateException, IOException, OperatorCreationException { - Date from = new Date(); - // Set end date same as start date to make sure the cert is expired. - return generateTestCert(dn, pair, algorithm, from, from); + " expired secret key")); } - private X509Certificate generateNotValidYetCert(String dn, - KeyPair pair, String algorithm) throws CertificateException, - IllegalStateException, IOException, OperatorCreationException { - Date from = new Date(Instant.now().toEpochMilli() + 100000L); - Date to = new Date(from.getTime() + 200000L); - return generateTestCert(dn, pair, algorithm, from, to); + private ManagedSecretKey generateValidSecretKey() + throws NoSuchAlgorithmException { + return SecretKeyTestUtil.generateKey(ALGORITHM, now(), ofDays(1)); } - private X509Certificate generateTestCert(String dn, - KeyPair pair, String algorithm, Date from, Date to) - throws CertificateException, IllegalStateException, - IOException, OperatorCreationException { - BigInteger sn = new BigInteger(64, new SecureRandom()); - SubjectPublicKeyInfo subPubKeyInfo = SubjectPublicKeyInfo.getInstance( - pair.getPublic().getEncoded()); - X500Name subjectDN = new X500Name(dn); - X509v1CertificateBuilder builder = new X509v1CertificateBuilder( - subjectDN, sn, from, to, subjectDN, subPubKeyInfo); - - AlgorithmIdentifier sigAlgId = - new DefaultSignatureAlgorithmIdentifierFinder().find(algorithm); - AlgorithmIdentifier digAlgId = - new DefaultDigestAlgorithmIdentifierFinder().find(sigAlgId); - ContentSigner signer = - new BcRSAContentSignerBuilder(sigAlgId, digAlgId) - .build(PrivateKeyFactory.createKey(pair.getPrivate().getEncoded())); - X509CertificateHolder holder = builder.build(signer); - return new JcaX509CertificateConverter().getCertificate(holder); + private ManagedSecretKey generateExpiredSecretKey() throws Exception { + return SecretKeyTestUtil.generateKey(ALGORITHM, + now().minus(ofDays(2)), ofDays(1)); } } diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TokenVerifierTests.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TokenVerifierTests.java index 009b7a0105c3..1ff9bee053a9 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TokenVerifierTests.java +++ 
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/token/TokenVerifierTests.java @@ -19,25 +19,30 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; +import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey; +import org.apache.hadoop.hdds.security.symmetric.SecretKeySignerClient; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyVerifierClient; import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.security.token.TokenIdentifier; +import org.jetbrains.annotations.NotNull; import org.junit.jupiter.api.Test; +import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import javax.crypto.SecretKey; import java.io.IOException; -import java.security.cert.CertificateException; -import java.security.cert.CertificateExpiredException; -import java.security.cert.CertificateNotYetValidException; -import java.security.cert.X509Certificate; +import java.time.Duration; import java.time.Instant; +import java.util.UUID; import java.util.concurrent.TimeUnit; import static java.nio.charset.StandardCharsets.UTF_8; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.verify; @@ -51,13 +56,13 @@ public abstract class TokenVerifierTests { private static final Logger LOG = LoggerFactory.getLogger(TokenVerifierTests.class); - protected static final String CERT_ID = "123"; + protected static final UUID SECRET_KEY_ID = UUID.randomUUID(); /** * Create the specific kind of TokenVerifier. 
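+ * The returned verifier resolves signing keys through the given
+ * {@link SecretKeyVerifierClient}.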
*/ protected abstract TokenVerifier newTestSubject( - SecurityConfig secConf, CertificateClient caClient); + SecurityConfig secConf, SecretKeyVerifierClient secretKeyClient); /** * @return the config key to enable/disable the specific kind of tokens @@ -82,98 +87,138 @@ protected abstract ContainerCommandRequestProto verifiedRequest(T tokenId) @Test public void skipsVerificationIfDisabled() throws IOException { // GIVEN - CertificateClient caClient = mock(CertificateClient.class); - TokenVerifier subject = newTestSubject(tokenDisabled(), caClient); + SecretKeyVerifierClient secretKeyClient = mock( + SecretKeyVerifierClient.class); + TokenVerifier subject = newTestSubject(tokenDisabled(), secretKeyClient); // WHEN subject.verify("anyUser", anyToken(), verifiedRequest(newTokenId())); // THEN - verify(caClient, never()).getCertificate(any()); + verify(secretKeyClient, never()).getSecretKey(any()); } @Test public void skipsVerificationForMiscCommands() throws IOException { // GIVEN - CertificateClient caClient = mock(CertificateClient.class); - TokenVerifier subject = newTestSubject(tokenEnabled(), caClient); + SecretKeyVerifierClient secretKeyClient = mock( + SecretKeyVerifierClient.class); + TokenVerifier subject = newTestSubject(tokenEnabled(), secretKeyClient); // WHEN subject.verify("anyUser", anyToken(), unverifiedRequest()); // THEN - verify(caClient, never()).getCertificate(any()); + verify(secretKeyClient, never()).getSecretKey(any()); } @Test - public void rejectsExpiredCertificate() throws Exception { - rejectsInvalidCertificate(CertificateExpiredException.class); - } + public void rejectsExpiredSecretKey() throws Exception { + // GIVEN + SecretKeyVerifierClient secretKeyClient = + mock(SecretKeyVerifierClient.class); - @Test - public void rejectsNotYetValidCertificate() throws Exception { - rejectsInvalidCertificate(CertificateNotYetValidException.class); + Instant past = Instant.now().minus(Duration.ofHours(1)); + ManagedSecretKey expiredSecretKey = new ManagedSecretKey(UUID.randomUUID(), + past, past, Mockito.mock(SecretKey.class)); + + when(secretKeyClient.getSecretKey(SECRET_KEY_ID)) + .thenReturn(expiredSecretKey); + T tokenId = newTokenId(); + ContainerCommandRequestProto cmd = verifiedRequest(tokenId); + TokenVerifier subject = newTestSubject(tokenEnabled(), secretKeyClient); + + // WHEN+THEN + ShortLivedTokenSecretManager secretManager = new MockTokenManager(); + Token token = secretManager.generateToken(tokenId); + BlockTokenException ex = assertThrows(BlockTokenException.class, () -> + subject.verify("anyUser", token, cmd)); + assertThat(ex.getMessage(), containsString("expired secret key")); } - private void rejectsInvalidCertificate( - Class problem) throws Exception { + @Test + public void rejectsTokenWithInvalidSecretId() throws Exception { // GIVEN - CertificateClient caClient = mock(CertificateClient.class); - X509Certificate cert = invalidCertificate(problem); - when(caClient.getCertificate(CERT_ID)).thenReturn(cert); - ContainerCommandRequestProto cmd = verifiedRequest(newTokenId()); - TokenVerifier subject = newTestSubject(tokenEnabled(), caClient); + SecretKeyVerifierClient secretKeyClient = + mock(SecretKeyVerifierClient.class); + + when(secretKeyClient.getSecretKey(SECRET_KEY_ID)).thenReturn(null); + T tokenId = newTokenId(); + ContainerCommandRequestProto cmd = verifiedRequest(tokenId); + TokenVerifier subject = newTestSubject(tokenEnabled(), secretKeyClient); // WHEN+THEN - assertThrows(BlockTokenException.class, () -> - subject.verify("anyUser", anyToken(), 
cmd)); + ShortLivedTokenSecretManager secretManager = new MockTokenManager(); + Token token = secretManager.generateToken(tokenId); + BlockTokenException ex = assertThrows(BlockTokenException.class, () -> + subject.verify("anyUser", token, cmd)); + assertThat(ex.getMessage(), + containsString("Can't find the signing secret key")); } @Test public void rejectsInvalidSignature() throws Exception { // GIVEN - CertificateClient caClient = mock(CertificateClient.class); - when(caClient.getCertificate(CERT_ID)).thenReturn(validCertificate()); - Token invalidToken = new Token<>(); - validSignature(caClient, false); - ContainerCommandRequestProto cmd = verifiedRequest(newTokenId()); - TokenVerifier subject = newTestSubject(tokenEnabled(), caClient); + SecretKeyVerifierClient secretKeyClient = + mockSecretKeyClient(false); + + ShortLivedTokenSecretManager secretManager = new MockTokenManager(); + T tokenId = newTokenId(); + Token invalidToken = secretManager.generateToken(tokenId); + ContainerCommandRequestProto cmd = verifiedRequest(tokenId); + TokenVerifier subject = newTestSubject(tokenEnabled(), secretKeyClient); // WHEN+THEN - assertThrows(BlockTokenException.class, () -> - subject.verify("anyUser", invalidToken, cmd)); + BlockTokenException ex = + assertThrows(BlockTokenException.class, () -> + subject.verify("anyUser", invalidToken, cmd)); + assertThat(ex.getMessage(), + containsString("Invalid token for user")); + } + + @NotNull + private SecretKeyVerifierClient mockSecretKeyClient(boolean validSignature) + throws IOException { + SecretKeyVerifierClient secretKeyClient = + mock(SecretKeyVerifierClient.class); + ManagedSecretKey validSecretKey = Mockito.mock(ManagedSecretKey.class); + when(secretKeyClient.getSecretKey(SECRET_KEY_ID)) + .thenReturn(validSecretKey); + when(validSecretKey.isValidSignature((TokenIdentifier) any(), any())) + .thenReturn(validSignature); + return secretKeyClient; } @Test public void rejectsExpiredToken() throws Exception { // GIVEN - SecurityConfig conf = tokenEnabled(); - CertificateClient caClient = mock(CertificateClient.class); - when(caClient.getCertificate(CERT_ID)).thenReturn(validCertificate()); - validSignature(caClient, true); - ShortLivedTokenSecretManager secretManager = new MockTokenManager(conf); + SecretKeyVerifierClient secretKeyClient = mockSecretKeyClient(true); + + ShortLivedTokenSecretManager secretManager = new MockTokenManager(); T tokenId = expired(newTokenId()); ContainerCommandRequestProto cmd = verifiedRequest(tokenId); Token token = secretManager.generateToken(tokenId); - TokenVerifier subject = newTestSubject(tokenEnabled(), caClient); + TokenVerifier subject = newTestSubject(tokenEnabled(), secretKeyClient); // WHEN+THEN - assertThrows(BlockTokenException.class, () -> - subject.verify("anyUser", token, cmd)); + BlockTokenException ex = + assertThrows(BlockTokenException.class, () -> + subject.verify("anyUser", token, cmd)); + assertThat(ex.getMessage(), + containsString("Expired token for user")); } @Test public void acceptsValidToken() throws Exception { // GIVEN SecurityConfig conf = tokenEnabled(); - CertificateClient caClient = mock(CertificateClient.class); - when(caClient.getCertificate(CERT_ID)).thenReturn(validCertificate()); - validSignature(caClient, true); - ShortLivedTokenSecretManager secretManager = new MockTokenManager(conf); + SecretKeyVerifierClient secretKeyClient = mockSecretKeyClient(true); + + ShortLivedTokenSecretManager secretManager = new MockTokenManager(); T tokenId = valid(newTokenId()); 
ContainerCommandRequestProto cmd = verifiedRequest(tokenId); Token token = secretManager.generateToken(tokenId); - TokenVerifier subject = newTestSubject(conf, caClient); + TokenVerifier subject = newTestSubject(conf, secretKeyClient); // WHEN+THEN subject.verify("anyUser", token, cmd); @@ -189,24 +234,6 @@ private T valid(T tokenId) { return tokenId; } - private void validSignature(CertificateClient caClient, boolean valid) - throws Exception { - when(caClient.verifySignature(any(byte[].class), any(), any())) - .thenReturn(valid); - } - - private static X509Certificate invalidCertificate( - Class problem) - throws CertificateExpiredException, CertificateNotYetValidException { - X509Certificate cert = mock(X509Certificate.class); - doThrow(problem).when(cert).checkValidity(); - return cert; - } - - private static X509Certificate validCertificate() { - return mock(X509Certificate.class); - } - protected SecurityConfig tokenDisabled() { return getSecurityConfig(false); } @@ -230,8 +257,9 @@ private static Token anyToken() { */ private class MockTokenManager extends ShortLivedTokenSecretManager { - MockTokenManager(SecurityConfig conf) { - super(conf, TimeUnit.HOURS.toMillis(1), LOG); + MockTokenManager() { + super(TimeUnit.HOURS.toMillis(1), + Mockito.mock(SecretKeySignerClient.class)); } @Override diff --git a/hadoop-hdds/framework/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker b/hadoop-hdds/framework/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker new file mode 100644 index 000000000000..72652da02fc2 --- /dev/null +++ b/hadoop-hdds/framework/src/test/resources/mockito-extensions/org.mockito.plugins.MockMaker @@ -0,0 +1,16 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+mock-maker-inline diff --git a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto index b4ffd37a249a..97e704df95e1 100644 --- a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto +++ b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto @@ -532,7 +532,8 @@ message ContainerTokenSecretProto { required string ownerId = 1; required ContainerID containerId = 2; required uint64 expiryDate = 3; - required string certSerialId = 4; + optional string certSerialId = 4 [deprecated=true]; + optional UUID secretKeyId = 5; } message GetContainerTokenRequestProto { diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto index 975e61916090..1479daa1c667 100644 --- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto +++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto @@ -368,9 +368,10 @@ message BlockTokenSecretProto { required string ownerId = 1; required string blockId = 2; required uint64 expiryDate = 3; - required string omCertSerialId = 4; + optional string omCertSerialId = 4 [deprecated=true]; repeated AccessModeProto modes = 5; required uint64 maxLength = 6; + optional UUID secretKeyId = 7; } message BlockID { diff --git a/hadoop-hdds/interface-server/src/main/proto/SCMRatisProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/SCMRatisProtocol.proto index 41da6a5468f8..4fb0737b3925 100644 --- a/hadoop-hdds/interface-server/src/main/proto/SCMRatisProtocol.proto +++ b/hadoop-hdds/interface-server/src/main/proto/SCMRatisProtocol.proto @@ -29,6 +29,7 @@ enum RequestType { MOVE = 6; STATEFUL_SERVICE_CONFIG = 7; FINALIZE = 8; + SECRET_KEY = 9; } message Method { diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmSecretKeyProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmSecretKeyProtocol.proto new file mode 100644 index 000000000000..88b00ff7c314 --- /dev/null +++ b/hadoop-hdds/interface-server/src/main/proto/ScmSecretKeyProtocol.proto @@ -0,0 +1,111 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * These .proto interfaces are private and unstable. + * Please see http://wiki.apache.org/hadoop/Compatibility + * for what changes are allowed for an *unstable* .proto interface. + */ +syntax = "proto2"; + +option java_package = "org.apache.hadoop.hdds.protocol.proto"; + +option java_outer_classname = "SCMSecretKeyProtocolProtos"; + +option java_generic_services = true; + +option java_generate_equals_and_hash = true; + +package hadoop.hdds.security.symmetric; + +import "hdds.proto"; + +/** +All commands are sent as requests and all responses come back via the +Response class.
If adding new functions please follow this protocol, since +our tracing and visibility tools depend on this pattern. +*/ +message SCMSecretKeyRequest { + required Type cmdType = 1; // Type of the command + + optional string traceID = 2; + + optional SCMGetSecretKeyRequest getSecretKeyRequest = 3; +} + +message SCMSecretKeyResponse { + required Type cmdType = 1; // Type of the command + + // A string that identifies this command, we generate Trace ID in Ozone + // frontend and this allows us to trace that command all over ozone. + optional string traceID = 2; + + optional bool success = 3 [default = true]; + + optional string message = 4; + + required Status status = 5; + + optional SCMGetCurrentSecretKeyResponse currentSecretKeyResponseProto = 11; + + optional SCMGetSecretKeyResponse getSecretKeyResponseProto = 12; + + optional SCMSecretKeysListResponse secretKeysListResponseProto = 13; + +} + +enum Type { + GetCurrentSecretKey = 1; + GetSecretKey = 2; + GetAllSecretKeys = 3; +} + +enum Status { + OK = 1; + INTERNAL_ERROR = 2; + SECRET_KEY_NOT_ENABLED = 3; + SECRET_KEY_NOT_INITIALIZED = 4; +} + +service SCMSecretKeyProtocolService { + rpc submitRequest (SCMSecretKeyRequest) returns (SCMSecretKeyResponse); +} + +message ManagedSecretKey { + required UUID id = 1; + required uint64 creationTime = 2; + required uint64 expiryTime = 3; + required string algorithm = 4; + required bytes encoded = 5; +} + +message SCMGetSecretKeyRequest { + required UUID secretKeyId = 1; +} + +message SCMGetCurrentSecretKeyResponse { + required ManagedSecretKey secretKey = 1; +} + +message SCMGetSecretKeyResponse { + optional ManagedSecretKey secretKey = 1; +} + +message SCMSecretKeysListResponse { + repeated ManagedSecretKey secretKeys = 1; +} diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerSecurityProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerSecurityProtocol.proto index dc6bcf986c3d..1768444e079b 100644 --- a/hadoop-hdds/interface-server/src/main/proto/ScmServerSecurityProtocol.proto +++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerSecurityProtocol.proto @@ -80,7 +80,6 @@ message SCMSecurityResponse { optional SCMGetLatestCrlIdResponseProto getLatestCrlIdResponseProto = 9; optional SCMRevokeCertificatesResponseProto revokeCertificatesResponseProto = 10; - } enum Type { @@ -248,4 +247,4 @@ message SCMRevokeCertificatesResponseProto { service SCMSecurityProtocolService { rpc submitRequest (SCMSecurityRequest) returns (SCMSecurityResponse); -} +} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java index 0a86de3b372f..03f6ae293b2e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java @@ -20,10 +20,12 @@ import org.apache.hadoop.hdds.scm.AddSCMRequest; import org.apache.hadoop.hdds.scm.RemoveSCMRequest; import org.apache.hadoop.hdds.scm.metadata.DBTransactionBuffer; +import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey; import org.apache.hadoop.hdds.utils.db.DBCheckpoint; import org.apache.ratis.server.protocol.TermIndex; import java.io.IOException; +import java.util.List; /** * SCMHAManager provides HA service for SCM. 
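For reference, the envelope defined in ScmSecretKeyProtocol.proto above is driven from Java roughly as follows. This is only a sketch against the generated SCMSecretKeyProtocolProtos classes; the blocking stub and the secretKeyId variable are assumed to be in scope, and ProtobufUtils.toProtobuf is assumed as the UUID counterpart of the fromProtobuf helper used later in this patch:

    // Build a GetSecretKey request for a known key id (illustrative sketch,
    // not part of this patch; message and field names come from the .proto
    // definitions above).
    SCMSecretKeyRequest request = SCMSecretKeyRequest.newBuilder()
        .setCmdType(Type.GetSecretKey)
        .setTraceID("example-trace")
        .setGetSecretKeyRequest(SCMGetSecretKeyRequest.newBuilder()
            .setSecretKeyId(ProtobufUtils.toProtobuf(secretKeyId)))
        .build();

    SCMSecretKeyResponse response = stub.submitRequest(null, request);
    if (response.getStatus() == Status.OK
        && response.getGetSecretKeyResponseProto().hasSecretKey()) {
      ManagedSecretKey key = ManagedSecretKey.fromProtobuf(
          response.getGetSecretKeyResponseProto().getSecretKey());
    }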
@@ -87,6 +89,12 @@ public interface SCMHAManager extends AutoCloseable { */ DBCheckpoint downloadCheckpointFromLeader(String leaderId); + /** + * Get secret keys from the SCM leader. + */ + List<ManagedSecretKey> getSecretKeysFromLeader(String leaderID) + throws IOException; + /** * Verify the SCM DB checkpoint downloaded from leader. * diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java index 57c47b4b9d58..aeefdcbbf072 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java @@ -21,12 +21,16 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocolPB.SecretKeyProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.scm.AddSCMRequest; import org.apache.hadoop.hdds.scm.RemoveSCMRequest; import org.apache.hadoop.hdds.scm.metadata.DBTransactionBuffer; import org.apache.hadoop.hdds.scm.metadata.SCMDBTransactionBufferImpl; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore; +import org.apache.hadoop.hdds.scm.security.SecretKeyManagerService; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey; +import org.apache.hadoop.hdds.security.x509.SecurityConfig; import org.apache.hadoop.hdds.utils.HAUtils; import com.google.common.annotations.VisibleForTesting; @@ -37,6 +41,7 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.OzoneSecurityUtil; import org.apache.hadoop.hdds.ExitManager; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.ratis.server.protocol.TermIndex; import org.apache.ratis.util.FileUtils; import org.slf4j.Logger; @@ -45,6 +50,9 @@ import java.io.File; import java.io.IOException; import java.nio.file.Path; +import java.util.List; + +import static org.apache.hadoop.hdds.utils.HddsServerUtil.getSecretKeyClientForScm; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_DBTRANSACTIONBUFFER_FLUSH_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_DBTRANSACTIONBUFFER_FLUSH_INTERVAL_DEFAULT; @@ -64,6 +72,7 @@ public class SCMHAManagerImpl implements SCMHAManager { private final SCMRatisServer ratisServer; private final ConfigurationSource conf; + private final SecurityConfig securityConfig; private final DBTransactionBuffer transactionBuffer; private final SCMSnapshotProvider scmSnapshotProvider; private final StorageContainerManager scm; @@ -77,8 +86,10 @@ public class SCMHAManagerImpl implements SCMHAManager { * Creates SCMHAManager instance.
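+ * @param securityConfig used by {@link #getSecretKeysFromLeader} to check
+ * whether symmetric secret keys are enabled before fetching them.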
*/ public SCMHAManagerImpl(final ConfigurationSource conf, + final SecurityConfig securityConfig, final StorageContainerManager scm) throws IOException { this.conf = conf; + this.securityConfig = securityConfig; this.scm = scm; this.exitManager = new ExitManager(); if (SCMHAUtils.isSCMHAEnabled(conf)) { @@ -188,6 +199,21 @@ public DBCheckpoint downloadCheckpointFromLeader(String leaderId) { return dBCheckpoint; } + @Override + public List getSecretKeysFromLeader(String leaderID) + throws IOException { + if (!SecretKeyManagerService.isSecretKeyEnable(securityConfig)) { + return null; + } + + LOG.info("Getting secret keys from leader {}.", leaderID); + try (SecretKeyProtocolClientSideTranslatorPB secretKeyProtocol = + getSecretKeyClientForScm(conf, leaderID, + UserGroupInformation.getLoginUser())) { + return secretKeyProtocol.getAllSecretKeys(); + } + } + @Override public TermIndex verifyCheckpointFromLeader(String leaderId, DBCheckpoint checkpoint) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerStub.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerStub.java index 7c926ce8843f..31941c85c586 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerStub.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerStub.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hdds.scm.AddSCMRequest; import org.apache.hadoop.hdds.scm.RemoveSCMRequest; import org.apache.hadoop.hdds.scm.metadata.DBTransactionBuffer; +import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.DBCheckpoint; import org.apache.hadoop.hdds.utils.db.DBStore; @@ -148,6 +149,11 @@ public DBCheckpoint downloadCheckpointFromLeader(String leaderId) { return null; } + @Override + public List getSecretKeysFromLeader(String leaderID) { + return null; + } + @Override public TermIndex verifyCheckpointFromLeader(String leaderId, DBCheckpoint checkpoint) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java index c88331db9808..93846615373f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.lang.reflect.InvocationTargetException; import java.util.EnumMap; +import java.util.List; import java.util.Map; import java.util.Collection; import java.util.Optional; @@ -37,6 +38,7 @@ import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey; import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.DBCheckpoint; import org.apache.hadoop.util.concurrent.HadoopExecutors; @@ -62,6 +64,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static java.util.Objects.requireNonNull; + /** * The SCMStateMachine is the state machine for SCMRatisServer. 
It is * responsible for applying ratis committed transactions to @@ -83,6 +87,7 @@ public class SCMStateMachine extends BaseStateMachine { // ensures serializable between notifyInstallSnapshotFromLeader() // and reinitialize(). private DBCheckpoint installingDBCheckpoint = null; + private List installingSecretKeys = null; private AtomicLong currentLeaderTerm = new AtomicLong(-1L); private AtomicBoolean refreshedAfterLeaderReady = new AtomicBoolean(false); @@ -243,12 +248,23 @@ public CompletableFuture notifyInstallSnapshotFromLeader( return null; } + List secretKeys; + try { + secretKeys = + scm.getScmHAManager().getSecretKeysFromLeader(leaderNodeId); + LOG.info("Got secret keys from leaders {}", secretKeys); + } catch (IOException ex) { + LOG.error("Failed to get secret keys from SCM leader {}", + leaderNodeId, ex); + return null; + } + TermIndex termIndex = scm.getScmHAManager().verifyCheckpointFromLeader( leaderNodeId, checkpoint); if (termIndex != null) { - setInstallingDBCheckpoint(checkpoint); + setInstallingSnapshotData(checkpoint, secretKeys); } return termIndex; }, @@ -381,9 +397,11 @@ public void pause() { public void reinitialize() throws IOException { Preconditions.checkNotNull(installingDBCheckpoint); DBCheckpoint checkpoint = installingDBCheckpoint; + List secretKeys = installingSecretKeys; // explicitly set installingDBCheckpoint to be null installingDBCheckpoint = null; + installingSecretKeys = null; TermIndex termIndex = null; try { @@ -402,6 +420,10 @@ public void reinitialize() throws IOException { LOG.error("Failed to unpause ", ioe); } + if (secretKeys != null) { + requireNonNull(scm.getSecretKeyManager()).reinitialize(secretKeys); + } + getLifeCycle().transition(LifeCycle.State.STARTING); getLifeCycle().transition(LifeCycle.State.RUNNING); } @@ -425,8 +447,10 @@ public void close() throws IOException { } @VisibleForTesting - public void setInstallingDBCheckpoint(DBCheckpoint checkpoint) { + public void setInstallingSnapshotData(DBCheckpoint checkpoint, + List secretKeys) { Preconditions.checkArgument(installingDBCheckpoint == null); installingDBCheckpoint = checkpoint; + installingSecretKeys = secretKeys; } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/CodecFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/CodecFactory.java index 6c75593be14c..af7705150984 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/CodecFactory.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/CodecFactory.java @@ -23,6 +23,7 @@ import com.google.protobuf.ProtocolMessageEnum; import org.apache.commons.lang3.ClassUtils; +import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey; import java.math.BigInteger; import java.security.cert.X509Certificate; @@ -49,6 +50,7 @@ public final class CodecFactory { codecs.put(BigInteger.class, new BigIntegerCodec()); codecs.put(X509Certificate.class, new X509CertificateCodec()); codecs.put(ByteString.class, new ByteStringCodec()); + codecs.put(ManagedSecretKey.class, new ManagedSecretKeyCodec()); } private CodecFactory() { } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/ListCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/ListCodec.java index 0667b8776f11..67d8d5522794 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/ListCodec.java +++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/ListCodec.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.scm.ha.ReflectionUtil; import java.lang.reflect.InvocationTargetException; +import java.util.ArrayList; import java.util.List; /** @@ -51,8 +52,11 @@ public ByteString serialize(Object object) public Object deserialize(Class type, ByteString value) throws InvalidProtocolBufferException { try { + // If argument type is the generic interface, then determine a + // concrete implementation. + Class concreteType = (type == List.class) ? ArrayList.class : type; - List result = (List) type.newInstance(); + List result = (List) concreteType.newInstance(); final ListArgument listArgs = (ListArgument) ReflectionUtil .getMethod(ListArgument.class, "parseFrom", byte[].class) .invoke(null, (Object) value.toByteArray()); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/ManagedSecretKeyCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/ManagedSecretKeyCodec.java new file mode 100644 index 000000000000..32705bb2a7ed --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/ManagedSecretKeyCodec.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.ha.io; + +import com.google.protobuf.ByteString; +import com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hdds.protocol.proto.SCMSecretKeyProtocolProtos; +import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey; + +/** + * A codec for {@link ManagedSecretKey} objects. + */ +public class ManagedSecretKeyCodec implements Codec { + @Override + public ByteString serialize(Object object) + throws InvalidProtocolBufferException { + ManagedSecretKey secretKey = (ManagedSecretKey) object; + return ByteString.copyFrom(secretKey.toProtobuf().toByteArray()); + } + + @Override + public Object deserialize(Class type, ByteString value) + throws InvalidProtocolBufferException { + SCMSecretKeyProtocolProtos.ManagedSecretKey message = + SCMSecretKeyProtocolProtos.ManagedSecretKey.parseFrom(value); + return ManagedSecretKey.fromProtobuf(message); + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/SecretKeyProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/SecretKeyProtocolServerSideTranslatorPB.java new file mode 100644 index 000000000000..527d7a42fa66 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/SecretKeyProtocolServerSideTranslatorPB.java @@ -0,0 +1,165 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.hdds.scm.protocol; + +import com.google.protobuf.ProtocolMessageEnum; +import com.google.protobuf.RpcController; +import com.google.protobuf.ServiceException; +import org.apache.hadoop.hdds.protocol.SecretKeyProtocol; +import org.apache.hadoop.hdds.protocol.proto.SCMSecretKeyProtocolProtos.SCMGetCurrentSecretKeyResponse; +import org.apache.hadoop.hdds.protocol.proto.SCMSecretKeyProtocolProtos.SCMGetSecretKeyRequest; +import org.apache.hadoop.hdds.protocol.proto.SCMSecretKeyProtocolProtos.SCMGetSecretKeyResponse; +import org.apache.hadoop.hdds.protocol.proto.SCMSecretKeyProtocolProtos.SCMSecretKeyRequest; +import org.apache.hadoop.hdds.protocol.proto.SCMSecretKeyProtocolProtos.SCMSecretKeyResponse; +import org.apache.hadoop.hdds.protocol.proto.SCMSecretKeyProtocolProtos.SCMSecretKeysListResponse; +import org.apache.hadoop.hdds.protocol.proto.SCMSecretKeyProtocolProtos.Status; +import org.apache.hadoop.hdds.protocolPB.SecretKeyProtocolDatanodePB; +import org.apache.hadoop.hdds.protocolPB.SecretKeyProtocolOmPB; +import org.apache.hadoop.hdds.scm.ha.RatisUtil; +import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.hdds.security.exception.SCMSecretKeyException; +import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey; +import org.apache.hadoop.hdds.server.OzoneProtocolMessageDispatcher; +import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics; +import org.apache.hadoop.util.ProtobufUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.UUID; + +/** + * This class is the server-side translator that forwards requests received on + * {@link SecretKeyProtocolDatanodePB} to the server implementation. 
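+ * The same class also implements {@link SecretKeyProtocolOmPB}, since both
+ * PB interfaces share the SCMSecretKeyRequest/SCMSecretKeyResponse envelope.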
+ */ +public class SecretKeyProtocolServerSideTranslatorPB + implements SecretKeyProtocolDatanodePB, SecretKeyProtocolOmPB { + + private static final Logger LOG = + LoggerFactory.getLogger(SecretKeyProtocolServerSideTranslatorPB.class); + + private final SecretKeyProtocol impl; + private final StorageContainerManager scm; + + private OzoneProtocolMessageDispatcher dispatcher; + + public SecretKeyProtocolServerSideTranslatorPB(SecretKeyProtocol impl, + StorageContainerManager storageContainerManager, + ProtocolMessageMetrics messageMetrics) { + this.impl = impl; + this.scm = storageContainerManager; + this.dispatcher = + new OzoneProtocolMessageDispatcher<>("SCMSecretKeyProtocol", + messageMetrics, LOG); + } + + @Override + public SCMSecretKeyResponse submitRequest(RpcController controller, + SCMSecretKeyRequest request) throws ServiceException { + if (!scm.checkLeader()) { + RatisUtil.checkRatisException( + scm.getScmHAManager().getRatisServer().triggerNotLeaderException(), + scm.getSecurityProtocolRpcPort(), scm.getScmId()); + } + return dispatcher.processRequest(request, this::processRequest, + request.getCmdType(), request.getTraceID()); + } + + public SCMSecretKeyResponse processRequest(SCMSecretKeyRequest request) + throws ServiceException { + SCMSecretKeyResponse.Builder scmSecurityResponse = + SCMSecretKeyResponse.newBuilder().setCmdType(request.getCmdType()) + .setStatus(Status.OK); + try { + switch (request.getCmdType()) { + case GetCurrentSecretKey: + return scmSecurityResponse + .setCurrentSecretKeyResponseProto(getCurrentSecretKey()) + .build(); + + case GetSecretKey: + return scmSecurityResponse.setGetSecretKeyResponseProto( + getSecretKey(request.getGetSecretKeyRequest())) + .build(); + + case GetAllSecretKeys: + return scmSecurityResponse + .setSecretKeysListResponseProto(getAllSecretKeys()) + .build(); + + default: + throw new IllegalArgumentException( + "Unknown request type: " + request.getCmdType()); + } + } catch (IOException e) { + RatisUtil.checkRatisException(e, scm.getSecurityProtocolRpcPort(), + scm.getScmId()); + scmSecurityResponse.setSuccess(false); + scmSecurityResponse.setStatus(exceptionToResponseStatus(e)); + // If actual cause is set in SCMSecurityException, set message with + // actual cause message. 
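+ // Prefer the exception's own message; otherwise fall back to the
+ // message of its cause, when present.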
+ if (e.getMessage() != null) { + scmSecurityResponse.setMessage(e.getMessage()); + } else { + if (e.getCause() != null && e.getCause().getMessage() != null) { + scmSecurityResponse.setMessage(e.getCause().getMessage()); + } + } + return scmSecurityResponse.build(); + } + } + + private SCMSecretKeysListResponse getAllSecretKeys() + throws IOException { + SCMSecretKeysListResponse.Builder builder = + SCMSecretKeysListResponse.newBuilder(); + impl.getAllSecretKeys() + .stream().map(ManagedSecretKey::toProtobuf) + .forEach(builder::addSecretKeys); + return builder.build(); + } + + private SCMGetSecretKeyResponse getSecretKey( + SCMGetSecretKeyRequest getSecretKeyRequest) throws IOException { + SCMGetSecretKeyResponse.Builder builder = + SCMGetSecretKeyResponse.newBuilder(); + UUID id = ProtobufUtils.fromProtobuf(getSecretKeyRequest.getSecretKeyId()); + ManagedSecretKey secretKey = impl.getSecretKey(id); + if (secretKey != null) { + builder.setSecretKey(secretKey.toProtobuf()); + } + return builder.build(); + } + + private SCMGetCurrentSecretKeyResponse getCurrentSecretKey() + throws IOException { + return SCMGetCurrentSecretKeyResponse.newBuilder() + .setSecretKey(impl.getCurrentSecretKey().toProtobuf()) + .build(); + } + + private Status exceptionToResponseStatus(IOException ex) { + if (ex instanceof SCMSecretKeyException) { + return Status.values()[ + ((SCMSecretKeyException) ex).getErrorCode().ordinal()]; + } else { + return Status.INTERNAL_ERROR; + } + } + +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/ScmSecretKeyStateBuilder.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/ScmSecretKeyStateBuilder.java new file mode 100644 index 000000000000..c689fd2db39d --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/ScmSecretKeyStateBuilder.java @@ -0,0 +1,60 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.security; + +import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol; +import org.apache.hadoop.hdds.scm.ha.SCMHAInvocationHandler; +import org.apache.hadoop.hdds.scm.ha.SCMRatisServer; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyState; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyStateImpl; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyStore; + +import java.lang.reflect.Proxy; + +/** + * Builder for {@link SecretKeyState} with a proper proxy to make @Replicate + * happen. + */ +public class ScmSecretKeyStateBuilder { + private SecretKeyStore secretKeyStore; + private SCMRatisServer scmRatisServer; + + public ScmSecretKeyStateBuilder setSecretKeyStore( + SecretKeyStore secretKeyStore) { + this.secretKeyStore = secretKeyStore; + return this; + } + + public ScmSecretKeyStateBuilder setRatisServer( + final SCMRatisServer ratisServer) { + scmRatisServer = ratisServer; + return this; + } + + public SecretKeyState build() { + final SecretKeyState impl = new SecretKeyStateImpl(secretKeyStore); + + final SCMHAInvocationHandler scmhaInvocationHandler = + new SCMHAInvocationHandler(SCMRatisProtocol.RequestType.SECRET_KEY, + impl, scmRatisServer); + + return (SecretKeyState) Proxy.newProxyInstance( + SCMHAInvocationHandler.class.getClassLoader(), + new Class[]{SecretKeyState.class}, scmhaInvocationHandler); + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/SecretKeyManagerService.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/SecretKeyManagerService.java new file mode 100644 index 000000000000..1761f9799223 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/SecretKeyManagerService.java @@ -0,0 +1,163 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.hdds.scm.security; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.scm.ha.SCMContext; +import org.apache.hadoop.hdds.scm.ha.SCMRatisServer; +import org.apache.hadoop.hdds.scm.ha.SCMService; +import org.apache.hadoop.hdds.security.symmetric.LocalSecretKeyStore; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyConfig; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyManager; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyState; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyStore; +import org.apache.hadoop.hdds.security.x509.SecurityConfig; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +import static org.apache.hadoop.ozone.OzoneConsts.SCM_CA_CERT_STORAGE_DIR; + +/** + * A background service running in SCM to maintain the SecretKeys lifecycle. + */ +public class SecretKeyManagerService implements SCMService, Runnable { + public static final Logger LOG = + LoggerFactory.getLogger(SecretKeyManagerService.class); + + private final SCMContext scmContext; + private final SecretKeyManager secretKeyManager; + private final SecretKeyConfig secretKeyConfig; + + + /** + * SCMService related variables. + */ + private final Lock serviceLock = new ReentrantLock(); + private ServiceStatus serviceStatus = ServiceStatus.PAUSING; + + private final ScheduledExecutorService scheduler; + + @SuppressWarnings("parameternumber") + public SecretKeyManagerService(SCMContext scmContext, + ConfigurationSource conf, + SCMRatisServer ratisServer) { + this.scmContext = scmContext; + + secretKeyConfig = new SecretKeyConfig(conf, + SCM_CA_CERT_STORAGE_DIR); + SecretKeyStore secretKeyStore = new LocalSecretKeyStore( + secretKeyConfig.getLocalSecretKeyFile()); + SecretKeyState secretKeyState = new ScmSecretKeyStateBuilder() + .setSecretKeyStore(secretKeyStore) + .setRatisServer(ratisServer) + .build(); + secretKeyManager = new SecretKeyManager(secretKeyState, + secretKeyStore, secretKeyConfig); + + scheduler = Executors.newScheduledThreadPool(1, + new ThreadFactoryBuilder().setDaemon(true) + .setNameFormat(getServiceName()) + .build()); + + start(); + } + + @Override + public void notifyStatusChanged() { + serviceLock.lock(); + try { + if (scmContext.isLeaderReady()) { + // Asynchronously initialize SecretKeys for first time leader. 
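+ // Scheduled with zero delay so notifyStatusChanged() returns
+ // immediately; checkAndInitialize() may block while the initialized
+ // state replicates through Ratis.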
+ if (!secretKeyManager.isInitialized()) { + scheduler.schedule(() -> { + try { + secretKeyManager.checkAndInitialize(); + } catch (TimeoutException e) { + throw new RuntimeException( + "Timeout replicating initialized state.", e); + } + }, 0, TimeUnit.SECONDS); + } + + serviceStatus = ServiceStatus.RUNNING; + } else { + serviceStatus = ServiceStatus.PAUSING; + } + } finally { + serviceLock.unlock(); + } + } + + @Override + public boolean shouldRun() { + serviceLock.lock(); + try { + return serviceStatus == ServiceStatus.RUNNING; + } finally { + serviceLock.unlock(); + } + } + + @Override + public void run() { + if (!shouldRun()) { + return; + } + + try { + secretKeyManager.checkAndRotate(); + } catch (TimeoutException e) { + LOG.error("Error occurred when updating SecretKeys.", e); + } + } + + @Override + public String getServiceName() { + return SecretKeyManagerService.class.getSimpleName(); + } + + @Override + public void start() { + LOG.info("Scheduling rotation checker with interval {}", + secretKeyConfig.getRotationCheckDuration()); + scheduler.scheduleAtFixedRate(this, 0, + secretKeyConfig.getRotationCheckDuration().toMillis(), + TimeUnit.MILLISECONDS); + } + + public SecretKeyManager getSecretKeyManager() { + return secretKeyManager; + } + + @Override + public void stop() { + scheduler.shutdownNow(); + } + + public static boolean isSecretKeyEnable(SecurityConfig conf) { + return conf.isSecurityEnabled() && + (conf.isBlockTokenEnabled() || conf.isContainerTokenEnabled()); + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/package-info.java new file mode 100644 index 000000000000..296e7f0883ab --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Encapsulates classes dealing with security concerns in SCM. + */ +package org.apache.hadoop.hdds.scm.security; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMPolicyProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMPolicyProvider.java index 0602ba2341b8..ce0d5e25ebc0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMPolicyProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMPolicyProvider.java @@ -22,7 +22,10 @@ import org.apache.hadoop.hdds.annotation.InterfaceAudience.Private; import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.annotation.InterfaceStability.Unstable; +import org.apache.hadoop.hdds.protocol.SecretKeyProtocolDatanode; +import org.apache.hadoop.hdds.protocol.SecretKeyProtocolOm; import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; +import org.apache.hadoop.hdds.protocol.SecretKeyProtocolScm; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol; @@ -35,6 +38,9 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_CLIENT_SCM_BLOCK_PROTOCOL_ACL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_CLIENT_SCM_CERTIFICATE_PROTOCOL_ACL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_CLIENT_SCM_CONTAINER_PROTOCOL_ACL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_CLIENT_SCM_SECRET_KEY_DATANODE_PROTOCOL_ACL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_CLIENT_SCM_SECRET_KEY_OM_PROTOCOL_ACL; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECURITY_CLIENT_SCM_SECRET_KEY_SCM_PROTOCOL_ACL; /** * {@link PolicyProvider} for SCM protocols.
@@ -72,6 +78,15 @@ public static SCMPolicyProvider getInstance() { new Service( HDDS_SECURITY_CLIENT_SCM_CERTIFICATE_PROTOCOL_ACL, SCMSecurityProtocol.class), + new Service( + HDDS_SECURITY_CLIENT_SCM_SECRET_KEY_OM_PROTOCOL_ACL, + SecretKeyProtocolOm.class), + new Service( + HDDS_SECURITY_CLIENT_SCM_SECRET_KEY_SCM_PROTOCOL_ACL, + SecretKeyProtocolScm.class), + new Service( + HDDS_SECURITY_CLIENT_SCM_SECRET_KEY_DATANODE_PROTOCOL_ACL, + SecretKeyProtocolDatanode.class) }; @SuppressFBWarnings("EI_EXPOSE_REP") diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java index 2a9941557315..dd882eb93806 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMSecurityProtocolServer.java @@ -30,6 +30,7 @@ import java.util.List; import java.util.Objects; import java.util.Optional; +import java.util.UUID; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; import java.util.stream.Collectors; @@ -37,19 +38,29 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.SecretKeyProtocol; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeDetailsProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.OzoneManagerDetailsProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmNodeDetailsProto; +import org.apache.hadoop.hdds.protocol.proto.SCMSecretKeyProtocolProtos; import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos; +import org.apache.hadoop.hdds.protocolPB.SecretKeyProtocolDatanodePB; +import org.apache.hadoop.hdds.protocolPB.SecretKeyProtocolOmPB; import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolPB; +import org.apache.hadoop.hdds.protocolPB.SecretKeyProtocolScmPB; +import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes; +import org.apache.hadoop.hdds.scm.protocol.SecretKeyProtocolServerSideTranslatorPB; import org.apache.hadoop.hdds.scm.update.server.SCMUpdateServiceGrpcServer; import org.apache.hadoop.hdds.scm.update.client.UpdateServiceConfig; import org.apache.hadoop.hdds.scm.update.server.SCMCRLStore; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.protocol.SCMSecurityProtocolServerSideTranslatorPB; +import org.apache.hadoop.hdds.security.exception.SCMSecretKeyException; import org.apache.hadoop.hdds.security.exception.SCMSecurityException; +import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyManager; import org.apache.hadoop.hdds.security.x509.crl.CRLInfo; import org.apache.hadoop.hdds.utils.HddsServerUtil; import org.apache.hadoop.hdds.scm.ScmConfig; @@ -68,6 +79,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import javax.annotation.Nullable; + +import static org.apache.hadoop.hdds.security.exception.SCMSecretKeyException.ErrorCode.SECRET_KEY_NOT_ENABLED; +import static org.apache.hadoop.hdds.security.exception.SCMSecretKeyException.ErrorCode.SECRET_KEY_NOT_INITIALIZED; 
import static org.apache.hadoop.hdds.security.exception.SCMSecurityException.ErrorCode.CERTIFICATE_NOT_FOUND; import static org.apache.hadoop.hdds.security.exception.SCMSecurityException.ErrorCode.GET_CA_CERT_FAILED; import static org.apache.hadoop.hdds.security.exception.SCMSecurityException.ErrorCode.GET_CERTIFICATE_FAILED; @@ -79,7 +94,8 @@ @KerberosInfo( serverPrincipal = ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_PRINCIPAL_KEY) @InterfaceAudience.Private -public class SCMSecurityProtocolServer implements SCMSecurityProtocol { +public class SCMSecurityProtocolServer implements SCMSecurityProtocol, + SecretKeyProtocol { private static final Logger LOGGER = LoggerFactory .getLogger(SCMSecurityProtocolServer.class); @@ -90,17 +106,24 @@ public class SCMSecurityProtocolServer implements SCMSecurityProtocol { private final SCMUpdateServiceGrpcServer grpcUpdateServer; // gRPC SERVER private final InetSocketAddress rpcAddress; private final ProtocolMessageMetrics metrics; + private final ProtocolMessageMetrics secretKeyMetrics; private final StorageContainerManager storageContainerManager; + // SecretKey is not enabled when neither block token nor container token + // is enabled; in that case this field stays null. + private final SecretKeyManager secretKeyManager; + SCMSecurityProtocolServer(OzoneConfiguration conf, CertificateServer rootCertificateServer, CertificateServer scmCertificateServer, - X509Certificate rootCACert, StorageContainerManager scm) + X509Certificate rootCACert, StorageContainerManager scm, + @Nullable SecretKeyManager secretKeyManager) throws IOException { this.storageContainerManager = scm; this.rootCertificateServer = rootCertificateServer; this.scmCertificateServer = scmCertificateServer; this.rootCACertificate = rootCACert; + this.secretKeyManager = secretKeyManager; final int handlerCount = conf.getInt(ScmConfigKeys.OZONE_SCM_SECURITY_HANDLER_COUNT_KEY, ScmConfigKeys.OZONE_SCM_SECURITY_HANDLER_COUNT_DEFAULT); @@ -112,11 +135,20 @@ public class SCMSecurityProtocolServer implements SCMSecurityProtocol { metrics = new ProtocolMessageMetrics("ScmSecurityProtocol", "SCM Security protocol metrics", SCMSecurityProtocolProtos.Type.values()); + secretKeyMetrics = new ProtocolMessageMetrics("ScmSecretKeyProtocol", + "SCM SecretKey protocol metrics", + SCMSecretKeyProtocolProtos.Type.values()); BlockingService secureProtoPbService = SCMSecurityProtocolProtos.SCMSecurityProtocolService .newReflectiveBlockingService( new SCMSecurityProtocolServerSideTranslatorPB(this, scm, metrics)); + BlockingService secretKeyService = + SCMSecretKeyProtocolProtos.SCMSecretKeyProtocolService + .newReflectiveBlockingService( + new SecretKeyProtocolServerSideTranslatorPB( + this, scm, secretKeyMetrics) + ); this.rpcServer = StorageContainerManager.startRpcServer( conf, @@ -124,6 +156,12 @@ public class SCMSecurityProtocolServer implements SCMSecurityProtocol { SCMSecurityProtocolPB.class, secureProtoPbService, handlerCount); + HddsServerUtil.addPBProtocol(conf, SecretKeyProtocolDatanodePB.class, + secretKeyService, rpcServer); + HddsServerUtil.addPBProtocol(conf, SecretKeyProtocolOmPB.class, + secretKeyService, rpcServer); + HddsServerUtil.addPBProtocol(conf, SecretKeyProtocolScmPB.class, + secretKeyService, rpcServer); if (conf.getBoolean( CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false)) { rpcServer.refreshServiceAcl(conf, SCMPolicyProvider.getInstance()); @@ -162,6 +200,38 @@ public String getCertificate( return getEncodedCertToString(certSignReq, nodeDetails.getNodeType()); } + @Override + public
ManagedSecretKey getCurrentSecretKey() throws SCMSecretKeyException { + validateSecretKeyStatus(); + return secretKeyManager.getCurrentSecretKey(); + } + + @Override + public ManagedSecretKey getSecretKey(UUID id) throws SCMSecretKeyException { + validateSecretKeyStatus(); + return secretKeyManager.getSecretKey(id); + } + + @Override + public List getAllSecretKeys() + throws SCMSecretKeyException { + validateSecretKeyStatus(); + return secretKeyManager.getSortedKeys(); + } + + private void validateSecretKeyStatus() throws SCMSecretKeyException { + if (secretKeyManager == null) { + throw new SCMSecretKeyException("Secret keys are not enabled.", + SECRET_KEY_NOT_ENABLED); + } + + if (!secretKeyManager.isInitialized()) { + throw new SCMSecretKeyException( + "Secret key initialization is not finished yet.", + SECRET_KEY_NOT_INITIALIZED); + } + } + /** * Get SCM signed certificate for OM. * @@ -368,7 +438,7 @@ public long revokeCertificates(List certIds, int reason, } catch (InterruptedException | ExecutionException e) { Thread.currentThread().interrupt(); throw new SCMException("Fail to revoke certs", - SCMException.ResultCodes.FAILED_TO_REVOKE_CERTIFICATES); + ResultCodes.FAILED_TO_REVOKE_CERTIFICATES); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 3c076c90e4ce..8991a1e11ca8 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -65,6 +65,7 @@ import org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator; import org.apache.hadoop.hdds.scm.ScmInfo; import org.apache.hadoop.hdds.scm.node.NodeAddressUpdateHandler; +import org.apache.hadoop.hdds.scm.security.SecretKeyManagerService; import org.apache.hadoop.hdds.scm.server.upgrade.FinalizationManager; import org.apache.hadoop.hdds.scm.server.upgrade.FinalizationManagerImpl; import org.apache.hadoop.hdds.scm.ha.StatefulServiceStateManager; @@ -72,6 +73,7 @@ import org.apache.hadoop.hdds.scm.server.upgrade.SCMUpgradeFinalizationContext; import org.apache.hadoop.hdds.scm.server.upgrade.ScmHAUnfinalizedStateValidationAction; import org.apache.hadoop.hdds.scm.pipeline.WritableContainerFactory; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyManager; import org.apache.hadoop.hdds.security.token.ContainerTokenGenerator; import org.apache.hadoop.hdds.security.token.ContainerTokenSecretManager; import org.apache.hadoop.hdds.security.x509.certificate.authority.CAType; @@ -129,7 +131,6 @@ import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager; import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode; import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.IncrementalContainerReportFromDatanode; -import org.apache.hadoop.hdds.security.OzoneSecurityException; import org.apache.hadoop.hdds.security.x509.SecurityConfig; import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateServer; import org.apache.hadoop.hdds.security.x509.certificate.authority.DefaultCAServer; @@ -165,7 +166,6 @@ import javax.management.ObjectName; import java.io.IOException; -import java.io.UncheckedIOException; import java.math.BigInteger; import java.net.InetAddress; import java.net.InetSocketAddress; @@ -189,6 +189,7 @@ import static 
org.apache.hadoop.hdds.ratis.RatisHelper.newJvmPauseMonitor; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_EVENT_REPORT_EXEC_WAIT_THRESHOLD_DEFAULT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_EVENT_REPORT_QUEUE_WAIT_THRESHOLD_DEFAULT; +import static org.apache.hadoop.hdds.scm.security.SecretKeyManagerService.isSecretKeyEnable; import static org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateStore.CertType.VALID_CERTS; import static org.apache.hadoop.hdds.utils.HddsServerUtil.getRemoteUser; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS; @@ -303,6 +304,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl // container replicas. private ContainerReplicaPendingOps containerReplicaPendingOps; private final AtomicBoolean isStopped = new AtomicBoolean(false); + private final SecretKeyManagerService secretKeyManagerService; private Clock systemClock; @@ -382,6 +384,14 @@ private StorageContainerManager(OzoneConfiguration conf, initializeSystemManagers(conf, configurator); + if (isSecretKeyEnable(securityConfig)) { + secretKeyManagerService = new SecretKeyManagerService(scmContext, conf, + scmHAManager.getRatisServer()); + serviceManager.register(secretKeyManagerService); + } else { + secretKeyManagerService = null; + } + // Authenticate SCM if security is enabled, this initialization can only // be done after the metadata store is initialized. if (OzoneSecurityUtil.isSecurityEnabled(conf)) { @@ -616,7 +626,7 @@ private void initializeSystemManagers(OzoneConfiguration conf, if (configurator.getSCMHAManager() != null) { scmHAManager = configurator.getSCMHAManager(); } else { - scmHAManager = new SCMHAManagerImpl(conf, this); + scmHAManager = new SCMHAManagerImpl(conf, securityConfig, this); } if (configurator.getLeaseManager() != null) { @@ -863,6 +873,9 @@ certificateStore, new DefaultCAProfile(), certificateStore, scmStorageConfig, new DefaultProfile()); } + SecretKeyManager secretKeyManager = secretKeyManagerService != null ? + secretKeyManagerService.getSecretKeyManager() : null; + // We need to pass getCACertificate as rootCA certificate, // as for SCM CA is root-CA. securityProtocolServer = new SCMSecurityProtocolServer(conf, @@ -870,7 +883,7 @@ certificateStore, new DefaultCAProfile(), scmCertificateClient == null ? null : scmCertificateClient.getRootCACertificate() != null ? 
scmCertificateClient.getRootCACertificate() : - scmCertificateClient.getCACertificate(), this); + scmCertificateClient.getCACertificate(), this, secretKeyManager); if (securityConfig.isContainerTokenEnabled()) { containerTokenMgr = createContainerTokenSecretManager(configuration); @@ -972,8 +985,8 @@ private ContainerTokenSecretManager createContainerTokenSecretManager( scmCertificateClient = new SCMCertificateClient(securityConfig, certSerialNumber, SCM_ROOT_CA_COMPONENT_NAME); } - return new ContainerTokenSecretManager(securityConfig, - expiryTime); + return new ContainerTokenSecretManager(expiryTime, + secretKeyManagerService.getSecretKeyManager()); } /** @@ -1469,7 +1482,6 @@ public void start() throws IOException { } scmHAManager.start(); - startSecretManagerIfNecessary(); ms = HddsServerUtil .initializeMetrics(configuration, "StorageContainerManager"); @@ -1617,8 +1629,6 @@ public void stop() { LOG.error("SCM block manager service stop failed.", ex); } - stopSecretManager(); - if (metrics != null) { metrics.unRegister(); } @@ -1798,6 +1808,12 @@ public SCMSafeModeManager getScmSafeModeManager() { return scmSafeModeManager; } + @VisibleForTesting + public SecretKeyManager getSecretKeyManager() { + return secretKeyManagerService != null ? + secretKeyManagerService.getSecretKeyManager() : null; + } + @Override public ReplicationManager getReplicationManager() { return replicationManager; @@ -1996,46 +2012,6 @@ public SCMHAMetrics getScmHAMetrics() { return scmHAMetrics; } - private void startSecretManagerIfNecessary() { - boolean shouldRun = securityConfig.isSecurityEnabled() - && securityConfig.isContainerTokenEnabled() - && containerTokenMgr != null; - if (shouldRun) { - boolean running = containerTokenMgr.isRunning(); - if (!running) { - startSecretManager(); - } - } - } - - private void startSecretManager() { - try { - scmCertificateClient.assertValidKeysAndCertificate(); - } catch (OzoneSecurityException e) { - LOG.error("Unable to read key pair.", e); - throw new UncheckedIOException(e); - } - try { - LOG.info("Starting token manager"); - containerTokenMgr.start(scmCertificateClient); - } catch (IOException e) { - // Unable to start secret manager. - LOG.error("Error starting block token secret manager.", e); - throw new UncheckedIOException(e); - } - } - - private void stopSecretManager() { - if (containerTokenMgr != null) { - LOG.info("Stopping block token manager."); - try { - containerTokenMgr.stop(); - } catch (IOException e) { - LOG.error("Failed to stop block token manager", e); - } - } - } - public ContainerTokenGenerator getContainerTokenGenerator() { return containerTokenMgr != null ? 
containerTokenMgr diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAManagerImpl.java index f1fd5590c091..27eacb742bc8 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAManagerImpl.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.scm.server.upgrade.FinalizationManager; +import org.apache.hadoop.hdds.security.x509.SecurityConfig; import org.apache.hadoop.hdds.security.x509.certificate.client .CertificateClient; import org.apache.hadoop.hdds.utils.TransactionInfo; @@ -257,7 +258,8 @@ private StorageContainerManager getMockStorageContainerManager( conf.get(ScmConfigKeys.OZONE_SCM_RATIS_PORT_KEY)); when(scm.getSystemClock()).thenReturn(Clock.system(ZoneOffset.UTC)); - final SCMHAManager manager = new SCMHAManagerImpl(conf, scm); + final SCMHAManager manager = new SCMHAManagerImpl(conf, + new SecurityConfig(conf), scm); when(scm.getScmHAManager()).thenReturn(manager); return scm; } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMSecurityProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMSecurityProtocolServer.java index 5480caaf9776..82f2cc182a94 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMSecurityProtocolServer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMSecurityProtocolServer.java @@ -41,7 +41,7 @@ public void setUp() throws Exception { config.set(OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY, OZONE_SCM_SECURITY_SERVICE_BIND_HOST_DEFAULT + ":0"); securityProtocolServer = new SCMSecurityProtocolServer(config, null, - null, null, null); + null, null, null, null); } @AfterEach diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java index 7af924454bf1..b8eb7b0cb1bf 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java @@ -148,7 +148,7 @@ public void testGetVersionTask() throws Exception { conf.setBoolean( OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true); OzoneContainer ozoneContainer = new OzoneContainer( - datanodeDetails, conf, getContext(datanodeDetails), null); + datanodeDetails, conf, getContext(datanodeDetails)); rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION); VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint, conf, ozoneContainer); @@ -182,7 +182,7 @@ public void testTmpDirCleanup() throws Exception { serverAddress, 1000)) { DatanodeDetails datanodeDetails = randomDatanodeDetails(); OzoneContainer ozoneContainer = new OzoneContainer( - datanodeDetails, conf, getContext(datanodeDetails), null); + datanodeDetails, conf, getContext(datanodeDetails)); rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION); String clusterId = scmServerImpl.getClusterId(); @@ -261,7 +261,7 @@ public void testCheckVersionResponse() throws Exception { 
.captureLogs(VersionEndpointTask.LOG); DatanodeDetails datanodeDetails = randomDatanodeDetails(); OzoneContainer ozoneContainer = new OzoneContainer( - datanodeDetails, conf, getContext(datanodeDetails), null); + datanodeDetails, conf, getContext(datanodeDetails)); rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION); VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint, conf, ozoneContainer); @@ -310,7 +310,7 @@ public void testGetVersionToInvalidEndpoint() throws Exception { rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION); DatanodeDetails datanodeDetails = randomDatanodeDetails(); OzoneContainer ozoneContainer = new OzoneContainer( - datanodeDetails, conf, getContext(datanodeDetails), null); + datanodeDetails, conf, getContext(datanodeDetails)); VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint, conf, ozoneContainer); EndpointStateMachine.EndPointStates newState = versionTask.call(); @@ -338,7 +338,7 @@ public void testGetVersionAssertRpcTimeOut() throws Exception { rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION); DatanodeDetails datanodeDetails = randomDatanodeDetails(); OzoneContainer ozoneContainer = new OzoneContainer( - datanodeDetails, conf, getContext(datanodeDetails), null); + datanodeDetails, conf, getContext(datanodeDetails)); VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint, conf, ozoneContainer); @@ -576,7 +576,7 @@ private StateContext heartbeatTaskHelper( // Create a datanode state machine for stateConext used by endpoint task try (DatanodeStateMachine stateMachine = new DatanodeStateMachine( - randomDatanodeDetails(), conf, null, null, null); + randomDatanodeDetails(), conf); EndpointStateMachine rpcEndPoint = createEndpoint(conf, scmAddress, rpcTimeout)) { HddsProtos.DatanodeDetailsProto datanodeDetailsProto = diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java index 91d4b944045d..4843c1c45e6c 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java @@ -74,6 +74,17 @@ private static List createStreams( // BlockInputStream is only created here and not initialized. The // BlockInputStream is initialized when a read operation is performed on // the block for the first time. 
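+ // retryFunction may be null for callers that cannot re-fetch key info; + // build the per-block retry lambda only when one was supplied, so the + // stream factory receives null instead of a lambda that would throw + // NullPointerException once invoked.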
+ Function retry; + if (retryFunction != null) { + retry = keyBlockID -> { + OmKeyInfo newKeyInfo = retryFunction.apply(keyInfo); + return getBlockLocationInfo(newKeyInfo, + omKeyLocationInfo.getBlockID()); + }; + } else { + retry = null; + } + BlockExtendedInputStream stream = blockStreamFactory.create( keyInfo.getReplicationConfig(), @@ -82,11 +93,7 @@ private static List createStreams( omKeyLocationInfo.getToken(), verifyChecksum, xceiverClientFactory, - keyBlockID -> { - OmKeyInfo newKeyInfo = retryFunction.apply(keyInfo); - return getBlockLocationInfo(newKeyInfo, - omKeyLocationInfo.getBlockID()); - }); + retry); partStreams.add(stream); } return partStreams; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index 3ad0b6f33d58..4eea2aa7a298 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -258,6 +258,7 @@ public static boolean isReadOnly( case TenantListUser: case ListSnapshot: case EchoRPC: + case RefetchSecretKey: case RangerBGSync: // RangerBGSync is a read operation in the sense that it doesn't directly // write to OM DB. And therefore it doesn't need a OMClientRequest. diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java index 4442b6d06105..9a7acb02f573 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java @@ -80,6 +80,19 @@ private OMConfigKeys() { public static final int OZONE_OM_PORT_DEFAULT = 9862; public static final String OZONE_OM_GRPC_PORT_KEY = "ozone.om.grpc.port"; + + public static final String OZONE_OM_GRPC_BOSSGROUP_SIZE_KEY = + "ozone.om.grpc.bossgroup.size"; + public static final int OZONE_OM_GRPC_BOSSGROUP_SIZE_DEFAULT = 8; + + public static final String OZONE_OM_GRPC_WORKERGROUP_SIZE_KEY = + "ozone.om.grpc.workergroup.size"; + public static final int OZONE_OM_GRPC_WORKERGROUP_SIZE_DEFAULT = 32; + + public static final String OZONE_OM_GRPC_READ_THREAD_NUM_KEY = + "ozone.om.grpc.read.thread.num"; + public static final int OZONE_OM_GRPC_READ_THREAD_NUM_DEFAULT = 32; + public static final String OZONE_OM_HTTP_ENABLED_KEY = "ozone.om.http.enabled"; public static final String OZONE_OM_HTTP_BIND_HOST_KEY = diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java index 1fcf6b994bef..12e5d117506e 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java @@ -21,6 +21,7 @@ import java.io.Closeable; import java.io.IOException; import java.util.List; +import java.util.UUID; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.ozone.OzoneAcl; @@ -1028,4 +1029,6 @@ boolean recoverLease(String volumeName, String bucketName, */ void setTimes(OmKeyArgs keyArgs, long mtime, long atime) throws IOException; + + UUID refetchSecretKey() throws IOException; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index 0b3eb6119e26..1e696402b142 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -22,6 +22,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.UUID; import java.util.stream.Collectors; import org.apache.commons.lang3.StringUtils; @@ -156,6 +157,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RecoverLeaseResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RecoverTrashRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RecoverTrashResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RefetchSecretKeyRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RefetchSecretKeyResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RemoveAclResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysArgs; @@ -204,6 +207,7 @@ import com.google.common.base.Preconditions; import com.google.common.base.Strings; import com.google.protobuf.ByteString; +import org.apache.hadoop.util.ProtobufUtils; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER; @@ -1382,6 +1386,19 @@ public S3VolumeContext getS3VolumeContext() throws IOException { return S3VolumeContext.fromProtobuf(resp); } + @Override + public UUID refetchSecretKey() throws IOException { + final RefetchSecretKeyRequest.Builder requestBuilder = + RefetchSecretKeyRequest.newBuilder(); + final OMRequest omRequest = createOMRequest(Type.RefetchSecretKey) + .setRefetchSecretKeyRequest(requestBuilder) + .build(); + final OMResponse omResponse = submitRequest(omRequest); + final RefetchSecretKeyResponse resp = + handleError(omResponse).getRefetchSecretKeyResponse(); + return ProtobufUtils.fromProtobuf(resp.getId()); + } + /** * Return the proxy object underlying this protocol translator. * diff --git a/hadoop-ozone/dist/src/main/compose/ozone/test-ec.sh b/hadoop-ozone/dist/src/main/compose/ozone/test-ec.sh new file mode 100644 index 000000000000..70693ceeab78 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/ozone/test-ec.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +#suite:EC + +COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +export COMPOSE_DIR + +export SECURITY_ENABLED=false +export OZONE_REPLICATION_FACTOR=3 + +# shellcheck source=/dev/null +source "$COMPOSE_DIR/../testlib.sh" + +start_docker_env 5 + +execute_robot_test scm -v BUCKET:erasure s3 + +prefix=${RANDOM} +execute_robot_test scm -v PREFIX:${prefix} ec/basic.robot +docker-compose up -d --no-recreate --scale datanode=4 +execute_robot_test scm -v PREFIX:${prefix} -N read-4-datanodes ec/read.robot +docker-compose up -d --no-recreate --scale datanode=3 +execute_robot_test scm -v PREFIX:${prefix} -N read-3-datanodes ec/read.robot +docker-compose up -d --no-recreate --scale datanode=5 + +stop_docker_env + +generate_report diff --git a/hadoop-ozone/dist/src/main/compose/ozone/test.sh b/hadoop-ozone/dist/src/main/compose/ozone/test.sh index 2958f90b5624..02cf5e6bff06 100755 --- a/hadoop-ozone/dist/src/main/compose/ozone/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozone/test.sh @@ -26,7 +26,7 @@ export OZONE_REPLICATION_FACTOR=3 # shellcheck source=/dev/null source "$COMPOSE_DIR/../testlib.sh" -start_docker_env 5 +start_docker_env execute_robot_test scm lib execute_robot_test scm ozone-lib @@ -39,13 +39,6 @@ execute_robot_test scm gdpr execute_robot_test scm security/ozone-secure-token.robot -exclude="" -for bucket in erasure; do - execute_robot_test scm -v BUCKET:${bucket} -N s3-${bucket} ${exclude} s3 - # some tests are independent of the bucket type, only need to be run once - exclude="--exclude no-bucket-type" -done - execute_robot_test scm recon execute_robot_test scm om-ratis @@ -60,13 +53,6 @@ execute_debug_tests execute_robot_test scm -v SCHEME:o3fs -v BUCKET_TYPE:bucket -N ozonefs-o3fs-bucket ozonefs/ozonefs.robot -prefix=${RANDOM} -execute_robot_test scm -v PREFIX:${prefix} ec/basic.robot -docker-compose up -d --no-recreate --scale datanode=4 -execute_robot_test scm -v PREFIX:${prefix} ec/read.robot -docker-compose up -d --no-recreate --scale datanode=3 -execute_robot_test scm -v PREFIX:${prefix} ec/read.robot - execute_robot_test s3g grpc/grpc-om-s3-metrics.robot execute_robot_test scm --exclude pre-finalized-snapshot-tests snapshot diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config index 9be6e14a2973..b57e5913e77c 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config @@ -162,3 +162,8 @@ no_proxy=om,scm,recon,s3g,kdc,localhost,127.0.0.1 # Explicitly enable filesystem snapshot feature for this Docker compose cluster OZONE-SITE.XML_ozone.filesystem.snapshot.enabled=true + + +OZONE-SITE.XML_hdds.secret.key.rotate.duration=5m +OZONE-SITE.XML_hdds.secret.key.rotate.check.duration=1m +OZONE-SITE.XML_hdds.secret.key.expiry.duration=1h \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/datanode.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/datanode.robot index 9f31afb254ed..340d3de82575 100644 --- a/hadoop-ozone/dist/src/main/smoketest/admincli/datanode.robot +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/datanode.robot @@ -21,12 +21,13 @@ Test Timeout 5 minutes *** Test Cases *** List datanodes - ${output} = Execute ozone admin datanode list + Execute ozone admin datanode list > datanode.list + ${output} = Get File datanode.list Should contain ${output} Datanode: Should contain ${output} Related pipelines: Filter list by UUID 
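+    [Documentation]    Reuses the datanode.list file written by the List datanodes test above.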
- ${uuid} = Execute ozone admin datanode list | grep '^Datanode:' | head -1 | awk '{ print \$2 }' + ${uuid} = Execute grep '^Datanode:' datanode.list | head -1 | awk '{ print \$2 }' ${output} = Execute ozone admin datanode list --id "${uuid}" Should contain ${output} Datanode: ${uuid} ${datanodes} = Get Lines Containing String ${output} Datanode: @@ -35,8 +36,8 @@ Filter list by UUID Should Be Equal As Integers ${count} 1 Filter list by NodeOperationalState - ${uuid} = Execute ozone admin datanode list | grep '^Datanode:' | head -1 | awk '{ print \$2 }' - ${expected} = Execute ozone admin datanode list | grep -c 'Operational State: IN_SERVICE' + ${uuid} = Execute grep '^Datanode:' datanode.list | head -1 | awk '{ print \$2 }' + ${expected} = Execute grep -c 'Operational State: IN_SERVICE' datanode.list ${output} = Execute ozone admin datanode list --operational-state IN_SERVICE Should contain ${output} Datanode: ${uuid} ${datanodes} = Get Lines Containing String ${output} Datanode: @@ -45,8 +46,8 @@ Filter list by NodeOperationalState Should Be Equal As Integers ${count} ${expected} Filter list by NodeState - ${uuid} = Execute ozone admin datanode list | grep '^Datanode:' | head -1 | awk '{ print \$2 }' - ${expected} = Execute ozone admin datanode list | grep -c 'Health State: HEALTHY' + ${uuid} = Execute grep '^Datanode:' datanode.list | head -1 | awk '{ print \$2 }' + ${expected} = Execute grep -c 'Health State: HEALTHY' datanode.list ${output} = Execute ozone admin datanode list --node-state HEALTHY Should contain ${output} Datanode: ${uuid} ${datanodes} = Get Lines Containing String ${output} Datanode: diff --git a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell-lib.robot b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell-lib.robot index 5ab293846408..b7df1aa5e880 100644 --- a/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell-lib.robot +++ b/hadoop-ozone/dist/src/main/smoketest/basic/ozone-shell-lib.robot @@ -36,8 +36,6 @@ Test ozone shell Should Be Empty ${result} ${result} = Execute ozone sh volume list ${protocol}${server}/ | jq -r '.[] | select(.name=="${volume}")' Should contain ${result} creationTime - ${result} = Execute ozone sh volume list | jq -r '.[] | select(.name=="${volume}")' - Should contain ${result} creationTime # TODO: Disable updating the owner, acls should be used to give access to other user. Execute ozone sh volume setquota ${protocol}${server}/${volume} --space-quota 10TB --namespace-quota 100 # ${result} = Execute ozone sh volume info ${protocol}${server}/${volume} | jq -r '. | select(.volumeName=="${volume}") | .owner | .name' @@ -46,16 +44,18 @@ Test ozone shell Should Be Equal ${result} 10995116277760 ${result} = Execute ozone sh bucket create ${protocol}${server}/${volume}/bb1 --space-quota 10TB --namespace-quota 100 Should Be Empty ${result} - ${result} = Execute ozone sh bucket info ${protocol}${server}/${volume}/bb1 | jq -r '. | select(.name=="bb1") | .storageType' + Execute ozone sh bucket info ${protocol}${server}/${volume}/bb1 > bb1.json + ${result} = Execute jq -r '. | select(.name=="bb1") | .storageType' bb1.json Should Be Equal ${result} DISK - ${result} = Execute ozone sh bucket info ${protocol}${server}/${volume}/bb1 | jq -r '. | select(.name=="bb1") | .quotaInBytes' + ${result} = Execute jq -r '. | select(.name=="bb1") | .quotaInBytes' bb1.json Should Be Equal ${result} 10995116277760 - ${result} = Execute ozone sh bucket info ${protocol}${server}/${volume}/bb1 | jq -r '. 
| select(.name=="bb1") | .quotaInNamespace' + ${result} = Execute jq -r '. | select(.name=="bb1") | .quotaInNamespace' bb1.json Should Be Equal ${result} 100 Execute ozone sh bucket setquota ${protocol}${server}/${volume}/bb1 --space-quota 1TB --namespace-quota 1000 - ${result} = Execute ozone sh bucket info ${protocol}${server}/${volume}/bb1 | jq -r '. | select(.name=="bb1") | .quotaInBytes' + Execute ozone sh bucket info ${protocol}${server}/${volume}/bb1 > bb1.json + ${result} = Execute jq -r '. | select(.name=="bb1") | .quotaInBytes' bb1.json Should Be Equal ${result} 1099511627776 - ${result} = Execute ozone sh bucket info ${protocol}${server}/${volume}/bb1 | jq -r '. | select(.name=="bb1") | .quotaInNamespace' + ${result} = Execute jq -r '. | select(.name=="bb1") | .quotaInNamespace' bb1.json Should Be Equal ${result} 1000 ${result} = Execute ozone sh bucket list ${protocol}${server}/${volume}/ | jq -r '.[] | select(.name=="bb1") | .volumeName' Should Be Equal ${result} ${volume} @@ -75,14 +75,16 @@ Test ozone shell Execute ozone sh bucket delete ${protocol}${server}/${volume}/bb1 Execute ozone sh volume delete ${protocol}${server}/${volume} Execute ozone sh volume create ${protocol}${server}/${volume} - ${result} = Execute ozone sh volume info ${protocol}${server}/${volume} | jq -r '. | select(.name=="${volume}") | .quotaInBytes' + Execute ozone sh volume info ${protocol}${server}/${volume} > volume.json + ${result} = Execute jq -r '. | select(.name=="${volume}") | .quotaInBytes' volume.json Should Be Equal ${result} -1 - ${result} = Execute ozone sh volume info ${protocol}${server}/${volume} | jq -r '. | select(.name=="${volume}") | .quotaInNamespace' + ${result} = Execute jq -r '. | select(.name=="${volume}") | .quotaInNamespace' volume.json Should Be Equal ${result} -1 Execute ozone sh bucket create ${protocol}${server}/${volume}/bb1 - ${result} = Execute ozone sh bucket info ${protocol}${server}/${volume}/bb1 | jq -r '. | select(.name=="bb1") | .quotaInBytes' + Execute ozone sh bucket info ${protocol}${server}/${volume}/bb1 > bb1.json + ${result} = Execute jq -r '. | select(.name=="bb1") | .quotaInBytes' bb1.json Should Be Equal ${result} -1 - ${result} = Execute ozone sh bucket info ${protocol}${server}/${volume}/bb1 | jq -r '. | select(.name=="bb1") | .quotaInNamespace' + ${result} = Execute jq -r '. 
| select(.name=="bb1") | .quotaInNamespace' bb1.json Should Be Equal ${result} -1 Execute ozone sh bucket delete ${protocol}${server}/${volume}/bb1 Execute ozone sh volume delete ${protocol}${server}/${volume} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java index cd8a7d2e68e0..407ae946649e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java @@ -151,7 +151,7 @@ public void testInstallCheckPoint() throws Exception { SCMStateMachine sm = scm.getScmHAManager().getRatisServer().getSCMStateMachine(); sm.pause(); - sm.setInstallingDBCheckpoint(checkpoint); + sm.setInstallingSnapshotData(checkpoint, null); sm.reinitialize(); Assert.assertNotNull( diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java index 0c2fb3887e92..a9cf2c108622 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java @@ -45,10 +45,10 @@ import org.apache.hadoop.hdds.scm.pipeline.WritableECContainerProvider.WritableECContainerProviderConfig; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyClient; import org.apache.hadoop.hdds.security.token.ContainerTokenIdentifier; import org.apache.hadoop.hdds.security.token.ContainerTokenSecretManager; import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSecretManager; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClientTestImpl; import org.apache.hadoop.ozone.HddsDatanodeService; @@ -60,6 +60,7 @@ import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.client.SecretKeyTestClient; import org.apache.hadoop.ozone.client.io.InsufficientLocationsException; import org.apache.hadoop.ozone.client.io.KeyOutputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; @@ -112,7 +113,6 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_TOKEN_ENABLED; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto.READ; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto.WRITE; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY; import static org.apache.hadoop.ozone.container.ContainerTestHelper.newWriteChunkRequestBuilder; /** @@ -151,6 +151,7 @@ public class TestContainerCommandsEC { private static Token containerToken; private static ContainerTokenSecretManager containerTokenGenerator; private static OzoneBlockTokenSecretManager blockTokenGenerator; + private static SecretKeyClient secretKeyClient; 
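+ // secretKeyClient stands in for the SCM-backed secret key source (see + // SecretKeyTestClient in startCluster); the container and block token + // secret managers below sign test tokens against it.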
private List clients = null; private static OzoneConfiguration config; private static CertificateClient certClient; @@ -365,8 +366,8 @@ public void testOrphanBlock() throws Exception { } try (ECReconstructionCoordinator coordinator = - new ECReconstructionCoordinator(config, certClient, null, - ECReconstructionMetrics.create())) { + new ECReconstructionCoordinator(config, certClient, secretKeyClient, + null, ECReconstructionMetrics.create())) { // Attempt to reconstruct the container. coordinator.reconstructECContainerGroup(orphanContainerID, @@ -582,7 +583,7 @@ private void testECReconstructionCoordinator(List missingIndexes, XceiverClientManager xceiverClientManager = new XceiverClientManager(config); ECReconstructionCoordinator coordinator = - new ECReconstructionCoordinator(config, certClient, + new ECReconstructionCoordinator(config, certClient, secretKeyClient, null, ECReconstructionMetrics.create())) { ECReconstructionMetrics metrics = @@ -778,7 +779,7 @@ public void testECReconstructionCoordinatorShouldCleanupContainersOnFailure() Assert.assertThrows(IOException.class, () -> { try (ECReconstructionCoordinator coordinator = - new ECReconstructionCoordinator(config, certClient, + new ECReconstructionCoordinator(config, certClient, secretKeyClient, null, ECReconstructionMetrics.create())) { coordinator.reconstructECContainerGroup(conID, (ECReplicationConfig) containerPipeline.getReplicationConfig(), @@ -840,10 +841,12 @@ public static void startCluster(OzoneConfiguration conf) throws Exception { OzoneManager.setTestSecureOmFlag(true); certClient = new CertificateClientTestImpl(conf); + secretKeyClient = new SecretKeyTestClient(); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(NUM_DN) .setScmId(SCM_ID).setClusterId(CLUSTER_ID) .setCertificateClient(certClient) + .setSecretKeyClient(secretKeyClient) .build(); cluster.waitForClusterToBeReady(); cluster.getOzoneManager().startSecretManager(); @@ -887,16 +890,11 @@ public static void prepareData(int[][] ranges) throws Exception { pipeline = pipelines.get(0); datanodeDetails = pipeline.getNodes(); - OzoneConfiguration tweakedConfig = new OzoneConfiguration(config); - tweakedConfig.setBoolean(OZONE_SECURITY_ENABLED_KEY, true); - SecurityConfig conf = new SecurityConfig(tweakedConfig); long tokenLifetime = TimeUnit.DAYS.toMillis(1); containerTokenGenerator = new ContainerTokenSecretManager( - conf, tokenLifetime); - containerTokenGenerator.start(certClient); + tokenLifetime, secretKeyClient); blockTokenGenerator = new OzoneBlockTokenSecretManager( - conf, tokenLifetime); - blockTokenGenerator.start(certClient); + tokenLifetime, secretKeyClient); containerToken = containerTokenGenerator .generateToken(ANY_USER, new ContainerID(containerID)); } @@ -913,14 +911,6 @@ public static void stopCluster() throws IOException { if (cluster != null) { cluster.shutdown(); } - - if (blockTokenGenerator != null) { - blockTokenGenerator.stop(); - } - - if (containerTokenGenerator != null) { - containerTokenGenerator.stop(); - } } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java index 42c9fc4849d4..f9f6871f546b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestScmHAFinalization.java @@ -35,9 +35,9 @@ import 
org.apache.hadoop.ozone.upgrade.UpgradeFinalizationExecutor; import org.apache.hadoop.ozone.upgrade.UpgradeTestUtils; import org.apache.ozone.test.GenericTestUtils; +import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; @@ -61,7 +61,6 @@ * Tests upgrade finalization failure scenarios and corner cases specific to SCM * HA. */ -@Disabled("HDDS-8714, HDDS-8740") public class TestScmHAFinalization { private static final String CLIENT_ID = UUID.randomUUID().toString(); private static final Logger LOG = @@ -189,6 +188,7 @@ public void testFinalizationWithLeaderChange( @ParameterizedTest @MethodSource(METHOD_SOURCE) + @Flaky("HDDS-8714") public void testFinalizationWithRestart( UpgradeTestInjectionPoints haltingPoint) throws Exception { CountDownLatch terminateLatch = new CountDownLatch(1); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java index a23e286aa0a4..a1696f049d46 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.scm.server.SCMConfigurator; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyClient; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.om.OzoneManager; @@ -346,6 +347,7 @@ abstract class Builder { protected int numDataVolumes = 1; protected boolean startDataNodes = true; protected CertificateClient certClient; + protected SecretKeyClient secretKeyClient; protected int pipelineNumLimit = DEFAULT_PIPELINE_LIMIT; protected Builder(OzoneConfiguration conf) { @@ -408,6 +410,11 @@ public Builder setCertificateClient(CertificateClient client) { return this; } + public Builder setSecretKeyClient(SecretKeyClient client) { + this.secretKeyClient = client; + return this; + } + /** * Sets the SCM id. 
* diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java index cc3e70db5307..5f1271f4cbf6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java @@ -61,6 +61,7 @@ import org.apache.hadoop.hdds.scm.server.SCMConfigurator; import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyClient; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.CodecBuffer; @@ -130,6 +131,7 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster { private int waitForClusterToBeReadyTimeout = 120000; // 2 min private CertificateClient caClient; private final Set clients = ConcurrentHashMap.newKeySet(); + private SecretKeyClient secretKeyClient; /** * Creates a new MiniOzoneCluster with Recon. @@ -482,6 +484,7 @@ public void startHddsDatanodes() { } catch (IOException e) { LOG.error("Exception while setting certificate client to DataNode.", e); } + datanode.setSecretKeyClient(secretKeyClient); datanode.start(); }); } @@ -516,6 +519,10 @@ private void setCAClient(CertificateClient client) { this.caClient = client; } + private void setSecretKeyClient(SecretKeyClient client) { + this.secretKeyClient = client; + } + private static void stopDatanodes( Collection hddsDatanodes) { if (!hddsDatanodes.isEmpty()) { @@ -590,6 +597,9 @@ public MiniOzoneCluster build() throws IOException { if (certClient != null) { om.setCertClient(certClient); } + if (secretKeyClient != null) { + om.setSecretKeyClient(secretKeyClient); + } om.start(); if (includeRecon) { @@ -606,6 +616,7 @@ public MiniOzoneCluster build() throws IOException { hddsDatanodes, reconServer); cluster.setCAClient(certClient); + cluster.setSecretKeyClient(secretKeyClient); if (startDataNodes) { cluster.startHddsDatanodes(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java index 91ff85e1afec..7d2d22e34b2e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java @@ -165,6 +165,12 @@ public StorageContainerManager getStorageContainerManager(int index) { return this.scmhaService.getServiceByIndex(index); } + public StorageContainerManager getScmLeader() { + return getStorageContainerManagers().stream() + .filter(StorageContainerManager::checkLeader) + .findFirst().orElse(null); + } + private OzoneManager getOMLeader(boolean waitForLeaderElection) throws TimeoutException, InterruptedException { if (waitForLeaderElection) { @@ -643,10 +649,15 @@ private void initSCMHAConfig() { scmServiceId, scmNodeId); String scmGrpcPortKey = ConfUtils.addKeySuffixes( ScmConfigKeys.OZONE_SCM_GRPC_PORT_KEY, scmServiceId, scmNodeId); + String scmSecurityAddrKey = ConfUtils.addKeySuffixes( + ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY, scmServiceId, + scmNodeId); conf.set(scmAddrKey, "127.0.0.1"); conf.set(scmHttpAddrKey, 
localhostWithFreePort()); conf.set(scmHttpsAddrKey, localhostWithFreePort()); + conf.set(scmSecurityAddrKey, localhostWithFreePort()); + conf.set("ozone.scm.update.service.port", "0"); int ratisPort = getFreePort(); conf.setInt(scmRatisPortKey, ratisPort); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java new file mode 100644 index 000000000000..6661c81b44c6 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokens.java @@ -0,0 +1,394 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone; + +import org.apache.commons.lang3.RandomStringUtils; +import org.apache.commons.lang3.RandomUtils; +import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.hadoop.hdds.conf.DefaultConfigManager; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.ScmConfig; +import org.apache.hadoop.hdds.scm.XceiverClientFactory; +import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; +import org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig; +import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyManager; +import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; +import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.minikdc.MiniKdc; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.io.BlockInputStreamFactory; +import org.apache.hadoop.ozone.client.io.BlockInputStreamFactoryImpl; +import org.apache.hadoop.ozone.client.io.KeyInputStream; +import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.client.rpc.RpcClient; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.Token; +import org.apache.ozone.test.GenericTestUtils; +import org.apache.ratis.util.ExitUtils; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.Timeout; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.net.InetAddress; +import java.util.Objects; +import 
java.util.Properties; +import java.util.UUID; +import java.util.concurrent.TimeoutException; +import java.util.function.Function; + +import static java.util.Objects.requireNonNull; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION; +import static org.apache.hadoop.hdds.DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_KEYTAB_FILE_KEY; +import static org.apache.hadoop.hdds.DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_TOKEN_ENABLED; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECRET_KEY_EXPIRY_DURATION; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECRET_KEY_ROTATE_CHECK_DURATION; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECRET_KEY_ROTATE_DURATION; +import static org.apache.hadoop.hdds.StringUtils.string2Bytes; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.BLOCK_TOKEN_VERIFICATION_FAILED; +import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY; +import static org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig.ConfigStrings.HDDS_SCM_HTTP_KERBEROS_KEYTAB_FILE_KEY; +import static org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig.ConfigStrings.HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS; +import static org.apache.ozone.test.GenericTestUtils.assertExceptionContains; +import static org.apache.ozone.test.GenericTestUtils.waitFor; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +/** + * Integration test to verify block tokens in a secure cluster. 
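+ * <p>
+ * OM signs a block token for each block location using the current
+ * SCM-managed symmetric secret key, and datanodes verify the token on
+ * access. A minimal sketch of the verification these tests exercise
+ * (hypothetical wiring, not the actual datanode code):
+ * <pre>
+ *   OzoneBlockTokenIdentifier id = token.decodeIdentifier();
+ *   ManagedSecretKey key = secretKeyClient.getSecretKey(id.getSecretKeyId());
+ *   if (key == null || key.isExpired()
+ *       || !key.isValidSignature(id, token.getPassword())) {
+ *     // the request fails with BLOCK_TOKEN_VERIFICATION_FAILED
+ *   }
+ * </pre>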
+ */ +@InterfaceAudience.Private +public final class TestBlockTokens { + private static final Logger LOG = LoggerFactory + .getLogger(TestBlockTokens.class); + private static final String TEST_VOLUME = "testvolume"; + private static final String TEST_BUCKET = "testbucket"; + private static final String TEST_FILE = "testfile"; + private static final int ROTATE_DURATION_IN_MS = 3000; + private static final int EXPIRY_DURATION_IN_MS = 10000; + private static final int ROTATION_CHECK_DURATION_IN_MS = 100; + + @Rule + public Timeout timeout = Timeout.seconds(180); + + private static MiniKdc miniKdc; + private static OzoneConfiguration conf; + private static File workDir; + private static File ozoneKeytab; + private static File spnegoKeytab; + private static File testUserKeytab; + private static String testUserPrincipal; + private static String host; + private static String clusterId; + private static String scmId; + private static MiniOzoneHAClusterImpl cluster; + private static OzoneClient client; + private static BlockInputStreamFactory blockInputStreamFactory = + new BlockInputStreamFactoryImpl(); + + @BeforeClass + public static void init() throws Exception { + conf = new OzoneConfiguration(); + conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, "localhost"); + + ExitUtils.disableSystemExit(); + + workDir = + GenericTestUtils.getTestDir(TestBlockTokens.class.getSimpleName()); + clusterId = UUID.randomUUID().toString(); + scmId = UUID.randomUUID().toString(); + + startMiniKdc(); + setSecureConfig(); + createCredentialsInKDC(); + setSecretKeysConfig(); + startCluster(); + client = cluster.newClient(); + createTestData(); + } + + private static void createTestData() throws IOException { + client.getProxy().createVolume(TEST_VOLUME); + client.getProxy().createBucket(TEST_VOLUME, TEST_BUCKET); + byte[] data = string2Bytes(RandomStringUtils.randomAlphanumeric(1024)); + OzoneBucket bucket = client.getObjectStore().getVolume(TEST_VOLUME) + .getBucket(TEST_BUCKET); + try (OzoneOutputStream out = bucket.createKey(TEST_FILE, data.length)) { + org.apache.commons.io.IOUtils.write(data, out); + } + } + + @AfterClass + public static void stop() { + miniKdc.stop(); + IOUtils.close(LOG, client); + if (cluster != null) { + cluster.stop(); + } + DefaultConfigManager.clearDefaultConfigs(); + } + + @Test + public void blockTokensHappyCase() throws Exception { + ManagedSecretKey currentScmKey = + getScmSecretKeyManager().getCurrentSecretKey(); + OmKeyInfo keyInfo = getTestKeyInfo(); + + // assert block token points to the current SCM key. + assertEquals(currentScmKey.getId(), extractSecretKeyId(keyInfo)); + + // and the keyInfo can be used to read from datanodes. + readDataWithoutRetry(keyInfo); + + // after the rotation passes, the old token is still usable. + waitFor( + () -> !Objects.equals(getScmSecretKeyManager().getCurrentSecretKey(), + currentScmKey), + ROTATION_CHECK_DURATION_IN_MS, + ROTATE_DURATION_IN_MS + ROTATION_CHECK_DURATION_IN_MS); + readDataWithoutRetry(keyInfo); + } + + @Test + public void blockTokenFailsOnExpiredSecretKey() throws Exception { + OmKeyInfo keyInfo = getTestKeyInfo(); + UUID secretKeyId = extractSecretKeyId(keyInfo); + readDataWithoutRetry(keyInfo); + + // wait until the secret key expires. + ManagedSecretKey secretKey = + requireNonNull(getScmSecretKeyManager().getSecretKey(secretKeyId)); + waitFor(secretKey::isExpired, ROTATION_CHECK_DURATION_IN_MS, + EXPIRY_DURATION_IN_MS); + assertTrue(secretKey.isExpired()); + // verify that the read is denied because of the expired secret key. 
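+ // (the key is still known to the datanode, but tokens signed with a key
+ // older than HDDS_SECRET_KEY_EXPIRY_DURATION are rejected).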
+ StorageContainerException ex = assertThrows(StorageContainerException.class, + () -> readDataWithoutRetry(keyInfo)); + assertEquals(BLOCK_TOKEN_VERIFICATION_FAILED, ex.getResult()); + assertExceptionContains( + "Token can't be verified due to expired secret key", ex); + } + + @Test + public void blockTokenOnExpiredSecretKeyRetrySuccessful() throws Exception { + OmKeyInfo keyInfo = getTestKeyInfo(); + UUID secretKeyId = extractSecretKeyId(keyInfo); + readDataWithoutRetry(keyInfo); + + // wait until the secret key expires. + ManagedSecretKey secretKey = + requireNonNull(getScmSecretKeyManager().getSecretKey(secretKeyId)); + waitFor(secretKey::isExpired, ROTATION_CHECK_DURATION_IN_MS, + EXPIRY_DURATION_IN_MS); + assertTrue(secretKey.isExpired()); + // verify that the read now succeeds, because the retry function refetches + // the key info, which carries tokens signed with a fresh secret key. + readData(keyInfo, k -> { + try { + return getTestKeyInfo(); + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + } + + @Test + public void blockTokenFailsOnWrongSecretKeyId() throws Exception { + OmKeyInfo keyInfo = getTestKeyInfo(); + // replace block token secret key id with a wrong id. + for (OmKeyLocationInfoGroup v : keyInfo.getKeyLocationVersions()) { + for (OmKeyLocationInfo l : v.getLocationList()) { + Token<OzoneBlockTokenIdentifier> token = l.getToken(); + OzoneBlockTokenIdentifier tokenId = token.decodeIdentifier(); + tokenId.setSecretKeyId(UUID.randomUUID()); + token.setID(tokenId.getBytes()); + } + } + + // verify that the read is denied because of the unknown secret key. + StorageContainerException ex = + assertThrows(StorageContainerException.class, + () -> readDataWithoutRetry(keyInfo)); + assertEquals(BLOCK_TOKEN_VERIFICATION_FAILED, ex.getResult()); + assertExceptionContains("Can't find the signing secret key", ex); + } + + @Test + public void blockTokenFailsOnWrongPassword() throws Exception { + OmKeyInfo keyInfo = getTestKeyInfo(); + // replace the block token password with random bytes. + for (OmKeyLocationInfoGroup v : keyInfo.getKeyLocationVersions()) { + for (OmKeyLocationInfo l : v.getLocationList()) { + Token<OzoneBlockTokenIdentifier> token = l.getToken(); + token.setPassword(RandomUtils.nextBytes(100)); + } + } + + // verify that the read is denied because of the invalid password.
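+ // (the secret key id in the token is still valid, so the failure comes
+ // from the signature check itself).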
+ StorageContainerException ex = + assertThrows(StorageContainerException.class, + () -> readDataWithoutRetry(keyInfo)); + assertEquals(BLOCK_TOKEN_VERIFICATION_FAILED, ex.getResult()); + assertExceptionContains("Invalid token for user", ex); + } + + private UUID extractSecretKeyId(OmKeyInfo keyInfo) throws IOException { + OmKeyLocationInfo locationInfo = + keyInfo.getKeyLocationVersions().get(0).getLocationList().get(0); + Token token = locationInfo.getToken(); + return token.decodeIdentifier().getSecretKeyId(); + } + + private OmKeyInfo getTestKeyInfo() throws IOException { + OmKeyArgs arg = new OmKeyArgs.Builder() + .setVolumeName(TEST_VOLUME) + .setBucketName(TEST_BUCKET) + .setKeyName(TEST_FILE) + .build(); + return cluster.getOzoneManager() + .getKeyInfo(arg, false).getKeyInfo(); + } + + private void readDataWithoutRetry(OmKeyInfo keyInfo) throws IOException { + readData(keyInfo, null); + } + + private void readData(OmKeyInfo keyInfo, + Function retryFunc) throws IOException { + XceiverClientFactory xceiverClientManager = + ((RpcClient) client.getProxy()).getXceiverClientManager(); + try (InputStream is = KeyInputStream.getFromOmKeyInfo(keyInfo, + xceiverClientManager, + false, retryFunc, blockInputStreamFactory)) { + byte[] buf = new byte[100]; + int readBytes = is.read(buf, 0, 100); + assertEquals(100, readBytes); + } + } + + private SecretKeyManager getScmSecretKeyManager() { + return cluster.getActiveSCM().getSecretKeyManager(); + } + + private static void setSecretKeysConfig() { + // Secret key lifecycle configs. + conf.set(HDDS_SECRET_KEY_ROTATE_CHECK_DURATION, + ROTATION_CHECK_DURATION_IN_MS + "ms"); + conf.set(HDDS_SECRET_KEY_ROTATE_DURATION, ROTATE_DURATION_IN_MS + "ms"); + conf.set(HDDS_SECRET_KEY_EXPIRY_DURATION, EXPIRY_DURATION_IN_MS + "ms"); + + // enable tokens + conf.setBoolean(HDDS_BLOCK_TOKEN_ENABLED, true); + conf.setBoolean(HDDS_CONTAINER_TOKEN_ENABLED, true); + } + + private static void createCredentialsInKDC() throws Exception { + ScmConfig scmConfig = conf.getObject(ScmConfig.class); + SCMHTTPServerConfig httpServerConfig = + conf.getObject(SCMHTTPServerConfig.class); + createPrincipal(ozoneKeytab, scmConfig.getKerberosPrincipal()); + createPrincipal(spnegoKeytab, httpServerConfig.getKerberosPrincipal()); + createPrincipal(testUserKeytab, testUserPrincipal); + } + + private static void createPrincipal(File keytab, String... 
principal) + throws Exception { + miniKdc.createPrincipal(keytab, principal); + } + + private static void startMiniKdc() throws Exception { + Properties securityProperties = MiniKdc.createConf(); + miniKdc = new MiniKdc(securityProperties, workDir); + miniKdc.start(); + } + + private static void setSecureConfig() throws IOException { + conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true); + host = InetAddress.getLocalHost().getCanonicalHostName() + .toLowerCase(); + + conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.name()); + + String curUser = UserGroupInformation.getCurrentUser().getUserName(); + conf.set(OZONE_ADMINISTRATORS, curUser); + + String realm = miniKdc.getRealm(); + String hostAndRealm = host + "@" + realm; + conf.set(HDDS_SCM_KERBEROS_PRINCIPAL_KEY, "scm/" + hostAndRealm); + conf.set(HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY, "HTTP_SCM/" + hostAndRealm); + conf.set(OZONE_OM_KERBEROS_PRINCIPAL_KEY, "scm/" + hostAndRealm); + conf.set(OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY, "HTTP_OM/" + hostAndRealm); + conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, "scm/" + hostAndRealm); + + ozoneKeytab = new File(workDir, "scm.keytab"); + spnegoKeytab = new File(workDir, "http.keytab"); + testUserKeytab = new File(workDir, "testuser.keytab"); + testUserPrincipal = "test@" + realm; + + conf.set(HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY, + ozoneKeytab.getAbsolutePath()); + conf.set(HDDS_SCM_HTTP_KERBEROS_KEYTAB_FILE_KEY, + spnegoKeytab.getAbsolutePath()); + conf.set(OZONE_OM_KERBEROS_KEYTAB_FILE_KEY, + ozoneKeytab.getAbsolutePath()); + conf.set(OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE, + spnegoKeytab.getAbsolutePath()); + conf.set(DFS_DATANODE_KERBEROS_KEYTAB_FILE_KEY, + ozoneKeytab.getAbsolutePath()); + } + + private static void startCluster() + throws IOException, TimeoutException, InterruptedException { + OzoneManager.setTestSecureOmFlag(true); + MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf) + .setClusterId(clusterId) + .setSCMServiceId("TestSecretKey") + .setScmId(scmId) + .setNumDatanodes(3) + .setNumOfStorageContainerManagers(3) + .setNumOfOzoneManagers(1); + + cluster = (MiniOzoneHAClusterImpl) builder.build(); + cluster.waitForClusterToBeReady(); + } + +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java new file mode 100644 index 000000000000..c53ef2ea185b --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java @@ -0,0 +1,237 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.ozone; + +import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.hadoop.hdds.cli.OzoneAdmin; +import org.apache.hadoop.hdds.conf.DefaultConfigManager; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.ScmConfig; +import org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyManager; +import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.minikdc.MiniKdc; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.ozone.test.GenericTestUtils; +import org.apache.ratis.util.ExitUtils; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.Timeout; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.net.InetAddress; +import java.util.Properties; +import java.util.UUID; +import java.util.concurrent.TimeoutException; + +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION; +import static org.apache.hadoop.hdds.DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_KEYTAB_FILE_KEY; +import static org.apache.hadoop.hdds.DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_TOKEN_ENABLED; +import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY; +import static org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig.ConfigStrings.HDDS_SCM_HTTP_KERBEROS_KEYTAB_FILE_KEY; +import static org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig.ConfigStrings.HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS; +import static org.junit.jupiter.api.Assertions.assertEquals; + +/** + * Integration test class to verify block token CLI commands functionality in a + * secure cluster. 
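+ * <p>
+ * The happy path below drives the equivalent of the admin CLI call
+ * <pre>
+ *   ozone admin om fetch-key --service-id=&lt;om-service-id&gt;
+ * </pre>
+ * and compares the printed "Current Secret Key ID" against the id of the
+ * current secret key held by the active SCM.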
+ */ +@InterfaceAudience.Private +public final class TestBlockTokensCLI { + private static final Logger LOG = LoggerFactory + .getLogger(TestBlockTokensCLI.class); + + @Rule + public Timeout timeout = Timeout.seconds(180); + + private static MiniKdc miniKdc; + private static OzoneAdmin ozoneAdmin; + private static OzoneConfiguration conf; + private static File workDir; + private static File ozoneKeytab; + private static File spnegoKeytab; + private static String host; + private static String clusterId; + private static String scmId; + private static String omServiceId; + private static String scmServiceId; + private static MiniOzoneHAClusterImpl cluster; + private static OzoneClient client; + + @BeforeClass + public static void init() throws Exception { + conf = new OzoneConfiguration(); + conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, "localhost"); + + ExitUtils.disableSystemExit(); + + workDir = + GenericTestUtils.getTestDir(TestBlockTokensCLI.class.getSimpleName()); + clusterId = UUID.randomUUID().toString(); + scmId = UUID.randomUUID().toString(); + omServiceId = "om-service-test"; + scmServiceId = "scm-service-test"; + + startMiniKdc(); + setSecureConfig(); + createCredentialsInKDC(); + setSecretKeysConfig(); + startCluster(); + client = cluster.newClient(); + ozoneAdmin = new OzoneAdmin(conf); + } + + @AfterClass + public static void stop() { + miniKdc.stop(); + IOUtils.close(LOG, client); + if (cluster != null) { + cluster.stop(); + } + DefaultConfigManager.clearDefaultConfigs(); + } + + private SecretKeyManager getScmSecretKeyManager() { + return cluster.getActiveSCM().getSecretKeyManager(); + } + + private static void setSecretKeysConfig() { + // enable tokens + conf.setBoolean(HDDS_BLOCK_TOKEN_ENABLED, true); + conf.setBoolean(HDDS_CONTAINER_TOKEN_ENABLED, true); + } + + private static void createCredentialsInKDC() throws Exception { + ScmConfig scmConfig = conf.getObject(ScmConfig.class); + SCMHTTPServerConfig httpServerConfig = + conf.getObject(SCMHTTPServerConfig.class); + createPrincipal(ozoneKeytab, scmConfig.getKerberosPrincipal()); + createPrincipal(spnegoKeytab, httpServerConfig.getKerberosPrincipal()); + } + + private static void createPrincipal(File keytab, String... principal) + throws Exception { + miniKdc.createPrincipal(keytab, principal); + } + + private static void startMiniKdc() throws Exception { + Properties securityProperties = MiniKdc.createConf(); + miniKdc = new MiniKdc(securityProperties, workDir); + miniKdc.start(); + } + + private static void setSecureConfig() throws IOException { + conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true); + host = InetAddress.getLocalHost().getCanonicalHostName() + .toLowerCase(); + + conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.name()); + + String curUser = UserGroupInformation.getCurrentUser().getUserName(); + conf.set(OZONE_ADMINISTRATORS, curUser); + + String realm = miniKdc.getRealm(); + String hostAndRealm = host + "@" + realm; + conf.set(HDDS_SCM_KERBEROS_PRINCIPAL_KEY, "scm/" + hostAndRealm); + conf.set(HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY, "HTTP_SCM/" + hostAndRealm); + conf.set(OZONE_OM_KERBEROS_PRINCIPAL_KEY, "scm/" + hostAndRealm); + conf.set(OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY, "HTTP_OM/" + hostAndRealm); + conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, "scm/" + hostAndRealm); + + ozoneKeytab = new File(workDir, "scm.keytab"); + spnegoKeytab = new File(workDir, "http.keytab"); + + conf.set(HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY, + ozoneKeytab.getAbsolutePath()); + conf.set(HDDS_SCM_HTTP_KERBEROS_KEYTAB_FILE_KEY, + spnegoKeytab.getAbsolutePath()); + conf.set(OZONE_OM_KERBEROS_KEYTAB_FILE_KEY, + ozoneKeytab.getAbsolutePath()); + conf.set(OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE, + spnegoKeytab.getAbsolutePath()); + conf.set(DFS_DATANODE_KERBEROS_KEYTAB_FILE_KEY, + ozoneKeytab.getAbsolutePath()); + } + + @Test + public void testFetchKeyOMAdminCommand() throws UnsupportedEncodingException { + // capture stdout so the CLI output can be inspected, and keep the
+ // original stream so it can be restored afterwards. + PrintStream originalOut = System.out; + ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); + PrintStream printStream = new PrintStream(outputStream, true, "UTF-8"); + System.setOut(printStream); + + String[] args = + new String[]{"om", "fetch-key", "--service-id=" + omServiceId}; + ozoneAdmin.execute(args); + + String actualOutput = outputStream.toString("UTF-8"); + System.setOut(originalOut); + + String actualUUID = getSecretKeyIdFromOutput(actualOutput); + String expectedUUID = + getScmSecretKeyManager().getCurrentSecretKey().getId().toString(); + assertEquals(expectedUUID, actualUUID); + } + + private String getSecretKeyIdFromOutput(String output) { + // Extract the current secret key id from the CLI output. + String[] lines = output.split(System.lineSeparator()); + for (String line : lines) { + if (line.startsWith("Current Secret Key ID: ")) { + return line.substring("Current Secret Key ID: ".length()).trim(); + } + } + return null; + } + + private static void startCluster() + throws IOException, TimeoutException, InterruptedException { + OzoneManager.setTestSecureOmFlag(true); + MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf) + .setClusterId(clusterId) + .setSCMServiceId(scmServiceId) + .setOMServiceId(omServiceId) + .setScmId(scmId) + .setNumDatanodes(3) + .setNumOfStorageContainerManagers(3) + .setNumOfOzoneManagers(1); + + cluster = (MiniOzoneHAClusterImpl) builder.build(); + cluster.waitForClusterToBeReady(); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java index bdc4a4284ac3..63b6bdb131ef 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java +++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java @@ -138,7 +138,7 @@ public void testContainerRandomPort() throws IOException { for (int i = 0; i < 3; i++) { stateMachines.add(new DatanodeStateMachine( - randomDatanodeDetails(), ozoneConf, null, null, null)); + randomDatanodeDetails(), ozoneConf)); } //we need to start all the servers to get the fix ports @@ -183,11 +183,11 @@ public void testContainerRandomPort() throws IOException { ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false); try ( DatanodeStateMachine sm1 = new DatanodeStateMachine( - randomDatanodeDetails(), ozoneConf, null, null, null); + randomDatanodeDetails(), ozoneConf); DatanodeStateMachine sm2 = new DatanodeStateMachine( - randomDatanodeDetails(), ozoneConf, null, null, null); + randomDatanodeDetails(), ozoneConf); DatanodeStateMachine sm3 = new DatanodeStateMachine( - randomDatanodeDetails(), ozoneConf, null, null, null); + randomDatanodeDetails(), ozoneConf); ) { HashSet ports = new HashSet(); assertTrue(ports.add(sm1.getContainer().getReadChannel().getIPCPort())); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecretKeysApi.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecretKeysApi.java new file mode 100644 index 000000000000..6a1166140c0d --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecretKeysApi.java @@ -0,0 +1,366 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.ozone; + +import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.hadoop.hdds.conf.DefaultConfigManager; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.SecretKeyProtocol; +import org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig; +import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.hdds.security.exception.SCMSecretKeyException; +import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.minikdc.MiniKdc; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authorize.AuthorizationException; +import org.apache.hadoop.util.ExitUtil; +import org.apache.ozone.test.GenericTestUtils; +import org.apache.ratis.util.ExitUtils; +import org.jetbrains.annotations.NotNull; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.Timeout; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.net.InetAddress; +import java.util.List; +import java.util.Properties; +import java.util.UUID; +import java.util.concurrent.TimeoutException; + +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION; +import static org.apache.hadoop.hdds.DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_KEYTAB_FILE_KEY; +import static org.apache.hadoop.hdds.DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECRET_KEY_EXPIRY_DURATION; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECRET_KEY_ROTATE_CHECK_DURATION; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECRET_KEY_ROTATE_DURATION; +import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY; +import static org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig.ConfigStrings.HDDS_SCM_HTTP_KERBEROS_KEYTAB_FILE_KEY; +import static org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig.ConfigStrings.HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.hdds.security.exception.SCMSecretKeyException.ErrorCode.SECRET_KEY_NOT_ENABLED; +import static org.apache.hadoop.hdds.utils.HddsServerUtil.getSecretKeyClientForDatanode; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS; +import static org.junit.Assert.assertEquals; +import static 
org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +/** + * Integration test to verify symmetric SecretKeys APIs in a secure cluster. + */ +@InterfaceAudience.Private +public final class TestSecretKeysApi { + private static final Logger LOG = LoggerFactory + .getLogger(TestSecretKeysApi.class); + + @Rule + public Timeout timeout = Timeout.seconds(500); + + private MiniKdc miniKdc; + private OzoneConfiguration conf; + private File workDir; + private File ozoneKeytab; + private File spnegoKeytab; + private File testUserKeytab; + private String testUserPrincipal; + private String ozonePrincipal; + private String clusterId; + private String scmId; + private MiniOzoneHAClusterImpl cluster; + + @Before + public void init() throws Exception { + conf = new OzoneConfiguration(); + conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, "localhost"); + + ExitUtils.disableSystemExit(); + ExitUtil.disableSystemExit(); + + workDir = GenericTestUtils.getTestDir(getClass().getSimpleName()); + clusterId = UUID.randomUUID().toString(); + scmId = UUID.randomUUID().toString(); + + startMiniKdc(); + setSecureConfig(); + createCredentialsInKDC(); + } + + @After + public void stop() { + miniKdc.stop(); + if (cluster != null) { + cluster.stop(); + } + DefaultConfigManager.clearDefaultConfigs(); + } + + private void createCredentialsInKDC() throws Exception { + SCMHTTPServerConfig httpServerConfig = + conf.getObject(SCMHTTPServerConfig.class); + createPrincipal(ozoneKeytab, ozonePrincipal); + createPrincipal(spnegoKeytab, httpServerConfig.getKerberosPrincipal()); + createPrincipal(testUserKeytab, testUserPrincipal); + } + + private void createPrincipal(File keytab, String... 
principal) + throws Exception { + miniKdc.createPrincipal(keytab, principal); + } + + private void startMiniKdc() throws Exception { + Properties securityProperties = MiniKdc.createConf(); + miniKdc = new MiniKdc(securityProperties, workDir); + miniKdc.start(); + } + + private void setSecureConfig() throws IOException { + conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true); + String host = InetAddress.getLocalHost().getCanonicalHostName() + .toLowerCase(); + + conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.name()); + + String curUser = UserGroupInformation.getCurrentUser().getUserName(); + conf.set(OZONE_ADMINISTRATORS, curUser); + + String realm = miniKdc.getRealm(); + String hostAndRealm = host + "@" + realm; + ozonePrincipal = "scm/" + hostAndRealm; + conf.set(HDDS_SCM_KERBEROS_PRINCIPAL_KEY, ozonePrincipal); + conf.set(HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY, "HTTP_SCM/" + hostAndRealm); + conf.set(OZONE_OM_KERBEROS_PRINCIPAL_KEY, ozonePrincipal); + conf.set(OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY, "HTTP_OM/" + hostAndRealm); + conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, ozonePrincipal); + + ozoneKeytab = new File(workDir, "scm.keytab"); + spnegoKeytab = new File(workDir, "http.keytab"); + testUserKeytab = new File(workDir, "testuser.keytab"); + testUserPrincipal = "test@" + realm; + + conf.set(HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY, + ozoneKeytab.getAbsolutePath()); + conf.set(HDDS_SCM_HTTP_KERBEROS_KEYTAB_FILE_KEY, + spnegoKeytab.getAbsolutePath()); + conf.set(OZONE_OM_KERBEROS_KEYTAB_FILE_KEY, + ozoneKeytab.getAbsolutePath()); + conf.set(OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE, + spnegoKeytab.getAbsolutePath()); + conf.set(DFS_DATANODE_KERBEROS_KEYTAB_FILE_KEY, + ozoneKeytab.getAbsolutePath()); + + conf.setBoolean(HADOOP_SECURITY_AUTHORIZATION, true); + } + + /** + * Test secret key APIs in the happy case. + */ + @Test + public void testSecretKeyApiSuccess() throws Exception { + enableBlockToken(); + // set a low rotation period of 1s with a 3s expiry, so that 3 active + // keys are expected at any moment. + conf.set(HDDS_SECRET_KEY_ROTATE_CHECK_DURATION, "100ms"); + conf.set(HDDS_SECRET_KEY_ROTATE_DURATION, "1s"); + conf.set(HDDS_SECRET_KEY_EXPIRY_DURATION, "3000ms"); + + startCluster(3); + SecretKeyProtocol secretKeyProtocol = getSecretKeyProtocol(); + + // start the test only once the full set of keys is present. + GenericTestUtils.waitFor(() -> { + try { + return secretKeyProtocol.getAllSecretKeys().size() >= 3; + } catch (IOException ex) { + throw new RuntimeException(ex); + } + }, 100, 4_000); + + ManagedSecretKey initialKey = secretKeyProtocol.getCurrentSecretKey(); + assertNotNull(initialKey); + List<ManagedSecretKey> initialKeys = secretKeyProtocol.getAllSecretKeys(); + assertEquals(initialKey, initialKeys.get(0)); + + LOG.info("Initial active key: {}", initialKey); + LOG.info("Initial keys: {}", initialKeys); + + // wait for the next rotation. + GenericTestUtils.waitFor(() -> { + try { + ManagedSecretKey newCurrentKey = + secretKeyProtocol.getCurrentSecretKey(); + return !newCurrentKey.equals(initialKey); + } catch (IOException ex) { + throw new RuntimeException(ex); + } + }, 100, 1500); + ManagedSecretKey updatedKey = secretKeyProtocol.getCurrentSecretKey(); + List<ManagedSecretKey> updatedKeys = secretKeyProtocol.getAllSecretKeys(); + + LOG.info("Updated active key: {}", updatedKey); + LOG.info("Updated keys: {}", updatedKeys); + + assertEquals(updatedKey, updatedKeys.get(0)); + assertEquals(initialKey, updatedKeys.get(1)); + + // assert getSecretKey by ID.
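+ // (a known id must return the key, while a random, non-existent id
+ // must return null).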
+ ManagedSecretKey keyById = secretKeyProtocol.getSecretKey( + updatedKey.getId()); + assertNotNull(keyById); + ManagedSecretKey nonExisting = secretKeyProtocol.getSecretKey( + UUID.randomUUID()); + assertNull(nonExisting); + } + + /** + * Verify API behavior when block token is not enabled. + */ + @Test + public void testSecretKeyApiNotEnabled() throws Exception { + startCluster(1); + SecretKeyProtocol secretKeyProtocol = getSecretKeyProtocol(); + + SCMSecretKeyException ex = assertThrows(SCMSecretKeyException.class, + secretKeyProtocol::getCurrentSecretKey); + assertEquals(SECRET_KEY_NOT_ENABLED, ex.getErrorCode()); + + ex = assertThrows(SCMSecretKeyException.class, + () -> secretKeyProtocol.getSecretKey(UUID.randomUUID())); + assertEquals(SECRET_KEY_NOT_ENABLED, ex.getErrorCode()); + + ex = assertThrows(SCMSecretKeyException.class, + secretKeyProtocol::getAllSecretKeys); + assertEquals(SECRET_KEY_NOT_ENABLED, ex.getErrorCode()); + } + + /** + * Verify API behavior when the SCM leader fails. + */ + @Test + public void testSecretKeyAfterSCMFailover() throws Exception { + enableBlockToken(); + // set long durations, so that no rotation happens during the SCM + // leader change. + conf.set(HDDS_SECRET_KEY_ROTATE_CHECK_DURATION, "10m"); + conf.set(HDDS_SECRET_KEY_ROTATE_DURATION, "1d"); + conf.set(HDDS_SECRET_KEY_EXPIRY_DURATION, "7d"); + + startCluster(3); + SecretKeyProtocol securityProtocol = getSecretKeyProtocol(); + List<ManagedSecretKey> keysInitial = securityProtocol.getAllSecretKeys(); + LOG.info("Keys before fail over: {}.", keysInitial); + + // turn the current SCM leader off. + StorageContainerManager activeSCM = cluster.getActiveSCM(); + cluster.shutdownStorageContainerManager(activeSCM); + // wait for a new SCM leader to be elected and become ready. + cluster.waitForSCMToBeReady(); + + List<ManagedSecretKey> keysAfter = securityProtocol.getAllSecretKeys(); + LOG.info("Keys after fail over: {}.", keysAfter); + + assertEquals(keysInitial.size(), keysAfter.size()); + for (int i = 0; i < keysInitial.size(); i++) { + assertEquals(keysInitial.get(i), keysAfter.get(i)); + } + } + + @Test + public void testSecretKeyAuthorization() throws Exception { + enableBlockToken(); + conf.setBoolean(HADOOP_SECURITY_AUTHORIZATION, true); + startCluster(1); + + // When HADOOP_SECURITY_AUTHORIZATION is enabled, the SecretKey protocol + // is only available to datanodes and OM; any other authenticated user + // can't access the protocol. + SecretKeyProtocol secretKeyProtocol = + getSecretKeyProtocol(testUserPrincipal, testUserKeytab); + RemoteException ex = + assertThrows(RemoteException.class, + secretKeyProtocol::getCurrentSecretKey); + assertEquals(AuthorizationException.class.getName(), ex.getClassName()); + assertTrue(ex.getMessage().contains( + "User test@EXAMPLE.COM (auth:KERBEROS) is not authorized " + + "for protocol")); + } + + @Test + public void testSecretKeyWithoutAuthorization() throws Exception { + enableBlockToken(); + conf.setBoolean(HADOOP_SECURITY_AUTHORIZATION, false); + startCluster(1); + + // When HADOOP_SECURITY_AUTHORIZATION is not enabled, any other + // authenticated user can access the protocol.
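+ // Here the plain test user is expected to fetch the current key directly.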
+ SecretKeyProtocol secretKeyProtocol = + getSecretKeyProtocol(testUserPrincipal, testUserKeytab); + assertNotNull(secretKeyProtocol.getCurrentSecretKey()); + } + + private void startCluster(int numSCMs) + throws IOException, TimeoutException, InterruptedException { + OzoneManager.setTestSecureOmFlag(true); + MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf) + .setClusterId(clusterId) + .setSCMServiceId("TestSecretKey") + .setScmId(scmId) + .setNumDatanodes(3) + .setNumOfStorageContainerManagers(numSCMs) + .setNumOfOzoneManagers(1); + + cluster = (MiniOzoneHAClusterImpl) builder.build(); + cluster.waitForClusterToBeReady(); + } + + @NotNull + private SecretKeyProtocol getSecretKeyProtocol() throws IOException { + return getSecretKeyProtocol(ozonePrincipal, ozoneKeytab); + } + + @NotNull + private SecretKeyProtocol getSecretKeyProtocol( + String user, File keyTab) throws IOException { + UserGroupInformation ugi = + UserGroupInformation.loginUserFromKeytabAndReturnUGI( + user, keyTab.getCanonicalPath()); + ugi.setAuthenticationMethod(KERBEROS); + return getSecretKeyClientForDatanode(conf, ugi); + } + + private void enableBlockToken() { + conf.setBoolean(HDDS_BLOCK_TOKEN_ENABLED, true); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java index 7b7be43edf35..a6b2b63a0a39 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java @@ -31,17 +31,12 @@ import java.time.Duration; import java.time.LocalDate; import java.time.LocalDateTime; -import java.time.ZoneId; import java.time.temporal.ChronoUnit; -import java.util.ArrayList; import java.util.Date; -import java.util.List; import java.util.Properties; import java.util.UUID; import java.util.concurrent.Callable; -import org.apache.commons.validator.routines.DomainValidator; -import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.conf.DefaultConfigManager; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -52,30 +47,20 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.ScmInfo; import org.apache.hadoop.hdds.scm.HddsTestUtils; -import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.ha.HASecurityUtils; import org.apache.hadoop.hdds.scm.ha.SCMHANodeDetails; import org.apache.hadoop.hdds.scm.ha.SCMHAUtils; import org.apache.hadoop.hdds.scm.ha.SCMRatisServerImpl; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.scm.proxy.SCMContainerLocationFailoverProxyProvider; import org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig; import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.security.exception.SCMSecurityException; -import org.apache.hadoop.hdds.security.token.ContainerTokenIdentifier; import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.authority.CAType; -import org.apache.hadoop.hdds.security.x509.certificate.authority.DefaultApprover; -import 
org.apache.hadoop.hdds.security.x509.certificate.authority.profile.DefaultProfile; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClientTestImpl; import org.apache.hadoop.hdds.security.x509.certificate.client.DNCertificateClient; import org.apache.hadoop.hdds.security.x509.certificate.authority.DefaultCAServer; -import org.apache.hadoop.hdds.security.x509.certificate.client.DefaultCertificateClient; import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; -import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateSignRequest; import org.apache.hadoop.hdds.security.x509.certificate.utils.SelfSignedCertificate; import org.apache.hadoop.hdds.security.x509.exception.CertificateException; import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; @@ -87,8 +72,6 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.minikdc.MiniKdc; import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.common.Storage; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMStorage; @@ -96,9 +79,6 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.S3SecretValue; -import org.apache.hadoop.ozone.om.helpers.ServiceInfoEx; -import org.apache.hadoop.ozone.om.protocolPB.GrpcOmTransport; -import org.apache.hadoop.ozone.om.protocolPB.GrpcOmTransportFactory; import org.apache.hadoop.ozone.om.protocolPB.OmTransportFactory; import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; import org.apache.hadoop.ozone.security.OMCertificateClient; @@ -117,13 +97,8 @@ import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.StringUtils; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_EXPIRY_TIME; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_TOKEN_ENABLED; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_DEFAULT_DURATION; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_MAX_DURATION; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_DEFAULT_DURATION_DEFAULT; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_RENEW_GRACE_DURATION; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_GRPC_TLS_ENABLED; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY; import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_PRINCIPAL_KEY; @@ -143,7 +118,6 @@ import static org.apache.hadoop.hdds.utils.HddsServerUtil.getScmSecurityClient; import static org.apache.hadoop.net.ServerSocketUtil.getPort; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY; import static org.apache.hadoop.ozone.om.OMConfigKeys.DELEGATION_TOKEN_MAX_LIFETIME_KEY; import static 
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE; @@ -152,7 +126,6 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_S3_GPRC_SERVER_ENABLED; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_TRANSPORT_CLASS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_EXPIRED; import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS; import org.apache.ratis.protocol.ClientId; @@ -1195,199 +1168,6 @@ public void testDelegationTokenRenewCrossCertificateRenew() throws Exception { } } - /** - * Tests container token renewal after a certificate renew. - */ - @Test - public void testContainerTokenRenewCrossCertificateRenew() throws Exception { - // Setup secure SCM for start. - final int certLifetime = 40 * 1000; // 40s - conf.set(HDDS_X509_DEFAULT_DURATION, - Duration.ofMillis(certLifetime).toString()); - conf.set(HDDS_X509_MAX_DURATION, - Duration.ofMillis(certLifetime).toString()); - conf.set(HDDS_X509_RENEW_GRACE_DURATION, - Duration.ofMillis(certLifetime - 15 * 1000).toString()); - conf.setBoolean(HDDS_CONTAINER_TOKEN_ENABLED, true); - conf.setLong(HDDS_BLOCK_TOKEN_EXPIRY_TIME, certLifetime - 20 * 1000); - - initSCM(); - scm = HddsTestUtils.getScmSimple(conf); - try { - CertificateClientTestImpl certClient = - new CertificateClientTestImpl(conf, true); - X509Certificate scmCert = certClient.getCertificate(); - String scmCertId1 = scmCert.getSerialNumber().toString(); - // Start SCM - scm.setScmCertificateClient(certClient); - scm.start(); - - UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); - // Get SCM client which will authenticate via Kerberos - SCMContainerLocationFailoverProxyProvider proxyProvider = - new SCMContainerLocationFailoverProxyProvider(conf, ugi); - StorageContainerLocationProtocolClientSideTranslatorPB scmClient = - new StorageContainerLocationProtocolClientSideTranslatorPB( - proxyProvider); - - // Since client is already connected get a delegation token - ContainerID containerID = new ContainerID(1); - Token token1 = scmClient.getContainerToken(containerID); - - // Check if token is of right kind and renewer is running instance - assertNotNull(token1); - assertEquals(ContainerTokenIdentifier.KIND, token1.getKind()); - assertEquals(containerID.toString(), token1.getService().toString()); - ContainerTokenIdentifier temp = new ContainerTokenIdentifier(); - ByteArrayInputStream buf = new ByteArrayInputStream( - token1.getIdentifier()); - DataInputStream in = new DataInputStream(buf); - temp.readFields(in); - assertEquals(scmCertId1, temp.getCertSerialId()); - - // Wait for SCM certificate to renew - GenericTestUtils.waitFor(() -> !scmCertId1.equals( - certClient.getCertificate().getSerialNumber().toString()), - 100, certLifetime); - String scmCertId2 = - certClient.getCertificate().getSerialNumber().toString(); - assertNotEquals(scmCertId1, scmCertId2); - - // Get a new container token - containerID = new ContainerID(2); - Token token2 = scmClient.getContainerToken(containerID); - buf = new ByteArrayInputStream(token2.getIdentifier()); - in = new DataInputStream(buf); - temp.readFields(in); - assertEquals(scmCertId2, temp.getCertSerialId()); - } finally { - if (scm != null) { - scm.stop(); - } - } - } - - /** - * Test functionality to get SCM signed certificate for OM. 
- */ - @Test - @Ignore("HDDS-8764") - public void testOMGrpcServerCertificateRenew() throws Exception { - initSCM(); - try { - scm = HddsTestUtils.getScmSimple(conf); - scm.start(); - - conf.set(OZONE_METADATA_DIRS, omMetaDirPath.toString()); - int certLifetime = 30; // second - conf.set(HDDS_X509_DEFAULT_DURATION, - Duration.ofSeconds(certLifetime).toString()); - conf.setInt(OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY, 2); - - // initialize OmStorage, save om Cert and CA Certs to disk - OMStorage omStore = new OMStorage(conf); - omStore.setClusterId(clusterId); - omStore.setOmId(omId); - - // Prepare the certificates for OM before OM start - SecurityConfig securityConfig = new SecurityConfig(conf); - CertificateClient scmCertClient = scm.getScmCertificateClient(); - CertificateCodec certCodec = new CertificateCodec(securityConfig, "om"); - X509Certificate scmCert = scmCertClient.getCertificate(); - X509Certificate rootCert = scmCertClient.getCACertificate(); - X509CertificateHolder certHolder = generateX509CertHolder(conf, keyPair, - new KeyPair(scmCertClient.getPublicKey(), - scmCertClient.getPrivateKey()), scmCert, - "om_cert", clusterId); - String certId = certHolder.getSerialNumber().toString(); - certCodec.writeCertificate(certHolder); - certCodec.writeCertificate(CertificateCodec.getCertificateHolder(scmCert), - String.format(DefaultCertificateClient.CERT_FILE_NAME_FORMAT, - CAType.SUBORDINATE.getFileNamePrefix() + - scmCert.getSerialNumber().toString())); - certCodec.writeCertificate(CertificateCodec.getCertificateHolder( - scmCertClient.getCACertificate()), - String.format(DefaultCertificateClient.CERT_FILE_NAME_FORMAT, - CAType.ROOT.getFileNamePrefix() + - rootCert.getSerialNumber().toString())); - omStore.setOmCertSerialId(certId); - omStore.initialize(); - - conf.setBoolean(HDDS_GRPC_TLS_ENABLED, true); - conf.setBoolean(OZONE_OM_S3_GPRC_SERVER_ENABLED, true); - conf.setBoolean(HddsConfigKeys.HDDS_GRPC_TLS_TEST_CERT, true); - OzoneManager.setTestSecureOmFlag(true); - UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); - // In this process, SCM has already login using Kerberos. So pass - // specific UGI to DefaultCertificateClient and OzoneManager to avoid - // conflict with SCM procedure. 
- DefaultCertificateClient.setUgi(ugi); - OzoneManager.setUgi(ugi); - om = OzoneManager.createOm(conf); - om.start(); - - CertificateClient omCertClient = om.getCertificateClient(); - X509Certificate omCert = omCertClient.getCertificate(); - X509Certificate caCert = omCertClient.getCACertificate(); - X509Certificate rootCaCert = omCertClient.getRootCACertificate(); - List certList = new ArrayList<>(); - certList.add(caCert); - certList.add(rootCaCert); - // set certificates in GrpcOmTransport - GrpcOmTransport.setCaCerts(certList); - - GenericTestUtils.waitFor(() -> om.isLeaderReady(), 500, 10000); - String transportCls = GrpcOmTransportFactory.class.getName(); - conf.set(OZONE_OM_TRANSPORT_CLASS, transportCls); - try (OzoneClient client = OzoneClientFactory.getRpcClient(conf)) { - - ServiceInfoEx serviceInfoEx = client.getObjectStore() - .getClientProxy().getOzoneManagerClient().getServiceInfo(); - Assert.assertTrue(serviceInfoEx.getCaCertificate().equals( - CertificateCodec.getPEMEncodedString(caCert))); - - // Wait for OM certificate to renewed - GenericTestUtils.waitFor(() -> - !omCert.getSerialNumber().toString().equals( - omCertClient.getCertificate().getSerialNumber().toString()), - 500, certLifetime * 1000); - - // rerun the command using old client, it should succeed - serviceInfoEx = client.getObjectStore() - .getClientProxy().getOzoneManagerClient().getServiceInfo(); - Assert.assertTrue(serviceInfoEx.getCaCertificate().equals( - CertificateCodec.getPEMEncodedString(caCert))); - } - - // get new client, it should succeed. - try { - OzoneClient client1 = OzoneClientFactory.getRpcClient(conf); - client1.close(); - } catch (Exception e) { - System.out.println("OzoneClientFactory.getRpcClient failed for " + - e.getMessage()); - fail("Create client should succeed for certificate is renewed"); - } - - // Wait for old OM certificate to expire - GenericTestUtils.waitFor(() -> omCert.getNotAfter().before(new Date()), - 500, certLifetime * 1000); - // get new client, it should succeed too. - try { - OzoneClient client1 = OzoneClientFactory.getRpcClient(conf); - client1.close(); - } catch (Exception e) { - System.out.println("OzoneClientFactory.getRpcClient failed for " + - e.getMessage()); - fail("Create client should succeed for certificate is renewed"); - } - } finally { - DefaultCertificateClient.setUgi(null); - OzoneManager.setUgi(null); - GrpcOmTransport.setCaCerts(null); - } - } - public void validateCertificate(X509Certificate cert) throws Exception { // Assert that we indeed have a self signed certificate. @@ -1453,47 +1233,4 @@ private static X509CertificateHolder generateX509CertHolder( .setScmID("test") .build(); } - - private static X509CertificateHolder generateX509CertHolder( - OzoneConfiguration conf, KeyPair keyPair, KeyPair rootKeyPair, - X509Certificate rootCert, String subject, - String clusterId) throws Exception { - // Generate normal certificate, signed by RootCA certificate - SecurityConfig secConfig = new SecurityConfig(conf); - DefaultApprover approver = new DefaultApprover(new DefaultProfile(), - secConfig); - - CertificateSignRequest.Builder csrBuilder = - new CertificateSignRequest.Builder(); - // Get host name. 
- csrBuilder.setKey(keyPair) - .setConfiguration(conf) - .setScmID("test") - .setClusterID(clusterId) - .setSubject(subject) - .setDigitalSignature(true) - .setDigitalEncryption(true); - - addIpAndDnsDataToBuilder(csrBuilder); - LocalDateTime start = LocalDateTime.now(); - String certDuration = conf.get(HDDS_X509_DEFAULT_DURATION, - HDDS_X509_DEFAULT_DURATION_DEFAULT); - X509CertificateHolder certificateHolder = - approver.sign(secConfig, rootKeyPair.getPrivate(), - new X509CertificateHolder(rootCert.getEncoded()), - Date.from(start.atZone(ZoneId.systemDefault()).toInstant()), - Date.from(start.plus(Duration.parse(certDuration)) - .atZone(ZoneId.systemDefault()).toInstant()), - csrBuilder.build(), "test", clusterId); - return certificateHolder; - } - - private static void addIpAndDnsDataToBuilder( - CertificateSignRequest.Builder csrBuilder) throws IOException { - DomainValidator validator = DomainValidator.getInstance(); - // Add all valid ips. - List inetAddresses = - OzoneSecurityUtil.getValidInetsForCurrentHost(); - csrBuilder.addInetAddresses(inetAddresses, validator); - } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/SecretKeyTestClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/SecretKeyTestClient.java new file mode 100644 index 000000000000..8742560e25d6 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/SecretKeyTestClient.java @@ -0,0 +1,73 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.client; + +import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyClient; + +import javax.crypto.KeyGenerator; +import javax.crypto.SecretKey; +import java.security.NoSuchAlgorithmException; +import java.time.Duration; +import java.time.Instant; +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; + +/** + * Test implementation of {@link SecretKeyClient}. + */ +public class SecretKeyTestClient implements SecretKeyClient { + private final Map keysMap = new HashMap<>(); + private ManagedSecretKey current; + + public SecretKeyTestClient() { + rotate(); + } + + public void rotate() { + this.current = generateKey(); + keysMap.put(current.getId(), current); + } + + @Override + public ManagedSecretKey getCurrentSecretKey() { + return current; + } + + @Override + public ManagedSecretKey getSecretKey(UUID id) { + return keysMap.get(id); + } + + private ManagedSecretKey generateKey() { + KeyGenerator keyGen = null; + try { + keyGen = KeyGenerator.getInstance("HmacSHA256"); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException("Should never happen", e); + } + SecretKey secretKey = keyGen.generateKey(); + return new ManagedSecretKey( + UUID.randomUUID(), + Instant.now(), + Instant.now().plus(Duration.ofHours(1)), + secretKey + ); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java index e925a9cbd327..e87e3edc9cb2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java @@ -38,6 +38,7 @@ import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; +import org.apache.hadoop.ozone.client.SecretKeyTestClient; import org.apache.hadoop.ozone.client.io.KeyOutputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.container.TestHelper; @@ -114,6 +115,7 @@ public void setup() throws Exception { MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1) .setHbInterval(200) .setCertificateClient(new CertificateClientTestImpl(conf)) + .setSecretKeyClient(new SecretKeyTestClient()) .build(); cluster.setWaitForClusterToBeReadyTimeout(300000); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java index 146fffa98ab8..cd76ee71d40d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java @@ -30,6 +30,7 @@ import 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java
index 146fffa98ab8..cd76ee71d40d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java
@@ -30,6 +30,7 @@
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.SecretKeyTestClient;
 import org.apache.hadoop.ozone.client.io.KeyOutputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
@@ -117,6 +118,7 @@ public void setup() throws Exception {
         .setStreamBufferSizeUnit(StorageUnit.BYTES)
         .setHbInterval(200)
         .setCertificateClient(new CertificateClientTestImpl(conf))
+        .setSecretKeyClient(new SecretKeyTestClient())
         .build();
     cluster.waitForClusterToBeReady();
     cluster.getOzoneManager().startSecretManager();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
index bbf459f42630..f37cf405f672 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java
@@ -60,6 +60,7 @@
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.hdds.scm.storage.MultipartInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
+import org.apache.hadoop.ozone.client.SecretKeyTestClient;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
@@ -154,6 +155,7 @@ public static void init() throws Exception {
         .setChunkSize(CHUNK_SIZE)
         .setStreamBufferSizeUnit(StorageUnit.BYTES)
         .setCertificateClient(certificateClientTest)
+        .setSecretKeyClient(new SecretKeyTestClient())
         .build();
     cluster.getOzoneManager().startSecretManager();
     cluster.waitForClusterToBeReady();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
index b8fc491769b4..353023f18604 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java
@@ -27,7 +27,6 @@
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClientTestImpl;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.TableIterator;
@@ -43,6 +42,7 @@
 import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.BucketArgs;
+import org.apache.hadoop.ozone.client.SecretKeyTestClient;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
@@ -62,7 +62,6 @@
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.S3Authentication;
 import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status;
 import
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.VolumeInfo; -import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSecretManager; import org.apache.hadoop.security.UserGroupInformation; import org.apache.ozone.test.GenericTestUtils; import org.apache.ozone.test.LambdaTestUtils; @@ -97,7 +96,6 @@ public class TestSecureOzoneRpcClient extends TestOzoneRpcClient { private static final String CLUSTER_ID = UUID.randomUUID().toString(); private static File testDir; private static OzoneConfiguration conf; - private static OzoneBlockTokenSecretManager secretManager; /** * Create a MiniOzoneCluster for testing. @@ -131,10 +129,8 @@ public static void init() throws Exception { .setScmId(SCM_ID) .setClusterId(CLUSTER_ID) .setCertificateClient(certificateClientTest) + .setSecretKeyClient(new SecretKeyTestClient()) .build(); - secretManager = new OzoneBlockTokenSecretManager(new SecurityConfig(conf), - 60 * 60); - secretManager.start(certificateClientTest); cluster.getOzoneManager().startSecretManager(); cluster.waitForClusterToBeReady(); ozClient = OzoneClientFactory.getRpcClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 52aaa6a9c5ab..6bf927d62975 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -80,7 +80,7 @@ public void testCreateOzoneContainer() throws Exception { DatanodeStateMachine dsm = Mockito.mock(DatanodeStateMachine.class); Mockito.when(dsm.getDatanodeDetails()).thenReturn(datanodeDetails); Mockito.when(context.getParent()).thenReturn(dsm); - container = new OzoneContainer(datanodeDetails, conf, context, null); + container = new OzoneContainer(datanodeDetails, conf, context); //Set clusterId and manually start ozone container. 
container.start(UUID.randomUUID().toString()); @@ -113,8 +113,7 @@ public void testOzoneContainerStart() throws Exception { DatanodeStateMachine dsm = Mockito.mock(DatanodeStateMachine.class); Mockito.when(dsm.getDatanodeDetails()).thenReturn(datanodeDetails); Mockito.when(context.getParent()).thenReturn(dsm); - container = new OzoneContainer(datanodeDetails, conf, - context, null); + container = new OzoneContainer(datanodeDetails, conf, context); String clusterId = UUID.randomUUID().toString(); container.start(clusterId); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java index 22d55992cf8b..5ffe4037f754 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java @@ -27,12 +27,13 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.pipeline.MockPipeline; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyClient; import org.apache.hadoop.hdds.security.token.ContainerTokenIdentifier; import org.apache.hadoop.hdds.security.token.ContainerTokenSecretManager; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClientTestImpl; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.client.SecretKeyTestClient; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.hdds.scm.XceiverClientGrpc; import org.apache.hadoop.hdds.scm.XceiverClientSpi; @@ -99,6 +100,7 @@ public class TestOzoneContainerWithTLS { private OzoneConfiguration conf; private ContainerTokenSecretManager secretManager; private CertificateClientTestImpl caClient; + private SecretKeyClient secretKeyClient; private boolean containerTokenEnabled; private int certLifetime = 15 * 1000; // 15s @@ -144,8 +146,9 @@ public void setup() throws Exception { TimeUnit.MILLISECONDS); caClient = new CertificateClientTestImpl(conf); - secretManager = new ContainerTokenSecretManager(new SecurityConfig(conf), - expiryTime); + secretKeyClient = new SecretKeyTestClient(); + secretManager = new ContainerTokenSecretManager(expiryTime, + secretKeyClient); } @Test(expected = CertificateExpiredException.class) @@ -177,7 +180,8 @@ public void testCreateOzoneContainer() throws Exception { conf.setBoolean( OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false); - container = new OzoneContainer(dn, conf, getContext(dn), caClient); + container = new OzoneContainer(dn, conf, getContext(dn), caClient, + secretKeyClient); //Set scmId and manually start ozone container. 
container.start(UUID.randomUUID().toString()); @@ -185,7 +189,6 @@ public void testCreateOzoneContainer() throws Exception { Collections.singletonList(caClient.getCACertificate()))) { if (containerTokenEnabled) { - secretManager.start(caClient); client.connect(); createSecureContainer(client, containerId, secretManager.generateToken( @@ -217,15 +220,12 @@ public void testContainerDownload() throws Exception { OzoneContainer container = null; try { - container = new OzoneContainer(dn, conf, getContext(dn), caClient); + container = new OzoneContainer(dn, conf, getContext(dn), caClient, + secretKeyClient); // Set scmId and manually start ozone container. container.start(UUID.randomUUID().toString()); - if (containerTokenEnabled) { - secretManager.start(caClient); - } - // Create containers long containerId = ContainerTestHelper.getTestContainerID(); List containerIdList = new ArrayList<>(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java index f51cf38de5c8..85149b69ada4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java @@ -19,23 +19,25 @@ package org.apache.hadoop.ozone.container.ozoneimpl; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; +import org.apache.hadoop.hdds.scm.XceiverClientGrpc; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.pipeline.MockPipeline; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.security.exception.SCMSecurityException; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyClient; import org.apache.hadoop.hdds.security.token.ContainerTokenIdentifier; import org.apache.hadoop.hdds.security.token.ContainerTokenSecretManager; import org.apache.hadoop.hdds.security.x509.SecurityConfig; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClientTestImpl; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.XceiverClientGrpc; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.ozone.client.SecretKeyTestClient; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.security.UserGroupInformation; @@ -93,6 +95,7 @@ public class TestSecureOzoneContainer { private final boolean hasToken; private final boolean tokenExpired; private CertificateClientTestImpl caClient; + private SecretKeyClient secretKeyClient; private ContainerTokenSecretManager secretManager; public TestSecureOzoneContainer(Boolean requireToken, @@ -122,8 +125,9 @@ public void 
setup() throws Exception { conf.set(OZONE_METADATA_DIRS, ozoneMetaPath); secConfig = new SecurityConfig(conf); caClient = new CertificateClientTestImpl(conf); + secretKeyClient = new SecretKeyTestClient(); secretManager = new ContainerTokenSecretManager( - new SecurityConfig(conf), TimeUnit.DAYS.toMillis(1)); + TimeUnit.DAYS.toMillis(1), secretKeyClient); } @Test @@ -146,7 +150,8 @@ public void testCreateOzoneContainer() throws Exception { conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false); DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); - container = new OzoneContainer(dn, conf, getContext(dn), caClient); + container = new OzoneContainer(dn, conf, getContext(dn), caClient, + secretKeyClient); //Set scmId and manually start ozone container. container.start(UUID.randomUUID().toString()); @@ -159,7 +164,6 @@ public void testCreateOzoneContainer() throws Exception { port = secConfig.getConfiguration().getInt(OzoneConfigKeys .DFS_CONTAINER_IPC_PORT, DFS_CONTAINER_IPC_PORT_DEFAULT); } - secretManager.start(caClient); ugi.doAs((PrivilegedAction) () -> { try (XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf)) { @@ -172,7 +176,7 @@ public void testCreateOzoneContainer() throws Exception { : Instant.now().plusSeconds(3600); ContainerTokenIdentifier tokenIdentifier = new ContainerTokenIdentifier(user, containerID, - caClient.getCertificate().getSerialNumber().toString(), + secretKeyClient.getCurrentSecretKey().getId(), expiryDate); token = secretManager.generateToken(tokenIdentifier); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java index 14c11233e291..0b979ba0fe9c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hdds.scm.pipeline.MockPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyClient; import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; import org.apache.hadoop.hdds.security.token.ContainerTokenSecretManager; import org.apache.hadoop.hdds.security.token.TokenVerifier; @@ -53,6 +54,7 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.RatisTestHelper; +import org.apache.hadoop.ozone.client.SecretKeyTestClient; import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher; @@ -119,6 +121,7 @@ public class TestSecureContainerServer { = GenericTestUtils.getTestDir("dfs").getAbsolutePath() + File.separator; private static final OzoneConfiguration CONF = new OzoneConfiguration(); private static CertificateClientTestImpl caClient; + private static SecretKeyClient secretKeyClient; private static OzoneBlockTokenSecretManager blockTokenSecretManager; private static ContainerTokenSecretManager containerTokenSecretManager; @@ -130,17 +133,15 @@ public static void setup() throws Exception { 
CONF.setBoolean(OZONE_SECURITY_ENABLED_KEY, true); CONF.setBoolean(HDDS_BLOCK_TOKEN_ENABLED, true); caClient = new CertificateClientTestImpl(CONF); + secretKeyClient = new SecretKeyTestClient(); - SecurityConfig secConf = new SecurityConfig(CONF); long tokenLifetime = TimeUnit.HOURS.toMillis(1); - blockTokenSecretManager = new OzoneBlockTokenSecretManager( - secConf, tokenLifetime); - blockTokenSecretManager.start(caClient); + blockTokenSecretManager = new OzoneBlockTokenSecretManager(tokenLifetime, + secretKeyClient); containerTokenSecretManager = new ContainerTokenSecretManager( - secConf, tokenLifetime); - containerTokenSecretManager.start(caClient); + tokenLifetime, secretKeyClient); } @AfterClass @@ -150,8 +151,6 @@ public static void deleteTestDir() { @After public void cleanUp() throws IOException { - containerTokenSecretManager.stop(); - blockTokenSecretManager.stop(); FileUtils.deleteQuietly(new File(CONF.get(HDDS_DATANODE_DIR_KEY))); } @@ -195,7 +194,7 @@ private static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, } HddsDispatcher hddsDispatcher = new HddsDispatcher( conf, containerSet, volumeSet, handlers, context, metrics, - TokenVerifier.create(new SecurityConfig((conf)), caClient)); + TokenVerifier.create(new SecurityConfig(conf), secretKeyClient)); hddsDispatcher.setClusterId(scmId.toString()); return hddsDispatcher; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMInstallSnapshotWithHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMInstallSnapshotWithHA.java index 3b8ad5faf347..74868bee2af4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMInstallSnapshotWithHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMInstallSnapshotWithHA.java @@ -279,8 +279,8 @@ public void testInstallCorruptedCheckpointFailure() throws Exception { s == LifeCycle.State.NEW || s.isPausingOrPaused()); // Verify correct reloading - followerSM.setInstallingDBCheckpoint( - new RocksDBCheckpoint(checkpointBackup.toPath())); + followerSM.setInstallingSnapshotData( + new RocksDBCheckpoint(checkpointBackup.toPath()), null); followerSM.reinitialize(); Assert.assertEquals(followerSM.getLastAppliedTermIndex(), leaderCheckpointTrxnInfo.getTermIndex()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSecretKeySnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSecretKeySnapshot.java new file mode 100644 index 000000000000..410fc5bd653e --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSecretKeySnapshot.java @@ -0,0 +1,289 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.scm; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.conf.DefaultConfigManager; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.scm.ScmConfig; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.ha.SCMStateMachine; +import org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig; +import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey; +import org.apache.hadoop.hdds.security.symmetric.SecretKeyManager; +import org.apache.hadoop.minikdc.MiniKdc; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.ozone.test.GenericTestUtils; +import org.apache.ratis.util.ExitUtils; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.net.InetAddress; +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; +import java.util.UUID; +import java.util.concurrent.TimeoutException; + +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION; +import static org.apache.hadoop.hdds.DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_KEYTAB_FILE_KEY; +import static org.apache.hadoop.hdds.DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECRET_KEY_EXPIRY_DURATION; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECRET_KEY_ROTATE_CHECK_DURATION; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SECRET_KEY_ROTATE_DURATION; +import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY; +import static org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig.ConfigStrings.HDDS_SCM_HTTP_KERBEROS_KEYTAB_FILE_KEY; +import static org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig.ConfigStrings.HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY; +import static org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod.KERBEROS; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static 
org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+/**
+ * Integration test to verify that symmetric secret keys are correctly
+ * synchronized from leader to follower during snapshot installation.
+ */
+@Timeout(500)
+public final class TestSecretKeySnapshot {
+  private static final Logger LOG = LoggerFactory
+      .getLogger(TestSecretKeySnapshot.class);
+  private static final long SNAPSHOT_THRESHOLD = 100;
+  private static final int LOG_PURGE_GAP = 100;
+  public static final int ROTATE_CHECK_DURATION_MS = 1_000;
+  public static final int ROTATE_DURATION_MS = 30_000;
+  public static final int EXPIRY_DURATION_MS = 61_000;
+
+  private MiniKdc miniKdc;
+  private OzoneConfiguration conf;
+  private File workDir;
+  private File ozoneKeytab;
+  private File spnegoKeytab;
+  private String host;
+  private String clusterId;
+  private String scmId;
+  private MiniOzoneHAClusterImpl cluster;
+
+  @BeforeEach
+  public void init() throws Exception {
+    conf = new OzoneConfiguration();
+    conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, "localhost");
+
+    ExitUtils.disableSystemExit();
+
+    workDir = GenericTestUtils.getTestDir(getClass().getSimpleName());
+    clusterId = UUID.randomUUID().toString();
+    scmId = UUID.randomUUID().toString();
+
+    startMiniKdc();
+    setSecureConfig();
+    createCredentialsInKDC();
+
+    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_RAFT_LOG_PURGE_ENABLED, true);
+    conf.setInt(ScmConfigKeys.OZONE_SCM_HA_RAFT_LOG_PURGE_GAP, LOG_PURGE_GAP);
+    conf.setLong(ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_THRESHOLD,
+        SNAPSHOT_THRESHOLD);
+
+    conf.set(HDDS_SECRET_KEY_ROTATE_CHECK_DURATION,
+        ROTATE_CHECK_DURATION_MS + "ms");
+    conf.set(HDDS_SECRET_KEY_ROTATE_DURATION, ROTATE_DURATION_MS + "ms");
+    conf.set(HDDS_SECRET_KEY_EXPIRY_DURATION, EXPIRY_DURATION_MS + "ms");
+
+    MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf)
+        .setClusterId(clusterId)
+        .setScmId(scmId)
+        .setSCMServiceId("SCMServiceId")
+        .setNumDatanodes(1)
+        .setNumOfStorageContainerManagers(3)
+        .setNumOfActiveSCMs(2)
+        .setNumOfOzoneManagers(1);
+
+    cluster = (MiniOzoneHAClusterImpl) builder.build();
+    cluster.waitForClusterToBeReady();
+  }
+
+  @AfterEach
+  public void stop() {
+    miniKdc.stop();
+    if (cluster != null) {
+      cluster.stop();
+    }
+    DefaultConfigManager.clearDefaultConfigs();
+  }
+
+  private void createCredentialsInKDC() throws Exception {
+    ScmConfig scmConfig = conf.getObject(ScmConfig.class);
+    SCMHTTPServerConfig httpServerConfig =
+        conf.getObject(SCMHTTPServerConfig.class);
+    createPrincipal(ozoneKeytab, scmConfig.getKerberosPrincipal());
+    createPrincipal(spnegoKeytab, httpServerConfig.getKerberosPrincipal());
+  }
+
+  private void createPrincipal(File keytab, String...
principal)
+      throws Exception {
+    miniKdc.createPrincipal(keytab, principal);
+  }
+
+  private void startMiniKdc() throws Exception {
+    Properties securityProperties = MiniKdc.createConf();
+    miniKdc = new MiniKdc(securityProperties, workDir);
+    miniKdc.start();
+  }
+
+  private void setSecureConfig() throws IOException {
+    conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
+    host = InetAddress.getLocalHost().getCanonicalHostName()
+        .toLowerCase();
+
+    conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.name());
+
+    String curUser = UserGroupInformation.getCurrentUser().getUserName();
+    conf.set(OZONE_ADMINISTRATORS, curUser);
+
+    String realm = miniKdc.getRealm();
+    String hostAndRealm = host + "@" + realm;
+    conf.set(HDDS_SCM_KERBEROS_PRINCIPAL_KEY, "scm/" + hostAndRealm);
+    conf.set(HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY, "HTTP_SCM/" + hostAndRealm);
+    conf.set(OZONE_OM_KERBEROS_PRINCIPAL_KEY, "scm/" + hostAndRealm);
+    conf.set(OZONE_OM_HTTP_KERBEROS_PRINCIPAL_KEY, "HTTP_OM/" + hostAndRealm);
+    conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, "scm/" + hostAndRealm);
+
+    ozoneKeytab = new File(workDir, "scm.keytab");
+    spnegoKeytab = new File(workDir, "http.keytab");
+
+    conf.set(HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY,
+        ozoneKeytab.getAbsolutePath());
+    conf.set(HDDS_SCM_HTTP_KERBEROS_KEYTAB_FILE_KEY,
+        spnegoKeytab.getAbsolutePath());
+    conf.set(OZONE_OM_KERBEROS_KEYTAB_FILE_KEY,
+        ozoneKeytab.getAbsolutePath());
+    conf.set(OZONE_OM_HTTP_KERBEROS_KEYTAB_FILE,
+        spnegoKeytab.getAbsolutePath());
+    conf.set(DFS_DATANODE_KERBEROS_KEYTAB_FILE_KEY,
+        ozoneKeytab.getAbsolutePath());
+
+    conf.setBoolean(HDDS_BLOCK_TOKEN_ENABLED, true);
+  }
+
+  @Test
+  public void testInstallSnapshot() throws Exception {
+    // Get the leader SCM
+    StorageContainerManager leaderSCM = cluster.getScmLeader();
+    assertNotNull(leaderSCM);
+    // Find the inactive SCM
+    String followerId = cluster.getInactiveSCM().next().getSCMNodeId();
+
+    StorageContainerManager followerSCM = cluster.getSCM(followerId);
+
+    // Wait until the leader SCM has accumulated enough secret keys.
+    SecretKeyManager leaderSecretKeyManager = leaderSCM.getSecretKeyManager();
+    GenericTestUtils.waitFor(
+        () -> leaderSecretKeyManager.getSortedKeys().size() >= 2,
+        ROTATE_CHECK_DURATION_MS, EXPIRY_DURATION_MS);
+
+    writeToIncreaseLogIndex(leaderSCM, 200);
+    ManagedSecretKey currentKeyInLeader =
+        leaderSecretKeyManager.getCurrentSecretKey();
+
+    // Start the inactive SCM. Snapshot installation happens as part of the
+    // setConfiguration() call to the Ratis leader, and the follower catches
+    // up from it.
+    LOG.info("Starting follower...");
+    cluster.startInactiveSCM(followerId);
+
+    // The recently started SCM should be lagging behind the leader.
+    SCMStateMachine followerSM =
+        followerSCM.getScmHAManager().getRatisServer().getSCMStateMachine();
+
+    // Wait and retry for the follower to apply transactions up to the
+    // leader's snapshot index; fail if it has not caught up within 3s.
+    GenericTestUtils.waitFor(() ->
+        followerSM.getLastAppliedTermIndex().getIndex() >= 200,
+        100, 3000);
+    long followerLastAppliedIndex =
+        followerSM.getLastAppliedTermIndex().getIndex();
+    assertTrue(followerLastAppliedIndex >= 200);
+    assertFalse(followerSM.getLifeCycleState().isPausingOrPaused());
+
+    // Verify that the follower has the secret keys created
+    // while it was inactive.
+    SecretKeyManager followerSecretKeyManager =
+        followerSCM.getSecretKeyManager();
+    assertTrue(followerSecretKeyManager.isInitialized());
+    List<ManagedSecretKey> followerKeys =
+        followerSecretKeyManager.getSortedKeys();
+    LOG.info("Follower secret keys after snapshot: {}", followerKeys);
+    assertTrue(followerKeys.size() >= 2);
+    assertTrue(followerKeys.contains(currentKeyInLeader));
+    assertEquals(leaderSecretKeyManager.getSortedKeys(), followerKeys);
+
+    // Wait for the next rotation and assert that updates still synchronize
+    // normally after the snapshot.
+    ManagedSecretKey currentKeyPostSnapshot =
+        leaderSecretKeyManager.getCurrentSecretKey();
+    GenericTestUtils.waitFor(() ->
+        !leaderSecretKeyManager.getCurrentSecretKey()
+            .equals(currentKeyPostSnapshot),
+        ROTATE_CHECK_DURATION_MS, ROTATE_DURATION_MS);
+    assertEquals(leaderSecretKeyManager.getSortedKeys(),
+        followerSecretKeyManager.getSortedKeys());
+  }
+
+  private List<ContainerInfo> writeToIncreaseLogIndex(
+      StorageContainerManager scm, long targetLogIndex)
+      throws IOException, InterruptedException, TimeoutException {
+    List<ContainerInfo> containers = new ArrayList<>();
+    SCMStateMachine stateMachine =
+        scm.getScmHAManager().getRatisServer().getSCMStateMachine();
+    long logIndex = scm.getScmHAManager().getRatisServer().getSCMStateMachine()
+        .getLastAppliedTermIndex().getIndex();
+    while (logIndex <= targetLogIndex) {
+      containers.add(scm.getContainerManager()
+          .allocateContainer(
+              RatisReplicationConfig.getInstance(ReplicationFactor.ONE),
+              this.getClass().getName()));
+      Thread.sleep(100);
+      logIndex = stateMachine.getLastAppliedTermIndex().getIndex();
+    }
+    return containers;
+  }
+
+}
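A note on the size() >= 2 guard in testInstallSnapshot above: with keys rotated every ROTATE_DURATION_MS (30s) and expiring after EXPIRY_DURATION_MS (61s), at most floor(61/30) + 1 = 3 keys are valid at any moment, and a second key appears one rotation in, well within the EXPIRY_DURATION_MS wait. A back-of-the-envelope sketch of that bound (hypothetical helper, not part of this patch):

    // Upper bound on concurrently valid keys for a rotate/expiry pair;
    // hypothetical helper illustrating the invariant the test waits on.
    static int maxLiveKeys(long rotateMs, long expiryMs) {
      // the current key plus every earlier key that has not yet expired
      return (int) (expiryMs / rotateMs) + 1;
    }
    // With the test constants: maxLiveKeys(30_000, 61_000) == 3.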
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index a7a6d4cb77a2..071d97018172 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -135,7 +135,8 @@ enum Type {
   SnapshotPurge = 118;
   RecoverLease = 119;
   SetTimes = 120;
-  ListSnapshotDiffJobs = 121;
+  RefetchSecretKey = 121;
+  ListSnapshotDiffJobs = 122;
 }

 message OMRequest {
@@ -255,8 +256,9 @@ message OMRequest {
   optional RecoverLeaseRequest RecoverLeaseRequest = 119;
   optional SetTimesRequest SetTimesRequest = 120;
+  optional RefetchSecretKeyRequest RefetchSecretKeyRequest = 121;

-  optional ListSnapshotDiffJobRequest ListSnapshotDiffJobRequest = 121;
+  optional ListSnapshotDiffJobRequest ListSnapshotDiffJobRequest = 122;
 }

 message OMResponse {
@@ -368,8 +370,9 @@ message OMResponse {
   optional SnapshotPurgeResponse SnapshotPurgeResponse = 118;
   optional RecoverLeaseResponse RecoverLeaseResponse = 119;
   optional SetTimesResponse SetTimesResponse = 120;
+  optional RefetchSecretKeyResponse RefetchSecretKeyResponse = 121;

-  optional ListSnapshotDiffJobResponse ListSnapshotDiffJobResponse = 121;
+  optional ListSnapshotDiffJobResponse ListSnapshotDiffJobResponse = 122;
 }

 enum Status {
@@ -598,6 +601,14 @@ message SetVolumePropertyResponse {
   optional bool response = 1;
 }

+message RefetchSecretKeyRequest {
+
+}
+
+message RefetchSecretKeyResponse {
+  optional hdds.UUID id = 1;
+}
+
 /**
  * Checks if the user has specified permissions for the volume
  */
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java
index 16f8af31a755..c452bf48e460 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/GrpcOzoneManagerServer.java
@@ -19,8 +19,12 @@
 import java.io.IOException;
 import java.util.OptionalInt;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;

+import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.security.ssl.KeyStoresFactory;
@@ -36,6 +40,9 @@
 import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
 import io.grpc.netty.GrpcSslContexts;
 import io.grpc.netty.NettyServerBuilder;
+import io.netty.channel.EventLoopGroup;
+import io.netty.channel.nio.NioEventLoopGroup;
+import io.netty.channel.socket.nio.NioServerSocketChannel;
 import io.netty.handler.ssl.SslContextBuilder;
 import io.netty.handler.ssl.SslProvider;
 import io.grpc.Server;
@@ -46,8 +53,14 @@
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_GRPC_TLS_PROVIDER;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_GRPC_TLS_PROVIDER_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_GRPC_BOSSGROUP_SIZE_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_GRPC_BOSSGROUP_SIZE_KEY;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_GRPC_MAXIMUM_RESPONSE_LENGTH;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_GRPC_MAXIMUM_RESPONSE_LENGTH_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_GRPC_READ_THREAD_NUM_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_GRPC_READ_THREAD_NUM_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_GRPC_WORKERGROUP_SIZE_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_GRPC_WORKERGROUP_SIZE_KEY;

 /**
  * Separated network server for gRPC transport OzoneManagerService s3g->OM.
@@ -61,6 +74,10 @@ public class GrpcOzoneManagerServer { private int port; private final int maxSize; + private ThreadPoolExecutor readExecutors; + private EventLoopGroup bossEventLoopGroup; + private EventLoopGroup workerEventLoopGroup; + public GrpcOzoneManagerServer(OzoneConfiguration config, OzoneManagerProtocolServerSideTranslatorPB omTranslator, @@ -95,8 +112,41 @@ public void init(OzoneManagerProtocolServerSideTranslatorPB omTranslator, OzoneDelegationTokenSecretManager delegationTokenMgr, OzoneConfiguration omServerConfig, CertificateClient caClient) { + + int poolSize = omServerConfig.getInt(OZONE_OM_GRPC_READ_THREAD_NUM_KEY, + OZONE_OM_GRPC_READ_THREAD_NUM_DEFAULT); + + int bossGroupSize = omServerConfig.getInt(OZONE_OM_GRPC_BOSSGROUP_SIZE_KEY, + OZONE_OM_GRPC_BOSSGROUP_SIZE_DEFAULT); + + int workerGroupSize = + omServerConfig.getInt(OZONE_OM_GRPC_WORKERGROUP_SIZE_KEY, + OZONE_OM_GRPC_WORKERGROUP_SIZE_DEFAULT); + + readExecutors = new ThreadPoolExecutor(poolSize, poolSize, + 60, TimeUnit.SECONDS, + new LinkedBlockingQueue<>(), + new ThreadFactoryBuilder().setDaemon(true) + .setNameFormat("OmRpcReader-%d") + .build()); + + ThreadFactory bossFactory = new ThreadFactoryBuilder().setDaemon(true) + .setNameFormat("OmRpcBoss-ELG-%d") + .build(); + bossEventLoopGroup = new NioEventLoopGroup(bossGroupSize, bossFactory); + + ThreadFactory workerFactory = new ThreadFactoryBuilder().setDaemon(true) + .setNameFormat("OmRpcWorker-ELG-%d") + .build(); + workerEventLoopGroup = + new NioEventLoopGroup(workerGroupSize, workerFactory); + NettyServerBuilder nettyServerBuilder = NettyServerBuilder.forPort(port) .maxInboundMessageSize(maxSize) + .bossEventLoopGroup(bossEventLoopGroup) + .workerEventLoopGroup(workerEventLoopGroup) + .channelType(NioServerSocketChannel.class) + .executor(readExecutors) .addService(ServerInterceptors.intercept( new OzoneManagerServiceGrpc(omTranslator, delegationTokenMgr, @@ -134,7 +184,11 @@ public void start() throws IOException { public void stop() { try { + readExecutors.shutdown(); + readExecutors.awaitTermination(5L, TimeUnit.SECONDS); server.shutdown().awaitTermination(10L, TimeUnit.SECONDS); + bossEventLoopGroup.shutdownGracefully().sync(); + workerEventLoopGroup.shutdownGracefully().sync(); LOG.info("Server {} is shutdown", getClass().getSimpleName()); } catch (InterruptedException ex) { LOG.warn("{} couldn't be stopped gracefully", getClass().getSimpleName()); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 785272f7203e..c3580deebd39 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -42,10 +42,10 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.Set; import java.util.Timer; import java.util.TimerTask; +import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; @@ -69,6 +69,7 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.ReconfigurationHandler; +import org.apache.hadoop.hdds.protocol.SecretKeyProtocol; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import 
org.apache.hadoop.hdds.protocol.proto.ReconfigureProtocolProtos.ReconfigureProtocolService; import org.apache.hadoop.hdds.protocolPB.ReconfigureProtocolPB; @@ -99,6 +100,10 @@ import org.apache.hadoop.hdds.scm.ha.SCMNodeInfo; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; +import org.apache.hadoop.hdds.security.OzoneSecurityException; +import org.apache.hadoop.hdds.security.symmetric.SecretKeySignerClient; +import org.apache.hadoop.hdds.security.symmetric.DefaultSecretKeySignerClient; +import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSecretManager; import org.apache.hadoop.hdds.security.x509.SecurityConfig; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.ozone.security.OMCertificateClient; @@ -166,7 +171,6 @@ import org.apache.hadoop.ozone.om.protocolPB.OMAdminProtocolPB; import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB; import org.apache.hadoop.ozone.common.ha.ratis.RatisSnapshotInfo; -import org.apache.hadoop.hdds.security.OzoneSecurityException; import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; @@ -186,7 +190,6 @@ import org.apache.hadoop.ozone.protocolPB.OMAdminProtocolServerSideImpl; import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos.PersistedUserVolumeInfo; import org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB; -import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSecretManager; import org.apache.hadoop.ozone.security.OzoneDelegationTokenSecretManager; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; @@ -334,6 +337,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl private OzoneDelegationTokenSecretManager delegationTokenMgr; private OzoneBlockTokenSecretManager blockTokenMgr; private CertificateClient certClient; + private SecretKeySignerClient secretKeyClient; private String caCertPem = null; private List caCertPemList = new ArrayList<>(); private final Text omRpcAddressTxt; @@ -621,9 +625,12 @@ private OzoneManager(OzoneConfiguration conf, StartupOption startupOption) certClient = new OMCertificateClient(secConfig, omStorage, scmInfo == null ? 
null : scmInfo.getScmId(), this::saveNewCertId, this::terminateOM); + SecretKeyProtocol secretKeyProtocol = + HddsServerUtil.getSecretKeyClientForOm(conf); + secretKeyClient = new DefaultSecretKeySignerClient(secretKeyProtocol); } if (secConfig.isBlockTokenEnabled()) { - blockTokenMgr = createBlockTokenSecretManager(configuration); + blockTokenMgr = createBlockTokenSecretManager(); } // Enable S3 multi-tenancy if config keys are set @@ -1018,41 +1025,18 @@ private OzoneDelegationTokenSecretManager createDelegationTokenSecretManager( .build(); } - private OzoneBlockTokenSecretManager createBlockTokenSecretManager( - OzoneConfiguration conf) { - - long expiryTime = conf.getTimeDuration( + private OzoneBlockTokenSecretManager createBlockTokenSecretManager() { + long expiryTime = configuration.getTimeDuration( HddsConfigKeys.HDDS_BLOCK_TOKEN_EXPIRY_TIME, HddsConfigKeys.HDDS_BLOCK_TOKEN_EXPIRY_TIME_DEFAULT, TimeUnit.MILLISECONDS); - long certificateGracePeriod = Duration.parse( - conf.get(HddsConfigKeys.HDDS_X509_RENEW_GRACE_DURATION, - HddsConfigKeys.HDDS_X509_RENEW_GRACE_DURATION_DEFAULT)).toMillis(); - boolean tokenSanityChecksEnabled = conf.getBoolean( - HddsConfigKeys.HDDS_X509_GRACE_DURATION_TOKEN_CHECKS_ENABLED, - HddsConfigKeys.HDDS_X509_GRACE_DURATION_TOKEN_CHECKS_ENABLED_DEFAULT); - if (tokenSanityChecksEnabled && expiryTime > certificateGracePeriod) { - throw new IllegalArgumentException("Certificate grace period " + - HddsConfigKeys.HDDS_X509_RENEW_GRACE_DURATION + - " should be greater than maximum block token lifetime " + - HddsConfigKeys.HDDS_BLOCK_TOKEN_EXPIRY_TIME); - } - // TODO: Pass OM cert serial ID. - if (testSecureOmFlag) { - return new OzoneBlockTokenSecretManager(secConfig, expiryTime); - } - Objects.requireNonNull(certClient); - return new OzoneBlockTokenSecretManager(secConfig, expiryTime); + return new OzoneBlockTokenSecretManager(expiryTime, secretKeyClient); } private void stopSecretManager() { - if (blockTokenMgr != null) { - LOG.info("Stopping OM block token manager."); - try { - blockTokenMgr.stop(); - } catch (IOException e) { - LOG.error("Failed to stop block token manager", e); - } + if (secretKeyClient != null) { + LOG.info("Stopping secret key client."); + secretKeyClient.stop(); } if (delegationTokenMgr != null) { @@ -1065,6 +1049,11 @@ private void stopSecretManager() { } } + public UUID refetchSecretKey() { + secretKeyClient.refetchSecretKey(); + return secretKeyClient.getCurrentSecretKey().getId(); + } + @VisibleForTesting public void startSecretManager() { try { @@ -1073,13 +1062,13 @@ public void startSecretManager() { LOG.error("Unable to read key pair for OM.", e); throw new UncheckedIOException(e); } + if (secConfig.isBlockTokenEnabled() && blockTokenMgr != null) { + LOG.info("Starting secret key client."); try { - LOG.info("Starting OM block token secret manager"); - blockTokenMgr.start(certClient); + secretKeyClient.start(configuration); } catch (IOException e) { - // Unable to start secret manager. - LOG.error("Error starting block token secret manager.", e); + LOG.error("Unable to initialize secret key.", e); throw new UncheckedIOException(e); } } @@ -1107,6 +1096,17 @@ public void setCertClient(CertificateClient newClient) throws IOException { certClient = newClient; } + /** + * For testing purpose only. This allows testing token in integration test + * without fully setting up a working secure cluster. 
+ */ + @VisibleForTesting + public void setSecretKeyClient( + SecretKeySignerClient secretKeyClient) { + this.secretKeyClient = secretKeyClient; + blockTokenMgr.setSecretKeyClient(secretKeyClient); + } + /** * Login OM service user if security and Kerberos are enabled. */ @@ -2254,8 +2254,7 @@ public void join() { private void startSecretManagerIfNecessary() { boolean shouldRun = isOzoneSecurityEnabled(); if (shouldRun) { - boolean running = delegationTokenMgr.isRunning() - && blockTokenMgr.isRunning(); + boolean running = delegationTokenMgr.isRunning(); if (!running) { startSecretManager(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index c673c4726b52..cba515d57fab 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -21,6 +21,7 @@ import java.util.ArrayList; import java.util.HashSet; import java.util.List; +import java.util.UUID; import java.util.stream.Collectors; import com.google.protobuf.ByteString; @@ -81,6 +82,7 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetFileStatusResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetKeyInfoRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetKeyInfoResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RefetchSecretKeyResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoBucketRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoBucketResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoVolumeRequest; @@ -140,6 +142,7 @@ import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartInfo; import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.StatusAndMessages; +import org.apache.hadoop.util.ProtobufUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -319,6 +322,9 @@ public OMResponse handleReadRequest(OMRequest request) { responseBuilder.setTransferOmLeadershipResponse(transferLeadership( request.getTransferOmLeadershipRequest())); break; + case RefetchSecretKey: + responseBuilder.setRefetchSecretKeyResponse(refetchSecretKey()); + break; default: responseBuilder.setSuccess(false); responseBuilder.setMessage("Unrecognized Command Type: " + cmdType); @@ -954,6 +960,14 @@ private RangerBGSyncResponse triggerRangerBGSync( return RangerBGSyncResponse.newBuilder().setRunSuccess(res).build(); } + private RefetchSecretKeyResponse refetchSecretKey() { + UUID uuid = impl.refetchSecretKey(); + RefetchSecretKeyResponse response = + RefetchSecretKeyResponse.newBuilder() + .setId(ProtobufUtils.toProtobuf(uuid)).build(); + return response; + } + @RequestFeatureValidator( conditions = ValidationCondition.OLDER_CLIENT_REQUESTS, processingPhase = RequestProcessingPhase.POST_PROCESS, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneManagerBlockToken.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneManagerBlockToken.java deleted file mode 100644 index 3297e3d992c8..000000000000 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneManagerBlockToken.java +++ /dev/null @@ -1,251 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.security; - -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; -import org.apache.hadoop.security.ssl.KeyStoreTestUtil; -import org.apache.ozone.test.GenericTestUtils; -import org.apache.hadoop.util.Time; -import org.junit.After; -import org.junit.BeforeClass; -import org.junit.Test; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.crypto.KeyGenerator; -import javax.crypto.Mac; -import javax.crypto.SecretKey; -import java.io.File; -import java.io.IOException; -import java.security.GeneralSecurityException; -import java.security.InvalidKeyException; -import java.security.KeyPair; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; -import java.security.PrivateKey; -import java.security.Signature; -import java.security.SignatureException; -import java.security.cert.Certificate; -import java.security.cert.CertificateEncodingException; -import java.security.cert.X509Certificate; -import java.util.ArrayList; -import java.util.Collections; -import java.util.EnumSet; -import java.util.List; -import java.util.Map; - -/** - * Test class for OzoneManagerDelegationToken. - */ -public class TestOzoneManagerBlockToken { - - private static final Logger LOG = LoggerFactory - .getLogger(TestOzoneManagerBlockToken.class); - private static final String BASEDIR = GenericTestUtils - .getTempPath(TestOzoneManagerBlockToken.class.getSimpleName()); - private static final String KEYSTORES_DIR = - new File(BASEDIR).getAbsolutePath(); - private static long expiryTime; - private static KeyPair keyPair; - private static X509Certificate cert; - private static final long MAX_LEN = 1000; - - @BeforeClass - public static void setUp() throws Exception { - File base = new File(BASEDIR); - FileUtil.fullyDelete(base); - base.mkdirs(); - expiryTime = Time.monotonicNow() + 60 * 60 * 24; - - // Create Ozone Master key pair. - keyPair = KeyStoreTestUtil.generateKeyPair("RSA"); - // Create Ozone Master certificate (SCM CA issued cert) and key store. 
- cert = KeyStoreTestUtil - .generateCertificate("CN=OzoneMaster", keyPair, 30, "SHA256withRSA"); - } - - @After - public void cleanUp() { - } - - @Test - public void testSignToken() throws GeneralSecurityException, IOException { - String keystore = new File(KEYSTORES_DIR, "keystore.jks") - .getAbsolutePath(); - String truststore = new File(KEYSTORES_DIR, "truststore.jks") - .getAbsolutePath(); - String trustPassword = "trustPass"; - String keyStorePassword = "keyStorePass"; - String keyPassword = "keyPass"; - - - KeyStoreTestUtil.createKeyStore(keystore, keyStorePassword, keyPassword, - "OzoneMaster", keyPair.getPrivate(), cert); - - // Create trust store and put the certificate in the trust store - Map certs = Collections.singletonMap("server", - cert); - KeyStoreTestUtil.createTrustStore(truststore, trustPassword, certs); - - // Sign the OzoneMaster Token with Ozone Master private key - PrivateKey privateKey = keyPair.getPrivate(); - OzoneBlockTokenIdentifier tokenId = new OzoneBlockTokenIdentifier( - "testUser", "84940", - EnumSet.allOf(HddsProtos.BlockTokenSecretProto.AccessModeProto.class), - expiryTime, cert.getSerialNumber().toString(), MAX_LEN); - byte[] signedToken = signTokenAsymmetric(tokenId, privateKey); - - // Verify a valid signed OzoneMaster Token with Ozone Master - // public key(certificate) - boolean isValidToken = verifyTokenAsymmetric(tokenId, signedToken, cert); - LOG.info("{} is {}", tokenId, isValidToken ? "valid." : "invalid."); - - // Verify an invalid signed OzoneMaster Token with Ozone Master - // public key(certificate) - tokenId = new OzoneBlockTokenIdentifier("", "", - EnumSet.allOf(HddsProtos.BlockTokenSecretProto.AccessModeProto.class), - expiryTime, cert.getSerialNumber().toString(), MAX_LEN); - LOG.info("Unsigned token {} is {}", tokenId, - verifyTokenAsymmetric(tokenId, RandomUtils.nextBytes(128), cert)); - - } - - public byte[] signTokenAsymmetric(OzoneBlockTokenIdentifier tokenId, - PrivateKey privateKey) throws NoSuchAlgorithmException, - InvalidKeyException, SignatureException { - Signature rsaSignature = Signature.getInstance("SHA256withRSA"); - rsaSignature.initSign(privateKey); - rsaSignature.update(tokenId.getBytes()); - byte[] signature = rsaSignature.sign(); - return signature; - } - - public boolean verifyTokenAsymmetric(OzoneBlockTokenIdentifier tokenId, - byte[] signature, Certificate certificate) throws InvalidKeyException, - NoSuchAlgorithmException, SignatureException { - Signature rsaSignature = Signature.getInstance("SHA256withRSA"); - rsaSignature.initVerify(certificate); - rsaSignature.update(tokenId.getBytes()); - boolean isValid = rsaSignature.verify(signature); - return isValid; - } - - private byte[] signTokenSymmetric(OzoneBlockTokenIdentifier identifier, - Mac mac, SecretKey key) { - try { - mac.init(key); - } catch (InvalidKeyException ike) { - throw new IllegalArgumentException("Invalid key to HMAC computation", - ike); - } - return mac.doFinal(identifier.getBytes()); - } - - OzoneBlockTokenIdentifier generateTestToken() { - return new OzoneBlockTokenIdentifier(RandomStringUtils.randomAlphabetic(6), - RandomStringUtils.randomAlphabetic(5), - EnumSet.allOf(HddsProtos.BlockTokenSecretProto.AccessModeProto.class), - expiryTime, cert.getSerialNumber().toString(), MAX_LEN); - } - - @Test - public void testAsymmetricTokenPerf() throws NoSuchAlgorithmException, - CertificateEncodingException, NoSuchProviderException, - InvalidKeyException, SignatureException { - final int testTokenCount = 1000; - List tokenIds = new ArrayList<>(); - 
List tokenPasswordAsym = new ArrayList<>();
-    for (int i = 0; i < testTokenCount; i++) {
-      tokenIds.add(generateTestToken());
-    }
-
-    KeyPair kp = KeyStoreTestUtil.generateKeyPair("RSA");
-
-    // Create Ozone Master certificate (SCM CA issued cert) and key store
-    X509Certificate omCert;
-    omCert = KeyStoreTestUtil.generateCertificate("CN=OzoneMaster",
-        kp, 30, "SHA256withRSA");
-
-    long startTime = Time.monotonicNowNanos();
-    for (int i = 0; i < testTokenCount; i++) {
-      tokenPasswordAsym.add(
-          signTokenAsymmetric(tokenIds.get(i), kp.getPrivate()));
-    }
-    long duration = Time.monotonicNowNanos() - startTime;
-    LOG.info("Average token sign time with HmacSha256(RSA/1024 key) is {} ns",
-        duration / testTokenCount);
-
-    startTime = Time.monotonicNowNanos();
-    for (int i = 0; i < testTokenCount; i++) {
-      verifyTokenAsymmetric(tokenIds.get(i), tokenPasswordAsym.get(i), omCert);
-    }
-    duration = Time.monotonicNowNanos() - startTime;
-    LOG.info("Average token verify time with HmacSha256(RSA/1024 key) "
-        + "is {} ns", duration / testTokenCount);
-  }
-
-  @Test
-  public void testSymmetricTokenPerf() {
-    String hmacSHA1 = "HmacSHA1";
-    String hmacSHA256 = "HmacSHA256";
-
-    testSymmetricTokenPerfHelper(hmacSHA1, 64);
-    testSymmetricTokenPerfHelper(hmacSHA256, 1024);
-  }
-
-  public void testSymmetricTokenPerfHelper(String hmacAlgorithm, int keyLen) {
-    final int testTokenCount = 1000;
-    List tokenIds = new ArrayList<>();
-    List tokenPasswordSym = new ArrayList<>();
-    for (int i = 0; i < testTokenCount; i++) {
-      tokenIds.add(generateTestToken());
-    }
-
-    KeyGenerator keyGen;
-    try {
-      keyGen = KeyGenerator.getInstance(hmacAlgorithm);
-      keyGen.init(keyLen);
-    } catch (NoSuchAlgorithmException nsa) {
-      throw new IllegalArgumentException("Can't find " + hmacAlgorithm +
-          " algorithm.");
-    }
-
-    Mac mac;
-    try {
-      mac = Mac.getInstance(hmacAlgorithm);
-    } catch (NoSuchAlgorithmException nsa) {
-      throw new IllegalArgumentException("Can't find " + hmacAlgorithm +
-          " algorithm.");
-    }
-
-    SecretKey secretKey = keyGen.generateKey();
-
-    long startTime = Time.monotonicNowNanos();
-    for (int i = 0; i < testTokenCount; i++) {
-      tokenPasswordSym.add(
-          signTokenSymmetric(tokenIds.get(i), mac, secretKey));
-    }
-    long duration = Time.monotonicNowNanos() - startTime;
-    LOG.info("Average token sign time with {}({} symmetric key) is {} ns",
-        hmacAlgorithm, keyLen, duration / testTokenCount);
-  }
-}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/FeatureProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/FeatureProvider.java
index 7696b3992593..60d701d18b82 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/FeatureProvider.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/FeatureProvider.java
@@ -27,6 +27,8 @@
 import java.util.List;
 import java.util.stream.Collectors;
 
+import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_HEATMAP_ENABLE_DEFAULT;
+import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_HEATMAP_ENABLE_KEY;
 import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_HEATMAP_PROVIDER_KEY;
 
 /**
@@ -34,7 +36,7 @@
  */
 @Singleton
 public final class FeatureProvider {
-  private static EnumMap<Feature, Boolean> featureSupportMap =
+  private static EnumMap<Feature, Boolean> featureDisableMap =
       new EnumMap<>(Feature.class);
 
   private FeatureProvider() {
@@ -69,13 +71,13 @@ public static Feature of(String featureName) {
     }
   }
 
-  public static EnumMap<Feature, Boolean> getFeatureSupportMap() {
-    return featureSupportMap;
+  public static EnumMap<Feature, Boolean> getFeatureDisableMap() {
+    return featureDisableMap;
   }
 
   public static List<Feature> getAllDisabledFeatures() {
-    return getFeatureSupportMap().keySet().stream().filter(feature ->
-        Boolean.TRUE.equals(getFeatureSupportMap().get(feature))).collect(
+    return getFeatureDisableMap().keySet().stream().filter(feature ->
+        Boolean.TRUE.equals(getFeatureDisableMap().get(feature))).collect(
         Collectors.toList());
   }
 
@@ -85,13 +87,15 @@ public static void initFeatureSupport(
     resetInitOfFeatureSupport();
     String heatMapProviderCls = ozoneConfiguration.get(
         OZONE_RECON_HEATMAP_PROVIDER_KEY);
-    if (StringUtils.isEmpty(heatMapProviderCls)) {
-      getFeatureSupportMap().put(Feature.HEATMAP, true);
+    boolean heatMapEnabled = ozoneConfiguration.getBoolean(
+        OZONE_RECON_HEATMAP_ENABLE_KEY, OZONE_RECON_HEATMAP_ENABLE_DEFAULT);
+    if (!heatMapEnabled || StringUtils.isEmpty(heatMapProviderCls)) {
+      getFeatureDisableMap().put(Feature.HEATMAP, true);
     }
   }
 
   private static void resetInitOfFeatureSupport() {
-    getFeatureSupportMap().keySet()
-        .forEach(feature -> getFeatureSupportMap().put(feature, false));
+    getFeatureDisableMap().keySet()
+        .forEach(feature -> getFeatureDisableMap().put(feature, false));
   }
 }
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestFeaturesEndPoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestFeaturesEndPoint.java
index 3534da7ff601..6790befca7fd 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestFeaturesEndPoint.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestFeaturesEndPoint.java
@@ -37,6 +37,7 @@
 
 import java.util.List;
 
+import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_HEATMAP_ENABLE_KEY;
 import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_HEATMAP_PROVIDER_KEY;
 import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager;
 import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager;
@@ -89,7 +90,7 @@ public void setUp() throws Exception {
   }
 
   @Test
-  public void testGetDisabledFeaturesGreaerThanZero() {
+  public void testGetDisabledFeaturesGreaterThanZero() {
     ozoneConfiguration.set(OZONE_RECON_HEATMAP_PROVIDER_KEY, "");
     FeatureProvider.initFeatureSupport(ozoneConfiguration);
     Response disabledFeatures = featuresEndPoint.getDisabledFeatures();
@@ -105,6 +106,35 @@ public void testGetDisabledFeaturesGreaerThanZero() {
   public void testNoDisabledFeatures() {
     ozoneConfiguration.set(OZONE_RECON_HEATMAP_PROVIDER_KEY,
         "org.apache.hadoop.ozone.recon.heatmap.TestHeatMapProviderImpl");
+    ozoneConfiguration.setBoolean(OZONE_RECON_HEATMAP_ENABLE_KEY, true);
+    FeatureProvider.initFeatureSupport(ozoneConfiguration);
+    Response disabledFeatures = featuresEndPoint.getDisabledFeatures();
+    List<FeatureProvider.Feature> allDisabledFeatures =
+        (List<FeatureProvider.Feature>) disabledFeatures.getEntity();
+    Assertions.assertNotNull(allDisabledFeatures);
+    Assertions.assertTrue(allDisabledFeatures.size() == 0);
+  }
+
+  @Test
+  public void testGetHeatMapInDisabledFeaturesListWhenHeatMapFlagIsFalse() {
+    ozoneConfiguration.set(OZONE_RECON_HEATMAP_PROVIDER_KEY,
+        "org.apache.hadoop.ozone.recon.heatmap.TestHeatMapProviderImpl");
+    ozoneConfiguration.setBoolean(OZONE_RECON_HEATMAP_ENABLE_KEY, false);
+    FeatureProvider.initFeatureSupport(ozoneConfiguration);
+    Response disabledFeatures = featuresEndPoint.getDisabledFeatures();
+    List<FeatureProvider.Feature> allDisabledFeatures =
+        (List<FeatureProvider.Feature>) disabledFeatures.getEntity();
+    Assertions.assertNotNull(allDisabledFeatures);
+    Assertions.assertTrue(allDisabledFeatures.size() > 0);
+    Assertions.assertEquals(FeatureProvider.Feature.HEATMAP.getFeatureName(),
+        allDisabledFeatures.get(0).getFeatureName());
+  }
+
+  @Test
+  public void testGetHeatMapNotInDisabledFeaturesListWhenHeatMapFlagIsTrue() {
+    ozoneConfiguration.set(OZONE_RECON_HEATMAP_PROVIDER_KEY,
+        "org.apache.hadoop.ozone.recon.heatmap.TestHeatMapProviderImpl");
+    ozoneConfiguration.setBoolean(OZONE_RECON_HEATMAP_ENABLE_KEY, true);
     FeatureProvider.initFeatureSupport(ozoneConfiguration);
     Response disabledFeatures = featuresEndPoint.getDisabledFeatures();
     List<FeatureProvider.Feature> allDisabledFeatures =
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/FetchKeySubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/FetchKeySubCommand.java
new file mode 100644
index 000000000000..ba0220292cc1
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/FetchKeySubCommand.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.admin.om;
+
+import java.util.UUID;
+import java.util.concurrent.Callable;
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
+import picocli.CommandLine;
+
+/**
+ * Handler of ozone admin om fetch-key command.
+ */
+@CommandLine.Command(
+    name = "fetch-key",
+    description = "CLI command to force OM to fetch the latest secret key " +
+        "from SCM.",
+    mixinStandardHelpOptions = true,
+    versionProvider = HddsVersionProvider.class
+)
+public class FetchKeySubCommand implements Callable<Void> {
+  @CommandLine.ParentCommand
+  private OMAdmin parent;
+
+  @CommandLine.Option(
+      names = {"-id", "--service-id"},
+      description = "Ozone Manager Service ID",
+      required = true
+  )
+  private String omServiceId;
+
+  @Override
+  public Void call() throws Exception {
+    try (OzoneManagerProtocol client = parent.createOmClient(omServiceId)) {
+      UUID uuid = client.refetchSecretKey();
+      System.out.println("Current Secret Key ID: " + uuid);
+    }
+    return null;
+  }
+}
+
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java
index 9bb447669d8b..d3b36db6d849 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java
@@ -59,7 +59,8 @@
       FinalizationStatusSubCommand.class,
       DecommissionOMSubcommand.class,
       UpdateRangerSubcommand.class,
-      TransferOmLeaderSubCommand.class
+      TransferOmLeaderSubCommand.class,
+      FetchKeySubCommand.class
     })
 @MetaInfServices(SubcommandWithParent.class)
 public class OMAdmin extends GenericCli implements SubcommandWithParent {
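For context on the FeatureProvider change earlier in this patch: the heatmap stays out of the disabled-feature list only when the enable flag is on and a provider class is configured. A minimal standalone sketch of that decision, not taken from this diff; the literal key strings are assumptions (the patch reads them through ReconConfigKeys constants):

    import org.apache.commons.lang3.StringUtils;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    public final class HeatMapFlagSketch {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Mirrors initFeatureSupport(): the feature is marked disabled
        // unless the flag is true AND a provider class is set.
        boolean heatMapEnabled =
            conf.getBoolean("ozone.recon.heatmap.enable", false);
        String provider = conf.get("ozone.recon.heatmap.provider");
        boolean heatMapDisabled =
            !heatMapEnabled || StringUtils.isEmpty(provider);
        System.out.println("HEATMAP disabled: " + heatMapDisabled);
      }
    }
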
From faac5af99ac78772ed96edd76217261f7744f340 Mon Sep 17 00:00:00 2001
From: xBis7
Date: Fri, 9 Jun 2023 14:30:31 +0300
Subject: [PATCH 06/15] check for snapshots before listing

---
 .../hadoop/ozone/om/TestOmSnapshot.java       | 23 +++++++++++++++++++
 .../hadoop/ozone/om/OmSnapshotManager.java    |  9 ++++++++
 2 files changed, 32 insertions(+)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java
index a052e4bb161b..42c59f3f6d94 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java
@@ -767,6 +767,29 @@ public void testSnapDiffMultipleBuckets() throws Exception {
     Assert.assertEquals(1, diff1.getDiffList().size());
   }
 
+  @Test
+  public void testListSnapshotDiffWithInvalidParameters()
+      throws Exception {
+    String volume = "vol-" + RandomStringUtils.randomNumeric(5);
+    String bucket = "buck-" + RandomStringUtils.randomNumeric(5);
+
+    String errorMessage = "Provided volume name " + volume +
+        " or bucket name " + bucket + " doesn't exist";
+    // List SnapshotDiff jobs for a non-existent volume and bucket.
+    LambdaTestUtils.intercept(OMException.class,
+        errorMessage,
+        () -> store.listSnapshotDiffJobs(volume, bucket, "", true));
+
+    // Create the volume and the bucket.
+    store.createVolume(volume);
+    OzoneVolume ozVolume = store.getVolume(volume);
+    ozVolume.createBucket(bucket);
+
+    // List SnapshotDiff jobs using a path that has no snapshots.
+    LambdaTestUtils.intercept(OMException.class,
+        "There are no snapshots",
+        () -> store.listSnapshotDiffJobs(volume, bucket, "", true));
+  }
 
   /**
    * Tests snapdiff when there are multiple sst files in the from & to
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
index 0b460b535617..023701f876c1 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
@@ -703,6 +703,15 @@ public List<SnapshotDiffJob> getSnapshotDiffList(final String volumeName,
       throw new IOException("Provided volume name " + volumeName +
           " or bucket name " + bucketName + " doesn't exist");
     }
+    OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl)
+        ozoneManager.getMetadataManager();
+    SnapshotChainManager snapshotChainManager =
+        omMetadataManager.getSnapshotChainManager();
+    String snapshotPath = volumeName + OM_KEY_PREFIX + bucketName;
+    if (snapshotChainManager.getSnapshotChainPath(snapshotPath) == null) {
+      throw new IOException("There are no snapshots under path " +
+          snapshotPath);
+    }
 
     return snapshotDiffManager.getSnapshotDiffJobList(
         volumeName, bucketName, jobStatus, listAll);
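At this point in the series both failure modes surface as exceptions on the client. A hedged sketch of how a caller would exercise the API as it stands here (client construction elided; volume and bucket names illustrative):

    // Sketch only: assumes an initialized OzoneClient named "client".
    ObjectStore store = client.getObjectStore();
    try {
      List<SnapshotDiffJob> jobs =
          store.listSnapshotDiffJobs("vol1", "buck1", "", true);
      jobs.forEach(System.out::println);
    } catch (IOException e) {
      // Thrown when the volume/bucket is missing or holds no snapshots.
      System.err.println("listSnapshotDiffJobs failed: " + e.getMessage());
    }

The next patch softens the second case: an empty path simply yields an empty list instead of an error.
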
From 8929948b79858a1b6cff8214cafd4d9c1c88e5db Mon Sep 17 00:00:00 2001
From: xBis7
Date: Mon, 12 Jun 2023 16:32:26 +0300
Subject: [PATCH 07/15] cleanup - parameterized test refactor

---
 .../hadoop/ozone/om/TestOmSnapshot.java       | 46 ++++++++--
 .../hadoop/ozone/om/OmSnapshotManager.java    | 13 +--
 .../om/snapshot/SnapshotDiffManager.java      | 23 +++--
 .../om/snapshot/TestSnapshotDiffManager.java  | 86 +++++++++----------
 4 files changed, 95 insertions(+), 73 deletions(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java
index 42c59f3f6d94..d5ad5c5b17d6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java
@@ -773,22 +773,50 @@ public void testListSnapshotDiffWithInvalidParameters()
     String volume = "vol-" + RandomStringUtils.randomNumeric(5);
     String bucket = "buck-" + RandomStringUtils.randomNumeric(5);
 
-    String errorMessage = "Provided volume name " + volume +
+    String volBucketErrorMessage = "Provided volume name " + volume +
         " or bucket name " + bucket + " doesn't exist";
-    // List SnapshotDiff jobs for a non-existent volume and bucket.
-    LambdaTestUtils.intercept(OMException.class,
-        errorMessage,
-        () -> store.listSnapshotDiffJobs(volume, bucket, "", true));
+
+    Exception volBucketEx = Assertions.assertThrows(OMException.class,
+        () -> store.listSnapshotDiffJobs(volume, bucket,
+            "", true));
+    Assertions.assertEquals(volBucketErrorMessage,
+        volBucketEx.getMessage());
 
     // Create the volume and the bucket.
     store.createVolume(volume);
     OzoneVolume ozVolume = store.getVolume(volume);
     ozVolume.createBucket(bucket);
 
-    // List SnapshotDiff jobs using a path that has no snapshots.
-    LambdaTestUtils.intercept(OMException.class,
-        "There are no snapshots",
-        () -> store.listSnapshotDiffJobs(volume, bucket, "", true));
+    Assertions.assertDoesNotThrow(() ->
+        store.listSnapshotDiffJobs(volume, bucket, "", true));
+
+    // There are no snapshots, response should be empty.
+    Assertions.assertTrue(store
+        .listSnapshotDiffJobs(volume, bucket,
+            "", true).isEmpty());
+
+    OzoneBucket ozBucket = ozVolume.getBucket(bucket);
+    // Create keys and take snapshots.
+    String key1 = "key-1-" + RandomStringUtils.randomNumeric(5);
+    createFileKey(ozBucket, key1);
+    String snap1 = "snap-1-" + RandomStringUtils.randomNumeric(5);
+    createSnapshot(volume, bucket, snap1);
+
+    String key2 = "key-2-" + RandomStringUtils.randomNumeric(5);
+    createFileKey(ozBucket, key2);
+    String snap2 = "snap-2-" + RandomStringUtils.randomNumeric(5);
+    createSnapshot(volume, bucket, snap2);
+
+    store.snapshotDiff(volume, bucket, snap1, snap2, null, 0, true);
+
+    String invalidStatus = "invalid";
+    String statusErrorMessage = "Invalid job status: " + invalidStatus;
+
+    Exception statusEx = Assertions.assertThrows(OMException.class,
+        () -> store.listSnapshotDiffJobs(volume, bucket,
+            invalidStatus, false));
+    Assertions.assertEquals(statusErrorMessage,
+        statusEx.getMessage());
   }
 
   /**
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
index 023701f876c1..b47c5d988151 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
@@ -696,10 +696,10 @@ public List<SnapshotDiffJob> getSnapshotDiffList(final String volumeName,
     String bucketKey = ozoneManager.getMetadataManager()
         .getBucketKey(volumeName, bucketName);
 
-    if (ozoneManager.getMetadataManager()
-        .getVolumeTable().getIfExist(volumeKey) == null ||
-        ozoneManager.getMetadataManager()
-        .getBucketTable().getIfExist(bucketKey) == null) {
+    if (!ozoneManager.getMetadataManager()
+        .getVolumeTable().isExist(volumeKey) ||
+        !ozoneManager.getMetadataManager()
+        .getBucketTable().isExist(bucketKey)) {
       throw new IOException("Provided volume name " + volumeName +
           " or bucket name " + bucketName + " doesn't exist");
     }
@@ -709,8 +709,9 @@ public List<SnapshotDiffJob> getSnapshotDiffList(final String volumeName,
         omMetadataManager.getSnapshotChainManager();
     String snapshotPath = volumeName + OM_KEY_PREFIX + bucketName;
     if (snapshotChainManager.getSnapshotChainPath(snapshotPath) == null) {
-      throw new IOException("There are no snapshots under path " +
-          snapshotPath);
+      // Return an empty ArrayList here to avoid
+      // unnecessarily iterating the SnapshotDiffJob table.
+      return new ArrayList<>();
     }
 
     return snapshotDiffManager.getSnapshotDiffJobList(
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java
index 48b3c1d742d9..d5b73fba102d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java
@@ -380,7 +380,7 @@ private Set<String> getSSTFileListForSnapshot(OmSnapshot snapshot,
 
   public List<SnapshotDiffJob> getSnapshotDiffJobList(
       String volumeName, String bucketName,
-      String jobStatus, boolean listAll) {
+      String jobStatus, boolean listAll) throws IOException {
     List<SnapshotDiffJob> jobList = new ArrayList<>();
 
     try (ClosableIterator<Map.Entry<String, SnapshotDiffJob>> iterator =
@@ -394,11 +394,8 @@ public List<SnapshotDiffJob> getSnapshotDiffJobList(
           continue;
         }
 
-        // First check if the provided JobStatus is valid,
-        // then check for matches with the provided JobStatus.
-        if (validateStringJobStatusExists(jobStatus) &&
-            Objects.equals(snapshotDiffJob.getStatus(),
-                JobStatus.valueOf(jobStatus.toUpperCase()))) {
+        if (Objects.equals(snapshotDiffJob.getStatus(),
+            getJobStatus(jobStatus))) {
           jobList.add(snapshotDiffJob);
         }
       }
@@ -407,14 +404,14 @@ public List<SnapshotDiffJob> getSnapshotDiffJobList(
     return jobList;
   }
 
-  private boolean validateStringJobStatusExists(String jobStatus) {
-    for (JobStatus status : JobStatus.values()) {
-      if (Objects.equals(status.name(),
-          jobStatus.toUpperCase())) {
-        return true;
-      }
-    }
-    return false;
-  }
+  private JobStatus getJobStatus(String jobStatus)
+      throws IOException {
+    try {
+      return JobStatus.valueOf(jobStatus.toUpperCase());
+    } catch (IllegalArgumentException ex) {
+      LOG.info(ex.toString());
+      throw new IOException("Invalid job status: " + jobStatus);
+    }
+  }
 
   public SnapshotDiffResponse getSnapshotDiffReport(
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java
index 2c1c67eb4b90..1bfa5e810477 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java
@@ -43,17 +43,19 @@
 import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
 import org.junit.jupiter.params.ParameterizedTest;
-import org.junit.jupiter.params.provider.ValueSource;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
 import org.rocksdb.RocksDBException;
 
 import java.io.File;
 import java.io.IOException;
 import java.util.List;
 import java.util.Map;
-import java.util.Objects;
 import java.util.UUID;
 import java.util.concurrent.ThreadLocalRandom;
+import java.util.stream.Stream;
 
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.apache.hadoop.ozone.om.OmSnapshotManager.DELIMITER;
@@ -112,9 +114,24 @@ public void setUp() {
     }
   }
 
+  private static Stream<Arguments> listSnapshotDiffJobsScenarios() {
+    return Stream.of(
+        Arguments.of("queued", false, false),
+        Arguments.of("done", false, false),
+        Arguments.of("in_progress", false, true),
+        Arguments.of("queued", true, true),
+        Arguments.of("done", true, true),
+        Arguments.of("in_progress", true, true),
+        Arguments.of("invalid", true, true),
+        Arguments.of("", true, true)
+    );
+  }
+
   @ParameterizedTest
-  @ValueSource(strings = {"queued", "done", "in_progress", ""})
-  public void testListSnapshotDiffJobs(String jobStatus)
+  @MethodSource("listSnapshotDiffJobsScenarios")
+  public void testListSnapshotDiffJobs(String jobStatus,
+                                       boolean listAll,
+                                       boolean containsJob)
       throws IOException {
     String fromSnapshotName = "snap-" + RandomStringUtils.randomNumeric(5);
     String toSnapshotName = "snap-" + RandomStringUtils.randomNumeric(5);
@@ -131,73 +148,52 @@ public void testListSnapshotDiffJobs(String jobStatus)
     // There are no jobs in the table, therefore
     // the response list should be empty.
     List<SnapshotDiffJob> jobList = snapshotDiffManager
-        .getSnapshotDiffJobList(VOLUME, BUCKET, jobStatus, false);
+        .getSnapshotDiffJobList(VOLUME, BUCKET, jobStatus, listAll);
     Assertions.assertTrue(jobList.isEmpty());
 
-    // Submit a job.
     SnapshotDiffResponse snapshotDiffResponse = snapshotDiffManager
         .getSnapshotDiffReport(VOLUME, BUCKET,
             fromSnapshotName, toSnapshotName, 0, 0, false);
-    // Response should be IN_PROGRESS.
     Assertions.assertEquals(JobStatus.IN_PROGRESS,
         snapshotDiffResponse.getJobStatus());
 
-    // Check snapDiffJobTable.
     diffJob = snapDiffJobTable.get(diffJobKey);
     Assertions.assertNotNull(diffJob);
-    // Status stored in the table should be IN_PROGRESS.
     Assertions.assertEquals(JobStatus.IN_PROGRESS, diffJob.getStatus());
 
-    // If the job is IN_PROGRESS, there should be a response.
-    // Otherwise, response list should be empty.
     jobList = snapshotDiffManager
-        .getSnapshotDiffJobList(VOLUME, BUCKET, jobStatus, false);
+        .getSnapshotDiffJobList(VOLUME, BUCKET, jobStatus, listAll);
 
-    if (Objects.equals(jobStatus, "in_progress")) {
+    // When listAll is true, jobStatus is ignored.
+    // If the job is IN_PROGRESS or listAll is used,
+    // there should be a response.
+    // Otherwise, response list should be empty.
+    if (containsJob) {
       Assertions.assertTrue(jobList.contains(diffJob));
     } else {
       Assertions.assertTrue(jobList.isEmpty());
    }
+  }
 
-    // If listAll is true, jobStatus is ignored.
-    jobList = snapshotDiffManager
-        .getSnapshotDiffJobList(VOLUME, BUCKET, jobStatus, true);
-    Assertions.assertTrue(jobList.contains(diffJob));
-
-    // Providing an invalid jobStatus results in an empty list.
-    jobList = snapshotDiffManager
-        .getSnapshotDiffJobList(VOLUME, BUCKET, "invalid", false);
-    Assertions.assertTrue(jobList.isEmpty());
-
-    // If listAll is true, jobStatus will be ignored even if it's invalid.
-    jobList = snapshotDiffManager
-        .getSnapshotDiffJobList(VOLUME, BUCKET, "invalid", true);
-    Assertions.assertTrue(jobList.size() > 0);
-
-    // Set up new snapshots to submit a second snapshot diff job.
-    String fromSnapshotName2 = "snap-" + RandomStringUtils.randomNumeric(5);
-    String toSnapshotName2 = "snap-" + RandomStringUtils.randomNumeric(5);
-    String fromSnapshotId2 = UUID.randomUUID().toString();
-    String toSnapshotId2 = UUID.randomUUID().toString();
-    String diffJobKey2 = fromSnapshotId2 + DELIMITER + toSnapshotId2;
+  @Test
+  public void testListSnapDiffWithInvalidStatus() throws IOException {
+    String fromSnapshotName = "snap-" + RandomStringUtils.randomNumeric(5);
+    String toSnapshotName = "snap-" + RandomStringUtils.randomNumeric(5);
+    String fromSnapshotId = UUID.randomUUID().toString();
+    String toSnapshotId = UUID.randomUUID().toString();
 
-    setUpKeysAndSnapshots(fromSnapshotName2, toSnapshotName2,
-        fromSnapshotId2, toSnapshotId2);
+    setUpKeysAndSnapshots(fromSnapshotName, toSnapshotName,
+        fromSnapshotId, toSnapshotId);
 
-    // Submit a second job.
     snapshotDiffManager.getSnapshotDiffReport(VOLUME, BUCKET,
-        fromSnapshotName2, toSnapshotName2, 0, 0, false);
-    SnapshotDiffJob diffJob2 = snapDiffJobTable.get(diffJobKey2);
-
-    jobList = snapshotDiffManager
-        .getSnapshotDiffJobList(VOLUME, BUCKET, jobStatus, true);
+        fromSnapshotName, toSnapshotName, 0, 0, false);
 
-    Assertions.assertTrue(jobList.contains(diffJob));
-    Assertions.assertTrue(jobList.contains(diffJob2));
-    Assertions.assertEquals(2, jobList.size());
+    // Invalid status, without listAll true, results in an exception.
+    Assertions.assertThrows(IOException.class, () -> snapshotDiffManager
+        .getSnapshotDiffJobList(VOLUME, BUCKET, "invalid", false));
   }
 
   private void setUpKeysAndSnapshots(String fromSnapshotName,
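The getJobStatus() helper introduced above leans on Enum.valueOf, which throws IllegalArgumentException for unknown names; wrapping that in a checked IOException is what turns "invalid" into a clean client-facing error. The pattern in isolation, as a runnable sketch (the enum constants here are assumptions based on the statuses the tests exercise):

    import java.io.IOException;

    public class JobStatusSketch {
      enum JobStatus { QUEUED, IN_PROGRESS, DONE }

      static JobStatus parse(String raw) throws IOException {
        try {
          return JobStatus.valueOf(raw.toUpperCase());
        } catch (IllegalArgumentException ex) {
          // Same shape as the patch: checked exception, readable message.
          throw new IOException("Invalid job status: " + raw);
        }
      }

      public static void main(String[] args) throws IOException {
        System.out.println(parse("in_progress")); // prints IN_PROGRESS
      }
    }
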
From 59c9eba9fbfa7e17b53ad3f6d0829fdcacd921a6 Mon Sep 17 00:00:00 2001
From: xBis7
Date: Tue, 13 Jun 2023 18:56:41 +0300
Subject: [PATCH 08/15] findbugs and rat

---
 hadoop-ozone/ozone-manager/null/snapDiff/_README.txt      | 2 --
 .../hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java | 8 ++++----
 2 files changed, 4 insertions(+), 6 deletions(-)
 delete mode 100644 hadoop-ozone/ozone-manager/null/snapDiff/_README.txt

diff --git a/hadoop-ozone/ozone-manager/null/snapDiff/_README.txt b/hadoop-ozone/ozone-manager/null/snapDiff/_README.txt
deleted file mode 100644
index ec44336b39c1..000000000000
--- a/hadoop-ozone/ozone-manager/null/snapDiff/_README.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-This directory is used to store SST files needed to generate snap diff report for a particular job.
-DO NOT add, change or delete any files in this directory unless you know what you are doing.
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java
index 4f8d32bc1258..7c565b73c654 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java
@@ -816,13 +816,13 @@ private void setupMocksForRunningASnapDiff(
     keyTableMap.put(BucketLayout.LEGACY,
         OmMetadataManagerImpl.KEY_TABLE);
 
-    for (BucketLayout layout : keyTableMap.keySet()) {
+    for (Map.Entry<BucketLayout, String> entry : keyTableMap.entrySet()) {
       Mockito.when(ozoneManager.getMetadataManager()
-          .getKeyTable(layout))
+          .getKeyTable(entry.getKey()))
           .thenReturn(Mockito.mock(Table.class));
       Mockito.when(ozoneManager.getMetadataManager()
-          .getKeyTable(layout).getName())
-          .thenReturn(keyTableMap.get(layout));
+          .getKeyTable(entry.getKey()).getName())
+          .thenReturn(entry.getValue());
     }
 
     UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
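The findbugs fix above is the classic wrong-map-iterator pattern (WMI_WRONG_MAP_ITERATOR): iterating entrySet() avoids a separate get() lookup per key. Reduced to a standalone example (map contents illustrative):

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class EntrySetSketch {
      public static void main(String[] args) {
        Map<String, String> keyTables = new LinkedHashMap<>();
        keyTables.put("FILE_SYSTEM_OPTIMIZED", "fileTable");
        keyTables.put("LEGACY", "keyTable");
        // One traversal, no extra lookup per key.
        for (Map.Entry<String, String> entry : keyTables.entrySet()) {
          System.out.println(entry.getKey() + " -> " + entry.getValue());
        }
      }
    }
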
From 031b21672efa5bd7fcd7d97d83996d1123b5bb3c Mon Sep 17 00:00:00 2001
From: xBis7
Date: Tue, 13 Jun 2023 19:54:36 +0300
Subject: [PATCH 09/15] Mocked SnapshotDiffManager generated null SnapDiff dir
 under OzoneManager package

---
 .../ozone/om/snapshot/TestSnapshotDiffManager.java | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java
index 7c565b73c654..84e98111eda0 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java
@@ -83,6 +83,7 @@
 import java.io.File;
 import java.io.IOException;
 import java.nio.file.Files;
+import java.nio.file.Path;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
@@ -161,8 +162,8 @@ private OmSnapshot getMockedOmSnapshot(String snapshot) {
     return omSnapshot;
   }
 
-  private SnapshotDiffManager getMockedSnapshotDiffManager(int cacheSize) {
-
+  private SnapshotDiffManager getMockedSnapshotDiffManager(int cacheSize)
+      throws IOException {
     Mockito.when(snapdiffDB.get()).thenReturn(rocksDB);
     Mockito.when(rocksDB.newIterator(snapdiffJobCFH))
         .thenReturn(jobTableIterator);
@@ -181,7 +182,11 @@ public OmSnapshot load(String key) {
     OMMetadataManager mockedMetadataManager =
         Mockito.mock(OMMetadataManager.class);
     RDBStore mockedRDBStore = Mockito.mock(RDBStore.class);
+    Path diffDir = Files.createTempDirectory("snapdiff_dir");
+    Mockito.when(mockedRDBStore.getSnapshotMetadataDir())
+        .thenReturn(diffDir.toString());
     Mockito.when(mockedMetadataManager.getStore()).thenReturn(mockedRDBStore);
+
     Mockito.when(ozoneManager.getMetadataManager())
         .thenReturn(mockedMetadataManager);
     SnapshotDiffManager snapshotDiffManager = Mockito.spy(
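The fix above points the mocked RDBStore at a real scratch directory, so the spied SnapshotDiffManager no longer materializes a literal null/snapDiff path inside the source tree (the directory deleted in the previous patch). The JDK idiom on its own, as a minimal sketch:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;

    public class TempDirSketch {
      public static void main(String[] args) throws IOException {
        // A unique, writable scratch directory for the test run.
        Path diffDir = Files.createTempDirectory("snapdiff_dir");
        System.out.println("scratch dir: " + diffDir);
        // Tests would normally clean this up afterwards.
        diffDir.toFile().deleteOnExit();
      }
    }
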
From 03281c30999f57a6aa87796328b4b86feec25ff5 Mon Sep 17 00:00:00 2001
From: xBis7
Date: Tue, 13 Jun 2023 19:55:42 +0300
Subject: [PATCH 10/15] extra line removed

---
 .../apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java | 1 -
 1 file changed, 1 deletion(-)

diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java
index 84e98111eda0..a3f27c5dd562 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDiffManager.java
@@ -186,7 +186,6 @@ public OmSnapshot load(String key) {
     Mockito.when(mockedRDBStore.getSnapshotMetadataDir())
         .thenReturn(diffDir.toString());
     Mockito.when(mockedMetadataManager.getStore()).thenReturn(mockedRDBStore);
-
     Mockito.when(ozoneManager.getMetadataManager())
         .thenReturn(mockedMetadataManager);
     SnapshotDiffManager snapshotDiffManager = Mockito.spy(

From 02fe7d52a9cd0c14d8ffdaee3434474dd562f188 Mon Sep 17 00:00:00 2001
From: xBis7
Date: Thu, 15 Jun 2023 13:40:11 +0300
Subject: [PATCH 11/15] simplify list diff job command

---
 .../hadoop/ozone/shell/snapshot/ListSnapshotDiffHandler.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/ListSnapshotDiffHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/ListSnapshotDiffHandler.java
index 4707209b8e7c..630645423ffc 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/ListSnapshotDiffHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/ListSnapshotDiffHandler.java
@@ -30,7 +30,8 @@
 /**
  * ozone sh snapshot listSnapshotDiff.
  */
-@CommandLine.Command(name = "listSnapshotDiff",
+@CommandLine.Command(name = "listDiff",
+    aliases = {"listDiffJob", "lsDiff", "lsDiffJob"},
     description = "List snapshotDiff jobs for a bucket.")
 public class ListSnapshotDiffHandler extends Handler {

From 43e3dcb0d4f8381764bd178e2d0fc47da4e8ddac Mon Sep 17 00:00:00 2001
From: xBis7
Date: Thu, 15 Jun 2023 13:43:38 +0300
Subject: [PATCH 12/15] update snapshot-sh.robot

---
 hadoop-ozone/dist/src/main/smoketest/snapshot/snapshot-sh.robot | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hadoop-ozone/dist/src/main/smoketest/snapshot/snapshot-sh.robot b/hadoop-ozone/dist/src/main/smoketest/snapshot/snapshot-sh.robot
index 1cbdc33dc6ce..9899e76cbea3 100644
--- a/hadoop-ozone/dist/src/main/smoketest/snapshot/snapshot-sh.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/snapshot/snapshot-sh.robot
@@ -54,7 +54,7 @@ Snapshot Diff
                 Should contain      ${result}       + ${KEY_THREE}
 
 List Snapshot Diff Jobs
-    ${result} =     Execute         ozone sh snapshot listSnapshotDiff /${VOLUME}/${BUCKET} --all
+    ${result} =     Execute         ozone sh snapshot listDiff /${VOLUME}/${BUCKET} --all
                 Should contain      ${result}       ${VOLUME}
                 Should contain      ${result}       ${BUCKET}
                 Should contain      ${result}       ${SNAPSHOT_ONE}
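With the rename and aliases above, the smoketest invocation is one of several equivalent spellings of the same command; for example (volume and bucket names illustrative):

    ozone sh snapshot listDiff /vol1/buck1 --all
    ozone sh snapshot lsDiff /vol1/buck1 --all

The --all flag lists every job regardless of status, matching the listAll parameter threaded through the client API.
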
From 69f4dfc40f6627feceecddaa9bcfdac22f7e2e69 Mon Sep 17 00:00:00 2001
From: xBis7
Date: Fri, 16 Jun 2023 17:31:29 +0300
Subject: [PATCH 13/15] OzoneSnapshotDiff class to hide internal fields

---
 .../hadoop/ozone/client/ObjectStore.java      |  3 +-
 .../ozone/client/OzoneSnapshotDiff.java       | 74 +++++++++++++++++++
 .../ozone/client/protocol/ClientProtocol.java | 10 +--
 .../hadoop/ozone/client/rpc/RpcClient.java    | 12 +--
 .../hadoop/ozone/om/SnapshotChainManager.java |  5 ++
 .../ozone/client/ClientProtocolStub.java      |  3 +-
 .../snapshot/ListSnapshotDiffHandler.java     |  4 +-
 7 files changed, 95 insertions(+), 16 deletions(-)
 create mode 100644 hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshotDiff.java

diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
index c7ac1ff82b02..75657e8b3915 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
@@ -40,7 +40,6 @@
 import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
 import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
 import org.apache.hadoop.ozone.om.helpers.S3VolumeContext;
-import org.apache.hadoop.ozone.om.helpers.SnapshotDiffJob;
 import org.apache.hadoop.ozone.om.helpers.TenantStateList;
 import org.apache.hadoop.ozone.om.helpers.TenantUserInfoValue;
 import org.apache.hadoop.ozone.om.helpers.TenantUserList;
@@ -692,7 +691,7 @@ public SnapshotDiffResponse snapshotDiff(String volumeName,
    * @return a list of SnapshotDiffJob objects
    * @throws IOException in case there is a failure while getting a response.
    */
-  public List<SnapshotDiffJob> listSnapshotDiffJobs(String volumeName,
+  public List<OzoneSnapshotDiff> listSnapshotDiffJobs(String volumeName,
       String bucketName,
       String jobStatus,
       boolean listAll)
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshotDiff.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshotDiff.java
new file mode 100644
index 000000000000..a6ef2dc4bb27
--- /dev/null
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneSnapshotDiff.java
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.client;
+
+import org.apache.hadoop.ozone.om.helpers.SnapshotDiffJob;
+import org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus;
+
+/**
+ * A class that encapsulates SnapshotDiffJob.
+ */
+public class OzoneSnapshotDiff {
+
+  private final String volumeName;
+  private final String bucketName;
+  private final String fromSnapshot;
+  private final String toSnapshot;
+  private final JobStatus jobStatus;
+
+  public OzoneSnapshotDiff(String volumeName,
+                           String bucketName,
+                           String fromSnapshot,
+                           String toSnapshot,
+                           JobStatus jobStatus) {
+    this.volumeName = volumeName;
+    this.bucketName = bucketName;
+    this.fromSnapshot = fromSnapshot;
+    this.toSnapshot = toSnapshot;
+    this.jobStatus = jobStatus;
+  }
+
+  public String getVolumeName() {
+    return volumeName;
+  }
+
+  public String getBucketName() {
+    return bucketName;
+  }
+
+  public String getFromSnapshot() {
+    return fromSnapshot;
+  }
+
+  public String getToSnapshot() {
+    return toSnapshot;
+  }
+
+  public JobStatus getJobStatus() {
+    return jobStatus;
+  }
+
+  public static OzoneSnapshotDiff fromSnapshotDiffJob(
+      SnapshotDiffJob snapshotDiffJob) {
+    return new OzoneSnapshotDiff(
+        snapshotDiffJob.getVolume(),
+        snapshotDiffJob.getBucket(),
+        snapshotDiffJob.getFromSnapshot(),
+        snapshotDiffJob.getToSnapshot(),
+        snapshotDiffJob.getStatus());
+  }
+}
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
index 2dbfe61f553b..76d1e5fdccc8 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
@@ -38,6 +38,7 @@
 import org.apache.hadoop.ozone.client.OzoneMultipartUploadList;
 import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts;
 import org.apache.hadoop.ozone.client.OzoneSnapshot;
+import org.apache.hadoop.ozone.client.OzoneSnapshotDiff;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.TenantArgs;
 import org.apache.hadoop.ozone.client.VolumeArgs;
@@ -55,7 +56,6 @@
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
 import org.apache.hadoop.ozone.om.helpers.S3VolumeContext;
-import org.apache.hadoop.ozone.om.helpers.SnapshotDiffJob;
 import org.apache.hadoop.ozone.om.helpers.TenantStateList;
 import org.apache.hadoop.ozone.om.helpers.TenantUserInfoValue;
 import org.apache.hadoop.ozone.om.helpers.TenantUserList;
@@ -1086,10 +1086,10 @@ SnapshotDiffResponse snapshotDiff(String volumeName, String bucketName,
    * @return a list of SnapshotDiffJob objects
    * @throws IOException in case there is a failure while getting a response.
    */
-  List<SnapshotDiffJob> listSnapshotDiffJobs(String volumeName,
-      String bucketName,
-      String jobStatus,
-      boolean listAll)
+  List<OzoneSnapshotDiff> listSnapshotDiffJobs(String volumeName,
+      String bucketName,
+      String jobStatus,
+      boolean listAll)
       throws IOException;
 
   /**
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index fc09ecab253c..c35f5f749477 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -89,6 +89,7 @@
 import org.apache.hadoop.ozone.client.OzoneMultipartUploadList;
 import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts;
 import org.apache.hadoop.ozone.client.OzoneSnapshot;
+import org.apache.hadoop.ozone.client.OzoneSnapshotDiff;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.TenantArgs;
 import org.apache.hadoop.ozone.client.VolumeArgs;
@@ -135,7 +136,6 @@
 import org.apache.hadoop.ozone.om.helpers.S3VolumeContext;
 import org.apache.hadoop.ozone.om.helpers.ServiceInfo;
 import org.apache.hadoop.ozone.om.helpers.ServiceInfoEx;
-import org.apache.hadoop.ozone.om.helpers.SnapshotDiffJob;
 import org.apache.hadoop.ozone.om.helpers.TenantStateList;
 import org.apache.hadoop.ozone.om.helpers.TenantUserInfoValue;
 import org.apache.hadoop.ozone.om.helpers.TenantUserList;
@@ -998,18 +998,20 @@ public SnapshotDiffResponse snapshotDiff(String volumeName,
   }
 
   @Override
-  public List<SnapshotDiffJob> listSnapshotDiffJobs(String volumeName,
+  public List<OzoneSnapshotDiff> listSnapshotDiffJobs(String volumeName,
       String bucketName,
       String jobStatus,
       boolean listAll)
       throws IOException {
-    Preconditions.checkArgument(Strings.isNotBlank(volumeName),
+    Preconditions.checkArgument(StringUtils.isNotBlank(volumeName),
         "volume can't be null or empty.");
-    Preconditions.checkArgument(Strings.isNotBlank(bucketName),
+    Preconditions.checkArgument(StringUtils.isNotBlank(bucketName),
         "bucket can't be null or empty.");
 
     return ozoneManagerClient.listSnapshotDiffJobs(
-        volumeName, bucketName, jobStatus, listAll);
+        volumeName, bucketName, jobStatus, listAll).stream()
+        .map(OzoneSnapshotDiff::fromSnapshotDiffJob)
+        .collect(Collectors.toList());
   }
 
   /**
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java
index c147c6fdc363..e4f66b6512f3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SnapshotChainManager.java
@@ -425,4 +425,9 @@ public UUID previousPathSnapshot(String snapshotPath,
   public String getTableKey(UUID snapshotId) {
     return snapshotIdToTableKey.get(snapshotId);
   }
+
+  public LinkedHashMap<UUID, SnapshotChainInfo> getSnapshotChainPath(
+      String path) {
+    return snapshotChainByPath.get(path);
+  }
 }
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java
index 5efc0fd47c0a..11888a7a99f2 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java
@@ -40,7 +40,6 @@
 import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
 import org.apache.hadoop.ozone.om.helpers.S3VolumeContext;
-import org.apache.hadoop.ozone.om.helpers.SnapshotDiffJob;
 import org.apache.hadoop.ozone.om.helpers.TenantStateList;
 import org.apache.hadoop.ozone.om.helpers.TenantUserInfoValue;
 import org.apache.hadoop.ozone.om.helpers.TenantUserList;
@@ -646,7 +645,7 @@ public SnapshotDiffResponse snapshotDiff(String volumeName,
   }
 
   @Override
-  public List<SnapshotDiffJob> listSnapshotDiffJobs(
+  public List<OzoneSnapshotDiff> listSnapshotDiffJobs(
       String volumeName, String bucketName,
       String jobStatus, boolean listAll) {
     return null;
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/ListSnapshotDiffHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/ListSnapshotDiffHandler.java
index 630645423ffc..168a0fab9ea9 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/ListSnapshotDiffHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/ListSnapshotDiffHandler.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.shell.snapshot;
 
 import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.om.helpers.SnapshotDiffJob;
+import org.apache.hadoop.ozone.client.OzoneSnapshotDiff;
 import org.apache.hadoop.ozone.shell.Handler;
 import org.apache.hadoop.ozone.shell.OzoneAddress;
 import org.apache.hadoop.ozone.shell.bucket.BucketUri;
@@ -61,7 +61,7 @@ protected void execute(OzoneClient client, OzoneAddress address)
     String volumeName = snapshotPath.getValue().getVolumeName();
     String bucketName = snapshotPath.getValue().getBucketName();
 
-    List<SnapshotDiffJob> jobList =
+    List<OzoneSnapshotDiff> jobList =
         client.getObjectStore().listSnapshotDiffJobs(
             volumeName, bucketName, jobStatus, listAll);
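Since OzoneSnapshotDiff exposes only read accessors, shell and application code can render jobs without depending on the OM-internal SnapshotDiffJob type. A consumption sketch (client setup elided; volume, bucket, and status values illustrative):

    // Sketch only: assumes an initialized ObjectStore named "store".
    List<OzoneSnapshotDiff> jobs =
        store.listSnapshotDiffJobs("vol1", "buck1", "in_progress", false);
    for (OzoneSnapshotDiff job : jobs) {
      System.out.printf("%s/%s: %s -> %s [%s]%n",
          job.getVolumeName(), job.getBucketName(),
          job.getFromSnapshot(), job.getToSnapshot(), job.getJobStatus());
    }
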
From d88ae0b7250a9431188e1d8742098a851ab657f3 Mon Sep 17 00:00:00 2001
From: xBis7
Date: Wed, 21 Jun 2023 13:15:38 +0300
Subject: [PATCH 14/15] javadoc cleanup

---
 .../hadoop/ozone/shell/snapshot/ListSnapshotDiffHandler.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/ListSnapshotDiffHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/ListSnapshotDiffHandler.java
index 168a0fab9ea9..d1e6d5fbd8ce 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/ListSnapshotDiffHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/ListSnapshotDiffHandler.java
@@ -28,7 +28,7 @@
 import java.util.List;
 
 /**
- * ozone sh snapshot listSnapshotDiff.
+ * ozone sh snapshot listDiff.
  */
 @CommandLine.Command(name = "listDiff",
     aliases = {"listDiffJob", "lsDiff", "lsDiffJob"},

From 29bca98aae1f2ecde151e81c6dfbd5a35bee5eee Mon Sep 17 00:00:00 2001
From: xBis7
Date: Wed, 21 Jun 2023 19:12:07 +0300
Subject: [PATCH 15/15] cleanup

---
 .../test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java
index 86e7da732afd..c95e768f3d01 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshot.java
@@ -940,7 +940,7 @@ public void testListSnapshotDiffWithInvalidParameters()
     String snap2 = "snap-2-" + RandomStringUtils.randomNumeric(5);
     createSnapshot(volume, bucket, snap2);
 
-    store.snapshotDiff(volume, bucket, snap1, snap2, null, 0, true);
+    store.snapshotDiff(volume, bucket, snap1, snap2, null, 0, true, false);
 
     String invalidStatus = "invalid";
     String statusErrorMessage = "Invalid job status: " + invalidStatus;
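Taken end to end, the series leaves the client API in the shape sketched below. The extra trailing boolean on snapshotDiff comes from the PATCH 15 call site; its parameter name is not visible in this excerpt, and the volume, bucket, and snapshot names are illustrative.

    // Hedged end-to-end sketch; assumes an initialized ObjectStore "store".
    store.createSnapshot("vol1", "buck1", "snap-1");
    store.createSnapshot("vol1", "buck1", "snap-2");
    // Submit a diff job (last boolean as in the PATCH 15 call site).
    store.snapshotDiff("vol1", "buck1", "snap-1", "snap-2", null, 0, true, false);
    // Inspect jobs for the bucket, filtered by status or listed in full.
    List<OzoneSnapshotDiff> running =
        store.listSnapshotDiffJobs("vol1", "buck1", "in_progress", false);
    List<OzoneSnapshotDiff> all =
        store.listSnapshotDiffJobs("vol1", "buck1", "", true);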