diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index 2a34580a954c..b757eb94a3ae 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -271,6 +271,7 @@ public static boolean isReadOnly( case AddAcl: case PurgeKeys: case RecoverTrash: + case DeleteOpenKeys: return false; default: LOG.error("CmdType {} is not categorized as readOnly or not.", cmdType); diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index e30f77597e97..3ec014630f56 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -61,6 +61,7 @@ enum Type { AllocateBlock = 37; DeleteKeys = 38; RenameKeys = 39; + DeleteOpenKeys = 40; InitiateMultiPartUpload = 45; CommitMultiPartUpload = 46; @@ -128,6 +129,7 @@ message OMRequest { optional AllocateBlockRequest allocateBlockRequest = 37; optional DeleteKeysRequest deleteKeysRequest = 38; optional RenameKeysRequest renameKeysRequest = 39; + optional DeleteOpenKeysRequest deleteOpenKeysRequest = 40; optional MultipartInfoInitiateRequest initiateMultiPartUploadRequest = 45; optional MultipartCommitUploadPartRequest commitMultiPartUploadRequest = 46; @@ -929,6 +931,21 @@ message PurgeKeysResponse { } +message DeleteOpenKeysRequest { + repeated OpenKeyBucket openKeysPerBucket = 1; +} + +message OpenKeyBucket { + required string volumeName = 1; + required string bucketName = 2; + repeated OpenKey keys = 3; +} + +message OpenKey { + required string name = 1; + required uint64 clientID = 2; +} + message OMTokenProto { enum Type { DELEGATION_TOKEN = 1; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java index cd6566292412..7e79fe7b3dec 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java @@ -73,6 +73,10 @@ public class OMMetrics { private @Metric MutableCounterLong numLookupFile; private @Metric MutableCounterLong numListStatus; + private @Metric MutableCounterLong numOpenKeyDeleteRequests; + private @Metric MutableCounterLong numOpenKeysSubmittedForDeletion; + private @Metric MutableCounterLong numOpenKeysDeleted; + // Failure Metrics private @Metric MutableCounterLong numVolumeCreateFails; private @Metric MutableCounterLong numVolumeUpdateFails; @@ -103,6 +107,7 @@ public class OMMetrics { private @Metric MutableCounterLong numAbortMultipartUploadFails; private @Metric MutableCounterLong numListMultipartUploadParts; private @Metric MutableCounterLong numListMultipartUploadPartFails; + private @Metric MutableCounterLong numOpenKeyDeleteRequestFails; private @Metric MutableCounterLong numGetFileStatusFails; private @Metric MutableCounterLong numCreateDirectoryFails; @@ -539,6 +544,22 @@ public void incNumCheckpointFails() { numCheckpointFails.incr(); } + public void incNumOpenKeyDeleteRequests() { + numOpenKeyDeleteRequests.incr(); + } + + public void incNumOpenKeysSubmittedForDeletion(long amount) { + numOpenKeysSubmittedForDeletion.incr(amount); + } + + public void incNumOpenKeysDeleted() { + numOpenKeysDeleted.incr(); + } + + public void incNumOpenKeyDeleteRequestFails() { + numOpenKeyDeleteRequestFails.incr(); + } + @VisibleForTesting public long getNumVolumeCreates() { return numVolumeCreates.value(); @@ -795,6 +816,22 @@ public long getLastCheckpointStreamingTimeTaken() { return lastCheckpointStreamingTimeTaken.value(); } + public long getNumOpenKeyDeleteRequests() { + return numOpenKeyDeleteRequests.value(); + } + + public long 
getNumOpenKeysSubmittedForDeletion() { + return numOpenKeysSubmittedForDeletion.value(); + } + + public long getNumOpenKeysDeleted() { + return numOpenKeysDeleted.value(); + } + + public long getNumOpenKeyDeleteRequestFails() { + return numOpenKeyDeleteRequestFails.value(); + } + public void unRegister() { MetricsSystem ms = DefaultMetricsSystem.instance(); ms.unregisterSource(SOURCE_NAME); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java index e27b7e116c70..43d9c2ddbb0c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java @@ -23,8 +23,6 @@ import com.google.common.base.Optional; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; @@ -145,15 +143,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, keyName)), new CacheValue<>(Optional.absent(), trxnLogIndex)); - long quotaReleased = 0; - int keyFactor = omKeyInfo.getFactor().getNumber(); omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName); omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); - OmKeyLocationInfoGroup keyLocationGroup = - omKeyInfo.getLatestVersionLocations(); - for(OmKeyLocationInfo locationInfo: keyLocationGroup.getLocationList()){ - quotaReleased += locationInfo.getLength() * keyFactor; - } + + long quotaReleased = sumBlockLengths(omKeyInfo); // update usedBytes atomically. 
omVolumeArgs.getUsedBytes().add(-quotaReleased); omBucketInfo.getUsedBytes().add(-quotaReleased); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java index e24f4e2bffb0..d204f532e00a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java @@ -32,6 +32,7 @@ import com.google.common.base.Optional; import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.PrefixManager; @@ -575,7 +576,8 @@ protected boolean checkDirectoryAlreadyExists(String volumeName, } /** - * Return volume info for the specified volume. + * Return volume info for the specified volume. If the volume does not + * exist, returns {@code null}. * @param omMetadataManager * @param volume * @return OmVolumeArgs @@ -583,9 +585,34 @@ protected boolean checkDirectoryAlreadyExists(String volumeName, */ protected OmVolumeArgs getVolumeInfo(OMMetadataManager omMetadataManager, String volume) { - return omMetadataManager.getVolumeTable().getCacheValue( - new CacheKey<>(omMetadataManager.getVolumeKey(volume))) - .getCacheValue(); + + OmVolumeArgs volumeArgs = null; + + CacheValue value = + omMetadataManager.getVolumeTable().getCacheValue( + new CacheKey<>(omMetadataManager.getVolumeKey(volume))); + + if (value != null) { + volumeArgs = value.getCacheValue(); + } + + return volumeArgs; + } + + /** + * @return the number of bytes used by blocks pointed to by {@code omKeyInfo}. 
+ */ + protected static long sumBlockLengths(OmKeyInfo omKeyInfo) { + long bytesUsed = 0; + int keyFactor = omKeyInfo.getFactor().getNumber(); + OmKeyLocationInfoGroup keyLocationGroup = + omKeyInfo.getLatestVersionLocations(); + + for(OmKeyLocationInfo locationInfo: keyLocationGroup.getLocationList()) { + bytesUsed += locationInfo.getLength() * keyFactor; + } + + return bytesUsed; } /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java index 907b5013b6c1..c6e7b9bafa37 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java @@ -29,8 +29,6 @@ import org.apache.hadoop.ozone.om.ResolvedBucket; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; @@ -167,12 +165,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omKeyInfo.getKeyName())), new CacheValue<>(Optional.absent(), trxnLogIndex)); - int keyFactor = omKeyInfo.getFactor().getNumber(); - OmKeyLocationInfoGroup keyLocationGroup = - omKeyInfo.getLatestVersionLocations(); - for(OmKeyLocationInfo locationInfo: keyLocationGroup.getLocationList()){ - quotaReleased += locationInfo.getLength() * keyFactor; - } + omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); + quotaReleased += sumBlockLengths(omKeyInfo); } // update usedBytes atomically. 
omVolumeArgs.getUsedBytes().add(-quotaReleased); @@ -182,7 +176,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, .setDeleteKeysResponse(DeleteKeysResponse.newBuilder() .setStatus(deleteStatus).setUnDeletedKeys(unDeletedKeys)) .setStatus(deleteStatus ? OK : PARTIAL_DELETE) - .setSuccess(deleteStatus).build(), omKeyInfoList, trxnLogIndex, + .setSuccess(deleteStatus).build(), omKeyInfoList, ozoneManager.isRatisEnabled(), omVolumeArgs, omBucketInfo); result = Result.SUCCESS; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/AbstractOMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/AbstractOMKeyDeleteResponse.java new file mode 100644 index 000000000000..9392f7e8f320 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/AbstractOMKeyDeleteResponse.java @@ -0,0 +1,126 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response.key; + +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.om.response.CleanupTableInfo; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMResponse; +import org.apache.hadoop.hdds.utils.db.BatchOperation; + +import java.io.IOException; +import javax.annotation.Nullable; +import javax.annotation.Nonnull; + +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; + +/** + * Base class for responses that need to move keys from an arbitrary table to + * the deleted table. + */ +@CleanupTableInfo(cleanupTables = {DELETED_TABLE}) +public abstract class AbstractOMKeyDeleteResponse extends OMClientResponse { + + private boolean isRatisEnabled; + + public AbstractOMKeyDeleteResponse( + @Nonnull OMResponse omResponse, boolean isRatisEnabled) { + + super(omResponse); + this.isRatisEnabled = isRatisEnabled; + } + + /** + * For when the request is not successful. + * For a successful request, the other constructor should be used. 
+ */ + public AbstractOMKeyDeleteResponse(@Nonnull OMResponse omResponse) { + super(omResponse); + checkStatusNotOK(); + } + + /** + * Adds the operation of deleting the {@code keyName omKeyInfo} pair from + * {@code fromTable} to the batch operation {@code batchOperation}. The + * batch operation is not committed, so no changes are persisted to disk. + * The log transaction index used will be retrieved by calling + * {@link OmKeyInfo#getUpdateID} on {@code omKeyInfo}. + */ + protected void addDeletionToBatch( + OMMetadataManager omMetadataManager, + BatchOperation batchOperation, + Table fromTable, + String keyName, + OmKeyInfo omKeyInfo) throws IOException { + + // For OmResponse with failure, this should do nothing. This method is + // not called in failure scenario in OM code. + fromTable.deleteWithBatch(batchOperation, keyName); + + // If Key is not empty add this to delete table. + if (!isKeyEmpty(omKeyInfo)) { + // If a deleted key is put in the table where a key with the same + // name already exists, then the old deleted key information would be + // lost. To avoid this, first check if a key with same name exists. + // deletedTable in OM Metadata stores . + // The RepeatedOmKeyInfo is the structure that allows us to store a + // list of OmKeyInfo that can be tied to same key name. For a keyName + // if RepeatedOMKeyInfo structure is null, we create a new instance, + // if it is not null, then we simply add to the list and store this + // instance in deletedTable. 
+ RepeatedOmKeyInfo repeatedOmKeyInfo = + omMetadataManager.getDeletedTable().get(keyName); + repeatedOmKeyInfo = OmUtils.prepareKeyForDelete( + omKeyInfo, repeatedOmKeyInfo, omKeyInfo.getUpdateID(), + isRatisEnabled); + omMetadataManager.getDeletedTable().putWithBatch( + batchOperation, keyName, repeatedOmKeyInfo); + } + } + + @Override + public abstract void addToDBBatch(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException; + + /** + * Check if the key is empty or not. Key will be empty if it does not have + * blocks. + * + * @param keyInfo + * @return if empty true, else false. + */ + private boolean isKeyEmpty(@Nullable OmKeyInfo keyInfo) { + if (keyInfo == null) { + return true; + } + for (OmKeyLocationInfoGroup keyLocationList : keyInfo + .getKeyLocationVersions()) { + if (keyLocationList.getLocationListCount() != 0) { + return false; + } + } + return true; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java index 128b657a6435..f9c6d185f398 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java @@ -18,21 +18,17 @@ package org.apache.hadoop.ozone.om.response.key; -import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.response.CleanupTableInfo; -import 
org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMResponse; import org.apache.hadoop.hdds.utils.db.BatchOperation; import java.io.IOException; -import javax.annotation.Nullable; import javax.annotation.Nonnull; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; @@ -42,19 +38,17 @@ * Response for DeleteKey request. */ @CleanupTableInfo(cleanupTables = {KEY_TABLE, DELETED_TABLE}) -public class OMKeyDeleteResponse extends OMClientResponse { +public class OMKeyDeleteResponse extends AbstractOMKeyDeleteResponse { private OmKeyInfo omKeyInfo; - private boolean isRatisEnabled; private OmVolumeArgs omVolumeArgs; private OmBucketInfo omBucketInfo; public OMKeyDeleteResponse(@Nonnull OMResponse omResponse, @Nonnull OmKeyInfo omKeyInfo, boolean isRatisEnabled, @Nonnull OmVolumeArgs omVolumeArgs, @Nonnull OmBucketInfo omBucketInfo) { - super(omResponse); + super(omResponse, isRatisEnabled); this.omKeyInfo = omKeyInfo; - this.isRatisEnabled = isRatisEnabled; this.omVolumeArgs = omVolumeArgs; this.omBucketInfo = omBucketInfo; } @@ -65,7 +59,6 @@ public OMKeyDeleteResponse(@Nonnull OMResponse omResponse, */ public OMKeyDeleteResponse(@Nonnull OMResponse omResponse) { super(omResponse); - checkStatusNotOK(); } @Override @@ -76,55 +69,17 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, // not called in failure scenario in OM code. String ozoneKey = omMetadataManager.getOzoneKey(omKeyInfo.getVolumeName(), omKeyInfo.getBucketName(), omKeyInfo.getKeyName()); - omMetadataManager.getKeyTable().deleteWithBatch(batchOperation, ozoneKey); - - // If Key is not empty add this to delete table. - if (!isKeyEmpty(omKeyInfo)) { - // If a deleted key is put in the table where a key with the same - // name already exists, then the old deleted key information would be - // lost. To avoid this, first check if a key with same name exists. - // deletedTable in OM Metadata stores . 
- // The RepeatedOmKeyInfo is the structure that allows us to store a - // list of OmKeyInfo that can be tied to same key name. For a keyName - // if RepeatedOMKeyInfo structure is null, we create a new instance, - // if it is not null, then we simply add to the list and store this - // instance in deletedTable. - RepeatedOmKeyInfo repeatedOmKeyInfo = - omMetadataManager.getDeletedTable().get(ozoneKey); - repeatedOmKeyInfo = OmUtils.prepareKeyForDelete( - omKeyInfo, repeatedOmKeyInfo, omKeyInfo.getUpdateID(), - isRatisEnabled); - omMetadataManager.getDeletedTable().putWithBatch(batchOperation, - ozoneKey, repeatedOmKeyInfo); - - // update volume usedBytes. - omMetadataManager.getVolumeTable().putWithBatch(batchOperation, - omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()), - omVolumeArgs); - // update bucket usedBytes. - omMetadataManager.getBucketTable().putWithBatch(batchOperation, - omMetadataManager.getBucketKey(omVolumeArgs.getVolume(), - omBucketInfo.getBucketName()), omBucketInfo); - } - } + Table keyTable = omMetadataManager.getKeyTable(); + addDeletionToBatch(omMetadataManager, batchOperation, keyTable, ozoneKey, + omKeyInfo); - /** - * Check if the key is empty or not. Key will be empty if it does not have - * blocks. - * - * @param keyInfo - * @return if empty true, else false. - */ - private boolean isKeyEmpty(@Nullable OmKeyInfo keyInfo) { - if (keyInfo == null) { - return true; - } - for (OmKeyLocationInfoGroup keyLocationList : keyInfo - .getKeyLocationVersions()) { - if (keyLocationList.getLocationListCount() != 0) { - return false; - } - } - return true; + // update volume usedBytes. + omMetadataManager.getVolumeTable().putWithBatch(batchOperation, + omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()), + omVolumeArgs); + // update bucket usedBytes. 
+ omMetadataManager.getBucketTable().putWithBatch(batchOperation, + omMetadataManager.getBucketKey(omVolumeArgs.getVolume(), + omBucketInfo.getBucketName()), omBucketInfo); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java index 228e5a6b8ea8..bf1a8ddfe387 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java @@ -19,14 +19,12 @@ package org.apache.hadoop.ozone.om.response.key; import org.apache.hadoop.hdds.utils.db.BatchOperation; -import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.response.CleanupTableInfo; -import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import javax.annotation.Nonnull; @@ -41,21 +39,17 @@ * Response for DeleteKey request. 
*/ @CleanupTableInfo(cleanupTables = KEY_TABLE) -public class OMKeysDeleteResponse extends OMClientResponse { +public class OMKeysDeleteResponse extends AbstractOMKeyDeleteResponse { private List omKeyInfoList; - private boolean isRatisEnabled; - private long trxnLogIndex; private OmVolumeArgs omVolumeArgs; private OmBucketInfo omBucketInfo; public OMKeysDeleteResponse(@Nonnull OMResponse omResponse, - @Nonnull List keyDeleteList, long trxnLogIndex, + @Nonnull List keyDeleteList, boolean isRatisEnabled, @Nonnull OmVolumeArgs omVolumeArgs, @Nonnull OmBucketInfo omBucketInfo) { - super(omResponse); + super(omResponse, isRatisEnabled); this.omKeyInfoList = keyDeleteList; - this.isRatisEnabled = isRatisEnabled; - this.trxnLogIndex = trxnLogIndex; this.omVolumeArgs = omVolumeArgs; this.omBucketInfo = omBucketInfo; } @@ -66,7 +60,6 @@ public OMKeysDeleteResponse(@Nonnull OMResponse omResponse, */ public OMKeysDeleteResponse(@Nonnull OMResponse omResponse) { super(omResponse); - checkStatusNotOK(); } public void checkAndUpdateDB(OMMetadataManager omMetadataManager, @@ -80,10 +73,10 @@ public void checkAndUpdateDB(OMMetadataManager omMetadataManager, @Override public void addToDBBatch(OMMetadataManager omMetadataManager, BatchOperation batchOperation) throws IOException { - String volumeName = ""; String bucketName = ""; String keyName = ""; + Table keyTable = omMetadataManager.getKeyTable(); for (OmKeyInfo omKeyInfo : omKeyInfoList) { volumeName = omKeyInfo.getVolumeName(); bucketName = omKeyInfo.getBucketName(); @@ -92,25 +85,8 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, String deleteKey = omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); - omMetadataManager.getKeyTable().deleteWithBatch(batchOperation, - deleteKey); - - // If a deleted key is put in the table where a key with the same - // name already exists, then the old deleted key information would - // be lost. To avoid this, first check if a key with same name - // exists. 
deletedTable in OM Metadata stores . The RepeatedOmKeyInfo is the structure that - // allows us to store a list of OmKeyInfo that can be tied to same - // key name. For a keyName if RepeatedOMKeyInfo structure is null, - // we create a new instance, if it is not null, then we simply add - // to the list and store this instance in deletedTable. - RepeatedOmKeyInfo repeatedOmKeyInfo = - omMetadataManager.getDeletedTable().get(deleteKey); - repeatedOmKeyInfo = OmUtils.prepareKeyForDelete( - omKeyInfo, repeatedOmKeyInfo, trxnLogIndex, - isRatisEnabled); - omMetadataManager.getDeletedTable().putWithBatch(batchOperation, - deleteKey, repeatedOmKeyInfo); + addDeletionToBatch(omMetadataManager, batchOperation, keyTable, + deleteKey, omKeyInfo); } // update volume usedBytes. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMOpenKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMOpenKeysDeleteRequest.java new file mode 100644 index 000000000000..074850018aba --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMOpenKeysDeleteRequest.java @@ -0,0 +1,192 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.om.response.key; + +import com.google.common.base.Optional; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.om.request.key.OMKeyRequest; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OpenKeyBucket; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OpenKey; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Map; +import java.util.HashMap; +import java.util.List; + +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; + +/** + * Handles requests to move open keys from the open key table to the delete + * table. Modifies the open key table cache only, and no underlying databases. + * The delete table cache does not need to be modified since it is not used + * for client response validation. 
+ */ +public class OMOpenKeysDeleteRequest extends OMKeyRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(OMOpenKeysDeleteRequest.class); + + public OMOpenKeysDeleteRequest(OMRequest omRequest) { + super(omRequest); + } + + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { + + OMMetrics omMetrics = ozoneManager.getMetrics(); + omMetrics.incNumOpenKeyDeleteRequests(); + + OzoneManagerProtocolProtos.DeleteOpenKeysRequest deleteOpenKeysRequest = + getOmRequest().getDeleteOpenKeysRequest(); + + List submittedOpenKeyBuckets = + deleteOpenKeysRequest.getOpenKeysPerBucketList(); + + long numSubmittedOpenKeys = 0; + for (OpenKeyBucket keyBucket: submittedOpenKeyBuckets) { + numSubmittedOpenKeys += keyBucket.getKeysCount(); + } + + LOG.debug("{} open keys submitted for deletion.", numSubmittedOpenKeys); + omMetrics.incNumOpenKeysSubmittedForDeletion(numSubmittedOpenKeys); + + OzoneManagerProtocolProtos.OMResponse.Builder omResponse = + OmResponseUtil.getOMResponseBuilder(getOmRequest()); + + IOException exception = null; + OMClientResponse omClientResponse = null; + Result result = null; + Map deletedOpenKeys = new HashMap<>(); + + try { + for (OpenKeyBucket openKeyBucket: submittedOpenKeyBuckets) { + // For each bucket where keys will be deleted from, + // get its bucket lock and update the cache accordingly. 
+ Map deleted = updateOpenKeyTableCache(ozoneManager, + trxnLogIndex, openKeyBucket); + + deletedOpenKeys.putAll(deleted); + } + + omClientResponse = new OMOpenKeysDeleteResponse(omResponse.build(), + deletedOpenKeys, ozoneManager.isRatisEnabled()); + + result = Result.SUCCESS; + } catch (IOException ex) { + result = Result.FAILURE; + exception = ex; + omClientResponse = + new OMOpenKeysDeleteResponse(createErrorOMResponse(omResponse, + exception)); + } finally { + addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, + omDoubleBufferHelper); + } + + processResults(omMetrics, numSubmittedOpenKeys, deletedOpenKeys.size(), + deleteOpenKeysRequest, result); + + return omClientResponse; + } + + private void processResults(OMMetrics omMetrics, long numSubmittedOpenKeys, + long numDeletedOpenKeys, + OzoneManagerProtocolProtos.DeleteOpenKeysRequest request, Result result) { + + switch (result) { + case SUCCESS: + LOG.debug("Deleted {} open keys out of {} submitted keys.", + numDeletedOpenKeys, numSubmittedOpenKeys); + break; + case FAILURE: + omMetrics.incNumOpenKeyDeleteRequestFails(); + LOG.error("Failure occurred while trying to delete {} submitted open " + + "keys.", numSubmittedOpenKeys); + break; + default: + LOG.error("Unrecognized result for OMOpenKeysDeleteRequest: {}", + request); + } + } + + private Map updateOpenKeyTableCache( + OzoneManager ozoneManager, long trxnLogIndex, OpenKeyBucket keysPerBucket) + throws IOException { + + Map deletedKeys = new HashMap<>(); + + boolean acquiredLock = false; + String volumeName = keysPerBucket.getVolumeName(); + String bucketName = keysPerBucket.getBucketName(); + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + + try { + acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, + volumeName, bucketName); + + for (OpenKey key: keysPerBucket.getKeysList()) { + String fullKeyName = omMetadataManager.getOpenKey(volumeName, + bucketName, key.getName(), key.getClientID()); + + // If 
an open key is no longer present in the table, it was committed + // and should not be deleted. + OmKeyInfo omKeyInfo = + omMetadataManager.getOpenKeyTable().get(fullKeyName); + if (omKeyInfo != null) { + // Set the UpdateID to current transactionLogIndex + omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); + deletedKeys.put(fullKeyName, omKeyInfo); + + // Update table cache. + omMetadataManager.getOpenKeyTable().addCacheEntry( + new CacheKey<>(fullKeyName), + new CacheValue<>(Optional.absent(), trxnLogIndex)); + + ozoneManager.getMetrics().incNumOpenKeysDeleted(); + LOG.debug("Open key {} deleted.", fullKeyName); + + // No need to add cache entries to delete table. As delete table will + // be used by DeleteKeyService only, not used for any client response + // validation, so we don't need to add to cache. + } else { + LOG.debug("Key {} was not deleted, as it was not " + + "found in the open key table.", fullKeyName); + } + } + } finally { + if (acquiredLock) { + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, + bucketName); + } + } + + return deletedKeys; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMOpenKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMOpenKeysDeleteResponse.java new file mode 100644 index 000000000000..00157ca91128 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMOpenKeysDeleteResponse.java @@ -0,0 +1,72 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.om.response.key; + +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.response.CleanupTableInfo; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; + +import javax.annotation.Nonnull; +import java.io.IOException; +import java.util.Map; + +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE; + +/** + * Handles responses to move open keys from the open key table to the delete + * table. Modifies the open key table and delete table databases. + */ +@CleanupTableInfo(cleanupTables = {OPEN_KEY_TABLE, DELETED_TABLE}) +public class OMOpenKeysDeleteResponse extends AbstractOMKeyDeleteResponse { + + private Map keysToDelete; + + public OMOpenKeysDeleteResponse( + @Nonnull OzoneManagerProtocolProtos.OMResponse omResponse, + @Nonnull Map keysToDelete, boolean isRatisEnabled) { + + super(omResponse, isRatisEnabled); + this.keysToDelete = keysToDelete; + } + + /** + * For when the request is not successful. + * For a successful request, the other constructor should be used. 
+ */ + public OMOpenKeysDeleteResponse( + @Nonnull OzoneManagerProtocolProtos.OMResponse omResponse) { + + super(omResponse); + } + + @Override + public void addToDBBatch(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + + Table openKeyTable = omMetadataManager.getOpenKeyTable(); + + for (Map.Entry keyInfoPair: keysToDelete.entrySet()) { + addDeletionToBatch(omMetadataManager, batchOperation, openKeyTable, + keyInfoPair.getKey(), keyInfoPair.getValue()); + } + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java index ae21e2d71c95..1be303c50a4b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java @@ -26,12 +26,16 @@ import java.util.UUID; import com.google.common.base.Optional; +import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; @@ -201,20 +205,28 @@ public static void addKeyToTableCache(String volumeName, keyName)), new CacheValue<>(Optional.of(omKeyInfo), 1L)); } - private OmKeyInfo createKeyInfo(String volumeName, String bucketName, - String keyName, 
HddsProtos.ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor) { - return new OmKeyInfo.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(0, new ArrayList<>()))) - .setCreationTime(Time.now()) - .setModificationTime(Time.now()) - .setDataSize(1000L) - .setReplicationType(replicationType) - .setReplicationFactor(replicationFactor).build(); + /** + * Adds one block to {@code keyInfo} with the provided size and offset. + */ + public static void addKeyLocationInfo( + OmKeyInfo keyInfo, long offset, long keyLength) throws IOException { + + Pipeline pipeline = Pipeline.newBuilder() + .setState(Pipeline.PipelineState.OPEN) + .setId(PipelineID.randomId()) + .setType(keyInfo.getType()) + .setFactor(keyInfo.getFactor()) + .setNodes(new ArrayList<>()) + .build(); + + OmKeyLocationInfo locationInfo = new OmKeyLocationInfo.Builder() + .setBlockID(new BlockID(100L, 1000L)) + .setOffset(offset) + .setLength(keyLength) + .setPipeline(pipeline) + .build(); + + keyInfo.appendNewBlocks(Collections.singletonList(locationInfo), false); } /** diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMOpenKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMOpenKeysDeleteRequest.java new file mode 100644 index 000000000000..7eba6cafac75 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMOpenKeysDeleteRequest.java @@ -0,0 +1,419 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.key;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.UUID;
+import java.util.Random;
+
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.om.OMMetrics;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.request.key.OMOpenKeysDeleteRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.junit.Assert;
+import org.junit.Test;
+import com.google.common.base.Optional;
+
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .Status;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .DeleteOpenKeysRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .OpenKey;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .OpenKeyBucket;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+    .OMRequest;
+
+/**
+ * Tests OMOpenKeysDeleteRequest.
+ */
+public class TestOMOpenKeysDeleteRequest extends TestOMKeyRequest {
+  /**
+   * Tests removing keys from the open key table cache that never existed there.
+   * The operation should complete without errors.
+   *

+   * This simulates a run of the open key cleanup service where a set of
+   * expired open keys are identified and passed to the request, but before
+   * the request can process them, those keys are committed and removed from
+   * the open key table.
+   * @throws Exception
+   */
+  @Test
+  public void testDeleteOpenKeysNotInTable() throws Exception {
+    OpenKeyBucket openKeys = makeOpenKeys(volumeName, bucketName, 5);
+    deleteOpenKeysFromCache(openKeys);
+    assertNotInOpenKeyTable(openKeys);
+  }
+
+  /**
+   * Tests adding multiple keys to the open key table, and updating the table
+   * cache to only remove some of them.
+   * Keys not removed should still be present in the open key table.
+   * Mixes which keys will be kept and deleted among different volumes and
+   * buckets.
+   * @throws Exception
+   */
+  @Test
+  public void testDeleteSubsetOfOpenKeys() throws Exception {
+    // Constant values must match their names; the volume2/bucket1 values
+    // were previously swapped, which made the test misleading to read.
+    final String volume1 = "volume1";
+    final String volume2 = "volume2";
+    final String bucket1 = "bucket1";
+    final String bucket2 = "bucket2";
+
+    OpenKeyBucket v1b1KeysToDelete = makeOpenKeys(volume1, bucket1, 3);
+    OpenKeyBucket v1b1KeysToKeep = makeOpenKeys(volume1, bucket1, 3);
+
+    OpenKeyBucket v1b2KeysToDelete = makeOpenKeys(volume1, bucket2, 3);
+    OpenKeyBucket v1b2KeysToKeep = makeOpenKeys(volume1, bucket2, 3);
+
+    OpenKeyBucket v2b2KeysToDelete = makeOpenKeys(volume2, bucket2, 3);
+    OpenKeyBucket v2b2KeysToKeep = makeOpenKeys(volume2, bucket2, 3);
+
+    addToOpenKeyTableDB(
+        v1b1KeysToKeep,
+        v1b2KeysToKeep,
+        v2b2KeysToKeep,
+        v1b1KeysToDelete,
+        v1b2KeysToDelete,
+        v2b2KeysToDelete
+    );
+
+    deleteOpenKeysFromCache(
+        v1b1KeysToDelete,
+        v1b2KeysToDelete,
+        v2b2KeysToDelete
+    );
+
+    assertNotInOpenKeyTable(
+        v1b1KeysToDelete,
+        v1b2KeysToDelete,
+        v2b2KeysToDelete
+    );
+
+    assertInOpenKeyTable(
+        v1b1KeysToKeep,
+        v1b2KeysToKeep,
+        v2b2KeysToKeep
+    );
+  }
+
+  /**
+   * Tests removing keys from the open key table cache that have the same
+   * name, but different client IDs.
+   * @throws Exception
+   */
+  @Test
+  public void testDeleteSameKeyNameDifferentClient() throws Exception {
+    OpenKeyBucket keysToKeep =
+        makeOpenKeys(volumeName, bucketName, keyName, 3);
+    OpenKeyBucket keysToDelete =
+        makeOpenKeys(volumeName, bucketName, keyName, 3);
+
+    addToOpenKeyTableDB(keysToKeep, keysToDelete);
+    deleteOpenKeysFromCache(keysToDelete);
+
+    assertNotInOpenKeyTable(keysToDelete);
+    assertInOpenKeyTable(keysToKeep);
+  }
+
+  /**
+   * Tests metrics set by {@link OMOpenKeysDeleteRequest}.
+   * Submits a set of keys for deletion where only some of the keys actually
+   * exist in the open key table, and asserts that the metrics count keys
+   * that were submitted for deletion versus those that were actually deleted.
+   * @throws Exception
+   */
+  @Test
+  public void testMetrics() throws Exception {
+    final int numExistentKeys = 3;
+    final int numNonExistentKeys = 5;
+
+    OMMetrics metrics = ozoneManager.getMetrics();
+    // JUnit's assertEquals takes (expected, actual); keep that order here,
+    // consistent with the assertions at the end of this method, so failure
+    // messages report the expected and actual values correctly.
+    Assert.assertEquals(0, metrics.getNumOpenKeyDeleteRequests());
+    Assert.assertEquals(0, metrics.getNumOpenKeyDeleteRequestFails());
+    Assert.assertEquals(0, metrics.getNumOpenKeysSubmittedForDeletion());
+    Assert.assertEquals(0, metrics.getNumOpenKeysDeleted());
+
+    OpenKeyBucket existentKeys =
+        makeOpenKeys(volumeName, bucketName, keyName, numExistentKeys);
+    OpenKeyBucket nonExistentKeys =
+        makeOpenKeys(volumeName, bucketName, keyName, numNonExistentKeys);
+
+    addToOpenKeyTableDB(existentKeys);
+    deleteOpenKeysFromCache(existentKeys, nonExistentKeys);
+
+    assertNotInOpenKeyTable(existentKeys);
+    assertNotInOpenKeyTable(nonExistentKeys);
+
+    Assert.assertEquals(1, metrics.getNumOpenKeyDeleteRequests());
+    Assert.assertEquals(0, metrics.getNumOpenKeyDeleteRequestFails());
+    Assert.assertEquals(numExistentKeys + numNonExistentKeys,
+        metrics.getNumOpenKeysSubmittedForDeletion());
+    Assert.assertEquals(numExistentKeys, metrics.getNumOpenKeysDeleted());
+  }
+
+  /**
+   * Runs the validate and update cache step of
+   * {@link
OMOpenKeysDeleteRequest} to mark the keys in {@code openKeys} + * as deleted in the open key table cache. + * Asserts that the call's response status is {@link Status#OK}. + * @throws Exception + */ + private void deleteOpenKeysFromCache(OpenKeyBucket... openKeys) + throws Exception { + + OMRequest omRequest = + doPreExecute(createDeleteOpenKeyRequest(openKeys)); + + OMOpenKeysDeleteRequest openKeyDeleteRequest = + new OMOpenKeysDeleteRequest(omRequest); + + OMClientResponse omClientResponse = + openKeyDeleteRequest.validateAndUpdateCache(ozoneManager, + 100L, ozoneManagerDoubleBufferHelper); + + Assert.assertEquals(Status.OK, + omClientResponse.getOMResponse().getStatus()); + } + + /** + * Adds {@code openKeys} to the open key table DB only, and asserts that they + * are present after the addition. + * @throws Exception + */ + private void addToOpenKeyTableDB(OpenKeyBucket... openKeys) + throws Exception { + + addToOpenKeyTableDB(0, openKeys); + } + + /** + * Adds {@code openKeys} to the open key table DB only, and asserts that they + * are present after the addition. Adds each key to the table with a single + * block of size {@code keySize}. + * @throws Exception + */ + private void addToOpenKeyTableDB(long keySize, OpenKeyBucket... 
openKeys) + throws Exception { + + for (OpenKeyBucket openKeyBucket: openKeys) { + String volume = openKeyBucket.getVolumeName(); + String bucket = openKeyBucket.getBucketName(); + + for (OpenKey openKey: openKeyBucket.getKeysList()) { + if (keySize > 0) { + OmKeyInfo keyInfo = TestOMRequestUtils.createOmKeyInfo(volume, bucket, + openKey.getName(), replicationType, replicationFactor); + TestOMRequestUtils.addKeyLocationInfo(keyInfo, 0, keySize); + + TestOMRequestUtils.addKeyToTable(true, false, + keyInfo, openKey.getClientID(), 0L, omMetadataManager); + } else { + TestOMRequestUtils.addKeyToTable(true, + volume, bucket, openKey.getName(), openKey.getClientID(), + replicationType, replicationFactor, omMetadataManager); + } + } + } + + assertInOpenKeyTable(openKeys); + } + + /** + * Constructs a list of {@link OpenKeyBucket} objects of size {@code numKeys}. + * The keys created will all have the same volume and bucket, but + * randomized key names and client IDs. These keys are not added to the + * open key table. + * + * @param volume The volume all open keys created will have. + * @param bucket The bucket all open keys created will have. + * @param numKeys The number of keys with randomized key names and client + * IDs to create. + * @return A list of new open keys with size {@code numKeys}. + */ + private OpenKeyBucket makeOpenKeys(String volume, String bucket, + int numKeys) { + + OpenKeyBucket.Builder keysPerBucketBuilder = + OpenKeyBucket.newBuilder() + .setVolumeName(volume) + .setBucketName(bucket); + + for (int i = 0; i < numKeys; i++) { + String keyName = UUID.randomUUID().toString(); + long clientID = new Random().nextLong(); + + OpenKey openKey = OpenKey.newBuilder() + .setName(keyName) + .setClientID(clientID) + .build(); + keysPerBucketBuilder.addKeys(openKey); + } + + return keysPerBucketBuilder.build(); + } + + /** + * Constructs a list of {@link OpenKey} objects of size {@code numKeys}. 
+   * The keys created will all have the same volume, bucket, and
+   * key names, but randomized client IDs. These keys are not added to the
+   * open key table.
+   *
+   * @param volume The volume all open keys created will have.
+   * @param bucket The bucket all open keys created will have.
+   * @param key The key name all open keys created will have.
+   * @param numKeys The number of keys with randomized client IDs to create.
+   * @return A new {@link OpenKeyBucket} holding {@code numKeys} open keys.
+   */
+  private OpenKeyBucket makeOpenKeys(String volume, String bucket,
+      String key, int numKeys) {
+
+    OpenKeyBucket.Builder keysPerBucketBuilder =
+        OpenKeyBucket.newBuilder()
+        .setVolumeName(volume)
+        .setBucketName(bucket);
+
+    for (int i = 0; i < numKeys; i++) {
+      long clientID = new Random().nextLong();
+
+      OpenKey openKey = OpenKey.newBuilder()
+          .setName(key)
+          .setClientID(clientID)
+          .build();
+      keysPerBucketBuilder.addKeys(openKey);
+    }
+
+    return keysPerBucketBuilder.build();
+  }
+
+  private void assertInOpenKeyTable(OpenKeyBucket... openKeys)
+      throws Exception {
+
+    for (String keyName: getFullOpenKeyNames(openKeys)) {
+      Assert.assertTrue(omMetadataManager.getOpenKeyTable().isExist(keyName));
+    }
+  }
+
+  private void assertNotInOpenKeyTable(OpenKeyBucket... openKeys)
+      throws Exception {
+
+    for (String keyName: getFullOpenKeyNames(openKeys)) {
+      Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(keyName));
+    }
+  }
+
+  /**
+   * Expands all the open keys represented by {@code openKeyBuckets} to their
+   * full key names as strings.
+   * @param openKeyBuckets The per-bucket groups of open keys to expand.
+   * @return The full open key table names of every key in every bucket given.
+   */
+  private List getFullOpenKeyNames(OpenKeyBucket...
openKeyBuckets) { + List fullKeyNames = new ArrayList<>(); + + for(OpenKeyBucket keysPerBucket: openKeyBuckets) { + String volume = keysPerBucket.getVolumeName(); + String bucket = keysPerBucket.getBucketName(); + + for (OpenKey openKey: keysPerBucket.getKeysList()) { + String fullName = omMetadataManager.getOpenKey(volume, bucket, + openKey.getName(), openKey.getClientID()); + fullKeyNames.add(fullName); + } + } + + return fullKeyNames; + } + + /** + * Constructs a new {@link OMOpenKeysDeleteRequest} objects, and calls its + * {@link OMOpenKeysDeleteRequest#preExecute} method with {@code + * originalOMRequest}. It verifies that {@code originalOMRequest} is modified + * after the call, and returns it. + * @throws Exception + */ + private OMRequest doPreExecute(OMRequest originalOmRequest) throws Exception { + OMOpenKeysDeleteRequest omOpenKeysDeleteRequest = + new OMOpenKeysDeleteRequest(originalOmRequest); + + OMRequest modifiedOmRequest = + omOpenKeysDeleteRequest.preExecute(ozoneManager); + + // Will not be equal, as UserInfo will be set. + Assert.assertNotEquals(originalOmRequest, modifiedOmRequest); + + return modifiedOmRequest; + } + + /** + * Creates an {@code OpenKeyDeleteRequest} to delete the keys represented by + * {@code keysToDelete}. Returns an {@code OMRequest} which encapsulates this + * {@code OpenKeyDeleteRequest}. + */ + private OMRequest createDeleteOpenKeyRequest(OpenKeyBucket... 
keysToDelete) { + DeleteOpenKeysRequest deleteOpenKeysRequest = + DeleteOpenKeysRequest.newBuilder() + .addAllOpenKeysPerBucket(Arrays.asList(keysToDelete)) + .build(); + + return OMRequest.newBuilder() + .setDeleteOpenKeysRequest(deleteOpenKeysRequest) + .setCmdType(OzoneManagerProtocolProtos.Type.DeleteOpenKeys) + .setClientId(UUID.randomUUID().toString()).build(); + } + + private void addVolumeToCacheAndDB(OmVolumeArgs volumeArgs) throws Exception { + String volumeKey = omMetadataManager.getVolumeKey(volumeArgs.getVolume()); + + omMetadataManager.getVolumeTable().addCacheEntry( + new CacheKey<>(volumeKey), + new CacheValue<>(Optional.of(volumeArgs), volumeArgs.getUpdateID()) + ); + + omMetadataManager.getVolumeTable().put(volumeKey, volumeArgs); + } + + private OmVolumeArgs getVolumeFromDB(String volume) throws Exception { + String volumeKey = omMetadataManager.getVolumeKey(volume); + return omMetadataManager.getVolumeTable().getSkipCache(volumeKey); + } + + private OmVolumeArgs getVolumeFromCache(String volume) { + String volumeKey = omMetadataManager.getVolumeKey(volume); + CacheValue value = omMetadataManager.getVolumeTable() + .getCacheValue(new CacheKey<>(volumeKey)); + + OmVolumeArgs result = null; + if (value != null) { + result = value.getCacheValue(); + } + + return result; + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java index 9c7092623f7f..e1f68ba9abb7 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java @@ -85,7 +85,7 @@ public void testKeysDeleteResponse() throws Exception { .setCreationTime(Time.now()).build(); OMClientResponse omKeysDeleteResponse = new 
OMKeysDeleteResponse( - omResponse, omKeyInfoList, 10L, true, + omResponse, omKeyInfoList, true, omVolumeArgs, omBucketInfo); omKeysDeleteResponse.checkAndUpdateDB(omMetadataManager, batchOperation); @@ -94,13 +94,11 @@ public void testKeysDeleteResponse() throws Exception { for (String ozKey : ozoneKeys) { Assert.assertNull(omMetadataManager.getKeyTable().get(ozKey)); + // ozKey had no block information associated with it, so it should have + // been removed from the key table but not added to the delete table. RepeatedOmKeyInfo repeatedOmKeyInfo = omMetadataManager.getDeletedTable().get(ozKey); - Assert.assertNotNull(repeatedOmKeyInfo); - - Assert.assertEquals(1, repeatedOmKeyInfo.getOmKeyInfoList().size()); - Assert.assertEquals(10L, - repeatedOmKeyInfo.getOmKeyInfoList().get(0).getUpdateID()); + Assert.assertNull(repeatedOmKeyInfo); } } @@ -123,7 +121,7 @@ public void testKeysDeleteResponseFail() throws Exception { .setCreationTime(Time.now()).build(); OMClientResponse omKeysDeleteResponse = new OMKeysDeleteResponse( - omResponse, omKeyInfoList, 10L, true, + omResponse, omKeyInfoList, true, omVolumeArgs, omBucketInfo); omKeysDeleteResponse.checkAndUpdateDB(omMetadataManager, batchOperation); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java new file mode 100644 index 000000000000..f28bb4ca31aa --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java @@ -0,0 +1,185 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response.key; + +import org.junit.Assert; +import org.junit.Test; + +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; + +import java.util.HashMap; +import java.util.Map; +import java.util.Random; +import java.util.UUID; + +/** + * Tests OMOpenKeysDeleteResponse. + */ +public class TestOMOpenKeysDeleteResponse extends TestOMKeyResponse { + private static final long KEY_LENGTH = 100; + + /** + * Tests deleting a subset of keys from the open key table DB when the keys + * have no associated block data. + */ + @Test + public void testAddToDBBatchWithEmptyBlocks() throws Exception { + Map keysToDelete = addOpenKeysToDB(volumeName, 3); + Map keysToKeep = addOpenKeysToDB(volumeName, 3); + createAndCommitResponse(keysToDelete, Status.OK); + + for (String key: keysToDelete.keySet()) { + // open keys with no associated block data should have been removed + // from the open key table, but not added to the deleted table. + Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(key)); + Assert.assertFalse(omMetadataManager.getDeletedTable().isExist(key)); + } + + for (String key: keysToKeep.keySet()) { + // These keys should not have been removed from the open key table. 
+ Assert.assertTrue(omMetadataManager.getOpenKeyTable().isExist(key)); + Assert.assertFalse(omMetadataManager.getDeletedTable().isExist(key)); + } + } + + /** + * Tests deleting a subset of keys from the open key table DB when the keys + * have associated block data. + */ + @Test + public void testAddToDBBatchWithNonEmptyBlocks() throws Exception { + Map keysToDelete = addOpenKeysToDB(volumeName, 3, + KEY_LENGTH); + Map keysToKeep = addOpenKeysToDB(volumeName, 3, + KEY_LENGTH); + + createAndCommitResponse(keysToDelete, Status.OK); + + for (String key: keysToDelete.keySet()) { + // These keys should have been moved from the open key table to the + // delete table. + Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(key)); + Assert.assertTrue(omMetadataManager.getDeletedTable().isExist(key)); + } + + for (String key: keysToKeep.keySet()) { + // These keys should not have been moved out of the open key table. + Assert.assertTrue(omMetadataManager.getOpenKeyTable().isExist(key)); + Assert.assertFalse(omMetadataManager.getDeletedTable().isExist(key)); + } + } + + /** + * Tests attempting deleting keys from the open key table DB when the + * submitted response has an error status. In this case, no changes to the + * DB should be made. + */ + @Test + public void testAddToDBBatchWithErrorResponse() throws Exception { + Map keysToDelete = addOpenKeysToDB(volumeName, 3); + createAndCommitResponse(keysToDelete, Status.INTERNAL_ERROR); + + for (String key: keysToDelete.keySet()) { + // If an error occurs in the response, the batch operation moving keys + // from the open key table to the delete table should not be committed. + Assert.assertTrue(omMetadataManager.getOpenKeyTable().isExist(key)); + Assert.assertFalse(omMetadataManager.getDeletedTable().isExist(key)); + } + } + + /** + * Constructs an {@link OMOpenKeysDeleteResponse} to delete the keys in + * {@code keysToDelete}, with the completion status set to {@code status}. 
+ * If {@code status} is {@link Status#OK}, the keys to delete will be added + * to a batch operation and committed to the database. + * @throws Exception + */ + private void createAndCommitResponse(Map keysToDelete, + Status status) throws Exception { + + OMResponse omResponse = OMResponse.newBuilder() + .setStatus(status) + .setCmdType(OzoneManagerProtocolProtos.Type.DeleteOpenKeys) + .build(); + + OMOpenKeysDeleteResponse response = new OMOpenKeysDeleteResponse(omResponse, + keysToDelete, true); + + // Operations are only added to the batch by this method when status is OK. + response.checkAndUpdateDB(omMetadataManager, batchOperation); + + // If status is not OK, this will do nothing. + omMetadataManager.getStore().commitBatchOperation(batchOperation); + } + + /** + * Creates {@code numKeys} open keys with random names, maps each one to a + * new {@link OmKeyInfo} object, adds them to the open key table cache, and + * returns them. These keys will have no associated block data. + */ + private Map addOpenKeysToDB(String volume, int numKeys) + throws Exception { + return addOpenKeysToDB(volume, numKeys, 0); + } + + /** + * Creates {@code numKeys} open keys with random names, maps each one to a + * new {@link OmKeyInfo} object, adds them to the open key table cache, and + * returns them. + * If {@code keyLength} is greater than 0, adds one block with that many + * bytes of data for each key. 
+ * @throws Exception + */ + private Map addOpenKeysToDB(String volume, int numKeys, + long keyLength) throws Exception { + + Map newOpenKeys = new HashMap<>(); + + for (int i = 0; i < numKeys; i++) { + String bucket = UUID.randomUUID().toString(); + String key = UUID.randomUUID().toString(); + long clientID = new Random().nextLong(); + + OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volume, + bucket, key, replicationType, replicationFactor); + + if (keyLength > 0) { + TestOMRequestUtils.addKeyLocationInfo(omKeyInfo, 0, keyLength); + } + + String openKey = omMetadataManager.getOpenKey(volume, bucket, + key, clientID); + + // Add to the open key table DB, not cache. + // In a real execution, the open key would have been removed from the + // cache by the request, and it would only remain in the DB. + TestOMRequestUtils.addKeyToTable(true, false, omKeyInfo, + clientID, 0L, omMetadataManager); + Assert.assertTrue(omMetadataManager.getOpenKeyTable().isExist(openKey)); + + newOpenKeys.put(openKey, omKeyInfo); + } + + return newOpenKeys; + } +}