diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneRecoverWindow.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneRecoverWindow.java new file mode 100644 index 000000000000..228f22cdd423 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneRecoverWindow.java @@ -0,0 +1,173 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.client; + +import org.apache.hadoop.ozone.OzoneConsts; + +/** + * OzoneRecoverWindow that can be applied to bucket. + */ +public class OzoneRecoverWindow { + + public static final String OZONE_RECOVER_WINDOW_MIN = "MIN"; + public static final String OZONE_RECOVER_WINDOW_HR = "HR"; + public static final String OZONE_RECOVER_WINDOW_DAY = "DAY"; + + private Units unit; + private long windowLength; + + /** Recover-window Units.*/ + public enum Units {UNDEFINED, MIN, HR, DAY} + + /** + * Returns recover-window length. + */ + public long getWindowLength() { + return windowLength; + } + + /** + * Returns Units. + * + * @return Unit in MIN, HR or DAY + */ + public Units getUnit() { + return unit; + } + + /** + * Constructs a default OzoneRecoverWindow object. 
+ */ + public OzoneRecoverWindow() { + this.windowLength = 0; + this.unit = Units.UNDEFINED; + } + + /** + * Constructor for OzoneRecoverWindow. + * @param windowLength recover-window length + * @param unit MIN, HR or DAY + */ + public OzoneRecoverWindow(long windowLength, Units unit) { + this.windowLength = windowLength; + this.unit = unit; + } + + /** + * Formats recover-window as a string. + */ + public static String formatWindow(OzoneRecoverWindow window) { + return String.valueOf(window.windowLength) + window.unit; + } + + /** + * Parses user provided string and returns the OzoneRecoverWindow. + */ + public static OzoneRecoverWindow parseWindow(String windowString) { + + if ((windowString == null) || (windowString.isEmpty())) { + throw new IllegalArgumentException( + "Recover-Window string cannot be null or empty."); + } + + String uppercase = windowString.toUpperCase().replaceAll("\\s+", ""); + String length = ""; + int nLength; + Units currUnit = Units.MIN; + boolean found = false; + if (uppercase.endsWith(OZONE_RECOVER_WINDOW_MIN)) { + length = uppercase + .substring(0, uppercase.length() - OZONE_RECOVER_WINDOW_MIN.length()); + currUnit = Units.MIN; + found = true; + } + + if (uppercase.endsWith(OZONE_RECOVER_WINDOW_HR)) { + length = uppercase + .substring(0, uppercase.length() - OZONE_RECOVER_WINDOW_HR.length()); + currUnit = Units.HR; + found = true; + } + + if (uppercase.endsWith(OZONE_RECOVER_WINDOW_DAY)) { + length = uppercase + .substring(0, uppercase.length() - OZONE_RECOVER_WINDOW_DAY.length()); + currUnit = Units.DAY; + found = true; + } + + if (!found) { + throw new IllegalArgumentException("Window-length unit not recognized. " + + "Supported values are MIN, HR and DAY."); + } + + nLength = Integer.parseInt(length); + if (nLength < 0) { + throw new IllegalArgumentException("Window-length cannot be negative."); + } + + return new OzoneRecoverWindow(nLength, currUnit); + } + + + /** + * Returns length in seconds or -1 if there is no Window. 
+ */ + public long lengthInSeconds() { + switch (this.unit) { + case MIN: + return this.getWindowLength() * OzoneConsts.MIN; + case HR: + return this.getWindowLength() * OzoneConsts.HR; + case DAY: + return this.getWindowLength() * OzoneConsts.DAY; + case UNDEFINED: + default: + return -1; + } + } + + + /** + * Returns OzoneRecoverWindow corresponding to window length in seconds. + */ + public static OzoneRecoverWindow getOzoneRecoverWindow(long windowInSeconds) { + long length; + Units unit; + if (windowInSeconds % OzoneConsts.DAY == 0) { + length = windowInSeconds / OzoneConsts.DAY; + unit = Units.DAY; + } else if (windowInSeconds % OzoneConsts.HR == 0) { + length = windowInSeconds / OzoneConsts.HR; + unit = Units.HR; + } else if (windowInSeconds % OzoneConsts.MIN == 0) { + length = windowInSeconds / OzoneConsts.MIN; + unit = Units.MIN; + } else { + length = 0; + unit = Units.MIN; + } + return new OzoneRecoverWindow((int)length, unit); + } + + @Override + public String toString() { + return windowLength + " " + unit; + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java index 17cea828a805..9fc7751c3800 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java @@ -440,6 +440,13 @@ public final class OzoneConfigKeys { "ozone.client.list.trash.keys.max"; public static final int OZONE_CLIENT_LIST_TRASH_KEYS_MAX_DEFAULT = 1000; + public static final String OZONE_TRASH_ENABLED_KEY = "ozone.trash.enabled"; + public static final boolean OZONE_TRASH_ENABLED_KEY_DEFAULT = false; + public static final String OZONE_TRASH_RECOVER_WINDOW = + "ozone.trash.recover.window"; + // Default is disabled. 
+  public static final String OZONE_TRASH_RECOVER_WINDOW_DEFAULT = "0MIN";
+
   public static final String OZONE_HTTP_BASEDIR = "ozone.http.basedir";

   public static final String OZONE_HTTP_POLICY_KEY =
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index e027c82abf73..3cb076871b19 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -113,6 +113,12 @@ public final class OzoneConsts {
   public static final long GB = MB * 1024L;
   public static final long TB = GB * 1024L;

+  // Recover-window time units, expressed in SECONDS (not milliseconds),
+  // so that OzoneRecoverWindow#lengthInSeconds() really returns seconds.
+  public static final long MIN = 60L;
+  public static final long HR = MIN * 60L;
+  public static final long DAY = HR * 24L;
+
 /**
  * level DB names used by SCM and data nodes.
  */
@@ -279,6 +284,8 @@ private OzoneConsts() {
   public static final String STORAGE_TYPE = "storageType";
   public static final String RESOURCE_TYPE = "resourceType";
   public static final String IS_VERSION_ENABLED = "isVersionEnabled";
+  public static final String IS_TRASH_ENABLED = "isTrashEnabled";
+  public static final String RECOVER_WINDOW = "recoverWindow";
   public static final String CREATION_TIME = "creationTime";
   public static final String DATA_SIZE = "dataSize";
   public static final String REPLICATION_TYPE = "replicationType";
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index b792cc9d86b6..2f74e4b12aae 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -2391,6 +2391,22 @@
     The maximum number of keys to return for a list trash request.
+
+  ozone.trash.enabled
+  false
+  OZONE, OM
+
+  Globally enable trash ability of buckets.
+ + + + ozone.trash.recover.window + 0MIN + OZONE, OM + + Window of recovering trash, default is 0 for trash-disabled. + + ozone.http.basedir diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java index 5bae15ddfe11..2fe733621f0e 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/BucketArgs.java @@ -55,6 +55,16 @@ public final class BucketArgs { */ private String bucketEncryptionKey; + /** + * Bucket is trash enabled or not. + */ + private boolean trashEnabled; + + /** + * recover-window of the bucket for deleted key (trash). + */ + private String recoverWindow; + /** * Private constructor, constructed via builder. * @param versioning Bucket version flag. @@ -62,15 +72,20 @@ public final class BucketArgs { * @param acls list of ACLs. * @param metadata map of bucket metadata * @param bucketEncryptionKey bucket encryption key name + * @param trashEnabled bucket is trash enabled or not. + * @param recoverWindow recover-window of the bucket for deleted key (trash). */ private BucketArgs(Boolean versioning, StorageType storageType, List acls, Map metadata, - String bucketEncryptionKey) { + String bucketEncryptionKey, boolean trashEnabled, + String recoverWindow) { this.acls = acls; this.versioning = versioning; this.storageType = storageType; this.metadata = metadata; this.bucketEncryptionKey = bucketEncryptionKey; + this.trashEnabled = trashEnabled; + this.recoverWindow = recoverWindow; } /** @@ -114,6 +129,22 @@ public String getEncryptionKey() { return bucketEncryptionKey; } + /** + * Returns the bucket is trash enabled or not. + * @return boolean + */ + public boolean isTrashEnabled() { + return trashEnabled; + } + + /** + * Return the recover-window of the bucket. 
+ * @return String + */ + public String getRecoverWindow() { + return recoverWindow; + } + /** * Returns new builder class that builds a OmBucketInfo. * @@ -132,6 +163,8 @@ public static class Builder { private List acls; private Map metadata; private String bucketEncryptionKey; + private boolean trashEnabled; + private String recoverWindow; public Builder() { metadata = new HashMap<>(); @@ -161,13 +194,24 @@ public BucketArgs.Builder setBucketEncryptionKey(String bek) { this.bucketEncryptionKey = bek; return this; } + + public BucketArgs.Builder setTrashEnabled(boolean trashEnabledSetting) { + this.trashEnabled = trashEnabledSetting; + return this; + } + + public BucketArgs.Builder setRecoverWindow(String recoverWindowSetting) { + this.recoverWindow = recoverWindowSetting; + return this; + } + /** * Constructs the BucketArgs. * @return instance of BucketArgs. */ public BucketArgs build() { return new BucketArgs(versioning, storageType, acls, metadata, - bucketEncryptionKey); + bucketEncryptionKey, trashEnabled, recoverWindow); } } } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java index 87710ea01156..b98408d1c285 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java @@ -101,6 +101,16 @@ public class OzoneBucket extends WithMetadata { */ private String encryptionKeyName; + /** + * Bucket is trash enabled or not. + */ + private boolean trashEnabled; + + /** + * recover-window of bucket for deleted key (trash). 
+ */ + private long recoverWindow; + private OzoneObj ozoneObj; @@ -144,6 +154,19 @@ public OzoneBucket(ConfigurationSource conf, ClientProtocol proxy, this.creationTime = Instant.ofEpochMilli(creationTime); this.metadata = metadata; this.encryptionKeyName = encryptionKeyName; + this.trashEnabled = false; + this.recoverWindow = 0; + } + + @SuppressWarnings("parameternumber") + public OzoneBucket(ConfigurationSource conf, ClientProtocol proxy, + String volumeName, String bucketName, StorageType storageType, + Boolean versioning, long creationTime, Map metadata, + String encryptionKeyName, boolean trashEnabled, long recoverWindow) { + this(conf, proxy, volumeName, bucketName, storageType, versioning, + creationTime, metadata, encryptionKeyName); + this.trashEnabled = trashEnabled; + this.recoverWindow = recoverWindow; } /** @@ -166,6 +189,31 @@ public OzoneBucket(ConfigurationSource conf, ClientProtocol proxy, this.listCacheSize = HddsClientUtils.getListCacheSize(conf); this.creationTime = Instant.ofEpochMilli(creationTime); this.metadata = metadata; + this.trashEnabled = false; + this.recoverWindow = 0; + } + + /** + * Constructs OzoneBucket instance. + * @param conf Configuration object. + * @param proxy ClientProtocol proxy. + * @param volumeName Name of the volume the bucket belongs to. + * @param bucketName Name of the bucket. + * @param storageType StorageType of the bucket. + * @param versioning versioning status of the bucket. + * @param creationTime creation time of the bucket. + * @param trashEnabled trash is enabled or not with the bucket. + * @param recoverWindow recover-window of the bucket. 
+ */ + @SuppressWarnings("parameternumber") + public OzoneBucket(ConfigurationSource conf, ClientProtocol proxy, + String volumeName, String bucketName, StorageType storageType, + Boolean versioning, long creationTime, Map metadata, + boolean trashEnabled, long recoverWindow) { + this(conf, proxy, volumeName, bucketName, storageType, versioning, + creationTime, metadata); + this.trashEnabled = trashEnabled; + this.recoverWindow = recoverWindow; } @VisibleForTesting @@ -187,8 +235,22 @@ public OzoneBucket(ConfigurationSource conf, ClientProtocol proxy, .setVolumeName(volumeName) .setResType(OzoneObj.ResourceType.BUCKET) .setStoreType(OzoneObj.StoreType.OZONE).build(); + this.trashEnabled = false; + this.recoverWindow = 0; } + @VisibleForTesting + @SuppressWarnings("parameternumber") + OzoneBucket(String volumeName, String name, + ReplicationFactor defaultReplication, + ReplicationType defaultReplicationType, StorageType storageType, + Boolean versioning, long creationTime, boolean trashEnabled, + long recoverWindow) { + this(volumeName, name, defaultReplication, defaultReplicationType, + storageType, versioning, creationTime); + this.trashEnabled = trashEnabled; + this.recoverWindow = recoverWindow; + } /** * Returns Volume Name. @@ -253,6 +315,20 @@ public String getEncryptionKeyName() { return encryptionKeyName; } + /** + * Return bucket is trash enabled or not. + */ + public boolean isTrashEnabled() { + return trashEnabled; + } + + /** + * Return length of recover-window in seconds. + */ + public long getRecoverWindow() { + return recoverWindow; + } + /** * Adds ACLs to the Bucket. 
* @param addAcl ACL to be added diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java index dbed053f74f5..4c942c35b0a3 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java @@ -347,7 +347,8 @@ List listTrash(String volumeName, String bucketName, * @param volumeName - The volume name. * @param bucketName - The bucket name. * @param keyName - The key user want to recover. - * @param destinationBucket - The bucket user want to recover to. + * @param destinationBucket - The bucket under volumeName + * user want to recover to. * @return The result of recovering operation is success or not. * @throws IOException */ diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 843769996068..cbad0f62f51b 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -38,6 +38,7 @@ import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.fs.FileEncryptionInfo; import org.apache.hadoop.hdds.client.OzoneQuota; +import org.apache.hadoop.hdds.client.OzoneRecoverWindow; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.ConfigurationSource; @@ -429,6 +430,14 @@ public void createBucket( .setKeyName(bucketArgs.getEncryptionKey()).build(); } + boolean trashEnabled = bucketArgs.isTrashEnabled(); + String recoverWindow = bucketArgs.getRecoverWindow() == null ? 
+ OzoneConfigKeys.OZONE_TRASH_RECOVER_WINDOW_DEFAULT : + bucketArgs.getRecoverWindow(); + long recoverWindowLength = OzoneRecoverWindow + .parseWindow(recoverWindow) + .lengthInSeconds(); + List listOfAcls = getAclList(); //ACLs from BucketArgs if(bucketArgs.getAcls() != null) { @@ -441,7 +450,9 @@ public void createBucket( .setIsVersionEnabled(isVersionEnabled) .addAllMetadata(bucketArgs.getMetadata()) .setStorageType(storageType) - .setAcls(listOfAcls.stream().distinct().collect(Collectors.toList())); + .setAcls(listOfAcls.stream().distinct().collect(Collectors.toList())) + .setTrashEnabled(trashEnabled) + .setRecoverWindow(recoverWindowLength); if (bek != null) { builder.setBucketEncryptionKey(bek); @@ -606,7 +617,9 @@ public OzoneBucket getBucketDetails( bucketInfo.getCreationTime(), bucketInfo.getMetadata(), bucketInfo.getEncryptionKeyInfo() != null ? bucketInfo - .getEncryptionKeyInfo().getKeyName() : null); + .getEncryptionKeyInfo().getKeyName() : null, + bucketInfo.getTrashEnabled(), + bucketInfo.getRecoverWindow()); } @Override @@ -626,7 +639,9 @@ public List listBuckets(String volumeName, String bucketPrefix, bucket.getCreationTime(), bucket.getMetadata(), bucket.getEncryptionKeyInfo() != null ? bucket - .getEncryptionKeyInfo().getKeyName() : null)) + .getEncryptionKeyInfo().getKeyName() : null, + bucket.getTrashEnabled(), + bucket.getRecoverWindow())) .collect(Collectors.toList()); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index 12220cd7b528..14487d8f4048 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -428,14 +428,20 @@ public static File createOMDir(String dirPath) { /** * Prepares key info to be moved to deletedTable. + * Or prepares key info to be recovered to trashTable. + * * 1. 
It strips GDPR metadata from key info - * 2. For given object key, if the repeatedOmKeyInfo instance is null, it + * 2-1. For given object key, if the repeatedOmKeyInfo instance is null, it * implies that no entry for the object key exists in deletedTable so we * create a new instance to include this key, else we update the existing * repeatedOmKeyInfo instance. + * 2-2. For given object key, if the repeatedOmKeyInfo instance is null, it + * implies that no entry for the object key exists in trashTable so we + * create a new instance to include this key, else we update the existing + * repeatedOmKeyInfo instance. * 3. Set the updateID to the transactionLogIndex. * @param keyInfo args supplied by client - * @param repeatedOmKeyInfo key details from deletedTable + * @param repeatedOmKeyInfo key details from deletedTable/trashTable * @param trxnLogIndex For Multipart keys, this is the transactionLogIndex * of the MultipartUploadAbort request which needs to * be set as the updateID of the partKeyInfos. @@ -460,10 +466,11 @@ public static RepeatedOmKeyInfo prepareKeyForDelete(OmKeyInfo keyInfo, keyInfo.setUpdateID(trxnLogIndex, isRatisEnabled); if(repeatedOmKeyInfo == null) { - //The key doesn't exist in deletedTable, so create a new instance. + //The key doesn't exist in deletedTable/trashTable, + //so create a new instance. repeatedOmKeyInfo = new RepeatedOmKeyInfo(keyInfo); } else { - //The key exists in deletedTable, so update existing instance. + //The key exists in deletedTable/trashTable, so update existing instance. repeatedOmKeyInfo.addOmKeyInfo(keyInfo); } @@ -503,17 +510,4 @@ public static long getOMClientRpcTimeOut(Configuration configuration) { .getObject(OMClientConfig.class).getRpcTimeOut(); } - /** - * Return OmKeyInfo that would be recovered. - */ - public static OmKeyInfo prepareKeyForRecover(OmKeyInfo keyInfo, - RepeatedOmKeyInfo repeatedOmKeyInfo) { - - /* TODO: HDDS-2425. 
HDDS-2426.*/ - if (repeatedOmKeyInfo.getOmKeyInfoList().contains(keyInfo)) { - return keyInfo; - } else { - return null; - } - } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java index 58d5a02e1f30..c8f08c9042ad 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/exceptions/OMException.java @@ -223,6 +223,12 @@ public enum ResultCodes { INVALID_VOLUME_NAME, - REPLAY // When ratis logs are replayed. + REPLAY, // When ratis logs are replayed. + + // When recovering key that not in deletedTable. + RECOVERED_KEY_NOT_FOUND, + + // When recovering key that has key of same name in bucket. + RECOVERED_KEY_ALREADY_EXISTS } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java index f8c4d7a7d652..c304e6e7cbc9 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java @@ -27,6 +27,7 @@ import java.util.Objects; import java.util.stream.Collectors; +import org.apache.hadoop.hdds.client.OzoneRecoverWindow; import org.apache.hadoop.hdds.protocol.StorageType; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; @@ -72,6 +73,16 @@ public final class OmBucketInfo extends WithObjectID implements Auditable { */ private BucketEncryptionKeyInfo bekInfo; + /** + * Bucket is trash enabled or not. + */ + private boolean trashEnabled; + + /** + * recover-window of the bucket for deleted key (trash). + */ + private long recoverWindow; + /** * Private constructor, constructed via builder. * @param volumeName - Volume name. 
@@ -82,6 +93,7 @@ public final class OmBucketInfo extends WithObjectID implements Auditable { * @param creationTime - Bucket creation time. * @param metadata - metadata. * @param bekInfo - bucket encryption key info. + * @param trashEnabled - bucket is trash enabled or not. */ @SuppressWarnings("checkstyle:ParameterNumber") private OmBucketInfo(String volumeName, @@ -93,7 +105,9 @@ private OmBucketInfo(String volumeName, long objectID, long updateID, Map metadata, - BucketEncryptionKeyInfo bekInfo) { + BucketEncryptionKeyInfo bekInfo, + boolean trashEnabled, + long recoverWindow) { this.volumeName = volumeName; this.bucketName = bucketName; this.acls = acls; @@ -104,6 +118,8 @@ private OmBucketInfo(String volumeName, this.updateID = updateID; this.metadata = metadata; this.bekInfo = bekInfo; + this.trashEnabled = trashEnabled; + this.recoverWindow = recoverWindow; } /** @@ -192,6 +208,21 @@ public BucketEncryptionKeyInfo getEncryptionKeyInfo() { return bekInfo; } + /** + * Returns bucket is trash enabled or not. + * @return boolean + */ + public boolean getTrashEnabled() { + return trashEnabled; + } + + /** + * Returns length of recover-window in seconds. + */ + public long getRecoverWindow() { + return recoverWindow; + } + /** * Returns new builder class that builds a OmBucketInfo. * @@ -217,6 +248,10 @@ public Map toAuditMap() { auditMap.put(OzoneConsts.CREATION_TIME, String.valueOf(this.creationTime)); auditMap.put(OzoneConsts.BUCKET_ENCRYPTION_KEY, (bekInfo != null) ? bekInfo.getKeyName() : null); + auditMap.put(OzoneConsts.IS_TRASH_ENABLED, + String.valueOf(this.trashEnabled)); + auditMap.put(OzoneConsts.RECOVER_WINDOW, + String.valueOf(this.recoverWindow)); return auditMap; } @@ -234,7 +269,9 @@ public OmBucketInfo copyObject() { .setUpdateID(updateID) .setBucketEncryptionKey(bekInfo != null ? 
new BucketEncryptionKeyInfo(bekInfo.getVersion(), - bekInfo.getSuite(), bekInfo.getKeyName()) : null); + bekInfo.getSuite(), bekInfo.getKeyName()) : null) + .setTrashEnabled(trashEnabled) + .setRecoverWindow(recoverWindow); acls.forEach(acl -> builder.addAcl(new OzoneAcl(acl.getType(), acl.getName(), (BitSet) acl.getAclBitSet().clone(), @@ -261,6 +298,8 @@ public static class Builder { private long updateID; private Map metadata; private BucketEncryptionKeyInfo bekInfo; + private boolean trashEnabled; + private long recoverWindow; public Builder() { //Default values @@ -268,6 +307,7 @@ public Builder() { this.isVersionEnabled = false; this.storageType = StorageType.DISK; this.metadata = new HashMap<>(); + this.trashEnabled = false; } public Builder setVolumeName(String volume) { @@ -337,6 +377,16 @@ public Builder setBucketEncryptionKey( return this; } + public Builder setTrashEnabled(boolean trashEnabledSetting) { + this.trashEnabled = trashEnabledSetting; + return this; + } + + public Builder setRecoverWindow(long windowLength) { + this.recoverWindow = windowLength; + return this; + } + /** * Constructs the OmBucketInfo. * @return instance of OmBucketInfo. 
@@ -349,7 +399,8 @@ public OmBucketInfo build() { Preconditions.checkNotNull(storageType); return new OmBucketInfo(volumeName, bucketName, acls, isVersionEnabled, - storageType, creationTime, objectID, updateID, metadata, bekInfo); + storageType, creationTime, objectID, updateID, metadata, bekInfo, + trashEnabled, recoverWindow); } } @@ -366,7 +417,9 @@ public BucketInfo getProtobuf() { .setCreationTime(creationTime) .setObjectID(objectID) .setUpdateID(updateID) - .addAllMetadata(KeyValueUtil.toProtobuf(metadata)); + .addAllMetadata(KeyValueUtil.toProtobuf(metadata)) + .setTrashEnabled(trashEnabled) + .setRecoverWindow(recoverWindow); if (bekInfo != null && bekInfo.getKeyName() != null) { bib.setBeinfo(OMPBHelper.convert(bekInfo)); } @@ -386,7 +439,9 @@ public static OmBucketInfo getFromProtobuf(BucketInfo bucketInfo) { OzoneAcl::fromProtobuf).collect(Collectors.toList())) .setIsVersionEnabled(bucketInfo.getIsVersionEnabled()) .setStorageType(StorageType.valueOf(bucketInfo.getStorageType())) - .setCreationTime(bucketInfo.getCreationTime()); + .setCreationTime(bucketInfo.getCreationTime()) + .setTrashEnabled(bucketInfo.getTrashEnabled()) + .setRecoverWindow(bucketInfo.getRecoverWindow()); if (bucketInfo.hasObjectID()) { obib.setObjectID(bucketInfo.getObjectID()); } @@ -405,12 +460,17 @@ public static OmBucketInfo getFromProtobuf(BucketInfo bucketInfo) { @Override public String getObjectInfo() { + String windowFormatted = OzoneRecoverWindow + .formatWindow(OzoneRecoverWindow.getOzoneRecoverWindow(recoverWindow)); + return "OMBucketInfo{" + "volume='" + volumeName + '\'' + ", bucket='" + bucketName + '\'' + ", isVersionEnabled='" + isVersionEnabled + '\'' + ", storageType='" + storageType + '\'' + ", creationTime='" + creationTime + '\'' + + ", trashEnabled='" + trashEnabled+ '\'' + + ", recoverWindow='" + windowFormatted + '\'' + '}'; } @@ -432,7 +492,9 @@ public boolean equals(Object o) { objectID == that.objectID && updateID == that.updateID && 
Objects.equals(metadata, that.metadata) && - Objects.equals(bekInfo, that.bekInfo); + Objects.equals(bekInfo, that.bekInfo) && + Objects.equals(trashEnabled, that.trashEnabled) && + recoverWindow == that.recoverWindow; } @Override diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java index b377cf2f6214..35aed8373d57 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java @@ -494,9 +494,11 @@ List listTrash(String volumeName, String bucketName, * @param volumeName - The volume name. * @param bucketName - The bucket name. * @param keyName - The key user want to recover. - * @param destinationBucket - The bucket user want to recover to. + * @param destinationBucket - The bucket under volumeName + * user want to recover to. * @return The result of recovering operation is success or not. * @throws IOException + * TODO: throw NotImplementedException when cleanup old write path. 
*/ default boolean recoverTrash(String volumeName, String bucketName, String keyName, String destinationBucket) throws IOException { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index 91eafe95ba16..8274b452a5a4 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -1396,19 +1396,19 @@ public List listTrash(String volumeName, public boolean recoverTrash(String volumeName, String bucketName, String keyName, String destinationBucket) throws IOException { - Preconditions.checkArgument(Strings.isNullOrEmpty(volumeName), + Preconditions.checkArgument(!Strings.isNullOrEmpty(volumeName), "The volume name cannot be null or empty. " + "Please enter a valid volume name."); - Preconditions.checkArgument(Strings.isNullOrEmpty(bucketName), + Preconditions.checkArgument(!Strings.isNullOrEmpty(bucketName), "The bucket name cannot be null or empty. " + "Please enter a valid bucket name."); - Preconditions.checkArgument(Strings.isNullOrEmpty(keyName), + Preconditions.checkArgument(!Strings.isNullOrEmpty(keyName), "The key name cannot be null or empty. " + "Please enter a valid key name."); - Preconditions.checkArgument(Strings.isNullOrEmpty(destinationBucket), + Preconditions.checkArgument(!Strings.isNullOrEmpty(destinationBucket), "The destination bucket name cannot be null or empty. 
" + "Please enter a valid destination bucket name."); @@ -1424,6 +1424,7 @@ public boolean recoverTrash(String volumeName, String bucketName, RecoverTrashResponse recoverResponse = handleError(submitRequest(omRequest)).getRecoverTrashResponse(); + recoverResponse.toBuilder().setResponse(true).build(); return recoverResponse.getResponse(); } diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index 569be740e293..d6edc861c75c 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -302,6 +302,11 @@ enum Status { // When transactions are replayed REPLAY = 100; + + // When recovering key that not in deletedTable. + RECOVERED_KEY_NOT_FOUND = 70; + // When recovering key that has key of same name in bucket. + RECOVERED_KEY_ALREADY_EXISTS = 71; } /** @@ -330,6 +335,7 @@ message RecoverTrashRequest { required string bucketName = 2; required string keyName = 3; required string destinationBucket = 4; + optional uint64 modificationTime = 5; } message RecoverTrashResponse { @@ -478,6 +484,8 @@ message BucketInfo { optional BucketEncryptionInfoProto beinfo = 8; optional uint64 objectID = 9; optional uint64 updateID = 10; + required bool trashEnabled = 11 [default = false]; + optional uint64 recoverWindow = 12; } enum StorageTypeProto { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyDeletingService.java index 1f483345eb26..3eeb051c74d1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyDeletingService.java @@ -24,10 +24,13 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; +import 
com.google.common.base.Optional; import com.google.protobuf.ServiceException; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeletedKeys; @@ -196,6 +199,7 @@ public BackgroundTaskResult call() throws Exception { private int deleteAllKeys(List results) throws RocksDBException, IOException { Table deletedTable = manager.getMetadataManager().getDeletedTable(); + Table trashTable = manager.getMetadataManager().getTrashTable(); DBStore store = manager.getMetadataManager().getStore(); @@ -207,6 +211,13 @@ private int deleteAllKeys(List results) // Purge key from OM DB. deletedTable.deleteWithBatch(writeBatch, result.getObjectKey()); + // Purge deleted key from OM DB. + trashTable.deleteWithBatch(writeBatch, + result.getObjectKey()); + // Clean the cache. 
+ trashTable.addCacheEntry( + new CacheKey<>(result.getObjectKey()), + new CacheValue<>(Optional.absent(), 0L)); LOG.debug("Key {} deleted from OM DB", result.getObjectKey()); deletedCount++; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java index 9a13cb94094c..391e6a8ac94a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java @@ -203,18 +203,6 @@ List listKeys(String volumeName, List listTrash(String volumeName, String bucketName, String startKeyName, String keyPrefix, int maxKeys) throws IOException; - /** - * Recover trash allows the user to recover the keys - * that were marked as deleted, but not actually deleted by Ozone Manager. - * @param volumeName - The volume name. - * @param bucketName - The bucket name. - * @param keyName - The key user want to recover. - * @param destinationBucket - The bucket user want to recover to. - * @return The result of recovering operation is success or not. - */ - boolean recoverTrash(String volumeName, String bucketName, - String keyName, String destinationBucket) throws IOException; - /** * Returns a list of volumes owned by a given user; if user is null, returns * all volumes. @@ -287,6 +275,13 @@ List listVolumes(String userName, String prefix, */ Table getDeletedTable(); + /** + * Get Trash Table. + * + * @return TrashTable. + */ + Table getTrashTable(); + /** * Gets the OpenKeyTable. 
* diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index 452afaa38cb0..a15c2b7eaa1f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -78,7 +78,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT; import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; - +import org.apache.hadoop.util.Time; import org.eclipse.jetty.util.StringUtil; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -109,6 +109,8 @@ public class OmMetadataManagerImpl implements OMMetadataManager { * |----------------------------------------------------------------------| * | deletedTable | /volumeName/bucketName/keyName->RepeatedKeyInfo | * |----------------------------------------------------------------------| + * | trashTable | /volumeName/bucketName/keyName->RepeatedKeyInfo | + * |----------------------------------------------------------------------| * | openKey | /volumeName/bucketName/keyName/id->KeyInfo | * |----------------------------------------------------------------------| * | s3SecretTable | s3g_access_key_id -> s3Secret | @@ -129,6 +131,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager { public static final String BUCKET_TABLE = "bucketTable"; public static final String KEY_TABLE = "keyTable"; public static final String DELETED_TABLE = "deletedTable"; + public static final String TRASH_TABLE = "trashTable"; public static final String OPEN_KEY_TABLE = "openKeyTable"; public static final String MULTIPARTINFO_TABLE = "multipartInfoTable"; public static final String S3_SECRET_TABLE = "s3SecretTable"; @@ -147,6 +150,7 @@ 
public class OmMetadataManagerImpl implements OMMetadataManager { private Table bucketTable; private Table keyTable; private Table deletedTable; + private Table trashTable; private Table openKeyTable; private Table multipartInfoTable; private Table s3SecretTable; @@ -208,6 +212,11 @@ public Table getDeletedTable() { return deletedTable; } + @Override + public Table getTrashTable() { + return trashTable; + } + @Override public Table getOpenKeyTable() { return openKeyTable; @@ -285,6 +294,7 @@ protected static DBStoreBuilder addOMTablesAndCodecs(DBStoreBuilder builder) { .addTable(BUCKET_TABLE) .addTable(KEY_TABLE) .addTable(DELETED_TABLE) + .addTable(TRASH_TABLE) .addTable(OPEN_KEY_TABLE) .addTable(MULTIPARTINFO_TABLE) .addTable(DELEGATION_TOKEN_TABLE) @@ -334,6 +344,11 @@ protected void initializeOmTables() throws IOException { RepeatedOmKeyInfo.class); checkTableStatus(deletedTable, DELETED_TABLE); + // We set trashTable partial-cache here. + trashTable = this.store.getTable(TRASH_TABLE, String.class, + RepeatedOmKeyInfo.class); + checkTableStatus(trashTable, TRASH_TABLE); + openKeyTable = this.store.getTable(OPEN_KEY_TABLE, String.class, OmKeyInfo.class); checkTableStatus(openKeyTable, OPEN_KEY_TABLE); @@ -815,18 +830,6 @@ public List listTrash(String volumeName, String bucketName, return deletedKeys; } - @Override - public boolean recoverTrash(String volumeName, String bucketName, - String keyName, String destinationBucket) throws IOException { - - /* TODO: HDDS-2425 and HDDS-2426 - core logic stub would be added in later patch. - */ - - boolean recoverOperation = true; - return recoverOperation; - } - /** * @param userName volume owner, null for listing all volumes. */ @@ -942,24 +945,56 @@ public List getPendingDeletionKeys(final int keyCount) KeyValue kv = keyIter.next(); if (kv != null) { RepeatedOmKeyInfo infoList = kv.getValue(); - // Get block keys as a list. 
- for(OmKeyInfo info : infoList.getOmKeyInfoList()){ - OmKeyLocationInfoGroup latest = info.getLatestVersionLocations(); - List item = latest.getLocationList().stream() - .map(b -> new BlockID(b.getContainerID(), b.getLocalID())) - .collect(Collectors.toList()); - BlockGroup keyBlocks = BlockGroup.newBuilder() - .setKeyName(kv.getKey()) - .addAllBlockIDs(item) - .build(); - keyBlocksList.add(keyBlocks); - currentCount++; - } + int lastKeyIndex = infoList.getOmKeyInfoList().size() - 1; + OmKeyInfo lastKeyInfo = infoList.getOmKeyInfoList().get(lastKeyIndex); + /* + * Once the last OmKeyInfo is checked to not delete now, + * we skip the flow of processing here. + * This way would keep this + * from deleting by DB. + */ + if (shouldDelete(lastKeyInfo)) { + // Get block keys as a list. + for (OmKeyInfo info : infoList.getOmKeyInfoList()) { + OmKeyLocationInfoGroup latest = info.getLatestVersionLocations(); + List item = latest.getLocationList().stream() + .map(b -> new BlockID(b.getContainerID(), b.getLocalID())) + .collect(Collectors.toList()); + BlockGroup keyBlocks = BlockGroup.newBuilder() + .setKeyName(kv.getKey()) + .addAllBlockIDs(item) + .build(); + keyBlocksList.add(keyBlocks); + currentCount++; + } + } /* else do no-op*/ } } } return keyBlocksList; } + /* + * Check the key should be deleting by KeyDeletingService in this time or not. + * + * If the remaining-time of key is more than recover-window, + * it should be deleted. + * @return true If key should be deleted. + */ + private boolean shouldDelete(OmKeyInfo keyInfo) throws IOException { + String bucketKey = + getBucketKey(keyInfo.getVolumeName(), keyInfo.getBucketName()); + long recoverWindow = getBucketTable().get(bucketKey).getRecoverWindow(); + + final long currentTime = Time.now(); + /** + * Because setting recover-window in 0 when creating trash-disabled bucket, + * here we could not check the trash-enabled of bucket. 
+ */ + if (currentTime - keyInfo.getModificationTime() > recoverWindow) { + return true; + } + return false; + } @Override public List getExpiredOpenKeys() throws IOException { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java index fcc4ab49f34a..5f63b36e3dec 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java @@ -368,6 +368,7 @@ private void cleanupCache(List lastRatisTransactionIndex) { omMetadataManager.getOpenKeyTable().cleanupCache(lastRatisTransactionIndex); omMetadataManager.getKeyTable().cleanupCache(lastRatisTransactionIndex); omMetadataManager.getDeletedTable().cleanupCache(lastRatisTransactionIndex); + omMetadataManager.getTrashTable().cleanupCache(lastRatisTransactionIndex); omMetadataManager.getMultipartInfoTable().cleanupCache( lastRatisTransactionIndex); omMetadataManager.getS3SecretTable().cleanupCache( diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java index 1f1b0fb05333..bfcbeeeb9cd7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java @@ -22,6 +22,8 @@ import java.util.Map; import com.google.common.base.Optional; +import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import 
org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; @@ -147,6 +149,22 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, keyName)), new CacheValue<>(Optional.absent(), trxnLogIndex)); + // Check recover-setting to update cache of trashTable. + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + boolean trashEnabled = omMetadataManager.getBucketTable() + .getCacheValue(new CacheKey<>(bucketKey)).getCacheValue() + .getTrashEnabled(); + if (trashEnabled) { + RepeatedOmKeyInfo repeatedOmKeyInfo = omMetadataManager + .getTrashTable().get(objectKey); + repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(omKeyInfo, + repeatedOmKeyInfo, trxnLogIndex, ozoneManager.isRatisEnabled()); + + omMetadataManager.getTrashTable().addCacheEntry( + new CacheKey<>(objectKey), + new CacheValue<>(Optional.of(repeatedOmKeyInfo), trxnLogIndex)); + } + // No need to add cache entries to delete table. As delete table will // be used by DeleteKeyService only, not used for any client response // validation, so we don't need to add to cache. 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java index f7783dbe42c6..3bd57f673571 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java @@ -20,6 +20,10 @@ import java.io.IOException; import java.util.ArrayList; + +import com.google.common.base.Optional; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -85,6 +89,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, for (String deletedKey : bucketWithDeleteKeys.getKeysList()) { RepeatedOmKeyInfo repeatedOmKeyInfo = omMetadataManager.getDeletedTable().get(deletedKey); + // Update cache of trashTable. 
+ omMetadataManager.getTrashTable().addCacheEntry( + new CacheKey<>(deletedKey), + new CacheValue<>(Optional.absent(), trxnLogIndex)); boolean purgeKey = true; if (repeatedOmKeyInfo != null) { for (OmKeyInfo omKeyInfo : repeatedOmKeyInfo.getOmKeyInfoList()) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMTrashRecoverRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMTrashRecoverRequest.java index eac7842f84e2..d983e3b3b2ea 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMTrashRecoverRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMTrashRecoverRequest.java @@ -20,10 +20,19 @@ import java.io.IOException; +import com.google.common.base.Optional; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.exceptions.OMReplayException; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.response.key.OMTrashRecoverResponse; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.util.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -36,10 +45,14 @@ .OMResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .RecoverTrashResponse; + 
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.RECOVERED_KEY_ALREADY_EXISTS; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes + .RECOVERED_KEY_NOT_FOUND; /** * Handles RecoverTrash request. @@ -58,7 +71,13 @@ public OMRequest preExecute(OzoneManager ozoneManager) { .getRecoverTrashRequest(); Preconditions.checkNotNull(recoverTrashRequest); - return getOmRequest().toBuilder().build(); + long modificationTime = Time.now(); + + return getOmRequest().toBuilder() + .setRecoverTrashRequest( + recoverTrashRequest.toBuilder() + .setModificationTime(modificationTime)) + .setUserInfo(getUserInfo()).build(); } @Override @@ -78,13 +97,14 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, * OMMetrics omMetrics = ozoneManager.getMetrics(); */ - OMResponse.Builder omResponse = OMResponse.newBuilder() - .setCmdType(Type.RecoverTrash).setStatus(Status.OK) - .setSuccess(true); + OMResponse.Builder omResponse = OmResponseUtil + .getOMResponseBuilder(getOmRequest()); OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); boolean acquireLock = false; OMClientResponse omClientResponse = null; + //TODO: HDDS-2818. New Metrics for Trash Key Recover and Fails. + Result result = null; try { // Check acl for the destination bucket. checkBucketAcls(ozoneManager, volumeName, destinationBucket, keyName, @@ -93,31 +113,89 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, acquireLock = omMetadataManager.getLock() .acquireWriteLock(BUCKET_LOCK, volumeName, destinationBucket); - // Validate. + // Validate original vol/buc, destinationBucket exists or not. validateBucketAndVolume(omMetadataManager, volumeName, bucketName); validateBucketAndVolume(omMetadataManager, volumeName, destinationBucket); + // TODO: HDDS-2425. recovering trash in non-existing bucket. 
+ + String trashTableKey = omMetadataManager + .getOzoneKey(volumeName, bucketName, keyName); + RepeatedOmKeyInfo trashRepeatedKeyInfo = + omMetadataManager.getTrashTable().get(trashTableKey); + OmKeyInfo trashKeyInfo = null; + if (trashRepeatedKeyInfo != null) { + int lastKeyIndex = trashRepeatedKeyInfo.getOmKeyInfoList().size() - 1; + trashKeyInfo = trashRepeatedKeyInfo + .getOmKeyInfoList().get(lastKeyIndex); + // update modificationTime after recovering. + trashKeyInfo.setModificationTime( + recoverTrashRequest.getModificationTime()); + + // Check this transaction is replayed or not. + if (isReplay(ozoneManager, trashKeyInfo, transactionLogIndex)) { + throw new OMReplayException(); + } + + // Set the updateID to current transactionLogIndex. + trashKeyInfo.setUpdateID(transactionLogIndex, + ozoneManager.isRatisEnabled()); + + // Update cache of keyTable, + if (omMetadataManager.getKeyTable().get(trashTableKey) != null) { + throw new OMException( + "The bucket has key of same name as recovered key", + RECOVERED_KEY_ALREADY_EXISTS); + } else { + omMetadataManager.getKeyTable().addCacheEntry( + new CacheKey<>(trashTableKey), + new CacheValue<>(Optional.of(trashKeyInfo), transactionLogIndex)); + } + + // Update cache of trashTable. + trashRepeatedKeyInfo.getOmKeyInfoList().remove(lastKeyIndex); + omMetadataManager.getTrashTable().addCacheEntry( + new CacheKey<>(trashTableKey), + new CacheValue<>(Optional.of(trashRepeatedKeyInfo), + transactionLogIndex)); + + // Update cache of deletedTable. + omMetadataManager.getDeletedTable().addCacheEntry( + new CacheKey<>(trashTableKey), + new CacheValue<>(Optional.of(trashRepeatedKeyInfo), + transactionLogIndex)); - /** TODO: HDDS-2425. HDDS-2426. - * Update cache. - * omMetadataManager.getKeyTable().addCacheEntry( - * new CacheKey<>(), - * new CacheValue<>() - * ); - * - * Execute recovering trash in non-existing bucket. - * Execute recovering trash in existing bucket. 
- * omClientResponse = new OMTrashRecoverResponse(omKeyInfo, - * omResponse.setRecoverTrashResponse( - * RecoverTrashResponse.newBuilder()) - * .build()); - */ - omClientResponse = null; + omResponse.setSuccess(true); + } else { + /* key we want to recover not exist */ + throw new OMException("Recovered key is not in trash table", + RECOVERED_KEY_NOT_FOUND); + } + + result = Result.SUCCESS; + omClientResponse = new OMTrashRecoverResponse(trashRepeatedKeyInfo, + trashKeyInfo, + omResponse.setRecoverTrashResponse( + RecoverTrashResponse.newBuilder().setResponse(true)) + .build()); + + } catch (OMException | OMReplayException ex) { + LOG.error("Fail for recovering trash.", ex); + if (ex instanceof OMReplayException) { + omClientResponse = new OMTrashRecoverResponse(null, null, + createReplayOMResponse(omResponse)); + result = Result.REPLAY; + } else { + omClientResponse = new OMTrashRecoverResponse(null, null, + createErrorOMResponse(omResponse, ex)); + result = Result.FAILURE; + } } catch (IOException ex) { LOG.error("Fail for recovering trash.", ex); - omClientResponse = new OMTrashRecoverResponse(null, + omClientResponse = new OMTrashRecoverResponse(null, null, createErrorOMResponse(omResponse, ex)); + result = Result.FAILURE; } finally { if (omClientResponse != null) { omClientResponse.setFlushFuture( diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java index afff73a8836e..453d040d2c97 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java @@ -86,6 +86,9 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, isRatisEnabled); omMetadataManager.getDeletedTable().putWithBatch(batchOperation, ozoneKey, 
repeatedOmKeyInfo); + // Update trashTable in DB. + omMetadataManager.getTrashTable().putWithBatch(batchOperation, + ozoneKey, repeatedOmKeyInfo); } } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java index abfc0f6cdf63..5c9369813f4a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java @@ -55,8 +55,8 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, BatchOperation batchOperation) throws IOException { for (String key : purgeKeyList) { - omMetadataManager.getDeletedTable().deleteWithBatch(batchOperation, - key); + omMetadataManager.getDeletedTable().deleteWithBatch(batchOperation, key); + omMetadataManager.getTrashTable().deleteWithBatch(batchOperation, key); } } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMTrashRecoverResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMTrashRecoverResponse.java index fb330a309046..28692276a863 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMTrashRecoverResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMTrashRecoverResponse.java @@ -18,13 +18,14 @@ package org.apache.hadoop.ozone.om.response.key; -import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMResponse; +import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .Status; import org.apache.hadoop.hdds.utils.db.BatchOperation; import java.io.IOException; @@ -36,29 +37,38 @@ */ public class OMTrashRecoverResponse extends OMClientResponse { private OmKeyInfo omKeyInfo; + private RepeatedOmKeyInfo trashRepeatedKeyInfo; - public OMTrashRecoverResponse(@Nullable OmKeyInfo omKeyInfo, + public OMTrashRecoverResponse( + @Nullable RepeatedOmKeyInfo trashRepeatedKeyInfo, + @Nullable OmKeyInfo omKeyInfo, @Nonnull OMResponse omResponse) { super(omResponse); + this.trashRepeatedKeyInfo = trashRepeatedKeyInfo; this.omKeyInfo = omKeyInfo; } @Override public void addToDBBatch(OMMetadataManager omMetadataManager, BatchOperation batchOperation) throws IOException { + // For omResponse with non-OK status, we do nothing. + if (getOMResponse().getStatus() == Status.OK) { + String trashTableKey = omMetadataManager.getOzoneKey( + omKeyInfo.getVolumeName(), omKeyInfo.getBucketName(), + omKeyInfo.getKeyName()); + + // Update keyTable in OMDB. + omMetadataManager.getKeyTable() + .putWithBatch(batchOperation, trashTableKey, omKeyInfo); + + // Update trashTable in OMDB + omMetadataManager.getTrashTable() + .putWithBatch(batchOperation, trashTableKey, trashRepeatedKeyInfo); - /* TODO: HDDS-2425. HDDS-2426. */ - String trashKey = omMetadataManager - .getOzoneKey(omKeyInfo.getVolumeName(), - omKeyInfo.getBucketName(), omKeyInfo.getKeyName()); - RepeatedOmKeyInfo repeatedOmKeyInfo = omMetadataManager - .getDeletedTable().get(trashKey); - omKeyInfo = OmUtils.prepareKeyForRecover(omKeyInfo, repeatedOmKeyInfo); - omMetadataManager.getDeletedTable() - .deleteWithBatch(batchOperation, omKeyInfo.getKeyName()); - /* TODO: trashKey should be updated to destinationBucket. 
*/ - omMetadataManager.getKeyTable() - .putWithBatch(batchOperation, trashKey, omKeyInfo); + // Update deletedTable in OMDB + omMetadataManager.getDeletedTable() + .putWithBatch(batchOperation, trashTableKey, trashRepeatedKeyInfo); + } } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java deleted file mode 100644 index cf9e62649deb..000000000000 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package org.apache.hadoop.ozone.om; - - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.server.ServerUtils; -import org.apache.hadoop.hdds.utils.db.DBConfigFromFile; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.OpenKeySession; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.UUID; - -/** - * Test Key Trash Service. - *

- * This test does the things including: - * 1. UTs for list trash. - * 2. UTs for recover trash. - * 3. UTs for empty trash. - *

- */ -public class TestTrashService { - - @Rule - public TemporaryFolder tempFolder = new TemporaryFolder(); - - private KeyManager keyManager; - private OmMetadataManagerImpl omMetadataManager; - private String volumeName; - private String bucketName; - - @Before - public void setup() throws IOException { - OzoneConfiguration configuration = new OzoneConfiguration(); - - File folder = tempFolder.newFolder(); - if (!folder.exists()) { - Assert.assertTrue(folder.mkdirs()); - } - System.setProperty(DBConfigFromFile.CONFIG_DIR, "/"); - ServerUtils.setOzoneMetaDirPath(configuration, folder.toString()); - - omMetadataManager = new OmMetadataManagerImpl(configuration); - - keyManager = new KeyManagerImpl( - new ScmBlockLocationTestingClient(null, null, 0), - omMetadataManager, configuration, UUID.randomUUID().toString(), null); - keyManager.start(configuration); - - volumeName = "volume"; - bucketName = "bucket"; - } - - @Test - public void testRecoverTrash() throws IOException { - String keyName = "testKey"; - String destinationBucket = "destBucket"; - createAndDeleteKey(keyName); - - boolean recoverOperation = omMetadataManager - .recoverTrash(volumeName, bucketName, keyName, destinationBucket); - Assert.assertTrue(recoverOperation); - } - - private void createAndDeleteKey(String keyName) throws IOException { - - TestOMRequestUtils.addVolumeToOM(keyManager.getMetadataManager(), - OmVolumeArgs.newBuilder() - .setOwnerName("owner") - .setAdminName("admin") - .setVolume(volumeName) - .build()); - - TestOMRequestUtils.addBucketToOM(keyManager.getMetadataManager(), - OmBucketInfo.newBuilder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .build()); - - OmKeyArgs keyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .setAcls(Collections.emptyList()) - .setLocationInfoList(new ArrayList<>()) - .build(); - - /* Create and delete key in the Key Manager. 
*/ - OpenKeySession session = keyManager.openKey(keyArgs); - keyManager.commitKey(keyArgs, session.getId()); - keyManager.deleteKey(keyArgs); - } - -} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java index c5aa9fe103b9..263f91511eb2 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java @@ -291,7 +291,9 @@ public static OzoneManagerProtocolProtos.OMRequest createBucketRequest( .setVolumeName(volumeName) .setIsVersionEnabled(isVersionEnabled) .setStorageType(storageTypeProto) - .addAllMetadata(getMetadataList()).build(); + .addAllMetadata(getMetadataList()) + .setTrashEnabled(false) + .setRecoverWindow(0).build(); OzoneManagerProtocolProtos.CreateBucketRequest.Builder req = OzoneManagerProtocolProtos.CreateBucketRequest.newBuilder(); req.setBucketInfo(bucketInfo); @@ -428,7 +430,7 @@ public static OMRequest createVolumeSetAclRequest(String volumeName, } /** - * Deletes key from Key table and adds it to DeletedKeys table. + * Deletes key from Key table and adds it to deletedTable and trashTable. * @return the deletedKey name */ public static String deleteKey(String ozoneKey, @@ -448,6 +450,12 @@ public static String deleteKey(String ozoneKey, omMetadataManager.getDeletedTable().put(ozoneKey, repeatedOmKeyInfo); + // Update cache of trashTable and update trashTable in OM DB. 
+ omMetadataManager.getTrashTable().addCacheEntry( + new CacheKey<>(ozoneKey), + new CacheValue<>(Optional.of(repeatedOmKeyInfo), trxnLogIndex)); + omMetadataManager.getTrashTable().put(ozoneKey, repeatedOmKeyInfo); + return ozoneKey; } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMTrashRecoverRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMTrashRecoverRequest.java new file mode 100644 index 000000000000..8964a80d17ac --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMTrashRecoverRequest.java @@ -0,0 +1,154 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.request.key; + +import java.util.UUID; + +import org.junit.Assert; +import org.junit.Test; + +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .RecoverTrashRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMRequest; + +/** + * Tests OMTrashRecoverRequest request. + */ +public class TestOMTrashRecoverRequest extends TestOMKeyRequest { + + @Test + public void testPreExecute() throws Exception { + doPreExecute(createRecoverTrashRequest()); + } + + @Test + public void testValidateAndUpdateCache() throws Exception { + OMRequest modifiedOmRequest = + doPreExecute(createRecoverTrashRequest()); + + OMTrashRecoverRequest omTrashRecoverRequest = + new OMTrashRecoverRequest(modifiedOmRequest); + + // Add volume, bucket and key entries to OM DB. + TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + + // TransactionLogIndex is 0L here. + TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName, + clientID, replicationType, replicationFactor, omMetadataManager); + + String tableKey = omMetadataManager.getOzoneKey(volumeName, bucketName, + keyName); + // let's set transactionLogIndex 10L here. + long trxnLogIndex = 10L; + TestOMRequestUtils.deleteKey(tableKey, omMetadataManager, trxnLogIndex); + + // We delete entry in keyTable when executing deleteKey. 
+ OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(tableKey); + Assert.assertNull(omKeyInfo); + + trxnLogIndex = trxnLogIndex + 10; + OMClientResponse omClientResponse = + omTrashRecoverRequest.validateAndUpdateCache(ozoneManager, + trxnLogIndex, ozoneManagerDoubleBufferHelper); + + Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK, + omClientResponse.getOMResponse().getStatus()); + + // Now after calling validateAndUpdateCache, the key should exist. + omKeyInfo = omMetadataManager.getKeyTable().get(tableKey); + Assert.assertNotNull(omKeyInfo); + } + + @Test + public void testReplayRequest() throws Exception { + OMRequest modifiedOmRequest = + doPreExecute(createRecoverTrashRequest()); + + OMTrashRecoverRequest omTrashRecoverRequest = + new OMTrashRecoverRequest(modifiedOmRequest); + + // Add volume, bucket and key entries to OM DB. + TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + + TestOMRequestUtils.addKeyToTableAndCache(volumeName, bucketName, keyName, + clientID, replicationType, replicationFactor, 1L, omMetadataManager); + + // Let's set transactionLogIndex to 10L here. + long trxnLogIndex = 10L; + String tableKey = omMetadataManager + .getOzoneKey(volumeName, bucketName, keyName); + TestOMRequestUtils.deleteKey(tableKey, omMetadataManager, trxnLogIndex); + + // Replay the original TrashRecoverRequest. + OMClientResponse omClientResponse = omTrashRecoverRequest + .validateAndUpdateCache(ozoneManager, trxnLogIndex, + ozoneManagerDoubleBufferHelper); + + // Replay should result in a REPLAY response. + Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY, + omClientResponse.getOMResponse().getStatus()); + } + + /** + * This method calls preExecute and verifies the modified request. + * @param originalOmRequest - OMRequest before preExecute. + * @return OMRequest - modified request returned from preExecute. 
+ */ + private OMRequest doPreExecute(OMRequest originalOmRequest) throws Exception { + + OMTrashRecoverRequest omTrashRecoverRequest = + new OMTrashRecoverRequest(originalOmRequest); + + OMRequest modifiedOmRequest = omTrashRecoverRequest + .preExecute(ozoneManager); + + // Should not be equal, as preExecute updates modificationTime. + Assert.assertNotEquals(originalOmRequest, modifiedOmRequest); + + return modifiedOmRequest; + } + + /** + * Create OMRequest which encapsulates RecoverTrashRequest. + * @return OMRequest - initial OMRequest. + */ + private OMRequest createRecoverTrashRequest() { + + //TODO: HDDS-2425 recover to non-existing bucket. + RecoverTrashRequest recoverTrashRequest = + RecoverTrashRequest.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .setDestinationBucket(bucketName) + .setModificationTime(0L) + .build(); + + return OMRequest.newBuilder().setRecoverTrashRequest(recoverTrashRequest) + .setCmdType(OzoneManagerProtocolProtos.Type.RecoverTrash) + .setClientId(UUID.randomUUID().toString()).build(); + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMTrashRecoverResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMTrashRecoverResponse.java new file mode 100644 index 000000000000..44128d339486 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMTrashRecoverResponse.java @@ -0,0 +1,78 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response.key; + +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.junit.Assert; +import org.junit.Test; + +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .RecoverTrashResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; + +/** + * Tests OMTrashRecoverResponse. 
+ */ +public class TestOMTrashRecoverResponse extends TestOMKeyResponse { + + @Test + public void testAddToDBBatch() throws Exception { + + OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, + bucketName, keyName, replicationType, replicationFactor); + TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName, + clientID, replicationType, replicationFactor, omMetadataManager); + + String tableKey = omMetadataManager + .getOzoneKey(volumeName, bucketName, keyName); + long trxnLogIndex = 10L; + TestOMRequestUtils.deleteKey(tableKey, omMetadataManager, trxnLogIndex); + + Assert.assertNull(omMetadataManager.getKeyTable().get(tableKey)); + + RecoverTrashResponse recoverTrashResponse = RecoverTrashResponse + .newBuilder().setResponse(true).build(); + OMResponse omResponse = OMResponse.newBuilder() + .setRecoverTrashResponse(recoverTrashResponse) + .setStatus(Status.OK).setCmdType(Type.RecoverTrash) + .build(); + + RepeatedOmKeyInfo trashRepeatedKeyInfo = + omMetadataManager.getTrashTable().get(tableKey); + OMTrashRecoverResponse omTrashRecoverResponse = + new OMTrashRecoverResponse(trashRepeatedKeyInfo, omKeyInfo, omResponse); + + omTrashRecoverResponse.addToDBBatch(omMetadataManager, batchOperation); + omMetadataManager.getStore().commitBatchOperation(batchOperation); + + Assert.assertNotNull(omMetadataManager.getKeyTable().get(tableKey)); + + /* TODO: HDDS-2425 Complete tests about the table used in the flow. + * Include trashTable and deletedTable. 
+ */ + } + +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java index 901c1e352cd4..b0a834319439 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.shell.bucket; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.StorageType; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.BucketArgs; @@ -24,6 +25,10 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.shell.OzoneAddress; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TRASH_ENABLED_KEY; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TRASH_ENABLED_KEY_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TRASH_RECOVER_WINDOW; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TRASH_RECOVER_WINDOW_DEFAULT; import picocli.CommandLine.Command; import picocli.CommandLine.Option; @@ -46,6 +51,23 @@ public class CreateBucketHandler extends BucketHandler { "false/unspecified indicates otherwise") private Boolean isGdprEnforced; + @Option(names = {"--enableTrash", "-t"}, + description = "if true, indicates bucket with trash-enabled, " + + "false indicates trash-disabled, " + + "unspecified depends on global setting (default is false)") + // Using null to check whether assigned by user. + private Boolean trashEnabled = null; + + @Option(names = {"--recoverWindow", "-r"}, + description = + "if trash-enabled," + + " set indicates recover window of key in this bucket" + + " (eg. 
5MIN, 1HR, 1DAY), " + + "unspecified depends on global setting (default is 0MIN)\n" + + "if trash-disabled, indicates ignoring.") + // Using null to check whether assigned by user. + private String recoverWindow = null; + /** * Executes create bucket. */ @@ -61,6 +83,34 @@ public void execute(OzoneClient client, OzoneAddress address) bb.addMetadata(OzoneConsts.GDPR_FLAG, String.valueOf(isGdprEnforced)); } + // If user did not assign property of trash, it depends on global setting. + OzoneConfiguration ozoneConfig = getConf(); + if (trashEnabled == null) { + trashEnabled = ozoneConfig.getBoolean( + OZONE_TRASH_ENABLED_KEY, + OZONE_TRASH_ENABLED_KEY_DEFAULT); + } + if (recoverWindow == null) { + recoverWindow = ozoneConfig.get( + OZONE_TRASH_RECOVER_WINDOW, + OZONE_TRASH_RECOVER_WINDOW_DEFAULT); + } + + if (isGdprEnforced != null && + isGdprEnforced && + trashEnabled) { + trashEnabled = false; + System.out.println("GDPR-enabled buckets cannot be trash-enabled.\n" + + "Set trash-disabled."); + } + bb.setTrashEnabled(trashEnabled); + + if (trashEnabled) { + bb.setRecoverWindow(recoverWindow); + } else { + bb.setRecoverWindow("0MIN"); + } + if (bekName != null) { if (!bekName.isEmpty()) { bb.setBucketEncryptionKey(bekName);