lastRatisTransactionIndex) {
omMetadataManager.getOpenKeyTable().cleanupCache(lastRatisTransactionIndex);
omMetadataManager.getKeyTable().cleanupCache(lastRatisTransactionIndex);
omMetadataManager.getDeletedTable().cleanupCache(lastRatisTransactionIndex);
+ omMetadataManager.getTrashTable().cleanupCache(lastRatisTransactionIndex);
omMetadataManager.getMultipartInfoTable().cleanupCache(
lastRatisTransactionIndex);
omMetadataManager.getS3SecretTable().cleanupCache(
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
index 1f1b0fb05333..bfcbeeeb9cd7 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
@@ -22,6 +22,8 @@
import java.util.Map;
import com.google.common.base.Optional;
+import org.apache.hadoop.ozone.OmUtils;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
@@ -147,6 +149,22 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
keyName)),
new CacheValue<>(Optional.absent(), trxnLogIndex));
+      // Check the bucket's trash-enabled setting before updating the trashTable cache.
+ String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+ boolean trashEnabled = omMetadataManager.getBucketTable()
+ .getCacheValue(new CacheKey<>(bucketKey)).getCacheValue()
+ .getTrashEnabled();
+ if (trashEnabled) {
+ RepeatedOmKeyInfo repeatedOmKeyInfo = omMetadataManager
+ .getTrashTable().get(objectKey);
+ repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(omKeyInfo,
+ repeatedOmKeyInfo, trxnLogIndex, ozoneManager.isRatisEnabled());
+
+ omMetadataManager.getTrashTable().addCacheEntry(
+ new CacheKey<>(objectKey),
+ new CacheValue<>(Optional.of(repeatedOmKeyInfo), trxnLogIndex));
+ }
+
// No need to add cache entries to delete table. As delete table will
// be used by DeleteKeyService only, not used for any client response
// validation, so we don't need to add to cache.
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java
index f7783dbe42c6..3bd57f673571 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java
@@ -20,6 +20,10 @@
import java.io.IOException;
import java.util.ArrayList;
+
+import com.google.common.base.Optional;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
@@ -85,6 +89,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
for (String deletedKey : bucketWithDeleteKeys.getKeysList()) {
RepeatedOmKeyInfo repeatedOmKeyInfo =
omMetadataManager.getDeletedTable().get(deletedKey);
+ // Update cache of trashTable.
+ omMetadataManager.getTrashTable().addCacheEntry(
+ new CacheKey<>(deletedKey),
+ new CacheValue<>(Optional.absent(), trxnLogIndex));
boolean purgeKey = true;
if (repeatedOmKeyInfo != null) {
for (OmKeyInfo omKeyInfo : repeatedOmKeyInfo.getOmKeyInfoList()) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMTrashRecoverRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMTrashRecoverRequest.java
index eac7842f84e2..d983e3b3b2ea 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMTrashRecoverRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMTrashRecoverRequest.java
@@ -20,10 +20,19 @@
import java.io.IOException;
+import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.exceptions.OMReplayException;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
import org.apache.hadoop.ozone.om.response.key.OMTrashRecoverResponse;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -36,10 +45,14 @@
.OMResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.OMRequest;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+ .RecoverTrashResponse;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.RECOVERED_KEY_ALREADY_EXISTS;
import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes
+ .RECOVERED_KEY_NOT_FOUND;
/**
* Handles RecoverTrash request.
@@ -58,7 +71,13 @@ public OMRequest preExecute(OzoneManager ozoneManager) {
.getRecoverTrashRequest();
Preconditions.checkNotNull(recoverTrashRequest);
- return getOmRequest().toBuilder().build();
+ long modificationTime = Time.now();
+
+ return getOmRequest().toBuilder()
+ .setRecoverTrashRequest(
+ recoverTrashRequest.toBuilder()
+ .setModificationTime(modificationTime))
+ .setUserInfo(getUserInfo()).build();
}
@Override
@@ -78,13 +97,14 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
* OMMetrics omMetrics = ozoneManager.getMetrics();
*/
- OMResponse.Builder omResponse = OMResponse.newBuilder()
- .setCmdType(Type.RecoverTrash).setStatus(Status.OK)
- .setSuccess(true);
+ OMResponse.Builder omResponse = OmResponseUtil
+ .getOMResponseBuilder(getOmRequest());
OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
boolean acquireLock = false;
OMClientResponse omClientResponse = null;
+ //TODO: HDDS-2818. New Metrics for Trash Key Recover and Fails.
+ Result result = null;
try {
// Check acl for the destination bucket.
checkBucketAcls(ozoneManager, volumeName, destinationBucket, keyName,
@@ -93,31 +113,89 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
acquireLock = omMetadataManager.getLock()
.acquireWriteLock(BUCKET_LOCK, volumeName, destinationBucket);
- // Validate.
+ // Validate original vol/buc, destinationBucket exists or not.
validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
validateBucketAndVolume(omMetadataManager, volumeName, destinationBucket);
+ // TODO: HDDS-2425. recovering trash in non-existing bucket.
+
+ String trashTableKey = omMetadataManager
+ .getOzoneKey(volumeName, bucketName, keyName);
+ RepeatedOmKeyInfo trashRepeatedKeyInfo =
+ omMetadataManager.getTrashTable().get(trashTableKey);
+ OmKeyInfo trashKeyInfo = null;
+ if (trashRepeatedKeyInfo != null) {
+ int lastKeyIndex = trashRepeatedKeyInfo.getOmKeyInfoList().size() - 1;
+ trashKeyInfo = trashRepeatedKeyInfo
+ .getOmKeyInfoList().get(lastKeyIndex);
+ // update modificationTime after recovering.
+ trashKeyInfo.setModificationTime(
+ recoverTrashRequest.getModificationTime());
+
+ // Check this transaction is replayed or not.
+ if (isReplay(ozoneManager, trashKeyInfo, transactionLogIndex)) {
+ throw new OMReplayException();
+ }
+
+ // Set the updateID to current transactionLogIndex.
+ trashKeyInfo.setUpdateID(transactionLogIndex,
+ ozoneManager.isRatisEnabled());
+
+      // Update cache of keyTable; fail if a key with the same name already exists.
+ if (omMetadataManager.getKeyTable().get(trashTableKey) != null) {
+ throw new OMException(
+ "The bucket has key of same name as recovered key",
+ RECOVERED_KEY_ALREADY_EXISTS);
+ } else {
+ omMetadataManager.getKeyTable().addCacheEntry(
+ new CacheKey<>(trashTableKey),
+ new CacheValue<>(Optional.of(trashKeyInfo), transactionLogIndex));
+ }
+
+ // Update cache of trashTable.
+ trashRepeatedKeyInfo.getOmKeyInfoList().remove(lastKeyIndex);
+ omMetadataManager.getTrashTable().addCacheEntry(
+ new CacheKey<>(trashTableKey),
+ new CacheValue<>(Optional.of(trashRepeatedKeyInfo),
+ transactionLogIndex));
+
+ // Update cache of deletedTable.
+ omMetadataManager.getDeletedTable().addCacheEntry(
+ new CacheKey<>(trashTableKey),
+ new CacheValue<>(Optional.of(trashRepeatedKeyInfo),
+ transactionLogIndex));
- /** TODO: HDDS-2425. HDDS-2426.
- * Update cache.
- * omMetadataManager.getKeyTable().addCacheEntry(
- * new CacheKey<>(),
- * new CacheValue<>()
- * );
- *
- * Execute recovering trash in non-existing bucket.
- * Execute recovering trash in existing bucket.
- * omClientResponse = new OMTrashRecoverResponse(omKeyInfo,
- * omResponse.setRecoverTrashResponse(
- * RecoverTrashResponse.newBuilder())
- * .build());
- */
- omClientResponse = null;
+ omResponse.setSuccess(true);
+ } else {
+        /* The key we want to recover does not exist in the trash table. */
+ throw new OMException("Recovered key is not in trash table",
+ RECOVERED_KEY_NOT_FOUND);
+ }
+
+ result = Result.SUCCESS;
+ omClientResponse = new OMTrashRecoverResponse(trashRepeatedKeyInfo,
+ trashKeyInfo,
+ omResponse.setRecoverTrashResponse(
+ RecoverTrashResponse.newBuilder().setResponse(true))
+ .build());
+
+ } catch (OMException | OMReplayException ex) {
+ LOG.error("Fail for recovering trash.", ex);
+ if (ex instanceof OMReplayException) {
+ omClientResponse = new OMTrashRecoverResponse(null, null,
+ createReplayOMResponse(omResponse));
+ result = Result.REPLAY;
+ } else {
+ omClientResponse = new OMTrashRecoverResponse(null, null,
+ createErrorOMResponse(omResponse, ex));
+ result = Result.FAILURE;
+ }
} catch (IOException ex) {
LOG.error("Fail for recovering trash.", ex);
- omClientResponse = new OMTrashRecoverResponse(null,
+ omClientResponse = new OMTrashRecoverResponse(null, null,
createErrorOMResponse(omResponse, ex));
+ result = Result.FAILURE;
} finally {
if (omClientResponse != null) {
omClientResponse.setFlushFuture(
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
index afff73a8836e..453d040d2c97 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java
@@ -86,6 +86,9 @@ public void addToDBBatch(OMMetadataManager omMetadataManager,
isRatisEnabled);
omMetadataManager.getDeletedTable().putWithBatch(batchOperation,
ozoneKey, repeatedOmKeyInfo);
+ // Update trashTable in DB.
+ omMetadataManager.getTrashTable().putWithBatch(batchOperation,
+ ozoneKey, repeatedOmKeyInfo);
}
}
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java
index abfc0f6cdf63..5c9369813f4a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyPurgeResponse.java
@@ -55,8 +55,8 @@ public void addToDBBatch(OMMetadataManager omMetadataManager,
BatchOperation batchOperation) throws IOException {
for (String key : purgeKeyList) {
- omMetadataManager.getDeletedTable().deleteWithBatch(batchOperation,
- key);
+ omMetadataManager.getDeletedTable().deleteWithBatch(batchOperation, key);
+ omMetadataManager.getTrashTable().deleteWithBatch(batchOperation, key);
}
}
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMTrashRecoverResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMTrashRecoverResponse.java
index fb330a309046..28692276a863 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMTrashRecoverResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMTrashRecoverResponse.java
@@ -18,13 +18,14 @@
package org.apache.hadoop.ozone.om.response.key;
-import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+ .Status;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
import java.io.IOException;
@@ -36,29 +37,38 @@
*/
public class OMTrashRecoverResponse extends OMClientResponse {
private OmKeyInfo omKeyInfo;
+ private RepeatedOmKeyInfo trashRepeatedKeyInfo;
- public OMTrashRecoverResponse(@Nullable OmKeyInfo omKeyInfo,
+ public OMTrashRecoverResponse(
+ @Nullable RepeatedOmKeyInfo trashRepeatedKeyInfo,
+ @Nullable OmKeyInfo omKeyInfo,
@Nonnull OMResponse omResponse) {
super(omResponse);
+ this.trashRepeatedKeyInfo = trashRepeatedKeyInfo;
this.omKeyInfo = omKeyInfo;
}
@Override
public void addToDBBatch(OMMetadataManager omMetadataManager,
BatchOperation batchOperation) throws IOException {
+ // For omResponse with non-OK status, we do nothing.
+ if (getOMResponse().getStatus() == Status.OK) {
+ String trashTableKey = omMetadataManager.getOzoneKey(
+ omKeyInfo.getVolumeName(), omKeyInfo.getBucketName(),
+ omKeyInfo.getKeyName());
+
+ // Update keyTable in OMDB.
+ omMetadataManager.getKeyTable()
+ .putWithBatch(batchOperation, trashTableKey, omKeyInfo);
+
+ // Update trashTable in OMDB
+ omMetadataManager.getTrashTable()
+ .putWithBatch(batchOperation, trashTableKey, trashRepeatedKeyInfo);
- /* TODO: HDDS-2425. HDDS-2426. */
- String trashKey = omMetadataManager
- .getOzoneKey(omKeyInfo.getVolumeName(),
- omKeyInfo.getBucketName(), omKeyInfo.getKeyName());
- RepeatedOmKeyInfo repeatedOmKeyInfo = omMetadataManager
- .getDeletedTable().get(trashKey);
- omKeyInfo = OmUtils.prepareKeyForRecover(omKeyInfo, repeatedOmKeyInfo);
- omMetadataManager.getDeletedTable()
- .deleteWithBatch(batchOperation, omKeyInfo.getKeyName());
- /* TODO: trashKey should be updated to destinationBucket. */
- omMetadataManager.getKeyTable()
- .putWithBatch(batchOperation, trashKey, omKeyInfo);
+ // Update deletedTable in OMDB
+ omMetadataManager.getDeletedTable()
+ .putWithBatch(batchOperation, trashTableKey, trashRepeatedKeyInfo);
+ }
}
}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java
deleted file mode 100644
index cf9e62649deb..000000000000
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package org.apache.hadoop.ozone.om;
-
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.server.ServerUtils;
-import org.apache.hadoop.hdds.utils.db.DBConfigFromFile;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
-import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.UUID;
-
-/**
- * Test Key Trash Service.
- *
- * This test does the things including:
- * 1. UTs for list trash.
- * 2. UTs for recover trash.
- * 3. UTs for empty trash.
- *
- */
-public class TestTrashService {
-
- @Rule
- public TemporaryFolder tempFolder = new TemporaryFolder();
-
- private KeyManager keyManager;
- private OmMetadataManagerImpl omMetadataManager;
- private String volumeName;
- private String bucketName;
-
- @Before
- public void setup() throws IOException {
- OzoneConfiguration configuration = new OzoneConfiguration();
-
- File folder = tempFolder.newFolder();
- if (!folder.exists()) {
- Assert.assertTrue(folder.mkdirs());
- }
- System.setProperty(DBConfigFromFile.CONFIG_DIR, "/");
- ServerUtils.setOzoneMetaDirPath(configuration, folder.toString());
-
- omMetadataManager = new OmMetadataManagerImpl(configuration);
-
- keyManager = new KeyManagerImpl(
- new ScmBlockLocationTestingClient(null, null, 0),
- omMetadataManager, configuration, UUID.randomUUID().toString(), null);
- keyManager.start(configuration);
-
- volumeName = "volume";
- bucketName = "bucket";
- }
-
- @Test
- public void testRecoverTrash() throws IOException {
- String keyName = "testKey";
- String destinationBucket = "destBucket";
- createAndDeleteKey(keyName);
-
- boolean recoverOperation = omMetadataManager
- .recoverTrash(volumeName, bucketName, keyName, destinationBucket);
- Assert.assertTrue(recoverOperation);
- }
-
- private void createAndDeleteKey(String keyName) throws IOException {
-
- TestOMRequestUtils.addVolumeToOM(keyManager.getMetadataManager(),
- OmVolumeArgs.newBuilder()
- .setOwnerName("owner")
- .setAdminName("admin")
- .setVolume(volumeName)
- .build());
-
- TestOMRequestUtils.addBucketToOM(keyManager.getMetadataManager(),
- OmBucketInfo.newBuilder()
- .setVolumeName(volumeName)
- .setBucketName(bucketName)
- .build());
-
- OmKeyArgs keyArgs = new OmKeyArgs.Builder()
- .setVolumeName(volumeName)
- .setBucketName(bucketName)
- .setKeyName(keyName)
- .setAcls(Collections.emptyList())
- .setLocationInfoList(new ArrayList<>())
- .build();
-
- /* Create and delete key in the Key Manager. */
- OpenKeySession session = keyManager.openKey(keyArgs);
- keyManager.commitKey(keyArgs, session.getId());
- keyManager.deleteKey(keyArgs);
- }
-
-}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
index c5aa9fe103b9..263f91511eb2 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
@@ -291,7 +291,9 @@ public static OzoneManagerProtocolProtos.OMRequest createBucketRequest(
.setVolumeName(volumeName)
.setIsVersionEnabled(isVersionEnabled)
.setStorageType(storageTypeProto)
- .addAllMetadata(getMetadataList()).build();
+ .addAllMetadata(getMetadataList())
+ .setTrashEnabled(false)
+ .setRecoverWindow(0).build();
OzoneManagerProtocolProtos.CreateBucketRequest.Builder req =
OzoneManagerProtocolProtos.CreateBucketRequest.newBuilder();
req.setBucketInfo(bucketInfo);
@@ -428,7 +430,7 @@ public static OMRequest createVolumeSetAclRequest(String volumeName,
}
/**
- * Deletes key from Key table and adds it to DeletedKeys table.
+ * Deletes key from Key table and adds it to deletedTable and trashTable.
* @return the deletedKey name
*/
public static String deleteKey(String ozoneKey,
@@ -448,6 +450,12 @@ public static String deleteKey(String ozoneKey,
omMetadataManager.getDeletedTable().put(ozoneKey, repeatedOmKeyInfo);
+ // Update cache of trashTable and update trashTable in OM DB.
+ omMetadataManager.getTrashTable().addCacheEntry(
+ new CacheKey<>(ozoneKey),
+ new CacheValue<>(Optional.of(repeatedOmKeyInfo), trxnLogIndex));
+ omMetadataManager.getTrashTable().put(ozoneKey, repeatedOmKeyInfo);
+
return ozoneKey;
}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMTrashRecoverRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMTrashRecoverRequest.java
new file mode 100644
index 000000000000..8964a80d17ac
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMTrashRecoverRequest.java
@@ -0,0 +1,154 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.key;
+
+import java.util.UUID;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+ .RecoverTrashRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+ .OMRequest;
+
+/**
+ * Tests OMTrashRecoverRequest request.
+ */
+public class TestOMTrashRecoverRequest extends TestOMKeyRequest {
+
+ @Test
+ public void testPreExecute() throws Exception {
+ doPreExecute(createRecoverTrashRequest());
+ }
+
+ @Test
+ public void testValidateAndUpdateCache() throws Exception {
+ OMRequest modifiedOmRequest =
+ doPreExecute(createRecoverTrashRequest());
+
+ OMTrashRecoverRequest omTrashRecoverRequest =
+ new OMTrashRecoverRequest(modifiedOmRequest);
+
+ // Add volume, bucket and key entries to OM DB.
+ TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+ omMetadataManager);
+
+ // TransactionLogIndex is 0L here.
+ TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName,
+ clientID, replicationType, replicationFactor, omMetadataManager);
+
+ String tableKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
+ keyName);
+    // Set transactionLogIndex to 10L here.
+ long trxnLogIndex = 10L;
+ TestOMRequestUtils.deleteKey(tableKey, omMetadataManager, trxnLogIndex);
+
+    // deleteKey removes the entry from the keyTable, so it should be null now.
+ OmKeyInfo omKeyInfo = omMetadataManager.getKeyTable().get(tableKey);
+ Assert.assertNull(omKeyInfo);
+
+ trxnLogIndex = trxnLogIndex + 10;
+ OMClientResponse omClientResponse =
+ omTrashRecoverRequest.validateAndUpdateCache(ozoneManager,
+ trxnLogIndex, ozoneManagerDoubleBufferHelper);
+
+ Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
+ omClientResponse.getOMResponse().getStatus());
+
+    // After calling validateAndUpdateCache, the recovered key should exist again.
+ omKeyInfo = omMetadataManager.getKeyTable().get(tableKey);
+ Assert.assertNotNull(omKeyInfo);
+ }
+
+ @Test
+ public void testReplayRequest() throws Exception {
+ OMRequest modifiedOmRequest =
+ doPreExecute(createRecoverTrashRequest());
+
+ OMTrashRecoverRequest omTrashRecoverRequest =
+ new OMTrashRecoverRequest(modifiedOmRequest);
+
+ // Add volume, bucket and key entries to OM DB.
+ TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+ omMetadataManager);
+
+ TestOMRequestUtils.addKeyToTableAndCache(volumeName, bucketName, keyName,
+ clientID, replicationType, replicationFactor, 1L, omMetadataManager);
+
+    // Set transactionLogIndex to 10L here.
+ long trxnLogIndex = 10L;
+ String tableKey = omMetadataManager
+ .getOzoneKey(volumeName, bucketName, keyName);
+ TestOMRequestUtils.deleteKey(tableKey, omMetadataManager, trxnLogIndex);
+
+ // Replay the original TrashRecoverRequest.
+ OMClientResponse omClientResponse = omTrashRecoverRequest
+ .validateAndUpdateCache(ozoneManager, trxnLogIndex,
+ ozoneManagerDoubleBufferHelper);
+
+    // Replaying should result in a REPLAY status response.
+ Assert.assertEquals(OzoneManagerProtocolProtos.Status.REPLAY,
+ omClientResponse.getOMResponse().getStatus());
+ }
+
+ /**
+ * This method calls preExecute and verify the modified request.
+ * @param originalOmRequest - OMRequest not preExecute.
+ * @return OMRequest - modified request returned from preExecute.
+ */
+ private OMRequest doPreExecute(OMRequest originalOmRequest) throws Exception {
+
+ OMTrashRecoverRequest omTrashRecoverRequest =
+ new OMTrashRecoverRequest(originalOmRequest);
+
+ OMRequest modifiedOmRequest = omTrashRecoverRequest
+ .preExecute(ozoneManager);
+
+    // Should not be equal, since preExecute updates the modificationTime.
+ Assert.assertNotEquals(originalOmRequest, modifiedOmRequest);
+
+ return modifiedOmRequest;
+ }
+
+ /**
+ * Create OMRequest which encapsulates RecoverTrashRequest.
+ * @return OMRequest - initial OMRequest.
+ */
+ private OMRequest createRecoverTrashRequest() {
+
+ //TODO: HDDS-2425 recover to non-existing bucket.
+ RecoverTrashRequest recoverTrashRequest =
+ RecoverTrashRequest.newBuilder()
+ .setVolumeName(volumeName)
+ .setBucketName(bucketName)
+ .setKeyName(keyName)
+ .setDestinationBucket(bucketName)
+ .setModificationTime(0L)
+ .build();
+
+ return OMRequest.newBuilder().setRecoverTrashRequest(recoverTrashRequest)
+ .setCmdType(OzoneManagerProtocolProtos.Type.RecoverTrash)
+ .setClientId(UUID.randomUUID().toString()).build();
+ }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMTrashRecoverResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMTrashRecoverResponse.java
new file mode 100644
index 000000000000..44128d339486
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMTrashRecoverResponse.java
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.key;
+
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.junit.Assert;
+import org.junit.Test;
+
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+ .OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+ .RecoverTrashResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
+
+/**
+ * Tests OMTrashRecoverResponse.
+ */
+public class TestOMTrashRecoverResponse extends TestOMKeyResponse {
+
+ @Test
+ public void testAddToDBBatch() throws Exception {
+
+ OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
+ bucketName, keyName, replicationType, replicationFactor);
+ TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+ omMetadataManager);
+ TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName,
+ clientID, replicationType, replicationFactor, omMetadataManager);
+
+ String tableKey = omMetadataManager
+ .getOzoneKey(volumeName, bucketName, keyName);
+ long trxnLogIndex = 10L;
+ TestOMRequestUtils.deleteKey(tableKey, omMetadataManager, trxnLogIndex);
+
+ Assert.assertNull(omMetadataManager.getKeyTable().get(tableKey));
+
+ RecoverTrashResponse recoverTrashResponse = RecoverTrashResponse
+ .newBuilder().setResponse(true).build();
+ OMResponse omResponse = OMResponse.newBuilder()
+ .setRecoverTrashResponse(recoverTrashResponse)
+ .setStatus(Status.OK).setCmdType(Type.RecoverTrash)
+ .build();
+
+ RepeatedOmKeyInfo trashRepeatedKeyInfo =
+ omMetadataManager.getTrashTable().get(tableKey);
+ OMTrashRecoverResponse omTrashRecoverResponse =
+ new OMTrashRecoverResponse(trashRepeatedKeyInfo, omKeyInfo, omResponse);
+
+ omTrashRecoverResponse.addToDBBatch(omMetadataManager, batchOperation);
+ omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
+ Assert.assertNotNull(omMetadataManager.getKeyTable().get(tableKey));
+
+ /* TODO: HDDS-2425 Complete tests about the table used in the flow.
+ * Include trashTable and deletedTable.
+ */
+ }
+
+}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java
index 901c1e352cd4..b0a834319439 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.ozone.shell.bucket;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.client.BucketArgs;
@@ -24,6 +25,10 @@
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.shell.OzoneAddress;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TRASH_ENABLED_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TRASH_ENABLED_KEY_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TRASH_RECOVER_WINDOW;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TRASH_RECOVER_WINDOW_DEFAULT;
import picocli.CommandLine.Command;
import picocli.CommandLine.Option;
@@ -46,6 +51,23 @@ public class CreateBucketHandler extends BucketHandler {
"false/unspecified indicates otherwise")
private Boolean isGdprEnforced;
+ @Option(names = {"--enableTrash", "-t"},
+ description = "if true, indicates bucket with trash-enabled, " +
+ "false indicates trash-disabled, " +
+ "unspecified depends on global setting (default is false)")
+ // Using null to check whether assigned by user.
+ private Boolean trashEnabled = null;
+
+ @Option(names = {"--recoverWindow", "-r"},
+ description =
+ "if trash-enabled," +
+ " set indicates recover window of key in this bucket" +
+ " (eg. 5MIN, 1HR, 1DAY), " +
+ "unspecified depends on global setting (default is 0MIN)\n" +
+ "if trash-disabled, indicates ignoring.")
+ // Using null to check whether assigned by user.
+ private String recoverWindow = null;
+
/**
* Executes create bucket.
*/
@@ -61,6 +83,34 @@ public void execute(OzoneClient client, OzoneAddress address)
bb.addMetadata(OzoneConsts.GDPR_FLAG, String.valueOf(isGdprEnforced));
}
+ // If user did not assign property of trash, it depends on global setting.
+ OzoneConfiguration ozoneConfig = getConf();
+ if (trashEnabled == null) {
+ trashEnabled = ozoneConfig.getBoolean(
+ OZONE_TRASH_ENABLED_KEY,
+ OZONE_TRASH_ENABLED_KEY_DEFAULT);
+ }
+ if (recoverWindow == null) {
+ recoverWindow = ozoneConfig.get(
+ OZONE_TRASH_RECOVER_WINDOW,
+ OZONE_TRASH_RECOVER_WINDOW_DEFAULT);
+ }
+
+ if (isGdprEnforced != null &&
+ isGdprEnforced &&
+ trashEnabled) {
+ trashEnabled = false;
+ System.out.println("GDPR-enabled buckets cannot be trash-enabled.\n" +
+ "Set trash-disabled.");
+ }
+ bb.setTrashEnabled(trashEnabled);
+
+ if (trashEnabled) {
+ bb.setRecoverWindow(recoverWindow);
+ } else {
+ bb.setRecoverWindow("0MIN");
+ }
+
if (bekName != null) {
if (!bekName.isEmpty()) {
bb.setBucketEncryptionKey(bekName);