@@ -342,7 +342,7 @@ List<RepeatedOmKeyInfo> listTrash(String volumeName, String bucketName,
   * @param bucketName - The bucket name.
   * @param keyName - The key the user wants to recover.
   * @param destinationBucket - The bucket the user wants to recover to.
-  * @return The recoverTrash
+  * @return true if the recovery succeeded, false otherwise.
   * @throws IOException
   */
  boolean recoverTrash(String volumeName, String bucketName, String keyName,
@@ -229,7 +229,6 @@ public static boolean isReadOnly(
case LookupKey:
case ListKeys:
case ListTrash:
-   case RecoverTrash:
case ServiceList:
case ListMultiPartUploadParts:
case GetFileStatus:
@@ -264,6 +263,7 @@ public static boolean isReadOnly(
case SetAcl:
case AddAcl:
case PurgeKeys:
+   case RecoverTrash:
return false;
default:
LOG.error("CmdType {} is not categorized as readOnly or not.", cmdType);
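For context, OM routes each incoming OMRequest according to this isReadOnly check: read-only commands can be answered locally, while anything returning false must be replicated before it is applied. A minimal sketch of that routing in Java; the handleReadRequest and submitThroughRatis helper names are invented for illustration, not OM source:

  static OMResponse route(OMRequest request) {
    if (OmUtils.isReadOnly(request)) {
      // Read path: can be served without going through consensus.
      return handleReadRequest(request);
    }
    // Write path: RecoverTrash now lands here, so it is replicated via
    // Ratis before it mutates the key tables.
    return submitThroughRatis(request);
  }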
@@ -502,4 +502,18 @@ public static long getOMClientRpcTimeOut(Configuration configuration) {
return OzoneConfiguration.of(configuration)
.getObject(OMClientConfig.class).getRpcTimeOut();
}

+  /**
+   * Return the OmKeyInfo that would be recovered, or null if the key is
+   * not present in the deleted-key list.
+   */
+  public static OmKeyInfo prepareKeyForRecover(OmKeyInfo keyInfo,
+      RepeatedOmKeyInfo repeatedOmKeyInfo) {
+
+    /* TODO: HDDS-2425. HDDS-2426. */
+    if (repeatedOmKeyInfo.getOmKeyInfoList().contains(keyInfo)) {
+      return keyInfo;
+    } else {
+      return null;
+    }
+  }
}
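A hypothetical caller of the new prepareKeyForRecover helper, assuming a RepeatedOmKeyInfo fetched from the deleted table (as OMTrashRecoverResponse does later in this patch); trashKey and candidate are illustrative names:

  RepeatedOmKeyInfo deletedVersions =
      omMetadataManager.getDeletedTable().get(trashKey);
  OmKeyInfo toRecover =
      OmUtils.prepareKeyForRecover(candidate, deletedVersions);
  if (toRecover == null) {
    // candidate is not in the deleted-key list; nothing to recover.
  }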
@@ -500,10 +500,12 @@ List<RepeatedOmKeyInfo> listTrash(String volumeName, String bucketName,
   * @param bucketName - The bucket name.
   * @param keyName - The key the user wants to recover.
   * @param destinationBucket - The bucket the user wants to recover to.
-  * @return The recoverTrash
+  * @return true if the recovery succeeded, false otherwise.
   * @throws IOException
   */
-  boolean recoverTrash(String volumeName, String bucketName, String keyName,
-      String destinationBucket) throws IOException;
+  default boolean recoverTrash(String volumeName, String bucketName,
+      String keyName, String destinationBucket) throws IOException {
+    return false;
+  }
Comment on lines +506 to +509
cxorm (Member, Author) commented:

@bharatviswa504 What do you think about this cleanup of the write request?
Could we give the write operation a default implementation here, or do we
need a separate interface for the write operations?


}
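To make the question above concrete, here is a small, self-contained Java illustration of the two options; the interface and method names are invented for the example (assuming java.io.IOException is imported), not part of this patch:

  interface TrashProtocol {
    // Option A: keep the write operation on the shared interface with a
    // default no-op, so read-only implementations compile unchanged.
    default boolean recoverTrash(String volume, String bucket, String key,
        String destinationBucket) throws IOException {
      return false;
    }
  }

  interface TrashWriteProtocol extends TrashProtocol {
    // Option B: a separate interface that only write-capable servers
    // implement, forcing them to provide a real implementation.
    @Override
    boolean recoverTrash(String volume, String bucket, String key,
        String destinationBucket) throws IOException;
  }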
@@ -1639,15 +1639,14 @@ public boolean recoverTrash(String volumeName, String bucketName,
"The destination bucket name cannot be null or empty. " +
"Please enter a valid destination bucket name.");

-    RecoverTrashRequest recoverRequest = RecoverTrashRequest.newBuilder()
+    RecoverTrashRequest.Builder req = RecoverTrashRequest.newBuilder()
.setVolumeName(volumeName)
.setBucketName(bucketName)
.setKeyName(keyName)
-        .setDestinationBucket(destinationBucket)
-        .build();
+        .setDestinationBucket(destinationBucket);

OMRequest omRequest = createOMRequest(Type.RecoverTrash)
-        .setRecoverTrashRequest(recoverRequest)
+        .setRecoverTrashRequest(req)
.build();

RecoverTrashResponse recoverResponse =
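A usage sketch for the client-side API in this hunk, assuming an OzoneManagerProtocol handle named omClient; the argument values are illustrative:

  boolean recovered = omClient.recoverTrash(
      "volume1", "bucket1", "key1", "destinationBucket");
  // Until HDDS-2425/HDDS-2426 land the core logic, the server-side stub
  // decides the returned value.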
@@ -202,6 +202,18 @@ List<OmKeyInfo> listKeys(String volumeName,
List<RepeatedOmKeyInfo> listTrash(String volumeName, String bucketName,
String startKeyName, String keyPrefix, int maxKeys) throws IOException;

+  /**
+   * Recover trash allows the user to recover keys that were marked as
+   * deleted, but not yet actually deleted, by Ozone Manager.
+   * @param volumeName - The volume name.
+   * @param bucketName - The bucket name.
+   * @param keyName - The key the user wants to recover.
+   * @param destinationBucket - The bucket the user wants to recover to.
+   * @return true if the recovery succeeded, false otherwise.
+   */
+  boolean recoverTrash(String volumeName, String bucketName,
+      String keyName, String destinationBucket) throws IOException;
+
/**
* Returns a list of volumes owned by a given user; if user is null, returns
* all volumes.
@@ -804,6 +804,18 @@ public List<RepeatedOmKeyInfo> listTrash(String volumeName, String bucketName,
return deletedKeys;
}

+  @Override
+  public boolean recoverTrash(String volumeName, String bucketName,
+      String keyName, String destinationBucket) throws IOException {
+
+    /* TODO: HDDS-2425 and HDDS-2426.
+       The core recovery logic will be added in a later patch. */
+
+    boolean recoverOperation = true;
+    return recoverOperation;
+  }
+
/**
* @param userName volume owner, null for listing all volumes.
*/
@@ -2319,15 +2319,6 @@ public List<RepeatedOmKeyInfo> listTrash(String volumeName,
}
}

-  // TODO: HDDS-2424. recover-trash command server side handling.
-  @Override
-  public boolean recoverTrash(String volumeName, String bucketName,
-      String keyName, String destinationBucket) throws IOException {
-
-    boolean recoverOperation = true;
-    return recoverOperation;
-  }
-
/**
* Sets bucket property from args.
*
@@ -34,6 +34,7 @@
import org.apache.hadoop.ozone.om.request.key.OMKeyDeleteRequest;
import org.apache.hadoop.ozone.om.request.key.OMKeyPurgeRequest;
import org.apache.hadoop.ozone.om.request.key.OMKeyRenameRequest;
+import org.apache.hadoop.ozone.om.request.key.OMTrashRecoverRequest;
import org.apache.hadoop.ozone.om.request.key.acl.OMKeyAddAclRequest;
import org.apache.hadoop.ozone.om.request.key.acl.OMKeyRemoveAclRequest;
import org.apache.hadoop.ozone.om.request.key.acl.OMKeySetAclRequest;
@@ -140,6 +141,8 @@ public static OMClientRequest createClientRequest(OMRequest omRequest) {
return new OMRenewDelegationTokenRequest(omRequest);
case GetS3Secret:
return new S3GetSecretRequest(omRequest);
+    case RecoverTrash:
+      return new OMTrashRecoverRequest(omRequest);
default:
throw new IllegalStateException("Unrecognized write command " +
"type request" + cmdType);
@@ -0,0 +1,136 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.ozone.om.request.key;

import java.io.IOException;

import com.google.common.base.Preconditions;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.apache.hadoop.ozone.om.response.key.OMTrashRecoverResponse;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.RecoverTrashRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.OMResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.OMRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status;

import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;

/**
* Handles RecoverTrash request.
*/
public class OMTrashRecoverRequest extends OMKeyRequest {
private static final Logger LOG =
LoggerFactory.getLogger(OMTrashRecoverRequest.class);

public OMTrashRecoverRequest(OMRequest omRequest) {
super(omRequest);
}

@Override
public OMRequest preExecute(OzoneManager ozoneManager) {
RecoverTrashRequest recoverTrashRequest = getOmRequest()
.getRecoverTrashRequest();
Preconditions.checkNotNull(recoverTrashRequest);

return getOmRequest().toBuilder().build();
}

@Override
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
long transactionLogIndex,
OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) {
RecoverTrashRequest recoverTrashRequest = getOmRequest()
.getRecoverTrashRequest();
Preconditions.checkNotNull(recoverTrashRequest);

String volumeName = recoverTrashRequest.getVolumeName();
String bucketName = recoverTrashRequest.getBucketName();
String keyName = recoverTrashRequest.getKeyName();
String destinationBucket = recoverTrashRequest.getDestinationBucket();

/** TODO: HDDS-2818. New Metrics for Trash Key Recover and Fails.
* OMMetrics omMetrics = ozoneManager.getMetrics();
*/

OMResponse.Builder omResponse = OMResponse.newBuilder()
.setCmdType(Type.RecoverTrash).setStatus(Status.OK)
.setSuccess(true);

OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
boolean acquireLock = false;
OMClientResponse omClientResponse = null;
try {
// Check acl for the destination bucket.
checkBucketAcls(ozoneManager, volumeName, destinationBucket, keyName,
IAccessAuthorizer.ACLType.WRITE);

acquireLock = omMetadataManager.getLock()
.acquireWriteLock(BUCKET_LOCK, volumeName, destinationBucket);

      // Validate the source and destination volume/bucket.
      validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
      validateBucketAndVolume(omMetadataManager, volumeName, destinationBucket);

/** TODO: HDDS-2425. HDDS-2426.
* Update cache.
* omMetadataManager.getKeyTable().addCacheEntry(
* new CacheKey<>(),
* new CacheValue<>()
* );
*
* Execute recovering trash in non-existing bucket.
* Execute recovering trash in existing bucket.
* omClientResponse = new OMTrashRecoverResponse(omKeyInfo,
* omResponse.setRecoverTrashResponse(
* RecoverTrashResponse.newBuilder())
* .build());
*/
omClientResponse = null;

} catch (IOException ex) {
LOG.error("Fail for recovering trash.", ex);
omClientResponse = new OMTrashRecoverResponse(null,
createErrorOMResponse(omResponse, ex));
} finally {
if (omClientResponse != null) {
omClientResponse.setFlushFuture(
ozoneManagerDoubleBufferHelper.add(omClientResponse,
transactionLogIndex));
}
if (acquireLock) {
omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
destinationBucket);
}
}

return omClientResponse;
}

}
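One possible shape for the cache update left as a TODO in validateAndUpdateCache above; strictly a sketch of the idea behind HDDS-2425/HDDS-2426, not this patch's implementation. It assumes recoveredKeyInfo is the OmKeyInfo chosen from the deleted table, and that CacheKey, CacheValue, and Guava's Optional are imported:

  String dbKey = omMetadataManager.getOzoneKey(
      volumeName, destinationBucket, keyName);
  omMetadataManager.getKeyTable().addCacheEntry(
      new CacheKey<>(dbKey),
      new CacheValue<>(Optional.of(recoveredKeyInfo), transactionLogIndex));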
@@ -0,0 +1,64 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.ozone.om.response.key;

import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.OMResponse;
import org.apache.hadoop.hdds.utils.db.BatchOperation;

import java.io.IOException;
import javax.annotation.Nullable;
import javax.annotation.Nonnull;

/**
* Response for RecoverTrash request.
*/
public class OMTrashRecoverResponse extends OMClientResponse {
private OmKeyInfo omKeyInfo;

public OMTrashRecoverResponse(@Nullable OmKeyInfo omKeyInfo,
@Nonnull OMResponse omResponse) {
super(omResponse);
this.omKeyInfo = omKeyInfo;
}

@Override
public void addToDBBatch(OMMetadataManager omMetadataManager,
BatchOperation batchOperation) throws IOException {

/* TODO: HDDS-2425. HDDS-2426. */
String trashKey = omMetadataManager
.getOzoneKey(omKeyInfo.getVolumeName(),
omKeyInfo.getBucketName(), omKeyInfo.getKeyName());
RepeatedOmKeyInfo repeatedOmKeyInfo = omMetadataManager
.getDeletedTable().get(trashKey);
omKeyInfo = OmUtils.prepareKeyForRecover(omKeyInfo, repeatedOmKeyInfo);
omMetadataManager.getDeletedTable()
.deleteWithBatch(batchOperation, omKeyInfo.getKeyName());
/* TODO: trashKey should be updated to destinationBucket. */
bharatviswa504 (Contributor) commented:

One question: if a key is created and deleted, and then the key is created
and deleted again, which omKeyInfo from the deleted table will be used when
recovering?
cxorm (Member, Author) commented on Mar 25, 2020:

Thank you @bharatviswa504 for the question.

Referring to the handling of the DeletedTable in
OMKeyDeleteResponse#addToDBBatch() and OmUtils#prepareKeyForDelete(), the
most recently deleted key is appended to the tail of
RepeatedOmKeyInfo#omKeyInfoList.

So I think we can recover the latest deleted key from the DeletedTable in
this created-deleted-created-deleted situation. (And when recovering the
latest key, I think we should clear the old deleted key.)

Would you please give me advice if I missed something? If the idea is
proper, I will update the description of this Jira.
bharatviswa504 (Contributor) commented:

I am fine with recovering the last deleted key if that is the expected
behavior.

> (And when recovering the latest key, I think we should clear the old
> deleted key.)

We should not delete the other keys: those keys will be picked up by the
background trash service, and the data for those keys needs to be deleted.

Doing it this way is also not correct, from my understanding. Say we put
those keys in the deleted table, and the background delete-key service
picks them up and sends them to SCM for deletion; at that point we receive
a recover-trash command. There is then a chance that we recover a key that
no longer has any data, since the request was already submitted to SCM,
which in turn sends it to the DataNodes. How shall we handle this kind of
scenario?

Deletion from the deleted table only happens when a key purge request
happens.

Code snippet link #link
cxorm (Member, Author) commented:

/pending I'm tracing the background part. (Hope soon.)

Copy link
Member Author

@cxorm cxorm Apr 9, 2020

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thank you @bharatviswa504 for taking time to review this.

Here is my thought,
We set modificationTime when deleting key.

So I think we can compare the modificationTime with RECOVERY_WINDOW to exclude keys(exist in trash-enabled buckets) from purging.

Code snippet would be added after this line might like

if (trashEnable(info.getBucketName()) &&
    (Time.now() - info.getModificationTime()) < RECOVERY_WINDOW) {
    
  /* Would not delete key in this situation. */
}

note recovery_window of bucket would be added in later Jira.

Could you please give me your thoughts or ideas if I miss something, thank you.

And here is discussion about trash-recovery.

omMetadataManager.getKeyTable()
.putWithBatch(batchOperation, trashKey, omKeyInfo);
}

}
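For orientation, an illustrative driver for the response path above: the OM double buffer eventually flushes each queued OMClientResponse with a RocksDB batch. This is a simplified sketch, not OM source code:

  try (BatchOperation batch =
      omMetadataManager.getStore().initBatchOperation()) {
    trashRecoverResponse.addToDBBatch(omMetadataManager, batch);
    omMetadataManager.getStore().commitBatchOperation(batch);
  }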
@@ -55,6 +55,7 @@ public class TestTrashService {
public TemporaryFolder tempFolder = new TemporaryFolder();

private KeyManager keyManager;
+  private OmMetadataManagerImpl omMetadataManager;
private String volumeName;
private String bucketName;

@@ -69,8 +70,8 @@ public void setup() throws IOException {
System.setProperty(DBConfigFromFile.CONFIG_DIR, "/");
ServerUtils.setOzoneMetaDirPath(configuration, folder.toString());

-    OmMetadataManagerImpl omMetadataManager =
-        new OmMetadataManagerImpl(configuration);
+    omMetadataManager = new OmMetadataManagerImpl(configuration);
+
keyManager = new KeyManagerImpl(
new ScmBlockLocationTestingClient(null, null, 0),
omMetadataManager, configuration, UUID.randomUUID().toString(), null);
@@ -86,11 +87,9 @@ public void testRecoverTrash() throws IOException {
String destinationBucket = "destBucket";
createAndDeleteKey(keyName);

-    /* TODO:HDDS-2424. */

Member commented:

Yeah, what I mean is something like this TODO comment:

// boolean recoverOperation =
//     ozoneManager.recoverTrash(
//         volumeName, bucketName, keyName, destinationBucket);
// Assert.assertTrue(recoverOperation);
+    boolean recoverOperation = omMetadataManager
+        .recoverTrash(volumeName, bucketName, keyName, destinationBucket);
+    Assert.assertTrue(recoverOperation);
}

private void createAndDeleteKey(String keyName) throws IOException {