@@ -58,6 +58,7 @@
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;

import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
@@ -226,6 +227,40 @@ public void testOfsHSync() throws Exception {
}
}

+  @Test
+  public void testHSyncDeletedKey() throws Exception {
+    // Verify that a key can't be successfully hsync'ed again after it's deleted,
+    // and that key won't reappear after a failed hsync.
+
+    // Set the fs.defaultFS
+    final String rootPath = String.format("%s://%s/",
+        OZONE_OFS_URI_SCHEME, CONF.get(OZONE_OM_ADDRESS_KEY));
+    CONF.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
+
+    final String dir = OZONE_ROOT + bucket.getVolumeName()
+        + OZONE_URI_DELIMITER + bucket.getName();
+    final Path key1 = new Path(dir, "key-hsync-del");
+
+    try (FileSystem fs = FileSystem.get(CONF)) {
+      // Create key1
+      try (FSDataOutputStream os = fs.create(key1, true)) {
+        os.write(1);
+        os.hsync();
+        fs.delete(key1, false);
+
+        // getFileStatus should throw FNFE because the key is deleted
+        assertThrows(FileNotFoundException.class,
+            () -> fs.getFileStatus(key1));
+        // hsync should throw because the open key is gone
+        assertThrows(OMException.class,
+            () -> os.hsync());
+        // key1 should not reappear because of hsync
+        assertThrows(FileNotFoundException.class,
+            () -> fs.getFileStatus(key1));
+      }
+    }
+  }
+
@Test
public void testUncommittedBlocks() throws Exception {
// Set the fs.defaultFS
@@ -158,7 +158,21 @@ String getOzoneKeyFSO(String volumeName,
* @param id - the id for this open
* @return bytes of DB key.
*/
-  String getOpenKey(String volume, String bucket, String key, long id);
+  default String getOpenKey(String volume, String bucket, String key, long id) {
+    return getOpenKey(volume, bucket, key, String.valueOf(id));
+  }
+
+  /**
+   * Returns the DB key name of an open key in OM metadata store. Should be
+   * #open# prefix followed by actual key name.
+   *
+   * @param volume - volume name
+   * @param bucket - bucket name
+   * @param key - key name
+   * @param clientId - client Id String for this open key
+   * @return bytes of DB key.
+   */
+  String getOpenKey(String volume, String bucket, String key, String clientId);

/**
* Returns client ID in Long of an OpenKeyTable DB Key String.
@@ -572,9 +586,22 @@ default String getOzonePathKey(long volumeId, long bucketId,
* @param id - client id for this open request
* @return DB directory key as String.
*/
-  String getOpenFileName(long volumeId, long bucketId,
-                         long parentObjectId, String fileName, long id);
+  default String getOpenFileName(long volumeId, long bucketId, long parentObjectId, String fileName, long id) {
+    return getOpenFileName(volumeId, bucketId, parentObjectId, fileName, String.valueOf(id));
+  }
+
+  /**
+   * Returns DB key name of an open file in OM metadata store. Should be
+   * #open# prefix followed by actual leaf node name.
+   *
+   * @param volumeId - ID of the volume
+   * @param bucketId - ID of the bucket
+   * @param parentObjectId - parent object Id
+   * @param fileName - file name
+   * @param clientId - client id String for this open request
+   * @return DB directory key as String.
+   */
+  String getOpenFileName(long volumeId, long bucketId, long parentObjectId, String fileName, String clientId);

/**
* Given a volume, bucket and a objectID, return the DB key name in
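The long-id variants above now simply delegate, so existing callers keep compiling while new callers can pass the hsync client ID stored in key metadata as-is. A minimal illustrative sketch (the helper class and variable names are hypothetical; the real call sites are in the delete request handlers further down):

import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;

final class OpenKeyNameSketch {
  private OpenKeyNameSketch() { }

  /** Resolves the open-key DB name of an hsync'ed key, or null if the key carries no hsync metadata. */
  static String hsyncOpenKeyName(OMMetadataManager omMetadataManager, OmKeyInfo omKeyInfo) {
    String hsyncClientId = omKeyInfo.getMetadata().get(OzoneConsts.HSYNC_CLIENT_ID);
    if (hsyncClientId == null) {
      return null;
    }
    // The String overload takes the stored client id directly, with no Long round-trip.
    return omMetadataManager.getOpenKey(omKeyInfo.getVolumeName(),
        omKeyInfo.getBucketName(), omKeyInfo.getKeyName(), hsyncClientId);
  }
}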
@@ -865,9 +865,9 @@ public String getOzoneDirKey(String volume, String bucket, String key) {

@Override
public String getOpenKey(String volume, String bucket,
-      String key, long id) {
+      String key, String clientId) {
String openKey = OM_KEY_PREFIX + volume + OM_KEY_PREFIX + bucket +
-        OM_KEY_PREFIX + key + OM_KEY_PREFIX + id;
+        OM_KEY_PREFIX + key + OM_KEY_PREFIX + clientId;
return openKey;
}

@@ -2178,13 +2178,13 @@ public String getOzoneDeletePathDirKey(String ozoneDeletePath) {
@Override
public String getOpenFileName(long volumeId, long bucketId,
long parentID, String fileName,
-      long id) {
+      String clientId) {
StringBuilder openKey = new StringBuilder();
openKey.append(OM_KEY_PREFIX).append(volumeId);
openKey.append(OM_KEY_PREFIX).append(bucketId);
openKey.append(OM_KEY_PREFIX).append(parentID);
openKey.append(OM_KEY_PREFIX).append(fileName);
-    openKey.append(OM_KEY_PREFIX).append(id);
+    openKey.append(OM_KEY_PREFIX).append(clientId);
return openKey.toString();
}

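For reference, a worked example of the key strings the two implementations above produce, assuming OM_KEY_PREFIX is "/" (all values illustrative):

// Flat (non-FSO) open key, built from names plus the client id:
//   getOpenKey("vol1", "buck1", "key1", "12345")   ->  "/vol1/buck1/key1/12345"
// FSO open file, built from object IDs, the leaf file name, and the client id:
//   getOpenFileName(1L, 2L, 3L, "file1", "12345")  ->  "/1/2/3/file1/12345"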
@@ -251,7 +251,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn
if (omKeyInfo == null) {
String action = isRecovery ? "recovery" : isHSync ? "hsync" : "commit";
throw new OMException("Failed to " + action + " key, as " + dbOpenKey +
"entry is not found in the OpenKey table", KEY_NOT_FOUND);
" entry is not found in the OpenKey table", KEY_NOT_FOUND);
}
if (omKeyInfo.getMetadata().containsKey(OzoneConsts.LEASE_RECOVERY) &&
omKeyInfo.getMetadata().containsKey(OzoneConsts.HSYNC_CLIENT_ID)) {
@@ -22,6 +22,8 @@
import java.nio.file.InvalidPathException;
import java.util.Map;

+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.ratis.server.protocol.TermIndex;
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
@@ -147,7 +149,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn
// Set the UpdateID to current transactionLogIndex
omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());

-      // Update table cache.
+      // Update table cache. Put a tombstone entry
omMetadataManager.getKeyTable(getBucketLayout()).addCacheEntry(
new CacheKey<>(
omMetadataManager.getOzoneKey(volumeName, bucketName, keyName)),
@@ -160,15 +162,25 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn
omBucketInfo.incrUsedBytes(-quotaReleased);
omBucketInfo.incrUsedNamespace(-1L);

-      // No need to add cache entries to delete table. As delete table will
-      // be used by DeleteKeyService only, not used for any client response
-      // validation, so we don't need to add to cache.
-      // TODO: Revisit if we need it later.
+      // If omKeyInfo has hsync metadata, delete its corresponding open key as well
+      String dbOpenKey = null;
+      String hsyncClientId = omKeyInfo.getMetadata().get(OzoneConsts.HSYNC_CLIENT_ID);
+      if (hsyncClientId != null) {
+        Table<String, OmKeyInfo> openKeyTable = omMetadataManager.getOpenKeyTable(getBucketLayout());
+        dbOpenKey = omMetadataManager.getOpenKey(volumeName, bucketName, keyName, hsyncClientId);
+        OmKeyInfo openKeyInfo = openKeyTable.get(dbOpenKey);
+        if (openKeyInfo != null) {
+          // Remove the open key by putting a tombstone entry
+          openKeyTable.addCacheEntry(dbOpenKey, trxnLogIndex);
+        } else {
+          LOG.warn("Potentially inconsistent DB state: open key not found with dbOpenKey '{}'", dbOpenKey);
+        }
+      }

omClientResponse = new OMKeyDeleteResponse(
omResponse.setDeleteKeyResponse(DeleteKeyResponse.newBuilder())
.build(), omKeyInfo, ozoneManager.isRatisEnabled(),
-          omBucketInfo.copyObject());
+          omBucketInfo.copyObject(), dbOpenKey);

result = Result.SUCCESS;
} catch (IOException | InvalidPathException ex) {
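The request side only tombstones the open key in the table cache and hands dbOpenKey to the response. The matching OMKeyDeleteResponse change is not shown in this diff; as a rough, hypothetical sketch of the kind of cleanup the response side would presumably perform, the open-key entry would be dropped from the OpenKey table in the same DB batch that persists the key deletion:

// Hypothetical sketch only (method and variable names assumed, not taken from this diff),
// roughly what OMKeyDeleteResponse#addToDBBatch(omMetadataManager, batchOperation) would add:
if (dbOpenKey != null) {
  // Remove the hsync'ed key's open entry together with the key deletion itself.
  omMetadataManager.getOpenKeyTable(getBucketLayout())
      .deleteWithBatch(batchOperation, dbOpenKey);
}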
@@ -18,6 +18,8 @@

package org.apache.hadoop.ozone.om.request.key;

+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.ratis.server.protocol.TermIndex;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
@@ -154,15 +156,26 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn
omBucketInfo.incrUsedBytes(-quotaReleased);
omBucketInfo.incrUsedNamespace(-1L);

-      // No need to add cache entries to delete table. As delete table will
-      // be used by DeleteKeyService only, not used for any client response
-      // validation, so we don't need to add to cache.
-      // TODO: Revisit if we need it later.
+      // If omKeyInfo has hsync metadata, delete its corresponding open key as well
+      String dbOpenKey = null;
+      String hsyncClientId = omKeyInfo.getMetadata().get(OzoneConsts.HSYNC_CLIENT_ID);
+      if (hsyncClientId != null) {
+        Table<String, OmKeyInfo> openKeyTable = omMetadataManager.getOpenKeyTable(getBucketLayout());
+        long parentId = omKeyInfo.getParentObjectID();
+        dbOpenKey = omMetadataManager.getOpenFileName(volumeId, bucketId, parentId, fileName, hsyncClientId);
+        OmKeyInfo openKeyInfo = openKeyTable.get(dbOpenKey);
+        if (openKeyInfo != null) {
+          // Remove the open key by putting a tombstone entry
+          openKeyTable.addCacheEntry(dbOpenKey, trxnLogIndex);
+        } else {
+          LOG.warn("Potentially inconsistent DB state: open key not found with dbOpenKey '{}'", dbOpenKey);
+        }
+      }

omClientResponse = new OMKeyDeleteResponseWithFSO(omResponse
.setDeleteKeyResponse(DeleteKeyResponse.newBuilder()).build(),
keyName, omKeyInfo, ozoneManager.isRatisEnabled(),
-          omBucketInfo.copyObject(), keyStatus.isDirectory(), volumeId);
+          omBucketInfo.copyObject(), keyStatus.isDirectory(), volumeId, dbOpenKey);

result = Result.SUCCESS;
} catch (IOException | InvalidPathException ex) {
@@ -19,6 +19,8 @@
package org.apache.hadoop.ozone.om.request.key;

import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.ratis.server.protocol.TermIndex;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
@@ -172,17 +174,18 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn
OmBucketInfo omBucketInfo =
getBucketInfo(omMetadataManager, volumeName, bucketName);

+      List<String> dbOpenKeys = new ArrayList<>();
// Mark all keys which can be deleted, in cache as deleted.
quotaReleased =
markKeysAsDeletedInCache(ozoneManager, trxnLogIndex, omKeyInfoList,
-              dirList, omMetadataManager, quotaReleased);
+              dirList, omMetadataManager, quotaReleased, dbOpenKeys);
omBucketInfo.incrUsedBytes(-quotaReleased);
omBucketInfo.incrUsedNamespace(-1L * omKeyInfoList.size());

final long volumeId = omMetadataManager.getVolumeId(volumeName);
omClientResponse =
getOmClientResponse(ozoneManager, omKeyInfoList, dirList, omResponse,
-              unDeletedKeys, deleteStatus, omBucketInfo, volumeId);
+              unDeletedKeys, deleteStatus, omBucketInfo, volumeId, dbOpenKeys);

result = Result.SUCCESS;

@@ -257,31 +260,48 @@ protected OMClientResponse getOmClientResponse(OzoneManager ozoneManager,
List<OmKeyInfo> omKeyInfoList, List<OmKeyInfo> dirList,
OMResponse.Builder omResponse,
OzoneManagerProtocolProtos.DeleteKeyArgs.Builder unDeletedKeys,
-      boolean deleteStatus, OmBucketInfo omBucketInfo, long volumeId) {
+      boolean deleteStatus, OmBucketInfo omBucketInfo, long volumeId, List<String> dbOpenKeys) {
OMClientResponse omClientResponse;
omClientResponse = new OMKeysDeleteResponse(omResponse
.setDeleteKeysResponse(
DeleteKeysResponse.newBuilder().setStatus(deleteStatus)
.setUnDeletedKeys(unDeletedKeys))
.setStatus(deleteStatus ? OK : PARTIAL_DELETE).setSuccess(deleteStatus)
.build(), omKeyInfoList, ozoneManager.isRatisEnabled(),
-        omBucketInfo.copyObject());
+        omBucketInfo.copyObject(), dbOpenKeys);
return omClientResponse;
}

protected long markKeysAsDeletedInCache(OzoneManager ozoneManager,
long trxnLogIndex, List<OmKeyInfo> omKeyInfoList, List<OmKeyInfo> dirList,
-      OMMetadataManager omMetadataManager, long quotaReleased)
+      OMMetadataManager omMetadataManager, long quotaReleased, List<String> dbOpenKeys)
throws IOException {
for (OmKeyInfo omKeyInfo : omKeyInfoList) {
+      String volumeName = omKeyInfo.getVolumeName();
+      String bucketName = omKeyInfo.getBucketName();
+      String keyName = omKeyInfo.getKeyName();
omMetadataManager.getKeyTable(getBucketLayout()).addCacheEntry(
-          new CacheKey<>(omMetadataManager
-              .getOzoneKey(omKeyInfo.getVolumeName(), omKeyInfo.getBucketName(),
-                  omKeyInfo.getKeyName())),
+          new CacheKey<>(omMetadataManager.getOzoneKey(volumeName, bucketName, keyName)),
CacheValue.get(trxnLogIndex));

omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
quotaReleased += sumBlockLengths(omKeyInfo);

+      // If omKeyInfo has hsync metadata, delete its corresponding open key as well
+      String hsyncClientId = omKeyInfo.getMetadata().get(OzoneConsts.HSYNC_CLIENT_ID);
+      if (hsyncClientId != null) {
+        Table<String, OmKeyInfo> openKeyTable = omMetadataManager.getOpenKeyTable(getBucketLayout());
+        String dbOpenKey = omMetadataManager.getOpenKey(volumeName, bucketName, keyName, hsyncClientId);
+        OmKeyInfo openKeyInfo = openKeyTable.get(dbOpenKey);
+        if (openKeyInfo != null) {
+          // Remove the open key by putting a tombstone entry
+          openKeyTable.addCacheEntry(dbOpenKey, trxnLogIndex);
+          // Append to the list of open keys to be deleted. The list is not expected to be large.
+          dbOpenKeys.add(dbOpenKey);
+        } else {
+          LOG.warn("Potentially inconsistent DB state: open key not found with dbOpenKey '{}'", dbOpenKey);
+        }
+      }
}
return quotaReleased;
}