@@ -22,6 +22,7 @@
import java.io.IOException;
import java.io.InputStream;
import java.io.RandomAccessFile;
import java.io.UnsupportedEncodingException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
@@ -87,6 +88,8 @@
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.OzoneTestUtils;
import org.apache.hadoop.ozone.TestDataUtil;
import org.apache.hadoop.ozone.audit.AuditLogTestUtils;
import org.apache.hadoop.ozone.client.BucketArgs;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneBucket;
@@ -228,10 +231,13 @@ abstract class OzoneRpcClientTests extends OzoneTestBase {
remoteGroupName, ACCESS, READ);
private static MessageDigest eTagProvider;
private static Set<OzoneClient> ozoneClients = new HashSet<>();
private static GenericTestUtils.PrintStreamCapturer output;

@BeforeAll
public static void initialize() throws NoSuchAlgorithmException {
public static void initialize() throws NoSuchAlgorithmException, UnsupportedEncodingException {
eTagProvider = MessageDigest.getInstance(MD5_HASH);
AuditLogTestUtils.enableAuditLog();
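// capture stdout so tests can assert on the audit entries written by the OM audit logger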
output = GenericTestUtils.captureOut();
}

/**
@@ -270,6 +276,7 @@ static void startCluster(OzoneConfiguration conf, MiniOzoneCluster.Builder build
static void shutdownCluster() {
org.apache.hadoop.hdds.utils.IOUtils.closeQuietly(ozoneClients);
ozoneClients.clear();
org.apache.hadoop.hdds.utils.IOUtils.closeQuietly(output);

if (storageContainerLocationClient != null) {
storageContainerLocationClient.close();
@@ -1076,6 +1083,64 @@ public void testDeleteLinkedBucket() throws Exception {
store.deleteVolume(volumeName);
}

@Test
public void testDeleteAuditLog() throws Exception {
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();

String value = "sample value";
int valueLength = value.getBytes(UTF_8).length;
store.createVolume(volumeName);
OzoneVolume volume = store.getVolume(volumeName);
volume.createBucket(bucketName);
OzoneBucket bucket = volume.getBucket(bucketName);

// create a key with RATIS/THREE replication
String keyName1 = "key1";
TestDataUtil.createKey(bucket, keyName1, THREE, RATIS, value);

// create a key with EC replication (rs-3-2-1024k)
String keyName2 = "key2";
ReplicationConfig replicationConfig = new ECReplicationConfig("rs-3-2-1024k");
TestDataUtil.createKey(bucket, keyName2, replicationConfig, value);

// create a directory and a file
String dirName = "dir1";
bucket.createDirectory(dirName);
String keyName3 = "key3";
TestDataUtil.createKey(bucket, keyName3, THREE, RATIS, value);

// clear previously captured output, then delete the keys and the directory
output.reset();
bucket.deleteKey(keyName1);
bucket.deleteKey(keyName2);
bucket.deleteDirectory(dirName, true);

// create keys for deleteKeys case
String keyName4 = "key4";
TestDataUtil.createKey(bucket, dirName + "/" + keyName4, THREE, RATIS, value);

String keyName5 = "key5";
TestDataUtil.createKey(bucket, dirName + "/" + keyName5, replicationConfig, value);

List<String> keysToDelete = new ArrayList<>();
keysToDelete.add(dirName + "/" + keyName4);
keysToDelete.add(dirName + "/" + keyName5);
bucket.deleteKeys(keysToDelete);

String consoleOutput = output.get();
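// the captured stdout should now contain an audit entry per delete, including
// data size and replication config for file keys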
assertThat(consoleOutput).contains("op=DELETE_KEY {volume=" + volumeName + ", bucket=" + bucketName +
", key=key1, dataSize=" + valueLength + ", replicationConfig=RATIS/THREE");
assertThat(consoleOutput).contains("op=DELETE_KEY {volume=" + volumeName + ", bucket=" + bucketName +
", key=key2, dataSize=" + valueLength + ", replicationConfig=EC{rs-3-2-1024k}");
assertThat(consoleOutput).contains("op=DELETE_KEY {volume=" + volumeName + ", bucket=" + bucketName +
", key=dir1, Transaction");
assertThat(consoleOutput).contains("op=DELETE_KEYS {volume=" + volumeName + ", bucket=" + bucketName +
", deletedKeysList={key=dir1/key4, dataSize=" + valueLength +
", replicationConfig=RATIS/THREE}, {key=dir1/key5, dataSize=" + valueLength +
", replicationConfig=EC{rs-3-2-1024k}}, unDeletedKeysList=");
}

protected void verifyReplication(String volumeName, String bucketName,
String keyName, ReplicationConfig replication)
throws IOException {
@@ -60,15 +60,27 @@ OMAuditLogger.Builder buildAuditMessage(
* Build a light audit map for KeyArgs containing only volume, bucket and key.
* @param keyArgs
*/
default Map<String, String> buildKeyArgsAuditMap(KeyArgs keyArgs) {

default Map<String, String> buildLightKeyArgsAuditMap(KeyArgs keyArgs) {
if (keyArgs == null) {
return new HashMap<>(0);
} else {
Map< String, String > auditMap = new LinkedHashMap<>();
Map<String, String> auditMap = new LinkedHashMap<>();
auditMap.put(OzoneConsts.VOLUME, keyArgs.getVolumeName());
auditMap.put(OzoneConsts.BUCKET, keyArgs.getBucketName());
auditMap.put(OzoneConsts.KEY, keyArgs.getKeyName());
return auditMap;
}
}

/**
* Build the full audit map for KeyArgs, including data size and other key attributes.
* @param keyArgs
*/
default Map<String, String> buildKeyArgsAuditMap(KeyArgs keyArgs) {
if (keyArgs == null) {
return new HashMap<>(0);
} else {
Map<String, String> auditMap = buildLightKeyArgsAuditMap(keyArgs);
auditMap.put(OzoneConsts.DATA_SIZE,
String.valueOf(keyArgs.getDataSize()));
if (keyArgs.hasType()) {
@@ -113,7 +113,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut
DeleteKeyRequest deleteKeyRequest = getOmRequest().getDeleteKeyRequest();

OzoneManagerProtocolProtos.KeyArgs keyArgs = deleteKeyRequest.getKeyArgs();
Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
Map<String, String> auditMap = buildLightKeyArgsAuditMap(keyArgs);

String volumeName = keyArgs.getVolumeName();
String bucketName = keyArgs.getBucketName();
@@ -187,6 +187,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut
omResponse.setDeleteKeyResponse(DeleteKeyResponse.newBuilder())
.build(), omKeyInfo, ozoneManager.isRatisEnabled(),
omBucketInfo.copyObject(), deletedOpenKeyInfo);
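// only files carry a meaningful data size and replication config; directory
// deletes keep the light audit map (volume, bucket, key)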
if (omKeyInfo.isFile()) {
auditMap.put(OzoneConsts.DATA_SIZE, String.valueOf(omKeyInfo.getDataSize()));
auditMap.put(OzoneConsts.REPLICATION_CONFIG, omKeyInfo.getReplicationConfig().toString());
}

result = Result.SUCCESS;
long endNanosDeleteKeySuccessLatencyNs = Time.monotonicNowNanos();
@@ -77,7 +77,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut

OzoneManagerProtocolProtos.KeyArgs keyArgs =
deleteKeyRequest.getKeyArgs();
Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
Map<String, String> auditMap = buildLightKeyArgsAuditMap(keyArgs);

String volumeName = keyArgs.getVolumeName();
String bucketName = keyArgs.getBucketName();
@@ -175,6 +175,11 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut
}
}

if (keyStatus.isFile()) {
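// as in OMKeyDeleteRequest, audit data size and replication config only for file keys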
auditMap.put(OzoneConsts.DATA_SIZE, String.valueOf(omKeyInfo.getDataSize()));
auditMap.put(OzoneConsts.REPLICATION_CONFIG, omKeyInfo.getReplicationConfig().toString());
}

omClientResponse = new OMKeyDeleteResponseWithFSO(omResponse
.setDeleteKeyResponse(DeleteKeyResponse.newBuilder()).build(),
keyName, omKeyInfo, ozoneManager.isRatisEnabled(),
@@ -66,8 +66,11 @@
import java.util.Map;

import static org.apache.hadoop.ozone.OzoneConsts.BUCKET;
import static org.apache.hadoop.ozone.OzoneConsts.DATA_SIZE;
import static org.apache.hadoop.ozone.OzoneConsts.DELETED_HSYNC_KEY;
import static org.apache.hadoop.ozone.OzoneConsts.DELETED_KEYS_LIST;
import static org.apache.hadoop.ozone.OzoneConsts.KEY;
import static org.apache.hadoop.ozone.OzoneConsts.REPLICATION_CONFIG;
import static org.apache.hadoop.ozone.OzoneConsts.UNDELETED_KEYS_LIST;
import static org.apache.hadoop.ozone.OzoneConsts.VOLUME;
import static org.apache.hadoop.ozone.audit.OMAction.DELETE_KEYS;
@@ -96,6 +99,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut
deleteKeyRequest.getDeleteKeys();

List<String> deleteKeys = new ArrayList<>(deleteKeyArgs.getKeysList());
List<OmKeyInfo> deleteKeysInfo = new ArrayList<>();

Exception exception = null;
OMClientResponse omClientResponse = null;
@@ -175,6 +179,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut
ozoneManager, omMetadataManager, volumeName, bucketName, keyName);
addKeyToAppropriateList(omKeyInfoList, omKeyInfo, dirList,
fileStatus);
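// keep the resolved key info so addDeletedKeys can report size and replication per key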
deleteKeysInfo.add(omKeyInfo);
} catch (Exception ex) {
deleteStatus = false;
LOG.error("Acl check failed for Key: {}", objectKey, ex);
@@ -211,6 +216,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut

// reset deleteKeys and deleteKeysInfo as the request failed.
deleteKeys = new ArrayList<>();
deleteKeysInfo.clear();
// Add all keys which are failed due to any other exception .
for (int i = indexFailed; i < length; i++) {
unDeletedKeys.addKeys(deleteKeyArgs.getKeys(i));
@@ -235,7 +241,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut
}
}

addDeletedKeys(auditMap, deleteKeys, unDeletedKeys.getKeysList());
addDeletedKeys(auditMap, deleteKeysInfo, unDeletedKeys.getKeysList());

markForAudit(auditLogger,
buildAuditMessage(DELETE_KEYS, auditMap, exception, userInfo));
@@ -346,8 +352,18 @@ protected OmKeyInfo getOmKeyInfo(
* Add key info to audit map for DeleteKeys request.
*/
protected static void addDeletedKeys(Map<String, String> auditMap,
List<String> deletedKeys, List<String> unDeletedKeys) {
auditMap.put(DELETED_KEYS_LIST, String.join(",", deletedKeys));
List<OmKeyInfo> deletedKeyInfos, List<String> unDeletedKeys) {
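// render each deleted key as "{key=<name>, dataSize=<bytes>, replicationConfig=<config>}", joined with ", "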
StringBuilder keys = new StringBuilder();
for (int i = 0; i < deletedKeyInfos.size(); i++) {
OmKeyInfo key = deletedKeyInfos.get(i);
keys.append("{").append(KEY).append("=").append(key.getKeyName()).append(", ");
keys.append(DATA_SIZE).append("=").append(key.getDataSize()).append(", ");
keys.append(REPLICATION_CONFIG).append("=").append(key.getReplicationConfig()).append("}");
if (i < deletedKeyInfos.size() - 1) {
keys.append(", ");
}
}
auditMap.put(DELETED_KEYS_LIST, keys.toString());
auditMap.put(UNDELETED_KEYS_LIST, String.join(",", unDeletedKeys));
}
