Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -45,11 +45,14 @@
import javax.ws.rs.core.Response;
import java.util.List;

import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_DIR_TABLE;
import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE;
import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE;
import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE;
import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE;
import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE;


/**
* Endpoint to fetch current state of ozone cluster.
*/
Expand Down Expand Up @@ -118,6 +121,12 @@ public Response getClusterState() {
// Keys from FILE_SYSTEM_OPTIMIZED buckets
GlobalStats fileRecord = globalStatsDao.findById(
TableCountTask.getRowKeyFromTable(FILE_TABLE));
// Keys from the DeletedTable
GlobalStats deletedKeyRecord = globalStatsDao.findById(
TableCountTask.getRowKeyFromTable(DELETED_TABLE));
// Directories from the DeletedDirectoryTable
GlobalStats deletedDirRecord = globalStatsDao.findById(
TableCountTask.getRowKeyFromTable(DELETED_DIR_TABLE));

if (volumeRecord != null) {
builder.setVolumes(volumeRecord.getValue());
Expand All @@ -127,13 +136,25 @@ public Response getClusterState() {
}

Long totalKeys = 0L;
Long deletedKeys = 0L;
Long deletedDirs = 0L;

if (keyRecord != null) {
totalKeys += keyRecord.getValue();
}
if (fileRecord != null) {
totalKeys += fileRecord.getValue();
}
if (deletedKeyRecord != null) {
deletedKeys += deletedKeyRecord.getValue();
}
if (deletedDirRecord != null) {
deletedDirs += deletedDirRecord.getValue();
}

builder.setKeys(totalKeys);
builder.setDeletedKeys(deletedKeys);
builder.setDeletedDirs(deletedDirs);

ClusterStateResponse response = builder
.setStorageReport(storageReport)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,18 @@ public final class ClusterStateResponse {
@JsonProperty("keys")
private long keys;

/**
* Total count of keys marked for deletion in the cluster.
*/
@JsonProperty("deletedKeys")
private long deletedKeys;

/**
 * Total count of directories marked for deletion in the cluster.
 */
@JsonProperty("deletedDirs")
private long deletedDirs;

/**
* Returns new builder class that builds a ClusterStateResponse.
*
Expand All @@ -104,6 +116,8 @@ private ClusterStateResponse(Builder b) {
this.containers = b.containers;
this.missingContainers = b.missingContainers;
this.openContainers = b.openContainers;
this.deletedKeys = b.deletedKeys;
this.deletedDirs = b.deletedDirs;
}

/**
Expand All @@ -121,6 +135,8 @@ public static final class Builder {
private long volumes;
private long buckets;
private long keys;
private long deletedKeys;
private long deletedDirs;

public Builder() {
// Default values
Expand All @@ -133,6 +149,8 @@ public Builder() {
this.pipelines = 0;
this.totalDatanodes = 0;
this.healthyDatanodes = 0;
this.deletedKeys = 0;
this.deletedDirs = 0;
}

public Builder setPipelines(int pipelines) {
Expand Down Expand Up @@ -180,6 +198,14 @@ public Builder setBuckets(long buckets) {
return this;
}

/**
 * Sets the total count of keys pending deletion in the cluster.
 *
 * @param deletedKeys number of rows in the OM deleted-keys table
 * @return this builder, for call chaining (consistent with the other setters)
 */
public Builder setDeletedKeys(long deletedKeys) {
  this.deletedKeys = deletedKeys;
  return this;
}

/**
 * Sets the total count of directories pending deletion in the cluster.
 *
 * @param deletedDirs number of rows in the OM deleted-directory table
 * @return this builder, for call chaining (consistent with the other setters)
 */
public Builder setDeletedDirs(long deletedDirs) {
  this.deletedDirs = deletedDirs;
  return this;
}

public Builder setKeys(long keys) {
this.keys = keys;
return this;
Expand Down Expand Up @@ -231,4 +257,12 @@ public long getBuckets() {
public long getKeys() {
return keys;
}

/**
 * Returns the total count of keys marked for deletion in the cluster.
 *
 * @return number of deleted keys reported in this cluster-state response
 */
public long getDeletedKeys() {
  return deletedKeys;
}

/**
 * Returns the total count of directories marked for deletion in the cluster.
 *
 * @return number of deleted directories reported in this cluster-state response
 */
public long getDeletedDirs() {
  return deletedDirs;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

Expand All @@ -44,6 +45,7 @@
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
Expand Down Expand Up @@ -258,6 +260,35 @@ public static void writeKeyToOm(OMMetadataManager omMetadataManager,
.build());
}

/**
* Writes deleted key information to the Ozone Manager metadata table.
* @param omMetadataManager the Ozone Manager metadata manager
* @param keyNames the names of the deleted keys
* @param bucketName name of the bucket that used to contain the deleted keys
* @param volName name of the volume that used to contain the deleted keys
* @throws IOException if there is an error accessing the metadata table
*/
/**
 * Writes deleted key information to the Ozone Manager metadata table.
 * All entries are grouped into a single {@link RepeatedOmKeyInfo} stored
 * under the Ozone key derived from the first name in {@code keyNames}.
 *
 * @param omMetadataManager the Ozone Manager metadata manager
 * @param keyNames the names of the deleted keys; a no-op when empty
 * @param bucketName name of the bucket that used to contain the deleted keys
 * @param volName name of the volume that used to contain the deleted keys
 * @throws IOException if there is an error accessing the metadata table
 */
public static void writeDeletedKeysToOm(OMMetadataManager omMetadataManager,
                                        List<String> keyNames,
                                        String bucketName,
                                        String volName) throws IOException {
  if (keyNames.isEmpty()) {
    // Nothing to write; avoids IndexOutOfBoundsException on keyNames.get(0).
    return;
  }
  List<OmKeyInfo> infos = new ArrayList<>(keyNames.size());
  for (String keyName : keyNames) {
    infos.add(new OmKeyInfo.Builder()
        .setBucketName(bucketName)
        .setVolumeName(volName)
        .setKeyName(keyName)
        .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE))
        .build());
  }
  // Get the Ozone key for the first deleted key; all entries share it.
  String omKey = omMetadataManager.getOzoneKey(volName,
      bucketName, keyNames.get(0));
  // Put the deleted key information into the deleted table
  omMetadataManager.getDeletedTable().put(omKey, new RepeatedOmKeyInfo(infos));
}

/**
* Write a directory as key on OM instance.
* We don't need to set size.
Expand Down Expand Up @@ -297,6 +328,27 @@ public static void writeDirToOm(OMMetadataManager omMetadataManager,
.build());
}

/**
 * Writes a deleted directory entry to the Ozone Manager metadata table.
 *
 * @param omMetadataManager the Ozone Manager metadata manager
 * @param bucketName name of the bucket that used to contain the directory
 * @param volumeName name of the volume that used to contain the directory
 * @param dirName name of the deleted directory
 * @param parentObjectId object ID of the directory's parent
 * @param bucketObjectId object ID of the enclosing bucket
 * @param volumeObjectId object ID of the enclosing volume
 * @throws IOException if there is an error accessing the metadata table
 */
public static void writeDeletedDirToOm(OMMetadataManager omMetadataManager,
                                       String bucketName,
                                       String volumeName,
                                       String dirName,
                                       long parentObjectId,
                                       long bucketObjectId,
                                       long volumeObjectId)
    throws IOException {
  // DB key in DeletedDirectoryTable => "volumeID/bucketID/parentId/dirName"
  String omKey = omMetadataManager.getOzonePathKey(volumeObjectId,
      bucketObjectId, parentObjectId, dirName);

  omMetadataManager.getDeletedDirTable().put(omKey,
      new OmKeyInfo.Builder()
          .setBucketName(bucketName)
          .setVolumeName(volumeName)
          .setKeyName(dirName)
          .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE))
          .build());
}

public static OzoneManagerServiceProviderImpl
getMockOzoneManagerServiceProvider() throws IOException {
OzoneManagerServiceProviderImpl omServiceProviderMock =
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -89,10 +89,12 @@

import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
import static org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.defaultLayoutVersionProto;
import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDeletedDirToOm;
import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline;
import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager;
import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager;
import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDataToOm;
import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDeletedKeysToOm;
import static org.apache.hadoop.ozone.recon.spi.impl.PrometheusServiceProviderImpl.PROMETHEUS_INSTANT_QUERY_API;
import static org.hadoop.ozone.recon.schema.tables.GlobalStatsTable.GLOBAL_STATS;
import static org.junit.jupiter.api.Assertions.assertEquals;
Expand All @@ -115,6 +117,7 @@
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URI;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.UUID;
Expand Down Expand Up @@ -255,6 +258,7 @@ private void initializeInjector() throws Exception {
dslContext = getDslContext();
}

@SuppressWarnings("checkstyle:MethodLength")
@BeforeEach
public void setUp() throws Exception {
// The following setup runs only once
Expand Down Expand Up @@ -366,9 +370,7 @@ public void setUp() throws Exception {
} catch (Exception ex) {
Assertions.fail(ex.getMessage());
}

// Write Data to OM

// A sample volume (sampleVol) and a bucket (bucketOne) is already created
// in AbstractOMMetadataManagerTest.
// Create a new volume and bucket and then write keys to the bucket.
Expand All @@ -393,13 +395,30 @@ public void setUp() throws Exception {

// key = key_one
writeDataToOm(reconOMMetadataManager, "key_one");

// key = key_two
writeDataToOm(reconOMMetadataManager, "key_two");

// key = key_three
writeDataToOm(reconOMMetadataManager, "key_three");

// Populate the deletedKeys table in OM DB
List<String> deletedKeysList1 = Arrays.asList("key1");
writeDeletedKeysToOm(reconOMMetadataManager,
deletedKeysList1, "Bucket1", "Volume1");
List<String> deletedKeysList2 = Arrays.asList("key2", "key2");
writeDeletedKeysToOm(reconOMMetadataManager,
deletedKeysList2, "Bucket2", "Volume2");
List<String> deletedKeysList3 = Arrays.asList("key3", "key3", "key3");
writeDeletedKeysToOm(reconOMMetadataManager,
deletedKeysList3, "Bucket3", "Volume3");

// Populate the deletedDirectories table in OM DB
writeDeletedDirToOm(reconOMMetadataManager, "Bucket1", "Volume1", "dir1",
3L, 2L, 1L);
writeDeletedDirToOm(reconOMMetadataManager, "Bucket2", "Volume2", "dir2",
6L, 5L, 4L);
writeDeletedDirToOm(reconOMMetadataManager, "Bucket3", "Volume3", "dir3",
9L, 8L, 7L);

// Truncate global stats table before running each test
dslContext.truncate(GLOBAL_STATS);
}
Expand Down Expand Up @@ -612,6 +631,8 @@ public void testGetClusterState() throws Exception {
Assertions.assertEquals(2, clusterStateResponse.getVolumes());
Assertions.assertEquals(2, clusterStateResponse.getBuckets());
Assertions.assertEquals(3, clusterStateResponse.getKeys());
Assertions.assertEquals(3, clusterStateResponse.getDeletedKeys());
Assertions.assertEquals(3, clusterStateResponse.getDeletedDirs());
}

@Test
Expand Down