diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
index 5990435b6c0e..7371a235ae16 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
@@ -421,7 +421,7 @@ public Response getUnhealthyContainers(
    * }
    * @param limit limits the number of deleted containers
    * @param prevKey previous container Id to skip
-   * @return Response of delete containers.
+   * @return Response of deleted containers.
    */
   @GET
   @Path("/deleted")
@@ -566,4 +566,76 @@ public Response getContainerMisMatchInsights() {
     }
     return Response.ok(containerDiscrepancyInfoList).build();
   }
+
+  /** This API retrieves the set of containers that SCM has already moved to
+   * the DELETED state but that are still present in OM, to find the list of
+   * keys mapped to such DELETED containers.
+   *
+   * limit - limits the number of such SCM-DELETED containers present in OM.
+   * prevKey - skips containers until the iterator seeks correctly to the
+   * previous containerId.
+   * Sample API Response:
+   * [
+   *   {
+   *     "containerId": 2,
+   *     "numberOfKeys": 2,
+   *     "pipelines": []
+   *   }
+   * ]
+   */
+  @GET
+  @Path("/mismatch/deleted")
+  public Response getOmContainersDeletedInSCM(
+      @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT)
+          int limit,
+      @DefaultValue(PREV_CONTAINER_ID_DEFAULT_VALUE)
+      @QueryParam(RECON_QUERY_PREVKEY) long prevKey) {
+    if (prevKey < 0) {
+      // Send back an empty response
+      return Response.status(Response.Status.NOT_ACCEPTABLE).build();
+    }
+    List<ContainerDiscrepancyInfo> containerDiscrepancyInfoList =
+        new ArrayList<>();
+    try {
+      Map<Long, ContainerMetadata> omContainers =
+          reconContainerMetadataManager.getContainers(limit, prevKey);
+
+      List<Long> deletedStateSCMContainerIds =
+          containerManager.getContainers().stream()
+              .filter(containerInfo -> (containerInfo.getState() ==
+                  HddsProtos.LifeCycleState.DELETED))
+              .map(containerInfo -> containerInfo.getContainerID()).collect(
+                  Collectors.toList());
+
+      List<Map.Entry<Long, ContainerMetadata>> omContainersDeletedInSCM =
+          omContainers.entrySet().stream().filter(containerMetadataEntry ->
+                  (deletedStateSCMContainerIds.contains(
+                      containerMetadataEntry.getKey())))
+              .collect(
+                  Collectors.toList());
+
+      omContainersDeletedInSCM.forEach(
+          containerMetadataEntry -> {
+            ContainerDiscrepancyInfo containerDiscrepancyInfo =
+                new ContainerDiscrepancyInfo();
+            containerDiscrepancyInfo.setContainerID(
+                containerMetadataEntry.getKey());
+            containerDiscrepancyInfo.setNumberOfKeys(
+                containerMetadataEntry.getValue().getNumberOfKeys());
+            containerDiscrepancyInfo.setPipelines(
+                containerMetadataEntry.getValue()
+                    .getPipelines());
+            containerDiscrepancyInfoList.add(containerDiscrepancyInfo);
+          });
+    } catch (IOException ex) {
+      throw new WebApplicationException(ex,
+          Response.Status.INTERNAL_SERVER_ERROR);
+    } catch (IllegalArgumentException e) {
+      throw new WebApplicationException(e, Response.Status.BAD_REQUEST);
+    } catch (Exception ex) {
+      throw new WebApplicationException(ex,
+          Response.Status.INTERNAL_SERVER_ERROR);
+    }
+    return Response.ok(containerDiscrepancyInfoList).build();
+  }
 }
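For reviewers who want to try the new `/containers/mismatch/deleted` endpoint, a minimal in-process sketch, mirroring how the tests below invoke it (it assumes a fully injected `ContainerEndpoint`; `-1` and `0` are the "fetch everything from the start" arguments the tests use):

```java
// Minimal sketch: call the endpoint directly, as TestContainerEndpoint does.
Response resp = containerEndpoint.getOmContainersDeletedInSCM(-1, 0);
@SuppressWarnings("unchecked")
List<ContainerDiscrepancyInfo> discrepancies =
    (List<ContainerDiscrepancyInfo>) resp.getEntity();
for (ContainerDiscrepancyInfo info : discrepancies) {
  // Each entry is a container SCM has already DELETED while OM still maps
  // keys to it.
  System.out.println(info.getContainerID() + " -> "
      + info.getNumberOfKeys() + " keys");
}
```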
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
new file mode 100644
index 000000000000..51fba63b73dc
--- /dev/null
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/OMDBInsightEndpoint.java
@@ -0,0 +1,481 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.api;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.recon.api.types.KeyEntityInfo;
+import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse;
+import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
+import org.apache.hadoop.ozone.recon.scm.ReconContainerManager;
+import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager;
+
+import javax.inject.Inject;
+import javax.ws.rs.DefaultValue;
+import javax.ws.rs.GET;
+import javax.ws.rs.Path;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.apache.hadoop.ozone.recon.ReconConstants.DEFAULT_FETCH_COUNT;
+import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_LIMIT;
+import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_QUERY_PREVKEY;
+
+/**
+ * Endpoint to get the following key-level info under the OM DB Insight page
+ * of Recon:
+ * 1. Number of open keys for Legacy/OBS buckets.
+ * 2. Number of open files for FSO buckets.
+ * 3. Amount of data mapped to open keys and open files.
+ * 4. Number of pending-delete keys in Legacy/OBS buckets and pending-delete
+ * files in FSO buckets.
+ * 5. Amount of data mapped to pending-delete keys in Legacy/OBS buckets and
+ * pending-delete files in FSO buckets.
+ */
+@Path("/keys")
+@Produces(MediaType.APPLICATION_JSON)
+@AdminOnly
+public class OMDBInsightEndpoint {
+
+  @Inject
+  private ContainerEndpoint containerEndpoint;
+  @Inject
+  private ReconContainerMetadataManager reconContainerMetadataManager;
+  private final ReconOMMetadataManager omMetadataManager;
+  private final ReconContainerManager containerManager;
+
+  @Inject
+  public OMDBInsightEndpoint(OzoneStorageContainerManager reconSCM,
+                             ReconOMMetadataManager omMetadataManager) {
+    this.containerManager =
+        (ReconContainerManager) reconSCM.getContainerManager();
+    this.omMetadataManager = omMetadataManager;
+  }
+
+  /**
+   * This method retrieves the set of keys/files which are open.
+   *
+   * @return the HTTP JSON response wrapped in the below format:
+   * {
+   *   replicatedTotal: 13824,
+   *   unreplicatedTotal: 4608,
+   *   entities: [
+   *     {
+   *       path: "/vol1/bucket1/key1",
+   *       keyState: "Open",
+   *       inStateSince: 1667564193026,
+   *       size: 1024,
+   *       replicatedSize: 3072,
+   *       unreplicatedSize: 1024,
+   *       replicationType: RATIS,
+   *       replicationFactor: THREE
+   *     },
+   *     {
+   *       path: "/vol1/bucket1/key2",
+   *       keyState: "Open",
+   *       inStateSince: 1667564193026,
+   *       size: 512,
+   *       replicatedSize: 1536,
+   *       unreplicatedSize: 512,
+   *       replicationType: RATIS,
+   *       replicationFactor: THREE
+   *     },
+   *     {
+   *       path: "/vol1/fso-bucket/dir1/file1",
+   *       keyState: "Open",
+   *       inStateSince: 1667564193026,
+   *       size: 1024,
+   *       replicatedSize: 3072,
+   *       unreplicatedSize: 1024,
+   *       replicationType: RATIS,
+   *       replicationFactor: THREE
+   *     },
+   *     {
+   *       path: "/vol1/fso-bucket/dir1/dir2/file2",
+   *       keyState: "Open",
+   *       inStateSince: 1667564193026,
+   *       size: 2048,
+   *       replicatedSize: 6144,
+   *       unreplicatedSize: 2048,
+   *       replicationType: RATIS,
+   *       replicationFactor: THREE
+   *     }
+   *   ]
+   * }
+   */
+  @GET
+  @Path("/open")
+  public Response getOpenKeyInfo(
+      @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT)
+          int limit,
+      @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY)
+          String prevKey) {
+    KeyInsightInfoResponse openKeyInsightInfo = new KeyInsightInfoResponse();
+    List<KeyEntityInfo> nonFSOKeyInfoList =
+        openKeyInsightInfo.getNonFSOKeyInfoList();
+    boolean skipPrevKeyDone = false;
+    boolean isLegacyBucketLayout = true;
+    boolean recordsFetchedLimitReached = false;
+    String lastKey = "";
+    List<KeyEntityInfo> fsoKeyInfoList =
+        openKeyInsightInfo.getFsoKeyInfoList();
+    for (BucketLayout layout : Arrays.asList(BucketLayout.LEGACY,
+        BucketLayout.FILE_SYSTEM_OPTIMIZED)) {
+      isLegacyBucketLayout = (layout == BucketLayout.LEGACY);
+      Table<String, OmKeyInfo> openKeyTable =
+          omMetadataManager.getOpenKeyTable(layout);
+      try (
+          TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+              keyIter = openKeyTable.iterator()) {
+        boolean skipPrevKey = false;
+        String seekKey = prevKey;
+        if (!skipPrevKeyDone && StringUtils.isNotBlank(prevKey)) {
+          skipPrevKey = true;
+          Table.KeyValue<String, OmKeyInfo> seekKeyValue =
+              keyIter.seek(seekKey);
+          // check if RocksDB was able to seek correctly to the given key
+          // prefix; if not, then return empty result
+          // In case of an empty prevKeyPrefix, all the keys are returned
+          if (seekKeyValue == null ||
+              (StringUtils.isNotBlank(prevKey) &&
+                  !seekKeyValue.getKey().equals(prevKey))) {
+            continue;
+          }
+        }
+        while (keyIter.hasNext()) {
+          Table.KeyValue<String, OmKeyInfo> kv = keyIter.next();
+          String key = kv.getKey();
+          lastKey = key;
+          OmKeyInfo omKeyInfo = kv.getValue();
+          // skip the prev key if prev key is present
+          if (skipPrevKey && key.equals(prevKey)) {
+            skipPrevKeyDone = true;
+            continue;
+          }
+          KeyEntityInfo keyEntityInfo = new KeyEntityInfo();
+          keyEntityInfo.setKey(key);
+          keyEntityInfo.setPath(omKeyInfo.getKeyName());
+          keyEntityInfo.setInStateSince(omKeyInfo.getCreationTime());
+          keyEntityInfo.setSize(omKeyInfo.getDataSize());
+          keyEntityInfo.setReplicatedSize(omKeyInfo.getReplicatedSize());
+          keyEntityInfo.setReplicationConfig(omKeyInfo.getReplicationConfig());
+          openKeyInsightInfo.setUnreplicatedTotal(
+              openKeyInsightInfo.getUnreplicatedTotal() +
+                  keyEntityInfo.getSize());
+          openKeyInsightInfo.setReplicatedTotal(
+              openKeyInsightInfo.getReplicatedTotal() +
+                  keyEntityInfo.getReplicatedSize());
+          boolean added =
+              isLegacyBucketLayout ?
+                  nonFSOKeyInfoList.add(keyEntityInfo) :
+                  fsoKeyInfoList.add(keyEntityInfo);
+          if ((nonFSOKeyInfoList.size() + fsoKeyInfoList.size()) == limit) {
+            recordsFetchedLimitReached = true;
+            break;
+          }
+        }
+      } catch (IOException ex) {
+        throw new WebApplicationException(ex,
+            Response.Status.INTERNAL_SERVER_ERROR);
+      } catch (IllegalArgumentException e) {
+        throw new WebApplicationException(e, Response.Status.BAD_REQUEST);
+      } catch (Exception ex) {
+        throw new WebApplicationException(ex,
+            Response.Status.INTERNAL_SERVER_ERROR);
+      }
+      if (recordsFetchedLimitReached) {
+        break;
+      }
+    }
+    openKeyInsightInfo.setLastKey(lastKey);
+    return Response.ok(openKeyInsightInfo).build();
+  }
+
+  private void getPendingForDeletionKeyInfo(
+      int limit,
+      String prevKey,
+      KeyInsightInfoResponse deletedKeyAndDirInsightInfo) {
+    List<RepeatedOmKeyInfo> repeatedOmKeyInfoList =
+        deletedKeyAndDirInsightInfo.getRepeatedOmKeyInfoList();
+    Table<String, RepeatedOmKeyInfo> deletedTable =
+        omMetadataManager.getDeletedTable();
+    try (
+        TableIterator<String,
+            ? extends Table.KeyValue<String, RepeatedOmKeyInfo>>
+            keyIter = deletedTable.iterator()) {
+      boolean skipPrevKey = false;
+      String seekKey = prevKey;
+      String lastKey = "";
+      if (StringUtils.isNotBlank(prevKey)) {
+        skipPrevKey = true;
+        Table.KeyValue<String, RepeatedOmKeyInfo> seekKeyValue =
+            keyIter.seek(seekKey);
+        // check if RocksDB was able to seek correctly to the given key
+        // prefix; if not, then return empty result
+        // In case of an empty prevKeyPrefix, all the keys are returned
+        if (seekKeyValue == null ||
+            (StringUtils.isNotBlank(prevKey) &&
+                !seekKeyValue.getKey().equals(prevKey))) {
+          return;
+        }
+      }
+      while (keyIter.hasNext()) {
+        Table.KeyValue<String, RepeatedOmKeyInfo> kv = keyIter.next();
+        String key = kv.getKey();
+        lastKey = key;
+        RepeatedOmKeyInfo repeatedOmKeyInfo = kv.getValue();
+        // skip the prev key if prev key is present
+        if (skipPrevKey && key.equals(prevKey)) {
+          continue;
+        }
+        updateReplicatedAndUnReplicatedTotal(deletedKeyAndDirInsightInfo,
+            repeatedOmKeyInfo);
+        repeatedOmKeyInfoList.add(repeatedOmKeyInfo);
+        if ((repeatedOmKeyInfoList.size()) == limit) {
+          break;
+        }
+      }
+      deletedKeyAndDirInsightInfo.setLastKey(lastKey);
+    } catch (IOException ex) {
+      throw new WebApplicationException(ex,
+          Response.Status.INTERNAL_SERVER_ERROR);
+    } catch (IllegalArgumentException e) {
+      throw new WebApplicationException(e, Response.Status.BAD_REQUEST);
+    } catch (Exception ex) {
+      throw new WebApplicationException(ex,
+          Response.Status.INTERNAL_SERVER_ERROR);
+    }
+  }
+
+  /** This method retrieves the set of keys/files pending deletion.
+   *
+   * limit - limits the number of keys/files returned.
+   * prevKey - e.g. /vol1/bucket1/key1: skips keys until the iterator
+   * seeks correctly to the given prevKey.
+   * Sample API Response:
+   * {
+   *   "lastKey": "vol1/bucket1/key1",
+   *   "replicatedTotal": -1530804718628866300,
+   *   "unreplicatedTotal": -1530804718628866300,
+   *   "deletedkeyinfo": [
+   *     {
+   *       "omKeyInfoList": [
+   *         {
+   *           "metadata": {},
+   *           "objectID": 0,
+   *           "updateID": 0,
+   *           "parentObjectID": 0,
+   *           "volumeName": "sampleVol",
+   *           "bucketName": "bucketOne",
+   *           "keyName": "key_one",
+   *           "dataSize": -1530804718628866300,
+   *           "keyLocationVersions": [],
+   *           "creationTime": 0,
+   *           "modificationTime": 0,
+   *           "replicationConfig": {
+   *             "replicationFactor": "ONE",
+   *             "requiredNodes": 1,
+   *             "replicationType": "STANDALONE"
+   *           },
+   *           "fileChecksum": null,
+   *           "fileName": "key_one",
+   *           "acls": [],
+   *           "path": "0/key_one",
+   *           "file": false,
+   *           "latestVersionLocations": null,
+   *           "replicatedSize": -1530804718628866300,
+   *           "fileEncryptionInfo": null,
+   *           "objectInfo": "OMKeyInfo{volume='sampleVol', bucket='bucketOne',
+   *           key='key_one', dataSize='-1530804718628866186', creationTime='0',
+   *           objectID='0', parentID='0', replication='STANDALONE/ONE',
+   *           fileChecksum='null}",
+   *           "updateIDset": false
+   *         }
+   *       ]
+   *     }
+   *   ],
+   *   "status": "OK"
+   * }
+   */
+  @GET
+  @Path("/deletePending")
+  public Response getDeletedKeyInfo(
+      @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT)
+          int limit,
+      @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY)
+          String prevKey) {
+    KeyInsightInfoResponse
+        deletedKeyInsightInfo = new KeyInsightInfoResponse();
+    getPendingForDeletionKeyInfo(limit, prevKey,
+        deletedKeyInsightInfo);
+    return Response.ok(deletedKeyInsightInfo).build();
+  }
+
+  private void getPendingForDeletionDirInfo(
+      int limit, String prevKey,
+      KeyInsightInfoResponse pendingForDeletionKeyInfo) {
+
+    List<KeyEntityInfo> deletedDirInfoList =
+        pendingForDeletionKeyInfo.getDeletedDirInfoList();
+
+    Table<String, OmKeyInfo> deletedDirTable =
+        omMetadataManager.getDeletedDirTable();
+    try (
+        TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+            keyIter = deletedDirTable.iterator()) {
+      boolean skipPrevKey = false;
+      String seekKey = prevKey;
+      String lastKey = "";
+      if (StringUtils.isNotBlank(prevKey)) {
+        skipPrevKey = true;
+        Table.KeyValue<String, OmKeyInfo> seekKeyValue =
+            keyIter.seek(seekKey);
+        // check if RocksDB was able to seek correctly to the given key
+        // prefix; if not, then return empty result
+        // In case of an empty prevKeyPrefix, all the keys are returned
+        if (seekKeyValue == null ||
+            (StringUtils.isNotBlank(prevKey) &&
+                !seekKeyValue.getKey().equals(prevKey))) {
+          return;
+        }
+      }
+      while (keyIter.hasNext()) {
+        Table.KeyValue<String, OmKeyInfo> kv = keyIter.next();
+        String key = kv.getKey();
+        lastKey = key;
+        OmKeyInfo omKeyInfo = kv.getValue();
+        // skip the prev key if prev key is present
+        if (skipPrevKey && key.equals(prevKey)) {
+          continue;
+        }
+        KeyEntityInfo keyEntityInfo = new KeyEntityInfo();
+        keyEntityInfo.setKey(key);
+        keyEntityInfo.setPath(omKeyInfo.getKeyName());
+        keyEntityInfo.setInStateSince(omKeyInfo.getCreationTime());
+        keyEntityInfo.setSize(omKeyInfo.getDataSize());
+        keyEntityInfo.setReplicatedSize(omKeyInfo.getReplicatedSize());
+        keyEntityInfo.setReplicationConfig(omKeyInfo.getReplicationConfig());
+        pendingForDeletionKeyInfo.setUnreplicatedTotal(
+            pendingForDeletionKeyInfo.getUnreplicatedTotal() +
+                keyEntityInfo.getSize());
+        pendingForDeletionKeyInfo.setReplicatedTotal(
+            pendingForDeletionKeyInfo.getReplicatedTotal() +
+                keyEntityInfo.getReplicatedSize());
+        deletedDirInfoList.add(keyEntityInfo);
+        if (deletedDirInfoList.size() == limit) {
+          break;
+        }
+      }
+      pendingForDeletionKeyInfo.setLastKey(lastKey);
+    } catch (IOException ex) {
+      throw new WebApplicationException(ex,
+          Response.Status.INTERNAL_SERVER_ERROR);
+    } catch (IllegalArgumentException e) {
+      throw new WebApplicationException(e, Response.Status.BAD_REQUEST);
+    } catch (Exception ex) {
+      throw new WebApplicationException(ex,
+          Response.Status.INTERNAL_SERVER_ERROR);
+    }
+  }
+
+  /** This method retrieves the set of directories pending deletion.
+   *
+   * limit - limits the number of directories returned.
+   * prevKey - e.g. /vol1/bucket1/dir1: skips directories until the iterator
+   * seeks correctly to the given prevKey.
+   * Sample API Response:
+   * {
+   *   "lastKey": "vol1/bucket1/dir1",
+   *   "replicatedTotal": -1530804718628866300,
+   *   "unreplicatedTotal": -1530804718628866300,
+   *   "deletedkeyinfo": [
+   *     {
+   *       "omKeyInfoList": [
+   *         {
+   *           "metadata": {},
+   *           "objectID": 0,
+   *           "updateID": 0,
+   *           "parentObjectID": 0,
+   *           "volumeName": "sampleVol",
+   *           "bucketName": "bucketOne",
+   *           "keyName": "key_one",
+   *           "dataSize": -1530804718628866300,
+   *           "keyLocationVersions": [],
+   *           "creationTime": 0,
+   *           "modificationTime": 0,
+   *           "replicationConfig": {
+   *             "replicationFactor": "ONE",
+   *             "requiredNodes": 1,
+   *             "replicationType": "STANDALONE"
+   *           },
+   *           "fileChecksum": null,
+   *           "fileName": "key_one",
+   *           "acls": [],
+   *           "path": "0/key_one",
+   *           "file": false,
+   *           "latestVersionLocations": null,
+   *           "replicatedSize": -1530804718628866300,
+   *           "fileEncryptionInfo": null,
+   *           "objectInfo": "OMKeyInfo{volume='sampleVol', bucket='bucketOne',
+   *           key='key_one', dataSize='-1530804718628866186', creationTime='0',
+   *           objectID='0', parentID='0', replication='STANDALONE/ONE',
+   *           fileChecksum='null}",
+   *           "updateIDset": false
+   *         }
+   *       ]
+   *     }
+   *   ],
+   *   "status": "OK"
+   * }
+   */
+  @GET
+  @Path("/deletePending/dirs")
+  public Response getDeletedDirInfo(
+      @DefaultValue(DEFAULT_FETCH_COUNT) @QueryParam(RECON_QUERY_LIMIT)
+          int limit,
+      @DefaultValue(StringUtils.EMPTY) @QueryParam(RECON_QUERY_PREVKEY)
+          String prevKey) {
+    KeyInsightInfoResponse
+        deletedDirInsightInfo = new KeyInsightInfoResponse();
+    getPendingForDeletionDirInfo(limit, prevKey,
+        deletedDirInsightInfo);
+    return Response.ok(deletedDirInsightInfo).build();
+  }
+
+  private void updateReplicatedAndUnReplicatedTotal(
+      KeyInsightInfoResponse deletedKeyAndDirInsightInfo,
+      RepeatedOmKeyInfo repeatedOmKeyInfo) {
+    repeatedOmKeyInfo.getOmKeyInfoList().forEach(omKeyInfo -> {
+      deletedKeyAndDirInsightInfo.setUnreplicatedTotal(
+          deletedKeyAndDirInsightInfo.getUnreplicatedTotal() +
+              omKeyInfo.getDataSize());
+      deletedKeyAndDirInsightInfo.setReplicatedTotal(
+          deletedKeyAndDirInsightInfo.getReplicatedTotal() +
+              omKeyInfo.getReplicatedSize());
+    });
+  }
+}
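Both pending-delete endpoints page through RocksDB with a `prevKey` cursor, so it may help reviewers to see the intended pagination loop spelled out. A minimal sketch, assuming an injected `OMDBInsightEndpoint` as in the tests below (the batch size of 100 is illustrative):

```java
// Pagination sketch for /keys/deletePending: feed each response's lastKey
// back in as prevKey until a short page comes back.
int batch = 100;      // illustrative page size
String prevKey = "";  // empty prevKey starts from the first record
while (true) {
  Response resp = omdbInsightEndpoint.getDeletedKeyInfo(batch, prevKey);
  KeyInsightInfoResponse page = (KeyInsightInfoResponse) resp.getEntity();
  page.getRepeatedOmKeyInfoList().forEach(repeated ->
      repeated.getOmKeyInfoList().forEach(keyInfo ->
          System.out.println(keyInfo.getKeyName())));
  if (page.getRepeatedOmKeyInfoList().size() < batch) {
    break;  // short page: nothing left to fetch
  }
  prevKey = page.getLastKey();  // cursor for the next page
}
```

The same loop works for `/keys/deletePending/dirs` via `getDeletedDirInfo`, reading `getDeletedDirInfoList()` instead.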
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerDiscrepancyInfo.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerDiscrepancyInfo.java
index 33702bf3a050..91bfb8630d9d 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerDiscrepancyInfo.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/ContainerDiscrepancyInfo.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.ozone.recon.api.types;
 
+import com.fasterxml.jackson.annotation.JsonInclude;
 import com.fasterxml.jackson.annotation.JsonProperty;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 
@@ -37,6 +38,7 @@ public class ContainerDiscrepancyInfo {
   private List<Pipeline> pipelines;
 
   @JsonProperty("existsAt")
+  @JsonInclude(JsonInclude.Include.NON_EMPTY)
   private String existsAt;
 
   public ContainerDiscrepancyInfo() {
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfo.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfo.java
new file mode 100644
index 000000000000..cf02c503f41d
--- /dev/null
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyEntityInfo.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.api.types;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
+
+import java.time.Instant;
+
+/**
+ * POJO wrapper for the metadata of a given key/file.
+ */
+public class KeyEntityInfo {
+
+  /** This is the key table key of RocksDB, and it lets the UI implement
+   * pagination: the UI sends the key of the last record as the prevKeyPrefix
+   * of the next API call. */
+  @JsonProperty("key")
+  private String key;
+
+  /** Path of a key/file. */
+  @JsonProperty("path")
+  private String path;
+
+  @JsonProperty("inStateSince")
+  private long inStateSince;
+
+  @JsonProperty("size")
+  private long size;
+
+  @JsonProperty("replicatedSize")
+  private long replicatedSize;
+
+  @JsonProperty("replicationInfo")
+  private ReplicationConfig replicationConfig;
+
+  public KeyEntityInfo() {
+    key = "";
+    path = "";
+    inStateSince = Instant.now().toEpochMilli();
+    size = 0L;
+    replicatedSize = 0L;
+    replicationConfig = null;
+  }
+
+  public String getKey() {
+    return key;
+  }
+
+  public void setKey(String key) {
+    this.key = key;
+  }
+
+  public String getPath() {
+    return path;
+  }
+
+  public void setPath(String path) {
+    this.path = path;
+  }
+
+  public long getInStateSince() {
+    return inStateSince;
+  }
+
+  public void setInStateSince(long inStateSince) {
+    this.inStateSince = inStateSince;
+  }
+
+  public long getSize() {
+    return size;
+  }
+
+  public void setSize(long size) {
+    this.size = size;
+  }
+
+  public long getReplicatedSize() {
+    return replicatedSize;
+  }
+
+  public void setReplicatedSize(long replicatedSize) {
+    this.replicatedSize = replicatedSize;
+  }
+
+  public ReplicationConfig getReplicationConfig() {
+    return replicationConfig;
+  }
+
+  public void setReplicationConfig(
+      ReplicationConfig replicationConfig) {
+    this.replicationConfig = replicationConfig;
+  }
+}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyInsightInfoResponse.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyInsightInfoResponse.java
new file mode 100644
index 000000000000..18da6b438e6b
--- /dev/null
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/KeyInsightInfoResponse.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.api.types;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * HTTP response wrapper for key insights.
+ */
+public class KeyInsightInfoResponse {
+
+  /** Last key sent. */
+  @JsonProperty("lastKey")
+  private String lastKey;
+
+  /** Amount of data mapped to all keys and files in
+   * a cluster across all DNs. */
+  @JsonProperty("replicatedTotal")
+  private long replicatedTotal;
+
+  /** Amount of data mapped to all keys and files on a single DN. */
+  @JsonProperty("unreplicatedTotal")
+  private long unreplicatedTotal;
+
+  /** List of all non-FSO keys. */
+  @JsonProperty("nonFSO")
+  @JsonInclude(JsonInclude.Include.NON_EMPTY)
+  private List<KeyEntityInfo> nonFSOKeyInfoList;
+
+  /** List of all FSO keys. */
+  @JsonProperty("fso")
+  @JsonInclude(JsonInclude.Include.NON_EMPTY)
+  private List<KeyEntityInfo> fsoKeyInfoList;
+
+  /** List of all deleted and repeatedly deleted keys. */
+  @JsonProperty("deletedKeyInfo")
+  @JsonInclude(JsonInclude.Include.NON_EMPTY)
+  private List<RepeatedOmKeyInfo> repeatedOmKeyInfoList;
+
+  @JsonProperty("deletedDirInfo")
+  @JsonInclude(JsonInclude.Include.NON_EMPTY)
+  private List<KeyEntityInfo> deletedDirInfoList;
+
+  /** Path status. */
+  @JsonProperty("status")
+  private ResponseStatus responseCode;
+
+  public KeyInsightInfoResponse() {
+    responseCode = ResponseStatus.OK;
+    lastKey = "";
+    replicatedTotal = 0L;
+    unreplicatedTotal = 0L;
+    nonFSOKeyInfoList = new ArrayList<>();
+    fsoKeyInfoList = new ArrayList<>();
+    repeatedOmKeyInfoList = new ArrayList<>();
+    deletedDirInfoList = new ArrayList<>();
+  }
+
+  public String getLastKey() {
+    return lastKey;
+  }
+
+  public void setLastKey(String lastKey) {
+    this.lastKey = lastKey;
+  }
+
+  public long getReplicatedTotal() {
+    return replicatedTotal;
+  }
+
+  public void setReplicatedTotal(long replicatedTotal) {
+    this.replicatedTotal = replicatedTotal;
+  }
+
+  public long getUnreplicatedTotal() {
+    return unreplicatedTotal;
+  }
+
+  public void setUnreplicatedTotal(long unreplicatedTotal) {
+    this.unreplicatedTotal = unreplicatedTotal;
+  }
+
+  public List<KeyEntityInfo> getNonFSOKeyInfoList() {
+    return nonFSOKeyInfoList;
+  }
+
+  public void setNonFSOKeyInfoList(
+      List<KeyEntityInfo> nonFSOKeyInfoList) {
+    this.nonFSOKeyInfoList = nonFSOKeyInfoList;
+  }
+
+  public List<KeyEntityInfo> getFsoKeyInfoList() {
+    return fsoKeyInfoList;
+  }
+
+  public void setFsoKeyInfoList(
+      List<KeyEntityInfo> fsoKeyInfoList) {
+    this.fsoKeyInfoList = fsoKeyInfoList;
+  }
+
+  public List<RepeatedOmKeyInfo> getRepeatedOmKeyInfoList() {
+    return repeatedOmKeyInfoList;
+  }
+
+  public void setRepeatedOmKeyInfoList(
+      List<RepeatedOmKeyInfo> repeatedOmKeyInfoList) {
+    this.repeatedOmKeyInfoList = repeatedOmKeyInfoList;
+  }
+
+  public List<KeyEntityInfo> getDeletedDirInfoList() {
+    return deletedDirInfoList;
+  }
+
+  public void setDeletedDirInfoList(
+      List<KeyEntityInfo> deletedDirInfoList) {
+    this.deletedDirInfoList = deletedDirInfoList;
+  }
+
+  public ResponseStatus getResponseCode() {
+    return responseCode;
+  }
+
+  public void setResponseCode(ResponseStatus responseCode) {
+    this.responseCode = responseCode;
+  }
+
+}
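One behavior worth spelling out for reviewers: because the list fields above carry `@JsonInclude(JsonInclude.Include.NON_EMPTY)`, an empty section is dropped from the payload entirely rather than serialized as `[]`. A quick sketch of the observable effect (assumes jackson-databind on the classpath; the printed shape is illustrative):

```java
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;

// Sketch: empty annotated lists are omitted from the JSON output.
static String toJson(KeyInsightInfoResponse response)
    throws JsonProcessingException {
  return new ObjectMapper().writeValueAsString(response);
}
// A freshly constructed response serializes to something like
// {"lastKey":"","replicatedTotal":0,"unreplicatedTotal":0,"status":"OK"};
// "nonFSO", "fso", "deletedKeyInfo" and "deletedDirInfo" are all absent
// because all four lists are still empty.
```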
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
index 96c68a710190..cd6e31ec1bac 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
@@ -34,6 +34,7 @@
 import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
 import org.apache.hadoop.hdds.utils.db.RDBBatchOperation;
 import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
 import org.apache.hadoop.ozone.om.OMMetadataManager;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
@@ -1173,6 +1174,20 @@ public void testGetSCMDeletedContainersPrevKeyParam() throws Exception {
         deletedContainerInfo.getContainerState());
   }
 
+  private void updateContainerStateToDeleted(long containerId)
+      throws IOException, InvalidStateTransitionException, TimeoutException {
+    reconContainerManager.updateContainerState(ContainerID.valueOf(containerId),
+        HddsProtos.LifeCycleEvent.FINALIZE);
+    reconContainerManager.updateContainerState(ContainerID.valueOf(containerId),
+        HddsProtos.LifeCycleEvent.CLOSE);
+    reconContainerManager
+        .updateContainerState(ContainerID.valueOf(containerId),
+            HddsProtos.LifeCycleEvent.DELETE);
+    reconContainerManager
+        .updateContainerState(ContainerID.valueOf(containerId),
+            HddsProtos.LifeCycleEvent.CLEANUP);
+  }
+
   @Test
   public void testGetContainerInsightsNonSCMContainers()
       throws IOException, TimeoutException {
@@ -1221,4 +1236,111 @@ public void testGetContainerInsightsNonOMContainers()
     assertEquals(1, containerDiscrepancyInfoList.size());
     assertEquals("SCM", containerDiscrepancyInfo.getExistsAt());
   }
+
+  @Test
+  public void testGetOmContainersDeletedInSCM() throws Exception {
+    Map<Long, ContainerMetadata> omContainers =
+        reconContainerMetadataManager.getContainers(-1, 0);
+    putContainerInfos(2);
+    List<ContainerInfo> scmContainers = reconContainerManager.getContainers();
+    assertEquals(2, omContainers.size());
+    assertEquals(2, scmContainers.size());
+    // Move container 1 through CLOSING and CLOSED and then to DELETED
+    reconContainerManager.updateContainerState(ContainerID.valueOf(1),
+        HddsProtos.LifeCycleEvent.FINALIZE);
+    reconContainerManager.updateContainerState(ContainerID.valueOf(1),
+        HddsProtos.LifeCycleEvent.CLOSE);
+    reconContainerManager
+        .updateContainerState(ContainerID.valueOf(1),
+            HddsProtos.LifeCycleEvent.DELETE);
+    Set<ContainerID> containerIDs = containerStateManager
+        .getContainerIDs(HddsProtos.LifeCycleState.DELETING);
+    Assert.assertEquals(1, containerIDs.size());
+
+    reconContainerManager
+        .updateContainerState(ContainerID.valueOf(1),
+            HddsProtos.LifeCycleEvent.CLEANUP);
+    containerIDs = containerStateManager
+        .getContainerIDs(HddsProtos.LifeCycleState.DELETED);
+    Assert.assertEquals(1, containerIDs.size());
+
+    List<ContainerInfo> deletedSCMContainers =
+        reconContainerManager.getContainers(HddsProtos.LifeCycleState.DELETED);
+    assertEquals(1, deletedSCMContainers.size());
+
+    Response omContainersDeletedInSCMResponse =
+        containerEndpoint.getOmContainersDeletedInSCM(-1, 0);
+    assertNotNull(omContainersDeletedInSCMResponse);
+    List<ContainerDiscrepancyInfo> containerDiscrepancyInfoList =
+        (List<ContainerDiscrepancyInfo>)
+            omContainersDeletedInSCMResponse.getEntity();
+    assertEquals(3, containerDiscrepancyInfoList.get(0)
+        .getNumberOfKeys());
assertEquals(1, containerDiscrepancyInfoList.size()); + } + + @Test + public void testGetOmContainersDeletedInSCMLimitParam() throws Exception { + Map omContainers = + reconContainerMetadataManager.getContainers(-1, 0); + putContainerInfos(2); + List scmContainers = reconContainerManager.getContainers(); + assertEquals(omContainers.size(), scmContainers.size()); + // Update container state of Container Id 1 to CLOSING to CLOSED + // and then to DELETED + updateContainerStateToDeleted(1); + + Set containerIDs = containerStateManager + .getContainerIDs(HddsProtos.LifeCycleState.DELETED); + Assert.assertEquals(1, containerIDs.size()); + + List deletedSCMContainers = + reconContainerManager.getContainers(HddsProtos.LifeCycleState.DELETED); + assertEquals(1, deletedSCMContainers.size()); + + Response omContainersDeletedInSCMResponse = + containerEndpoint.getOmContainersDeletedInSCM(1, 0); + assertNotNull(omContainersDeletedInSCMResponse); + List containerDiscrepancyInfoList = + (List) + omContainersDeletedInSCMResponse.getEntity(); + assertEquals(3, containerDiscrepancyInfoList.get(0) + .getNumberOfKeys()); + assertEquals(1, containerDiscrepancyInfoList.size()); + } + + @Test + public void testGetOmContainersDeletedInSCMPrevContainerParam() + throws Exception { + Map omContainers = + reconContainerMetadataManager.getContainers(-1, 0); + putContainerInfos(2); + List scmContainers = reconContainerManager.getContainers(); + assertEquals(omContainers.size(), scmContainers.size()); + // Update container state of Container Id 1 to CLOSING to CLOSED + // and then to DELETED + updateContainerStateToDeleted(1); + updateContainerStateToDeleted(2); + + Set containerIDs = containerStateManager + .getContainerIDs(HddsProtos.LifeCycleState.DELETED); + Assert.assertEquals(2, containerIDs.size()); + + List deletedSCMContainers = + reconContainerManager.getContainers(HddsProtos.LifeCycleState.DELETED); + assertEquals(2, deletedSCMContainers.size()); + + Response omContainersDeletedInSCMResponse = + containerEndpoint.getOmContainersDeletedInSCM(2, + 1); + assertNotNull(omContainersDeletedInSCMResponse); + List containerDiscrepancyInfoList = + (List) + omContainersDeletedInSCMResponse.getEntity(); + assertEquals(2, containerDiscrepancyInfoList.get(0) + .getNumberOfKeys()); + assertEquals(1, containerDiscrepancyInfoList.size()); + assertEquals(2, containerDiscrepancyInfoList.get(0).getContainerID()); + } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java new file mode 100644 index 000000000000..23b94e4ccfdf --- /dev/null +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOmDBInsightEndPoint.java @@ -0,0 +1,498 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.api; + +import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.recon.ReconTestInjector; +import org.apache.hadoop.ozone.recon.api.types.KeyInsightInfoResponse; +import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager; +import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.scm.ReconPipelineManager; +import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; +import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager; +import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; +import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; +import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl; +import org.apache.hadoop.ozone.recon.tasks.ContainerKeyMapperTask; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.jupiter.api.Assertions; +import org.junit.rules.TemporaryFolder; + +import javax.ws.rs.core.Response; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Random; +import java.util.stream.Collectors; + +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getBucketLayout; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getOmKeyLocationInfo; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager; +import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDataToOm; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Unit test for OmDBInsightEndPoint. 
+ */ +public class TestOmDBInsightEndPoint { + + @Rule + public TemporaryFolder temporaryFolder = new TemporaryFolder(); + private OzoneStorageContainerManager ozoneStorageContainerManager; + private ReconContainerMetadataManager reconContainerMetadataManager; + private OMMetadataManager omMetadataManager; + private ReconPipelineManager reconPipelineManager; + private ReconOMMetadataManager reconOMMetadataManager; + private OMDBInsightEndpoint omdbInsightEndpoint; + private Pipeline pipeline; + private Random random = new Random(); + + @Before + public void setUp() throws Exception { + omMetadataManager = initializeNewOmMetadataManager( + temporaryFolder.newFolder()); + reconOMMetadataManager = getTestReconOmMetadataManager(omMetadataManager, + temporaryFolder.newFolder()); + ReconTestInjector reconTestInjector = + new ReconTestInjector.Builder(temporaryFolder) + .withReconSqlDb() + .withReconOm(reconOMMetadataManager) + .withOmServiceProvider(mock(OzoneManagerServiceProviderImpl.class)) + // No longer using mock reconSCM as we need nodeDB in Facade + // to establish datanode UUID to hostname mapping + .addBinding(OzoneStorageContainerManager.class, + ReconStorageContainerManagerFacade.class) + .withContainerDB() + .addBinding(StorageContainerServiceProvider.class, + mock(StorageContainerServiceProviderImpl.class)) + .addBinding(OMDBInsightEndpoint.class) + .addBinding(ContainerHealthSchemaManager.class) + .build(); + reconContainerMetadataManager = + reconTestInjector.getInstance(ReconContainerMetadataManager.class); + omdbInsightEndpoint = reconTestInjector.getInstance( + OMDBInsightEndpoint.class); + ozoneStorageContainerManager = + reconTestInjector.getInstance(OzoneStorageContainerManager.class); + reconPipelineManager = (ReconPipelineManager) + ozoneStorageContainerManager.getPipelineManager(); + pipeline = getRandomPipeline(); + reconPipelineManager.addPipeline(pipeline); + setUpOmData(); + } + + private void setUpOmData() throws Exception { + List omKeyLocationInfoList = new ArrayList<>(); + BlockID blockID1 = new BlockID(1, 101); + OmKeyLocationInfo omKeyLocationInfo1 = getOmKeyLocationInfo(blockID1, + pipeline); + omKeyLocationInfoList.add(omKeyLocationInfo1); + + BlockID blockID2 = new BlockID(2, 102); + OmKeyLocationInfo omKeyLocationInfo2 = getOmKeyLocationInfo(blockID2, + pipeline); + omKeyLocationInfoList.add(omKeyLocationInfo2); + + OmKeyLocationInfoGroup omKeyLocationInfoGroup = new + OmKeyLocationInfoGroup(0, omKeyLocationInfoList); + + //key = key_one, Blocks = [ {CID = 1, LID = 101}, {CID = 2, LID = 102} ] + writeDataToOm(reconOMMetadataManager, + "key_one", "bucketOne", "sampleVol", + Collections.singletonList(omKeyLocationInfoGroup)); + + List infoGroups = new ArrayList<>(); + BlockID blockID3 = new BlockID(1, 103); + OmKeyLocationInfo omKeyLocationInfo3 = getOmKeyLocationInfo(blockID3, + pipeline); + + List omKeyLocationInfoListNew = new ArrayList<>(); + omKeyLocationInfoListNew.add(omKeyLocationInfo3); + infoGroups.add(new OmKeyLocationInfoGroup(0, + omKeyLocationInfoListNew)); + + BlockID blockID4 = new BlockID(2, 104); + OmKeyLocationInfo omKeyLocationInfo4 = getOmKeyLocationInfo(blockID4, + pipeline); + + omKeyLocationInfoListNew = new ArrayList<>(); + omKeyLocationInfoListNew.add(omKeyLocationInfo4); + infoGroups.add(new OmKeyLocationInfoGroup(1, + omKeyLocationInfoListNew)); + + //key = key_two, Blocks = [ {CID = 1, LID = 103}, {CID = 1, LID = 104} ] + writeDataToOm(reconOMMetadataManager, + "key_two", "bucketOne", "sampleVol", infoGroups); + + List 
omKeyLocationInfoList2 = new ArrayList<>(); + BlockID blockID5 = new BlockID(3, 105); + OmKeyLocationInfo omKeyLocationInfo5 = getOmKeyLocationInfo(blockID5, + pipeline); + omKeyLocationInfoList2.add(omKeyLocationInfo5); + + BlockID blockID6 = new BlockID(3, 106); + OmKeyLocationInfo omKeyLocationInfo6 = getOmKeyLocationInfo(blockID6, + pipeline); + omKeyLocationInfoList2.add(omKeyLocationInfo6); + + OmKeyLocationInfoGroup omKeyLocationInfoGroup2 = new + OmKeyLocationInfoGroup(0, omKeyLocationInfoList2); + + //key = key_three, Blocks = [ {CID = 2, LID = 2}, {CID = 2, LID = 3} ] + writeDataToOm(reconOMMetadataManager, + "key_three", "bucketOne", "sampleVol", + Collections.singletonList(omKeyLocationInfoGroup2)); + + //Generate Recon container DB data. + OMMetadataManager omMetadataManagerMock = mock(OMMetadataManager.class); + Table tableMock = mock(Table.class); + when(tableMock.getName()).thenReturn("KeyTable"); + when(omMetadataManagerMock.getKeyTable(getBucketLayout())) + .thenReturn(tableMock); + ContainerKeyMapperTask containerKeyMapperTask = + new ContainerKeyMapperTask(reconContainerMetadataManager); + containerKeyMapperTask.reprocess(reconOMMetadataManager); + } + + @Test + public void testGetOpenKeyInfo() throws Exception { + OmKeyInfo omKeyInfo = + getOmKeyInfo("sampleVol", "bucketOne", "key_one", true); + + reconOMMetadataManager.getOpenKeyTable(getBucketLayout()) + .put("/sampleVol/bucketOne/key_one", omKeyInfo); + OmKeyInfo omKeyInfo1 = + reconOMMetadataManager.getOpenKeyTable(getBucketLayout()) + .get("/sampleVol/bucketOne/key_one"); + Assertions.assertEquals("key_one", omKeyInfo1.getKeyName()); + Response openKeyInfoResp = omdbInsightEndpoint.getOpenKeyInfo(-1, ""); + KeyInsightInfoResponse keyInsightInfoResp = + (KeyInsightInfoResponse) openKeyInfoResp.getEntity(); + Assertions.assertNotNull(keyInsightInfoResp); + Assertions.assertEquals("key_one", + keyInsightInfoResp.getNonFSOKeyInfoList().get(0).getPath()); + } + + @Test + public void testGetOpenKeyInfoLimitParam() throws Exception { + OmKeyInfo omKeyInfo1 = + getOmKeyInfo("sampleVol", "bucketOne", "key_one", true); + OmKeyInfo omKeyInfo2 = + getOmKeyInfo("sampleVol", "bucketOne", "key_two", true); + OmKeyInfo omKeyInfo3 = + getOmKeyInfo("sampleVol", "bucketOne", "key_three", true); + + reconOMMetadataManager.getOpenKeyTable(getBucketLayout()) + .put("/sampleVol/bucketOne/key_one", omKeyInfo1); + reconOMMetadataManager.getOpenKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED) + .put("/sampleVol/bucketOne/key_two", omKeyInfo2); + reconOMMetadataManager.getOpenKeyTable(getBucketLayout()) + .put("/sampleVol/bucketOne/key_three", omKeyInfo3); + Response openKeyInfoResp = omdbInsightEndpoint.getOpenKeyInfo(2, ""); + KeyInsightInfoResponse keyInsightInfoResp = + (KeyInsightInfoResponse) openKeyInfoResp.getEntity(); + Assertions.assertNotNull(keyInsightInfoResp); + Assertions.assertEquals(2, + keyInsightInfoResp.getNonFSOKeyInfoList().size()); + Assertions.assertEquals(0, keyInsightInfoResp.getFsoKeyInfoList().size()); + Assertions.assertEquals(2, keyInsightInfoResp.getFsoKeyInfoList().size() + + keyInsightInfoResp.getNonFSOKeyInfoList().size()); + Assertions.assertEquals("key_three", + keyInsightInfoResp.getNonFSOKeyInfoList().get(1).getPath()); + + openKeyInfoResp = omdbInsightEndpoint.getOpenKeyInfo(3, ""); + keyInsightInfoResp = + (KeyInsightInfoResponse) openKeyInfoResp.getEntity(); + Assertions.assertNotNull(keyInsightInfoResp); + Assertions.assertEquals(2, + keyInsightInfoResp.getNonFSOKeyInfoList().size()); + 
Assertions.assertEquals(1, keyInsightInfoResp.getFsoKeyInfoList().size()); + Assertions.assertEquals(3, keyInsightInfoResp.getFsoKeyInfoList().size() + + keyInsightInfoResp.getNonFSOKeyInfoList().size()); + Assertions.assertEquals("key_three", + keyInsightInfoResp.getNonFSOKeyInfoList().get(1).getPath()); + } + + @Test + public void testGetOpenKeyInfoPrevKeyParam() throws Exception { + OmKeyInfo omKeyInfo1 = + getOmKeyInfo("sampleVol", "bucketOne", "key_one", true); + OmKeyInfo omKeyInfo2 = + getOmKeyInfo("sampleVol", "bucketOne", "key_two", true); + OmKeyInfo omKeyInfo3 = + getOmKeyInfo("sampleVol", "bucketOne", "key_three", true); + + reconOMMetadataManager.getOpenKeyTable(getBucketLayout()) + .put("/sampleVol/bucketOne/key_one", omKeyInfo1); + reconOMMetadataManager.getOpenKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED) + .put("/sampleVol/bucketOne/key_two", omKeyInfo2); + reconOMMetadataManager.getOpenKeyTable(getBucketLayout()) + .put("/sampleVol/bucketOne/key_three", omKeyInfo3); + Response openKeyInfoResp = + omdbInsightEndpoint.getOpenKeyInfo(-1, "/sampleVol/bucketOne/key_one"); + KeyInsightInfoResponse keyInsightInfoResp = + (KeyInsightInfoResponse) openKeyInfoResp.getEntity(); + Assertions.assertNotNull(keyInsightInfoResp); + Assertions.assertEquals(1, + keyInsightInfoResp.getNonFSOKeyInfoList().size()); + Assertions.assertEquals(1, keyInsightInfoResp.getFsoKeyInfoList().size()); + Assertions.assertEquals(2, keyInsightInfoResp.getFsoKeyInfoList().size() + + keyInsightInfoResp.getNonFSOKeyInfoList().size()); + Assertions.assertEquals("key_three", + keyInsightInfoResp.getNonFSOKeyInfoList().get(0).getPath()); + Assertions.assertEquals("key_two", + keyInsightInfoResp.getFsoKeyInfoList().get(0).getPath()); + } + + @Test + public void testGetDeletedKeyInfoLimitParam() throws Exception { + OmKeyInfo omKeyInfo1 = + getOmKeyInfo("sampleVol", "bucketOne", "key_one", true); + OmKeyInfo omKeyInfo2 = + getOmKeyInfo("sampleVol", "bucketOne", "key_two", true); + OmKeyInfo omKeyInfo3 = + getOmKeyInfo("sampleVol", "bucketOne", "key_three", true); + + reconOMMetadataManager.getKeyTable(getBucketLayout()) + .put("/sampleVol/bucketOne/key_one", omKeyInfo1); + reconOMMetadataManager.getKeyTable(getBucketLayout()) + .put("/sampleVol/bucketOne/key_two", omKeyInfo2); + reconOMMetadataManager.getKeyTable(getBucketLayout()) + .put("/sampleVol/bucketOne/key_three", omKeyInfo3); + + OmKeyInfo omKeyInfoCopy = + reconOMMetadataManager.getKeyTable(getBucketLayout()) + .get("/sampleVol/bucketOne/key_one"); + Assertions.assertEquals("key_one", omKeyInfoCopy.getKeyName()); + RepeatedOmKeyInfo repeatedOmKeyInfo1 = new RepeatedOmKeyInfo(omKeyInfoCopy); + + reconOMMetadataManager.getDeletedTable() + .put("/sampleVol/bucketOne/key_one", repeatedOmKeyInfo1); + Assertions.assertEquals("key_one", + repeatedOmKeyInfo1.getOmKeyInfoList().get(0).getKeyName()); + + RepeatedOmKeyInfo repeatedOmKeyInfo2 = new RepeatedOmKeyInfo(omKeyInfo2); + RepeatedOmKeyInfo repeatedOmKeyInfo3 = new RepeatedOmKeyInfo(omKeyInfo2); + reconOMMetadataManager.getDeletedTable() + .put("/sampleVol/bucketOne/key_two", repeatedOmKeyInfo2); + reconOMMetadataManager.getDeletedTable() + .put("/sampleVol/bucketOne/key_three", repeatedOmKeyInfo3); + + Response deletedKeyInfo = omdbInsightEndpoint.getDeletedKeyInfo(2, ""); + KeyInsightInfoResponse keyInsightInfoResp = + (KeyInsightInfoResponse) deletedKeyInfo.getEntity(); + Assertions.assertNotNull(keyInsightInfoResp); + Assertions.assertEquals(2, + keyInsightInfoResp.getRepeatedOmKeyInfoList().size()); + 
Assertions.assertEquals("key_two", + keyInsightInfoResp.getRepeatedOmKeyInfoList().get(1).getOmKeyInfoList() + .get(0).getKeyName()); + } + + @Test + public void testGetDeletedKeyInfoPrevKeyParam() throws Exception { + OmKeyInfo omKeyInfo1 = + getOmKeyInfo("sampleVol", "bucketOne", "key_one", true); + OmKeyInfo omKeyInfo2 = + getOmKeyInfo("sampleVol", "bucketOne", "key_two", true); + OmKeyInfo omKeyInfo3 = + getOmKeyInfo("sampleVol", "bucketOne", "key_three", true); + + RepeatedOmKeyInfo repeatedOmKeyInfo1 = new RepeatedOmKeyInfo(omKeyInfo1); + RepeatedOmKeyInfo repeatedOmKeyInfo2 = new RepeatedOmKeyInfo(omKeyInfo2); + RepeatedOmKeyInfo repeatedOmKeyInfo3 = new RepeatedOmKeyInfo(omKeyInfo3); + + reconOMMetadataManager.getDeletedTable() + .put("/sampleVol/bucketOne/key_one", repeatedOmKeyInfo1); + reconOMMetadataManager.getDeletedTable() + .put("/sampleVol/bucketOne/key_two", repeatedOmKeyInfo2); + reconOMMetadataManager.getDeletedTable() + .put("/sampleVol/bucketOne/key_three", repeatedOmKeyInfo3); + + Response deletedKeyInfo = omdbInsightEndpoint.getDeletedKeyInfo(2, + "/sampleVol/bucketOne/key_one"); + KeyInsightInfoResponse keyInsightInfoResp = + (KeyInsightInfoResponse) deletedKeyInfo.getEntity(); + Assertions.assertNotNull(keyInsightInfoResp); + Assertions.assertEquals(2, + keyInsightInfoResp.getRepeatedOmKeyInfoList().size()); + + List pendingDeleteKeys = + keyInsightInfoResp.getRepeatedOmKeyInfoList().stream() + .map( + repeatedOmKeyInfo -> repeatedOmKeyInfo.getOmKeyInfoList().get(0) + .getKeyName()) + .collect(Collectors.toList()); + Assertions.assertFalse(pendingDeleteKeys.contains("key_one")); + } + + @Test + public void testGetDeletedKeyInfo() throws Exception { + OmKeyInfo omKeyInfo = + getOmKeyInfo("sampleVol", "bucketOne", "key_one", true); + + reconOMMetadataManager.getKeyTable(getBucketLayout()) + .put("/sampleVol/bucketOne/key_one", omKeyInfo); + OmKeyInfo omKeyInfo1 = reconOMMetadataManager.getKeyTable(getBucketLayout()) + .get("/sampleVol/bucketOne/key_one"); + Assertions.assertEquals("key_one", omKeyInfo1.getKeyName()); + RepeatedOmKeyInfo repeatedOmKeyInfo = new RepeatedOmKeyInfo(omKeyInfo); + reconOMMetadataManager.getDeletedTable() + .put("/sampleVol/bucketOne/key_one", repeatedOmKeyInfo); + RepeatedOmKeyInfo repeatedOmKeyInfo1 = + reconOMMetadataManager.getDeletedTable() + .get("/sampleVol/bucketOne/key_one"); + Assertions.assertEquals("key_one", + repeatedOmKeyInfo1.getOmKeyInfoList().get(0).getKeyName()); + Response deletedKeyInfo = omdbInsightEndpoint.getDeletedKeyInfo(-1, ""); + KeyInsightInfoResponse keyInsightInfoResp = + (KeyInsightInfoResponse) deletedKeyInfo.getEntity(); + Assertions.assertNotNull(keyInsightInfoResp); + Assertions.assertEquals("key_one", + keyInsightInfoResp.getRepeatedOmKeyInfoList().get(0).getOmKeyInfoList() + .get(0).getKeyName()); + } + + private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName, + String keyName, boolean isFile) { + return new OmKeyInfo.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .setFile(isFile) + .setReplicationConfig(StandaloneReplicationConfig + .getInstance(HddsProtos.ReplicationFactor.ONE)) + .setDataSize(random.nextLong()) + .build(); + } + + @Test + public void testGetDeletedDirInfoLimitParam() throws Exception { + OmKeyInfo omKeyInfo1 = + getOmKeyInfo("sampleVol", "bucketOne", "dir_one", false); + OmKeyInfo omKeyInfo2 = + getOmKeyInfo("sampleVol", "bucketOne", "dir_two", false); + OmKeyInfo omKeyInfo3 = + getOmKeyInfo("sampleVol", "bucketOne", 
"dir_three", false); + + reconOMMetadataManager.getDeletedDirTable() + .put("/sampleVol/bucketOne/dir_one", omKeyInfo1); + reconOMMetadataManager.getDeletedDirTable() + .put("/sampleVol/bucketOne/dir_two", omKeyInfo2); + reconOMMetadataManager.getDeletedDirTable() + .put("/sampleVol/bucketOne/dir_three", omKeyInfo3); + + OmKeyInfo omKeyInfoCopy = + reconOMMetadataManager.getDeletedDirTable() + .get("/sampleVol/bucketOne/dir_one"); + Assertions.assertEquals("dir_one", omKeyInfoCopy.getKeyName()); + + Response deletedDirInfo = omdbInsightEndpoint.getDeletedDirInfo(2, ""); + KeyInsightInfoResponse keyInsightInfoResp = + (KeyInsightInfoResponse) deletedDirInfo.getEntity(); + Assertions.assertNotNull(keyInsightInfoResp); + Assertions.assertEquals(2, + keyInsightInfoResp.getDeletedDirInfoList().size()); + Assertions.assertEquals("/sampleVol/bucketOne/dir_one", + keyInsightInfoResp.getDeletedDirInfoList().get(0).getKey()); + } + + @Test + public void testGetDeletedDirInfoPrevKeyParam() throws Exception { + OmKeyInfo omKeyInfo1 = + getOmKeyInfo("sampleVol", "bucketOne", "dir_one", false); + OmKeyInfo omKeyInfo2 = + getOmKeyInfo("sampleVol", "bucketOne", "dir_two", false); + OmKeyInfo omKeyInfo3 = + getOmKeyInfo("sampleVol", "bucketOne", "dir_three", false); + + reconOMMetadataManager.getDeletedDirTable() + .put("/sampleVol/bucketOne/dir_one", omKeyInfo1); + reconOMMetadataManager.getDeletedDirTable() + .put("/sampleVol/bucketOne/dir_two", omKeyInfo2); + reconOMMetadataManager.getDeletedDirTable() + .put("/sampleVol/bucketOne/dir_three", omKeyInfo3); + + OmKeyInfo omKeyInfoCopy = + reconOMMetadataManager.getDeletedDirTable() + .get("/sampleVol/bucketOne/dir_one"); + Assertions.assertEquals("dir_one", omKeyInfoCopy.getKeyName()); + + Response deletedDirInfo = omdbInsightEndpoint.getDeletedDirInfo(2, + "/sampleVol/bucketOne/dir_one"); + KeyInsightInfoResponse keyInsightInfoResp = + (KeyInsightInfoResponse) deletedDirInfo.getEntity(); + Assertions.assertNotNull(keyInsightInfoResp); + Assertions.assertEquals(2, + keyInsightInfoResp.getDeletedDirInfoList().size()); + Assertions.assertEquals("/sampleVol/bucketOne/dir_three", + keyInsightInfoResp.getDeletedDirInfoList().get(0).getKey()); + Assertions.assertEquals("/sampleVol/bucketOne/dir_two", + keyInsightInfoResp.getLastKey()); + } + + @Test + public void testGetDeletedDirInfo() throws Exception { + OmKeyInfo omKeyInfo1 = + getOmKeyInfo("sampleVol", "bucketOne", "dir_one", false); + OmKeyInfo omKeyInfo2 = + getOmKeyInfo("sampleVol", "bucketOne", "dir_two", false); + OmKeyInfo omKeyInfo3 = + getOmKeyInfo("sampleVol", "bucketOne", "dir_three", false); + + reconOMMetadataManager.getDeletedDirTable() + .put("/sampleVol/bucketOne/dir_one", omKeyInfo1); + reconOMMetadataManager.getDeletedDirTable() + .put("/sampleVol/bucketOne/dir_two", omKeyInfo2); + reconOMMetadataManager.getDeletedDirTable() + .put("/sampleVol/bucketOne/dir_three", omKeyInfo3); + + OmKeyInfo omKeyInfoCopy = + reconOMMetadataManager.getDeletedDirTable() + .get("/sampleVol/bucketOne/dir_one"); + Assertions.assertEquals("dir_one", omKeyInfoCopy.getKeyName()); + + Response deletedDirInfo = omdbInsightEndpoint.getDeletedDirInfo(-1, ""); + KeyInsightInfoResponse keyInsightInfoResp = + (KeyInsightInfoResponse) deletedDirInfo.getEntity(); + Assertions.assertNotNull(keyInsightInfoResp); + Assertions.assertEquals(3, + keyInsightInfoResp.getDeletedDirInfoList().size()); + Assertions.assertEquals("/sampleVol/bucketOne/dir_one", + keyInsightInfoResp.getDeletedDirInfoList().get(0).getKey()); + 
Assertions.assertEquals("/sampleVol/bucketOne/dir_two", + keyInsightInfoResp.getLastKey()); + } +}
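Taken together, the endpoints added in this patch are all cursor-paginated the same way. A closing sketch of driving `/keys/open` from an in-process caller, mirroring the assertion style used in the tests above (the page size and loop bound are illustrative):

```java
// Sketch: walk /keys/open page by page. Legacy/OBS keys arrive in the
// "nonFSO" list and FSO files in the "fso" list; the endpoint drains the
// legacy open-key table first, then the FSO one, up to `limit` per call.
String cursor = "";
for (int page = 0; page < 10; page++) {  // illustrative upper bound
  Response resp = omdbInsightEndpoint.getOpenKeyInfo(100, cursor);
  KeyInsightInfoResponse info = (KeyInsightInfoResponse) resp.getEntity();
  int fetched = info.getNonFSOKeyInfoList().size()
      + info.getFsoKeyInfoList().size();
  System.out.printf("page %d: %d open keys/files, %d bytes unreplicated%n",
      page, fetched, info.getUnreplicatedTotal());
  if (fetched < 100) {
    break;  // short page: no more open keys
  }
  cursor = info.getLastKey();  // cursor for the next page
}
```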