diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/JsonUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/JsonUtils.java
index 538dd522d01..489cf3c41ce 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/JsonUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/server/JsonUtils.java
@@ -20,9 +20,11 @@
 import java.io.File;
 import java.io.IOException;
+import java.util.HashMap;
 import java.util.List;
 
 import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.core.type.TypeReference;
 import com.fasterxml.jackson.databind.MappingIterator;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.ObjectReader;
 
@@ -81,6 +83,14 @@ public static JsonNode readTree(String content) throws IOException {
     return MAPPER.readTree(content);
   }
 
+  public static List<HashMap<String, Object>> readTreeAsListOfMaps(String json)
+      throws IOException {
+    return MAPPER.readValue(json,
+        new TypeReference<List<HashMap<String, Object>>>() {
+        });
+  }
+
+
   /**
    * Utility to sequentially write a large collection of items to a file.
    */
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java
index c0e5acc20e7..d52b0e99b2f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java
@@ -38,6 +38,7 @@
 import java.util.Collections;
 import java.util.List;
 import java.util.Optional;
+import java.util.HashMap;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.hdds.client.BlockID;
@@ -46,6 +47,7 @@
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.HddsTestUtils;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.server.JsonUtils;
 import org.apache.hadoop.hdds.utils.db.RDBStore;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.TableIterator;
@@ -65,8 +67,6 @@
 import org.apache.http.impl.client.HttpClientBuilder;
 import org.apache.http.util.EntityUtils;
 
-import com.google.gson.Gson;
-import com.google.gson.internal.LinkedTreeMap;
 import org.apache.ozone.test.GenericTestUtils;
 import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.BeforeAll;
@@ -381,16 +381,23 @@ private static OmKeyLocationInfoGroup getOmKeyLocationInfoGroup() {
 
   private long getReconTaskAttributeFromJson(String taskStatusResponse,
                                              String taskName,
-                                             String entityAttribute) {
-    ArrayList<LinkedTreeMap> taskStatusList = new Gson()
-        .fromJson(taskStatusResponse, ArrayList.class);
-    Optional<LinkedTreeMap> taskEntity =
-        taskStatusList
-            .stream()
-            .filter(task -> task.get("taskName").equals(taskName))
-            .findFirst();
-    assertTrue(taskEntity.isPresent());
-    return (long) (double) taskEntity.get().get(entityAttribute);
+                                             String entityAttribute)
+      throws IOException {
+    List<HashMap<String, Object>> taskStatusList =
+        JsonUtils.readTreeAsListOfMaps(taskStatusResponse);
+
+    // Stream through the list to find the task entity matching the taskName
+    Optional<HashMap<String, Object>> taskEntity = taskStatusList.stream()
+        .filter(task -> taskName.equals(task.get("taskName")))
+        .findFirst();
+
+    if (taskEntity.isPresent()) {
+      Number number = (Number) taskEntity.get().get(entityAttribute);
+      return number.longValue();
+    } else {
+      throw new IOException(
taskName + " not found"); + } } /** diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java index 0585fea000c..2f46729d525 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/DiskUsageSubCommand.java @@ -17,18 +17,16 @@ */ package org.apache.hadoop.ozone.admin.nssummary; -import com.google.gson.internal.LinkedTreeMap; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.server.JsonUtils; import org.apache.hadoop.ozone.shell.ListOptions; import picocli.CommandLine; -import java.util.ArrayList; -import java.util.HashMap; import java.util.concurrent.Callable; - +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.node.ArrayNode; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; -import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.getResponseMap; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.makeHttpCall; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.parseInputPath; import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.printEmptyPathRequest; @@ -101,21 +99,20 @@ public Void call() throws Exception { return null; } - HashMap duResponse = getResponseMap(response); + JsonNode duResponse = JsonUtils.readTree(response); - if (duResponse.get("status").equals("PATH_NOT_FOUND")) { + if (duResponse.get("status").asText().equals("PATH_NOT_FOUND")) { printPathNotFound(); } else { if (parent.isNotValidBucketOrOBSBucket(path)) { printBucketReminder(); } - long totalSize = (long)(double)duResponse.get("size"); - + long totalSize = duResponse.get("size").asLong(); if (!noHeader) { printWithUnderline("Path", false); printKVSeparator(); - System.out.println(duResponse.get("path")); + System.out.println(duResponse.get("path").asText()); printWithUnderline("Total Size", false); printKVSeparator(); @@ -124,11 +121,11 @@ public Void call() throws Exception { if (withReplica) { printWithUnderline("Total Disk Usage", false); printKVSeparator(); - long du = (long)(double)duResponse.get("sizeWithReplica"); + long du = duResponse.get("sizeWithReplica").asLong(); System.out.println(FileUtils.byteCountToDisplaySize(du)); } - long sizeDirectKey = (long)(double)duResponse.get("sizeDirectKey"); + long sizeDirectKey = duResponse.get("sizeDirectKey").asLong(); if (!listFiles && sizeDirectKey != -1) { printWithUnderline("Size of Direct Keys", false); printKVSeparator(); @@ -137,7 +134,7 @@ public Void call() throws Exception { printNewLines(1); } - if ((double)duResponse.get("subPathCount") == 0) { + if (duResponse.get("subPathCount").asInt() == 0) { if (totalSize == 0) { // the object is empty System.out.println("The object is empty.\n" + @@ -160,20 +157,19 @@ public Void call() throws Exception { seekStr = ""; } - ArrayList duData = (ArrayList)duResponse.get("subPaths"); + ArrayNode subPaths = (ArrayNode) duResponse.get("subPaths"); int cnt = 0; - for (int i = 0; i < duData.size(); ++i) { + for (JsonNode subPathDU : subPaths) { if (cnt >= limit) { break; } - LinkedTreeMap subPathDU = (LinkedTreeMap) duData.get(i); - String subPath = subPathDU.get("path").toString(); + String subPath = subPathDU.get("path").asText(); // 
       // differentiate key from other types
-      if (!(boolean)subPathDU.get("isKey")) {
+      if (!subPathDU.get("isKey").asBoolean()) {
         subPath += OM_KEY_PREFIX;
       }
-      long size = (long)(double)subPathDU.get("size");
-      long sizeWithReplica = (long)(double)subPathDU.get("sizeWithReplica");
+      long size = subPathDU.get("size").asLong();
+      long sizeWithReplica = subPathDU.get("sizeWithReplica").asLong();
       if (subPath.startsWith(seekStr)) {
         printDURow(subPath, size, sizeWithReplica);
         ++cnt;
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/FileSizeDistSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/FileSizeDistSubCommand.java
index f74ee109504..0af263dbe31 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/FileSizeDistSubCommand.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/FileSizeDistSubCommand.java
@@ -17,15 +17,14 @@
  */
 package org.apache.hadoop.ozone.admin.nssummary;
 
+import com.fasterxml.jackson.databind.JsonNode;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.server.JsonUtils;
 import picocli.CommandLine;
 
-import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.concurrent.Callable;
 
-import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.getResponseMap;
 import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.makeHttpCall;
 import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.printEmptyPathRequest;
 import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.printBucketReminder;
@@ -73,11 +72,11 @@ public Void call() throws Exception {
       printNewLines(1);
       return null;
     }
-    HashMap distResponse = getResponseMap(response);
+    JsonNode distResponse = JsonUtils.readTree(response);
 
-    if (distResponse.get("status").equals("PATH_NOT_FOUND")) {
+    if ("PATH_NOT_FOUND".equals(distResponse.path("status").asText())) {
       printPathNotFound();
-    } else if (distResponse.get("status").equals("TYPE_NOT_APPLICABLE")) {
+    } else if ("TYPE_NOT_APPLICABLE".equals(distResponse.path("status").asText())) {
       printTypeNA("File Size Distribution");
     } else {
       if (parent.isNotValidBucketOrOBSBucket(path)) {
@@ -85,11 +84,11 @@ public Void call() throws Exception {
       }
 
       printWithUnderline("File Size Distribution", true);
-      ArrayList fileSizeDist = (ArrayList) distResponse.get("dist");
+      JsonNode fileSizeDist = distResponse.path("dist");
       double sum = 0;
 
       for (int i = 0; i < fileSizeDist.size(); ++i) {
-        sum += (double) fileSizeDist.get(i);
+        sum += fileSizeDist.get(i).asDouble();
       }
       if (sum == 0) {
         printSpaces(2);
@@ -100,11 +99,11 @@ public Void call() throws Exception {
       }
 
       for (int i = 0; i < fileSizeDist.size(); ++i) {
-        if ((double)fileSizeDist.get(i) == 0) {
+        if (fileSizeDist.get(i).asDouble() == 0) {
           continue;
         }
         String label = convertBinIndexToReadableRange(i);
-        printDistRow(label, (double) fileSizeDist.get(i), sum);
+        printDistRow(label, fileSizeDist.get(i).asDouble(), sum);
       }
     }
     printNewLines(1);
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryCLIUtils.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryCLIUtils.java
index 729aa20c5ce..9aff2e9999a 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryCLIUtils.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryCLIUtils.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.ozone.admin.nssummary;
 
-import com.google.gson.Gson;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 
@@ -31,7 +30,6 @@
 import java.net.HttpURLConnection;
 import java.net.URL;
 import java.nio.charset.StandardCharsets;
-import java.util.HashMap;
 
 import static java.net.HttpURLConnection.HTTP_CREATED;
 import static java.net.HttpURLConnection.HTTP_OK;
@@ -107,10 +105,6 @@ public static String makeHttpCall(StringBuffer url, String path,
     }
   }
 
-  public static HashMap getResponseMap(String response) {
-    return new Gson().fromJson(response, HashMap.class);
-  }
-
   public static void printNewLines(int cnt) {
     for (int i = 0; i < cnt; ++i) {
       System.out.println();
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/QuotaUsageSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/QuotaUsageSubCommand.java
index 113193c929b..1e4e719baf8 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/QuotaUsageSubCommand.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/QuotaUsageSubCommand.java
@@ -17,14 +17,14 @@
  */
 package org.apache.hadoop.ozone.admin.nssummary;
 
+import com.fasterxml.jackson.databind.JsonNode;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.server.JsonUtils;
 import picocli.CommandLine;
 
-import java.util.HashMap;
 import java.util.concurrent.Callable;
 
-import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.getResponseMap;
 import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.makeHttpCall;
 import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.printEmptyPathRequest;
 import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.printBucketReminder;
@@ -73,11 +73,11 @@ public Void call() throws Exception {
       return null;
     }
 
-    HashMap quotaResponse = getResponseMap(response);
+    JsonNode quotaResponse = JsonUtils.readTree(response);
 
-    if (quotaResponse.get("status").equals("PATH_NOT_FOUND")) {
+    if ("PATH_NOT_FOUND".equals(quotaResponse.path("status").asText())) {
      printPathNotFound();
-    } else if (quotaResponse.get("status").equals("TYPE_NOT_APPLICABLE")) {
+    } else if ("TYPE_NOT_APPLICABLE".equals(quotaResponse.path("status").asText())) {
       printTypeNA("Quota");
     } else {
       if (parent.isNotValidBucketOrOBSBucket(path)) {
@@ -85,8 +85,10 @@ public Void call() throws Exception {
       }
 
       printWithUnderline("Quota", true);
-      long quotaAllowed = (long)(double)quotaResponse.get("allowed");
-      long quotaUsed = (long)(double)quotaResponse.get("used");
+
+      long quotaAllowed = quotaResponse.get("allowed").asLong();
+      long quotaUsed = quotaResponse.get("used").asLong();
+
       printSpaces(2);
       System.out.print("Allowed");
       printKVSeparator();
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/SummarySubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/SummarySubCommand.java
index 9180274b9c7..11cc1e926cd 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/SummarySubCommand.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/SummarySubCommand.java
@@ -17,13 +17,13 @@
  */
 package org.apache.hadoop.ozone.admin.nssummary;
 
+import com.fasterxml.jackson.databind.JsonNode;
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.server.JsonUtils;
 import picocli.CommandLine;
 
-import java.util.HashMap;
 import java.util.concurrent.Callable;
 
-import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.getResponseMap;
 import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.makeHttpCall;
 import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.parseInputPath;
 import static org.apache.hadoop.ozone.admin.nssummary.NSSummaryCLIUtils.printEmptyPathRequest;
@@ -71,9 +71,9 @@ public Void call() throws Exception {
       printNewLines(1);
       return null;
     }
-    HashMap summaryResponse = getResponseMap(response);
+    JsonNode summaryResponse = JsonUtils.readTree(response);
 
-    if (summaryResponse.get("status").equals("PATH_NOT_FOUND")) {
+    if ("PATH_NOT_FOUND".equals(summaryResponse.path("status").asText())) {
       printPathNotFound();
     } else {
       if (parent.isNotValidBucketOrOBSBucket(path)) {
@@ -83,10 +83,11 @@ public Void call() throws Exception {
       printWithUnderline("Entity Type", false);
       printKVSeparator();
       System.out.println(summaryResponse.get("type"));
-      int numVol = ((Double) summaryResponse.get("numVolume")).intValue();
-      int numBucket = ((Double) summaryResponse.get("numBucket")).intValue();
-      int numDir = ((Double) summaryResponse.get("numDir")).intValue();
-      int numKey = ((Double) summaryResponse.get("numKey")).intValue();
+
+      int numVol = summaryResponse.get("numVolume").asInt();
+      int numBucket = summaryResponse.get("numBucket").asInt();
+      int numDir = summaryResponse.get("numDir").asInt();
+      int numKey = summaryResponse.get("numKey").asInt();
 
       if (numVol != -1) {
         printWithUnderline("Volumes", false);
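
A minimal usage sketch of the new JsonUtils.readTreeAsListOfMaps helper, showing why the callers above go through a Number cast instead of Gson's old (long)(double) double-cast. The JSON payload and field values below are illustrative assumptions, not part of the patch:

    // Hypothetical task-status style payload; any JSON array of objects works.
    String json = "[{\"taskName\":\"OmDeltaTask\",\"lastUpdatedSeqNumber\":42}]";

    // Jackson binds untyped JSON integers to Integer or Long depending on
    // magnitude (Gson bound every number to Double), so read numeric
    // attributes back through Number rather than a fixed cast.
    List<HashMap<String, Object>> tasks = JsonUtils.readTreeAsListOfMaps(json);
    Number seq = (Number) tasks.get(0).get("lastUpdatedSeqNumber");
    long value = seq.longValue(); // 42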