diff --git a/dev-support/pmd/pmd-ruleset.xml b/dev-support/pmd/pmd-ruleset.xml
index 08a043819b85..2c7115237bce 100644
--- a/dev-support/pmd/pmd-ruleset.xml
+++ b/dev-support/pmd/pmd-ruleset.xml
@@ -28,6 +28,7 @@
+  <rule ref="category/java/bestpractices.xml/UnusedPrivateMethod"/>
  <exclude-pattern>.*/generated-sources/.*</exclude-pattern>
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java
index 72f4b310dc3c..c17ef1bfb623 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.ozone.container.keyvalue;
import static org.apache.ozone.test.GenericTestUtils.toLog4j;
-import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNull;
@@ -529,12 +528,4 @@ private KeyValueContainer createOpenContainer(int normalBlocks)
throws Exception {
return super.createContainerWithBlocks(CONTAINER_ID, normalBlocks, 0, true);
}
-
- private void containsAllStrings(String logOutput, String[] expectedMessages) {
- for (String expectedMessage : expectedMessages) {
- assertThat(logOutput)
- .withFailMessage("Log output did not contain \"" + expectedMessage + "\"")
- .contains(expectedMessage);
- }
- }
}
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/TestSecretKeyManager.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/TestSecretKeyManager.java
index 08c002a5378f..084e8a5a2b62 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/TestSecretKeyManager.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/symmetric/TestSecretKeyManager.java
@@ -52,7 +52,7 @@ public class TestSecretKeyManager {
private SecretKeyStore mockedKeyStore;
@BeforeEach
- private void setup() {
+ void setup() {
mockedKeyStore = mock(SecretKeyStore.class);
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java
index a6bbb54641a5..cf217cbff94a 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerTask.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.hdds.scm.container.balancer;
import static java.time.OffsetDateTime.now;
-import static java.util.Collections.emptyMap;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_NODE_REPORT_INTERVAL;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_NODE_REPORT_INTERVAL_DEFAULT;
import static org.apache.hadoop.util.StringUtils.byteDesc;
@@ -410,53 +409,6 @@ private ContainerBalancerTaskIterationStatusInfo createCurrentIterationStatistic
}
}
- private static ContainerBalancerTaskIterationStatusInfo getEmptyCurrentIterationStatistic(
- long iterationDuration) {
- ContainerMoveInfo containerMoveInfo = new ContainerMoveInfo(0, 0, 0, 0);
- DataMoveInfo dataMoveInfo = new DataMoveInfo(
- 0,
- 0,
- emptyMap(),
- emptyMap()
- );
- IterationInfo iterationInfo = new IterationInfo(
- 0,
- null,
- iterationDuration
- );
- return new ContainerBalancerTaskIterationStatusInfo(
- iterationInfo,
- containerMoveInfo,
- dataMoveInfo
- );
- }
-
- private ContainerBalancerTaskIterationStatusInfo getFilledCurrentIterationStatistic(int lastIterationNumber,
- long iterationDuration) {
- Map<UUID, Long> sizeEnteringDataToNodes =
- convertToNodeIdToTrafficMap(findTargetStrategy.getSizeEnteringNodes());
- Map<UUID, Long> sizeLeavingDataFromNodes =
- convertToNodeIdToTrafficMap(findSourceStrategy.getSizeLeavingNodes());
-
- ContainerMoveInfo containerMoveInfo = new ContainerMoveInfo(metrics);
- DataMoveInfo dataMoveInfo = new DataMoveInfo(
- getSizeScheduledForMoveInLatestIteration(),
- sizeActuallyMovedInLatestIteration,
- sizeEnteringDataToNodes,
- sizeLeavingDataFromNodes
- );
- IterationInfo iterationInfo = new IterationInfo(
- lastIterationNumber + 1,
- null,
- iterationDuration
- );
- return new ContainerBalancerTaskIterationStatusInfo(
- iterationInfo,
- containerMoveInfo,
- dataMoveInfo
- );
- }
-
private long getCurrentIterationDuration() {
if (currentIterationStarted == null) {
return ABSENCE_OF_DURATION;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
index a0401a7cae33..0dee75f559e5 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
@@ -997,20 +997,6 @@ public NodeStatus getNodeStatus(DatanodeDetails datanode)
return nodeManager.getNodeStatus(datanode);
}
- /**
- * An open container is healthy if all its replicas are in the same state as
- * the container.
- * @param container The container to check
- * @param replicas The replicas belonging to the container
- * @return True if the container is healthy, false otherwise
- */
- private boolean isOpenContainerHealthy(
- ContainerInfo container, Set<ContainerReplica> replicas) {
- HddsProtos.LifeCycleState state = container.getState();
- return replicas.stream()
- .allMatch(r -> compareState(state, r.getState()));
- }
-
/**
* Compares the container state with the replica state.
*
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
index ac67bd71459f..b4301e7bc6e7 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java
@@ -34,7 +34,6 @@
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
import org.slf4j.Logger;
@@ -124,19 +123,6 @@ public void insertNewDatanode(UUID datanodeID,
}
}
- //TODO: This should be called once SCMNodeManager gets Started.
- private void registerMXBean() {
- this.scmNodeStorageInfoBean = MBeans.register("StorageContainerManager",
- "scmNodeStorageInfo", this);
- }
-
- //TODO: Unregister call should happen as a part of SCMNodeManager shutdown.
- private void unregisterMXBean() {
- if (this.scmNodeStorageInfoBean != null) {
- MBeans.unregister(this.scmNodeStorageInfoBean);
- this.scmNodeStorageInfoBean = null;
- }
- }
/**
* Updates the Container list of an existing DN.
*
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java
index 7700430c7491..1f60b9c1fc78 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancerTask.java
@@ -590,16 +590,4 @@ private void startBalancer(ContainerBalancerConfiguration config)
private void stopBalancer() {
// do nothing as testcase is not threaded
}
-
- private CompletableFuture<MoveManager.MoveResult>
- genCompletableFuture(int sleepMilSec) {
- return CompletableFuture.supplyAsync(() -> {
- try {
- Thread.sleep(sleepMilSec);
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
- return MoveManager.MoveResult.COMPLETED;
- });
- }
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerScenarios.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerScenarios.java
index 66ca10d298d7..ac529b08f7f2 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerScenarios.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerScenarios.java
@@ -498,35 +498,35 @@ public static class Expectation {
private int underReplicatedQueue = 0;
private int overReplicatedQueue = 0;
- private void setUnderReplicated(int underReplicated) {
+ public void setUnderReplicated(int underReplicated) {
stateCounts.put(ReplicationManagerReport.HealthState.UNDER_REPLICATED, underReplicated);
}
- private void setOverReplicated(int overReplicated) {
+ public void setOverReplicated(int overReplicated) {
stateCounts.put(ReplicationManagerReport.HealthState.OVER_REPLICATED, overReplicated);
}
- private void setMisReplicated(int misReplicated) {
+ public void setMisReplicated(int misReplicated) {
stateCounts.put(ReplicationManagerReport.HealthState.MIS_REPLICATED, misReplicated);
}
- private void setUnhealthy(int unhealthy) {
+ public void setUnhealthy(int unhealthy) {
stateCounts.put(ReplicationManagerReport.HealthState.UNHEALTHY, unhealthy);
}
- private void setMissing(int missing) {
+ public void setMissing(int missing) {
stateCounts.put(ReplicationManagerReport.HealthState.MISSING, missing);
}
- private void setEmpty(int empty) {
+ public void setEmpty(int empty) {
stateCounts.put(ReplicationManagerReport.HealthState.EMPTY, empty);
}
- private void setQuasiClosedStuck(int quasiClosedStuck) {
+ public void setQuasiClosedStuck(int quasiClosedStuck) {
stateCounts.put(ReplicationManagerReport.HealthState.QUASI_CLOSED_STUCK, quasiClosedStuck);
}
- private void setOpenUnhealthy(int openUnhealthy) {
+ public void setOpenUnhealthy(int openUnhealthy) {
stateCounts.put(ReplicationManagerReport.HealthState.OPEN_UNHEALTHY, openUnhealthy);
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
index ef1c99ecaeb6..15dc614a321a 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
@@ -41,7 +41,6 @@
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
-import java.util.Set;
import java.util.UUID;
import java.util.concurrent.TimeoutException;
import java.util.stream.Collectors;
@@ -618,14 +617,6 @@ private boolean checkDuplicateNodesUUID(List<DatanodeDetails> nodes) {
return uuids.size() == nodes.size();
}
- private Set<PipelineID> mockPipelineIDs(int count) {
- Set<PipelineID> pipelineIDs = new HashSet<>(count);
- for (int i = 0; i < count; i++) {
- pipelineIDs.add(PipelineID.randomId());
- }
- return pipelineIDs;
- }
-
private void insertHeavyNodesIntoNodeManager(
List<DatanodeDetails> nodes, int heavyNodeCount)
throws IOException, TimeoutException {
diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSServer.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSServer.java
index 051e1a644991..f71b3d2b53da 100644
--- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSServer.java
+++ b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/HttpFSServer.java
@@ -20,7 +20,6 @@
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
-import java.nio.charset.StandardCharsets;
import java.security.AccessControlException;
import java.security.PrivilegedExceptionAction;
import java.text.MessageFormat;
@@ -52,7 +51,6 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.delegation.web.HttpUserGroupInformation;
import org.apache.ozone.fs.http.HttpFSConstants;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.AccessTimeParam;
import org.apache.ozone.fs.http.server.HttpFSParametersProvider.AclPermissionParam;
import org.apache.ozone.fs.http.server.HttpFSParametersProvider.BlockSizeParam;
import org.apache.ozone.fs.http.server.HttpFSParametersProvider.DataParam;
@@ -60,16 +58,13 @@
import org.apache.ozone.fs.http.server.HttpFSParametersProvider.ECPolicyParam;
import org.apache.ozone.fs.http.server.HttpFSParametersProvider.FilterParam;
import org.apache.ozone.fs.http.server.HttpFSParametersProvider.FsActionParam;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.GroupParam;
import org.apache.ozone.fs.http.server.HttpFSParametersProvider.LenParam;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.ModifiedTimeParam;
import org.apache.ozone.fs.http.server.HttpFSParametersProvider.NewLengthParam;
import org.apache.ozone.fs.http.server.HttpFSParametersProvider.NoRedirectParam;
import org.apache.ozone.fs.http.server.HttpFSParametersProvider.OffsetParam;
import org.apache.ozone.fs.http.server.HttpFSParametersProvider.OldSnapshotNameParam;
import org.apache.ozone.fs.http.server.HttpFSParametersProvider.OperationParam;
import org.apache.ozone.fs.http.server.HttpFSParametersProvider.OverwriteParam;
-import org.apache.ozone.fs.http.server.HttpFSParametersProvider.OwnerParam;
import org.apache.ozone.fs.http.server.HttpFSParametersProvider.PermissionParam;
import org.apache.ozone.fs.http.server.HttpFSParametersProvider.PolicyNameParam;
import org.apache.ozone.fs.http.server.HttpFSParametersProvider.RecursiveParam;
@@ -133,21 +128,6 @@ public HttpFSServer() {
}
}
-
- // First try getting a user through HttpUserGroupInformation. This will return null
- // if the built-in hadoop auth filter is not used. Fall back to getting the
- // authenticated user from the request.
- private UserGroupInformation getHttpUGI(HttpServletRequest request) {
- UserGroupInformation user = HttpUserGroupInformation.get();
- if (user != null) {
- return user;
- }
-
- return UserGroupInformation
- .createRemoteUser(request.getUserPrincipal().getName());
- }
-
-
/**
* Executes a {@link FileSystemAccess.FileSystemExecutor} using a filesystem
* for the effective user.
@@ -443,36 +423,6 @@ private Response handleGetAllStoragePolicy(String path,
return response;
}
- private Response handleGetTrashRoot(String path, UserGroupInformation user)
- throws IOException, FileSystemAccessException {
- Response response;
- FSOperations.FSTrashRoot command = new FSOperations.FSTrashRoot(path);
- JSONObject json = fsExecute(user, command);
- AUDIT_LOG.info("[{}]", path);
- response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
- return response;
- }
-
- private Response handleListStatusBatch(String path,
- Parameters params,
- UserGroupInformation user)
- throws IOException, FileSystemAccessException {
- Response response;
- String startAfter = params.get(
- HttpFSParametersProvider.StartAfterParam.NAME,
- HttpFSParametersProvider.StartAfterParam.class);
- byte[] token = HttpFSConstants.EMPTY_BYTES;
- if (startAfter != null) {
- token = startAfter.getBytes(StandardCharsets.UTF_8);
- }
- FSOperations.FSListStatusBatch command = new FSOperations
- .FSListStatusBatch(path, token);
- @SuppressWarnings("rawtypes") Map json = fsExecute(user, command);
- AUDIT_LOG.info("[{}] token [{}]", path, token);
- response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
- return response;
- }
-
private Response handleListXAttrs(String path, UserGroupInformation user)
throws IOException, FileSystemAccessException {
Response response;
@@ -510,29 +460,6 @@ private Response handleGetACLStatus(String path, UserGroupInformation user)
return response;
}
- private Response handleGetFileCheckSum(String path,
- UriInfo uriInfo,
- Parameters params,
- UserGroupInformation user)
- throws IOException, FileSystemAccessException {
- Response response;
- FSOperations.FSFileChecksum command =
- new FSOperations.FSFileChecksum(path);
-
- Boolean noRedirect = params.get(
- NoRedirectParam.NAME, NoRedirectParam.class);
- AUDIT_LOG.info("[{}]", path);
- if (noRedirect) {
- URI redirectURL = createOpenRedirectionURL(uriInfo);
- final String js = JsonUtil.toJsonString("Location", redirectURL);
- response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
- } else {
- Map json = fsExecute(user, command);
- response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
- }
- return response;
- }
-
private Response handleGetQuotaUsage(String path, UserGroupInformation user)
throws IOException, FileSystemAccessException {
Response response;
@@ -575,19 +502,6 @@ private Response handleInstrumentation(String path,
return response;
}
- private Response handleGetHomeDir(String path,
- OperationParam op,
- UserGroupInformation user)
- throws IOException, FileSystemAccessException {
- Response response;
- enforceRootPath(op.value(), path);
- FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
- JSONObject json = fsExecute(user, command);
- AUDIT_LOG.info("Home Directory for [{}]", user);
- response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
- return response;
- }
-
private Response handleListStatus(String path,
Parameters params,
UserGroupInformation user)
@@ -1159,69 +1073,6 @@ private Response handleSetACL(String path,
return response;
}
- private Response handleSetTimes(String path,
- Parameters params,
- UserGroupInformation user)
- throws IOException, FileSystemAccessException {
- Response response;
- Long modifiedTime = params.get(ModifiedTimeParam.NAME,
- ModifiedTimeParam.class);
- Long accessTime = params.get(AccessTimeParam.NAME,
- AccessTimeParam.class);
- FSOperations.FSSetTimes command
- = new FSOperations.FSSetTimes(path, modifiedTime, accessTime);
- fsExecute(user, command);
- AUDIT_LOG.info("[{}] to (M/A)[{}]", path,
- modifiedTime + ":" + accessTime);
- response = Response.ok().build();
- return response;
- }
-
- private Response handleSetReplication(String path,
- Parameters params,
- UserGroupInformation user)
- throws IOException, FileSystemAccessException {
- Response response;
- Short replication = params.get(ReplicationParam.NAME,
- ReplicationParam.class);
- FSOperations.FSSetReplication command
- = new FSOperations.FSSetReplication(path, replication);
- JSONObject json = fsExecute(user, command);
- AUDIT_LOG.info("[{}] to [{}]", path, replication);
- response = Response.ok(json).build();
- return response;
- }
-
- private Response handleSetPermission(String path,
- Parameters params,
- UserGroupInformation user)
- throws IOException, FileSystemAccessException {
- Response response;
- Short permission = params.get(PermissionParam.NAME,
- PermissionParam.class);
- FSOperations.FSSetPermission command
- = new FSOperations.FSSetPermission(path, permission);
- fsExecute(user, command);
- AUDIT_LOG.info("[{}] to [{}]", path, permission);
- response = Response.ok().build();
- return response;
- }
-
- private Response handleSetOwner(String path,
- Parameters params,
- UserGroupInformation user)
- throws IOException, FileSystemAccessException {
- Response response;
- String owner = params.get(OwnerParam.NAME, OwnerParam.class);
- String group = params.get(GroupParam.NAME, GroupParam.class);
- FSOperations.FSSetOwner command
- = new FSOperations.FSSetOwner(path, owner, group);
- fsExecute(user, command);
- AUDIT_LOG.info("[{}] to (O/G)[{}]", path, owner + ":" + group);
- response = Response.ok().build();
- return response;
- }
-
private Response handleRename(String path,
Parameters params,
UserGroupInformation user)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java
index 801a161bc751..3f6f163b9101 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java
@@ -2324,10 +2324,6 @@ private FileStatus getDirectoryStat(Path path) throws IOException {
return status;
}
- private void assertCounter(long value, String key) {
- assertEquals(value, statistics.getLong(key).longValue());
- }
-
@Test
void testSnapshotRead() throws Exception {
// Init data
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java
index 9a60f317492e..197e8b425f50 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java
@@ -1968,12 +1968,6 @@ && getOMMetrics().getNumTrashFilesDeletes()
}
- private Path getTrashKeyPath(Path keyPath, Path userTrash) {
- Path userTrashCurrent = new Path(userTrash, "Current");
- String key = keyPath.toString().substring(1);
- return new Path(userTrashCurrent, key);
- }
-
@Test
void testCreateWithInvalidPaths() {
assumeFalse(isBucketFSOptimized);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java
index 04f6c6fb5556..235dd9f78f3f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithAllRunning.java
@@ -1036,27 +1036,6 @@ private void testAddAcl(String remoteUserName, OzoneObj ozoneObj,
assertTrue(addAcl);
}
- private void testAddLinkAcl(String remoteUserName, OzoneObj ozoneObj,
- OzoneAcl userAcl) throws Exception {
- ObjectStore objectStore = getObjectStore();
- boolean addAcl = objectStore.addAcl(ozoneObj, userAcl);
- assertTrue(addAcl);
-
- List<OzoneAcl> acls = objectStore.getAcl(ozoneObj);
-
- assertTrue(containsAcl(userAcl, acls));
-
- // Add an already existing acl.
- addAcl = objectStore.addAcl(ozoneObj, userAcl);
- assertFalse(addAcl);
-
- // Add an acl by changing acl type with same type, name and scope.
- userAcl = new OzoneAcl(USER, remoteUserName,
- DEFAULT, WRITE);
- addAcl = objectStore.addAcl(ozoneObj, userAcl);
- assertTrue(addAcl);
- }
-
private void testRemoveAcl(String remoteUserName, OzoneObj ozoneObj,
OzoneAcl userAcl) throws Exception {
ObjectStore objectStore = getObjectStore();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java
index 252e426714eb..5d1634e0639c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java
@@ -59,7 +59,6 @@
import static org.junit.jupiter.api.Assumptions.assumeTrue;
import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
import jakarta.annotation.Nonnull;
import java.io.File;
import java.io.IOException;
@@ -73,7 +72,6 @@
import java.util.Iterator;
import java.util.List;
import java.util.Map;
-import java.util.Set;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
@@ -96,8 +94,6 @@
import org.apache.hadoop.hdds.utils.db.DBProfile;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.RDBStore;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
@@ -116,7 +112,6 @@
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.om.KeyManagerImpl;
import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.OmSnapshot;
import org.apache.hadoop.ozone.om.OmSnapshotManager;
@@ -126,9 +121,7 @@
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.KeyInfoWithVolumeContext;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
-import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
-import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
import org.apache.hadoop.ozone.om.service.SnapshotDiffCleanupService;
@@ -589,30 +582,6 @@ public void testCreateSnapshotMissingMandatoryParams() throws Exception {
() -> createSnapshot(nullstr, bucket));
}
- private Set<OmKeyInfo> getDeletedKeysFromRocksDb(
- OMMetadataManager metadataManager) throws IOException {
- Set<OmKeyInfo> deletedKeys = Sets.newHashSet();
- try (TableIterator<String, ? extends Table.KeyValue<String, RepeatedOmKeyInfo>>
- deletedTableIterator = metadataManager.getDeletedTable()
- .iterator()) {
- while (deletedTableIterator.hasNext()) {
- Table.KeyValue<String, RepeatedOmKeyInfo> val =
- deletedTableIterator.next();
- deletedKeys.addAll(val.getValue().getOmKeyInfoList());
- }
- }
-
- try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
- deletedDirTableIterator = metadataManager.getDeletedDirTable()
- .iterator()) {
- while (deletedDirTableIterator.hasNext()) {
- deletedKeys.add(deletedDirTableIterator.next().getValue());
- }
- }
- return deletedKeys;
- }
-
private void getOmKeyInfo(String volume, String bucket,
String key) throws IOException {
ResolvedBucket resolvedBucket = new ResolvedBucket(volume, bucket,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java
index b637d6b1eeb5..ac562d2e3743 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java
@@ -49,8 +49,6 @@
import org.apache.hadoop.hdds.scm.HddsTestUtils;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.utils.db.RDBStore;
-import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
@@ -416,16 +414,6 @@ private void addKeys(int start, int end) throws Exception {
}
}
- private long getTableKeyCount(TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> iterator) {
- long keyCount = 0;
- while (iterator.hasNext()) {
- keyCount++;
- iterator.next();
- }
- return keyCount;
- }
-
private static OmKeyLocationInfo getOmKeyLocationInfo(BlockID blockID,
Pipeline pipeline) {
return new OmKeyLocationInfo.Builder()
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java
index 7f4833566ff0..929b618aa44f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java
@@ -26,10 +26,8 @@
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
-import static org.junit.jupiter.api.Assertions.assertThrows;
import com.fasterxml.jackson.databind.ObjectMapper;
-import com.google.common.base.Strings;
import java.io.File;
import java.io.IOException;
import java.io.PrintWriter;
@@ -214,26 +212,6 @@ private int executeHA(GenericCli shell, String[] args) {
return execute(shell, newArgs);
}
- /**
- * Execute command, assert exception message and returns true if error
- * was thrown.
- */
- private void executeWithError(OzoneShell shell, String[] args,
- String expectedError) {
- if (Strings.isNullOrEmpty(expectedError)) {
- execute(shell, args);
- } else {
- Exception ex = assertThrows(Exception.class, () -> execute(shell, args));
- if (!Strings.isNullOrEmpty(expectedError)) {
- Throwable exceptionToCheck = ex;
- if (exceptionToCheck.getCause() != null) {
- exceptionToCheck = exceptionToCheck.getCause();
- }
- assertThat(exceptionToCheck.getMessage()).contains(expectedError);
- }
- }
- }
-
private String getSetConfStringFromConf(String key) {
return String.format("--set=%s=%s", key, conf.get(key));
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
index 14409f8c6387..21f8b9fa28ff 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmSnapshotManager.java
@@ -553,26 +553,6 @@ private static void performOperationOnKeys(
}
}
- /**
- * Helper method to do deleteRange on a table, including endKey.
- * TODO: Do remove this method, it is not used anywhere. Need to check if
- * deleteRange causes RocksDB corruption.
- * TODO: Move this into {@link Table} ?
- * @param table Table
- * @param beginKey begin key
- * @param endKey end key
- */
- private static void deleteRangeInclusive(
- Table<String, ?> table, String beginKey, String endKey)
- throws IOException {
-
- if (endKey != null) {
- table.deleteRange(beginKey, endKey);
- // Remove range end key itself
- table.delete(endKey);
- }
- }
-
/**
* Helper method to delete DB keys in the snapshot scope (bucket)
* from active DB's deletedTable.
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java
index 4ae7f5a0e2c4..66a1da44acc7 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java
@@ -37,8 +37,6 @@
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
import org.apache.hadoop.hdds.utils.BackgroundService;
-import org.apache.hadoop.hdds.utils.db.BatchOperation;
-import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.ClientVersion;
import org.apache.hadoop.ozone.common.BlockGroup;
@@ -132,38 +130,6 @@ protected int processKeyDeletes(List<BlockGroup> keyBlocksList,
return delCount;
}
- /**
- * Deletes all the keys that SCM has acknowledged and queued for delete.
- *
- * @param results DeleteBlockGroups returned by SCM.
- * @throws IOException on Error
- */
- private int deleteAllKeys(List<DeleteBlockGroupResult> results,
- KeyManager manager) throws IOException {
- Table<String, RepeatedOmKeyInfo> deletedTable =
- manager.getMetadataManager().getDeletedTable();
- DBStore store = manager.getMetadataManager().getStore();
-
- // Put all keys to delete in a single transaction and call for delete.
- int deletedCount = 0;
- try (BatchOperation writeBatch = store.initBatchOperation()) {
- for (DeleteBlockGroupResult result : results) {
- if (result.isSuccess()) {
- // Purge key from OM DB.
- deletedTable.deleteWithBatch(writeBatch,
- result.getObjectKey());
- if (LOG.isDebugEnabled()) {
- LOG.debug("Key {} deleted from OM DB", result.getObjectKey());
- }
- deletedCount++;
- }
- }
- // Write a single transaction for delete.
- store.commitBatchOperation(writeBatch);
- }
- return deletedCount;
- }
-
/**
* Submits PurgeKeys request for the keys whose blocks have been deleted
* by SCM.
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java
index 602b79a1b0ef..99f34d863de9 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotDiffManager.java
@@ -1444,28 +1444,6 @@ private boolean areAclsSame(OmDirectoryInfo fromObject,
return fromObject.getAcls().equals(toObject.getAcls());
}
- private boolean isBlockLocationSame(
- String fromObjectName,
- String toObjectName,
- final Table<String, ? extends WithObjectID> fromSnapshotTable,
- final Table<String, ? extends WithObjectID> toSnapshotTable
- ) throws IOException {
- Objects.requireNonNull(fromObjectName, "fromObjectName is null.");
- Objects.requireNonNull(toObjectName, "toObjectName is null.");
-
- final WithObjectID fromObject = fromSnapshotTable.get(fromObjectName);
- final WithObjectID toObject = toSnapshotTable.get(toObjectName);
-
- if (!(fromObject instanceof OmKeyInfo) ||
- !(toObject instanceof OmKeyInfo)) {
- throw new IllegalStateException("fromObject or toObject is not of " +
- "OmKeyInfo");
- }
-
- return SnapshotDeletingService.isBlockLocationInfoSame(
- (OmKeyInfo) fromObject, (OmKeyInfo) toObject);
- }
-
private PersistentList createDiffReportPersistentList(
ColumnFamilyHandle columnFamilyHandle
) {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManager.java
index 41f4e2a5bc4b..7cad5c943110 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/upgrade/OMLayoutVersionManager.java
@@ -142,13 +142,6 @@ public static Set<Class<? extends OMClientRequest>> getRequestClasses(
return validRequests;
}
- private void registerRequestType(String type, int version,
- Class<? extends OMClientRequest> reqClass) {
- VersionFactoryKey key = new VersionFactoryKey.Builder()
- .key(type).version(version).build();
- requestFactory.register(this, key, reqClass);
- }
-
/**
* Given a type and version, get the corresponding request class type.
* @param type type string
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java
index e190f4487a99..980eb41c759c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java
@@ -234,18 +234,6 @@ public long updateToken(Token<OzoneTokenIdentifier> token,
return renewTime;
}
- /**
- * Stores given identifier in token store.
- */
- private void addToTokenStore(OzoneTokenIdentifier identifier,
- byte[] password, long renewTime)
- throws IOException {
- TokenInfo tokenInfo = new TokenInfo(renewTime, password,
- identifier.getTrackingId());
- currentTokens.put(identifier, tokenInfo);
- store.storeToken(identifier, tokenInfo.getRenewDate());
- }
-
/**
* Updates issue date, master key id and sequence number for identifier.
*
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java
index bf6dbc4d6a95..3dfe9de0203b 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java
@@ -455,8 +455,4 @@ public void testLinkedBucketResolution() throws Exception {
bucketInfo.getIsVersionEnabled(),
storedLinkBucket.getIsVersionEnabled());
}
-
- private BucketLayout getBucketLayout() {
- return BucketLayout.DEFAULT;
- }
}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java
index dc619920a98d..f6587010c0ee 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java
@@ -61,8 +61,6 @@
import org.apache.hadoop.ozone.om.response.volume.OMVolumeCreateResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketInfo;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteBucketResponse;
-import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Daemon;
import org.apache.ozone.test.GenericTestUtils;
@@ -487,18 +485,5 @@ private OMBucketCreateResponse createBucket(String volumeName,
doubleBuffer.add(omClientResponse, termIndex);
return (OMBucketCreateResponse) omClientResponse;
}
-
- /**
- * Create OMBucketDeleteResponse for specified volume and bucket.
- * @return OMBucketDeleteResponse
- */
- private OMBucketDeleteResponse deleteBucket(String volumeName,
- String bucketName) {
- return new OMBucketDeleteResponse(OMResponse.newBuilder()
- .setCmdType(OzoneManagerProtocolProtos.Type.DeleteBucket)
- .setStatus(OzoneManagerProtocolProtos.Status.OK)
- .setDeleteBucketResponse(DeleteBucketResponse.newBuilder().build())
- .build(), volumeName, bucketName);
- }
}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java
index 12f4ff2c8623..53e26c347dbb 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java
@@ -292,11 +292,6 @@ private List<OzoneAcl> getBucketAcls(String vol, String buck)
return OzoneNativeAclTestUtil.getBucketAcls(metadataManager, vol, buck);
}
- private List<OzoneAcl> getKeyAcls(String vol, String buck, String key)
- throws IOException {
- return OzoneNativeAclTestUtil.getKeyAcls(metadataManager, vol, buck, getBucketLayout(), key);
- }
-
private void setBucketAcl(String vol, String buck,
List<OzoneAcl> ozoneAcls) throws IOException {
OzoneNativeAclTestUtil.setBucketAcl(metadataManager, vol, buck, ozoneAcls);
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
index 9b16643d0f3f..61d1863141aa 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
@@ -1268,10 +1268,6 @@ private void waitAndCheckConditionAfterHeartbeat(Callable<Boolean> check)
LambdaTestUtils.await(30000, 1000, check);
}
- private BucketLayout getBucketLayout() {
- return BucketLayout.DEFAULT;
- }
-
@Test
public void testExplicitRemovalOfDecommissionedNode() throws Exception {
Response response = nodeEndpoint.getDatanodes();
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketPut.java
index 9c37d3b2b3f0..6f5d4d5d13bd 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketPut.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketPut.java
@@ -21,12 +21,10 @@
import static java.net.HttpURLConnection.HTTP_NOT_FOUND;
import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.BUCKET_ALREADY_EXISTS;
import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.MALFORMED_HEADER;
-import static org.apache.hadoop.ozone.s3.signature.SignatureProcessor.DATE_FORMATTER;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import java.time.LocalDate;
import javax.ws.rs.core.Response;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.client.OzoneClient;
@@ -88,18 +86,4 @@ public void testBucketFailWithInvalidHeader() throws Exception {
assertEquals(MALFORMED_HEADER.getCode(), ex.getCode());
}
}
-
- /**
- * Generate dummy auth header.
- * @return auth header.
- */
- private String generateAuthHeader() {
- LocalDate now = LocalDate.now();
- String curDate = DATE_FORMATTER.format(now);
- return "AWS4-HMAC-SHA256 " +
- "Credential=ozone/" + curDate + "/us-east-1/s3/aws4_request, " +
- "SignedHeaders=host;range;x-amz-date, " +
- "Signature=fe5f80f77d5fa3beca038a248ff027";
- }
-
}