Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -2188,13 +2188,9 @@ public List<OzoneFileStatus> listStatus(OmKeyArgs args, boolean recursive,

countEntries = 0;
// Convert results in cacheKeyMap to List
for (Map.Entry<String, OzoneFileStatus> entry : cacheKeyMap.entrySet()) {
for (OzoneFileStatus fileStatus : cacheKeyMap.values()) {
// No need to check if a key is deleted or not here, this is handled
// when adding entries to cacheKeyMap from DB.
OzoneFileStatus fileStatus = entry.getValue();
if (fileStatus.isFile()) {
refreshPipeline(fileStatus.getKeyInfo());
}
fileStatusList.add(fileStatus);
countEntries++;
if (countEntries >= numEntries) {
Expand All @@ -2209,14 +2205,18 @@ public List<OzoneFileStatus> listStatus(OmKeyArgs args, boolean recursive,
bucketName);
}

List<OmKeyInfo> keyInfoList = new ArrayList<>(fileStatusList.size());
for (OzoneFileStatus fileStatus : fileStatusList) {
if (args.getRefreshPipeline()) {
refreshPipeline(fileStatus.getKeyInfo());
}
if (args.getSortDatanodes()) {
keyInfoList.add(fileStatus.getKeyInfo());
}
refreshPipeline(keyInfoList);
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Just a question: listStatus is limited by batch size, but we are making a single RPC call to SCM for all the containers we got from this list — is this fine here? My question is: if there is a large number of container IDs (due to larger key sizes), will the RPC response size become an issue?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

listKeys() is also limited by batch size and already does the same single refresh call:

List<OmKeyInfo> keyList = metadataManager.listKeys(volumeName, bucketName,
startKey, keyPrefix, maxKeys);
refreshPipeline(keyList);

Copy link
Contributor

@bharatviswa504 bharatviswa504 Nov 17, 2020

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

My question is: keyList/listStatus is limited by batch size, but if keys are huge in size, then a key might have a long list of container IDs. Calling SCM with all those container IDs to fetch ContainerWithPipeline — will it cause the RPC response size to be exceeded?

listKeys() is also limited by batch size and already does the same single refresh call:

Then this might be a problem for listKeys also, but do you see this as a problem?

Copy link
Contributor Author

@adoroszlai adoroszlai Nov 18, 2020

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Then this might be a problem for listKeys also, but do you see this as a problem?

I don't see this as an immediate problem.

As far as I understand, batch size for listKeys and listStatus is a convenience for the client, not a safety guarantee for the server. If RPC response size is a problem when performing getContainerWithPipelineBatch call for multiple keys, then the same can be triggered by the client simply by increasing batch size (if there are enough keys in the bucket).

Copy link
Contributor

@bharatviswa504 bharatviswa504 Nov 19, 2020

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ya, agreed. Not only that — thinking about it more, if the container IDs cause an issue even with the normal batch size, then the same will be true for the OMResponse of listKeys as well, since it includes both KeyInfo and Pipeline.


if (args.getSortDatanodes()) {
for (OzoneFileStatus fileStatus : fileStatusList) {
sortDatanodeInPipeline(fileStatus.getKeyInfo(), clientAddress);
}
}

return fileStatusList;
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -43,12 +43,14 @@
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.pipeline.MockPipeline;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs.Builder;
Expand All @@ -61,6 +63,7 @@
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager;
Expand All @@ -73,13 +76,17 @@
import org.junit.Test;
import org.mockito.Mockito;

import static java.util.Collections.singletonList;
import static org.mockito.Mockito.verify;

/**
* Unit test key manager.
*/
public class TestKeyManagerUnit {

private OzoneConfiguration configuration;
private OmMetadataManagerImpl metadataManager;
private StorageContainerLocationProtocol containerClient;
private KeyManagerImpl keyManager;

private Instant startDate;
Expand All @@ -92,8 +99,10 @@ public void setup() throws IOException {
configuration.set(HddsConfigKeys.OZONE_METADATA_DIRS,
testDir.toString());
metadataManager = new OmMetadataManagerImpl(configuration);
containerClient = Mockito.mock(StorageContainerLocationProtocol.class);
keyManager = new KeyManagerImpl(
Mockito.mock(ScmBlockLocationProtocol.class),
containerClient,
metadataManager,
configuration,
"omtest",
Expand Down Expand Up @@ -332,13 +341,6 @@ private void abortMultipart(

@Test
public void testLookupFileWithDnFailure() throws IOException {
final StorageContainerLocationProtocol containerClient =
Mockito.mock(StorageContainerLocationProtocol.class);
final KeyManager manager = new KeyManagerImpl(null,
new ScmClient(Mockito.mock(ScmBlockLocationProtocol.class),
containerClient), metadataManager, configuration, "test-om",
Mockito.mock(OzoneBlockTokenSecretManager.class), null, null);

final DatanodeDetails dnOne = MockDatanodeDetails.randomDatanodeDetails();
final DatanodeDetails dnTwo = MockDatanodeDetails.randomDatanodeDetails();
final DatanodeDetails dnThree = MockDatanodeDetails.randomDatanodeDetails();
Expand Down Expand Up @@ -400,9 +402,9 @@ public void testLookupFileWithDnFailure() throws IOException {
.setVolumeName("volumeOne")
.setBucketName("bucketOne")
.setKeyName("keyOne")
.setOmKeyLocationInfos(Collections.singletonList(
.setOmKeyLocationInfos(singletonList(
new OmKeyLocationInfoGroup(0,
Collections.singletonList(keyLocationInfo))))
singletonList(keyLocationInfo))))
.setCreationTime(Time.now())
.setModificationTime(Time.now())
.setDataSize(256000)
Expand All @@ -417,7 +419,7 @@ public void testLookupFileWithDnFailure() throws IOException {
.setBucketName("bucketOne")
.setKeyName("keyOne");

final OmKeyInfo newKeyInfo = manager
final OmKeyInfo newKeyInfo = keyManager
.lookupFile(keyArgs.build(), "test");

final OmKeyLocationInfo newBlockLocation = newKeyInfo
Expand All @@ -436,4 +438,57 @@ public void testLookupFileWithDnFailure() throws IOException {
.getNodes().contains(dnSix));
}

@Test
// Verifies that listStatus() returns every key under the bucket and that the
// pipeline refresh is performed as a SINGLE batched SCM call
// (getContainerWithPipelineBatch) covering all container IDs, rather than one
// RPC per key — this is the behavior change under review in this PR.
public void listStatus() throws Exception {
String volume = "vol";
String bucket = "bucket";
String keyPrefix = "key";

// Fixture: create the volume and bucket directly in the OM metadata store.
TestOMRequestUtils.addVolumeToDB(volume, OzoneConsts.OZONE,
metadataManager);

TestOMRequestUtils.addBucketToDB(volume, bucket, metadataManager);

// One mock 3-node pipeline shared by all block locations below.
final Pipeline pipeline = MockPipeline.createPipeline(3);

// Shared builder; per-key fields (name/objectID/updateID) are overwritten
// on each loop iteration before build().
OmKeyInfo.Builder keyInfoBuilder = new OmKeyInfo.Builder()
.setVolumeName(volume)
.setBucketName(bucket)
.setCreationTime(Time.now())
.setOmKeyLocationInfos(singletonList(
new OmKeyLocationInfoGroup(0, new ArrayList<>())))
.setReplicationFactor(ReplicationFactor.THREE)
.setReplicationType(ReplicationType.RATIS);

// Insert 10 keys ("key1".."key10"), each with one block whose container ID
// equals the loop index; collect those IDs as the expected batch argument.
List<Long> containerIDs = new ArrayList<>();
for (long i = 1; i <= 10; i++) {
final OmKeyLocationInfo keyLocationInfo = new OmKeyLocationInfo.Builder()
.setBlockID(new BlockID(i, 1L))
.setPipeline(pipeline)
.setOffset(0)
.setLength(256000)
.build();

containerIDs.add(i);

OmKeyInfo keyInfo = keyInfoBuilder
.setKeyName(keyPrefix + i)
.setObjectID(i)
.setUpdateID(i)
.build();
keyInfo.appendNewBlocks(singletonList(keyLocationInfo), false);
TestOMRequestUtils.addKeyToOM(metadataManager, keyInfo);
}

// Empty key name + non-recursive listing: list everything directly under
// the bucket; Long.MAX_VALUE removes any batch-size cap for this test.
OmKeyArgs.Builder builder = new OmKeyArgs.Builder()
.setVolumeName(volume)
.setBucketName(bucket)
.setKeyName("");
List<OzoneFileStatus> fileStatusList =
keyManager.listStatus(builder.build(), false, null, Long.MAX_VALUE);

Assert.assertEquals(10, fileStatusList.size());
// Exactly one batched SCM lookup for all 10 container IDs (containerClient
// is the Mockito mock injected into keyManager in setup()).
verify(containerClient).getContainerWithPipelineBatch(containerIDs);
}

}