Skip to content
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,86 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.ozone.debug.replicas;

import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;

import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;
import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;

/**
 * Checks block existence using GetBlock calls to the Datanodes.
 */
public class BlockExistenceVerifier implements ReplicaVerifier {

  private static final String CHECKTYPE = "blockExistence";

  private final OzoneConfiguration conf;

  public BlockExistenceVerifier(OzoneConfiguration conf) {
    this.conf = conf;
  }

  /**
   * Queries every datanode in the block's pipeline via GetBlock and reports
   * whether the given datanode returned block data.
   *
   * @param datanode    the replica's datanode whose response is inspected
   * @param stream      unused by this check (required by the interface)
   * @param keyLocation location info providing the pipeline, block ID and token
   * @return a pass result if the datanode reported block data, otherwise a
   *         failure result carrying a diagnostic message
   */
  @Override
  public BlockVerificationResult verifyBlock(DatanodeDetails datanode, OzoneInputStream stream,
      OmKeyLocationInfo keyLocation) {
    // NOTE(review): a fresh ContainerOperationClient is built per block check;
    // fine for a debug tool, but consider reusing one if this becomes a hot path.
    try (ContainerOperationClient containerClient = new ContainerOperationClient(conf);
        XceiverClientManager xceiverClientManager = containerClient.getXceiverClientManager()) {

      Pipeline keyPipeline = keyLocation.getPipeline();
      boolean isECKey = keyPipeline.getReplicationConfig().getReplicationType() == HddsProtos.ReplicationType.EC;
      // For non-EC keys, switch to STANDALONE/ONE so reads can target individual replicas.
      Pipeline pipeline = isECKey ? keyPipeline :
          Pipeline.newBuilder(keyPipeline)
              .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE))
              .build();

      try (XceiverClientSpi client = xceiverClientManager.acquireClientForReadData(pipeline)) {

        Map<DatanodeDetails, ContainerProtos.GetBlockResponseProto> responses =
            ContainerProtocolCalls.getBlockFromAllNodes(
                client,
                keyLocation.getBlockID().getDatanodeBlockIDProtobuf(),
                keyLocation.getToken()
            );

        ContainerProtos.GetBlockResponseProto response = responses.get(datanode);
        boolean hasBlock = response != null && response.hasBlockData();

        return new BlockVerificationResult(CHECKTYPE, hasBlock, hasBlock ? Collections.emptyList() :
            Collections.singletonList(new BlockVerificationResult.FailureDetail(
                true, "Block does not exist on this replica")));
      }

    } catch (IOException | InterruptedException e) {
      if (e instanceof InterruptedException) {
        // Restore the interrupt status so callers can still observe the interruption.
        Thread.currentThread().interrupt();
      }
      BlockVerificationResult.FailureDetail failure = new BlockVerificationResult.FailureDetail(true, e.getMessage());
      return new BlockVerificationResult(CHECKTYPE, false, Collections.singletonList(failure));
    }
  }

}
Original file line number Diff line number Diff line change
@@ -0,0 +1,91 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.ozone.debug.replicas;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

/**
 * Json structure for replicas to pass through each check and give output.
 * Immutable value object: one check type, a pass/fail flag, and the list of
 * failure details (empty when the check passed).
 */
public class BlockVerificationResult {

  private final String type;
  private final boolean pass;
  private final List<FailureDetail> failures;

  /**
   * @param type     identifier of the check that produced this result
   * @param pass     whether the check passed
   * @param failures failure details; may be null or empty when the check passed
   */
  public BlockVerificationResult(String type, boolean pass, List<FailureDetail> failures) {
    this.type = type;
    this.pass = pass;
    // Defensive copy keeps this value object immutable even if the caller's
    // list is mutated later; tolerate null as "no failures".
    this.failures = failures == null ? Collections.emptyList()
        : Collections.unmodifiableList(new ArrayList<>(failures));
  }

  public String getType() {
    return type;
  }

  public boolean isPass() {
    return pass;
  }

  /** @return an unmodifiable view of the failure details. */
  public List<FailureDetail> getFailures() {
    return failures;
  }

  /**
   * Details about the check failure.
   */
  public static class FailureDetail {
    private final boolean present;
    private final String message;

    public FailureDetail(boolean present, String message) {
      this.present = present;
      this.message = message;
    }

    public boolean isPresent() {
      return present;
    }

    public String getFailureMessage() {
      return message;
    }

  }

  /**
   * Serializes this result to a JSON object node using the supplied mapper.
   *
   * @param mapper Jackson mapper used to create nodes
   * @return an ObjectNode with "type", "pass" and a "failures" array
   */
  public ObjectNode toJson(ObjectMapper mapper) {
    ObjectNode resultNode = mapper.createObjectNode();
    resultNode.put("type", type);
    resultNode.put("pass", pass);

    ArrayNode failuresArray = mapper.createArrayNode();
    for (FailureDetail failure : failures) {
      ObjectNode failureNode = mapper.createObjectNode();
      failureNode.put("present", failure.isPresent());
      failureNode.put("message", failure.getFailureMessage());
      failuresArray.add(failureNode);
    }

    resultNode.set("failures", failuresArray);
    return resultNode;
  }

}
Original file line number Diff line number Diff line change
Expand Up @@ -17,25 +17,13 @@

package org.apache.hadoop.ozone.debug.replicas;

import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import jakarta.annotation.Nonnull;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Map;
import java.util.Collections;
import org.apache.commons.io.IOUtils;
import org.apache.commons.io.output.NullOutputStream;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.server.JsonUtils;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneKeyDetails;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;

/**
Expand All @@ -44,121 +32,23 @@
* downloaded replicas.
*/
public class Checksums implements ReplicaVerifier {
private static final String CHECKTYPE = "checksum";

private static final String JSON_PROPERTY_FILE_NAME = "filename";
private static final String JSON_PROPERTY_FILE_SIZE = "datasize";
private static final String JSON_PROPERTY_FILE_BLOCKS = "blocks";
private static final String JSON_PROPERTY_BLOCK_INDEX = "blockIndex";
private static final String JSON_PROPERTY_BLOCK_CONTAINERID = "containerId";
private static final String JSON_PROPERTY_BLOCK_LOCALID = "localId";
private static final String JSON_PROPERTY_BLOCK_LENGTH = "length";
private static final String JSON_PROPERTY_BLOCK_OFFSET = "offset";
private static final String JSON_PROPERTY_BLOCK_REPLICAS = "replicas";
private static final String JSON_PROPERTY_REPLICA_HOSTNAME = "hostname";
private static final String JSON_PROPERTY_REPLICA_UUID = "uuid";
private static final String JSON_PROPERTY_REPLICA_EXCEPTION = "exception";


private String outputDir;
private OzoneClient client;

public Checksums(OzoneClient client, String outputDir) {
this.client = client;
this.outputDir = outputDir;
}

private void downloadReplicasAndCreateManifest(
Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> replicas,
ArrayNode blocks) throws IOException {
int blockIndex = 0;

for (Map.Entry<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>>
block : replicas.entrySet()) {
ObjectNode blockJson = JsonUtils.createObjectNode(null);
ArrayNode replicasJson = JsonUtils.createArrayNode();

blockIndex += 1;
OmKeyLocationInfo locationInfo = block.getKey();
blockJson.put(JSON_PROPERTY_BLOCK_INDEX, blockIndex);
blockJson.put(JSON_PROPERTY_BLOCK_CONTAINERID, locationInfo.getContainerID());
blockJson.put(JSON_PROPERTY_BLOCK_LOCALID, locationInfo.getLocalID());
blockJson.put(JSON_PROPERTY_BLOCK_LENGTH, locationInfo.getLength());
blockJson.put(JSON_PROPERTY_BLOCK_OFFSET, locationInfo.getOffset());

for (Map.Entry<DatanodeDetails, OzoneInputStream>
replica : block.getValue().entrySet()) {
DatanodeDetails datanode = replica.getKey();

ObjectNode replicaJson = JsonUtils.createObjectNode(null);

replicaJson.put(JSON_PROPERTY_REPLICA_HOSTNAME, datanode.getHostName());
replicaJson.put(JSON_PROPERTY_REPLICA_UUID, datanode.getUuidString());

try (InputStream is = replica.getValue()) {
IOUtils.copyLarge(is, NullOutputStream.INSTANCE);
} catch (IOException e) {
replicaJson.put(JSON_PROPERTY_REPLICA_EXCEPTION, e.getMessage());
}
replicasJson.add(replicaJson);
}
blockJson.set(JSON_PROPERTY_BLOCK_REPLICAS, replicasJson);
blocks.add(blockJson);
}
}

@Nonnull
private File createDirectory(String volumeName, String bucketName,
String keyName) throws IOException {
String fileSuffix
= new SimpleDateFormat("yyyyMMddHHmmss").format(new Date());
String directoryName = volumeName + "_" + bucketName + "_" + keyName +
"_" + fileSuffix;
System.out.println("Creating directory : " + directoryName);
File dir = new File(outputDir, directoryName);
if (!dir.exists()) {
if (dir.mkdirs()) {
System.out.println("Successfully created!");
} else {
throw new IOException(String.format(
"Failed to create directory %s.", dir));
}
}
return dir;
public Checksums() {
}

@Override
public void verifyKey(OzoneKeyDetails keyDetails) {
String volumeName = keyDetails.getVolumeName();
String bucketName = keyDetails.getBucketName();
String keyName = keyDetails.getName();
System.out.println("Processing key : " + volumeName + "/" + bucketName + "/" + keyName);
try {
ClientProtocol checksumClient = client.getObjectStore().getClientProxy();

// Multilevel keys will have a '/' in their names. This interferes with
// directory and file creation process. Flatten the keys to fix this.
String sanitizedKeyName = keyName.replace("/", "_");
public BlockVerificationResult verifyBlock(DatanodeDetails datanode, OzoneInputStream stream,
OmKeyLocationInfo keyLocation) {
try (InputStream is = stream) {
IOUtils.copyLarge(is, NullOutputStream.INSTANCE);

File dir = createDirectory(volumeName, bucketName, sanitizedKeyName);
OzoneKeyDetails keyInfoDetails = checksumClient.getKeyDetails(volumeName, bucketName, keyName);
Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> replicas =
checksumClient.getKeysEveryReplicas(volumeName, bucketName, keyName);

ObjectNode result = JsonUtils.createObjectNode(null);
result.put(JSON_PROPERTY_FILE_NAME, volumeName + "/" + bucketName + "/" + keyName);
result.put(JSON_PROPERTY_FILE_SIZE, keyInfoDetails.getDataSize());

ArrayNode blocks = JsonUtils.createArrayNode();
downloadReplicasAndCreateManifest(replicas, blocks);
result.set(JSON_PROPERTY_FILE_BLOCKS, blocks);

String prettyJson = JsonUtils.toJsonStringWithDefaultPrettyPrinter(result);

String manifestFileName = sanitizedKeyName + "_manifest";
File manifestFile = new File(dir, manifestFileName);
Files.write(manifestFile.toPath(), prettyJson.getBytes(StandardCharsets.UTF_8));
return new BlockVerificationResult(CHECKTYPE, true, Collections.emptyList());
} catch (IOException e) {
throw new RuntimeException(e);
BlockVerificationResult.FailureDetail failure = new BlockVerificationResult
.FailureDetail(true, e.getMessage());
return new BlockVerificationResult(CHECKTYPE, false, Collections.singletonList(failure));
}
}

}
Original file line number Diff line number Diff line change
Expand Up @@ -17,12 +17,14 @@

package org.apache.hadoop.ozone.debug.replicas;

import org.apache.hadoop.ozone.client.OzoneKeyDetails;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;

/**
 * Functional interface for implementing a block verifier.
 */
@FunctionalInterface
public interface ReplicaVerifier {

  /**
   * Runs this verifier's check against one replica of a block.
   *
   * @param datanode    the datanode hosting the replica under check
   * @param stream      an open input stream over the replica's data
   * @param keyLocation location info identifying the block being verified
   * @return the outcome of this check for the given replica
   */
  BlockVerificationResult verifyBlock(DatanodeDetails datanode, OzoneInputStream stream, OmKeyLocationInfo keyLocation);
}
Loading