2 changes: 1 addition & 1 deletion .github/workflows/ci.yml
@@ -45,7 +45,7 @@ jobs:
integration-suites: ${{ steps.integration-suites.outputs.suites }}
needs-basic-check: ${{ steps.categorize-basic-checks.outputs.needs-basic-check }}
basic-checks: ${{ steps.categorize-basic-checks.outputs.basic-checks }}
- needs-build: ${{ steps.selective-checks.outputs.needs-build }}
+ needs-build: ${{ steps.selective-checks.outputs.needs-build || steps.selective-checks.outputs.needs-integration-tests }}
needs-compile: ${{ steps.selective-checks.outputs.needs-compile }}
needs-compose-tests: ${{ steps.selective-checks.outputs.needs-compose-tests }}
needs-integration-tests: ${{ steps.selective-checks.outputs.needs-integration-tests }}
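
In GitHub Actions expressions, || evaluates to its first truthy operand, so needs-build now resolves truthy whenever either needs-build or needs-integration-tests is set — presumably so that integration-test runs always get a build even when the build-only selective check would have skipped it.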
@@ -30,12 +30,13 @@
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

- import java.io.IOException;
import java.util.UUID;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+ import org.apache.hadoop.hdds.scm.container.ContainerID;
+ import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.ozone.container.common.ContainerTestUtils;
import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
@@ -235,8 +236,8 @@ public void closeNonExistenceContainer(ContainerLayoutVersion layout) throws Exception {
initLayoutVersion(layout);
long containerID = 1L;

- IOException ioe = assertThrows(IOException.class, () -> controller.markContainerForClose(containerID));
- assertThat(ioe).hasMessage("The Container is not found. ContainerID: " + containerID);
+ Exception e = assertThrows(ContainerNotFoundException.class, () -> controller.markContainerForClose(containerID));
+ assertThat(e).hasMessageContaining(" " + ContainerID.valueOf(containerID) + " ");
}

@ContainerLayoutTestInfo.ContainerTest
@@ -246,9 +247,8 @@ public void closeMissingContainer(ContainerLayoutVersion layout)
long containerID = 2L;
containerSet.getMissingContainerSet().add(containerID);

- IOException ioe = assertThrows(IOException.class, () -> controller.markContainerForClose(containerID));
- assertThat(ioe)
- .hasMessage("The Container is in the MissingContainerSet hence we can't close it. ContainerID: " + containerID);
+ Exception e = assertThrows(ContainerNotFoundException.class, () -> controller.markContainerForClose(containerID));
+ assertThat(e).hasMessageContaining(" " + ContainerID.valueOf(containerID) + " ");
}

private CloseContainerCommand closeWithKnownPipeline() {
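
The rewritten assertions pin the concrete exception type and match only the stable ContainerID fragment instead of the full message text, so future wording changes cannot break the tests. A minimal self-contained sketch of the same pattern — the lookup method and its message are hypothetical stand-ins:

import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertThrows;

import java.io.FileNotFoundException;
import org.junit.jupiter.api.Test;

class ExceptionMessageTest {

  // Hypothetical lookup that fails for unknown IDs.
  private String lookup(long id) throws FileNotFoundException {
    throw new FileNotFoundException("No entry found. ID: #" + id);
  }

  @Test
  void failsWithSpecificTypeAndIdFragment() {
    long id = 1L;
    // Pin the exception type...
    Exception e = assertThrows(FileNotFoundException.class, () -> lookup(id));
    // ...but only match the stable ID fragment, not the full wording.
    assertThat(e).hasMessageContaining("#" + id);
  }
}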
@@ -379,7 +379,8 @@ public void testReplicationImportReserveSpace(ContainerLayoutVersion layout)
ContainerLayoutVersion.FILE_PER_BLOCK, containerMaxSize, "test", "test");
HddsVolume vol1 = (HddsVolume) volumeSet.getVolumesList().get(0);
containerData.setVolume(vol1);
- containerData.updateWriteStats(100, false);
+ // the container is not yet in HDDS, so only set its own size, leaving HddsVolume with used=0
+ containerData.getStatistics().updateWrite(100, false);
KeyValueContainer container = new KeyValueContainer(containerData, conf);
ContainerController controllerMock = mock(ContainerController.class);
Semaphore semaphore = new Semaphore(1);
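
Per the new comment, updateWriteStats would also have credited the 100 bytes to the owning HddsVolume's used-space counter; writing through getStatistics().updateWrite keeps the bytes on the container alone, so the volume still reports used=0 for the reserved-space assertions that follow.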
@@ -246,7 +246,7 @@ public void forEachAndIterator() throws Exception {
public void testIteratorOnException() throws Exception {
RDBTable rdbTable = mock(RDBTable.class);
when(rdbTable.iterator((CodecBuffer) null, Table.KeyValueIterator.Type.KEY_AND_VALUE))
- .thenThrow(new IOException());
+ .thenThrow(new RocksDatabaseException());
try (Table<String, String> testTable = new TypedTable<>(rdbTable,
StringCodec.get(), StringCodec.get(), CacheType.PARTIAL_CACHE)) {
assertThrows(IOException.class, testTable::iterator);
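
The unchanged assertThrows(IOException.class, testTable::iterator) still passes because RocksDatabaseException is, by all appearances here, an IOException subtype. A standalone sketch of that subtype-throw/supertype-assert interplay using plain JDK types instead of the Ozone classes:

import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.io.IOException;
import java.nio.file.NoSuchFileException;
import java.util.concurrent.Callable;
import org.junit.jupiter.api.Test;

class SubtypeThrowTest {

  @Test
  void supertypeAssertionCatchesSubtype() throws Exception {
    @SuppressWarnings("unchecked")
    Callable<String> source = mock(Callable.class);
    // Stub with a concrete subtype; Mockito accepts it because
    // Callable.call() declares `throws Exception`.
    when(source.call()).thenThrow(new NoSuchFileException("key"));
    // Asserting the supertype still passes: NoSuchFileException IS-A IOException.
    assertThrows(IOException.class, source::call);
  }
}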
@@ -78,6 +78,8 @@ public void testDataNodeOperationalStateAndHealthIncludedInOutput()
when(scmClient.queryNode(any(), any(), any(), any())).thenAnswer(invocation -> getNodeDetails());
when(scmClient.listPipelines()).thenReturn(new ArrayList<>());

+ CommandLine c = new CommandLine(cmd);
+ c.parseArgs();
cmd.execute(scmClient);

// The output should contain a string like:
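
Presumably the new parseArgs() call is there so picocli populates the command's annotated fields with their declared defaults before execute() runs against the mocked client; a bare command instance would otherwise keep its @Option fields null. A minimal sketch — GreetCommand and its option are hypothetical:

import picocli.CommandLine;
import picocli.CommandLine.Command;
import picocli.CommandLine.Option;

// Hypothetical command used only to illustrate default initialization.
@Command(name = "greet")
class GreetCommand implements Runnable {

  @Option(names = "--name", defaultValue = "world")
  private String name;

  @Override
  public void run() {
    System.out.println("hello " + name);
  }

  public static void main(String[] args) {
    GreetCommand cmd = new GreetCommand();
    // Without parseArgs(), cmd.name would still be null here.
    new CommandLine(cmd).parseArgs();
    cmd.run(); // prints "hello world"
  }
}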
10 changes: 9 additions & 1 deletion hadoop-ozone/dev-support/checks/_post_process.sh
@@ -26,10 +26,18 @@
# - $REPORT_FILE should be defined
# - Maven output should be saved in $REPORT_DIR/output.log

+ if [[ ! -d "${REPORT_DIR}" ]]; then
+   mkdir -p "${REPORT_DIR}"
+ fi

if [[ ! -s "${REPORT_FILE}" ]]; then
# check if there are errors in the log
if [[ -n "${ERROR_PATTERN:-}" ]]; then
- grep -m25 "${ERROR_PATTERN}" "${REPORT_DIR}/output.log" > "${REPORT_FILE}"
+ if [[ -e "${REPORT_DIR}/output.log" ]]; then
+   grep -m25 "${ERROR_PATTERN}" "${REPORT_DIR}/output.log" > "${REPORT_FILE}"
+ else
+   echo "Unknown failure, output.log missing" > "${REPORT_FILE}"
+ fi
fi
# script failed, but report file is empty (does not reflect failure)
if [[ ${rc} -ne 0 ]] && [[ ! -s "${REPORT_FILE}" ]]; then
4 changes: 2 additions & 2 deletions hadoop-ozone/dev-support/checks/junit.sh
@@ -68,7 +68,7 @@ for i in $(seq 1 ${ITERATIONS}); do
mkdir -p "${REPORT_DIR}"
fi

- mvn ${MAVEN_OPTIONS} -Dmaven-surefire-plugin.argLineAccessArgs="${OZONE_MODULE_ACCESS_ARGS}" "$@" clean verify \
+ mvn ${MAVEN_OPTIONS} -Dmaven-surefire-plugin.argLineAccessArgs="${OZONE_MODULE_ACCESS_ARGS}" "$@" verify \
| tee "${REPORT_DIR}/output.log"
irc=$?

@@ -107,5 +107,5 @@ if [[ "${OZONE_WITH_COVERAGE}" == "true" ]]; then
mvn -B -N jacoco:merge -Djacoco.destFile=$REPORT_DIR/jacoco-combined.exec -Dscan=false
fi

ERROR_PATTERN="BUILD FAILURE"
ERROR_PATTERN="\[ERROR\]"
source "${DIR}/_post_process.sh"
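
Maven prefixes each error line with [ERROR], while BUILD FAILURE appears only once in the build summary, so the escaped pattern lets the grep -m25 in _post_process.sh collect up to 25 concrete error lines rather than a single summary line.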
3 changes: 2 additions & 1 deletion hadoop-ozone/dev-support/checks/rat.sh
@@ -24,7 +24,8 @@ mkdir -p "$REPORT_DIR"

REPORT_FILE="$REPORT_DIR/summary.txt"

- mvn -B --no-transfer-progress -fn org.apache.rat:apache-rat-plugin:check "$@"
+ mvn -B --no-transfer-progress -fn org.apache.rat:apache-rat-plugin:check "$@" \
+   | tee "${REPORT_DIR}/output.log"

grep -r --include=rat.txt "!????" $dirs | tee "$REPORT_FILE"

@@ -58,6 +58,7 @@
import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils;
import org.apache.hadoop.hdds.scm.pipeline.MockPipeline;
import org.apache.hadoop.hdds.utils.IOUtils;
+ import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -740,7 +741,7 @@ private OMMetadataManager mockWritePathExceptions(
OMMetadataManager metadataManager = ozoneManager.getMetadataManager();
OMMetadataManager spy = spy(metadataManager);
Table<String, ?> table = getTable.apply(spy);
- doThrow(exception).when(table).isExist(any());
+ doThrow(new RocksDatabaseException()).when(table).isExist(any());
HddsWhiteboxTestUtils.setInternalState(
ozoneManager, "metadataManager", spy);

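
Mockito only accepts a checked exception whose type fits the stubbed method's throws clause, so doThrow(new RocksDatabaseException()) compiles and runs here on the assumption that Table.isExist declares IOException and RocksDatabaseException extends it. A standalone illustration with JDK types — the Store interface is hypothetical:

import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;

import java.io.IOException;
import java.nio.file.FileAlreadyExistsException;
import org.junit.jupiter.api.Test;

class DoThrowCheckedTest {

  // Hypothetical store whose lookup declares a checked IOException.
  interface Store {
    boolean isExist(String key) throws IOException;
  }

  @Test
  void subtypeIsAcceptedBecauseItMatchesTheThrowsClause() throws IOException {
    Store store = mock(Store.class);
    // OK: FileAlreadyExistsException IS-A IOException, which isExist declares.
    // A checked exception outside the throws clause would make Mockito
    // reject the stubbing instead.
    doThrow(new FileAlreadyExistsException("k")).when(store).isExist(any());
    assertThrows(IOException.class, () -> store.isExist("k"));
  }
}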
@@ -47,6 +47,7 @@
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.client.ScmTopologyClient;
import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClientTestImpl;
+ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.client.SecretKeyTestClient;
@@ -58,6 +59,7 @@
import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.jupiter.api.AfterEach;
+ import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
@@ -99,6 +101,11 @@ public class TestOzoneManagerListVolumesSecure {
private UserGroupInformation userUGI1;
private UserGroupInformation userUGI2;

+ @BeforeAll
+ static void setup() {
+   DefaultMetricsSystem.setMiniClusterMode(true);
+ }

@BeforeEach
public void init() throws Exception {
this.conf = new OzoneConfiguration();
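
DefaultMetricsSystem.setMiniClusterMode(true) makes Hadoop's metrics system tolerate re-registration of same-named metrics sources (it uniquifies the name instead of throwing), so running it once in @BeforeAll keeps each test's init() from colliding with sources registered by earlier tests.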
@@ -43,6 +43,7 @@
import java.util.stream.Stream;
import org.apache.hadoop.hdds.utils.TransactionInfo;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
+ import org.apache.hadoop.hdds.utils.db.CodecException;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
import org.apache.hadoop.ozone.om.SnapshotChainManager;
@@ -233,7 +234,7 @@ public void testValidateAndUpdateCacheFailure() throws Exception {
OmMetadataManagerImpl mockedMetadataManager = mock(OmMetadataManagerImpl.class);
Table<String, SnapshotInfo> mockedSnapshotInfoTable = mock(Table.class);

- when(mockedSnapshotInfoTable.get(anyString())).thenThrow(new IOException("Injected fault error."));
+ when(mockedSnapshotInfoTable.get(anyString())).thenThrow(new CodecException("Injected fault error."));
when(mockedMetadataManager.getSnapshotInfoTable()).thenReturn(mockedSnapshotInfoTable);
when(getOzoneManager().getMetadataManager()).thenReturn(mockedMetadataManager);

@@ -28,6 +28,7 @@
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
+ import org.apache.hadoop.hdds.utils.db.CodecException;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
@@ -121,7 +122,7 @@ public void testValidateAndUpdateCacheFailure() throws IOException {
OmMetadataManagerImpl mockedMetadataManager = mock(OmMetadataManagerImpl.class);
Table<String, SnapshotInfo> mockedSnapshotInfoTable = mock(Table.class);

- when(mockedSnapshotInfoTable.get(anyString())).thenThrow(new IOException("Injected fault error."));
+ when(mockedSnapshotInfoTable.get(anyString())).thenThrow(new CodecException("Injected fault error."));
when(mockedMetadataManager.getSnapshotInfoTable()).thenReturn(mockedSnapshotInfoTable);
when(getOzoneManager().getMetadataManager()).thenReturn(mockedMetadataManager);

@@ -31,6 +31,7 @@
import java.util.Map;
import java.util.Optional;
import java.util.UUID;
+ import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.om.KeyManager;
import org.apache.hadoop.ozone.om.OMMetadataManager;
@@ -110,8 +111,8 @@ private <T> Table<String, T> getMockedTable(Map<String, T> map) throws IOException {

private <T> Table<String, T> getFailingMockedTable() throws IOException {
Table<String, T> table = mock(Table.class);
- when(table.get(anyString())).thenThrow(new IOException());
- when(table.getIfExist(anyString())).thenThrow(new IOException());
+ when(table.get(anyString())).thenThrow(new RocksDatabaseException());
+ when(table.getIfExist(anyString())).thenThrow(new RocksDatabaseException());
return table;
}

@@ -449,7 +449,7 @@ public File getLastKnownDB(File reconDbDir, String fileNamePrefix) {
if (lastKnonwnSnapshotTs < snapshotTimestamp) {
if (lastKnownSnapshotFile != null) {
try {
- FileUtils.deleteDirectory(lastKnownSnapshotFile);
+ FileUtils.forceDelete(lastKnownSnapshotFile);
} catch (IOException e) {
log.warn("Error deleting existing om db snapshot directory: {}",
lastKnownSnapshotFile.getAbsolutePath());
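
Commons IO's FileUtils.deleteDirectory refuses a path that is not a directory (the exact exception type varies across commons-io versions), while forceDelete removes either a file or a directory tree, making the cleanup robust if the snapshot path turns out to be a plain file. A small sketch of the difference:

import java.io.File;
import java.io.IOException;
import org.apache.commons.io.FileUtils;

public class DeleteDemo {
  public static void main(String[] args) throws IOException {
    File target = File.createTempFile("snapshot", ".db");

    // deleteDirectory() only accepts directories; handing it a regular
    // file fails instead of deleting it.
    try {
      FileUtils.deleteDirectory(target);
    } catch (IllegalArgumentException | IOException e) {
      System.out.println("deleteDirectory rejected a plain file: " + e);
    }

    // forceDelete() removes either a file or a directory (recursively).
    FileUtils.forceDelete(target);
    System.out.println("exists after forceDelete? " + target.exists());
  }
}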
@@ -134,8 +134,6 @@ public Response get(
OzoneBucket bucket = null;

try {
- bucket = getBucket(bucketName);
- S3Owner.verifyBucketOwnerCondition(headers, bucketName, bucket.getOwner());
if (aclMarker != null) {
s3GAction = S3GAction.GET_ACL;
S3BucketAcl result = getAcl(bucketName);
@@ -171,6 +169,9 @@
boolean shallow = listKeysShallowEnabled
&& OZONE_URI_DELIMITER.equals(delimiter);

+ bucket = getBucket(bucketName);
+ S3Owner.verifyBucketOwnerCondition(headers, bucketName, bucket.getOwner());

ozoneKeyIterator = bucket.listKeys(prefix, prevKey, shallow);

} catch (OMException ex) {
@@ -363,6 +364,7 @@ public Response listMultipartUploads(
OzoneBucket bucket = getBucket(bucketName);

try {
+ S3Owner.verifyBucketOwnerCondition(headers, bucketName, bucket.getOwner());
OzoneMultipartUploadList ozoneMultipartUploadList =
bucket.listMultipartUploads(prefix, keyMarker, uploadIdMarker, maxUploads);

@@ -546,6 +548,7 @@ public S3BucketAcl getAcl(String bucketName)
S3BucketAcl result = new S3BucketAcl();
try {
OzoneBucket bucket = getBucket(bucketName);
+ S3Owner.verifyBucketOwnerCondition(headers, bucketName, bucket.getOwner());
S3Owner owner = S3Owner.of(bucket.getOwner());
result.setOwner(owner);

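
Net effect: the owner-condition check no longer runs ahead of the aclMarker branch in get(); each path now verifies on its own — the key-listing path after its parameters are validated, getAcl internally, and listMultipartUploads, which previously had no owner check at all.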
@@ -100,6 +100,7 @@ public void setup() throws Exception {
"STANDARD");
when(headers.getHeaderString(X_AMZ_CONTENT_SHA256))
.thenReturn("mockSignature");
+ bucketEndpoint.setHeaders(headers);
keyEndpoint.setHeaders(headers);
metrics = bucketEndpoint.getMetrics();
