3 changes: 2 additions & 1 deletion hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config

@@ -84,7 +84,8 @@ OZONE-SITE.XML_ozone.scm.dead.node.interval=45s
 OZONE-SITE.XML_hdds.container.report.interval=60s
 OZONE-SITE.XML_ozone.scm.close.container.wait.duration=5s

-OZONE-SITE.XML_hdds.container.ratis.datastream.enabled=true
+# Ratis streaming is disabled to ensure coverage for both cases
+OZONE-SITE.XML_hdds.container.ratis.datastream.enabled=false

 HDFS-SITE.XML_dfs.datanode.kerberos.principal=dn/[email protected]
 HDFS-SITE.XML_dfs.datanode.kerberos.keytab.file=/etc/security/keytabs/dn.keytab
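For orientation: the OZONE-SITE.XML_ prefix is how these compose environments inject properties into the generated ozone-site.xml inside the containers. Below is a minimal sketch of reading the flag on the Java side, assuming only the standard Hadoop Configuration API; the class name DatastreamFlagCheck is hypothetical, and whether Ozone reads this key through a named constant is not shown here.

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public class DatastreamFlagCheck {
  public static void main(String[] args) {
    // OzoneConfiguration picks up ozone-site.xml from the classpath.
    OzoneConfiguration conf = new OzoneConfiguration();

    // With the docker-config above, this now resolves to false.
    boolean streamEnabled =
        conf.getBoolean("hdds.container.ratis.datastream.enabled", false);
    System.out.println("ratis datastream enabled: " + streamEnabled);
  }
}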
9 changes: 9 additions & 0 deletions hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh

@@ -39,6 +39,15 @@ execute_robot_test scm security

 execute_robot_test scm -v SCHEME:ofs -v BUCKET_TYPE:bucket -N ozonefs-ofs-bucket ozonefs/ozonefs.robot

+## Exclude virtual-host tests. This is tested separately as it requires additional config.
+exclude="--exclude virtual-host"
+for bucket in encrypted; do
+  execute_robot_test s3g -v BUCKET:${bucket} -N s3-${bucket} ${exclude} s3
+  # some tests are independent of the bucket type, only need to be run once
+  ## Exclude virtual-host.robot
+  exclude="--exclude virtual-host --exclude no-bucket-type"
+done
+
 #expects 4 pipelines, should be run before
 #admincli which creates STANDALONE pipeline
 execute_robot_test scm recon
@@ -58,7 +58,6 @@
 import org.apache.hadoop.ozone.client.OzoneKeyDetails;
 import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts;
 import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.client.io.KeyMetadataAware;
 import org.apache.hadoop.ozone.client.io.KeyOutputStream;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
@@ -1000,12 +999,10 @@ private Response createMultipartKey(OzoneVolume volume, String bucket,
         metadataLatencyNs =
             getMetrics().updatePutKeyMetadataStats(startNanos);
         putLength = IOUtils.copyLarge(digestInputStream, ozoneOutputStream);
-        ((KeyMetadataAware)ozoneOutputStream.getOutputStream())
-            .getMetadata().put(ETAG, DatatypeConverter.printHexBinary(
-                digestInputStream.getMessageDigest().digest())
-                .toLowerCase());
-        keyOutputStream
-            = ozoneOutputStream.getKeyOutputStream();
+        byte[] digest = digestInputStream.getMessageDigest().digest();
+        ozoneOutputStream.getMetadata()
+            .put(ETAG, DatatypeConverter.printHexBinary(digest).toLowerCase());
+        keyOutputStream = ozoneOutputStream.getKeyOutputStream();
       }
       getMetrics().incPutKeySuccessLength(putLength);
       perf.appendSizeBytes(putLength);
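The call-site simplification is easier to see with the wrapper in view: OzoneOutputStream now exposes the metadata map itself, so the endpoint no longer reaches into the wrapped stream and casts it to KeyMetadataAware. Below is a minimal, self-contained sketch of both ideas in the hunk — the delegating getMetadata() and the lower-case hex MD5 ETag — using hypothetical stand-in classes (MetadataAware, InnerStream, WrapperStream, EtagSketch), not the real Ozone types, and plain-JDK hex formatting instead of DatatypeConverter.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.HashMap;
import java.util.Map;

public class EtagSketch {

  // Stand-in for the KeyMetadataAware interface: a sink that carries a
  // mutable key/value metadata map alongside the bytes.
  interface MetadataAware {
    Map<String, String> getMetadata();
  }

  // Stand-in for the wrapped stream (e.g. KeyOutputStream) that owns the map.
  static class InnerStream extends ByteArrayOutputStream
      implements MetadataAware {
    private final Map<String, String> metadata = new HashMap<>();

    @Override
    public Map<String, String> getMetadata() {
      return metadata;
    }
  }

  // Stand-in for OzoneOutputStream. Before the patch, callers wrote
  // ((MetadataAware) wrapper.getOutputStream()).getMetadata(); the patch
  // moves that cast behind a getMetadata() method on the wrapper itself.
  static class WrapperStream extends OutputStream {
    private final InnerStream wrapped = new InnerStream();

    @Override
    public void write(int b) throws IOException {
      wrapped.write(b);
    }

    public Map<String, String> getMetadata() {
      return wrapped.getMetadata();
    }
  }

  public static void main(String[] args) throws Exception {
    byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);

    // The real code digests while copying via a DigestInputStream; updating
    // the MessageDigest directly is equivalent for this sketch.
    MessageDigest md5 = MessageDigest.getInstance("MD5");
    md5.update(payload);

    WrapperStream out = new WrapperStream();
    out.write(payload);

    // Same formatting as the patch: hex-encode the digest, lower case.
    StringBuilder etag = new StringBuilder();
    for (byte b : md5.digest()) {
      etag.append(String.format("%02x", b));
    }
    out.getMetadata().put("ETag", etag.toString());

    System.out.println(out.getMetadata());
    // -> {ETag=5d41402abc4b2a76b9719d911017c592}
  }
}

Hiding the cast in one place means new call sites cannot forget it, and the stored value matches what S3 clients expect for a single-part upload: the lower-case hex MD5 of the part body.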