Merged. Changes from 2 commits.
@@ -26,6 +26,7 @@
import java.util.ListIterator;
import java.util.Map;

import org.apache.hadoop.hdds.client.ContainerBlockID;
import org.apache.hadoop.hdds.client.ReplicationConfig;
import org.apache.hadoop.hdds.scm.ByteStringConversion;
import org.apache.hadoop.hdds.scm.ContainerClientMetrics;
@@ -85,6 +86,8 @@ public class BlockOutputStreamEntryPool implements KeyMetadataAware {
private final ExcludeList excludeList;
private final ContainerClientMetrics clientMetrics;
private final StreamBufferArgs streamBufferArgs;
// Last block ID reported to OM via hsync; used to skip redundant updates
private ContainerBlockID lastUpdatedBlockId = new ContainerBlockID(-1, -1);

@SuppressWarnings({"parameternumber", "squid:S00107"})
public BlockOutputStreamEntryPool(
@@ -368,7 +371,16 @@ void hsyncKey(long offset) throws IOException {
if (keyArgs.getIsMultipartKey()) {
throw new IOException("Hsync is unsupported for multipart keys.");
} else {
omClient.hsyncKey(keyArgs, openID);
if (keyArgs.getLocationInfoList().isEmpty()) {
omClient.hsyncKey(keyArgs, openID);
Contributor:
Why do we want to hsync without any block info? Or can we return an error in this case?

Contributor Author:
hsync is called right after the file is created. See TestHSync#testUncommittedBlocks.

} else {
ContainerBlockID lastBlockId = keyArgs.getLocationInfoList().get(keyArgs.getLocationInfoList().size() - 1)
.getBlockID().getContainerBlockID();
if (!lastUpdatedBlockId.equals(lastBlockId)) {
omClient.hsyncKey(keyArgs, openID);
lastUpdatedBlockId = lastBlockId;
}
}
}
} else {
LOG.warn("Closing KeyOutputStream, but key args is null");
@@ -23,19 +23,22 @@
import java.io.IOException;
import java.security.GeneralSecurityException;
import java.security.PrivilegedExceptionAction;
import java.util.HashMap;
import java.util.UUID;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicReference;

import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.conf.StorageUnit;
import org.apache.hadoop.crypto.CipherSuite;
import org.apache.hadoop.crypto.CryptoCodec;
import org.apache.hadoop.crypto.CryptoOutputStream;
import org.apache.hadoop.crypto.Encryptor;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -57,6 +60,7 @@
import org.apache.hadoop.ozone.client.io.KeyOutputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OMMetrics;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;

@@ -79,7 +83,6 @@
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_ROOT;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY;
@@ -193,39 +196,6 @@ public void testKeyHSyncThenClose() throws Exception {
}
}

@Test
public void testO3fsHSync() throws Exception {
// Set the fs.defaultFS
final String rootPath = String.format("%s://%s.%s/",
OZONE_URI_SCHEME, bucket.getName(), bucket.getVolumeName());
CONF.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);

try (FileSystem fs = FileSystem.get(CONF)) {
for (int i = 0; i < 10; i++) {
final Path file = new Path("/file" + i);
runTestHSync(fs, file, 1 << i);
}
}
}

@Test
public void testOfsHSync() throws Exception {
// Set the fs.defaultFS
final String rootPath = String.format("%s://%s/",
OZONE_OFS_URI_SCHEME, CONF.get(OZONE_OM_ADDRESS_KEY));
CONF.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);

final String dir = OZONE_ROOT + bucket.getVolumeName()
+ OZONE_URI_DELIMITER + bucket.getName();

try (FileSystem fs = FileSystem.get(CONF)) {
for (int i = 0; i < 10; i++) {
final Path file = new Path(dir, "file" + i);
runTestHSync(fs, file, 1 << i);
}
}
}

@Test
public void testUncommittedBlocks() throws Exception {
// Set the fs.defaultFS
@@ -279,18 +249,69 @@ public void testOverwriteHSyncFile() throws Exception {
}
}

static void runTestHSync(FileSystem fs, Path file, int initialDataSize)
throws Exception {
try (StreamWithLength out = new StreamWithLength(
fs.create(file, true))) {
runTestHSync(fs, file, out, initialDataSize);
for (int i = 1; i < 5; i++) {
for (int j = -1; j <= 1; j++) {
int dataSize = (1 << (i * 5)) + j;
runTestHSync(fs, file, out, dataSize);
@Test
public void testHsyncKeyCallCount() throws Exception {
// Set the fs.defaultFS
final String rootPath = String.format("%s://%s/",
OZONE_OFS_URI_SCHEME, CONF.get(OZONE_OM_ADDRESS_KEY));
CONF.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);

final String dir = OZONE_ROOT + bucket.getVolumeName()
+ OZONE_URI_DELIMITER + bucket.getName();

OMMetrics omMetrics = cluster.getOzoneManager().getMetrics();
omMetrics.resetNumKeyHSyncs();
String data = "random data";
final Path file = new Path(dir, "file-hsync-then-close");
long blockSize;
try (FileSystem fs = FileSystem.get(CONF)) {
blockSize = fs.getDefaultBlockSize(file);
long fileSize = 0;
try (FSDataOutputStream outputStream = fs.create(file, true)) {
// keep writing until the data spills into a second block
while (fileSize < blockSize) {
outputStream.write(data.getBytes(UTF_8), 0, data.length());
outputStream.hsync();
fileSize += data.length();
}
}
}
assertEquals(2, omMetrics.getNumKeyHSyncs());
}
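
To see why this assertion expects exactly two OM hsyncs under the deduplication above, here is a hedged, dependency-free simulation; the blockSize value and the dedup key (a block index instead of a ContainerBlockID) are simplifications:

// Simulates the loop above: one OM hsync when the first block is first
// reported, one more when the write spills into the second block.
public final class HsyncCountSim {
  public static void main(String[] args) {
    final long blockSize = 256;  // illustrative; the real default is much larger
    final int chunk = 11;        // length of "random data"
    long fileSize = 0;
    long lastReportedBlock = -1; // mirrors lastUpdatedBlockId = (-1, -1)
    int hsyncs = 0;
    while (fileSize < blockSize) {
      fileSize += chunk;                           // outputStream.write(...)
      long lastBlock = (fileSize - 1) / blockSize; // block holding the last byte
      if (lastBlock != lastReportedBlock) {        // dedup check from the change above
        hsyncs++;                                  // omClient.hsyncKey round trip
        lastReportedBlock = lastBlock;
      }
    }
    System.out.println(hsyncs);                    // prints 2
  }
}
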

@Test
public void testPreAllocatedFileHsyncKeyCallCount() throws Exception {
// Set the fs.defaultFS
final String rootPath = String.format("%s://%s/",
OZONE_OFS_URI_SCHEME, CONF.get(OZONE_OM_ADDRESS_KEY));
CONF.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);

final String dir = OZONE_ROOT + bucket.getVolumeName()
+ OZONE_URI_DELIMITER + bucket.getName();

String data = "random data";
String keyName = "key-" + RandomStringUtils.randomNumeric(5);
final Path file = new Path(dir, keyName);
long blockSize;
long fileSize;
try (FileSystem fs = FileSystem.get(CONF)) {
blockSize = fs.getDefaultBlockSize(file);
fileSize = 2 * blockSize;
}

OMMetrics omMetrics = cluster.getOzoneManager().getMetrics();
omMetrics.resetNumKeyHSyncs();
long writtenSize = 0;
try (OzoneOutputStream outputStream = bucket.createKey(keyName, fileSize, ReplicationType.RATIS,
ReplicationFactor.THREE, new HashMap<>())) {
// keep writing until the data spills into the second pre-allocated block;
// only the first report of each last block reaches OM, so the count stays 2
while (writtenSize < blockSize) {
outputStream.write(data.getBytes(UTF_8), 0, data.length());
outputStream.hsync();
writtenSize += data.length();
}
}
assertEquals(2, omMetrics.getNumKeyHSyncs());
}

private static class StreamWithLength implements Closeable {
@@ -317,36 +338,6 @@ public void close() throws IOException {
}
}

static void runTestHSync(FileSystem fs, Path file,
StreamWithLength out, int dataSize)
throws Exception {
final long length = out.getLength();
LOG.info("runTestHSync {} with size {}, skipLength={}",
file, dataSize, length);
final byte[] data = new byte[dataSize];
ThreadLocalRandom.current().nextBytes(data);
out.writeAndHsync(data);

final byte[] buffer = new byte[4 << 10];
int offset = 0;
try (FSDataInputStream in = fs.open(file)) {
final long skipped = in.skip(length);
assertEquals(length, skipped);

for (; ;) {
final int n = in.read(buffer, 0, buffer.length);
if (n <= 0) {
break;
}
for (int i = 0; i < n; i++) {
assertEquals(data[offset + i], buffer[i]);
}
offset += n;
}
}
assertEquals(data.length, offset);
}

private void runConcurrentWriteHSync(Path file,
final FSDataOutputStream out, int initialDataSize)
throws InterruptedException, IOException {
@@ -1100,6 +1100,11 @@ public long getNumKeyHSyncs() {
return numKeyHSyncs.value();
}

@VisibleForTesting
public void resetNumKeyHSyncs() {
numKeyHSyncs.incr(-numKeyHSyncs.value());
}

@VisibleForTesting
public long getNumKeyCommitFails() {
return numKeyCommitFails.value();
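
On the resetNumKeyHSyncs helper above: Hadoop's metrics2 counters expose incr and value but no reset, hence the incr(-value()) idiom. A small standalone illustration of that idiom, with AtomicLong standing in for MutableCounterLong:

import java.util.concurrent.atomic.AtomicLong;

// Illustration of the reset idiom in resetNumKeyHSyncs(): when a counter
// only exposes incr(delta) and value(), "reset" it by adding the negated
// current value.
final class IncrOnlyCounter {
  private final AtomicLong val = new AtomicLong();
  void incr(long delta) { val.addAndGet(delta); }
  long value() { return val.get(); }
}

public final class ResetIdiomDemo {
  public static void main(String[] args) {
    IncrOnlyCounter numKeyHSyncs = new IncrOnlyCounter();
    numKeyHSyncs.incr(5);
    // Not atomic: an increment racing between value() and incr() would
    // survive the reset. That is acceptable in single-threaded test setup.
    numKeyHSyncs.incr(-numKeyHSyncs.value());
    System.out.println(numKeyHSyncs.value()); // 0
  }
}
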