Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -138,6 +138,11 @@ public class AbfsConfiguration{
DefaultValue = DEFAULT_FOOTER_READ_BUFFER_SIZE)
private int footerReadBufferSize;

// Whether buffered positional reads (pread) are disabled for this account.
// Bound to FS_AZURE_BUFFERED_PREAD_DISABLE; defaults to
// DEFAULT_BUFFERED_PREAD_DISABLE.
@BooleanConfigurationValidatorAnnotation(
ConfigurationKey = FS_AZURE_BUFFERED_PREAD_DISABLE,
DefaultValue = DEFAULT_BUFFERED_PREAD_DISABLE)
private boolean isBufferedPReadDisabled;

@BooleanConfigurationValidatorAnnotation(
ConfigurationKey = FS_AZURE_ACCOUNT_IS_EXPECT_HEADER_ENABLED,
DefaultValue = DEFAULT_FS_AZURE_ACCOUNT_IS_EXPECT_HEADER_ENABLED)
Expand Down Expand Up @@ -953,6 +958,14 @@ public int getFooterReadBufferSize() {
return this.footerReadBufferSize;
}

/**
 * Tells whether buffered positional reads (pread) have been switched off
 * through configuration.
 *
 * @return {@code true} if buffered pread is disabled, {@code false} otherwise.
 */
public boolean isBufferedPReadDisabled() {
  return isBufferedPReadDisabled;
}

/**
 * Returns the configured read buffer size.
 *
 * @return read buffer size in bytes.
 */
public int getReadBufferSize() {
  return readBufferSize;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -945,8 +945,9 @@ contentLength, populateAbfsInputStreamContext(
private AbfsInputStreamContext populateAbfsInputStreamContext(
Optional<Configuration> options, ContextEncryptionAdapter contextEncryptionAdapter) {
boolean bufferedPreadDisabled = options
.map(c -> c.getBoolean(FS_AZURE_BUFFERED_PREAD_DISABLE, false))
.orElse(false);
.map(c -> c.getBoolean(FS_AZURE_BUFFERED_PREAD_DISABLE,
getAbfsConfiguration().isBufferedPReadDisabled()))
.orElse(getAbfsConfiguration().isBufferedPReadDisabled());
int footerReadBufferSize = options.map(c -> c.getInt(
AZURE_FOOTER_READ_BUFFER_SIZE, getAbfsConfiguration().getFooterReadBufferSize()))
.orElse(getAbfsConfiguration().getFooterReadBufferSize());
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -174,6 +174,8 @@ public final class AbfsHttpConstants {
public static final char CHAR_STAR = '*';
public static final char CHAR_PLUS = '+';

/**
 * Limit argument for {@code String.split(regex, limit)} calls: a negative
 * limit applies the pattern as many times as possible and retains trailing
 * empty strings (see {@code java.lang.String#split(String, int)}).
 */
public static final int SPLIT_NO_LIMIT = -1;

/**
* Specifies the version of the REST protocol used for processing the request.
* Versions should be added in enum list in ascending chronological order.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,7 @@ public final class FileSystemConfigurations {
public static final boolean DEFAULT_READ_SMALL_FILES_COMPLETELY = false;
public static final boolean DEFAULT_OPTIMIZE_FOOTER_READ = true;
public static final int DEFAULT_FOOTER_READ_BUFFER_SIZE = 512 * ONE_KB;
/** Default for the buffered-pread-disable config: buffered pread stays enabled. */
public static final boolean DEFAULT_BUFFERED_PREAD_DISABLE = false;
public static final boolean DEFAULT_ALWAYS_READ_BUFFER_SIZE = false;
public static final int DEFAULT_READ_AHEAD_BLOCK_SIZE = 4 * ONE_MB;
public static final int DEFAULT_READ_AHEAD_RANGE = 64 * ONE_KB; // 64 KB
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,71 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.fs.azurebfs.constants;

/**
 * Enumeration for different types of read operations triggered by AbfsInputStream.
 * Each constant carries a short two-letter code ({@link #toString()}) intended to
 * be attached to the tracing context so logs can distinguish which read path
 * produced a request.
 */
public enum ReadType {
/**
 * Synchronous read from the storage service. No optimization is being applied.
 */
DIRECT_READ("DR"),
/**
 * Synchronous read from the storage service where optimizations were considered
 * but found disabled.
 */
NORMAL_READ("NR"),
/**
 * Asynchronous read from the storage service for filling up cache.
 */
PREFETCH_READ("PR"),
/**
 * Synchronous read from the storage service when nothing was found in cache.
 */
MISSEDCACHE_READ("MR"),
/**
 * Synchronous read from the storage service for reading the footer of a file.
 * Only triggered when footer read optimization kicks in.
 */
FOOTER_READ("FR"),
/**
 * Synchronous read from the storage service for reading a small file fully.
 * Only triggered when small file read optimization kicks in.
 */
SMALLFILE_READ("SR"),
/**
 * None of the above read types were applicable.
 */
UNKNOWN_READ("UR");

// Two-letter abbreviation emitted for this read type.
private final String readType;

/**
 * Creates a read type with its short string code.
 * @param readType two-letter code identifying the read type.
 */
ReadType(String readType) {
this.readType = readType;
}

/**
 * Get the read type as a string.
 *
 * @return the read type string
 */
@Override
public String toString() {
return readType;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.classification.VisibleForTesting;
import org.apache.hadoop.fs.PositionedReadable;
import org.apache.hadoop.fs.azurebfs.constants.ReadType;
import org.apache.hadoop.fs.impl.BackReference;
import org.apache.hadoop.util.Preconditions;

Expand Down Expand Up @@ -165,6 +166,7 @@ public AbfsInputStream(
this.tracingContext = new TracingContext(tracingContext);
this.tracingContext.setOperation(FSOperationType.READ);
this.tracingContext.setStreamID(inputStreamId);
this.tracingContext.setReadType(ReadType.UNKNOWN_READ);
this.context = abfsInputStreamContext;
readAheadBlockSize = abfsInputStreamContext.getReadAheadBlockSize();
if (abfsReadFooterMetrics != null) {
Expand Down Expand Up @@ -227,7 +229,9 @@ public int read(long position, byte[] buffer, int offset, int length)
if (streamStatistics != null) {
streamStatistics.readOperationStarted();
}
int bytesRead = readRemote(position, buffer, offset, length, tracingContext);
TracingContext tc = new TracingContext(tracingContext);
tc.setReadType(ReadType.DIRECT_READ);
int bytesRead = readRemote(position, buffer, offset, length, tc);
if (statistics != null) {
statistics.incrementBytesRead(bytesRead);
}
Expand Down Expand Up @@ -345,6 +349,8 @@ private int readOneBlock(final byte[] b, final int off, final int len) throws IO
buffer = new byte[bufferSize];
}

// Reset Read Type back to normal and set again based on code flow.
tracingContext.setReadType(ReadType.NORMAL_READ);
if (alwaysReadBufferSize) {
bytesRead = readInternal(fCursor, buffer, 0, bufferSize, false);
} else {
Expand Down Expand Up @@ -385,6 +391,7 @@ private int readFileCompletely(final byte[] b, final int off, final int len)
// data need to be copied to user buffer from index bCursor, bCursor has
// to be the current fCusor
bCursor = (int) fCursor;
tracingContext.setReadType(ReadType.SMALLFILE_READ);
return optimisedRead(b, off, len, 0, contentLength);
}

Expand All @@ -405,6 +412,7 @@ private int readLastBlock(final byte[] b, final int off, final int len)
bCursor = (int) (fCursor - lastBlockStart);
// 0 if contentlength is < buffersize
long actualLenToRead = min(footerReadSize, contentLength);
tracingContext.setReadType(ReadType.FOOTER_READ);
return optimisedRead(b, off, len, lastBlockStart, actualLenToRead);
}

Expand Down Expand Up @@ -520,6 +528,7 @@ private int readInternal(final long position, final byte[] b, final int offset,
LOG.debug("read ahead enabled issuing readheads num = {}", numReadAheads);
TracingContext readAheadTracingContext = new TracingContext(tracingContext);
readAheadTracingContext.setPrimaryRequestID();
readAheadTracingContext.setReadType(ReadType.PREFETCH_READ);
while (numReadAheads > 0 && nextOffset < contentLength) {
LOG.debug("issuing read ahead requestedOffset = {} requested size {}",
nextOffset, nextSize);
Expand All @@ -544,7 +553,9 @@ private int readInternal(final long position, final byte[] b, final int offset,
}

// got nothing from read-ahead, do our own read now
receivedBytes = readRemote(position, b, offset, length, new TracingContext(tracingContext));
TracingContext tc = new TracingContext(tracingContext);
tc.setReadType(ReadType.MISSEDCACHE_READ);
receivedBytes = readRemote(position, b, offset, length, tc);
return receivedBytes;
} else {
LOG.debug("read ahead disabled, reading remote");
Expand Down Expand Up @@ -578,6 +589,7 @@ int readRemote(long position, byte[] b, int offset, int length, TracingContext t
streamStatistics.remoteReadOperation();
}
LOG.trace("Trigger client.read for path={} position={} offset={} length={}", path, position, offset, length);
tracingContext.setPosition(String.valueOf(position));

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is there a test to verify position is correctly added to tracing context? Position is a key identifier for read operations.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks for the suggestion. I updated the current test to assert on position as well.

op = client.read(path, position, b, offset, length,
tolerateOobAppends ? "*" : eTag, cachedSasToken.get(),
contextEncryptionAdapter, tracingContext);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -126,7 +126,7 @@ public boolean execute() throws AzureBlobFileSystemException {
*/
deleted = recursive ? safeDelete(path) : deleteInternal(path);
} finally {
tracingContext.setOperatedBlobCount(null);
tracingContext.setOperatedBlobCount(0);
}
if (deleteCount.get() == 0) {
/*
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -204,7 +204,7 @@ private boolean finalSrcRename() throws AzureBlobFileSystemException {
}
throw e;
} finally {
tracingContext.setOperatedBlobCount(null);
tracingContext.setOperatedBlobCount(0);
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
package org.apache.hadoop.fs.azurebfs.utils;

import org.apache.hadoop.fs.azurebfs.constants.FSOperationType;
import org.apache.hadoop.fs.azurebfs.constants.ReadType;

/**
* Interface for testing identifiers tracked via TracingContext
Expand All @@ -32,4 +33,5 @@ public interface Listener {
void setOperation(FSOperationType operation);
void updateIngressHandler(String ingressHandler);
void updatePosition(String position);
void updateReadType(ReadType readType);
}
Loading