Skip to content
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -1424,7 +1424,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final long DFS_JOURNALNODE_SYNC_INTERVAL_DEFAULT = 2*60*1000L;
public static final String DFS_JOURNALNODE_EDIT_CACHE_SIZE_KEY =
"dfs.journalnode.edit-cache-size.bytes";
public static final int DFS_JOURNALNODE_EDIT_CACHE_SIZE_DEFAULT = 1024 * 1024;

public static final String DFS_JOURNALNODE_EDIT_CACHE_SIZE_FRACTION_KEY =
"dfs.journalnode.edit-cache-size.fraction";
public static final float DFS_JOURNALNODE_EDIT_CACHE_SIZE_FRACTION_DEFAULT = 0.5f;

// Journal-node related configs for the client side.
public static final String DFS_QJOURNAL_QUEUE_SIZE_LIMIT_KEY = "dfs.qjournal.queued-edits.limit.mb";
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -121,12 +121,14 @@ class JournaledEditsCache {
// ** End lock-protected fields **

JournaledEditsCache(Configuration conf) {
float fraction = conf.getFloat(DFSConfigKeys.DFS_JOURNALNODE_EDIT_CACHE_SIZE_FRACTION_KEY,
DFSConfigKeys.DFS_JOURNALNODE_EDIT_CACHE_SIZE_FRACTION_DEFAULT);
capacity = conf.getInt(DFSConfigKeys.DFS_JOURNALNODE_EDIT_CACHE_SIZE_KEY,
DFSConfigKeys.DFS_JOURNALNODE_EDIT_CACHE_SIZE_DEFAULT);
(int) Math.floor(Runtime.getRuntime().maxMemory() * fraction));
if (capacity > 0.9 * Runtime.getRuntime().maxMemory()) {
Journal.LOG.warn(String.format("Cache capacity is set at %d bytes but " +
"maximum JVM memory is only %d bytes. It is recommended that you " +
"decrease the cache size or increase the heap size.",
"decrease the cache size/fraction or increase the heap size.",
capacity, Runtime.getRuntime().maxMemory()));
}
Journal.LOG.info("Enabling the journaled edits cache with a capacity " +
Expand Down Expand Up @@ -277,11 +279,10 @@ void storeEdits(byte[] inputData, long newStartTxn, long newEndTxn,
initialize(INVALID_TXN_ID);
Journal.LOG.warn(String.format("A single batch of edits was too " +
"large to fit into the cache: startTxn = %d, endTxn = %d, " +
"input length = %d. The capacity of the cache (%s) must be " +
"input length = %d. The capacity of the cache must be " +
"increased for it to work properly (current capacity %d)." +
"Cache is now empty.",
newStartTxn, newEndTxn, inputData.length,
DFSConfigKeys.DFS_JOURNALNODE_EDIT_CACHE_SIZE_KEY, capacity));
newStartTxn, newEndTxn, inputData.length, capacity));
return;
}
if (dataMap.isEmpty()) {
Expand Down Expand Up @@ -388,10 +389,9 @@ private CacheMissException getCacheMissException(long requestedTxnId) {
} else {
return new CacheMissException(lowestTxnId - requestedTxnId,
"Oldest txn ID available in the cache is %d, but requested txns " +
"starting at %d. The cache size (%s) may need to be increased " +
"starting at %d. The cache size may need to be increased " +
"to hold more transactions (currently %d bytes containing %d " +
"transactions)", lowestTxnId, requestedTxnId,
DFSConfigKeys.DFS_JOURNALNODE_EDIT_CACHE_SIZE_KEY, capacity,
"transactions)", lowestTxnId, requestedTxnId, capacity,
highestTxnId - lowestTxnId + 1);
}
}
Expand All @@ -414,4 +414,9 @@ long getCacheMissAmount() {

}

/**
 * @return the configured capacity of the edits cache, in bytes.
 *         Exposed only so tests can verify fraction-based sizing.
 */
@VisibleForTesting
int getCapacity() {
return capacity;
}

}
Original file line number Diff line number Diff line change
Expand Up @@ -4955,6 +4955,19 @@
</description>
</property>

<property>
<name>dfs.journalnode.edit-cache-size.fraction</name>
<value>0.5</value>
<description>
The fraction of the JVM's maximum memory to use when sizing the edits
cache that the JournalNode keeps in memory. The recommended value is
less than 0.9. Prefer this property over
dfs.journalnode.edit-cache-size.bytes; if
dfs.journalnode.edit-cache-size.bytes is set, this property has no
effect.
</description>
</property>

<property>
<name>dfs.journalnode.kerberos.internal.spnego.principal</name>
<value></value>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -502,6 +502,12 @@ lag time will be much longer. The relevant configurations are:
the oldest data in the cache was at transaction ID 20, a value of 10 would be added to the
average.

* **dfs.journalnode.edit-cache-size.fraction** - The fraction of the JVM's maximum memory
  to use when sizing the edits cache that the JournalNode keeps in memory.
  The recommended value is less than 0.9. Prefer this property over
  dfs.journalnode.edit-cache-size.bytes; if dfs.journalnode.edit-cache-size.bytes is set,
  this property has no effect.

This feature is primarily useful in conjunction with the Standby/Observer Read feature. Using this
feature, read requests can be serviced from non-active NameNodes; thus tailing in-progress edits
provides these nodes with the ability to serve requests with data which is much more fresh. See the
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -194,6 +194,25 @@ few configurations to your **hdfs-site.xml**:
<value>1048576</value>
</property>

* **dfs.journalnode.edit-cache-size.fraction** - The fraction of the
  JVM's maximum memory to use when sizing the edits
  cache that the JournalNode keeps in memory.
  The recommended value is less than 0.9.
  The cache is used for serving edits via
  RPC-based tailing, and is only effective when
  dfs.ha.tail-edits.in-progress is turned on.
  Prefer this property over
  dfs.journalnode.edit-cache-size.bytes;
  if dfs.journalnode.edit-cache-size.bytes is set,
  this property has no effect.

<property>
<name>dfs.journalnode.edit-cache-size.fraction</name>
<value>0.5</value>
</property>

* **dfs.namenode.accesstime.precision** -- whether to enable access
time for HDFS file.

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -56,8 +56,11 @@ public class TestJournaledEditsCache {
PathUtils.getTestDir(TestJournaledEditsCache.class, false);
private JournaledEditsCache cache;

private long memory;

@Before
public void setup() throws Exception {
memory = Runtime.getRuntime().maxMemory();
Configuration conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_JOURNALNODE_EDIT_CACHE_SIZE_KEY,
createTxnData(1, 1).length * EDITS_CAPACITY);
Expand Down Expand Up @@ -221,6 +224,24 @@ public void testCacheMalformedInput() throws Exception {
cache.retrieveEdits(-1, 10, new ArrayList<>());
}

@Test
public void testCacheFraction() {
  // Case 1: dfs.journalnode.edit-cache-size.bytes is explicitly set, so it
  // takes precedence over the fraction-based sizing.
  Configuration config = new Configuration();
  config.setInt(DFSConfigKeys.DFS_JOURNALNODE_EDIT_CACHE_SIZE_KEY, 1);
  config.setFloat(DFSConfigKeys.DFS_JOURNALNODE_EDIT_CACHE_SIZE_FRACTION_KEY, 0.1f);
  cache = new JournaledEditsCache(config);
  assertEquals(1, cache.getCapacity());

  // Case 2: no explicit byte size, so the capacity is derived from the
  // fraction of the JVM's max memory. Compute the expectation exactly the
  // way the cache does -- float multiply, Math.floor, cast to int --
  // otherwise the assertion is flaky whenever memory * fraction is not
  // integral (the old 0.0-delta double comparison had that bug).
  Configuration config1 = new Configuration();
  config1.setFloat(DFSConfigKeys.DFS_JOURNALNODE_EDIT_CACHE_SIZE_FRACTION_KEY, 0.1f);
  cache = new JournaledEditsCache(config1);
  float fraction = config1.getFloat(
      DFSConfigKeys.DFS_JOURNALNODE_EDIT_CACHE_SIZE_FRACTION_KEY, 0.0f);
  assertEquals((int) Math.floor(memory * fraction), cache.getCapacity());
}

private void storeEdits(int startTxn, int endTxn) throws Exception {
cache.storeEdits(createTxnData(startTxn, endTxn - startTxn + 1), startTxn,
endTxn, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
Expand Down