hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -159,7 +159,7 @@
private final FSNamesystem namesystem;
private volatile boolean skipQuotaCheck = false; //skip while consuming edits
private final int maxComponentLength;
private final int maxDirItems;
private volatile int maxDirItems;
private final int lsLimit; // max list limit
private final int contentCountLimit; // max content summary counts per run
private final long contentSleepMicroSec;
@@ -217,6 +217,11 @@
// authorizeWithContext() API or not.
private boolean useAuthorizationWithContextAPI = false;

// We need a maximum maximum because by default, PB limits message sizes
// to 64MB. This means we can only store approximately 6.7 million entries
// per directory, but let's use 6.4 million for some safety.
private static final int MAX_DIR_ITEMS = 64 * 100 * 1000;

public void setINodeAttributeProvider(
@Nullable INodeAttributeProvider provider) {
attributeProvider = provider;
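
For context on the comment above: the 6.4 million ceiling follows from simple arithmetic against protobuf's default 64 MB message limit. A minimal sketch of that estimate, assuming roughly 10 bytes per serialized child entry (the per-entry size is an assumption for illustration, not a figure taken from this change):

// Back-of-the-envelope estimate behind the MAX_DIR_ITEMS ceiling.
// ASSUMPTION: ~10 bytes per serialized child entry; the real cost depends
// on the fsimage/protobuf encoding and is not stated in this patch.
public final class MaxDirItemsEstimate {
  public static void main(String[] args) {
    final long pbMessageLimitBytes = 64L * 1024 * 1024;   // default protobuf message size limit (64 MB)
    final long assumedBytesPerEntry = 10L;                 // assumed per-entry cost
    final long theoreticalMax = pbMessageLimitBytes / assumedBytesPerEntry;  // ~6.7 million entries
    final int chosenCap = 64 * 100 * 1000;                 // 6.4 million, leaving some safety margin
    System.out.printf("theoretical max ~= %d, configured ceiling = %d%n", theoreticalMax, chosenCap);
  }
}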
@@ -309,7 +314,7 @@
READ,
READ_LINK,
WRITE, // disallows snapshot paths.
WRITE_LINK,

[Apache Yetus check failure on hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java#L317: javadoc: warning: no comment]
CREATE, // like write, but also blocks invalid path names.
CREATE_LINK;
};
@@ -395,10 +400,6 @@
Preconditions.checkArgument(this.inodeXAttrsLimit >= 0,
"Cannot set a negative limit on the number of xattrs per inode (%s).",
DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY);
// We need a maximum maximum because by default, PB limits message sizes
// to 64MB. This means we can only store approximately 6.7 million entries
// per directory, but let's use 6.4 million for some safety.
final int MAX_DIR_ITEMS = 64 * 100 * 1000;
Preconditions.checkArgument(
maxDirItems > 0 && maxDirItems <= MAX_DIR_ITEMS, "Cannot set "
+ DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY
@@ -580,6 +581,18 @@
return Joiner.on(",").skipNulls().join(protectedDirectories);
}

public void setMaxDirItems(int newVal) {
Preconditions.checkArgument(
newVal > 0 && newVal <= MAX_DIR_ITEMS, "Cannot set "
+ DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY
+ " to a value less than 1 or greater than " + MAX_DIR_ITEMS);
maxDirItems = newVal;
}

public int getMaxDirItems() {
return maxDirItems;
}

BlockManager getBlockManager() {
return getFSNamesystem().getBlockManager();
}
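
A note on the new accessors above: the field is now volatile rather than final so that a value installed by the reconfiguration thread becomes visible to RPC handler threads without additional locking. A simplified, illustrative sketch of how such a limit is typically consulted when a child is added; this is not the exact FSDirectory code, and the class and method names here are hypothetical:

import java.io.IOException;

// Illustrative only: simplified enforcement of a per-directory child limit.
class DirectoryLimitChecker {
  private volatile int maxDirItems;        // written by reconfiguration, read by handler threads

  void setMaxDirItems(int newVal) {
    this.maxDirItems = newVal;             // single volatile write, no lock needed for visibility
  }

  void verifyMaxDirItems(int childCount, String parentPath) throws IOException {
    if (childCount >= maxDirItems) {
      throw new IOException("The directory item limit of " + parentPath
          + " is exceeded: limit=" + maxDirItems + " items=" + childCount);
    }
  }
}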
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
Expand Up @@ -143,6 +143,8 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_IMAGE_PARALLEL_LOAD_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LOCK_DETAILED_METRICS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SLOWPEER_COLLECT_INTERVAL_DEFAULT;
@@ -385,7 +387,8 @@ public enum OperationCategory {
DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY,
DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY,
DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY,
DFS_NAMENODE_SLOWPEER_COLLECT_INTERVAL_KEY));
DFS_NAMENODE_SLOWPEER_COLLECT_INTERVAL_KEY,
DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY));

private static final String USAGE = "Usage: hdfs namenode ["
+ StartupOption.BACKUP.getName() + "] | \n\t["
@@ -2388,6 +2391,8 @@ protected String reconfigurePropertyImpl(String property, String newVal)
|| property.equals(DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY)
|| property.equals(DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY)) {
return reconfigureFSNamesystemLockMetricsParameters(property, newVal);
} else if (property.equals(DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY)) {
return reconfigureMaxDirItems(newVal);
} else {
throw new ReconfigurationException(property, newVal, getConf().get(
property));
@@ -2806,6 +2811,23 @@ private String reconfigureFSNamesystemLockMetricsParameters(final String propert
}
}

private String reconfigureMaxDirItems(String newVal) throws ReconfigurationException {
int newSetting;
namesystem.writeLock(RwLockMode.BM);
try {
getNamesystem().getFSDirectory()
.setMaxDirItems(adjustNewVal(DFS_NAMENODE_MAX_DIRECTORY_ITEMS_DEFAULT, newVal));
newSetting = getNamesystem().getFSDirectory().getMaxDirItems();
LOG.info("RECONFIGURE* changed {} to {}", DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, newSetting);
return String.valueOf(newSetting);
} catch (IllegalArgumentException e) {
throw new ReconfigurationException(DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, newVal,
getConf().get(DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY), e);
} finally {
namesystem.writeUnlock(RwLockMode.BM, "reconfigureMaxDirItems");
}
}

@Override // ReconfigurableBase
protected Configuration getNewConf() {
return new HdfsConfiguration();
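
Once the key is registered in the reconfigurable-properties list above and handled by reconfigureMaxDirItems, an operator can apply a new limit at runtime through the standard dfsadmin reconfiguration flow. A minimal sketch; the NameNode address "nn-host:8020" is a placeholder and error handling is omitted:

import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.util.ToolRunner;

// Illustrative only: drives the same path as
// `hdfs dfsadmin -reconfig namenode <host:port> start`.
public class ReconfigMaxDirItemsExample {
  public static void main(String[] args) throws Exception {
    // 1. Update dfs.namenode.fs-limits.max-directory-items in hdfs-site.xml on the NameNode host.
    // 2. Ask the NameNode to pick up the new value without a restart.
    ToolRunner.run(new HdfsConfiguration(), new DFSAdmin(),
        new String[] {"-reconfig", "namenode", "nn-host:8020", "start"});
    // 3. Poll until the reconfiguration task reports completion.
    ToolRunner.run(new HdfsConfiguration(), new DFSAdmin(),
        new String[] {"-reconfig", "namenode", "nn-host:8020", "status"});
  }
}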
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
Expand Up @@ -39,6 +39,7 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SLOWPEER_COLLECT_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_WRITE_LOCK_REPORTING_THRESHOLD_MS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -865,6 +866,32 @@ public void testReconfigureSlowPeerCollectInterval() throws Exception {
assertEquals(600000, datanodeManager.getSlowPeerCollectionInterval());
}

@Test
public void testReconfigureMaxDirItems() throws Exception {
final NameNode nameNode = cluster.getNameNode();
final FSDirectory fsd = nameNode.namesystem.getFSDirectory();

// By default, DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY is 1024 * 1024.
assertEquals(1024 * 1024, fsd.getMaxDirItems());

// Reconfigure.
nameNode.reconfigureProperty(DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY,
Integer.toString(1024 * 1024 * 2));

// Assert DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY is 1024 * 1024 * 2.
assertEquals(1024 * 1024 * 2, fsd.getMaxDirItems());

// Reconfigure to a negative value and expect failure.
LambdaTestUtils.intercept(ReconfigurationException.class,
"Could not change property dfs.namenode.fs-limits.max-directory-items from '"
+ 1024 * 1024 * 2 + "' to '" + 1024 * 1024 * -1 + "'",
() -> nameNode.reconfigureProperty(DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY,
Integer.toString(1024 * 1024 * -1)));

// Assert DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY is still 1024 * 1024 * 2.
assertEquals(1024 * 1024 * 2, fsd.getMaxDirItems());
}

@AfterEach
public void shutDown() throws IOException {
if (cluster != null) {
Expand Down
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
Expand Up @@ -105,6 +105,7 @@
import org.slf4j.LoggerFactory;

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY;
import static org.apache.hadoop.hdfs.client.HdfsAdmin.TRASH_PERMISSION;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -449,7 +450,7 @@ public void testNameNodeGetReconfigurableProperties() throws IOException, Interr
final List<String> outs = Lists.newArrayList();
final List<String> errs = Lists.newArrayList();
getReconfigurableProperties("namenode", address, outs, errs);
assertEquals(29, outs.size());
assertEquals(30, outs.size());
assertTrue(outs.get(0).contains("Reconfigurable properties:"));
assertEquals(DFS_BLOCK_INVALIDATE_LIMIT_KEY, outs.get(1));
assertEquals(DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY, outs.get(2));
@@ -463,8 +464,9 @@
assertEquals(DFS_NAMENODE_BLOCKPLACEMENTPOLICY_MIN_BLOCKS_FOR_WRITE_KEY, outs.get(10));
assertEquals(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_BLOCKS_PER_LOCK, outs.get(11));
assertEquals(DFS_NAMENODE_DECOMMISSION_BACKOFF_MONITOR_PENDING_LIMIT, outs.get(12));
assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, outs.get(13));
assertEquals(DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY, outs.get(14));
assertEquals(DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, outs.get(13));
assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, outs.get(14));
assertEquals(DFS_NAMENODE_LOCK_DETAILED_METRICS_KEY, outs.get(15));
assertEquals(errs.size(), 0);
}
