diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/DataNodeLockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/DataNodeLockManager.java index e7a3b38357ac9..cd2b4b6905bfb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/DataNodeLockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/DataNodeLockManager.java @@ -35,22 +35,22 @@ enum LockLevel { /** * Acquire readLock and then lock. */ - T readLock(LockLevel level, String... resources); + T readLock(LockLevel level, String opName, String... resources); /** * Acquire writeLock and then lock. */ - T writeLock(LockLevel level, String... resources); + T writeLock(LockLevel level, String opName, String... resources); /** * Add a lock to LockManager. */ - void addLock(LockLevel level, String... resources); + void addLock(LockLevel level, String opName, String... resources); /** * Remove a lock from LockManager. */ - void removeLock(LockLevel level, String... resources); + void removeLock(LockLevel level, String opName, String... resources); /** * LockManager may need to back hook. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/NoLockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/NoLockManager.java index 848495cc4e018..78adefb1038d0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/NoLockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/NoLockManager.java @@ -45,21 +45,21 @@ public NoLockManager() { } @Override - public AutoCloseDataSetLock readLock(LockLevel level, String... resources) { + public AutoCloseDataSetLock readLock(LockLevel level, String opName, String... 
resources) { return lock; } @Override - public AutoCloseDataSetLock writeLock(LockLevel level, String... resources) { + public AutoCloseDataSetLock writeLock(LockLevel level, String opName, String... resources) { return lock; } @Override - public void addLock(LockLevel level, String... resources) { + public void addLock(LockLevel level, String opName, String... resources) { } @Override - public void removeLock(LockLevel level, String... resources) { + public void removeLock(LockLevel level, String opName, String... resources) { } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java index 844b67ce1a877..7abf25fb03558 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java @@ -311,7 +311,7 @@ private void connectToNNAndHandshake() throws IOException { NamespaceInfo nsInfo = retrieveNamespaceInfo(); // init block pool lock when init. - dn.getDataSetLockManager().addLock(LockLevel.BLOCK_POOl, + dn.getDataSetLockManager().addLock(LockLevel.BLOCK_POOl, "connectToNNAndHandshake", nsInfo.getBlockPoolID()); // Verify that this matches the other NN in this HA pair. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java index 5c4212fea537f..7e4345a98cdc0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java @@ -258,7 +258,7 @@ class BlockSender implements java.io.Closeable { ChunkChecksum chunkChecksum = null; final long replicaVisibleLength; try (AutoCloseableLock lock = datanode.getDataSetLockManager().readLock( - LockLevel.BLOCK_POOl, block.getBlockPoolId())) { + LockLevel.BLOCK_POOl, "BlockSender", block.getBlockPoolId())) { replica = getReplica(block, datanode); replicaVisibleLength = replica.getVisibleLength(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 10438cd79e39c..5dc33f9d84876 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -3460,7 +3460,8 @@ void transferReplicaForPipelineRecovery(final ExtendedBlock b, //get replica information try (AutoCloseableLock lock = dataSetLockManager.readLock( - LockLevel.BLOCK_POOl, b.getBlockPoolId())) { + LockLevel.BLOCK_POOl, + "transferReplicaForPipelineRecovery", b.getBlockPoolId())) { Block storedBlock = data.getStoredBlock(b.getBlockPoolId(), b.getBlockId()); if (null == storedBlock) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataSetLockManager.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataSetLockManager.java index 1d59f87ab2b82..127ec5b8e589b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataSetLockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataSetLockManager.java @@ -78,21 +78,22 @@ public synchronized AutoCloseDataSetLock getWriteLock(String name) { /** * Generate lock order string concatenates with lock name. * @param level which level lock want to acquire. + * @param opName operation name, for tracing only; deliberately excluded from the generated lock name so that every operation maps to the same underlying lock * @param resources lock name by lock order. * @return lock order string concatenates with lock name. */ - private String generateLockName(LockLevel level, String... resources) { + private String generateLockName(LockLevel level, String opName, String... resources) { if (resources.length == 1 && level == LockLevel.BLOCK_POOl) { if (resources[0] == null) { throw new IllegalArgumentException("acquire a null block pool lock"); } - return resources[0]; + return resources[0]; } else if (resources.length == 2 && level == LockLevel.VOLUME) { if (resources[0] == null || resources[1] == null) { throw new IllegalArgumentException("acquire a null bp lock : " + resources[0] + "volume lock :" + resources[1]); } - return resources[0] + resources[1]; + return resources[0] + resources[1]; } else { throw new IllegalArgumentException("lock level do not match resource"); } @@ -149,12 +150,12 @@ public DataSetLockManager() { } @Override - public AutoCloseDataSetLock readLock(LockLevel level, String... resources) { + public AutoCloseDataSetLock readLock(LockLevel level, String opName, String... 
resources) { if (level == LockLevel.BLOCK_POOl) { - return getReadLock(level, resources[0]); + return getReadLock(level, opName, resources[0]); } else { - AutoCloseDataSetLock bpLock = getReadLock(LockLevel.BLOCK_POOl, resources[0]); - AutoCloseDataSetLock volLock = getReadLock(level, resources); + AutoCloseDataSetLock bpLock = getReadLock(LockLevel.BLOCK_POOl, opName, resources[0]); + AutoCloseDataSetLock volLock = getReadLock(level, opName, resources); volLock.setParentLock(bpLock); if (openLockTrace) { LOG.info("Sub lock " + resources[0] + resources[1] + " parent lock " + @@ -165,12 +166,12 @@ public AutoCloseDataSetLock readLock(LockLevel level, String... resources) { } @Override - public AutoCloseDataSetLock writeLock(LockLevel level, String... resources) { + public AutoCloseDataSetLock writeLock(LockLevel level, String opName, String... resources) { if (level == LockLevel.BLOCK_POOl) { - return getWriteLock(level, resources[0]); + return getWriteLock(level, opName, resources[0]); } else { - AutoCloseDataSetLock bpLock = getReadLock(LockLevel.BLOCK_POOl, resources[0]); - AutoCloseDataSetLock volLock = getWriteLock(level, resources); + AutoCloseDataSetLock bpLock = getReadLock(LockLevel.BLOCK_POOl, opName, resources[0]); + AutoCloseDataSetLock volLock = getWriteLock(level, opName, resources); volLock.setParentLock(bpLock); if (openLockTrace) { LOG.info("Sub lock " + resources[0] + resources[1] + " parent lock " + @@ -183,8 +184,8 @@ public AutoCloseDataSetLock writeLock(LockLevel level, String... resources) { /** * Return a not null ReadLock. */ - private AutoCloseDataSetLock getReadLock(LockLevel level, String... resources) { - String lockName = generateLockName(level, resources); + private AutoCloseDataSetLock getReadLock(LockLevel level, String opName, String... 
resources) { + String lockName = generateLockName(level, opName, resources); AutoCloseDataSetLock lock = lockMap.getReadLock(lockName); if (lock == null) { LOG.warn("Ignore this error during dn restart: Not existing readLock " @@ -202,8 +203,8 @@ private AutoCloseDataSetLock getReadLock(LockLevel level, String... resources) { /** * Return a not null WriteLock. */ - private AutoCloseDataSetLock getWriteLock(LockLevel level, String... resources) { - String lockName = generateLockName(level, resources); + private AutoCloseDataSetLock getWriteLock(LockLevel level, String opName, String... resources) { + String lockName = generateLockName(level, opName, resources); AutoCloseDataSetLock lock = lockMap.getWriteLock(lockName); if (lock == null) { LOG.warn("Ignore this error during dn restart: Not existing writeLock" @@ -219,8 +220,8 @@ private AutoCloseDataSetLock getWriteLock(LockLevel level, String... resources) } @Override - public void addLock(LockLevel level, String... resources) { - String lockName = generateLockName(level, resources); + public void addLock(LockLevel level, String opName, String... resources) { + String lockName = generateLockName(level, opName, resources); if (level == LockLevel.BLOCK_POOl) { lockMap.addLock(lockName, new ReentrantReadWriteLock(isFair)); } else { @@ -230,9 +231,9 @@ public void addLock(LockLevel level, String... resources) { } @Override - public void removeLock(LockLevel level, String... resources) { - String lockName = generateLockName(level, resources); - try (AutoCloseDataSetLock lock = writeLock(level, resources)) { + public void removeLock(LockLevel level, String opName, String... 
resources) { + String lockName = generateLockName(level, opName, resources); + try (AutoCloseDataSetLock lock = writeLock(level, opName, resources)) { lock.lock(); lockMap.removeLock(lockName); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index 633eeab03e9cc..4f1cc8712b3aa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -187,7 +187,7 @@ public StorageReport[] getStorageReports(String bpid) @Override public FsVolumeImpl getVolume(final ExtendedBlock b) { try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl, - b.getBlockPoolId())) { + "getVolume", b.getBlockPoolId())) { final ReplicaInfo r = volumeMap.get(b.getBlockPoolId(), b.getLocalBlock()); return r != null ? 
(FsVolumeImpl) r.getVolume() : null; @@ -198,7 +198,7 @@ public FsVolumeImpl getVolume(final ExtendedBlock b) { public Block getStoredBlock(String bpid, long blkid) throws IOException { try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl, - bpid)) { + "getStoredBlock", bpid)) { ReplicaInfo r = volumeMap.get(bpid, blkid); if (r == null) { return null; @@ -210,7 +210,8 @@ public Block getStoredBlock(String bpid, long blkid) @Override public Set deepCopyReplica(String bpid) throws IOException { - try (AutoCloseDataSetLock l = lockManager.readLock(LockLevel.BLOCK_POOl, bpid)) { + try (AutoCloseDataSetLock l = lockManager.readLock(LockLevel.BLOCK_POOl, + "deepCopyReplica", bpid)) { Set replicas = new HashSet<>(); volumeMap.replicas(bpid, (iterator) -> { while (iterator.hasNext()) { @@ -428,7 +429,8 @@ private synchronized void activateVolume( Storage.StorageDirectory sd, StorageType storageType, FsVolumeReference ref) throws IOException { for (String bp : volumeMap.getBlockPoolList()) { - lockManager.addLock(LockLevel.VOLUME, bp, ref.getVolume().getStorageID()); + lockManager.addLock(LockLevel.VOLUME, "activateVolume", + bp, ref.getVolume().getStorageID()); } DatanodeStorage dnStorage = storageMap.get(sd.getStorageUuid()); if (dnStorage != null) { @@ -522,7 +524,8 @@ public void addVolume(final StorageLocation location, for (final NamespaceInfo nsInfo : nsInfos) { String bpid = nsInfo.getBlockPoolID(); - try (AutoCloseDataSetLock l = lockManager.writeLock(LockLevel.BLOCK_POOl, bpid)) { + try (AutoCloseDataSetLock l = lockManager.writeLock(LockLevel.BLOCK_POOl, + "addVolume", bpid)) { fsVolume.addBlockPool(bpid, this.conf, this.timer); fsVolume.getVolumeMap(bpid, tempVolumeMap, ramDiskReplicaTracker); } catch (IOException e) { @@ -628,7 +631,7 @@ public void removeVolumes( for (String storageUuid : storageToRemove) { storageMap.remove(storageUuid); for (String bp : volumeMap.getBlockPoolList()) { - lockManager.removeLock(LockLevel.VOLUME, bp, 
storageUuid); + lockManager.removeLock(LockLevel.VOLUME, "removeVolumes", bp, storageUuid); } } } @@ -819,7 +822,7 @@ public InputStream getBlockInputStream(ExtendedBlock b, ReplicaInfo info; try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl, - b.getBlockPoolId())) { + "getBlockInputStream", b.getBlockPoolId())) { info = volumeMap.get(b.getBlockPoolId(), b.getLocalBlock()); } @@ -1091,7 +1094,7 @@ public ReplicaInfo moveBlockAcrossStorage(ExtendedBlock block, boolean useVolumeOnSameMount = false; try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl, - block.getBlockPoolId())) { + "moveBlockAcrossStorage", block.getBlockPoolId())) { if (shouldConsiderSameMountVolume) { volumeRef = volumes.getVolumeByMount(targetStorageType, ((FsVolumeImpl) replicaInfo.getVolume()).getMount(), @@ -1286,7 +1289,7 @@ public ReplicaInfo moveBlockAcrossVolumes(ExtendedBlock block, FsVolumeSpi FsVolumeReference volumeRef = null; try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl, - block.getBlockPoolId())) { + "moveBlockAcrossVolumes", block.getBlockPoolId())) { volumeRef = destination.obtainReference(); } @@ -1530,7 +1533,7 @@ public ReplicaHandler recoverAppend( while (true) { try { try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl, - b.getBlockPoolId())) { + "recoverAppend", b.getBlockPoolId())) { ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen); FsVolumeReference ref = replicaInfo.getVolume().obtainReference(); ReplicaInPipeline replica; @@ -1587,7 +1590,7 @@ public ReplicaHandler createRbw( boolean allowLazyPersist) throws IOException { long startTimeMs = Time.monotonicNow(); try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl, - b.getBlockPoolId())) { + "createRbw", b.getBlockPoolId())) { ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId()); if (replicaInfo != null) { @@ -1840,7 +1843,7 @@ public ReplicaHandler createTemporary(StorageType 
storageType, boolean isInPipeline = false; do { try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl, - b.getBlockPoolId())) { + "createTemporary", b.getBlockPoolId())) { ReplicaInfo currentReplicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId()); if (currentReplicaInfo == lastFoundReplicaInfo) { @@ -2103,7 +2106,8 @@ public Map getBlockReports(String bpid) { new HashMap(); List curVolumes = null; - try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl, bpid)) { + try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl, "getBlockReports", + bpid)) { curVolumes = volumes.getVolumes(); for (FsVolumeSpi v : curVolumes) { builders.put(v.getStorageID(), BlockListAsLongs.builder(maxDataLength)); @@ -2169,7 +2173,8 @@ public Map getBlockReports(String bpid) { */ @Override public List getFinalizedBlocks(String bpid) { - try (AutoCloseDataSetLock l = lockManager.readLock(LockLevel.BLOCK_POOl, bpid)) { + try (AutoCloseDataSetLock l = lockManager.readLock(LockLevel.BLOCK_POOl, + "getFinalizedBlocks", bpid)) { ArrayList finalized = new ArrayList<>(volumeMap.size(bpid)); volumeMap.replicas(bpid, (iterator) -> { @@ -2312,7 +2317,8 @@ private void invalidate(String bpid, Block[] invalidBlks, boolean async) for (int i = 0; i < invalidBlks.length; i++) { final ReplicaInfo removing; final FsVolumeImpl v; - try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl, bpid)) { + try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl, + "invalidate", bpid)) { final ReplicaInfo info = volumeMap.get(bpid, invalidBlks[i]); if (info == null) { ReplicaInfo infoByBlockId = @@ -2511,7 +2517,7 @@ public boolean isCached(String bpid, long blockId) { @Override // FsDatasetSpi public boolean contains(final ExtendedBlock block) { try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl, - block.getBlockPoolId())) { + "contains", block.getBlockPoolId())) { final long blockId = 
block.getLocalBlock().getBlockId(); final String bpid = block.getBlockPoolId(); final ReplicaInfo r = volumeMap.get(bpid, blockId); @@ -2862,7 +2868,8 @@ public ReplicaInfo getReplica(String bpid, long blockId) { @Override public String getReplicaString(String bpid, long blockId) { - try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl, bpid)) { + try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl, + "getReplicaString", bpid)) { final Replica r = volumeMap.get(bpid, blockId); return r == null ? "null" : r.toString(); } @@ -3116,7 +3123,7 @@ private ReplicaInfo updateReplicaUnderRecovery( public long getReplicaVisibleLength(final ExtendedBlock block) throws IOException { try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl, - block.getBlockPoolId())) { + "getReplicaVisibleLength", block.getBlockPoolId())) { final Replica replica = getReplicaInfo(block.getBlockPoolId(), block.getBlockId()); if (replica.getGenerationStamp() < block.getGenerationStamp()) { @@ -3133,7 +3140,8 @@ public void addBlockPool(String bpid, Configuration conf) throws IOException { LOG.info("Adding block pool " + bpid); AddBlockPoolException volumeExceptions = new AddBlockPoolException(); - try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl, bpid)) { + try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl, + "addBlockPool", bpid)) { try { volumes.addBlockPool(bpid, conf); } catch (AddBlockPoolException e) { @@ -3142,7 +3150,8 @@ public void addBlockPool(String bpid, Configuration conf) volumeMap.initBlockPool(bpid); Set vols = storageMap.keySet(); for (String v : vols) { - lockManager.addLock(LockLevel.VOLUME, bpid, v); + lockManager.addLock(LockLevel.VOLUME, "addBlockPool", + bpid, v); } } try { @@ -3167,7 +3176,8 @@ public static void setBlockPoolId(String bpid) { @Override public void shutdownBlockPool(String bpid) { - try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl, 
bpid)) { + try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl, + "shutdownBlockPool", bpid)) { LOG.info("Removing block pool " + bpid); Map blocksPerVolume = getBlockReports(bpid); @@ -3241,7 +3251,8 @@ public Map getVolumeInfoMap() { @Override //FsDatasetSpi public void deleteBlockPool(String bpid, boolean force) throws IOException { - try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl, bpid)) { + try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl, + "deleteBlockPool", bpid)) { List curVolumes = volumes.getVolumes(); if (!force) { for (FsVolumeImpl volume : curVolumes) { @@ -3271,7 +3282,7 @@ public void deleteBlockPool(String bpid, boolean force) public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block) throws IOException { try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl, - block.getBlockPoolId())) { + "getBlockLocalPathInfo", block.getBlockPoolId())) { final Replica replica = volumeMap.get(block.getBlockPoolId(), block.getBlockId()); if (replica == null) { @@ -3325,7 +3336,8 @@ public void clearRollingUpgradeMarker(String bpid) throws IOException { @Override public void onCompleteLazyPersist(String bpId, long blockId, long creationTime, File[] savedFiles, FsVolumeImpl targetVolume) { - try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl, bpId)) { + try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl, + "onCompleteLazyPersist", bpId)) { ramDiskReplicaTracker.recordEndLazyPersist(bpId, blockId, savedFiles); targetVolume.incDfsUsedAndNumBlocks(bpId, savedFiles[0].length() @@ -3528,7 +3540,8 @@ public void evictBlocks(long bytesNeeded) throws IOException { final String bpid = replicaState.getBlockPoolId(); final FsVolumeImpl lazyPersistVolume = replicaState.getLazyPersistVolume(); - try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl, bpid)) { + try (AutoCloseableLock lock = 
lockManager.readLock(LockLevel.BLOCK_POOl, + "evictBlocks", bpid)) { replicaInfo = getReplicaInfo(replicaState.getBlockPoolId(), replicaState.getBlockId()); Preconditions.checkState(replicaInfo.getVolume().isTransientStorage()); @@ -3711,7 +3724,8 @@ public int getVolumeCount() { void stopAllDataxceiverThreads(FsVolumeImpl volume) { for (String bpid : volumeMap.getBlockPoolList()) { try (AutoCloseDataSetLock lock = lockManager - .writeLock(LockLevel.BLOCK_POOl, bpid)) { + .writeLock(LockLevel.BLOCK_POOl, + "stopAllDataxceiverThreads", bpid)) { volumeMap.replicas(bpid, (iterator) -> { while (iterator.hasNext()) { ReplicaInfo replicaInfo = iterator.next(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java index 6ecc48a95fd2d..a7223075a7701 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReplicaMap.java @@ -76,7 +76,7 @@ private void checkBlock(Block b) { } /** - * Get the meta information of the replica that matches both block id + * Get the meta information of the replica that matches both the block id * and generation stamp * @param bpid block pool id * @param block block with its id as the key * @return the replica's meta information */ @@ -96,21 +96,22 @@ ReplicaInfo get(String bpid, Block block) { /** - * Get the meta information of the replica that matches the block id + * Get the meta information of the replica that matches the block id. 
* @param bpid block pool id * @param blockId a block's id * @return the replica's meta information */ ReplicaInfo get(String bpid, long blockId) { checkBlockPool(bpid); - try (AutoCloseDataSetLock l = lockManager.readLock(LockLevel.BLOCK_POOl, bpid)) { + try (AutoCloseDataSetLock l = lockManager.readLock(LockLevel.BLOCK_POOl, + "ReplicaMap.get", bpid)) { LightWeightResizableGSet m = map.get(bpid); return m != null ? m.get(new Block(blockId)) : null; } } /** - * Add a replica's meta information into the map + * Add a replica's meta information into the map. * * @param bpid block pool id * @param replicaInfo a replica's meta information @@ -120,7 +121,8 @@ ReplicaInfo get(String bpid, long blockId) { ReplicaInfo add(String bpid, ReplicaInfo replicaInfo) { checkBlockPool(bpid); checkBlock(replicaInfo); - try (AutoCloseDataSetLock l = lockManager.readLock(LockLevel.BLOCK_POOl, bpid)) { + try (AutoCloseDataSetLock l = lockManager.readLock(LockLevel.BLOCK_POOl, + "ReplicaMap.add", bpid)) { LightWeightResizableGSet m = map.get(bpid); if (m == null) { // Add an entry for block pool if it does not exist already @@ -138,7 +140,8 @@ ReplicaInfo add(String bpid, ReplicaInfo replicaInfo) { ReplicaInfo addAndGet(String bpid, ReplicaInfo replicaInfo) { checkBlockPool(bpid); checkBlock(replicaInfo); - try (AutoCloseDataSetLock l = lockManager.readLock(LockLevel.BLOCK_POOl, bpid)) { + try (AutoCloseDataSetLock l = lockManager.readLock(LockLevel.BLOCK_POOl, + "ReplicaMap.addAndGet", bpid)) { LightWeightResizableGSet m = map.get(bpid); if (m == null) { // Add an entry for block pool if it does not exist already @@ -170,7 +173,8 @@ void mergeAll(ReplicaMap other) { Set bplist = other.map.keySet(); for (String bp : bplist) { checkBlockPool(bp); - try (AutoCloseDataSetLock l = lockManager.writeLock(LockLevel.BLOCK_POOl, bp)) { + try (AutoCloseDataSetLock l = lockManager.writeLock(LockLevel.BLOCK_POOl, + "ReplicaMap.mergeAll", bp)) { LightWeightResizableGSet replicaInfos = 
other.map.get(bp); LightWeightResizableGSet curSet = map.get(bp); HashSet replicaSet = new HashSet<>(); @@ -192,7 +196,7 @@ void mergeAll(ReplicaMap other) { } /** - * Remove the replica's meta information from the map that matches + * Remove the replica's meta information from the map that matches both * the input block's id and generation stamp * @param bpid block pool id * @param block block with its id as the key @@ -202,7 +206,8 @@ void mergeAll(ReplicaMap other) { ReplicaInfo remove(String bpid, Block block) { checkBlockPool(bpid); checkBlock(block); - try (AutoCloseDataSetLock l = lockManager.readLock(LockLevel.BLOCK_POOl, bpid)) { + try (AutoCloseDataSetLock l = lockManager.readLock(LockLevel.BLOCK_POOl, + "ReplicaMap.remove", bpid)) { LightWeightResizableGSet m = map.get(bpid); if (m != null) { ReplicaInfo replicaInfo = m.get(block); @@ -217,14 +222,15 @@ ReplicaInfo remove(String bpid, Block block) { } /** - * Remove the replica's meta information from the map if present + * Remove the replica's meta information from the map if present. * @param bpid block pool id * @param blockId block id of the replica to be removed * @return the removed replica's meta information */ ReplicaInfo remove(String bpid, long blockId) { checkBlockPool(bpid); - try (AutoCloseDataSetLock l = lockManager.readLock(LockLevel.BLOCK_POOl, bpid)) { + try (AutoCloseDataSetLock l = lockManager.readLock(LockLevel.BLOCK_POOl, + "ReplicaMap.remove", bpid)) { LightWeightResizableGSet m = map.get(bpid); if (m != null) { return m.remove(new Block(blockId)); @@ -234,12 +240,13 @@ ReplicaInfo remove(String bpid, long blockId) { } /** - * Get the size of the map for given block pool + * Get the size of the map for given block pool. 
* @param bpid block pool id * @return the number of replicas in the map */ int size(String bpid) { - try (AutoCloseDataSetLock l = lockManager.readLock(LockLevel.BLOCK_POOl, bpid)) { + try (AutoCloseDataSetLock l = lockManager.readLock(LockLevel.BLOCK_POOl, + "ReplicaMap.size", bpid)) { LightWeightResizableGSet m = map.get(bpid); return m != null ? m.size() : 0; } @@ -267,7 +274,8 @@ Collection replicas(String bpid) { */ void replicas(String bpid, Consumer> consumer) { LightWeightResizableGSet m = null; - try (AutoCloseDataSetLock l = lockManager.readLock(LockLevel.BLOCK_POOl, bpid)) { + try (AutoCloseDataSetLock l = lockManager.readLock(LockLevel.BLOCK_POOl, + "ReplicaMap.replicas", bpid)) { m = map.get(bpid); if (m !=null) { m.getIterator(consumer); @@ -277,7 +285,8 @@ void replicas(String bpid, Consumer> consumer) { void initBlockPool(String bpid) { checkBlockPool(bpid); - try (AutoCloseDataSetLock l = lockManager.writeLock(LockLevel.BLOCK_POOl, bpid)) { + try (AutoCloseDataSetLock l = lockManager.writeLock(LockLevel.BLOCK_POOl, + "ReplicaMap.initBlockPool", bpid)) { LightWeightResizableGSet m = map.get(bpid); if (m == null) { // Add an entry for block pool if it does not exist already @@ -289,7 +298,8 @@ void initBlockPool(String bpid) { void cleanUpBlockPool(String bpid) { checkBlockPool(bpid); - try (AutoCloseDataSetLock l = lockManager.writeLock(LockLevel.BLOCK_POOl, bpid)) { + try (AutoCloseDataSetLock l = lockManager.writeLock(LockLevel.BLOCK_POOl, + "ReplicaMap.cleanUpBlockPool", bpid)) { map.remove(bpid); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataSetLockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataSetLockManager.java index b514accdf16e9..8c9735dc2f494 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataSetLockManager.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataSetLockManager.java @@ -35,27 +35,27 @@ public void init() { @Test(timeout = 5000) public void testBaseFunc() { - manager.addLock(LockLevel.BLOCK_POOl, "BPtest"); - manager.addLock(LockLevel.VOLUME, "BPtest", "Volumetest"); + manager.addLock(LockLevel.BLOCK_POOl, "testBaseFunc", "BPtest"); + manager.addLock(LockLevel.VOLUME, "testBaseFunc", "BPtest", "Volumetest"); - AutoCloseDataSetLock lock = manager.writeLock(LockLevel.BLOCK_POOl, "BPtest"); - AutoCloseDataSetLock lock1 = manager.readLock(LockLevel.BLOCK_POOl, "BPtest"); + AutoCloseDataSetLock lock = manager.writeLock(LockLevel.BLOCK_POOl, "testBaseFunc", "BPtest"); + AutoCloseDataSetLock lock1 = manager.readLock(LockLevel.BLOCK_POOl, "testBaseFunc", "BPtest"); lock1.close(); lock.close(); manager.lockLeakCheck(); assertNull(manager.getLastException()); - AutoCloseDataSetLock lock2 = manager.writeLock(LockLevel.VOLUME, "BPtest", "Volumetest"); - AutoCloseDataSetLock lock3 = manager.readLock(LockLevel.VOLUME, "BPtest", "Volumetest"); + AutoCloseDataSetLock lock2 = manager.writeLock(LockLevel.VOLUME, "testBaseFunc", "BPtest", "Volumetest"); + AutoCloseDataSetLock lock3 = manager.readLock(LockLevel.VOLUME, "testBaseFunc", "BPtest", "Volumetest"); lock3.close(); lock2.close(); manager.lockLeakCheck(); assertNull(manager.getLastException()); - AutoCloseDataSetLock lock4 = manager.writeLock(LockLevel.BLOCK_POOl, "BPtest"); - AutoCloseDataSetLock lock5 = manager.readLock(LockLevel.VOLUME, "BPtest", "Volumetest"); + AutoCloseDataSetLock lock4 = manager.writeLock(LockLevel.BLOCK_POOl, "testBaseFunc", "BPtest"); + AutoCloseDataSetLock lock5 = manager.readLock(LockLevel.VOLUME, "testBaseFunc", "BPtest", "Volumetest"); lock5.close(); lock4.close(); @@ -72,8 +72,8 @@ public void testBaseFunc() { @Test(timeout = 5000) public void testAcquireWriteLockError() throws InterruptedException { Thread t = new Thread(() -> { - 
manager.readLock(LockLevel.BLOCK_POOl, "test"); - manager.writeLock(LockLevel.BLOCK_POOl, "test"); + manager.readLock(LockLevel.BLOCK_POOl, "testAcquireWriteLockError", "test"); + manager.writeLock(LockLevel.BLOCK_POOl, "testAcquireWriteLockError", "test"); }); t.start(); Thread.sleep(1000); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hdfs-site.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hdfs-site.xml index cd13532906e64..feccaff40f48e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hdfs-site.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hdfs-site.xml @@ -30,5 +30,9 @@ dfs.namenode.fs-limits.min-block-size 0 - + + + dfs.datanode.lockmanager.trace + true +