diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index d85e7c5823192..dd3193fdadff2 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1744,10 +1744,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final boolean DFS_DATANODE_LOCKMANAGER_TRACE_DEFAULT =
       false;
 
-  public static final String DFS_DATANODE_DATASET_SUBLOCK_COUNT_KEY =
-      "dfs.datanode.dataset.sublock.count";
-  public static final long DFS_DATANODE_DATASET_SUBLOCK_COUNT_DEFAULT = 1000L;
-
   // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry
   @Deprecated
   public static final String DFS_CLIENT_RETRY_POLICY_ENABLED_KEY
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/DataNodeLockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/DataNodeLockManager.java
index cb22a0570627f..e7a3b38357ac9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/DataNodeLockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/DataNodeLockManager.java
@@ -29,8 +29,7 @@ public interface DataNodeLockManager<T extends AutoCloseDataSetLock> {
    */
   enum LockLevel {
     BLOCK_POOl,
-    VOLUME,
-    DIR
+    VOLUME
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataSetLockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataSetLockManager.java
index dafbb4ed50227..3abcf12fc8b12 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataSetLockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataSetLockManager.java
@@ -96,13 +96,6 @@ private String generateLockName(LockLevel level, String... resources) {
             + resources[0] + "volume lock :" + resources[1]);
       }
       return resources[0] + resources[1];
-    } else if (resources.length == 3 && level == LockLevel.DIR) {
-      if (resources[0] == null || resources[1] == null || resources[2] == null) {
-        throw new IllegalArgumentException("acquire a null dataset lock : "
-            + resources[0] + ",volume lock :" + resources[1]
-            + ",subdir lock :" + resources[2]);
-      }
-      return resources[0] + resources[1] + resources[2];
     } else {
       throw new IllegalArgumentException("lock level do not match resource");
     }
@@ -163,7 +156,7 @@ public DataSetLockManager(Configuration conf, DataNode dn) {
   public AutoCloseDataSetLock readLock(LockLevel level, String... resources) {
     if (level == LockLevel.BLOCK_POOl) {
       return getReadLock(level, resources[0]);
-    } else if (level == LockLevel.VOLUME){
+    } else {
       AutoCloseDataSetLock bpLock = getReadLock(LockLevel.BLOCK_POOl, resources[0]);
       AutoCloseDataSetLock volLock = getReadLock(level, resources);
       volLock.setParentLock(bpLock);
@@ -172,17 +165,6 @@ public AutoCloseDataSetLock readLock(LockLevel level, String... resources) {
             resources[0]);
       }
       return volLock;
-    } else {
-      AutoCloseDataSetLock bpLock = getReadLock(LockLevel.BLOCK_POOl, resources[0]);
-      AutoCloseDataSetLock volLock = getReadLock(LockLevel.VOLUME, resources[0], resources[1]);
-      volLock.setParentLock(bpLock);
-      AutoCloseDataSetLock dirLock = getReadLock(level, resources);
-      dirLock.setParentLock(volLock);
-      if (openLockTrace) {
-        LOG.debug("Sub lock " + resources[0] + resources[1] + resources[2] + " parent lock " +
-            resources[0] + resources[1]);
-      }
-      return dirLock;
     }
   }
 
@@ -190,7 +172,7 @@ public AutoCloseDataSetLock readLock(LockLevel level, String... resources) {
   public AutoCloseDataSetLock writeLock(LockLevel level, String... resources) {
     if (level == LockLevel.BLOCK_POOl) {
       return getWriteLock(level, resources[0]);
-    } else if (level == LockLevel.VOLUME) {
+    } else {
       AutoCloseDataSetLock bpLock = getReadLock(LockLevel.BLOCK_POOl, resources[0]);
       AutoCloseDataSetLock volLock = getWriteLock(level, resources);
       volLock.setParentLock(bpLock);
@@ -199,17 +181,6 @@ public AutoCloseDataSetLock writeLock(LockLevel level, String... resources) {
             resources[0]);
       }
       return volLock;
-    } else {
-      AutoCloseDataSetLock bpLock = getReadLock(LockLevel.BLOCK_POOl, resources[0]);
-      AutoCloseDataSetLock volLock = getReadLock(LockLevel.VOLUME, resources[0], resources[1]);
-      volLock.setParentLock(bpLock);
-      AutoCloseDataSetLock dirLock = getWriteLock(level, resources);
-      dirLock.setParentLock(volLock);
-      if (openLockTrace) {
-        LOG.debug("Sub lock " + resources[0] + resources[1] + resources[2] + " parent lock " +
-            resources[0] + resources[1]);
-      }
-      return dirLock;
     }
   }
 
@@ -264,13 +235,8 @@ public void addLock(LockLevel level, String... resources) {
     String lockName = generateLockName(level, resources);
     if (level == LockLevel.BLOCK_POOl) {
       lockMap.addLock(lockName, new ReentrantReadWriteLock(isFair));
-    } else if (level == LockLevel.VOLUME) {
-      lockMap.addLock(resources[0], new ReentrantReadWriteLock(isFair));
-      lockMap.addLock(lockName, new ReentrantReadWriteLock(isFair));
     } else {
       lockMap.addLock(resources[0], new ReentrantReadWriteLock(isFair));
-      lockMap.addLock(generateLockName(LockLevel.VOLUME, resources[0], resources[1]),
-          new ReentrantReadWriteLock(isFair));
       lockMap.addLock(lockName, new ReentrantReadWriteLock(isFair));
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataSetSubLockStrategy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataSetSubLockStrategy.java
deleted file mode 100644
index 7ba1df8df5232..0000000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataSetSubLockStrategy.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.server.datanode;
-
-import java.util.List;
-
-/**
- * This interface is used to generate sub lock name for a blockid.
- */
-public interface DataSetSubLockStrategy {
-
-  /**
-   * Generate sub lock name for the given blockid.
-   * @param blockid the block id.
-   * @return sub lock name for the input blockid.
-   */
-  String blockIdToSubLock(long blockid);
-
-  List<String> getAllSubLockName();
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ModDataSetSubLockStrategy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ModDataSetSubLockStrategy.java
deleted file mode 100644
index 5e736e54716fd..0000000000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ModDataSetSubLockStrategy.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.server.datanode;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.ArrayList;
-import java.util.List;
-
-public class ModDataSetSubLockStrategy implements DataSetSubLockStrategy {
-  public static final Logger LOG = LoggerFactory.getLogger(DataSetSubLockStrategy.class);
-
-  private static final String LOCK_NAME_PERFIX = "SubLock";
-  private long modFactor;
-
-  public ModDataSetSubLockStrategy(long mod) {
-    if (mod <= 0) {
-      mod = 1L;
-    }
-    this.modFactor = mod;
-  }
-
-  @Override
-  public String blockIdToSubLock(long blockid) {
-    return LOCK_NAME_PERFIX + (blockid % modFactor);
-  }
-
-  @Override
-  public List<String> getAllSubLockName() {
-    List<String> res = new ArrayList<>();
-    for (long i = 0L; i < modFactor; i++) {
-      res.add(LOCK_NAME_PERFIX + i);
-    }
-    return res;
-  }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 91b12daef8143..eeec1bb728825 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -65,11 +65,9 @@
 import org.apache.hadoop.hdfs.server.common.DataNodeLockManager.LockLevel;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
 import org.apache.hadoop.hdfs.server.datanode.DataSetLockManager;
-import org.apache.hadoop.hdfs.server.datanode.DataSetSubLockStrategy;
 import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
 import org.apache.hadoop.hdfs.server.datanode.LocalReplica;
-import org.apache.hadoop.hdfs.server.datanode.ModDataSetSubLockStrategy;
 import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -200,9 +198,8 @@ public FsVolumeImpl getVolume(final ExtendedBlock b) {
 
   @Override // FsDatasetSpi
   public Block getStoredBlock(String bpid, long blkid) throws IOException {
-    try (AutoCloseableLock lock = lockManager.readLock(LockLevel.DIR,
-        bpid, getReplicaInfo(bpid, blkid).getStorageUuid(),
-        datasetSubLockStrategy.blockIdToSubLock(blkid))) {
+    try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl,
+        bpid)) {
       ReplicaInfo r = volumeMap.get(bpid, blkid);
       if (r == null) {
         return null;
@@ -291,9 +288,6 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
   private long lastDirScannerNotifyTime;
   private volatile long lastDirScannerFinishTime;
 
-  private final DataSetSubLockStrategy datasetSubLockStrategy;
-  private final long datasetSubLockCount;
-
   /**
    * An FSDataset has a directory where it loads its data files.
    */
@@ -398,9 +392,6 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
         DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_MAX_NOTIFY_COUNT_KEY,
         DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_MAX_NOTIFY_COUNT_DEFAULT);
     lastDirScannerNotifyTime = System.currentTimeMillis();
-    datasetSubLockCount = conf.getLong(DFSConfigKeys.DFS_DATANODE_DATASET_SUBLOCK_COUNT_KEY,
-        DFSConfigKeys.DFS_DATANODE_DATASET_SUBLOCK_COUNT_DEFAULT);
-    this.datasetSubLockStrategy = new ModDataSetSubLockStrategy(datasetSubLockCount);
   }
 
   /**
@@ -439,12 +430,6 @@ private synchronized void activateVolume(
       FsVolumeReference ref) throws IOException {
     for (String bp : volumeMap.getBlockPoolList()) {
       lockManager.addLock(LockLevel.VOLUME, bp, ref.getVolume().getStorageID());
-      List<String> allSubDirNameForDataSetLock = datasetSubLockStrategy.getAllSubLockName();
-      for (String dir : allSubDirNameForDataSetLock) {
-        lockManager.addLock(LockLevel.DIR, bp, ref.getVolume().getStorageID(), dir);
-        LOG.info("Added DIR lock for bpid:{}, volume storageid:{}, dir:{}",
-            bp, ref.getVolume().getStorageID(), dir);
-      }
     }
     DatanodeStorage dnStorage = storageMap.get(sd.getStorageUuid());
     if (dnStorage != null) {
@@ -644,12 +629,6 @@ public void removeVolumes(
     for (String storageUuid : storageToRemove) {
       storageMap.remove(storageUuid);
       for (String bp : volumeMap.getBlockPoolList()) {
-        List<String> allSubDirNameForDataSetLock = datasetSubLockStrategy.getAllSubLockName();
-        for (String dir : allSubDirNameForDataSetLock) {
-          lockManager.removeLock(LockLevel.DIR, bp, storageUuid, dir);
-          LOG.info("Removed DIR lock for bpid:{}, volume storageid:{}, dir:{}",
-              bp, storageUuid, dir);
-        }
         lockManager.removeLock(LockLevel.VOLUME, bp, storageUuid);
       }
     }
@@ -840,9 +819,8 @@ public InputStream getBlockInputStream(ExtendedBlock b,
       long seekOffset) throws IOException {
 
     ReplicaInfo info;
-    try (AutoCloseableLock lock = lockManager.readLock(LockLevel.DIR,
-        b.getBlockPoolId(), getStorageUuidForLock(b),
-        datasetSubLockStrategy.blockIdToSubLock(b.getBlockId()))) {
+    try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl,
+        b.getBlockPoolId())) {
       info = volumeMap.get(b.getBlockPoolId(), b.getLocalBlock());
     }
 
@@ -936,9 +914,8 @@ String getStorageUuidForLock(ExtendedBlock b)
   @Override // FsDatasetSpi
   public ReplicaInputStreams getTmpInputStreams(ExtendedBlock b,
       long blkOffset, long metaOffset) throws IOException {
-    try (AutoCloseDataSetLock l = lockManager.readLock(LockLevel.DIR,
-        b.getBlockPoolId(), getStorageUuidForLock(b),
-        datasetSubLockStrategy.blockIdToSubLock(b.getBlockId()))) {
+    try (AutoCloseDataSetLock l = lockManager.readLock(LockLevel.VOLUME,
+        b.getBlockPoolId(), getStorageUuidForLock(b))) {
       ReplicaInfo info = getReplicaInfo(b);
       FsVolumeReference ref = info.getVolume().obtainReference();
       try {
@@ -1403,9 +1380,8 @@ static void computeChecksum(ReplicaInfo srcReplica, File dstMeta,
   @Override  // FsDatasetSpi
   public ReplicaHandler append(ExtendedBlock b,
       long newGS, long expectedBlockLen) throws IOException {
-    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.DIR,
-        b.getBlockPoolId(), getStorageUuidForLock(b),
-        datasetSubLockStrategy.blockIdToSubLock(b.getBlockId()))) {
+    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.VOLUME,
+        b.getBlockPoolId(), getStorageUuidForLock(b))) {
       // If the block was successfully finalized because all packets
       // were successfully processed at the Datanode but the ack for
       // some of the packets were not received by the client. The client
@@ -1457,9 +1433,8 @@ public ReplicaHandler append(ExtendedBlock b,
   private ReplicaInPipeline append(String bpid,
       ReplicaInfo replicaInfo, long newGS, long estimateBlockLen)
       throws IOException {
-    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.DIR,
-        bpid, replicaInfo.getStorageUuid(),
-        datasetSubLockStrategy.blockIdToSubLock(replicaInfo.getBlockId()))) {
+    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.VOLUME,
+        bpid, replicaInfo.getStorageUuid())) {
       // If the block is cached, start uncaching it.
       if (replicaInfo.getState() != ReplicaState.FINALIZED) {
         throw new IOException("Only a Finalized replica can be appended to; "
@@ -1555,9 +1530,8 @@ public ReplicaHandler recoverAppend(
 
     while (true) {
       try {
-        try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.DIR,
-            b.getBlockPoolId(), getStorageUuidForLock(b),
-            datasetSubLockStrategy.blockIdToSubLock(b.getBlockId()))) {
+        try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl,
+            b.getBlockPoolId())) {
           ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);
           FsVolumeReference ref = replicaInfo.getVolume().obtainReference();
           ReplicaInPipeline replica;
@@ -1590,9 +1564,8 @@ public Replica recoverClose(ExtendedBlock b, long newGS,
         b, newGS, expectedBlockLen);
     while (true) {
       try {
-        try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.DIR,
-            b.getBlockPoolId(), getStorageUuidForLock(b),
-            datasetSubLockStrategy.blockIdToSubLock(b.getBlockId()))) {
+        try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.VOLUME,
+            b.getBlockPoolId(), getStorageUuidForLock(b))) {
           // check replica's state
           ReplicaInfo replicaInfo = recoverCheck(b, newGS, expectedBlockLen);
           // bump the replica's GS
@@ -1677,9 +1650,8 @@ public ReplicaHandler createRbw(
     }
 
     ReplicaInPipeline newReplicaInfo;
-    try (AutoCloseableLock l = lockManager.writeLock(LockLevel.DIR,
-        b.getBlockPoolId(), v.getStorageID(),
-        datasetSubLockStrategy.blockIdToSubLock(b.getBlockId()))) {
+    try (AutoCloseableLock l = lockManager.writeLock(LockLevel.VOLUME,
+        b.getBlockPoolId(), v.getStorageID())) {
       newReplicaInfo = v.createRbw(b);
       if (newReplicaInfo.getReplicaInfo().getState() != ReplicaState.RBW) {
         throw new IOException("CreateRBW returned a replica of state "
@@ -1709,9 +1681,8 @@ public ReplicaHandler recoverRbw(
     try {
       while (true) {
         try {
-          try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.DIR,
-              b.getBlockPoolId(), getStorageUuidForLock(b),
-              datasetSubLockStrategy.blockIdToSubLock(b.getBlockId()))) {
+          try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.VOLUME,
+              b.getBlockPoolId(), getStorageUuidForLock(b))) {
            ReplicaInfo replicaInfo =
                getReplicaInfo(b.getBlockPoolId(), b.getBlockId());
            // check the replica's state
@@ -1742,9 +1713,8 @@ public ReplicaHandler recoverRbw(
   private ReplicaHandler recoverRbwImpl(ReplicaInPipeline rbw,
       ExtendedBlock b, long newGS, long minBytesRcvd, long maxBytesRcvd)
       throws IOException {
-    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.DIR,
-        b.getBlockPoolId(), getStorageUuidForLock(b),
-        datasetSubLockStrategy.blockIdToSubLock(b.getBlockId()))) {
+    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.VOLUME,
+        b.getBlockPoolId(), getStorageUuidForLock(b))) {
       // check generation stamp
       long replicaGenerationStamp = rbw.getGenerationStamp();
       if (replicaGenerationStamp < b.getGenerationStamp() ||
@@ -1805,9 +1775,8 @@ private ReplicaHandler recoverRbwImpl(ReplicaInPipeline rbw,
   public ReplicaInPipeline convertTemporaryToRbw(
       final ExtendedBlock b) throws IOException {
     long startTimeMs = Time.monotonicNow();
-    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.DIR,
-        b.getBlockPoolId(), getStorageUuidForLock(b),
-        datasetSubLockStrategy.blockIdToSubLock(b.getBlockId()))) {
+    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.VOLUME,
+        b.getBlockPoolId(), getStorageUuidForLock(b))) {
       final long blockId = b.getBlockId();
       final long expectedGs = b.getGenerationStamp();
       final long visible = b.getNumBytes();
@@ -1946,9 +1915,8 @@ public ReplicaHandler createTemporary(StorageType storageType,
         .getNumBytes());
     FsVolumeImpl v = (FsVolumeImpl) ref.getVolume();
     ReplicaInPipeline newReplicaInfo;
-    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.DIR,
-        b.getBlockPoolId(), v.getStorageID(),
-        datasetSubLockStrategy.blockIdToSubLock(b.getBlockId()))) {
+    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.VOLUME,
+        b.getBlockPoolId(), v.getStorageID())) {
       try {
         newReplicaInfo = v.createTemporary(b);
         LOG.debug("creating temporary for block: {} on volume: {}",
@@ -2005,9 +1973,8 @@ public void finalizeBlock(ExtendedBlock b, boolean fsyncDir)
     ReplicaInfo replicaInfo = null;
     ReplicaInfo finalizedReplicaInfo = null;
     long startTimeMs = Time.monotonicNow();
-    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.DIR,
-        b.getBlockPoolId(), getStorageUuidForLock(b),
-        datasetSubLockStrategy.blockIdToSubLock(b.getBlockId()))) {
+    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.VOLUME,
+        b.getBlockPoolId(), getStorageUuidForLock(b))) {
       if (Thread.interrupted()) {
         // Don't allow data modifications from interrupted threads
         throw new IOException("Cannot finalize block: " + b + " from Interrupted Thread");
@@ -2043,9 +2010,8 @@ public void finalizeBlock(ExtendedBlock b, boolean fsyncDir)
 
   private ReplicaInfo finalizeReplica(String bpid, ReplicaInfo replicaInfo)
       throws IOException {
-    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.DIR,
-        bpid, replicaInfo.getStorageUuid(),
-        datasetSubLockStrategy.blockIdToSubLock(replicaInfo.getBlockId()))) {
+    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.VOLUME,
+        bpid, replicaInfo.getStorageUuid())) {
       // Compare generation stamp of old and new replica before finalizing
       if (volumeMap.get(bpid, replicaInfo.getBlockId()).getGenerationStamp()
           > replicaInfo.getGenerationStamp()) {
@@ -2094,9 +2060,8 @@ private ReplicaInfo finalizeReplica(String bpid, ReplicaInfo replicaInfo)
   @Override // FsDatasetSpi
   public void unfinalizeBlock(ExtendedBlock b) throws IOException {
     long startTimeMs = Time.monotonicNow();
-    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.DIR,
-        b.getBlockPoolId(), getStorageUuidForLock(b),
-        datasetSubLockStrategy.blockIdToSubLock(b.getBlockId()))) {
+    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.VOLUME,
+        b.getBlockPoolId(), getStorageUuidForLock(b))) {
       ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(),
           b.getLocalBlock());
       if (replicaInfo != null &&
@@ -2494,8 +2459,7 @@ boolean removeReplicaFromMem(final ExtendedBlock block, final FsVolumeImpl volum
     final String bpid = block.getBlockPoolId();
     final Block localBlock = block.getLocalBlock();
     final long blockId = localBlock.getBlockId();
-    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.DIR, bpid, volume.getStorageID(),
-        datasetSubLockStrategy.blockIdToSubLock(blockId))) {
+    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.BLOCK_POOl, bpid)) {
       final ReplicaInfo info = volumeMap.get(bpid, localBlock);
       if (info == null) {
         ReplicaInfo infoByBlockId = volumeMap.get(bpid, blockId);
@@ -2584,8 +2548,8 @@ private void cacheBlock(String bpid, long blockId) {
           bpid + ": ReplicaInfo not found.");
       return;
     }
-    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.DIR, bpid,
-        info.getStorageUuid(), datasetSubLockStrategy.blockIdToSubLock(blockId))) {
+    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.VOLUME, bpid,
+        info.getStorageUuid())) {
       boolean success = false;
       try {
         info = volumeMap.get(bpid, blockId);
@@ -2782,8 +2746,7 @@ public void checkAndUpdate(String bpid, ScanInfo scanInfo)
       lastDirScannerNotifyTime = startTimeMs;
     }
     String storageUuid = vol.getStorageID();
-    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.DIR, bpid,
-        vol.getStorageID(), datasetSubLockStrategy.blockIdToSubLock(blockId))) {
+    try (AutoCloseableLock lock = lockManager.writeLock(LockLevel.VOLUME, bpid, storageUuid)) {
       if (!storageMap.containsKey(storageUuid)) {
         // Storage was already removed
         return;
@@ -3268,9 +3231,8 @@ private ReplicaInfo updateReplicaUnderRecovery(
   @Override // FsDatasetSpi
   public long getReplicaVisibleLength(final ExtendedBlock block)
      throws IOException {
-    try (AutoCloseableLock lock = lockManager.readLock(LockLevel.DIR,
-        block.getBlockPoolId(), getStorageUuidForLock(block),
-        datasetSubLockStrategy.blockIdToSubLock(block.getBlockId()))) {
+    try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl,
+        block.getBlockPoolId())) {
       final Replica replica = getReplicaInfo(block.getBlockPoolId(),
           block.getBlockId());
       if (replica.getGenerationStamp() < block.getGenerationStamp()) {
@@ -3297,12 +3259,6 @@ public void addBlockPool(String bpid, Configuration conf)
       Set<String> vols = storageMap.keySet();
       for (String v : vols) {
         lockManager.addLock(LockLevel.VOLUME, bpid, v);
-        List<String> allSubDirNameForDataSetLock = datasetSubLockStrategy.getAllSubLockName();
-        for (String dir : allSubDirNameForDataSetLock) {
-          lockManager.addLock(LockLevel.DIR, bpid, v, dir);
-          LOG.info("Added DIR lock for bpid:{}, volume storageid:{}, dir:{}",
-              bpid, v, dir);
-        }
       }
     }
     try {
@@ -3430,9 +3386,8 @@ public void deleteBlockPool(String bpid, boolean force)
   @Override // FsDatasetSpi
   public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block)
       throws IOException {
-    try (AutoCloseableLock lock = lockManager.readLock(LockLevel.DIR,
-        block.getBlockPoolId(), getStorageUuidForLock(block),
-        datasetSubLockStrategy.blockIdToSubLock(block.getBlockId()))) {
+    try (AutoCloseableLock lock = lockManager.readLock(LockLevel.BLOCK_POOl,
+        block.getBlockPoolId())) {
       final Replica replica = volumeMap.get(block.getBlockPoolId(),
           block.getBlockId());
       if (replica == null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 6bfed9a2904c0..2ab25f8329ce6 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -6568,15 +6568,6 @@
     problem. In produce default set false, because it's have little performance loss.
   </description>
 </property>
-
-<property>
-  <name>dfs.datanode.dataset.sublock.count</name>
-  <value>1000</value>
-  <description>
-    The dataset readwrite lock counts for a volume.
-  </description>
-</property>
-
 <property>
   <name>dfs.client.fsck.connect.timeout</name>
   <value>60000ms</value>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataSetLockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataSetLockManager.java
index 6cb12d2681f82..b514accdf16e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataSetLockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataSetLockManager.java
@@ -37,7 +37,6 @@ public void init() {
   public void testBaseFunc() {
     manager.addLock(LockLevel.BLOCK_POOl, "BPtest");
     manager.addLock(LockLevel.VOLUME, "BPtest", "Volumetest");
-    manager.addLock(LockLevel.DIR, "BPtest", "Volumetest", "SubDirtest");
 
     AutoCloseDataSetLock lock = manager.writeLock(LockLevel.BLOCK_POOl, "BPtest");
     AutoCloseDataSetLock lock1 = manager.readLock(LockLevel.BLOCK_POOl, "BPtest");
@@ -63,16 +62,6 @@ public void testBaseFunc() {
     manager.lockLeakCheck();
     assertNull(manager.getLastException());
 
-    AutoCloseDataSetLock lock6 = manager.writeLock(LockLevel.BLOCK_POOl, "BPtest");
-    AutoCloseDataSetLock lock7 = manager.readLock(LockLevel.VOLUME, "BPtest", "Volumetest");
-    AutoCloseDataSetLock lock8 = manager.readLock(LockLevel.DIR,
-        "BPtest", "Volumetest", "SubDirtest");
-    lock8.close();
-    lock7.close();
-    lock6.close();
-    manager.lockLeakCheck();
-    assertNull(manager.getLastException());
-
     manager.writeLock(LockLevel.VOLUME, "BPtest", "Volumetest");
     manager.lockLeakCheck();
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index f58ee729ef98f..975874edb1fc0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -1946,12 +1946,7 @@ public void delayDeleteReplica() {
       assertFalse(uuids.contains(dn.getDatanodeUuid()));
 
       // This replica has deleted from datanode memory.
-      try {
-        Block storedBlock = ds.getStoredBlock(bpid, extendedBlock.getBlockId());
-        assertNull(storedBlock);
-      } catch (Exception e) {
-        GenericTestUtils.assertExceptionContains("ReplicaNotFoundException", e);
-      }
+      assertNull(ds.getStoredBlock(bpid, extendedBlock.getBlockId()));
     } finally {
       cluster.shutdown();
       DataNodeFaultInjector.set(oldInjector);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
index 2846c16c220e6..9d79e496102db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
@@ -50,7 +50,6 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.IOUtils;
@@ -597,13 +596,9 @@ private int getTrueReplication(MiniDFSCluster cluster,
       ExtendedBlock block) throws IOException {
     int count = 0;
     for (DataNode dn : cluster.getDataNodes()) {
-      try {
-        if (DataNodeTestUtils.getFSDataset(dn).getStoredBlock(
-            block.getBlockPoolId(), block.getBlockId()) != null) {
-          count++;
-        }
-      } catch (ReplicaNotFoundException e) {
-        continue;
+      if (DataNodeTestUtils.getFSDataset(dn).getStoredBlock(
+          block.getBlockPoolId(), block.getBlockId()) != null) {
+        count++;
       }
     }
     return count;