diff --git a/src/Nethermind/Nethermind.Blockchain.Test/BeaconBlockRootHandlerTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/BeaconBlockRootHandlerTests.cs index 7288c0d85ac1..abbd0cb530af 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/BeaconBlockRootHandlerTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/BeaconBlockRootHandlerTests.cs @@ -17,6 +17,8 @@ namespace Nethermind.Blockchain.Test; +[Parallelizable(ParallelScope.All)] +[FixtureLifeCycle(LifeCycle.InstancePerTestCase)] public class BeaconBlockRootHandlerTests { private BeaconBlockRootHandler _beaconBlockRootHandler; diff --git a/src/Nethermind/Nethermind.Blockchain.Test/BlockFinderExtensionsTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/BlockFinderExtensionsTests.cs index 7665e94c284e..1ad1c053cbd4 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/BlockFinderExtensionsTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/BlockFinderExtensionsTests.cs @@ -11,6 +11,7 @@ namespace Nethermind.Blockchain.Test; +[Parallelizable(ParallelScope.All)] public class BlockFinderExtensionsTests { [Test, MaxTime(Timeout.MaxTestTime)] diff --git a/src/Nethermind/Nethermind.Blockchain.Test/BlockProcessorTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/BlockProcessorTests.cs index 0471540f3e5c..4e180459cf7d 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/BlockProcessorTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/BlockProcessorTests.cs @@ -36,6 +36,7 @@ namespace Nethermind.Blockchain.Test; +[Parallelizable(ParallelScope.All)] public class BlockProcessorTests { [Test, MaxTime(Timeout.MaxTestTime)] diff --git a/src/Nethermind/Nethermind.Blockchain.Test/BlockTreeSuggestPacerTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/BlockTreeSuggestPacerTests.cs index 34392783f02f..46a3ea978935 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/BlockTreeSuggestPacerTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/BlockTreeSuggestPacerTests.cs @@ -10,6 +10,7 @@ 
namespace Nethermind.Blockchain.Test; +[Parallelizable(ParallelScope.All)] public class BlockTreeSuggestPacerTests { [Test] diff --git a/src/Nethermind/Nethermind.Blockchain.Test/BlockTreeTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/BlockTreeTests.cs index 3d327872c84f..d2f2a332753e 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/BlockTreeTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/BlockTreeTests.cs @@ -32,6 +32,8 @@ namespace Nethermind.Blockchain.Test; +[Parallelizable(ParallelScope.All)] +[FixtureLifeCycle(LifeCycle.InstancePerTestCase)] public class BlockTreeTests { private TestMemDb _blocksInfosDb = null!; diff --git a/src/Nethermind/Nethermind.Blockchain.Test/BlockchainProcessorTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/BlockchainProcessorTests.cs index 3f9fed9e8859..df03c4b2834e 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/BlockchainProcessorTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/BlockchainProcessorTests.cs @@ -28,7 +28,8 @@ namespace Nethermind.Blockchain.Test; -[Parallelizable(ParallelScope.Self)] +[Parallelizable(ParallelScope.All)] +[FixtureLifeCycle(LifeCycle.InstancePerTestCase)] public class BlockchainProcessorTests { private class ProcessingTestContext diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Blocks/BadBlockStoreTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Blocks/BadBlockStoreTests.cs index dd149c475ccc..f8fefc3679d0 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Blocks/BadBlockStoreTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Blocks/BadBlockStoreTests.cs @@ -12,6 +12,7 @@ namespace Nethermind.Blockchain.Test.Blocks; +[Parallelizable(ParallelScope.All)] public class BadBlockStoreTests { [Test] diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Blocks/BlockStoreTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Blocks/BlockStoreTests.cs index 46c3d9da71cf..861264efda96 100644 --- 
a/src/Nethermind/Nethermind.Blockchain.Test/Blocks/BlockStoreTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Blocks/BlockStoreTests.cs @@ -15,6 +15,7 @@ namespace Nethermind.Blockchain.Test.Blocks; +[Parallelizable(ParallelScope.All)] public class BlockStoreTests { private readonly Func, EquivalencyAssertionOptions> _ignoreEncodedSize = options => options.Excluding(b => b.EncodedSize); diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Blocks/HeaderStoreTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Blocks/HeaderStoreTests.cs index bc1d967eb9ec..130bda2d4829 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Blocks/HeaderStoreTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Blocks/HeaderStoreTests.cs @@ -11,9 +11,9 @@ namespace Nethermind.Blockchain.Test.Blocks; +[Parallelizable(ParallelScope.All)] public class HeaderStoreTests { - [Test] public void TestCanStoreAndGetHeader() { diff --git a/src/Nethermind/Nethermind.Blockchain.Test/CachedCodeInfoRepositoryTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/CachedCodeInfoRepositoryTests.cs index 10280c27796c..14d605000172 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/CachedCodeInfoRepositoryTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/CachedCodeInfoRepositoryTests.cs @@ -19,6 +19,7 @@ namespace Nethermind.Blockchain.Test; [TestFixture] +[Parallelizable(ParallelScope.All)] public class CachedCodeInfoRepositoryTests { private static IReleaseSpec CreateSpecWithPrecompile(Address precompileAddress) diff --git a/src/Nethermind/Nethermind.Blockchain.Test/ChainHeadReadOnlyStateProviderTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/ChainHeadReadOnlyStateProviderTests.cs index 6711e1283518..092d3b8b14cb 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/ChainHeadReadOnlyStateProviderTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/ChainHeadReadOnlyStateProviderTests.cs @@ -9,6 +9,7 @@ namespace Nethermind.Blockchain.Test; 
+[Parallelizable(ParallelScope.All)] public class ChainHeadReadOnlyStateProviderTests { [Test, MaxTime(Timeout.MaxTestTime)] diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Consensus/ClefSignerTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Consensus/ClefSignerTests.cs index 54d83e4a3617..4ecb01eee74f 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Consensus/ClefSignerTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Consensus/ClefSignerTests.cs @@ -15,6 +15,7 @@ namespace Nethermind.Blockchain.Test.Consensus { [TestFixture] + [Parallelizable(ParallelScope.All)] public class ClefSignerTests { [Test] diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Consensus/CompositeTxSourceTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Consensus/CompositeTxSourceTests.cs index cc61b6a7fc4a..6a9b7270cfc5 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Consensus/CompositeTxSourceTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Consensus/CompositeTxSourceTests.cs @@ -14,6 +14,7 @@ namespace Nethermind.Blockchain.Test.Consensus; +[Parallelizable(ParallelScope.All)] public class CompositeTxSourceTests { [Test, MaxTime(Timeout.MaxTestTime)] diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Consensus/NethDevSealEngineTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Consensus/NethDevSealEngineTests.cs index d9f0704c5167..e06a1eeaf3d6 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Consensus/NethDevSealEngineTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Consensus/NethDevSealEngineTests.cs @@ -12,6 +12,7 @@ namespace Nethermind.Blockchain.Test.Consensus { [TestFixture] + [Parallelizable(ParallelScope.All)] public class NethDevSealEngineTests { [Test, MaxTime(Timeout.MaxTestTime)] diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Consensus/NullSealEngineTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Consensus/NullSealEngineTests.cs index dcf4bf713101..df8bed08b4f1 100644 --- 
a/src/Nethermind/Nethermind.Blockchain.Test/Consensus/NullSealEngineTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Consensus/NullSealEngineTests.cs @@ -11,6 +11,7 @@ namespace Nethermind.Blockchain.Test.Consensus { [TestFixture] + [Parallelizable(ParallelScope.All)] public class NullSealEngineTests { [Test, MaxTime(Timeout.MaxTestTime)] diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Consensus/NullSignerTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Consensus/NullSignerTests.cs index 17af0b2ad38a..1de624592784 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Consensus/NullSignerTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Consensus/NullSignerTests.cs @@ -12,6 +12,7 @@ namespace Nethermind.Blockchain.Test.Consensus { [TestFixture] + [Parallelizable(ParallelScope.All)] public class NullSignerTests { [Test, MaxTime(Timeout.MaxTestTime)] diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Consensus/OneByOneTxSourceTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Consensus/OneByOneTxSourceTests.cs index 354848108367..0c26037ba28e 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Consensus/OneByOneTxSourceTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Consensus/OneByOneTxSourceTests.cs @@ -11,6 +11,7 @@ namespace Nethermind.Blockchain.Test.Consensus { [TestFixture] + [Parallelizable(ParallelScope.All)] public class OneByOneTxSourceTests { [Test, MaxTime(Timeout.MaxTestTime)] diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Consensus/SealEngineExceptionTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Consensus/SealEngineExceptionTests.cs index 5d5fcb9602c4..c6bef99ca9bc 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Consensus/SealEngineExceptionTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Consensus/SealEngineExceptionTests.cs @@ -8,6 +8,7 @@ namespace Nethermind.Blockchain.Test.Consensus { [TestFixture] + [Parallelizable(ParallelScope.All)] public class SealEngineExceptionTests { 
[Test, MaxTime(Timeout.MaxTestTime)] diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Consensus/SignerTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Consensus/SignerTests.cs index edd88c1a7224..fe897e8ff3e9 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Consensus/SignerTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Consensus/SignerTests.cs @@ -16,6 +16,7 @@ namespace Nethermind.Blockchain.Test.Consensus { [TestFixture] + [Parallelizable(ParallelScope.All)] public class SignerTests { [Test, MaxTime(Timeout.MaxTestTime)] diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Consensus/SinglePendingTxSelectorTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Consensus/SinglePendingTxSelectorTests.cs index feb4d47964c0..ad529fda85ba 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Consensus/SinglePendingTxSelectorTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Consensus/SinglePendingTxSelectorTests.cs @@ -11,6 +11,7 @@ namespace Nethermind.Blockchain.Test.Consensus { + [Parallelizable(ParallelScope.All)] public class SinglePendingTxSelectorTests { private readonly BlockHeader _anyParent = Build.A.BlockHeader.TestObject; diff --git a/src/Nethermind/Nethermind.Blockchain.Test/DaoDataTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/DaoDataTests.cs index b7483603e603..f2dbf4d95c20 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/DaoDataTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/DaoDataTests.cs @@ -7,6 +7,7 @@ namespace Nethermind.Blockchain.Test; +[Parallelizable(ParallelScope.All)] public class DaoDataTests { [Test, MaxTime(Timeout.MaxTestTime)] diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Data/FileLocalDataSourceTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Data/FileLocalDataSourceTests.cs index 03e9008d489f..d13afa2d1143 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Data/FileLocalDataSourceTests.cs +++ 
b/src/Nethermind/Nethermind.Blockchain.Test/Data/FileLocalDataSourceTests.cs @@ -16,6 +16,7 @@ namespace Nethermind.Blockchain.Test.Data { + [Parallelizable(ParallelScope.All)] public class FileLocalDataSourceTests { [Test, MaxTime(Timeout.MaxTestTime)] diff --git a/src/Nethermind/Nethermind.Blockchain.Test/ExitOnBlocknumberHandlerTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/ExitOnBlocknumberHandlerTests.cs index 72da5efe337e..34d04d45bf72 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/ExitOnBlocknumberHandlerTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/ExitOnBlocknumberHandlerTests.cs @@ -10,6 +10,7 @@ namespace Nethermind.Blockchain.Test; +[Parallelizable(ParallelScope.All)] public class ExitOnBlocknumberHandlerTests { [TestCase(10, false)] diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Filters/AddressFilterTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Filters/AddressFilterTests.cs index 4fe896388c71..4dde1eadbff2 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Filters/AddressFilterTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Filters/AddressFilterTests.cs @@ -11,6 +11,7 @@ namespace Nethermind.Blockchain.Test.Filters; [TestFixture] +[Parallelizable(ParallelScope.All)] public class AddressFilterTests { [Test] diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Filters/FilterStoreTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Filters/FilterStoreTests.cs index 80fabb6c8e13..dfec4eac051e 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Filters/FilterStoreTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Filters/FilterStoreTests.cs @@ -18,6 +18,8 @@ namespace Nethermind.Blockchain.Test.Filters; +[Parallelizable(ParallelScope.All)] +[FixtureLifeCycle(LifeCycle.InstancePerTestCase)] public class FilterStoreTests { [Test, MaxTime(Timeout.MaxTestTime)] @@ -152,6 +154,7 @@ public void Correctly_creates_topics_filter(Hash256[]?[]? 
topics, TopicsFilter e } [Test, MaxTime(Timeout.MaxTestTime)] + [Parallelizable(ParallelScope.None)] public async Task CleanUps_filters() { List removedFilterIds = new(); diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Filters/LogFilterTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Filters/LogFilterTests.cs index 9d497daa7dd0..df6bde0db6b9 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Filters/LogFilterTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Filters/LogFilterTests.cs @@ -11,6 +11,8 @@ namespace Nethermind.Blockchain.Test.Filters; +[Parallelizable(ParallelScope.All)] +[FixtureLifeCycle(LifeCycle.InstancePerTestCase)] public class LogFilterTests { private int _filterCounter; diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Find/LogFinderTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Find/LogFinderTests.cs index e8ae192041b0..625ded52d824 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Find/LogFinderTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Find/LogFinderTests.cs @@ -26,6 +26,8 @@ namespace Nethermind.Blockchain.Test.Find; +[Parallelizable(ParallelScope.All)] +[FixtureLifeCycle(LifeCycle.InstancePerTestCase)] public class LogFinderTests { private IBlockTree _blockTree = null!; diff --git a/src/Nethermind/Nethermind.Blockchain.Test/FullPruning/CopyTreeVisitorTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/FullPruning/CopyTreeVisitorTests.cs index 7c5aae0c8b2b..406ae0c89e36 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/FullPruning/CopyTreeVisitorTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/FullPruning/CopyTreeVisitorTests.cs @@ -21,18 +21,11 @@ namespace Nethermind.Blockchain.Test.FullPruning; -[Parallelizable(ParallelScope.Self)] +[Parallelizable(ParallelScope.All)] [TestFixture(INodeStorage.KeyScheme.HalfPath)] [TestFixture(INodeStorage.KeyScheme.Hash)] -public class CopyTreeVisitorTests +public class CopyTreeVisitorTests(INodeStorage.KeyScheme scheme) { - private 
readonly INodeStorage.KeyScheme _keyScheme; - - public CopyTreeVisitorTests(INodeStorage.KeyScheme scheme) - { - _keyScheme = scheme; - } - [TestCase(0, 1)] [TestCase(0, 8)] [TestCase(1, 1)] @@ -83,7 +76,7 @@ public void cancel_coping_state_between_dbs() private IPruningContext CopyDb(IPruningContext pruningContext, CancellationToken cancellationToken, MemDb trieDb, VisitingOptions? visitingOptions = null, WriteFlags writeFlags = WriteFlags.None) { LimboLogs logManager = LimboLogs.Instance; - PatriciaTree trie = Build.A.Trie(new NodeStorage(trieDb, _keyScheme)).WithAccountsByIndex(0, 100).TestObject; + PatriciaTree trie = Build.A.Trie(new NodeStorage(trieDb, scheme)).WithAccountsByIndex(0, 100).TestObject; // Create a custom DbProvider that uses the trieDb from the test IDbProvider dbProvider = Substitute.For(); @@ -94,16 +87,16 @@ private IPruningContext CopyDb(IPruningContext pruningContext, CancellationToken (IWorldState worldState, IStateReader stateReader) = TestWorldStateFactory.CreateForTestWithStateReader(dbProvider, logManager); BlockHeader? 
baseBlock = Build.A.BlockHeader.WithStateRoot(trie.RootHash).TestObject; - if (_keyScheme == INodeStorage.KeyScheme.Hash) + if (scheme == INodeStorage.KeyScheme.Hash) { - NodeStorage nodeStorage = new NodeStorage(pruningContext, _keyScheme); + NodeStorage nodeStorage = new NodeStorage(pruningContext, scheme); using CopyTreeVisitor copyTreeVisitor = new(nodeStorage, writeFlags, logManager, cancellationToken); stateReader.RunTreeVisitor(copyTreeVisitor, baseBlock, visitingOptions); copyTreeVisitor.Finish(); } else { - NodeStorage nodeStorage = new NodeStorage(pruningContext, _keyScheme); + NodeStorage nodeStorage = new NodeStorage(pruningContext, scheme); using CopyTreeVisitor copyTreeVisitor = new(nodeStorage, writeFlags, logManager, cancellationToken); stateReader.RunTreeVisitor(copyTreeVisitor, baseBlock, visitingOptions); copyTreeVisitor.Finish(); diff --git a/src/Nethermind/Nethermind.Blockchain.Test/FullPruning/FullPrunerTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/FullPruning/FullPrunerTests.cs index c13e1e2482cc..d9b07e032cb3 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/FullPruning/FullPrunerTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/FullPruning/FullPrunerTests.cs @@ -30,18 +30,10 @@ namespace Nethermind.Blockchain.Test.FullPruning; [TestFixture(0, 4)] [TestFixture(1, 1)] [TestFixture(1, 4)] -[Parallelizable(ParallelScope.Children)] -public class FullPrunerTests +[Parallelizable(ParallelScope.All)] +[FixtureLifeCycle(LifeCycle.InstancePerTestCase)] +public class FullPrunerTests(int fullPrunerMemoryBudgetMb, int degreeOfParallelism) { - private readonly int _fullPrunerMemoryBudgetMb; - private readonly int _degreeOfParallelism; - - public FullPrunerTests(int fullPrunerMemoryBudgetMb, int degreeOfParallelism) - { - _fullPrunerMemoryBudgetMb = fullPrunerMemoryBudgetMb; - _degreeOfParallelism = degreeOfParallelism; - } - [Test, MaxTime(Timeout.MaxTestTime)] public async Task can_prune() { @@ -61,8 +53,8 @@ public async Task 
can_prune_and_switch_key_scheme(INodeStorage.KeyScheme current true, false, FullPruningCompletionBehavior.None, - _fullPrunerMemoryBudgetMb, - _degreeOfParallelism, + fullPrunerMemoryBudgetMb, + degreeOfParallelism, currentKeyScheme: currentKeyScheme, preferredKeyScheme: newKeyScheme); @@ -192,8 +184,8 @@ private TestContext CreateTest( successfulPruning, clearPrunedDb, completionBehavior, - _fullPrunerMemoryBudgetMb, - _degreeOfParallelism); + fullPrunerMemoryBudgetMb, + degreeOfParallelism); private class TestContext { diff --git a/src/Nethermind/Nethermind.Blockchain.Test/FullPruning/FullPruningDiskTest.cs b/src/Nethermind/Nethermind.Blockchain.Test/FullPruning/FullPruningDiskTest.cs index d28cf1ab7be1..98483a553cce 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/FullPruning/FullPruningDiskTest.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/FullPruning/FullPruningDiskTest.cs @@ -30,6 +30,7 @@ namespace Nethermind.Blockchain.Test.FullPruning; +[Parallelizable(ParallelScope.All)] public class FullPruningDiskTest { public class PruningTestBlockchain : TestBlockchain @@ -109,27 +110,24 @@ public static async Task Create(IPruningConfig? 
pruningCo return chain; } - public class FullTestPruner : FullPruner + public class FullTestPruner( + IFullPruningDb pruningDb, + INodeStorageFactory nodeStorageFactory, + INodeStorage mainNodeStorage, + IPruningTrigger pruningTrigger, + IPruningConfig pruningConfig, + IBlockTree blockTree, + IStateReader stateReader, + IProcessExitSource processExitSource, + IDriveInfo driveInfo, + IPruningTrieStore trieStore, + IChainEstimations chainEstimations, + ILogManager logManager) + : FullPruner(pruningDb, nodeStorageFactory, mainNodeStorage, pruningTrigger, pruningConfig, blockTree, + stateReader, processExitSource, chainEstimations, driveInfo, trieStore, logManager) { public EventWaitHandle WaitHandle { get; } = new ManualResetEvent(false); - public FullTestPruner( - IFullPruningDb pruningDb, - INodeStorageFactory nodeStorageFactory, - INodeStorage mainNodeStorage, - IPruningTrigger pruningTrigger, - IPruningConfig pruningConfig, - IBlockTree blockTree, - IStateReader stateReader, - IProcessExitSource processExitSource, - IDriveInfo driveInfo, - IPruningTrieStore trieStore, - IChainEstimations chainEstimations, - ILogManager logManager) - : base(pruningDb, nodeStorageFactory, mainNodeStorage, pruningTrigger, pruningConfig, blockTree, stateReader, processExitSource, chainEstimations, driveInfo, trieStore, logManager) - { - } - protected override async Task RunFullPruning(CancellationToken cancellationToken) { await base.RunFullPruning(cancellationToken); diff --git a/src/Nethermind/Nethermind.Blockchain.Test/FullPruning/PruningTriggerPruningStrategyTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/FullPruning/PruningTriggerPruningStrategyTests.cs index 505f6227fb29..fc43e2dce333 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/FullPruning/PruningTriggerPruningStrategyTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/FullPruning/PruningTriggerPruningStrategyTests.cs @@ -12,7 +12,8 @@ namespace Nethermind.Blockchain.Test.FullPruning { [TestFixture] - 
[Parallelizable(ParallelScope.Self)] + [Parallelizable(ParallelScope.All)] + [FixtureLifeCycle(LifeCycle.InstancePerTestCase)] public class PruningTriggerPruningStrategyTests { private IFullPruningDb _fullPruningDb; diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Producers/BuildBlockOnEachPendingTxTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Producers/BuildBlockOnEachPendingTxTests.cs index fb7dcde6abf1..f1d61f1a256e 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Producers/BuildBlockOnEachPendingTxTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Producers/BuildBlockOnEachPendingTxTests.cs @@ -10,6 +10,7 @@ namespace Nethermind.Blockchain.Test.Producers; +[Parallelizable(ParallelScope.All)] public class BuildBlockOnEachPendingTxTests { [Test, MaxTime(Timeout.MaxTestTime)] diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Producers/BuildBlockRegularlyTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Producers/BuildBlockRegularlyTests.cs index 4aa9dac4cbba..c1279a679d17 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Producers/BuildBlockRegularlyTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Producers/BuildBlockRegularlyTests.cs @@ -9,6 +9,7 @@ namespace Nethermind.Blockchain.Test.Producers; +[Parallelizable(ParallelScope.All)] public class BuildBlockRegularlyTests { [Test, MaxTime(Timeout.MaxTestTime), Retry(3)] diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Producers/BuildBlocksWhenRequestedTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Producers/BuildBlocksWhenRequestedTests.cs index a244c84aacb9..d3c709b6ee11 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Producers/BuildBlocksWhenRequestedTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Producers/BuildBlocksWhenRequestedTests.cs @@ -7,6 +7,7 @@ namespace Nethermind.Blockchain.Test.Producers; +[Parallelizable(ParallelScope.All)] public class BuildBlocksWhenRequestedTests { [Test, MaxTime(Timeout.MaxTestTime)] diff --git 
a/src/Nethermind/Nethermind.Blockchain.Test/Producers/CompositeBlockProductionTriggerTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Producers/CompositeBlockProductionTriggerTests.cs index 478c37bbebc4..d493795f1b3e 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Producers/CompositeBlockProductionTriggerTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Producers/CompositeBlockProductionTriggerTests.cs @@ -7,6 +7,7 @@ namespace Nethermind.Blockchain.Test.Producers; +[Parallelizable(ParallelScope.All)] public class CompositeBlockProductionTriggerTests { [Test, MaxTime(Timeout.MaxTestTime)] diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Producers/DevBlockproducerTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Producers/DevBlockproducerTests.cs index 733b73a3c804..b8bc669d2a8c 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Producers/DevBlockproducerTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Producers/DevBlockproducerTests.cs @@ -17,6 +17,7 @@ namespace Nethermind.Blockchain.Test.Producers; +[Parallelizable(ParallelScope.All)] public class DevBlockProducerTests { [Test, MaxTime(Timeout.MaxTestTime)] diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Producers/IfPoolIsNotEmptyTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Producers/IfPoolIsNotEmptyTests.cs index 3f2d97ac18be..14549530b899 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Producers/IfPoolIsNotEmptyTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Producers/IfPoolIsNotEmptyTests.cs @@ -9,6 +9,7 @@ namespace Nethermind.Blockchain.Test.Producers; +[Parallelizable(ParallelScope.All)] public class IfPoolIsNotEmptyTests { [MaxTime(Timeout.MaxTestTime)] diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Proofs/ReceiptTrieTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Proofs/ReceiptTrieTests.cs index 0533fde1a350..646e38ea9a12 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Proofs/ReceiptTrieTests.cs +++ 
b/src/Nethermind/Nethermind.Blockchain.Test/Proofs/ReceiptTrieTests.cs @@ -17,6 +17,7 @@ namespace Nethermind.Blockchain.Test.Proofs; +[Parallelizable(ParallelScope.All)] public class ReceiptTrieTests { private static readonly IRlpStreamDecoder _decoder = Rlp.GetStreamDecoder()!; diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Proofs/TxTrieTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Proofs/TxTrieTests.cs index 4f8e912e1887..24fc4d6524b1 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Proofs/TxTrieTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Proofs/TxTrieTests.cs @@ -17,14 +17,11 @@ namespace Nethermind.Blockchain.Test.Proofs; [TestFixture(true)] [TestFixture(false)] -public class TxTrieTests +[Parallelizable(ParallelScope.All)] +[FixtureLifeCycle(LifeCycle.InstancePerTestCase)] +public class TxTrieTests(bool useEip2718) { - private readonly IReleaseSpec _releaseSpec; - - public TxTrieTests(bool useEip2718) - { - _releaseSpec = useEip2718 ? Berlin.Instance : MuirGlacier.Instance; - } + private readonly IReleaseSpec _releaseSpec = useEip2718 ? 
Berlin.Instance : MuirGlacier.Instance; [Test, MaxTime(Timeout.MaxTestTime)] public void Can_calculate_root() diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Proofs/WithdrawalTrieTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Proofs/WithdrawalTrieTests.cs index 4dada95cceb2..02c002758efd 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Proofs/WithdrawalTrieTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Proofs/WithdrawalTrieTests.cs @@ -11,6 +11,7 @@ namespace Nethermind.Blockchain.Test.Proofs; +[Parallelizable(ParallelScope.All)] public class WithdrawalTrieTests { [Test, MaxTime(Timeout.MaxTestTime)] diff --git a/src/Nethermind/Nethermind.Blockchain.Test/ReadOnlyBlockTreeTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/ReadOnlyBlockTreeTests.cs index 2a85fb94a0e2..baa7e9d4af2e 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/ReadOnlyBlockTreeTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/ReadOnlyBlockTreeTests.cs @@ -9,6 +9,8 @@ namespace Nethermind.Blockchain.Test; +[Parallelizable(ParallelScope.All)] +[FixtureLifeCycle(LifeCycle.InstancePerTestCase)] public class ReadOnlyBlockTreeTests { private IBlockTree _innerBlockTree = null!; diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Receipts/KeccaksIteratorTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Receipts/KeccaksIteratorTests.cs index 96be4973fcae..2c66c04dfaa1 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Receipts/KeccaksIteratorTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Receipts/KeccaksIteratorTests.cs @@ -13,6 +13,7 @@ namespace Nethermind.Blockchain.Test.Receipts; +[Parallelizable(ParallelScope.All)] public class KeccaksIteratorTests { [TestCaseSource(nameof(TestKeccaks))] diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Receipts/PersistentReceiptStorageTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Receipts/PersistentReceiptStorageTests.cs index 4f7aa07eac27..caa0794e7ac3 100644 --- 
a/src/Nethermind/Nethermind.Blockchain.Test/Receipts/PersistentReceiptStorageTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Receipts/PersistentReceiptStorageTests.cs @@ -26,23 +26,19 @@ namespace Nethermind.Blockchain.Test.Receipts; [TestFixture(true)] [TestFixture(false)] -public class PersistentReceiptStorageTests +[Parallelizable(ParallelScope.All)] +[FixtureLifeCycle(LifeCycle.InstancePerTestCase)] +public class PersistentReceiptStorageTests(bool useCompactReceipts) { - private readonly TestSpecProvider _specProvider = new TestSpecProvider(Byzantium.Instance); + private readonly TestSpecProvider _specProvider = new(Byzantium.Instance); private TestMemColumnsDb _receiptsDb = null!; private ReceiptsRecovery _receiptsRecovery = null!; private IBlockTree _blockTree = null!; private IBlockStore _blockStore = null!; - private readonly bool _useCompactReceipts; private ReceiptConfig _receiptConfig = null!; private PersistentReceiptStorage _storage = null!; private ReceiptArrayStorageDecoder _decoder = null!; - public PersistentReceiptStorageTests(bool useCompactReceipts) - { - _useCompactReceipts = useCompactReceipts; - } - [SetUp] public void SetUp() { @@ -64,7 +60,7 @@ public void TearDown() private void CreateStorage() { - _decoder = new ReceiptArrayStorageDecoder(_useCompactReceipts); + _decoder = new ReceiptArrayStorageDecoder(useCompactReceipts); _storage = new PersistentReceiptStorage( _receiptsDb, _specProvider, @@ -387,7 +383,7 @@ public void When_NewHeadBlock_DoNotRemove_TxIndex_WhenTxIsInOtherBlockNumber() [Test] public async Task When_NewHeadBlock_Remove_TxIndex_OfRemovedBlock_Unless_ItsAlsoInNewBlock() { - _receiptConfig.CompactTxIndex = _useCompactReceipts; + _receiptConfig.CompactTxIndex = useCompactReceipts; CreateStorage(); (Block block, _) = InsertBlock(); Block block2 = Build.A.Block diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Receipts/ReceiptsIteratorTests.cs 
b/src/Nethermind/Nethermind.Blockchain.Test/Receipts/ReceiptsIteratorTests.cs index ad7d30e52564..14f6d357d52a 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Receipts/ReceiptsIteratorTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Receipts/ReceiptsIteratorTests.cs @@ -14,9 +14,10 @@ namespace Nethermind.Blockchain.Test.Receipts; +[Parallelizable(ParallelScope.All)] public class ReceiptsIteratorTests { - readonly ReceiptArrayStorageDecoder _decoder = ReceiptArrayStorageDecoder.Instance; + private readonly ReceiptArrayStorageDecoder _decoder = ReceiptArrayStorageDecoder.Instance; [Test] public void SmokeTestWithRecovery() diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Receipts/ReceiptsRecoveryTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Receipts/ReceiptsRecoveryTests.cs index 7cd1b0ec1292..fd10cec441d6 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Receipts/ReceiptsRecoveryTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Receipts/ReceiptsRecoveryTests.cs @@ -11,6 +11,8 @@ namespace Nethermind.Blockchain.Test.Receipts; +[Parallelizable(ParallelScope.All)] +[FixtureLifeCycle(LifeCycle.InstancePerTestCase)] public class ReceiptsRecoveryTests { private IReceiptsRecovery _receiptsRecovery = null!; diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Receipts/ReceiptsRootTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Receipts/ReceiptsRootTests.cs index 1c3db195441b..e7b8009e2e92 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Receipts/ReceiptsRootTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Receipts/ReceiptsRootTests.cs @@ -11,6 +11,7 @@ namespace Nethermind.Blockchain.Test.Receipts { + [Parallelizable(ParallelScope.All)] public class ReceiptsRootTests { public static IEnumerable ReceiptsRootTestCases diff --git a/src/Nethermind/Nethermind.Blockchain.Test/ReorgDepthFinalizedStateProviderTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/ReorgDepthFinalizedStateProviderTests.cs index 
7e2e9b7f7e97..b8d4fd4f6c0b 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/ReorgDepthFinalizedStateProviderTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/ReorgDepthFinalizedStateProviderTests.cs @@ -11,7 +11,8 @@ namespace Nethermind.Blockchain.Test; [TestFixture] -[Parallelizable(ParallelScope.Self)] +[Parallelizable(ParallelScope.All)] +[FixtureLifeCycle(LifeCycle.InstancePerTestCase)] public class ReorgDepthFinalizedStateProviderTests { private IBlockTree _blockTree = null!; diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Rewards/NoBlockRewardsTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Rewards/NoBlockRewardsTests.cs index 1dc264011284..212a6448829f 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Rewards/NoBlockRewardsTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Rewards/NoBlockRewardsTests.cs @@ -8,6 +8,7 @@ namespace Nethermind.Blockchain.Test.Rewards; +[Parallelizable(ParallelScope.All)] public class NoBlockRewardsTests { [Test, MaxTime(Timeout.MaxTestTime)] diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Rewards/RewardCalculatorTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Rewards/RewardCalculatorTests.cs index d4225963be51..3553483a1f69 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Rewards/RewardCalculatorTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Rewards/RewardCalculatorTests.cs @@ -9,6 +9,7 @@ namespace Nethermind.Blockchain.Test.Rewards; +[Parallelizable(ParallelScope.All)] public class RewardCalculatorTests { [Test, MaxTime(Timeout.MaxTestTime)] diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Services/HealthHintServiceTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Services/HealthHintServiceTests.cs index aee9e3a4df56..fc89b9798e8f 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Services/HealthHintServiceTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Services/HealthHintServiceTests.cs @@ -9,6 +9,7 @@ namespace 
Nethermind.Blockchain.Test.Services; +[Parallelizable(ParallelScope.All)] public class HealthHintServiceTests { [Test, MaxTime(Timeout.MaxTestTime)] diff --git a/src/Nethermind/Nethermind.Blockchain.Test/TransactionGasPriceComparisonTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/TransactionGasPriceComparisonTests.cs index f247ffb1e153..089c1010237f 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/TransactionGasPriceComparisonTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/TransactionGasPriceComparisonTests.cs @@ -14,6 +14,7 @@ namespace Nethermind.Blockchain.Test; +[Parallelizable(ParallelScope.All)] public class TransactionComparisonTests { [MaxTime(Timeout.MaxTestTime)] diff --git a/src/Nethermind/Nethermind.Blockchain.Test/TransactionProcessorEip7702Tests.cs b/src/Nethermind/Nethermind.Blockchain.Test/TransactionProcessorEip7702Tests.cs index 2659aa7fc129..73267a3f94a2 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/TransactionProcessorEip7702Tests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/TransactionProcessorEip7702Tests.cs @@ -25,6 +25,8 @@ namespace Nethermind.Evm.Test; [TestFixture] +[Parallelizable(ParallelScope.All)] +[FixtureLifeCycle(LifeCycle.InstancePerTestCase)] internal class TransactionProcessorEip7702Tests { private ISpecProvider _specProvider; diff --git a/src/Nethermind/Nethermind.Blockchain.Test/TransactionProcessorTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/TransactionProcessorTests.cs index fe71d8075570..bb8c1fe456df 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/TransactionProcessorTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/TransactionProcessorTests.cs @@ -32,7 +32,8 @@ namespace Nethermind.Evm.Test; [TestFixture(true)] [TestFixture(false)] [Todo(Improve.Refactor, "Check why fixture test cases did not work")] -[Parallelizable(ParallelScope.Self)] +[Parallelizable(ParallelScope.All)] +[FixtureLifeCycle(LifeCycle.InstancePerTestCase)] public class TransactionProcessorTests { 
private readonly bool _isEip155Enabled; diff --git a/src/Nethermind/Nethermind.Blockchain.Test/TransactionSelectorTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/TransactionSelectorTests.cs index 2866322ce40b..c31d811b18f6 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/TransactionSelectorTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/TransactionSelectorTests.cs @@ -28,6 +28,7 @@ namespace Nethermind.Blockchain.Test { + [Parallelizable(ParallelScope.All)] public class TransactionSelectorTests { public static IEnumerable ProperTransactionsSelectedTestCases diff --git a/src/Nethermind/Nethermind.Blockchain.Test/TransactionsExecutorTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/TransactionsExecutorTests.cs index ab506b1fbab2..1282d9038c21 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/TransactionsExecutorTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/TransactionsExecutorTests.cs @@ -31,6 +31,7 @@ namespace Nethermind.Blockchain.Test { + [Parallelizable(ParallelScope.All)] public class TransactionsExecutorTests { public static IEnumerable ProperTransactionsSelectedTestCases diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Utils/LastNStateRootTrackerTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Utils/LastNStateRootTrackerTests.cs index 699ef5593bfa..07c79a8a1ec5 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Utils/LastNStateRootTrackerTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Utils/LastNStateRootTrackerTests.cs @@ -11,6 +11,7 @@ namespace Nethermind.Blockchain.Test.Utils; +[Parallelizable(ParallelScope.All)] public class LastNStateRootTrackerTests { [Test] diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Validators/BlockValidatorTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Validators/BlockValidatorTests.cs index dff67022674e..78a14474285d 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Validators/BlockValidatorTests.cs +++ 
b/src/Nethermind/Nethermind.Blockchain.Test/Validators/BlockValidatorTests.cs @@ -18,6 +18,8 @@ namespace Nethermind.Blockchain.Test.Validators; +[Parallelizable(ParallelScope.All)] +[FixtureLifeCycle(LifeCycle.InstancePerTestCase)] public class BlockValidatorTests { private static BlockValidator _blockValidator = null!; diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Validators/HeaderValidatorTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Validators/HeaderValidatorTests.cs index 445672cdf87a..fb3a63499864 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Validators/HeaderValidatorTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Validators/HeaderValidatorTests.cs @@ -23,6 +23,8 @@ namespace Nethermind.Blockchain.Test.Validators; +[Parallelizable(ParallelScope.All)] +[FixtureLifeCycle(LifeCycle.InstancePerTestCase)] public class HeaderValidatorTests { private IHeaderValidator _validator = null!; diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Validators/ShardBlobBlockValidatorTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Validators/ShardBlobBlockValidatorTests.cs index 0d386ee72497..28e7fe635cbe 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Validators/ShardBlobBlockValidatorTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Validators/ShardBlobBlockValidatorTests.cs @@ -15,6 +15,7 @@ namespace Nethermind.Blockchain.Test.Validators; +[Parallelizable(ParallelScope.All)] public class ShardBlobBlockValidatorTests { [TestCaseSource(nameof(BlobGasFieldsPerForkTestCases))] diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Validators/TxValidatorTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Validators/TxValidatorTests.cs index 8dcb6a44720f..ac04461b93fc 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Validators/TxValidatorTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Validators/TxValidatorTests.cs @@ -26,12 +26,9 @@ namespace Nethermind.Blockchain.Test.Validators; 
+[Parallelizable(ParallelScope.All)] public class TxValidatorTests { - [SetUp] - public void Setup() - { - } [Test, MaxTime(Timeout.MaxTestTime)] public void Curve_is_correct() diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Validators/UnclesValidatorTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Validators/UnclesValidatorTests.cs index 069960655340..0ecbe42eb588 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Validators/UnclesValidatorTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Validators/UnclesValidatorTests.cs @@ -12,6 +12,8 @@ namespace Nethermind.Blockchain.Test.Validators; +[Parallelizable(ParallelScope.All)] +[FixtureLifeCycle(LifeCycle.InstancePerTestCase)] public class UnclesValidatorTests { private Block _greatGrandparent; diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Validators/WithdrawalValidatorTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Validators/WithdrawalValidatorTests.cs index 7cc4bacbc4e1..604c64e2d497 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Validators/WithdrawalValidatorTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Validators/WithdrawalValidatorTests.cs @@ -14,6 +14,7 @@ namespace Nethermind.Blockchain.Test.Validators; +[Parallelizable(ParallelScope.All)] public class WithdrawalValidatorTests { [Test, MaxTime(Timeout.MaxTestTime)] diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Visitors/DbBlocksLoaderTests.cs b/src/Nethermind/Nethermind.Blockchain.Test/Visitors/DbBlocksLoaderTests.cs index b468c23eed9c..420a86305e72 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Visitors/DbBlocksLoaderTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Visitors/DbBlocksLoaderTests.cs @@ -17,6 +17,7 @@ namespace Nethermind.Blockchain.Test.Visitors; +[Parallelizable(ParallelScope.All)] public class DbBlocksLoaderTests { private readonly int _dbLoadTimeout = 5000; diff --git a/src/Nethermind/Nethermind.Blockchain.Test/Visitors/StartupTreeFixerTests.cs 
b/src/Nethermind/Nethermind.Blockchain.Test/Visitors/StartupTreeFixerTests.cs index 8e787f1a5067..1191fe481b7a 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/Visitors/StartupTreeFixerTests.cs +++ b/src/Nethermind/Nethermind.Blockchain.Test/Visitors/StartupTreeFixerTests.cs @@ -17,6 +17,7 @@ namespace Nethermind.Blockchain.Test.Visitors; +[Parallelizable(ParallelScope.All)] public class StartupTreeFixerTests { [Test, MaxTime(Timeout.MaxTestTime), Ignore("Not implemented")] diff --git a/src/Nethermind/Nethermind.Core.Test/MCSLockTests.cs b/src/Nethermind/Nethermind.Core.Test/MCSLockTests.cs index 0dbf3e1ea56e..16d4fac388eb 100644 --- a/src/Nethermind/Nethermind.Core.Test/MCSLockTests.cs +++ b/src/Nethermind/Nethermind.Core.Test/MCSLockTests.cs @@ -59,6 +59,7 @@ public void MultipleThreads() } [Test] + [Retry(3)] public void LockFairnessTest() { int numberOfThreads = 10; diff --git a/src/Nethermind/Nethermind.Core.Test/TestMemDb.cs b/src/Nethermind/Nethermind.Core.Test/TestMemDb.cs index 169a166c58c7..b174709b9662 100644 --- a/src/Nethermind/Nethermind.Core.Test/TestMemDb.cs +++ b/src/Nethermind/Nethermind.Core.Test/TestMemDb.cs @@ -23,18 +23,15 @@ public class TestMemDb : MemDb, ITunableDb public Func? ReadFunc { get; set; } public Func? WriteFunc { get; set; } - public Action? RemoveFunc { get; set; } public bool WasFlushed => FlushCount > 0; - public int FlushCount { get; set; } = 0; + public int FlushCount { get; private set; } [MethodImpl(MethodImplOptions.Synchronized)] public override byte[]? Get(ReadOnlySpan key, ReadFlags flags = ReadFlags.None) { _readKeys.Add((key.ToArray(), flags)); - - if (ReadFunc is not null) return ReadFunc(key.ToArray()); - return base.Get(key, flags); + return ReadFunc is not null ? ReadFunc(key.ToArray()) : base.Get(key, flags); } [MethodImpl(MethodImplOptions.Synchronized)] @@ -46,71 +43,32 @@ public override void Set(ReadOnlySpan key, byte[]? 
value, WriteFlags flags base.Set(key, value, flags); } - public override Span GetSpan(ReadOnlySpan key) - { - return Get(key); - } - [MethodImpl(MethodImplOptions.Synchronized)] public override void Remove(ReadOnlySpan key) { _removedKeys.Add(key.ToArray()); - - if (RemoveFunc is not null) - { - RemoveFunc.Invoke(key.ToArray()); - return; - } base.Remove(key); } - public void Tune(ITunableDb.TuneType type) - { - _tuneTypes.Add(type); - } - - public bool WasTunedWith(ITunableDb.TuneType type) - { - return _tuneTypes.Contains(type); - } + public void Tune(ITunableDb.TuneType type) => _tuneTypes.Add(type); + public bool WasTunedWith(ITunableDb.TuneType type) => _tuneTypes.Contains(type); - public void KeyWasRead(byte[] key, int times = 1) - { + public void KeyWasRead(byte[] key, int times = 1) => _readKeys.Count(it => Bytes.AreEqual(it.Item1, key)).Should().Be(times); - } - public void KeyWasReadWithFlags(byte[] key, ReadFlags flags, int times = 1) - { + public void KeyWasReadWithFlags(byte[] key, ReadFlags flags, int times = 1) => _readKeys.Count(it => Bytes.AreEqual(it.Item1, key) && it.Item2 == flags).Should().Be(times); - } - public void KeyWasWritten(byte[] key, int times = 1) - { + public void KeyWasWritten(byte[] key, int times = 1) => _writes.Count(it => Bytes.AreEqual(it.Item1.Item1, key)).Should().Be(times); - } - public void KeyWasWritten(Func<(byte[], byte[]?), bool> cond, int times = 1) - { + public void KeyWasWritten(Func<(byte[], byte[]?), bool> cond, int times = 1) => _writes.Count(it => cond.Invoke(it.Item1)).Should().Be(times); - } - public void KeyWasWrittenWithFlags(byte[] key, WriteFlags flags, int times = 1) - { + public void KeyWasWrittenWithFlags(byte[] key, WriteFlags flags, int times = 1) => _writes.Count(it => Bytes.AreEqual(it.Item1.Item1, key) && it.Item2 == flags).Should().Be(times); - } - - public void KeyWasRemoved(Func cond, int times = 1) - { - _removedKeys.Count(cond).Should().Be(times); - } - public override IWriteBatch 
StartWriteBatch() - { - return new InMemoryWriteBatch(this); - } - - public override void Flush(bool onlyWal) - { - FlushCount++; - } + public void KeyWasRemoved(Func cond, int times = 1) => _removedKeys.Count(cond).Should().Be(times); + public override IWriteBatch StartWriteBatch() => new InMemoryWriteBatch(this); + public override void Flush(bool onlyWal) => FlushCount++; } diff --git a/src/Nethermind/Nethermind.Core/Buffers/ArrayMemoryManager.cs b/src/Nethermind/Nethermind.Core/Buffers/ArrayMemoryManager.cs new file mode 100644 index 000000000000..52c1bf4dd496 --- /dev/null +++ b/src/Nethermind/Nethermind.Core/Buffers/ArrayMemoryManager.cs @@ -0,0 +1,22 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System; +using System.Buffers; + +namespace Nethermind.Core.Buffers; + +/// +/// Simple MemoryManager that wraps a byte array without any pinning. +/// Used for in-memory stores where the array is managed and doesn't require special release handling. 
+/// +public sealed class ArrayMemoryManager(byte[] array) : MemoryManager +{ + protected override void Dispose(bool disposing) { } + + public override Span GetSpan() => array; + + public override MemoryHandle Pin(int elementIndex = 0) => default; + + public override void Unpin() { } +} diff --git a/src/Nethermind/Nethermind.Core/CappedArrayMemoryManager.cs b/src/Nethermind/Nethermind.Core/Buffers/CappedArrayMemoryManager.cs similarity index 100% rename from src/Nethermind/Nethermind.Core/CappedArrayMemoryManager.cs rename to src/Nethermind/Nethermind.Core/Buffers/CappedArrayMemoryManager.cs diff --git a/src/Nethermind/Nethermind.Core/DbSpanMemoryManager.cs b/src/Nethermind/Nethermind.Core/Buffers/DbSpanMemoryManager.cs similarity index 58% rename from src/Nethermind/Nethermind.Core/DbSpanMemoryManager.cs rename to src/Nethermind/Nethermind.Core/Buffers/DbSpanMemoryManager.cs index 48d881ae00c0..18a32b032a88 100644 --- a/src/Nethermind/Nethermind.Core/DbSpanMemoryManager.cs +++ b/src/Nethermind/Nethermind.Core/Buffers/DbSpanMemoryManager.cs @@ -10,24 +10,16 @@ namespace Nethermind.Core.Buffers; -public unsafe sealed class DbSpanMemoryManager : MemoryManager +public sealed unsafe class DbSpanMemoryManager(IReadOnlyKeyValueStore db, Span unmanagedSpan) : MemoryManager { - private readonly IReadOnlyKeyValueStore _db; - private void* _ptr; - private readonly int _length; - - public DbSpanMemoryManager(IReadOnlyKeyValueStore db, Span unmanagedSpan) - { - _db = db; - _ptr = Unsafe.AsPointer(ref MemoryMarshal.GetReference(unmanagedSpan)); - _length = unmanagedSpan.Length; - } + private void* _ptr = Unsafe.AsPointer(ref MemoryMarshal.GetReference(unmanagedSpan)); + private readonly int _length = unmanagedSpan.Length; protected override void Dispose(bool disposing) { if (_ptr is not null) { - _db.DangerousReleaseMemory(GetSpan()); + db.DangerousReleaseMemory(GetSpan()); } _ptr = null; @@ -53,13 +45,8 @@ public override MemoryHandle Pin(int elementIndex = 0) return new 
MemoryHandle(_ptr); } - public override void Unpin() - { - } + public override void Unpin() { } [DoesNotReturn, StackTraceHidden] - private static void ThrowDisposed() - { - throw new ObjectDisposedException(nameof(DbSpanMemoryManager)); - } + private static void ThrowDisposed() => throw new ObjectDisposedException(nameof(DbSpanMemoryManager)); } diff --git a/src/Nethermind/Nethermind.Core/Collections/DictionaryExtensions.cs b/src/Nethermind/Nethermind.Core/Collections/DictionaryExtensions.cs index 491e4e3528ca..9fdf91723f45 100644 --- a/src/Nethermind/Nethermind.Core/Collections/DictionaryExtensions.cs +++ b/src/Nethermind/Nethermind.Core/Collections/DictionaryExtensions.cs @@ -3,7 +3,9 @@ using System; using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; using System.Runtime.InteropServices; +using Nethermind.Core.Resettables; namespace Nethermind.Core.Collections; @@ -15,21 +17,48 @@ public static void Increment(this Dictionary dictionary, TKey k res++; } - public static ref TValue GetOrAdd(this Dictionary dictionary, - TKey key, Func factory, - out bool exists) - where TKey : notnull + extension(Dictionary dictionary) where TKey : notnull { - ref TValue? existing = ref CollectionsMarshal.GetValueRefOrAddDefault(dictionary, key, out exists); + public ref TValue GetOrAdd(TKey key, Func factory, out bool exists) + { + ref TValue? existing = ref CollectionsMarshal.GetValueRefOrAddDefault(dictionary, key, out exists); - if (!exists) - existing = factory(key); + if (!exists) + existing = factory(key); + + return ref existing!; + } + + public ref TValue GetOrAdd(TKey key, Func factory) => ref dictionary.GetOrAdd(key, factory, out _); - return ref existing!; } - public static ref TValue GetOrAdd(this Dictionary dictionary, - TKey key, Func factory) - where TKey : notnull => - ref GetOrAdd(dictionary, key, factory, out _); + /// The dictionary whose values will be returned and cleared. + /// The type of the keys in the dictionary. 
+ /// The type of the values in the dictionary, which must implement . + extension(IDictionary dictionary) where TValue : class, IReturnable + { + /// + /// Returns all values in the dictionary to their pool by calling on each value, + /// then clears the dictionary. + /// + /// + /// Use this method when you need to both return pooled objects and clear the dictionary in one operation. + /// + public void ResetAndClear() + { + foreach (TValue value in dictionary.Values) + { + value.Return(); + } + dictionary.Clear(); + } + } + + extension(Dictionary.AlternateLookup dictionary) + where TKey : notnull where TAlternateKey : notnull, allows ref struct + { + public bool TryRemove(TAlternateKey key, [MaybeNullWhen(false)] out TValue value) => + dictionary.Remove(key, out _, out value); + } } diff --git a/src/Nethermind/Nethermind.Core/Extensions/DictionaryExtensions.cs b/src/Nethermind/Nethermind.Core/Extensions/DictionaryExtensions.cs deleted file mode 100644 index 6168e96121f4..000000000000 --- a/src/Nethermind/Nethermind.Core/Extensions/DictionaryExtensions.cs +++ /dev/null @@ -1,30 +0,0 @@ -// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited -// SPDX-License-Identifier: LGPL-3.0-only - -using System.Collections.Generic; -using Nethermind.Core.Resettables; - -namespace Nethermind.Core.Extensions; - -public static class DictionaryExtensions -{ - /// - /// Returns all values in the dictionary to their pool by calling on each value, - /// then clears the dictionary. - /// - /// The type of the keys in the dictionary. - /// The type of the values in the dictionary, which must implement . - /// The dictionary whose values will be returned and cleared. - /// - /// Use this method when you need to both return pooled objects and clear the dictionary in one operation. 
- /// - public static void ResetAndClear(this IDictionary dictionary) - where TValue : class, IReturnable - { - foreach (TValue value in dictionary.Values) - { - value.Return(); - } - dictionary.Clear(); - } -} diff --git a/src/Nethermind/Nethermind.Core/IKeyValueStore.cs b/src/Nethermind/Nethermind.Core/IKeyValueStore.cs index d80cfc3d40ae..d30dbcfe87df 100644 --- a/src/Nethermind/Nethermind.Core/IKeyValueStore.cs +++ b/src/Nethermind/Nethermind.Core/IKeyValueStore.cs @@ -2,6 +2,8 @@ // SPDX-License-Identifier: LGPL-3.0-only using System; +using System.Buffers; +using Nethermind.Core.Buffers; using Nethermind.Core.Extensions; namespace Nethermind.Core @@ -22,10 +24,12 @@ public interface IReadOnlyKeyValueStore byte[]? Get(scoped ReadOnlySpan key, ReadFlags flags = ReadFlags.None); /// - /// Return span. Must call `DangerousReleaseMemory` or there can be some leak. + /// Return span. Must call after use to avoid memory leaks. + /// Prefer using which handles release automatically via disposal. /// - /// - /// Can return null or empty Span on missing key + /// Key whose associated value should be read. + /// Read behavior flags that control how the value is retrieved. + /// Can return null or empty Span on a missing key Span GetSpan(scoped ReadOnlySpan key, ReadFlags flags = ReadFlags.None) => Get(key, flags); /// @@ -63,6 +67,19 @@ bool KeyExists(ReadOnlySpan key) } void DangerousReleaseMemory(in ReadOnlySpan span) { } + + /// + /// Returns a MemoryManager wrapping the value for the given key. + /// The MemoryManager must be disposed of when done to release any underlying resources. + /// + /// Key whose associated value should be read. + /// Read behavior flags that control how the value is retrieved. + /// A MemoryManager wrapping the value or null if the key doesn't exist. + MemoryManager? GetOwnedMemory(ReadOnlySpan key, ReadFlags flags = ReadFlags.None) + { + byte[]? data = Get(key, flags); + return data is null or { Length: 0 } ? 
null : new ArrayMemoryManager(data); + } } public interface IReadOnlyNativeKeyValueStore diff --git a/src/Nethermind/Nethermind.Core/KeyValueStoreExtensions.cs b/src/Nethermind/Nethermind.Core/KeyValueStoreExtensions.cs index 906b39acf45d..0220b81ae103 100644 --- a/src/Nethermind/Nethermind.Core/KeyValueStoreExtensions.cs +++ b/src/Nethermind/Nethermind.Core/KeyValueStoreExtensions.cs @@ -2,7 +2,6 @@ // SPDX-License-Identifier: LGPL-3.0-only using System; -using System.Buffers; using System.Runtime.CompilerServices; using Nethermind.Core.Buffers; using Nethermind.Core.Crypto; @@ -12,114 +11,108 @@ namespace Nethermind.Core { public static class KeyValueStoreExtensions { - public static IWriteBatch LikeABatch(this IWriteOnlyKeyValueStore keyValueStore) - { - return LikeABatch(keyValueStore, null); - } - - public static IWriteBatch LikeABatch(this IWriteOnlyKeyValueStore keyValueStore, Action? onDispose) - { - return new FakeWriteBatch(keyValueStore, onDispose); - } - - #region Getters - - public static byte[]? Get(this IReadOnlyKeyValueStore db, Hash256 key) + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static void GuardKey(Hash256 key) { #if DEBUG - if (key == Keccak.OfAnEmptyString) - { - throw new InvalidOperationException(); - } + if (key == Keccak.OfAnEmptyString) throw new InvalidOperationException(); #endif - - return db[key.Bytes]; } - /// - /// - /// /// - /// - /// Can return null or empty Span on missing key - /// - public static Span GetSpan(this IReadOnlyKeyValueStore db, Hash256 key) + extension(IReadOnlyKeyValueStore db) { -#if DEBUG - if (key == Keccak.OfAnEmptyString) + public byte[]? 
Get(Hash256 key) { - throw new InvalidOperationException(); + GuardKey(key); + return db[key.Bytes]; } -#endif - return db.GetSpan(key.Bytes); - } + /// + /// + /// + /// + /// Can return null or empty Span on missing key + /// + public Span GetSpan(Hash256 key) + { + GuardKey(key); + return db.GetSpan(key.Bytes); + } - public static bool KeyExists(this IReadOnlyKeyValueStore db, Hash256 key) - { -#if DEBUG - if (key == Keccak.OfAnEmptyString) + public bool KeyExists(Hash256 key) { - throw new InvalidOperationException(); + GuardKey(key); + return db.KeyExists(key.Bytes); } -#endif - return db.KeyExists(key.Bytes); - } + public bool KeyExists(long key) => db.KeyExists(key.ToBigEndianSpanWithoutLeadingZeros(out _)); - public static bool KeyExists(this IReadOnlyKeyValueStore db, long key) - { - return db.KeyExists(key.ToBigEndianSpanWithoutLeadingZeros(out _)); + public byte[]? Get(long key) => db[key.ToBigEndianSpanWithoutLeadingZeros(out _)]; } - public static byte[]? Get(this IReadOnlyKeyValueStore db, long key) => db[key.ToBigEndianSpanWithoutLeadingZeros(out _)]; - - /// - /// - /// - /// - /// - /// Can return null or empty Span on missing key - public static Span GetSpan(this IReadOnlyKeyValueStore db, long key) => db.GetSpan(key.ToBigEndianSpanWithoutLeadingZeros(out _)); - - public static MemoryManager? GetOwnedMemory(this IReadOnlyKeyValueStore db, ReadOnlySpan key) + extension(IWriteOnlyKeyValueStore db) { - Span span = db.GetSpan(key); - return span.IsNullOrEmpty() ? null : new DbSpanMemoryManager(db, span); - } + public IWriteBatch LikeABatch(Action? 
onDispose = null) => new FakeWriteBatch(db, onDispose); + public void Set(Hash256 key, byte[] value, WriteFlags writeFlags = WriteFlags.None) + { + if (db.PreferWriteByArray) + { + db.Set(key.Bytes, value, writeFlags); + } + else + { + db.PutSpan(key.Bytes, value, writeFlags); + } + } - #endregion + public void Set(Hash256 key, in CappedArray value, WriteFlags writeFlags = WriteFlags.None) + { + if (db.PreferWriteByArray && value.IsUncapped) + { + db.Set(key.Bytes, value.UnderlyingArray, writeFlags); + } + else + { + db.PutSpan(key.Bytes, value.AsSpan(), writeFlags); + } + } + public void Set(long blockNumber, Hash256 key, ReadOnlySpan value, WriteFlags writeFlags = WriteFlags.None) + { + Span blockNumberPrefixedKey = stackalloc byte[40]; + GetBlockNumPrefixedKey(blockNumber, key, blockNumberPrefixedKey); + db.PutSpan(blockNumberPrefixedKey, value, writeFlags); + } - #region Setters + public void Set(in ValueHash256 key, Span value) + { + db.PutSpan(key.Bytes, value); + } - public static void Set(this IWriteOnlyKeyValueStore db, Hash256 key, byte[] value, WriteFlags writeFlags = WriteFlags.None) - { - if (db.PreferWriteByArray) + public void Delete(Hash256 key) { - db.Set(key.Bytes, value, writeFlags); - return; + db.Remove(key.Bytes); } - db.PutSpan(key.Bytes, value, writeFlags); - } - public static void Set(this IWriteOnlyKeyValueStore db, Hash256 key, in CappedArray value, WriteFlags writeFlags = WriteFlags.None) - { - if (value.IsUncapped && db.PreferWriteByArray) + public void Delete(long key) { - db.PutSpan(key.Bytes, value.AsSpan(), writeFlags); - return; + db.Remove(key.ToBigEndianSpanWithoutLeadingZeros(out _)); } - db.PutSpan(key.Bytes, value.AsSpan(), writeFlags); - } + [SkipLocalsInit] + public void Delete(long blockNumber, Hash256 hash) + { + Span key = stackalloc byte[40]; + GetBlockNumPrefixedKey(blockNumber, hash, key); + db.Remove(key); + } - public static void Set(this IWriteOnlyKeyValueStore db, long blockNumber, Hash256 key, ReadOnlySpan value, 
WriteFlags writeFlags = WriteFlags.None) - { - Span blockNumberPrefixedKey = stackalloc byte[40]; - GetBlockNumPrefixedKey(blockNumber, key, blockNumberPrefixedKey); - db.PutSpan(blockNumberPrefixedKey, value, writeFlags); + public void Set(long key, byte[] value) + { + db[key.ToBigEndianSpanWithoutLeadingZeros(out _)] = value; + } } public static void GetBlockNumPrefixedKey(long blockNumber, ValueHash256 blockHash, Span output) @@ -127,35 +120,5 @@ public static void GetBlockNumPrefixedKey(long blockNumber, ValueHash256 blockHa blockNumber.WriteBigEndian(output); blockHash!.Bytes.CopyTo(output[8..]); } - - public static void Set(this IWriteOnlyKeyValueStore db, in ValueHash256 key, Span value) - { - db.PutSpan(key.Bytes, value); - } - - public static void Delete(this IWriteOnlyKeyValueStore db, Hash256 key) - { - db.Remove(key.Bytes); - } - - public static void Delete(this IWriteOnlyKeyValueStore db, long key) - { - db.Remove(key.ToBigEndianSpanWithoutLeadingZeros(out _)); - } - - [SkipLocalsInit] - public static void Delete(this IWriteOnlyKeyValueStore db, long blockNumber, Hash256 hash) - { - Span key = stackalloc byte[40]; - GetBlockNumPrefixedKey(blockNumber, hash, key); - db.Remove(key); - } - - public static void Set(this IWriteOnlyKeyValueStore db, long key, byte[] value) - { - db[key.ToBigEndianSpanWithoutLeadingZeros(out _)] = value; - } - - #endregion } } diff --git a/src/Nethermind/Nethermind.Db.Rocks/ColumnDb.cs b/src/Nethermind/Nethermind.Db.Rocks/ColumnDb.cs index af75f303e417..94633dfd94f6 100644 --- a/src/Nethermind/Nethermind.Db.Rocks/ColumnDb.cs +++ b/src/Nethermind/Nethermind.Db.Rocks/ColumnDb.cs @@ -2,9 +2,12 @@ // SPDX-License-Identifier: LGPL-3.0-only using System; +using System.Buffers; using System.Collections.Generic; using Nethermind.Core; +using Nethermind.Core.Buffers; using Nethermind.Core.Crypto; +using Nethermind.Core.Extensions; using RocksDbSharp; using IWriteBatch = Nethermind.Core.IWriteBatch; @@ -31,52 +34,34 @@ public 
ColumnDb(RocksDb rocksDb, DbOnTheRocks mainDb, string name) _reader = new RocksDbReader(mainDb, mainDb.CreateReadOptions, _iteratorManager, _columnFamily); } - public void Dispose() - { - _iteratorManager.Dispose(); - } - + public void Dispose() => _iteratorManager.Dispose(); public string Name { get; } - byte[]? IReadOnlyKeyValueStore.Get(ReadOnlySpan key, ReadFlags flags) - { - return _reader.Get(key, flags); - } + byte[]? IReadOnlyKeyValueStore.Get(ReadOnlySpan key, ReadFlags flags) => _reader.Get(key, flags); - Span IReadOnlyKeyValueStore.GetSpan(scoped ReadOnlySpan key, ReadFlags flags) - { - return _reader.GetSpan(key, flags); - } + Span IReadOnlyKeyValueStore.GetSpan(scoped ReadOnlySpan key, ReadFlags flags) => _reader.GetSpan(key, flags); - int IReadOnlyKeyValueStore.Get(scoped ReadOnlySpan key, Span output, ReadFlags flags) + MemoryManager? IReadOnlyKeyValueStore.GetOwnedMemory(ReadOnlySpan key, ReadFlags flags) { - return _reader.Get(key, output, flags); + Span span = ((IReadOnlyKeyValueStore)this).GetSpan(key, flags); + return span.IsNullOrEmpty() ? null : new DbSpanMemoryManager(this, span); } - bool IReadOnlyKeyValueStore.KeyExists(ReadOnlySpan key) - { - return _reader.KeyExists(key); - } - void IReadOnlyKeyValueStore.DangerousReleaseMemory(in ReadOnlySpan key) - { - _reader.DangerousReleaseMemory(key); - } + int IReadOnlyKeyValueStore.Get(scoped ReadOnlySpan key, Span output, ReadFlags flags) => _reader.Get(key, output, flags); - public void Set(ReadOnlySpan key, byte[]? value, WriteFlags flags = WriteFlags.None) - { + bool IReadOnlyKeyValueStore.KeyExists(ReadOnlySpan key) => _reader.KeyExists(key); + + void IReadOnlyKeyValueStore.DangerousReleaseMemory(in ReadOnlySpan key) => _reader.DangerousReleaseMemory(key); + + public void Set(ReadOnlySpan key, byte[]? 
value, WriteFlags flags = WriteFlags.None) => _mainDb.SetWithColumnFamily(key, _columnFamily, value, flags); - } - public void PutSpan(ReadOnlySpan key, ReadOnlySpan value, WriteFlags writeFlags = WriteFlags.None) - { + public void PutSpan(ReadOnlySpan key, ReadOnlySpan value, WriteFlags writeFlags = WriteFlags.None) => _mainDb.SetWithColumnFamily(key, _columnFamily, value, writeFlags); - } - public void Merge(ReadOnlySpan key, ReadOnlySpan value, WriteFlags writeFlags = WriteFlags.None) - { + public void Merge(ReadOnlySpan key, ReadOnlySpan value, WriteFlags writeFlags = WriteFlags.None) => _mainDb.MergeWithColumnFamily(key, _columnFamily, value, writeFlags); - } public KeyValuePair[] this[byte[][] keys] { @@ -106,77 +91,48 @@ public IEnumerable GetAllValues(bool ordered = false) return _mainDb.GetAllValuesCore(iterator); } - public IWriteBatch StartWriteBatch() - { - return new ColumnsDbWriteBatch(this, (DbOnTheRocks.RocksDbWriteBatch)_mainDb.StartWriteBatch()); - } + public IWriteBatch StartWriteBatch() => new ColumnsDbWriteBatch(this, (DbOnTheRocks.RocksDbWriteBatch)_mainDb.StartWriteBatch()); - private class ColumnsDbWriteBatch : IWriteBatch + private class ColumnsDbWriteBatch(ColumnDb columnDb, DbOnTheRocks.RocksDbWriteBatch underlyingWriteBatch) + : IWriteBatch { - private readonly ColumnDb _columnDb; - private readonly DbOnTheRocks.RocksDbWriteBatch _underlyingWriteBatch; + public void Dispose() => underlyingWriteBatch.Dispose(); - public ColumnsDbWriteBatch(ColumnDb columnDb, DbOnTheRocks.RocksDbWriteBatch underlyingWriteBatch) - { - _columnDb = columnDb; - _underlyingWriteBatch = underlyingWriteBatch; - } - - public void Dispose() - { - _underlyingWriteBatch.Dispose(); - } - - public void Clear() - { - _underlyingWriteBatch.Clear(); - } + public void Clear() => underlyingWriteBatch.Clear(); public void Set(ReadOnlySpan key, byte[]? 
value, WriteFlags flags = WriteFlags.None) { if (value is null) { - _underlyingWriteBatch.Delete(key, _columnDb._columnFamily); + underlyingWriteBatch.Delete(key, columnDb._columnFamily); } else { - _underlyingWriteBatch.Set(key, value, _columnDb._columnFamily, flags); + underlyingWriteBatch.Set(key, value, columnDb._columnFamily, flags); } } - public void PutSpan(ReadOnlySpan key, ReadOnlySpan value, WriteFlags flags = WriteFlags.None) - { - _underlyingWriteBatch.Set(key, value, _columnDb._columnFamily, flags); - } + public void PutSpan(ReadOnlySpan key, ReadOnlySpan value, WriteFlags flags = WriteFlags.None) => + underlyingWriteBatch.Set(key, value, columnDb._columnFamily, flags); - public void Merge(ReadOnlySpan key, ReadOnlySpan value, WriteFlags flags = WriteFlags.None) - { - _underlyingWriteBatch.Merge(key, value, _columnDb._columnFamily, flags); - } + public void Merge(ReadOnlySpan key, ReadOnlySpan value, WriteFlags flags = WriteFlags.None) => + underlyingWriteBatch.Merge(key, value, columnDb._columnFamily, flags); } - public void Remove(ReadOnlySpan key) - { - Set(key, null); - } + public void Remove(ReadOnlySpan key) => Set(key, null); - public void Flush(bool onlyWal) - { - _mainDb.FlushWithColumnFamily(_columnFamily); - } + public void Flush(bool onlyWal) => _mainDb.FlushWithColumnFamily(_columnFamily); - public void Compact() - { + public void Compact() => _rocksDb.CompactRange(Keccak.Zero.BytesToArray(), Keccak.MaxValue.BytesToArray(), _columnFamily); - } /// /// Not sure how to handle delete of the columns DB /// /// - public void Clear() { throw new NotSupportedException(); } + public void Clear() => throw new NotSupportedException(); - // Maybe it should be column specific metric? + // Maybe it should be column-specific metric? public IDbMeta.DbMetric GatherMetric() => _mainDb.GatherMetric(); public byte[]? FirstKey @@ -199,10 +155,8 @@ public byte[]? 
LastKey } } - public ISortedView GetViewBetween(ReadOnlySpan firstKey, ReadOnlySpan lastKey) - { - return _mainDb.GetViewBetween(firstKey, lastKey, _columnFamily); - } + public ISortedView GetViewBetween(ReadOnlySpan firstKey, ReadOnlySpan lastKey) => + _mainDb.GetViewBetween(firstKey, lastKey, _columnFamily); public IKeyValueStoreSnapshot CreateSnapshot() { diff --git a/src/Nethermind/Nethermind.Db.Rocks/DbOnTheRocks.cs b/src/Nethermind/Nethermind.Db.Rocks/DbOnTheRocks.cs index b5488c733b3a..cca010c66f8c 100644 --- a/src/Nethermind/Nethermind.Db.Rocks/DbOnTheRocks.cs +++ b/src/Nethermind/Nethermind.Db.Rocks/DbOnTheRocks.cs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: LGPL-3.0-only using System; +using System.Buffers; using System.Buffers.Binary; using System.Collections.Concurrent; using System.Collections.Generic; @@ -18,6 +19,7 @@ using ConcurrentCollections; using Nethermind.Config; using Nethermind.Core; +using Nethermind.Core.Buffers; using Nethermind.Core.Crypto; using Nethermind.Core.Exceptions; using Nethermind.Core.Extensions; @@ -103,7 +105,7 @@ public DbOnTheRocks( IRocksDbConfigFactory rocksDbConfigFactory, ILogManager logManager, IList? columnFamilies = null, - RocksDbSharp.Native? rocksDbNative = null, + Native? rocksDbNative = null, IFileSystem? fileSystem = null, IntPtr? sharedCache = null) { @@ -111,7 +113,7 @@ public DbOnTheRocks( _settings = dbSettings; Name = _settings.DbName; _fileSystem = fileSystem ?? new FileSystem(); - _rocksDbNative = rocksDbNative ?? RocksDbSharp.Native.Instance; + _rocksDbNative = rocksDbNative ?? Native.Instance; _rocksDbConfigFactory = rocksDbConfigFactory; _perTableDbConfig = rocksDbConfigFactory.GetForDatabase(Name, null); _db = Init(basePath, dbSettings.DbPath, dbConfig, logManager, columnFamilies, dbSettings.DeleteOnStart, sharedCache); @@ -129,7 +131,6 @@ protected virtual RocksDb DoOpen(string path, (DbOptions Options, ColumnFamilies private RocksDb Open(string path, (DbOptions Options, ColumnFamilies? 
Families) db) { RepairIfCorrupted(db.Options); - return DoOpen(path, db); } @@ -179,7 +180,7 @@ private RocksDb Init(string basePath, string dbPath, IDbConfig dbConfig, ILogMan if (dbConfig.EnableMetricsUpdater) { - DbMetricsUpdater metricUpdater = new DbMetricsUpdater(Name, DbOptions, db, null, dbConfig, _logger); + DbMetricsUpdater metricUpdater = new(Name, DbOptions, db, null, dbConfig, _logger); metricUpdater.StartUpdating(); _metricsUpdaters.Add(metricUpdater); @@ -190,7 +191,7 @@ private RocksDb Init(string basePath, string dbPath, IDbConfig dbConfig, ILogMan if (columnFamily.Name == "default") continue; if (db.TryGetColumnFamily(columnFamily.Name, out ColumnFamilyHandle handle)) { - DbMetricsUpdater columnMetricUpdater = new DbMetricsUpdater( + DbMetricsUpdater columnMetricUpdater = new( Name + "_" + columnFamily.Name, columnFamily.Options, db, handle, dbConfig, _logger); columnMetricUpdater.StartUpdating(); _metricsUpdaters.Add(columnMetricUpdater); @@ -264,8 +265,8 @@ private void WarmupFile(string basePath, RocksDb db) } return take; }) - // We reverse them again so that lower level goes last so that it is the freshest. - // Not all of the available memory is actually available so we are probably over reading things. + // We reverse them again so that the lower level goes last so that it is the freshest. + // Not all the available memory is actually available, so we are probably over reading things. .Reverse() .ToList(); @@ -340,15 +341,11 @@ protected internal void UpdateWriteMetrics() Interlocked.Increment(ref _totalWrites); } - protected virtual long FetchTotalPropertyValue(string propertyName) - { - long value = long.TryParse(_db.GetProperty(propertyName), out long parsedValue) + protected virtual long FetchTotalPropertyValue(string propertyName) => + long.TryParse(_db.GetProperty(propertyName), out long parsedValue) ? 
parsedValue : 0; - return value; - } - public IDbMeta.DbMetric GatherMetric() { if (_isDisposed) @@ -491,8 +488,8 @@ public static string NormalizeRocksDbOptions(string dbOptions) protected virtual void BuildOptions(IRocksDbConfig dbConfig, Options options, IntPtr? sharedCache, IMergeOperator? mergeOperator) where T : Options { - // This section is about the table factory.. and block cache apparently. - // This effect the format of the SST files and usually require resync to take effect. + // This section is about the table factory and block cache, apparently. + // This affects the format of the SST files and usually requires resyncing to take effect. // Note: Keep in mind, the term 'index' here usually means mapping to a block, not to a value. #region TableFactory sections @@ -559,21 +556,18 @@ protected virtual void BuildOptions(IRocksDbConfig dbConfig, Options optio int writeBufferNumber = _maxWriteBufferNumber; _maxThisDbSize += (long)writeBufferSize * writeBufferNumber; Interlocked.Add(ref _maxRocksSize, _maxThisDbSize); - if (_logger.IsDebug) - _logger.Debug( - $"Expected max memory footprint of {Name} DB is {_maxThisDbSize / 1000 / 1000} MB ({writeBufferNumber} * {writeBufferSize / 1000 / 1000} MB + {blockCacheSize / 1000 / 1000} MB)"); + if (_logger.IsDebug) _logger.Debug($"Expected max memory footprint of {Name} DB is {_maxThisDbSize / 1000 / 1000} MB ({writeBufferNumber} * {writeBufferSize / 1000 / 1000} MB + {blockCacheSize / 1000 / 1000} MB)"); if (_logger.IsDebug) _logger.Debug($"Total max DB footprint so far is {_maxRocksSize / 1000 / 1000} MB"); } #endregion - // This section affect compactions, flushes and the LSM shape. + // This section affects compactions, flushes and the LSM shape. #region Compaction /* - * Multi-Threaded Compactions - * Compactions are needed to remove multiple copies of the same key that may occur if an application overwrites an existing key. Compactions also process deletions of keys. 
Compactions may occur in multiple threads if configured appropriately. + * Multi-Threaded Compactions are needed to remove multiple copies of the same key that may occur if an application overwrites an existing key. Compactions also process deletions of keys. Compactions may occur in multiple threads if configured appropriately. * The entire database is stored in a set of sstfiles. When a memtable is full, its content is written out to a file in Level-0 (L0). RocksDB removes duplicate and overwritten keys in the memtable when it is flushed to a file in L0. Some files are periodically read in and merged to form larger files - this is called compaction. - * The overall write throughput of an LSM database directly depends on the speed at which compactions can occur, especially when the data is stored in fast storage like SSD or RAM. RocksDB may be configured to issue concurrent compaction requests from multiple threads. It is observed that sustained write rates may increase by as much as a factor of 10 with multi-threaded compaction when the database is on SSDs, as compared to single-threaded compactions. + * The overall writing throughput of an LSM database directly depends on the speed at which compactions can occur, especially when the data is stored in fast storage like SSD or RAM. RocksDB may be configured to issue concurrent compaction requests from multiple threads. It is observed that sustained write rates may increase by as much as a factor of 10 with multi-threaded compaction when the database is on SSDs, as compared to single-threaded compactions. 
* TKS: Observed 500MB/s compared to ~100MB/s between multithreaded and single thread compactions on my machine (processor count is returning 12 for 6 cores with hyperthreading) * TKS: CPU goes to insane 30% usage on idle - compacting only app */ @@ -592,11 +586,11 @@ protected virtual void BuildOptions(IRocksDbConfig dbConfig, Options optio if (dbConfig.RowCacheSize > 0) { - // Row cache is basically a per-key cache. Nothing special to it. This is different from block cache - // which cache the whole block at once, so read still need to traverse the block index, so this could be + // Row cache is basically a per-key cache. Nothing special about it. This is different from a block cache + // that caches the whole block at once, so read still needs to traverse the block index, so this could be // more CPU efficient. - // Note: Memtable also act like a per-key cache, that does not get updated on read. So in some case - // maybe it make more sense to put more memory to memtable. + // Note: Memtable also acts like a per-key cache that does not get updated on read. So in some case + // maybe it makes more sense to put more memory to memtable. _rowCache = _rocksDbNative.rocksdb_cache_create_lru(new UIntPtr(dbConfig.RowCacheSize.Value)); _rocksDbNative.rocksdb_options_set_row_cache(options.Handle, _rowCache.Value); } @@ -655,7 +649,7 @@ protected virtual void BuildOptions(IRocksDbConfig dbConfig, Options optio _hintCacheMissOptions = CreateReadOptions(); _hintCacheMissOptions.SetFillCache(false); - // When readahead flag is on, the next keys are expected to be after the current key. Increasing this value, + // When a readahead flag is on, the next keys are expected to be after the current key. Increasing this value // will increase the chances that the next keys will be in the cache, which reduces iops and latency. 
This // increases throughput, however, if a lot of the keys are not close to the current key, it will increase read // bandwidth requirement, since each read must be at least this size. This value is tuned for a batched trie @@ -672,7 +666,7 @@ protected virtual void BuildOptions(IRocksDbConfig dbConfig, Options optio private static WriteOptions CreateWriteOptions(IRocksDbConfig dbConfig) { WriteOptions options = new(); - // potential fix for corruption on hard process termination, may cause performance degradation + // a potential fix for corruption on hard process termination may cause performance degradation options.SetSync(dbConfig.WriteAheadLogSync); return options; } @@ -684,32 +678,23 @@ internal ReadOptions CreateReadOptions() return readOptions; } - byte[]? IReadOnlyKeyValueStore.Get(ReadOnlySpan key, ReadFlags flags) - { - return _reader.Get(key, flags); - } + byte[]? IReadOnlyKeyValueStore.Get(ReadOnlySpan key, ReadFlags flags) => _reader.Get(key, flags); - Span IReadOnlyKeyValueStore.GetSpan(scoped ReadOnlySpan key, ReadFlags flags) - { - return _reader.GetSpan(key, flags); - } + Span IReadOnlyKeyValueStore.GetSpan(scoped ReadOnlySpan key, ReadFlags flags) => _reader.GetSpan(key, flags); - int IReadOnlyKeyValueStore.Get(scoped ReadOnlySpan key, Span output, ReadFlags flags) + MemoryManager? IReadOnlyKeyValueStore.GetOwnedMemory(ReadOnlySpan key, ReadFlags flags) { - return _reader.Get(key, output, flags); + Span span = ((IReadOnlyKeyValueStore)this).GetSpan(key, flags); + return span.IsNullOrEmpty() ? 
null : new DbSpanMemoryManager(this, span); } - bool IReadOnlyKeyValueStore.KeyExists(ReadOnlySpan key) - { - return _reader.KeyExists(key); - } + int IReadOnlyKeyValueStore.Get(scoped ReadOnlySpan key, Span output, ReadFlags flags) => _reader.Get(key, output, flags); - void IReadOnlyKeyValueStore.DangerousReleaseMemory(in ReadOnlySpan span) - { - _reader.DangerousReleaseMemory(span); - } + bool IReadOnlyKeyValueStore.KeyExists(ReadOnlySpan key) => _reader.KeyExists(key); + + void IReadOnlyKeyValueStore.DangerousReleaseMemory(in ReadOnlySpan span) => _reader.DangerousReleaseMemory(span); - internal unsafe byte[]? GetWithIterator(ReadOnlySpan key, ColumnFamilyHandle? cf, IteratorManager iteratorManager, ReadFlags flags, out bool success) + internal byte[]? GetWithIterator(ReadOnlySpan key, ColumnFamilyHandle? cf, IteratorManager iteratorManager, ReadFlags flags, out bool success) { success = true; @@ -770,10 +755,7 @@ void IReadOnlyKeyValueStore.DangerousReleaseMemory(in ReadOnlySpan span) } [DoesNotReturn, StackTraceHidden] - static unsafe void ThrowRocksDbException(nint errPtr) - { - throw new RocksDbException(errPtr); - } + static void ThrowRocksDbException(nint errPtr) => throw new RocksDbException(errPtr); } /// @@ -783,6 +765,7 @@ static unsafe void ThrowRocksDbException(nint errPtr) /// /// /// + /// /// private bool TryCloseReadAhead(Iterator iterator, ReadOnlySpan key, out byte[]? result) { @@ -842,10 +825,8 @@ private bool TryCloseReadAhead(Iterator iterator, ReadOnlySpan key, out by return false; } - public void Set(ReadOnlySpan key, byte[]? value, WriteFlags flags = WriteFlags.None) - { + public void Set(ReadOnlySpan key, byte[]? value, WriteFlags flags = WriteFlags.None) => SetWithColumnFamily(key, null, value, flags); - } internal void SetWithColumnFamily(ReadOnlySpan key, ColumnFamilyHandle? cf, ReadOnlySpan value, WriteFlags flags = WriteFlags.None) { @@ -871,25 +852,13 @@ internal void SetWithColumnFamily(ReadOnlySpan key, ColumnFamilyHandle? 
cf } } - public WriteOptions? WriteFlagsToWriteOptions(WriteFlags flags) + public WriteOptions? WriteFlagsToWriteOptions(WriteFlags flags) => flags switch { - if ((flags & WriteFlags.LowPriorityAndNoWAL) == WriteFlags.LowPriorityAndNoWAL) - { - return _lowPriorityAndNoWalWrite; - } - - if ((flags & WriteFlags.DisableWAL) == WriteFlags.DisableWAL) - { - return _noWalWrite; - } - - if ((flags & WriteFlags.LowPriority) == WriteFlags.LowPriority) - { - return _lowPriorityWriteOptions; - } - - return WriteOptions; - } + _ when (flags & WriteFlags.LowPriorityAndNoWAL) == WriteFlags.LowPriorityAndNoWAL => _lowPriorityAndNoWalWrite, + _ when (flags & WriteFlags.DisableWAL) == WriteFlags.DisableWAL => _noWalWrite, + _ when (flags & WriteFlags.LowPriority) == WriteFlags.LowPriority => _lowPriorityWriteOptions, + _ => WriteOptions + }; public KeyValuePair[] this[byte[][] keys] @@ -939,15 +908,15 @@ internal unsafe int GetCStyleWithColumnFamily(scoped ReadOnlySpan key, Spa UpdateReadMetrics(); nint db = _db.Handle; - nint read_options = readOptions.Handle; + nint readOptionsHandle = readOptions.Handle; UIntPtr skLength = (UIntPtr)key.Length; IntPtr errPtr; IntPtr slice; fixed (byte* ptr = &MemoryMarshal.GetReference(key)) { slice = cf is null - ? Native.Instance.rocksdb_get_pinned(db, read_options, ptr, skLength, out errPtr) - : Native.Instance.rocksdb_get_pinned_cf(db, read_options, cf.Handle, ptr, skLength, out errPtr); + ? 
Native.Instance.rocksdb_get_pinned(db, readOptionsHandle, ptr, skLength, out errPtr) + : Native.Instance.rocksdb_get_pinned_cf(db, readOptionsHandle, cf.Handle, ptr, skLength, out errPtr); } if (errPtr != IntPtr.Zero) ThrowRocksDbException(errPtr); @@ -972,22 +941,15 @@ internal unsafe int GetCStyleWithColumnFamily(scoped ReadOnlySpan key, Spa return length; [DoesNotReturn, StackTraceHidden] - static unsafe void ThrowRocksDbException(nint errPtr) - { - throw new RocksDbException(errPtr); - } + static void ThrowRocksDbException(nint errPtr) => throw new RocksDbException(errPtr); [DoesNotReturn, StackTraceHidden] - static unsafe void ThrowNotEnoughMemory(int length, int bufferLength) - { + static void ThrowNotEnoughMemory(int length, int bufferLength) => throw new ArgumentException($"Output buffer not large enough. Output size: {length}, Buffer size: {bufferLength}"); - } } - public void PutSpan(ReadOnlySpan key, ReadOnlySpan value, WriteFlags writeFlags) - { + public void PutSpan(ReadOnlySpan key, ReadOnlySpan value, WriteFlags writeFlags) => SetWithColumnFamily(key, null, value, writeFlags); - } public void Merge(ReadOnlySpan key, ReadOnlySpan value, WriteFlags flags = WriteFlags.None) { @@ -1277,8 +1239,8 @@ internal class RocksDbWriteBatch : IWriteBatch /// /// Because of how rocksdb parallelize writes, a large write batch can stall other new concurrent writes, so - /// we writes the batch in smaller batches. This removes atomicity so its only turned on when NoWAL flag is on. - /// It does not work as well as just turning on unordered_write, but Snapshot and Iterator can still works. + /// we write the batch in smaller batches. This removes atomicity so it's only turned on when the NoWAL flag is on. + /// It does not work as well as just turning on unordered_write, but Snapshot and Iterator can still work. 
/// private const int MaxWritesOnNoWal = 256; private int _writeCount; @@ -1316,7 +1278,6 @@ private static void ReturnWriteBatch(WriteBatch batch) public void Clear() { ObjectDisposedException.ThrowIf(_dbOnTheRocks._isDisposed, _dbOnTheRocks); - _rocksBatch.Clear(); } @@ -1368,20 +1329,14 @@ public void Set(ReadOnlySpan key, ReadOnlySpan value, ColumnFamilyHa if ((flags & WriteFlags.DisableWAL) != 0) FlushOnTooManyWrites(); } - public void Set(ReadOnlySpan key, byte[]? value, WriteFlags flags = WriteFlags.None) - { + public void Set(ReadOnlySpan key, byte[]? value, WriteFlags flags = WriteFlags.None) => Set(key, value, null, flags); - } - public void PutSpan(ReadOnlySpan key, ReadOnlySpan value, WriteFlags flags = WriteFlags.None) - { + public void PutSpan(ReadOnlySpan key, ReadOnlySpan value, WriteFlags flags = WriteFlags.None) => Set(key, value, null, flags); - } - public void Merge(ReadOnlySpan key, ReadOnlySpan value, WriteFlags flags = WriteFlags.None) - { + public void Merge(ReadOnlySpan key, ReadOnlySpan value, WriteFlags flags = WriteFlags.None) => Merge(key, value, null, flags); - } public void Merge(ReadOnlySpan key, ReadOnlySpan value, ColumnFamilyHandle? 
cf = null, WriteFlags flags = WriteFlags.None) { @@ -1497,18 +1452,13 @@ private class FlushOptions { internal static FlushOptions DefaultFlushOptions { get; } = new(); - public FlushOptions() - { - Handle = RocksDbSharp.Native.Instance.rocksdb_flushoptions_create(); - } - - public IntPtr Handle { get; private set; } + public IntPtr Handle { get; private set; } = Native.Instance.rocksdb_flushoptions_create(); ~FlushOptions() { if (Handle != IntPtr.Zero) { - RocksDbSharp.Native.Instance.rocksdb_flushoptions_destroy(Handle); + Native.Instance.rocksdb_flushoptions_destroy(Handle); Handle = IntPtr.Zero; } } @@ -1573,15 +1523,15 @@ public virtual void Tune(ITunableDb.TuneType type) // See https://github.com/EighteenZi/rocksdb_wiki/blob/master/RocksDB-Tuning-Guide.md switch (type) { - // Depending on tune type, allow num of L0 files to grow causing compaction to occur in larger size. This + // Depending on tune type, allow num of L0 files to grow causing compaction to occur in a larger size. This // reduces write amplification at the expense of read response time and amplification while the tune is // active. Additionally, the larger compaction causes larger spikes of IO, larger memory usage, and may temporarily - // use up large amount of disk space. User may not want to enable this if they plan to run a validator node - // while the node is still syncing, or run another node on the same machine. Specifying a rate limit + // use up a large amount of disk space. User may not want to enable this if they plan to run a validator node + // while the node is still syncing or run another node on the same machine. Specifying a rate limit // smoothens this spike somewhat by not blocking writes while allowing compaction to happen in background // at 1/10th the specified speed (if rate limited). // - // Total writes written on different tune during mainnet sync in TB. + // Total writes written on different tunes during mainnet sync in TB. 
// +-----------------------+-------+-------+-------+-------+-------+---------+ // | L0FileNumTarget | Total | State | Code | Header| Blocks| Receipts | // +-----------------------+-------+-------+-------+-------+-------+---------+ @@ -1592,45 +1542,45 @@ public virtual void Tune(ITunableDb.TuneType type) // | DisableCompaction | 2.215 | 0.36 | 0.031 | 0.137 | 1.14 | 0.547 | // +-----------------------+-------+-------+-------+-------+-------+---------+ // Note, in practice on my machine, the reads does not reach the SSD. Read measured from SSD is much lower - // than read measured from process. It is likely that most files are cached as I have 128GB of RAM. + // than read measured from a process. It is likely that most files are cached as I have 128GB of RAM. // Also notice that the heavier the tune, the higher the reads. case ITunableDb.TuneType.WriteBias: - // Keep the same l1 size but apply other adjustment which should increase buffer number and make - // l0 the same size as l1, but keep the LSM the same. This improve flush parallelization, and + // Keep the same l1 size but apply other adjustment which should increase the buffer number and make + // l0 the same size as l1 but keep the LSM the same. This improves flush parallelization and // write amplification due to mismatch of l0 and l1 size, but does not reduce compaction from other // levels. ApplyOptions(GetHeavyWriteOptions(_maxBytesForLevelBase)); break; case ITunableDb.TuneType.HeavyWrite: - // Compaction spikes are clear at this point. Will definitely affect attestation performance. - // Its unclear if it improve or slow down sync time. Seems to be the sweet spot. + // Compaction spikes are clear at this point. Will definitely affect attestation performance. + // It's unclear if it improves or slows down sync time. Seems to be the sweet spot. 
ApplyOptions(GetHeavyWriteOptions((ulong)2.GiB())); break; case ITunableDb.TuneType.AggressiveHeavyWrite: - // For when, you are desperate, but don't wanna disable compaction completely, because you don't want + // For when you are desperate, but don't wanna disable compaction completely, because you don't want // peers to drop. Tend to be faster than disabling compaction completely, except if your ratelimit // is a bit low and your compaction is lagging behind, which will trigger slowdown, so sync will hang // intermittently, but at least peer count is stable. ApplyOptions(GetHeavyWriteOptions((ulong)16.GiB())); break; case ITunableDb.TuneType.DisableCompaction: - // Completely disable compaction. On mainnet, max num of l0 files for state seems to be about 10800. - // Blocksdb are way more at 53000. Final compaction for state db need 30 minute, while blocks db need - // 13 hour. Receipts db don't show up in metrics likely because its a column db. + // Completely disable compaction. On mainnet, the max num of l0 files for state seems to be about 10800. + // Blocksdb are way more at 53000. Final compaction for state db needs 30 minutes, while blocks db need + // 13 hours. Receipts db don't show up in metrics likely because it's a column db. // Ram usage at that time was 86 GB. The default buffer size for blocks on mainnet is too low // to make this work reasonably well. - // L0 to L1 compaction is known to be slower than other level so its - // Snap sync performance suffer as it does have some read during stitching. - // If you don't specify a lower open files limit, it has a tendency to crash, like.. the whole system - // crash. I don't have any open file limit at OS level. 
- // Also, if a peer send a packet that causes a query to the state db during snap sync like GetNodeData - // or some of the tx filter querying state, It'll cause the network stack to hang and triggers a + // L0 to L1 compaction is known to be slower than other levels, so its + // Snap sync performance suffers as it does have some read during stitching. + // If you don't specify a lower open files limit, it tends to crash, like... the whole system + // crashes. I don't have any open file limit at OS level. + // Also, if a peer sends a packet that causes a query to the state db during snap sync like GetNodeData + // or some of the tx filter querying state, it'll cause the network stack to hang and triggers - large peer drops. Also happens on lesser tune, but weaker. - // State sync essentially hang until that completes because its read heavy, and the uncompacted db is + // large peer drops. Also happens on lesser tune, but weaker. + // State sync essentially hangs until that completes because it's read heavy, and the uncompacted db is // slow to a halt. // Additionally, the number of open files handles measured from collectd jumped massively higher. Some // user config may not be able to handle this. - // With all those cons, this result in the minimum write amplification possible via tweaking compaction + // With all those cons, this results in the minimum write amplification possible via tweaking compaction // without changing memory budget. Not recommended for mainnet, unless you are very desperate. 
ApplyOptions(GetDisableCompactionOptions()); break; @@ -1649,15 +1599,11 @@ public virtual void Tune(ITunableDb.TuneType type) _currentTune = type; } - protected virtual void ApplyOptions(IDictionary options) - { - _db.SetOptions(options); - } + protected virtual void ApplyOptions(IDictionary options) => _db.SetOptions(options); - private IDictionary GetStandardOptions() - { + private IDictionary GetStandardOptions() => // Defaults are from rocksdb source code - return new Dictionary() + new Dictionary() { { "write_buffer_size", _writeBufferSize.ToString() }, { "max_write_buffer_number", _maxWriteBufferNumber.ToString() }, @@ -1666,7 +1612,7 @@ private IDictionary GetStandardOptions() { "level0_slowdown_writes_trigger", 20.ToString() }, // Very high, so that after moving from HeavyWrite, we don't immediately hang. - // This does means that under very rare case, the l0 file can accumulate, which slow down the db + // This does mean that under a very rare case, the l0 file can accumulate, which slows down the db // until they get compacted. { "level0_stop_writes_trigger", 1024.ToString() }, @@ -1679,13 +1625,11 @@ private IDictionary GetStandardOptions() { "soft_pending_compaction_bytes_limit", 64.GiB().ToString() }, { "hard_pending_compaction_bytes_limit", 256.GiB().ToString() }, }; - } - private IDictionary GetHashDbOptions() - { - return new Dictionary() + private IDictionary GetHashDbOptions() => + new Dictionary() { - // Some database config is slightly faster on hash db database. These are applied when hash db is detected + // Some database config is slightly faster on a hash db database. These are applied when hash db is detected // to prevent unexpected regression. { "table_factory.block_size", "4096" }, { "table_factory.block_restart_interval", "16" }, @@ -1693,35 +1637,34 @@ private IDictionary GetHashDbOptions() { "max_bytes_for_level_multiplier", "10" }, { "max_bytes_for_level_base", "256000000" }, }; - } /// - /// Allow num of l0 file to grow very large. 
This dramatically increase read response time by about - /// (l0FileNumTarget / (default num (4) + max level usually (4)). but it saves write bandwidth as l0->l1 happens - /// in larger size. In addition to that, the large base l1 size means the number of level is a bit lower. - /// Note: Regardless of max_open_files config, the number of files handle jumped by this number when compacting. It - /// could be that l0->l1 compaction does not (or cant?) follow the max_open_files limit. + /// Allow number of l0 files to grow very large. This dramatically increases read response time by about + /// (l0FileNumTarget / (default num (4) + max level usually (4)), but it saves write bandwidth as l0->l1 happens + /// in larger size. In addition to that, the large base l1 size means the number of levels is a bit lower. + /// Note: Regardless of max_open_files config, the number of files handles jumped by this number when compacting. It + /// could be that l0->l1 compaction does not (or can't?) follow the max_open_files limit. /// - /// + /// /// This caps the maximum allowed number of l0 files, which is also the read response time amplification. /// /// private IDictionary GetHeavyWriteOptions(ulong l0SizeTarget) { // Make buffer (probably) smaller so that it does not take too much memory to have many of them. - // More buffer means more parallel flush, but each read have to go through all buffer one by one much like l0 + // More buffer means more parallel flush, but each read has to go through all buffers one by one, much like l0 // but no io, only cpu. - // bufferSize*maxBufferNumber = 16MB*Core count, which is the max memory used, which tend to be the case as its now - // stalled by compaction instead of flush. - // The buffer is not compressed unlike l0File, so to account for it, its size need to be slightly larger. 
+ // bufferSize*maxBufferNumber = 16MB*Core count, which is the max memory used, which tends to be the case as it's now + // stalled by compaction instead of a flush. + // The buffer is not compressed unlike l0File, so to account for it, its size needs to be slightly larger. ulong targetFileSize = (ulong)16.MiB(); ulong bufferSize = (ulong)(targetFileSize / _perTableDbConfig.CompressibilityHint); ulong l0FileSize = targetFileSize * (ulong)_minWriteBufferToMerge; ulong maxBufferNumber = (ulong)Environment.ProcessorCount; - // Guide recommend to have l0 and l1 to be the same size. They have to be compacted together so if l1 is larger, + // Guide recommends having l0 and l1 to be the same size. They have to be compacted together, so if l1 is larger, // the extra size in l1 is basically extra rewrites. If l0 is larger... then I don't know why not. Even so, it seems to - // always get triggered when l0 size exceed max_bytes_for_level_base even if file num is less than l0FileNumTarget. + // always get triggered when l0 size exceeds max_bytes_for_level_base even if the file number is less than l0FileNumTarget. ulong l0FileNumTarget = l0SizeTarget / l0FileSize; ulong l1SizeTarget = l0SizeTarget; @@ -1749,10 +1692,10 @@ private IDictionary GetDisableCompactionOptions() IDictionary heavyWriteOption = GetHeavyWriteOptions((ulong)32.GiB()); heavyWriteOption["disable_auto_compactions"] = "true"; - // Increase the size of the write buffer, which reduces the number of l0 file by 4x. This does slows down + // Increase the size of the write buffer, which reduces the number of l0 files by 4x. This does slow down // the memtable a little bit. So if you are not write limited, you'll get memtable limited instead. // This does increase the total memory buffer size, but counterintuitively, this reduces overall memory usage - // as it ran out of bloom filter cache so it need to do actual IO. + // as it ran out of bloom filter cache, so it needs to do actual IO. 
heavyWriteOption["write_buffer_size"] = 64.MiB().ToString(); return heavyWriteOption; @@ -1762,17 +1705,17 @@ private static IDictionary GetDisableCompactionOptions() private static IDictionary GetBlobFilesOptions() { // Enable blob files, see: https://rocksdb.org/blog/2021/05/26/integrated-blob-db.html - // This is very useful for blocks, as it almost eliminate 95% of the compaction as the main db no longer + // This is very useful for blocks, as it almost eliminates 95% of the compaction as the main db no longer // store the actual data, but only points to blob files. This config reduces total blocks db writes from about - // 4.6 TB to 0.76 TB, where even the the WAL took 0.45 TB (wal is not compressed), with peak writes of about 300MBps, + // 4.6 TB to 0.76 TB, where even the WAL took 0.45 TB (wal is not compressed), with peak writes of about 300MBps, // it may not even saturate a SATA SSD on a 1GBps internet. - // You don't want to turn this on on other DB as it does add an indirection which take up an additional iop. + // You don't want to turn this on on other DBs as it does add an indirection which takes up an additional iop. // But for large values like blocks (3MB decompressed to 8MB), the response time increase is negligible. - // However without a large buffer size, it will create tens of thousands of small files. There are - // various workaround it, but it all increase total writes, which defeats the purpose. - // Additionally, as the `max_bytes_for_level_base` is set to very low, existing user will suddenly - // get a lot of compaction. So cant turn this on all the time. Turning this back off, will just put back + // However, without a large buffer size, it will create tens of thousands of small files. There are + // various workarounds for it, but it all increases total writes, which defeats the purpose. + // Additionally, as the `max_bytes_for_level_base` is set to very low, existing users will suddenly + // get a lot of compaction. 
So can't turn this on all the time. Turning this back off will just put back // new data to SST files. return new Dictionary() @@ -1780,9 +1723,9 @@ private static IDictionary GetBlobFilesOptions() { "enable_blob_files", "true" }, { "blob_compression_type", "kSnappyCompression" }, - // Make file size big, so we have less of them. + // Make the file size big, so we have less of them. { "write_buffer_size", 256.MiB().ToString() }, - // Current memtable + 2 concurrent writes. Can't have too many of these as it take up RAM. + // Current memtable + 2 concurrent writes. Can't have too many of these as it takes up RAM. { "max_write_buffer_number", 3.ToString() }, // These two are SST files instead of the blobs, which are now much smaller. @@ -1795,7 +1738,7 @@ private static IDictionary GetBlobFilesOptions() /// Iterators should not be kept for long as it will pin some memory block and sst file. This would show up as /// temporary higher disk usage or memory usage. /// - /// This class handles a periodic timer which periodically dispose all iterator. + /// This class handles a periodic timer that periodically disposes all iterators. /// public class IteratorManager : IDisposable { @@ -1840,35 +1783,23 @@ public void Dispose() public RentWrapper Rent(ReadFlags flags) { - - ManagedIterators iterators = _readaheadIterators; - if ((flags & ReadFlags.HintReadAhead2) != 0) - { - iterators = _readaheadIterators2; - } - else if ((flags & ReadFlags.HintReadAhead3) != 0) - { - iterators = _readaheadIterators3; - } - + ManagedIterators iterators = GetIterators(flags); IteratorHolder holder = iterators.Value!; // If null, we create a new one. Iterator? iterator = Interlocked.Exchange(ref holder.Iterator, null); return new RentWrapper(iterator ?? 
_rocksDb.NewIterator(_cf, _readOptions), flags, this); } - private void Return(Iterator iterator, ReadFlags flags) + private ManagedIterators GetIterators(ReadFlags flags) => flags switch { - ManagedIterators iterators = _readaheadIterators; - if ((flags & ReadFlags.HintReadAhead2) != 0) - { - iterators = _readaheadIterators2; - } - else if ((flags & ReadFlags.HintReadAhead3) != 0) - { - iterators = _readaheadIterators3; - } + _ when (flags & ReadFlags.HintReadAhead2) != 0 => _readaheadIterators2, + _ when (flags & ReadFlags.HintReadAhead3) != 0 => _readaheadIterators3, + _ => _readaheadIterators + }; + private void Return(Iterator iterator, ReadFlags flags) + { + ManagedIterators iterators = GetIterators(flags); IteratorHolder holder = iterators.Value!; // We don't keep using the same iterator for too long. @@ -1882,7 +1813,7 @@ private void Return(Iterator iterator, ReadFlags flags) holder.Usage++; Iterator? oldIterator = Interlocked.Exchange(ref holder.Iterator, iterator); - // Well... this is weird. I'll just dispose it. + // Well... this is weird. I'll just dispose of it. oldIterator?.Dispose(); } @@ -1890,21 +1821,14 @@ public readonly struct RentWrapper(Iterator iterator, ReadFlags flags, IteratorM { public Iterator Iterator => iterator; - public void Dispose() - { - manager.Return(iterator, flags); - } + public void Dispose() => manager.Return(iterator, flags); } // Note: use of threadlocal is very important as the seek forward is fast, but the seek backward is not fast. 
- private sealed class ManagedIterators : ThreadLocal + private sealed class ManagedIterators() : ThreadLocal(static () => new IteratorHolder(), trackAllValues: true) { private bool _disposed = false; - public ManagedIterators() : base(static () => new IteratorHolder(), trackAllValues: true) - { - } - public void ClearIterators() { if (_disposed) return; @@ -1923,7 +1847,7 @@ public void DisposeAll() protected override void Dispose(bool disposing) { - // Note: This is called from finalizer thread, so we can't use foreach to dispose all values + // Note: This is called from finalizer thread, so we can't use foreach to dispose of all values Value?.Dispose(); Value = null!; _disposed = true; @@ -1963,17 +1887,14 @@ public byte[]? LastKey } } - public ISortedView GetViewBetween(ReadOnlySpan firstKey, ReadOnlySpan lastKey) - { - return GetViewBetween(firstKey, lastKey, null); - } + public ISortedView GetViewBetween(ReadOnlySpan firstKey, ReadOnlySpan lastKey) => GetViewBetween(firstKey, lastKey, null); internal ISortedView GetViewBetween(ReadOnlySpan firstKey, ReadOnlySpan lastKey, ColumnFamilyHandle? 
cf) { ReadOptions readOptions = CreateReadOptions(); - IntPtr iterateLowerBound = IntPtr.Zero; - IntPtr iterateUpperBound = IntPtr.Zero; + IntPtr iterateLowerBound; + IntPtr iterateUpperBound; unsafe { @@ -2008,9 +1929,6 @@ public class RocksDbSnapshot( Snapshot snapshot ) : RocksDbReader(mainDb, readOptionsFactory, null, columnFamily), IKeyValueStoreSnapshot { - public void Dispose() - { - snapshot.Dispose(); - } + public void Dispose() => snapshot.Dispose(); } } diff --git a/src/Nethermind/Nethermind.Db.Rpc/RpcDb.cs b/src/Nethermind/Nethermind.Db.Rpc/RpcDb.cs index 61bb02e57e13..a94366c63761 100644 --- a/src/Nethermind/Nethermind.Db.Rpc/RpcDb.cs +++ b/src/Nethermind/Nethermind.Db.Rpc/RpcDb.cs @@ -2,9 +2,11 @@ // SPDX-License-Identifier: LGPL-3.0-only using System; +using System.Buffers; using System.Collections.Generic; using System.Linq; using Nethermind.Core; +using Nethermind.Core.Buffers; using Nethermind.Core.Extensions; using Nethermind.JsonRpc; using Nethermind.JsonRpc.Client; @@ -13,35 +15,25 @@ namespace Nethermind.Db.Rpc { - public class RpcDb : IDb + public class RpcDb( + string dbName, + IJsonSerializer jsonSerializer, + IJsonRpcClient rpcClient, + ILogManager logManager, + IDb recordDb) + : IDb { - private readonly string _dbName; - private readonly IJsonSerializer _jsonSerializer; - private readonly ILogger _logger; - private readonly IJsonRpcClient _rpcClient; - private readonly IDb _recordDb; - - public RpcDb(string dbName, IJsonSerializer jsonSerializer, IJsonRpcClient rpcClient, ILogManager logManager, IDb recordDb) - { - _dbName = dbName; - _rpcClient = rpcClient ?? throw new ArgumentNullException(nameof(rpcClient)); - _jsonSerializer = jsonSerializer ?? throw new ArgumentNullException(nameof(jsonSerializer)); - _logger = logManager?.GetClassLogger() ?? throw new ArgumentNullException(nameof(logManager)); - _recordDb = recordDb; - } + private readonly IJsonSerializer _jsonSerializer = jsonSerializer ?? 
throw new ArgumentNullException(nameof(jsonSerializer)); + private readonly ILogger _logger = logManager?.GetClassLogger() ?? throw new ArgumentNullException(nameof(logManager)); + private readonly IJsonRpcClient _rpcClient = rpcClient ?? throw new ArgumentNullException(nameof(rpcClient)); public void Dispose() { _logger.Info($"Disposing RPC DB {Name}"); - _recordDb.Dispose(); + recordDb.Dispose(); } - public long GetSize() => 0; - public long GetCacheSize() => 0; - public long GetIndexSize() => 0; - public long GetMemtableSize() => 0; - - public string Name { get; } = "RpcDb"; + public string Name => "RpcDb"; public byte[] this[ReadOnlySpan key] { @@ -49,40 +41,16 @@ public byte[] this[ReadOnlySpan key] set => Set(key, value); } - public void Set(ReadOnlySpan key, byte[] value, WriteFlags flags = WriteFlags.None) - { - ThrowWritesNotSupported(); - } - - public byte[] Get(ReadOnlySpan key, ReadFlags flags = ReadFlags.None) - { - return GetThroughRpc(key); - } - + public void Set(ReadOnlySpan key, byte[] value, WriteFlags flags = WriteFlags.None) => ThrowWritesNotSupported(); + public byte[] Get(ReadOnlySpan key, ReadFlags flags = ReadFlags.None) => GetThroughRpc(key); public KeyValuePair[] this[byte[][] keys] => keys.Select(k => new KeyValuePair(k, GetThroughRpc(k))).ToArray(); - - public void Remove(ReadOnlySpan key) - { - ThrowWritesNotSupported(); - } - - public bool KeyExists(ReadOnlySpan key) - { - return GetThroughRpc(key) is not null; - } - - public IDb Innermost => this; // record db is just a helper DB here - public void Flush() { } + public void Remove(ReadOnlySpan key) => ThrowWritesNotSupported(); + public bool KeyExists(ReadOnlySpan key) => GetThroughRpc(key) is not null; public void Flush(bool onlyWal = false) { } - public void Clear() { } - - public IEnumerable> GetAll(bool ordered = false) => _recordDb.GetAll(); - - public IEnumerable GetAllKeys(bool ordered = false) => _recordDb.GetAllKeys(); - - public IEnumerable GetAllValues(bool ordered = 
false) => _recordDb.GetAllValues(); - + public IEnumerable> GetAll(bool ordered = false) => recordDb.GetAll(); + public IEnumerable GetAllKeys(bool ordered = false) => recordDb.GetAllKeys(); + public IEnumerable GetAllValues(bool ordered = false) => recordDb.GetAllValues(); public IWriteBatch StartWriteBatch() { ThrowWritesNotSupported(); @@ -92,36 +60,24 @@ public IWriteBatch StartWriteBatch() private byte[] GetThroughRpc(ReadOnlySpan key) { - string responseJson = _rpcClient.Post("debug_getFromDb", _dbName, key.ToHexString()).Result; + string responseJson = _rpcClient.Post("debug_getFromDb", dbName, key.ToHexString()).Result; JsonRpcSuccessResponse response = _jsonSerializer.Deserialize(responseJson); byte[] value = null; if (response.Result is not null) { value = Bytes.FromHexString((string)response.Result); - if (_recordDb is not null) + if (recordDb is not null) { - _recordDb[key] = value; + recordDb[key] = value; } } return value; } - public Span GetSpan(ReadOnlySpan key) - { - return Get(key); - } - - public void PutSpan(ReadOnlySpan key, ReadOnlySpan value, WriteFlags writeFlags) - { - ThrowWritesNotSupported(); - } - + public void PutSpan(ReadOnlySpan key, ReadOnlySpan value, WriteFlags writeFlags) => ThrowWritesNotSupported(); private static void ThrowWritesNotSupported() => throw new InvalidOperationException("RPC DB does not support writes"); - - public void DangerousReleaseMemory(in ReadOnlySpan span) - { - } + public void DangerousReleaseMemory(in ReadOnlySpan span) { } } } diff --git a/src/Nethermind/Nethermind.Db.Test/CompressingStoreTests.cs b/src/Nethermind/Nethermind.Db.Test/CompressingStoreTests.cs index 117a4b6d52b8..cb7d63a2d4ba 100644 --- a/src/Nethermind/Nethermind.Db.Test/CompressingStoreTests.cs +++ b/src/Nethermind/Nethermind.Db.Test/CompressingStoreTests.cs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2023 Demerzel Solutions Limited // SPDX-License-Identifier: LGPL-3.0-only +using System; using FluentAssertions; using Nethermind.Core; 
using Nethermind.Core.Test; @@ -71,7 +72,15 @@ public void EOAWithSPan() ctx.Compressed.PutSpan(Key, encoded.Bytes); Assert.That(encoded.Bytes, Is.EqualTo(ctx.Compressed[Key]).AsCollection); - Assert.That(encoded.Bytes, Is.EqualTo(ctx.Compressed.GetSpan(Key).ToArray()).AsCollection); + Span span = ctx.Compressed.GetSpan(Key); + try + { + Assert.That(encoded.Bytes, Is.EqualTo(span.ToArray()).AsCollection); + } + finally + { + ctx.Compressed.DangerousReleaseMemory(span); + } ctx.Wrapped[Key]!.Length.Should().Be(5); } diff --git a/src/Nethermind/Nethermind.Db/CompressingDb.cs b/src/Nethermind/Nethermind.Db/CompressingDb.cs index 881b839f25bb..301da2f7b1d4 100644 --- a/src/Nethermind/Nethermind.Db/CompressingDb.cs +++ b/src/Nethermind/Nethermind.Db/CompressingDb.cs @@ -2,9 +2,11 @@ // SPDX-License-Identifier: LGPL-3.0-only using System; +using System.Buffers; using System.Collections.Generic; using System.Linq; using Nethermind.Core; +using Nethermind.Core.Buffers; using Nethermind.Core.Extensions; namespace Nethermind.Db @@ -18,75 +20,55 @@ public static class KeyValueStoreCompressingExtensions /// A wrapped db. public static IDb WithEOACompressed(this IDb @this) => new EOACompressingDb(@this); - private class EOACompressingDb : IDb, ITunableDb + // TODO: consider wrapping IDbWithSpan to make the read with a span, with no alloc for reading? + private class EOACompressingDb(IDb wrapped) : IDb, ITunableDb { - private readonly IDb _wrapped; - - public EOACompressingDb(IDb wrapped) - { - // TODO: consider wrapping IDbWithSpan to make the read with a span, with no alloc for reading? - _wrapped = wrapped; - } - public byte[]? 
this[ReadOnlySpan key] { - get => Decompress(_wrapped[key]); - set => _wrapped[key] = Compress(value); + get => Decompress(wrapped[key]); + set => wrapped[key] = Compress(value); } - public IWriteBatch StartWriteBatch() => new WriteBatch(_wrapped.StartWriteBatch()); + public IWriteBatch StartWriteBatch() => new WriteBatch(wrapped.StartWriteBatch()); - private class WriteBatch : IWriteBatch + private class WriteBatch(IWriteBatch wrapped) : IWriteBatch { - private readonly IWriteBatch _wrapped; - - public WriteBatch(IWriteBatch wrapped) => _wrapped = wrapped; + public void Dispose() => wrapped.Dispose(); - public void Dispose() => _wrapped.Dispose(); - - public void Clear() => _wrapped.Clear(); + public void Clear() => wrapped.Clear(); public void Set(ReadOnlySpan key, byte[]? value, WriteFlags flags = WriteFlags.None) - => _wrapped.Set(key, Compress(value), flags); + => wrapped.Set(key, Compress(value), flags); public void PutSpan(ReadOnlySpan key, ReadOnlySpan value, WriteFlags flags = WriteFlags.None) - { - _wrapped.PutSpan(key, Compress(value, stackalloc byte[value.Length]), flags); - } + => wrapped.PutSpan(key, Compress(value, stackalloc byte[value.Length]), flags); public void Merge(ReadOnlySpan key, ReadOnlySpan value, WriteFlags flags = WriteFlags.None) - { - throw new InvalidOperationException("EOA compressing DB does not support merging"); - } + => throw new InvalidOperationException("EOA compressing DB does not support merging"); - public bool PreferWriteByArray => _wrapped.PreferWriteByArray; + public bool PreferWriteByArray => wrapped.PreferWriteByArray; public byte[]? this[ReadOnlySpan key] { - set => _wrapped[key] = Compress(value); + set => wrapped[key] = Compress(value); } } - /// /// The end of rlp of an EOA account, an empty and an empty . 
/// - private static ReadOnlySpan EmptyCodeHashStorageRoot => new byte[] - { + private static ReadOnlySpan EmptyCodeHashStorageRoot => + [ 160, 86, 232, 31, 23, 27, 204, 85, 166, 255, 131, 69, 230, 146, 192, 248, 110, 91, 72, 224, 27, 153, 108, 173, 192, 1, 98, 47, 181, 227, 99, 180, 33, 160, 197, 210, 70, 1, 134, 247, 35, 60, 146, 126, 125, 178, 220, 199, 3, 192, 229, 0, 182, 83, 202, 130, 39, 59, 123, 250, 216, 4, 93, 133, 164, 112 - }; + ]; private const byte PreambleLength = 1; private const byte PreambleIndex = 0; private const byte PreambleValue = 0; - private static byte[]? Compress(byte[]? bytes) - { - if (bytes is null) return null; - return Compress(bytes, stackalloc byte[bytes.Length]).ToArray(); - } + private static byte[]? Compress(byte[]? bytes) => bytes is null ? null : Compress(bytes, stackalloc byte[bytes.Length]).ToArray(); private static ReadOnlySpan Compress(ReadOnlySpan bytes, Span compressed) { @@ -125,55 +107,56 @@ private static ReadOnlySpan Compress(ReadOnlySpan bytes, Span return decompressed; } - public void Dispose() => _wrapped.Dispose(); + public void Dispose() => wrapped.Dispose(); - public string Name => _wrapped.Name; + public string Name => wrapped.Name; public KeyValuePair[] this[byte[][] keys] => throw new NotImplementedException(); - public IEnumerable> GetAll(bool ordered = false) => _wrapped.GetAll(ordered) + public IEnumerable> GetAll(bool ordered = false) => wrapped.GetAll(ordered) .Select(static kvp => new KeyValuePair(kvp.Key, Decompress(kvp.Value))); public IEnumerable GetAllKeys(bool ordered = false) => - _wrapped.GetAllKeys(ordered); + wrapped.GetAllKeys(ordered); public IEnumerable GetAllValues(bool ordered = false) => - _wrapped.GetAllValues(ordered).Select(Decompress); + wrapped.GetAllValues(ordered).Select(Decompress); - public void Remove(ReadOnlySpan key) => _wrapped.Remove(key); + public void Remove(ReadOnlySpan key) => wrapped.Remove(key); - public bool KeyExists(ReadOnlySpan key) => _wrapped.KeyExists(key); + 
public bool KeyExists(ReadOnlySpan key) => wrapped.KeyExists(key); - public void Flush(bool onlyWal) => _wrapped.Flush(onlyWal); + public void Flush(bool onlyWal) => wrapped.Flush(onlyWal); - public void Clear() => _wrapped.Clear(); + public void Clear() => wrapped.Clear(); - public IDbMeta.DbMetric GatherMetric() => _wrapped.GatherMetric(); + public IDbMeta.DbMetric GatherMetric() => wrapped.GatherMetric(); public void Set(ReadOnlySpan key, byte[]? value, WriteFlags flags = WriteFlags.None) - => _wrapped.Set(key, Compress(value), flags); + => wrapped.Set(key, Compress(value), flags); public byte[]? Get(ReadOnlySpan key, ReadFlags flags = ReadFlags.None) - => Decompress(_wrapped.Get(key, flags)); - + => Decompress(wrapped.Get(key, flags)); - public void PutSpan(ReadOnlySpan key, ReadOnlySpan value, WriteFlags flags = WriteFlags.None) - { - _wrapped.PutSpan(key, Compress(value, stackalloc byte[value.Length]), flags); - } + public void PutSpan(ReadOnlySpan key, ReadOnlySpan value, WriteFlags flags = WriteFlags.None) => + wrapped.PutSpan(key, Compress(value, stackalloc byte[value.Length]), flags); - public Span GetSpan(scoped ReadOnlySpan key, ReadFlags flags = ReadFlags.None) - { + public Span GetSpan(scoped ReadOnlySpan key, ReadFlags flags = ReadFlags.None) => // Can't properly implement span for reading. As the decompressed span is different from the span // from DB, it would crash on DangerouslyReleaseMemory. - return Decompress(Get(key, flags)); + Decompress(Get(key, flags)); + + public MemoryManager? GetOwnedMemory(ReadOnlySpan key, ReadFlags flags = ReadFlags.None) + { + byte[]? data = Decompress(Get(key, flags)); + return data is null or { Length: 0 } ? 
null : new ArrayMemoryManager(data); } - public bool PreferWriteByArray => _wrapped.PreferWriteByArray; + public bool PreferWriteByArray => wrapped.PreferWriteByArray; public void Tune(ITunableDb.TuneType type) { - if (_wrapped is ITunableDb tunable) + if (wrapped is ITunableDb tunable) tunable.Tune(type); } } diff --git a/src/Nethermind/Nethermind.Db/FullPruning/FullPruningDb.cs b/src/Nethermind/Nethermind.Db/FullPruning/FullPruningDb.cs index 32979c963832..cac0113d670a 100755 --- a/src/Nethermind/Nethermind.Db/FullPruning/FullPruningDb.cs +++ b/src/Nethermind/Nethermind.Db/FullPruning/FullPruningDb.cs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: LGPL-3.0-only using System; +using System.Buffers; using System.Collections.Generic; using System.Threading; using Nethermind.Core; @@ -71,6 +72,17 @@ public Span GetSpan(scoped ReadOnlySpan key, ReadFlags flags = ReadF return value; } + public MemoryManager? GetOwnedMemory(ReadOnlySpan key, ReadFlags flags = ReadFlags.None) + { + MemoryManager? memoryManager = _currentDb.GetOwnedMemory(key, flags); + if (memoryManager is not null && _pruningContext?.DuplicateReads == true && (flags & ReadFlags.SkipDuplicateRead) == 0) + { + Duplicate(_pruningContext.CloningDb, key, memoryManager.GetSpan(), WriteFlags.None); + } + + return memoryManager; + } + public void Set(ReadOnlySpan key, byte[]? 
value, WriteFlags flags = WriteFlags.None) { _currentDb.Set(key, value, flags); // we are writing to the main DB @@ -143,6 +155,8 @@ public void Remove(ReadOnlySpan key) public bool KeyExists(ReadOnlySpan key) => _currentDb.KeyExists(key); + public void DangerousReleaseMemory(in ReadOnlySpan span) => _currentDb.DangerousReleaseMemory(span); + // inner DB's can be deleted in the future and // we cannot expose a DB that will potentially be later deleted public IDb Innermost => this; diff --git a/src/Nethermind/Nethermind.Db/MemDb.cs b/src/Nethermind/Nethermind.Db/MemDb.cs index 39490693200f..e68055560f3c 100644 --- a/src/Nethermind/Nethermind.Db/MemDb.cs +++ b/src/Nethermind/Nethermind.Db/MemDb.cs @@ -5,6 +5,8 @@ using System.Collections.Concurrent; using System.Collections.Generic; using System.Linq; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; using System.Threading; using Nethermind.Core; using Nethermind.Core.Extensions; @@ -19,14 +21,13 @@ public class MemDb : IFullDb public long WritesCount { get; private set; } #if ZK - private readonly Dictionary _db; + private readonly Dictionary _db = new(Bytes.EqualityComparer); private readonly Dictionary.AlternateLookup> _spanDb; #else - private readonly ConcurrentDictionary _db; + private readonly ConcurrentDictionary _db = new(Bytes.EqualityComparer); private readonly ConcurrentDictionary.AlternateLookup> _spanDb; #endif - public MemDb(string name) : this(0, 0) { @@ -35,7 +36,7 @@ public MemDb(string name) public static MemDb CopyFrom(IDb anotherDb) { - MemDb newDb = new MemDb(); + MemDb newDb = new(); foreach (KeyValuePair kv in anotherDb.GetAll()) { newDb[kv.Key] = kv.Value; @@ -52,11 +53,6 @@ public MemDb(int writeDelay, int readDelay) { _writeDelay = writeDelay; _readDelay = readDelay; -#if ZK - _db = new Dictionary(Bytes.EqualityComparer); -#else - _db = new ConcurrentDictionary(Bytes.EqualityComparer); -#endif _spanDb = _db.GetAlternateLookup>(); } @@ -64,14 +60,8 @@ public 
MemDb(int writeDelay, int readDelay) public virtual byte[]? this[ReadOnlySpan key] { - get - { - return Get(key); - } - set - { - Set(key, value); - } + get => Get(key); + set => Set(key, value); } public KeyValuePair[] this[byte[][] keys] @@ -84,18 +74,11 @@ public virtual byte[]? this[ReadOnlySpan key] } ReadsCount += keys.Length; - return keys.Select(k => new KeyValuePair(k, _db.TryGetValue(k, out var value) ? value : null)).ToArray(); + return keys.Select(k => new KeyValuePair(k, _db.GetValueOrDefault(k))).ToArray(); } } - public virtual void Remove(ReadOnlySpan key) - { -#if ZK - _spanDb.Remove(key); -#else - _spanDb.TryRemove(key, out _); -#endif - } + public virtual void Remove(ReadOnlySpan key) => _spanDb.TryRemove(key, out _); public bool KeyExists(ReadOnlySpan key) => _spanDb.ContainsKey(key); @@ -120,9 +103,7 @@ public void Dispose() { } public bool PreferWriteByArray => true; - public virtual Span GetSpan(ReadOnlySpan key) => Get(key).AsSpan(); - - public void DangerousReleaseMemory(in ReadOnlySpan span) { } + public unsafe void DangerousReleaseMemory(in ReadOnlySpan span) { } public virtual byte[]? Get(ReadOnlySpan key, ReadFlags flags = ReadFlags.None) { @@ -135,6 +116,9 @@ public void DangerousReleaseMemory(in ReadOnlySpan span) { } return _spanDb.TryGetValue(key, out byte[] value) ? value : null; } + public unsafe Span GetSpan(scoped ReadOnlySpan key, ReadFlags flags = ReadFlags.None) + => Get(key).AsSpan(); + public virtual void Set(ReadOnlySpan key, byte[]? 
value, WriteFlags flags = WriteFlags.None) { if (_writeDelay > 0) diff --git a/src/Nethermind/Nethermind.Db/ReadOnlyDb.cs b/src/Nethermind/Nethermind.Db/ReadOnlyDb.cs index bc734119beb3..98a7d045220f 100644 --- a/src/Nethermind/Nethermind.Db/ReadOnlyDb.cs +++ b/src/Nethermind/Nethermind.Db/ReadOnlyDb.cs @@ -2,9 +2,11 @@ // SPDX-License-Identifier: LGPL-3.0-only using System; +using System.Buffers; using System.Collections.Generic; using System.Linq; using Nethermind.Core; +using Nethermind.Core.Buffers; namespace Nethermind.Db { @@ -12,17 +14,12 @@ public class ReadOnlyDb(IDb wrappedDb, bool createInMemWriteStore) : IReadOnlyDb { private readonly MemDb _memDb = new(); - public void Dispose() - { - _memDb.Dispose(); - } + public void Dispose() => _memDb.Dispose(); public string Name { get => wrappedDb.Name; } - public byte[]? Get(ReadOnlySpan key, ReadFlags flags = ReadFlags.None) - { - return _memDb.Get(key, flags) ?? wrappedDb.Get(key, flags); - } + public byte[]? Get(ReadOnlySpan key, ReadFlags flags = ReadFlags.None) => + _memDb.Get(key, flags) ?? wrappedDb.Get(key, flags); public void Set(ReadOnlySpan key, byte[]? value, WriteFlags flags = WriteFlags.None) { @@ -38,11 +35,11 @@ public KeyValuePair[] this[byte[][] keys] { get { - var result = wrappedDb[keys]; - var memResult = _memDb[keys]; + KeyValuePair[]? result = wrappedDb[keys]; + KeyValuePair[]? 
memResult = _memDb[keys]; for (int i = 0; i < memResult.Length; i++) { - var memValue = memResult[i]; + KeyValuePair memValue = memResult[i]; if (memValue.Value is not null) { result[i] = memValue; @@ -73,7 +70,6 @@ public void Flush(bool onlyWal) { } public virtual void ClearTempChanges() => _memDb.Clear(); - public Span GetSpan(ReadOnlySpan key) => Get(key).AsSpan(); public void PutSpan(ReadOnlySpan keyBytes, ReadOnlySpan value, WriteFlags writeFlags = WriteFlags.None) { if (!createInMemWriteStore) diff --git a/src/Nethermind/Nethermind.Db/SimpleFilePublicKeyDb.cs b/src/Nethermind/Nethermind.Db/SimpleFilePublicKeyDb.cs index d69a532c8735..7f49f1a1367a 100644 --- a/src/Nethermind/Nethermind.Db/SimpleFilePublicKeyDb.cs +++ b/src/Nethermind/Nethermind.Db/SimpleFilePublicKeyDb.cs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: LGPL-3.0-only using System; +using System.Buffers; using System.Collections.Concurrent; using System.Collections.Generic; using System.IO; @@ -23,12 +24,11 @@ public class SimpleFilePublicKeyDb : IFullDb private readonly ILogger _logger; private bool _hasPendingChanges; - private ConcurrentDictionary _cache; - private ConcurrentDictionary.AlternateLookup> _cacheSpan; + private readonly ConcurrentDictionary _cache = new(Bytes.EqualityComparer); + private readonly ConcurrentDictionary.AlternateLookup> _cacheSpan; - public string DbPath { get; } + private string DbPath { get; } public string Name { get; } - public string Description { get; } public ICollection Keys => _cache.Keys.ToArray(); public ICollection Values => _cache.Values; @@ -40,26 +40,27 @@ public SimpleFilePublicKeyDb(string name, string dbDirectoryPath, ILogManager lo ArgumentNullException.ThrowIfNull(dbDirectoryPath); Name = name ?? 
throw new ArgumentNullException(nameof(name)); DbPath = Path.Combine(dbDirectoryPath, DbFileName); - Description = $"{Name}|{DbPath}"; if (!Directory.Exists(dbDirectoryPath)) { Directory.CreateDirectory(dbDirectoryPath); } - LoadData(); + _cacheSpan = _cache.GetAlternateLookup>(); + + if (File.Exists(DbPath)) + { + LoadData(); + } } public byte[]? this[ReadOnlySpan key] { - get => Get(key, ReadFlags.None); - set => Set(key, value, WriteFlags.None); + get => Get(key); + set => Set(key, value); } - public byte[]? Get(ReadOnlySpan key, ReadFlags flags = ReadFlags.None) - { - return _cacheSpan[key]; - } + public byte[]? Get(ReadOnlySpan key, ReadFlags flags = ReadFlags.None) => _cacheSpan[key]; public void Set(ReadOnlySpan key, byte[]? value, WriteFlags flags = WriteFlags.None) { @@ -98,10 +99,7 @@ public void Remove(ReadOnlySpan key) } } - public bool KeyExists(ReadOnlySpan key) - { - return _cacheSpan.ContainsKey(key); - } + public bool KeyExists(ReadOnlySpan key) => _cacheSpan.ContainsKey(key); public void Flush(bool onlyWal = false) { } @@ -117,10 +115,7 @@ public void Clear() public IEnumerable GetAllValues(bool ordered = false) => _cache.Values; - public IWriteBatch StartWriteBatch() - { - return this.LikeABatch(CommitBatch); - } + public IWriteBatch StartWriteBatch() => this.LikeABatch(CommitBatch); private void CommitBatch() { @@ -219,14 +214,6 @@ private void LoadData() { const int maxLineLength = 2048; - _cache = new ConcurrentDictionary(Bytes.EqualityComparer); - _cacheSpan = _cache.GetAlternateLookup>(); - - if (!File.Exists(DbPath)) - { - return; - } - using SafeFileHandle fileHandle = File.OpenHandle(DbPath, FileMode.OpenOrCreate); using var handle = ArrayPoolDisposableReturn.Rent(maxLineLength, out byte[] rentedBuffer); @@ -306,24 +293,6 @@ void RecordError(Span data) } } - private byte[] Update(byte[] oldValue, byte[] newValue) - { - if (!Bytes.AreEqual(oldValue, newValue)) - { - _hasPendingChanges = true; - } - - return newValue; - } - - private byte[] 
Add(byte[] value) - { - _hasPendingChanges = true; - return value; - } - - public void Dispose() - { - } + public void Dispose() { } } } diff --git a/src/Nethermind/Nethermind.Trie.Test/CacheTests.cs b/src/Nethermind/Nethermind.Trie.Test/CacheTests.cs index 8891482ff7f4..02f361be5b42 100644 --- a/src/Nethermind/Nethermind.Trie.Test/CacheTests.cs +++ b/src/Nethermind/Nethermind.Trie.Test/CacheTests.cs @@ -10,6 +10,7 @@ namespace Nethermind.Trie.Test { [TestFixture] + [Parallelizable(ParallelScope.All)] public class CacheTests { [Test] diff --git a/src/Nethermind/Nethermind.Trie.Test/HexPrefixTests.cs b/src/Nethermind/Nethermind.Trie.Test/HexPrefixTests.cs index bf35cbb1232d..c8a8bc507a07 100644 --- a/src/Nethermind/Nethermind.Trie.Test/HexPrefixTests.cs +++ b/src/Nethermind/Nethermind.Trie.Test/HexPrefixTests.cs @@ -7,6 +7,7 @@ namespace Nethermind.Trie.Test; +[Parallelizable(ParallelScope.All)] public class HexPrefixTests { [TestCase(false, (byte)3, (byte)19)] diff --git a/src/Nethermind/Nethermind.Trie.Test/NibbleTests.cs b/src/Nethermind/Nethermind.Trie.Test/NibbleTests.cs index 1338eafa0183..a575cb2ddbbd 100644 --- a/src/Nethermind/Nethermind.Trie.Test/NibbleTests.cs +++ b/src/Nethermind/Nethermind.Trie.Test/NibbleTests.cs @@ -6,6 +6,7 @@ namespace Nethermind.Trie.Test; +[Parallelizable(ParallelScope.All)] public class NibbleTests { private readonly byte[][] _hexEncoding = diff --git a/src/Nethermind/Nethermind.Trie.Test/NodeStorageFactoryTests.cs b/src/Nethermind/Nethermind.Trie.Test/NodeStorageFactoryTests.cs index 5e0308ea2a5c..740048565d78 100644 --- a/src/Nethermind/Nethermind.Trie.Test/NodeStorageFactoryTests.cs +++ b/src/Nethermind/Nethermind.Trie.Test/NodeStorageFactoryTests.cs @@ -10,6 +10,7 @@ namespace Nethermind.Trie.Test; +[Parallelizable(ParallelScope.All)] public class NodeStorageFactoryTests { [TestCase(INodeStorage.KeyScheme.Hash)] diff --git a/src/Nethermind/Nethermind.Trie.Test/NodeStorageTests.cs 
b/src/Nethermind/Nethermind.Trie.Test/NodeStorageTests.cs index 51702876eec0..c212d15a8e8e 100644 --- a/src/Nethermind/Nethermind.Trie.Test/NodeStorageTests.cs +++ b/src/Nethermind/Nethermind.Trie.Test/NodeStorageTests.cs @@ -13,31 +13,25 @@ namespace Nethermind.Trie.Test; [TestFixture(INodeStorage.KeyScheme.Hash)] [TestFixture(INodeStorage.KeyScheme.HalfPath)] -public class NodeStorageTests +[Parallelizable(ParallelScope.All)] +public class NodeStorageTests(INodeStorage.KeyScheme currentKeyScheme) { - private readonly INodeStorage.KeyScheme _currentKeyScheme; - - public NodeStorageTests(INodeStorage.KeyScheme currentKeyScheme) - { - _currentKeyScheme = currentKeyScheme; - } - [Test] public void Should_StoreAndRead() { TestMemDb testDb = new TestMemDb(); - NodeStorage nodeStorage = new NodeStorage(testDb, _currentKeyScheme); + NodeStorage nodeStorage = new NodeStorage(testDb, currentKeyScheme); nodeStorage.KeyExists(null, TreePath.Empty, TestItem.KeccakA).Should().BeFalse(); nodeStorage.Set(null, TreePath.Empty, TestItem.KeccakA, TestItem.KeccakA.BytesToArray()); nodeStorage.Get(null, TreePath.Empty, TestItem.KeccakA).Should().BeEquivalentTo(TestItem.KeccakA.BytesToArray()); nodeStorage.KeyExists(null, TreePath.Empty, TestItem.KeccakA).Should().BeTrue(); - if (_currentKeyScheme == INodeStorage.KeyScheme.Hash) + if (currentKeyScheme == INodeStorage.KeyScheme.Hash) { testDb[TestItem.KeccakA.Bytes].Should().NotBeNull(); } - else if (_currentKeyScheme == INodeStorage.KeyScheme.HalfPath) + else if (currentKeyScheme == INodeStorage.KeyScheme.HalfPath) { testDb[NodeStorage.GetHalfPathNodeStoragePath(null, TreePath.Empty, TestItem.KeccakA)].Should().NotBeNull(); } @@ -47,18 +41,18 @@ public void Should_StoreAndRead() public void Should_StoreAndRead_WithStorage() { TestMemDb testDb = new TestMemDb(); - NodeStorage nodeStorage = new NodeStorage(testDb, _currentKeyScheme); + NodeStorage nodeStorage = new NodeStorage(testDb, currentKeyScheme); 
nodeStorage.KeyExists(TestItem.KeccakB, TreePath.Empty, TestItem.KeccakA).Should().BeFalse(); nodeStorage.Set(TestItem.KeccakB, TreePath.Empty, TestItem.KeccakA, TestItem.KeccakA.BytesToArray()); nodeStorage.Get(TestItem.KeccakB, TreePath.Empty, TestItem.KeccakA).Should().BeEquivalentTo(TestItem.KeccakA.BytesToArray()); nodeStorage.KeyExists(TestItem.KeccakB, TreePath.Empty, TestItem.KeccakA).Should().BeTrue(); - if (_currentKeyScheme == INodeStorage.KeyScheme.Hash) + if (currentKeyScheme == INodeStorage.KeyScheme.Hash) { testDb[TestItem.KeccakA.Bytes].Should().NotBeNull(); } - else if (_currentKeyScheme == INodeStorage.KeyScheme.HalfPath) + else if (currentKeyScheme == INodeStorage.KeyScheme.HalfPath) { testDb[NodeStorage.GetHalfPathNodeStoragePath(TestItem.KeccakB, TreePath.Empty, TestItem.KeccakA)].Should().NotBeNull(); } @@ -68,7 +62,7 @@ public void Should_StoreAndRead_WithStorage() public void When_KeyNotExist_Should_TryBothKeyType() { TestMemDb testDb = new TestMemDb(); - NodeStorage nodeStorage = new NodeStorage(testDb, _currentKeyScheme); + NodeStorage nodeStorage = new NodeStorage(testDb, currentKeyScheme); nodeStorage.Get(TestItem.KeccakB, TreePath.Empty, TestItem.KeccakA).Should().BeNull(); @@ -80,9 +74,9 @@ public void When_KeyNotExist_Should_TryBothKeyType() public void When_EntryOfDifferentScheme_Should_StillBeAbleToRead() { TestMemDb testDb = new TestMemDb(); - NodeStorage nodeStorage = new NodeStorage(testDb, _currentKeyScheme); + NodeStorage nodeStorage = new NodeStorage(testDb, currentKeyScheme); - if (_currentKeyScheme == INodeStorage.KeyScheme.Hash) + if (currentKeyScheme == INodeStorage.KeyScheme.Hash) { testDb[NodeStorage.GetHalfPathNodeStoragePath(TestItem.KeccakB, TreePath.Empty, TestItem.KeccakA)] = TestItem.KeccakA.BytesToArray(); @@ -109,7 +103,7 @@ public void When_EntryOfDifferentScheme_Should_StillBeAbleToRead() [TestCase(true, 32, 
"0211111111111111111111111111111111111111111111111111111111111111112222222222222222203333333333333333333333333333333333333333333333333333333333333333")] public void Test_HalfPathEncoding(bool hasAddress, int pathLength, string expectedKey) { - if (_currentKeyScheme == INodeStorage.KeyScheme.Hash) return; + if (currentKeyScheme == INodeStorage.KeyScheme.Hash) return; Hash256? address = null; if (hasAddress) @@ -130,10 +124,10 @@ public void Test_HalfPathEncoding(bool hasAddress, int pathLength, string expect [TestCase(true, 3, ReadFlags.HintReadAhead | ReadFlags.HintReadAhead3)] public void Test_WhenReadaheadUseDifferentReadaheadOnDifferentSection(bool hasAddress, int pathLength, ReadFlags expectedReadFlags) { - if (_currentKeyScheme == INodeStorage.KeyScheme.Hash) return; + if (currentKeyScheme == INodeStorage.KeyScheme.Hash) return; TestMemDb testDb = new TestMemDb(); - NodeStorage nodeStorage = new NodeStorage(testDb, _currentKeyScheme); + NodeStorage nodeStorage = new NodeStorage(testDb, currentKeyScheme); Hash256? 
address = null; if (hasAddress) diff --git a/src/Nethermind/Nethermind.Trie.Test/OverlayTrieStoreTests.cs b/src/Nethermind/Nethermind.Trie.Test/OverlayTrieStoreTests.cs index ed8c23ee2640..2c70a4944949 100644 --- a/src/Nethermind/Nethermind.Trie.Test/OverlayTrieStoreTests.cs +++ b/src/Nethermind/Nethermind.Trie.Test/OverlayTrieStoreTests.cs @@ -15,6 +15,7 @@ namespace Nethermind.Trie.Test; +[Parallelizable(ParallelScope.All)] public class OverlayTrieStoreTests { [Test] diff --git a/src/Nethermind/Nethermind.Trie.Test/Pruning/MaxBlockInCachePruneStrategyTests.cs b/src/Nethermind/Nethermind.Trie.Test/Pruning/MaxBlockInCachePruneStrategyTests.cs index f288a30f6e0e..93c61e44ed48 100644 --- a/src/Nethermind/Nethermind.Trie.Test/Pruning/MaxBlockInCachePruneStrategyTests.cs +++ b/src/Nethermind/Nethermind.Trie.Test/Pruning/MaxBlockInCachePruneStrategyTests.cs @@ -8,7 +8,8 @@ namespace Nethermind.Trie.Test.Pruning { [TestFixture] - [Parallelizable(ParallelScope.Self)] + [Parallelizable(ParallelScope.All)] + [FixtureLifeCycle(LifeCycle.InstancePerTestCase)] public class MaxBlockInCachePruneStrategyTests { private IPruningStrategy _baseStrategy; diff --git a/src/Nethermind/Nethermind.Trie.Test/Pruning/MinBlockInCachePruneStrategyTests.cs b/src/Nethermind/Nethermind.Trie.Test/Pruning/MinBlockInCachePruneStrategyTests.cs index 27814dd58fce..63930144e889 100644 --- a/src/Nethermind/Nethermind.Trie.Test/Pruning/MinBlockInCachePruneStrategyTests.cs +++ b/src/Nethermind/Nethermind.Trie.Test/Pruning/MinBlockInCachePruneStrategyTests.cs @@ -9,7 +9,8 @@ namespace Nethermind.Trie.Test.Pruning { [TestFixture] - [Parallelizable(ParallelScope.Self)] + [Parallelizable(ParallelScope.All)] + [FixtureLifeCycle(LifeCycle.InstancePerTestCase)] public class MinBlockInCachePruneStrategyTests { private IPruningStrategy _baseStrategy; diff --git a/src/Nethermind/Nethermind.Trie.Test/Pruning/TreeStoreTests.cs b/src/Nethermind/Nethermind.Trie.Test/Pruning/TreeStoreTests.cs index 
2a98ac0b6aa9..319ec65f7f33 100644 --- a/src/Nethermind/Nethermind.Trie.Test/Pruning/TreeStoreTests.cs +++ b/src/Nethermind/Nethermind.Trie.Test/Pruning/TreeStoreTests.cs @@ -28,18 +28,13 @@ namespace Nethermind.Trie.Test.Pruning { [TestFixture(INodeStorage.KeyScheme.HalfPath)] [TestFixture(INodeStorage.KeyScheme.Hash)] - public class TreeStoreTests + [Parallelizable(ParallelScope.All)] + public class TreeStoreTests(INodeStorage.KeyScheme scheme) { private readonly ILogManager _logManager = LimboLogs.Instance; // new OneLoggerLogManager(new NUnitLogger(LogLevel.Trace)); private readonly AccountDecoder _accountDecoder = new(); - private readonly INodeStorage.KeyScheme _scheme; - - public TreeStoreTests(INodeStorage.KeyScheme scheme) - { - _scheme = scheme; - } private TrieStore CreateTrieStore( IPruningStrategy? pruningStrategy = null, @@ -59,7 +54,7 @@ private TrieStore CreateTrieStore( finalizedStateProvider ??= new TestFinalizedStateProvider(pruningConfig.PruningBoundary); TrieStore trieStore = new( - new NodeStorage(kvStore, _scheme, requirePath: _scheme == INodeStorage.KeyScheme.HalfPath), + new NodeStorage(kvStore, scheme, requirePath: scheme == INodeStorage.KeyScheme.HalfPath), pruningStrategy, persistenceStrategy, finalizedStateProvider, @@ -148,7 +143,7 @@ public void Pruning_off_cache_should_change_commit_node() committer.CommitNode(ref emptyPath, trieNode3); } fullTrieStore.WaitForPruning(); - fullTrieStore.MemoryUsedByDirtyCache.Should().Be(_scheme == INodeStorage.KeyScheme.HalfPath ? 832 : 676); + fullTrieStore.MemoryUsedByDirtyCache.Should().Be(scheme == INodeStorage.KeyScheme.HalfPath ? 832 : 676); } [Test] @@ -177,7 +172,7 @@ public void Pruning_off_cache_should_find_cached_or_unknown() Assert.That(returnedNode2.NodeType, Is.EqualTo(NodeType.Unknown)); Assert.That(returnedNode3.NodeType, Is.EqualTo(NodeType.Unknown)); trieStore.WaitForPruning(); - trieStore.MemoryUsedByDirtyCache.Should().Be(_scheme == INodeStorage.KeyScheme.HalfPath ? 
552 : 396); + trieStore.MemoryUsedByDirtyCache.Should().Be(scheme == INodeStorage.KeyScheme.HalfPath ? 552 : 396); } [Test] @@ -242,7 +237,7 @@ public void Memory_with_concurrent_commits_is_correct() tree.Commit(); } - fullTrieStore.MemoryUsedByDirtyCache.Should().Be(_scheme == INodeStorage.KeyScheme.Hash ? 545956 : 616104L); + fullTrieStore.MemoryUsedByDirtyCache.Should().Be(scheme == INodeStorage.KeyScheme.Hash ? 545956 : 616104L); fullTrieStore.CommittedNodesCount.Should().Be(1349); } @@ -871,7 +866,7 @@ public void ReadOnly_store_returns_copies(bool pruning) readOnlyNode.Key?.ToString().Should().Be(originalNode.Key?.ToString()); } - private long ExpectedPerNodeKeyMemorySize => (_scheme == INodeStorage.KeyScheme.Hash ? 0 : TrieStoreDirtyNodesCache.Key.MemoryUsage) + MemorySizes.ObjectHeaderMethodTable + MemorySizes.RefSize + 4 + MemorySizes.RefSize; + private long ExpectedPerNodeKeyMemorySize => (scheme == INodeStorage.KeyScheme.Hash ? 0 : TrieStoreDirtyNodesCache.Key.MemoryUsage) + MemorySizes.ObjectHeaderMethodTable + MemorySizes.RefSize + 4 + MemorySizes.RefSize; [Test] public void After_commit_should_have_has_root() @@ -930,7 +925,7 @@ public async Task Will_RemovePastKeys_OnSnapshot() await Task.Delay(TimeSpan.FromMilliseconds(10)); } - if (_scheme == INodeStorage.KeyScheme.Hash) + if (scheme == INodeStorage.KeyScheme.Hash) { memDb.Count.Should().NotBe(1); } @@ -1021,7 +1016,7 @@ public void When_SomeKindOfNonResolvedNotInMainWorldState_OnPrune_DoNotDeleteNod Address address = TestItem.AddressA; UInt256 slot = 1; - INodeStorage nodeStorage = new NodeStorage(memDbProvider.StateDb, _scheme); + INodeStorage nodeStorage = new NodeStorage(memDbProvider.StateDb, scheme); (Hash256 stateRoot, ValueHash256 storageRoot) = SetupStartingState(); nodeStorage.Get(address.ToAccountPath.ToCommitment(), TreePath.Empty, storageRoot).Should().NotBeNull(); @@ -1104,7 +1099,7 @@ public Task When_Prune_ClearRecommittedPersistedNode() } memDb.Count.Should().Be(1); - 
fullTrieStore.MemoryUsedByDirtyCache.Should().Be(_scheme == INodeStorage.KeyScheme.Hash ? 12032 : 15360); + fullTrieStore.MemoryUsedByDirtyCache.Should().Be(scheme == INodeStorage.KeyScheme.Hash ? 12032 : 15360); fullTrieStore.PersistCache(default); memDb.Count.Should().Be(64); @@ -1145,7 +1140,7 @@ public void OnDispose_PersistAtLeastOneCommitSet() [Test] public void Will_NotPruneTopLevelNode() { - if (_scheme == INodeStorage.KeyScheme.Hash) Assert.Ignore("Not applicable for hash"); + if (scheme == INodeStorage.KeyScheme.Hash) Assert.Ignore("Not applicable for hash"); MemDb memDb = new(); TestPruningStrategy testPruningStrategy = new TestPruningStrategy( @@ -1224,7 +1219,7 @@ void WriteRandomData(int seed) [Test] public void Can_Prune_StorageTreeRoot() { - if (_scheme == INodeStorage.KeyScheme.Hash) Assert.Ignore("Not applicable for hash"); + if (scheme == INodeStorage.KeyScheme.Hash) Assert.Ignore("Not applicable for hash"); MemDb memDb = new(); TestPruningStrategy testPruningStrategy = new TestPruningStrategy( @@ -1593,7 +1588,7 @@ void VerifyAllTrieExceptGenesis() long cachedPersistedNode = fullTrieStore.CachedNodesCount - fullTrieStore.DirtyCachedNodesCount; long perStatePersistedNode = 20; - if (_scheme == INodeStorage.KeyScheme.Hash) + if (scheme == INodeStorage.KeyScheme.Hash) { cachedPersistedNode.Should().Be(perStatePersistedNode + 3); } diff --git a/src/Nethermind/Nethermind.Trie.Test/PruningScenariosTests.cs b/src/Nethermind/Nethermind.Trie.Test/PruningScenariosTests.cs index 223282c66048..cd0cd0d3ee19 100644 --- a/src/Nethermind/Nethermind.Trie.Test/PruningScenariosTests.cs +++ b/src/Nethermind/Nethermind.Trie.Test/PruningScenariosTests.cs @@ -27,6 +27,7 @@ namespace Nethermind.Trie.Test { [TestFixture] + [Parallelizable(ParallelScope.All)] public class PruningScenariosTests { private ILogger _logger; @@ -1222,7 +1223,7 @@ public void Retain_Some_PersistedNodes() ctx .AssertThatDirtyNodeCountIs(9) - .AssertThatCachedNodeCountMoreThan(280); + 
.AssertThatCachedNodeCountMoreThan(275); } [Test] diff --git a/src/Nethermind/Nethermind.Trie.Test/RawTrieStoreTests.cs b/src/Nethermind/Nethermind.Trie.Test/RawTrieStoreTests.cs index 7663cff436f4..2535489626fb 100644 --- a/src/Nethermind/Nethermind.Trie.Test/RawTrieStoreTests.cs +++ b/src/Nethermind/Nethermind.Trie.Test/RawTrieStoreTests.cs @@ -10,6 +10,7 @@ namespace Nethermind.Trie.Test; +[Parallelizable(ParallelScope.All)] public class RawTrieStoreTests { [Test] diff --git a/src/Nethermind/Nethermind.Trie.Test/TinyTreePathTests.cs b/src/Nethermind/Nethermind.Trie.Test/TinyTreePathTests.cs index a2d51d1a5a18..d6c93f2b3f96 100644 --- a/src/Nethermind/Nethermind.Trie.Test/TinyTreePathTests.cs +++ b/src/Nethermind/Nethermind.Trie.Test/TinyTreePathTests.cs @@ -8,6 +8,7 @@ namespace Nethermind.Trie.Test; +[Parallelizable(ParallelScope.All)] public class TinyTreePathTests { [Test] diff --git a/src/Nethermind/Nethermind.Trie.Test/TrackingCappedArrayPoolTests.cs b/src/Nethermind/Nethermind.Trie.Test/TrackingCappedArrayPoolTests.cs index 322804621d13..bb30481d6f14 100644 --- a/src/Nethermind/Nethermind.Trie.Test/TrackingCappedArrayPoolTests.cs +++ b/src/Nethermind/Nethermind.Trie.Test/TrackingCappedArrayPoolTests.cs @@ -8,6 +8,7 @@ namespace Nethermind.Trie.Test; +[Parallelizable(ParallelScope.All)] public class TrackingCappedArrayPoolTests { [Test] diff --git a/src/Nethermind/Nethermind.Trie.Test/TreePathTests.cs b/src/Nethermind/Nethermind.Trie.Test/TreePathTests.cs index b398b4b3b65e..da83b8a3da8f 100644 --- a/src/Nethermind/Nethermind.Trie.Test/TreePathTests.cs +++ b/src/Nethermind/Nethermind.Trie.Test/TreePathTests.cs @@ -9,6 +9,7 @@ namespace Nethermind.Trie.Test; +[Parallelizable(ParallelScope.All)] public class TreePathTests { [Test] diff --git a/src/Nethermind/Nethermind.Trie.Test/TrieNodeResolverWithReadFlagsTests.cs b/src/Nethermind/Nethermind.Trie.Test/TrieNodeResolverWithReadFlagsTests.cs index df8fcf6dac17..6d28229901d0 100644 --- 
a/src/Nethermind/Nethermind.Trie.Test/TrieNodeResolverWithReadFlagsTests.cs +++ b/src/Nethermind/Nethermind.Trie.Test/TrieNodeResolverWithReadFlagsTests.cs @@ -11,6 +11,7 @@ namespace Nethermind.Trie.Test; +[Parallelizable(ParallelScope.All)] public class TrieNodeResolverWithReadFlagsTests { [Test] diff --git a/src/Nethermind/Nethermind.Trie.Test/TrieTests.cs b/src/Nethermind/Nethermind.Trie.Test/TrieTests.cs index cc8bbf893719..15cea826d4b4 100644 --- a/src/Nethermind/Nethermind.Trie.Test/TrieTests.cs +++ b/src/Nethermind/Nethermind.Trie.Test/TrieTests.cs @@ -27,6 +27,7 @@ namespace Nethermind.Trie.Test { [TestFixture] + [Parallelizable(ParallelScope.All)] public class TrieTests { private ILogger _logger; diff --git a/src/Nethermind/Nethermind.Trie.Test/VisitingTests.cs b/src/Nethermind/Nethermind.Trie.Test/VisitingTests.cs index 2fd6f53b2b60..7221240b00a8 100644 --- a/src/Nethermind/Nethermind.Trie.Test/VisitingTests.cs +++ b/src/Nethermind/Nethermind.Trie.Test/VisitingTests.cs @@ -20,6 +20,7 @@ namespace Nethermind.Trie.Test; +[Parallelizable(ParallelScope.All)] public class VisitingTests { [TestCaseSource(nameof(GetOptions))] diff --git a/src/Nethermind/Nethermind.Trie.Test/VisitorProgressTrackerTests.cs b/src/Nethermind/Nethermind.Trie.Test/VisitorProgressTrackerTests.cs index 3b3e4b1b2a46..f22b68a1123b 100644 --- a/src/Nethermind/Nethermind.Trie.Test/VisitorProgressTrackerTests.cs +++ b/src/Nethermind/Nethermind.Trie.Test/VisitorProgressTrackerTests.cs @@ -8,6 +8,7 @@ namespace Nethermind.Trie.Test; +[Parallelizable(ParallelScope.All)] public class VisitorProgressTrackerTests { [Test] diff --git a/src/Nethermind/Nethermind.Xdc.Test/ModuleTests/RewardTests.cs b/src/Nethermind/Nethermind.Xdc.Test/ModuleTests/RewardTests.cs index 757734e69090..becec46623a9 100644 --- a/src/Nethermind/Nethermind.Xdc.Test/ModuleTests/RewardTests.cs +++ b/src/Nethermind/Nethermind.Xdc.Test/ModuleTests/RewardTests.cs @@ -117,12 +117,24 @@ await chain.AddBlock(BuildSigningTx( long 
current = chain.BlockTree.Head!.Number; await chain.AddBlocks((int)(targetIncludingBlockForSecondSign - current - 1)); // move so AddBlockMayHaveExtraTx produces the target + // For 4E reward calculation, the masternodes come from the second epoch switch found + // when walking backwards from 4E. The signed header (3E - mergeSignRange) is in the + // range [2E+1, 3E), so its epoch switch info provides the relevant masternodes. + // Use a masternode from that epoch to ensure the signature is counted. + EpochSwitchInfo? epochSwitchInfoFor2E = chain.EpochSwitchManager.GetEpochSwitchInfo(signedHeader3EMinusMerge); + Assert.That(epochSwitchInfoFor2E, Is.Not.Null); + PrivateKey signerForPart2 = chain.MasterNodeCandidates.First(k => k.Address == epochSwitchInfoFor2E!.Masternodes[0]); + + // Set the chain's signer to our chosen masternode - required because + // SignTransactionFilter rejects signing txs from non-current-signers + chain.Signer.SetSigner(signerForPart2); + await chain.AddBlock(BuildSigningTx( spec, signedHeader3EMinusMerge.Number, signedHeader3EMinusMerge.Hash ?? signedHeader3EMinusMerge.CalculateHash().ToHash256(), - chain.Signer.Key!, - (long)chain.ReadOnlyState.GetNonce(chain.Signer.Address))); + signerForPart2, + (long)chain.ReadOnlyState.GetNonce(signerForPart2.Address))); // --- Evaluate rewards at checkpoint (4E) --- long checkpoint4E = 4 * epochLength;