diff --git a/.github/workflows/nethermind-tests-flat.yml b/.github/workflows/nethermind-tests-flat.yml new file mode 100644 index 00000000000..ee79846610c --- /dev/null +++ b/.github/workflows/nethermind-tests-flat.yml @@ -0,0 +1,96 @@ +name: Nethermind tests (Flat DB) + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +on: + pull_request: + push: + branches: [master] + workflow_dispatch: + +env: + TEST_USE_FLAT: "1" + DOTNET_SYSTEM_CONSOLE_ALLOW_ANSI_COLOR_REDIRECTION: 1 + TERM: xterm + +jobs: + tests: + name: Run ${{ matrix.project }}${{ matrix.chunk && format(' ({0})', matrix.chunk) || '' }} + runs-on: ubuntu-latest + continue-on-error: true + strategy: + matrix: + project: + - Ethereum.Abi.Test + - Ethereum.Basic.Test + - Ethereum.Blockchain.Block.Test + - Ethereum.Blockchain.Pyspec.Test + - Ethereum.Difficulty.Test + - Ethereum.HexPrefix.Test + - Ethereum.KeyAddress.Test + - Ethereum.KeyStore.Test + - Ethereum.Legacy.Blockchain.Block.Test + - Ethereum.Legacy.Transition.Test + - Ethereum.Legacy.VM.Test + - Ethereum.PoW.Test + - Ethereum.Rlp.Test + - Ethereum.Transaction.Test + - Ethereum.Trie.Test + - Nethermind.Consensus.Test + - Nethermind.Core.Test + - Nethermind.Db.Test + - Nethermind.Runner.Test + chunk: [''] + include: + - project: Ethereum.Legacy.Blockchain.Test + chunk: 1of4 + - project: Ethereum.Legacy.Blockchain.Test + chunk: 2of4 + - project: Ethereum.Legacy.Blockchain.Test + chunk: 3of4 + - project: Ethereum.Legacy.Blockchain.Test + chunk: 4of4 + steps: + - name: Check out repository + uses: actions/checkout@v6 + with: + submodules: ${{ startsWith(matrix.project, 'Ethereum.') && 'recursive' || 'false' }} + + - name: Set up .NET + uses: actions/setup-dotnet@v5 + + - name: ${{ matrix.project }} + id: test + working-directory: src/Nethermind/${{ matrix.project }} + env: + TEST_CHUNK: ${{ matrix.chunk }} + run: | + dotnet test --project ${{ matrix.project }}.csproj -c release + + - name: Save test outcome + 
if: success() || failure() + run: echo "${{ steps.test.outcome == 'success' }}," >> test.outcome + + - name: Upload test outcome + if: success() || failure() + uses: actions/upload-artifact@v4 + with: + name: ${{ matrix.project }}${{ matrix.chunk && format('-{0}', matrix.chunk) || '' }}-flat-outcome + path: test.outcome + retention-days: 1 + + tests-summary: + name: Tests summary + needs: tests + runs-on: ubuntu-latest + steps: + - name: Download test outcomes + uses: actions/download-artifact@v4 + + - name: Ensure all tests passed + run: | + data=$(cat **/test.outcome) && data=${data%?} + passed=$(echo "[$data]" | jq -r 'all') + [[ "$passed" == "true" ]] && exit 0 || exit 1 diff --git a/.gitignore b/.gitignore index 6f14b7b2d89..7ca90b44ee1 100644 --- a/.gitignore +++ b/.gitignore @@ -445,3 +445,6 @@ FodyWeavers.xsd ## Nethermind keystore/ /.githooks + +# Worktrees +.worktrees/ diff --git a/Directory.Packages.props b/Directory.Packages.props index 2112ae3eadd..964bd02f599 100644 --- a/Directory.Packages.props +++ b/Directory.Packages.props @@ -82,6 +82,7 @@ + diff --git a/cspell.json b/cspell.json index 313bcb6496c..ec2128db069 100644 --- a/cspell.json +++ b/cspell.json @@ -101,12 +101,14 @@ "bootnodes", "bottlenecked", "browsable", + "bsearch", "btcs", "buildtransitive", "bulkset", "bursty", "buterin", "bylica", + "cacheline", "bytecodes", "callcode", "calldatacopy", @@ -170,6 +172,7 @@ "deserialised", "dests", "devirtualize", + "devirtualized", "devnet", "devnets", "devp2p", @@ -237,7 +240,9 @@ "extopcodes", "extradata", "extstaticcall", + "Exitor", "fastbin", + "Fastpath", "fastlz", "fastmod", "fastsync", @@ -292,6 +297,7 @@ "hostnames", "hotstuff", "hyperthreading", + "HUGEPAGE", "idxs", "iface", "ikvp", @@ -345,6 +351,7 @@ "ldarg", "ldfld", "lemire's", + "levelname", "libc", "libdl", "libp", @@ -361,6 +368,8 @@ "machdep", "machinename", "madv", + "Madvise", + "madvise", "maiboroda", "mainchain", "mallopt", @@ -379,10 +388,12 @@ "maxlevel", "maxpriorityfee", 
"mclbn", + "mbpk", "mcmc", "mcopy", "mellor", "memberwise", + "memcolumndb", "memin", "meminstart", "meminsz", @@ -411,6 +422,7 @@ "morden", "movbe", "movzx", + "Mpmc", "mres", "mscorlib", "msgrecv", @@ -457,6 +469,7 @@ "nodestore", "nodetype", "nofile", + "noninteractive", "nonposdao", "nonstring", "nops", @@ -483,6 +496,7 @@ "owlf", "pacaya", "parallelizable", + "patriciatree", "paweł", "pctg", "pearce", @@ -490,6 +504,7 @@ "pendingtxs", "perfnet", "perfolizer", + "Persistences", "permissioned", "pgrep", "physicalcpu", @@ -499,6 +514,7 @@ "pkcs", "pmsg", "poacore", + "Populator", "poaps", "podc", "popcnt", @@ -509,6 +525,7 @@ "powm", "preconf", "preconfirmation", + "pregenesis", "predeploy", "prefixedf", "prefund", @@ -560,6 +577,7 @@ "resettables", "retesteth", "retf", + "ribbonfilter", "returncode", "returndata", "returndatacopy", @@ -578,6 +596,7 @@ "rocksdb", "ronin", "roothash", + "rootref", "rormask", "rpcurl", "runtimeconfig", @@ -611,11 +630,13 @@ "sload", "smod", "somelabel", + "Spmc", "spaceneth", "spammy", "sparkline", "spinlocks", "squarify", + "srcset", "ssse", "sstfiles", "sstore", @@ -680,6 +701,7 @@ "trienode", "triestore", "trietest", + "Triewarmer", "trietestnextprev", "triggerable", "tstore", @@ -723,6 +745,8 @@ "upto", "upvoting", "vbmi", + "verifytrie", + "verkle", "vitalik", "vmovups", "vmtrace", @@ -734,6 +758,8 @@ "vpor", "vptest", "vzeroupper", + "Wakeup", + "wakeup", "wamp", "warmcoinbase", "wblob", @@ -744,6 +770,7 @@ "worklet", "worklist", "worldchain", + "worldscope", "worldstate", "writebatch", "writeoptions", @@ -758,6 +785,7 @@ "yparity", "zcompressor", "zdecompressor", + "zerobytes", "zhizhu", "zstandard", "zstd", diff --git a/src/Nethermind/Nethermind.Blockchain.Test/FullPruning/FullPruningDiskTest.cs b/src/Nethermind/Nethermind.Blockchain.Test/FullPruning/FullPruningDiskTest.cs index 98483a553cc..ec91713897a 100644 --- a/src/Nethermind/Nethermind.Blockchain.Test/FullPruning/FullPruningDiskTest.cs +++ 
b/src/Nethermind/Nethermind.Blockchain.Test/FullPruning/FullPruningDiskTest.cs @@ -17,6 +17,7 @@ using Nethermind.Core.Extensions; using Nethermind.Core.Test.Blockchain; using Nethermind.Core.Test.IO; +using Nethermind.Core.Test.Modules; using Nethermind.Db; using Nethermind.Db.FullPruning; using Nethermind.Db.Rocks; @@ -136,6 +137,12 @@ protected override async Task RunFullPruning(CancellationToken cancellationToken } } + [SetUp] + public void Setup() + { + if (PseudoNethermindModule.TestUseFlat) Assert.Ignore("Disabled in flat"); + } + [Test, MaxTime(Timeout.LongTestTime)] public async Task prune_on_disk_multiple_times() { diff --git a/src/Nethermind/Nethermind.Consensus.Test/ShareableTxProcessingSourceTests.cs b/src/Nethermind/Nethermind.Consensus.Test/ShareableTxProcessingSourceTests.cs index e8d1c1f95a7..0d6f6d8a949 100644 --- a/src/Nethermind/Nethermind.Consensus.Test/ShareableTxProcessingSourceTests.cs +++ b/src/Nethermind/Nethermind.Consensus.Test/ShareableTxProcessingSourceTests.cs @@ -7,6 +7,8 @@ using Nethermind.Core; using Nethermind.Core.Test.Builders; using Nethermind.Core.Test.Modules; +using Nethermind.Evm.State; +using Nethermind.Evm.TransactionProcessing; using NUnit.Framework; namespace Nethermind.Consensus.Test; @@ -19,8 +21,8 @@ public void OnSubsequentBuild_GiveDifferentWorldState() using IContainer container = new ContainerBuilder().AddModule(new TestNethermindModule()).Build(); IShareableTxProcessorSource shareableSource = container.Resolve(); - var scope1 = shareableSource.Build(Build.A.BlockHeader.TestObject); - var scope2 = shareableSource.Build(Build.A.BlockHeader.TestObject); + var scope1 = shareableSource.Build(IWorldState.PreGenesis); + var scope2 = shareableSource.Build(IWorldState.PreGenesis); scope1.WorldState.Should().NotBeSameAs(scope2.WorldState); } @@ -31,9 +33,9 @@ public void OnSubsequentBuild_AfterFirstScopeDispose_GiveSameWorldState() using IContainer container = new ContainerBuilder().AddModule(new 
TestNethermindModule()).Build(); IShareableTxProcessorSource shareableSource = container.Resolve(); - var scope1 = shareableSource.Build(Build.A.BlockHeader.TestObject); + var scope1 = shareableSource.Build(IWorldState.PreGenesis); scope1.Dispose(); - var scope2 = shareableSource.Build(Build.A.BlockHeader.TestObject); + var scope2 = shareableSource.Build(IWorldState.PreGenesis); scope1.WorldState.Should().BeSameAs(scope2.WorldState); } diff --git a/src/Nethermind/Nethermind.Consensus/Processing/BranchProcessor.cs b/src/Nethermind/Nethermind.Consensus/Processing/BranchProcessor.cs index 719adc62367..ce070783a71 100644 --- a/src/Nethermind/Nethermind.Consensus/Processing/BranchProcessor.cs +++ b/src/Nethermind/Nethermind.Consensus/Processing/BranchProcessor.cs @@ -70,8 +70,7 @@ public Block[] Process(BlockHeader? baseBlock, IReadOnlyList suggestedBlo } else { - BlockHeader? scopeBaseBlock = baseBlock ?? (suggestedBlock.IsGenesis ? suggestedBlock.Header : null); - worldStateCloser = stateProvider.BeginScope(scopeBaseBlock); + worldStateCloser = stateProvider.BeginScope(baseBlock); } CancellationTokenSource? backgroundCancellation = new(); diff --git a/src/Nethermind/Nethermind.Core.Test/Modules/FlatDbManagerTestCompat.cs b/src/Nethermind/Nethermind.Core.Test/Modules/FlatDbManagerTestCompat.cs new file mode 100644 index 00000000000..3e3ef35a28f --- /dev/null +++ b/src/Nethermind/Nethermind.Core.Test/Modules/FlatDbManagerTestCompat.cs @@ -0,0 +1,56 @@ +// SPDX-FileCopyrightText: 2026 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System; +using System.Threading; +using Nethermind.Core.Crypto; +using Nethermind.State.Flat; +using Nethermind.Trie.Pruning; +using NUnit.Framework; + +namespace Nethermind.Core.Test.Modules; + +/// +/// A LOT of test rely on the fact that trie store will assume state is available as long as the state root is +/// empty tree even if the blocknumber is not -1. This does not work with flat. 
We will ignore it for now. +/// +/// +internal class FlatDbManagerTestCompat(IFlatDbManager flatDbManager) : IFlatDbManager +{ + public SnapshotBundle GatherSnapshotBundle(in StateId baseBlock, ResourcePool.Usage usage) + { + IgnoreOnInvalidState(baseBlock); + return flatDbManager.GatherSnapshotBundle(baseBlock, usage); + } + + public ReadOnlySnapshotBundle GatherReadOnlySnapshotBundle(in StateId baseBlock) + { + IgnoreOnInvalidState(baseBlock); + return flatDbManager.GatherReadOnlySnapshotBundle(baseBlock); + } + + public bool HasStateForBlock(in StateId stateId) + { + IgnoreOnInvalidState(stateId); + return flatDbManager.HasStateForBlock(stateId); + } + + private void IgnoreOnInvalidState(StateId stateId) + { + if (stateId.StateRoot == Keccak.EmptyTreeHash && stateId.BlockNumber != -1 && + !flatDbManager.HasStateForBlock(stateId)) + { + Assert.Ignore("Incompatible test"); + } + } + + public void FlushCache(CancellationToken cancellationToken) => flatDbManager.FlushCache(cancellationToken); + + public void AddSnapshot(Snapshot snapshot, TransientResource transientResource) => flatDbManager.AddSnapshot(snapshot, transientResource); + + public event EventHandler? 
ReorgBoundaryReached + { + add => flatDbManager.ReorgBoundaryReached += value; + remove => flatDbManager.ReorgBoundaryReached -= value; + } +} diff --git a/src/Nethermind/Nethermind.Core.Test/Modules/PseudoNethermindModule.cs b/src/Nethermind/Nethermind.Core.Test/Modules/PseudoNethermindModule.cs index bc2b6a043fd..3381c6616c4 100644 --- a/src/Nethermind/Nethermind.Core.Test/Modules/PseudoNethermindModule.cs +++ b/src/Nethermind/Nethermind.Core.Test/Modules/PseudoNethermindModule.cs @@ -1,13 +1,16 @@ // SPDX-FileCopyrightText: 2024 Demerzel Solutions Limited // SPDX-License-Identifier: LGPL-3.0-only +using System; using System.Reflection; using Autofac; using Nethermind.Api; +using Nethermind.Blockchain.Synchronization; using Nethermind.Config; using Nethermind.Consensus; using Nethermind.Consensus.Processing; using Nethermind.Consensus.Scheduler; +using Nethermind.Db; using Nethermind.Init.Modules; using Nethermind.JsonRpc; using Nethermind.KeyStore; @@ -16,9 +19,12 @@ using Nethermind.Serialization.Json; using Nethermind.Serialization.Rlp; using Nethermind.Specs.ChainSpecStyle; +using Nethermind.State.Flat; +using Nethermind.State.Flat.ScopeProvider; using Nethermind.TxPool; using Nethermind.Wallet; using NSubstitute; +using NUnit.Framework; using Module = Autofac.Module; namespace Nethermind.Core.Test.Modules; @@ -32,9 +38,20 @@ namespace Nethermind.Core.Test.Modules; /// public class PseudoNethermindModule(ChainSpec spec, IConfigProvider configProvider, ILogManager logManager) : Module { + public static bool TestUseFlat = Environment.GetEnvironmentVariable("TEST_USE_FLAT") == "1"; + protected override void Load(ContainerBuilder builder) { IInitConfig initConfig = configProvider.GetConfig(); + if (TestUseFlat) + { + ISyncConfig syncConfig = configProvider.GetConfig(); + if (syncConfig.FastSync || syncConfig.SnapSync) + { + Assert.Ignore("Flat does not work with fast sync or snap sync"); + } + configProvider.GetConfig().Enabled = true; + } base.Load(builder); 
builder @@ -58,6 +75,15 @@ protected override void Load(ContainerBuilder builder) .AddSingleton() .AddSingleton(Substitute.For()) + // Flatdb (if used) need a more complete memcolumndb implementation with snapshots and sorted view. + .AddSingleton>((_) => new TestMemColumnsDb()) + .AddDecorator() + .Intercept((flatDbConfig) => + { + // Dont want to make it very slow + flatDbConfig.TrieWarmerWorkerCount = 2; + }) + // Rpc .AddSingleton() ; diff --git a/src/Nethermind/Nethermind.Core.Test/Modules/TestNethermindModule.cs b/src/Nethermind/Nethermind.Core.Test/Modules/TestNethermindModule.cs index 2bd914a09d3..e031e6de2a9 100644 --- a/src/Nethermind/Nethermind.Core.Test/Modules/TestNethermindModule.cs +++ b/src/Nethermind/Nethermind.Core.Test/Modules/TestNethermindModule.cs @@ -7,6 +7,7 @@ using Nethermind.Config; using Nethermind.Core.Specs; using Nethermind.Core.Test.Builders; +using Nethermind.Db; using Nethermind.Logging; using Nethermind.Serialization.Json; using Nethermind.Specs; diff --git a/src/Nethermind/Nethermind.Core.Test/TestMemColumnDb.cs b/src/Nethermind/Nethermind.Core.Test/TestMemColumnDb.cs index 630f69bd37f..e1df1c913bb 100644 --- a/src/Nethermind/Nethermind.Core.Test/TestMemColumnDb.cs +++ b/src/Nethermind/Nethermind.Core.Test/TestMemColumnDb.cs @@ -8,9 +8,9 @@ namespace Nethermind.Core.Test; public class TestMemColumnsDb : IColumnsDb - where TKey : notnull + where TKey : struct, Enum { - private readonly IDictionary _columnDbs = new Dictionary(); + private readonly IDictionary _columnDbs = new Dictionary(); public TestMemColumnsDb() { @@ -18,7 +18,7 @@ public TestMemColumnsDb() public TestMemColumnsDb(params TKey[] keys) { - foreach (var key in keys) + foreach (TKey key in keys) { GetColumnDb(key); } @@ -29,14 +29,31 @@ public TestMemColumnsDb(params TKey[] keys) public IColumnsWriteBatch StartWriteBatch() { + EnsureAllKey(); return new InMemoryColumnWriteBatch(this); } public IColumnDbSnapshot CreateSnapshot() { - throw new 
NotSupportedException("Snapshot not implemented"); + EnsureAllKey(); + return new Snapshot(_columnDbs); } public void Dispose() { } public void Flush(bool onlyWal = false) { } + + private void EnsureAllKey() + { + foreach (TKey key in Enum.GetValues()) + { + GetColumnDb(key); + } + } + + private class Snapshot(IDictionary columns) : IColumnDbSnapshot + { + public IReadOnlyKeyValueStore GetColumn(TKey key) => columns[key]; + + public void Dispose() { } + } } diff --git a/src/Nethermind/Nethermind.Core.Test/TestMemDb.cs b/src/Nethermind/Nethermind.Core.Test/TestMemDb.cs index b174709b966..c57b824934b 100644 --- a/src/Nethermind/Nethermind.Core.Test/TestMemDb.cs +++ b/src/Nethermind/Nethermind.Core.Test/TestMemDb.cs @@ -6,6 +6,8 @@ using System.Linq; using System.Runtime.CompilerServices; using FluentAssertions; +using Nethermind.Core.Collections; +using Nethermind.Core.Extensions; using Nethermind.Db; using Bytes = Nethermind.Core.Extensions.Bytes; @@ -14,7 +16,7 @@ namespace Nethermind.Core.Test; /// /// MemDB with additional tools for testing purposes since you can't use NSubstitute with refstruct /// -public class TestMemDb : MemDb, ITunableDb +public class TestMemDb : MemDb, ITunableDb, ISortedKeyValueStore { private readonly List<(byte[], ReadFlags)> _readKeys = new(); private readonly List<((byte[], byte[]?), WriteFlags)> _writes = new(); @@ -71,4 +73,68 @@ public void KeyWasWrittenWithFlags(byte[] key, WriteFlags flags, int times = 1) public void KeyWasRemoved(Func cond, int times = 1) => _removedKeys.Count(cond).Should().Be(times); public override IWriteBatch StartWriteBatch() => new InMemoryWriteBatch(this); public override void Flush(bool onlyWal) => FlushCount++; + + public byte[]? FirstKey => Keys.Min(); + public byte[]? 
LastKey => Keys.Max(); + public ISortedView GetViewBetween(ReadOnlySpan firstKeyInclusive, ReadOnlySpan lastKeyExclusive) + { + ArrayPoolList<(byte[], byte[]?)> sortedValue = new(1); + + foreach (KeyValuePair keyValuePair in GetAll()) + { + if (Bytes.BytesComparer.Compare(keyValuePair.Key, firstKeyInclusive) < 0) + { + continue; + } + + if (Bytes.BytesComparer.Compare(keyValuePair.Key, lastKeyExclusive) >= 0) + { + continue; + } + sortedValue.Add((keyValuePair.Key, keyValuePair.Value)); + } + + sortedValue.AsSpan().Sort((it1, it2) => Bytes.BytesComparer.Compare(it1.Item1, it2.Item1)); + return new FakeSortedView(sortedValue); + } + + private class FakeSortedView(ArrayPoolList<(byte[], byte[]?)> list) : ISortedView + { + private int idx = -1; + + public void Dispose() + { + list.Dispose(); + } + + public bool StartBefore(ReadOnlySpan value) + { + if (list.Count == 0) return false; + + idx = 0; + while (idx < list.Count) + { + if (Bytes.BytesComparer.Compare(list[idx].Item1, value) >= 0) + { + idx--; + return true; + } + idx++; + } + + // All keys are less than value - position at last element (largest key <= value) + idx = list.Count - 1; + return true; + } + + public bool MoveNext() + { + idx++; + if (idx >= list.Count) return false; + return true; + } + + public ReadOnlySpan CurrentKey => list[idx].Item1; + public ReadOnlySpan CurrentValue => list[idx].Item2; + } } diff --git a/src/Nethermind/Nethermind.Core.Test/Threading/ConcurrencyControllerTests.cs b/src/Nethermind/Nethermind.Core.Test/Threading/ConcurrencyControllerTests.cs index 0ebc2b3bcb4..34072a55435 100644 --- a/src/Nethermind/Nethermind.Core.Test/Threading/ConcurrencyControllerTests.cs +++ b/src/Nethermind/Nethermind.Core.Test/Threading/ConcurrencyControllerTests.cs @@ -24,4 +24,19 @@ public void ThreadLimiterWillLimit() limiter.TryTakeSlot(out _).Should().Be(true); limiter.TryTakeSlot(out _).Should().Be(false); } + + [Test] + public void ThreadLimiterWillLimitWithManualRequest() + { + 
ConcurrencyController limiter = new ConcurrencyController(3); + + limiter.TryRequestConcurrencyQuota().Should().Be(true); + limiter.TryRequestConcurrencyQuota().Should().Be(true); + limiter.TryRequestConcurrencyQuota().Should().Be(false); + + limiter.ReturnConcurrencyQuota(); + + limiter.TryRequestConcurrencyQuota().Should().Be(true); + limiter.TryRequestConcurrencyQuota().Should().Be(false); + } } diff --git a/src/Nethermind/Nethermind.Core.Test/Utils/RefCountingTests.cs b/src/Nethermind/Nethermind.Core.Test/Utils/RefCountingTests.cs new file mode 100644 index 00000000000..949a9179cbd --- /dev/null +++ b/src/Nethermind/Nethermind.Core.Test/Utils/RefCountingTests.cs @@ -0,0 +1,72 @@ +using System.Threading; +using FluentAssertions; +using Nethermind.Core.Utils; +using NUnit.Framework; + +namespace Nethermind.Core.Test.Utils; + +public class RefCountingTests +{ + private class TestRefCounting : RefCountingDisposable + { + private const int Used = 0; + private const int Cleaned = 1; + + private int _cleaned = Used; + private int _tryCount; + + public long TryCount => _tryCount; + + public bool Try() + { + Interlocked.Increment(ref _tryCount); + return TryAcquireLease(); + } + + protected override void CleanUp() + { + var existing = Interlocked.Exchange(ref _cleaned, Cleaned); + + // should be called only once and set it to used + existing.Should().Be(Used); + } + } + + [Test] + public void Two_threads() + { + const int sleepInMs = 100; + + var counter = new TestRefCounting(); + + var thread1 = new Thread(LeaseRelease); + var thread2 = new Thread(LeaseRelease); + + thread1.Start(); + thread2.Start(); + + Thread.Sleep(sleepInMs); + + // dispose once + counter.Dispose(); + + thread1.Join(); + thread2.Join(); + + const int minLeasesPerSecond = 1_000_000; + const int msInSec = 1000; + const int minLeaseCount = minLeasesPerSecond * sleepInMs / msInSec; + + counter.TryCount.Should().BeGreaterThan(minLeaseCount, + $"On modern CPUs the speed of lease should be bigger than 
{minLeasesPerSecond} / s"); + + void LeaseRelease() + { + while (counter.Try()) + { + // after lease, dispose + counter.Dispose(); + } + } + } +} diff --git a/src/Nethermind/Nethermind.Core/Collections/CollectionExtensions.cs b/src/Nethermind/Nethermind.Core/Collections/CollectionExtensions.cs index bf689b2519f..596af6faa9c 100644 --- a/src/Nethermind/Nethermind.Core/Collections/CollectionExtensions.cs +++ b/src/Nethermind/Nethermind.Core/Collections/CollectionExtensions.cs @@ -49,6 +49,14 @@ public static void AddRange(this ICollection list, IList items) } } + public static void AddOrUpdateRange(this IDictionary dict, IEnumerable> items) + { + foreach (KeyValuePair kv in items) + { + dict[kv.Key] = kv.Value; + } + } + [OverloadResolutionPriority(1)] public static void AddRange(this ICollection list, IReadOnlyList items) { diff --git a/src/Nethermind/Nethermind.Core/Threading/ConcurrencyController.cs b/src/Nethermind/Nethermind.Core/Threading/ConcurrencyController.cs index e841b44cd23..8242ca854d6 100644 --- a/src/Nethermind/Nethermind.Core/Threading/ConcurrencyController.cs +++ b/src/Nethermind/Nethermind.Core/Threading/ConcurrencyController.cs @@ -49,4 +49,17 @@ public void Dispose() limiter.ReturnSlot(); } } + + public bool TryRequestConcurrencyQuota() + { + if (Interlocked.Decrement(ref _slots) > 0) + { + return true; + } + + ReturnConcurrencyQuota(); + return false; + } + + public void ReturnConcurrencyQuota() => Interlocked.Increment(ref _slots); } diff --git a/src/Nethermind/Nethermind.Core/Threading/ReadWriteLockBox.cs b/src/Nethermind/Nethermind.Core/Threading/ReadWriteLockBox.cs new file mode 100644 index 00000000000..e9c715585c7 --- /dev/null +++ b/src/Nethermind/Nethermind.Core/Threading/ReadWriteLockBox.cs @@ -0,0 +1,60 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System; +using System.Threading; + +namespace Nethermind.Core.Threading; + +/// +/// Rust style wrapper of locked item. 
Make it a bit easier to know which object this lock is protecting. +/// +/// +public readonly struct ReadWriteLockBox(T item) +{ + private readonly ReaderWriterLockSlim _lock = new(); + + public Lock EnterReadLock(out T item1) + { + item1 = item; + return new Lock(_lock, true); + } + + public Lock EnterWriteLock(out T item1) + { + item1 = item; + return new Lock(_lock, false); + } + + public readonly ref struct Lock : IDisposable + { + private readonly ReaderWriterLockSlim _rwLock; + private readonly bool _read; + + public Lock(ReaderWriterLockSlim rwLock, bool read) + { + _rwLock = rwLock; + _read = read; + if (_read) + { + _rwLock.EnterReadLock(); + } + else + { + _rwLock.EnterWriteLock(); + } + } + + public void Dispose() + { + if (_read) + { + _rwLock.ExitReadLock(); + } + else + { + _rwLock.ExitWriteLock(); + } + } + } +} diff --git a/src/Nethermind/Nethermind.Core/Utils/RefCountingDisposable.cs b/src/Nethermind/Nethermind.Core/Utils/RefCountingDisposable.cs new file mode 100644 index 00000000000..042f9f572ad --- /dev/null +++ b/src/Nethermind/Nethermind.Core/Utils/RefCountingDisposable.cs @@ -0,0 +1,139 @@ +using System; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; +using System.Runtime.InteropServices; +using System.Threading; + +namespace Nethermind.Core.Utils; + +/// +/// Provides a component that can be disposed multiple times and runs only on the last dispose. 
+/// +public abstract class RefCountingDisposable : IDisposable +{ + private const int Single = 1; + private const int NoAccessors = 0; + private const int Disposing = -1; + + protected PaddedValue _leases; + + protected RefCountingDisposable(int initialCount = Single) + { + _leases.Value = initialCount; + } + + public void AcquireLease() + { + if (!TryAcquireLease()) + { + ThrowCouldNotAcquire(); + } + + [DoesNotReturn] + [StackTraceHidden] + static void ThrowCouldNotAcquire() + { + throw new InvalidOperationException("The lease cannot be acquired"); + } + } + + protected bool TryAcquireLease() + { + // Volatile read for starting value + long current = Volatile.Read(ref _leases.Value); + if (current == Disposing) + { + // Already disposed + return false; + } + + while (true) + { + long prev = Interlocked.CompareExchange(ref _leases.Value, current + Single, current); + if (prev == current) + { + // Successfully acquired + return true; + } + if (prev == Disposing) + { + // Already disposed + return false; + } + + // Try again with new starting value + current = prev; + // Add PAUSE instruction to reduce shared core contention + Thread.SpinWait(1); + } + } + + /// + /// Disposes it once, decreasing the lease count by 1. 
+ /// + public void Dispose() => ReleaseLeaseOnce(); + + private void ReleaseLeaseOnce() + { + // Volatile read for starting value + long current = Volatile.Read(ref _leases.Value); + if (current <= NoAccessors) + { + // Mismatched Acquire/Release + ThrowOverDisposed(); + } + + while (true) + { + long prev = Interlocked.CompareExchange(ref _leases.Value, current - Single, current); + if (prev != current) + { + current = prev; + // Add PAUSE instruction to reduce shared core contention + Thread.SpinWait(1); + continue; + } + if (prev == Single) + { + // Last use, try to dispose underlying + break; + } + if (prev <= NoAccessors) + { + // Mismatched Acquire/Release + ThrowOverDisposed(); + } + + // Successfully released + return; + } + + if (Interlocked.CompareExchange(ref _leases.Value, Disposing, NoAccessors) == NoAccessors) + { + // set to disposed by this Release + CleanUp(); + } + + [DoesNotReturn] + [StackTraceHidden] + static void ThrowOverDisposed() + { + throw new ObjectDisposedException("The lease has already been disposed"); + } + } + + protected abstract void CleanUp(); + + public override string ToString() + { + var leases = Volatile.Read(ref _leases.Value); + return leases == Disposing ? 
"Disposed" : $"Leases: {leases}"; + } + + [StructLayout(LayoutKind.Explicit, Size = 128)] + protected struct PaddedValue + { + [FieldOffset(64)] + public long Value; + } +} diff --git a/src/Nethermind/Nethermind.Db.Rocks/Config/DbConfig.cs b/src/Nethermind/Nethermind.Db.Rocks/Config/DbConfig.cs index c6004bcfc71..6b370d5f73b 100644 --- a/src/Nethermind/Nethermind.Db.Rocks/Config/DbConfig.cs +++ b/src/Nethermind/Nethermind.Db.Rocks/Config/DbConfig.cs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2022 Demerzel Solutions Limited // SPDX-License-Identifier: LGPL-3.0-only +using System; using Nethermind.Core.Extensions; namespace Nethermind.Db.Rocks.Config; @@ -182,7 +183,7 @@ public class DbConfig : IDbConfig public ulong StateDbWriteBufferSize { get; set; } = (ulong)64.MB(); public ulong StateDbWriteBufferNumber { get; set; } = 4; - public bool? StateDbVerifyChecksum { get; set; } + public bool? StateDbVerifyChecksum { get; set; } = true; public ulong? StateDbRowCacheSize { get; set; } public bool StateDbEnableFileWarmer { get; set; } = false; public double StateDbCompressibilityHint { get; set; } = 0.45; @@ -234,6 +235,9 @@ public class DbConfig : IDbConfig // Default is 1 MB. "max_write_batch_group_size_bytes=4000000;" + + // Dont do periodic compaction + "ttl=0;" + + "periodic_compaction_seconds=0;" + ""; public string StateDbLargeMemoryRocksDbOptions { get; set; } = @@ -271,4 +275,129 @@ public class DbConfig : IDbConfig public string L1OriginDbRocksDbOptions { get; set; } = ""; public string? L1OriginDbAdditionalRocksDbOptions { get; set; } + + public bool? FlatDbVerifyChecksum { get; set; } = true; + public string FlatDbRocksDbOptions { get; set; } = + + // Common across flat columns. 
+ "min_write_buffer_number_to_merge=2;" + + "block_based_table_factory.block_restart_interval=4;" + + "block_based_table_factory.data_block_index_type=kDataBlockBinaryAndHash;" + + "block_based_table_factory.data_block_hash_table_util_ratio=0.7;" + + "block_based_table_factory.block_size=16000;" + + "block_based_table_factory.filter_policy=ribbonfilter:10:3;" + + "max_write_batch_group_size_bytes=4000000;" + + "block_based_table_factory.pin_l0_filter_and_index_blocks_in_cache=true;" + + "block_based_table_factory.prepopulate_block_cache=kFlushOnly;" + + "block_based_table_factory.whole_key_filtering=true;" + // should be default. Just in case. + "level_compaction_dynamic_level_bytes=false;" + + + // We bsearch instead of partitioned tree. This take up memory for improved latency. + "block_based_table_factory.partition_filters=false;" + + "block_based_table_factory.index_type=kBinarySearch;" + + + "ttl=0;" + + "periodic_compaction_seconds=0;" + + "compression=kLZ4Compression;" + + + // Reduce num of files. Tend to be a good thing. + "target_file_size_multiplier=2;" + + + // Wal flushed manually in persistence. + "manual_wal_flush=true;" + + + // When an SST is removed, also remove the cached blocks instead of waiting for it to disappear + "uncache_aggressiveness=1000;" + + + // Small by default, column will override + "write_buffer_size=1000000;" + + ""; + public string? FlatDbAdditionalRocksDbOptions { get; set; } + + public string? FlatMetadataDbRocksDbOptions { get; set; } = "max_bytes_for_level_base=1000000;"; + public string? FlatMetadataDbAdditionalRocksDbOptions { get; set; } + + // Account is too small so we make it so that the file and buffer is smaller so that it does not compact too much + // at once + public string? FlatAccountDbRocksDbOptions { get; set; } = + // The account db is small, already using slim encoding. Disabling compression does not lose much. + "compression=kNoCompression;" + + + // Keep last level bloom filter. 
Take up most index memory + "optimize_filters_for_hits=false;" + + + // account db is really small in writes, so we set low buffer size to prevent too many different version account + // in the same memtable. + "target_file_size_multiplier=3;" + + "target_file_size_base=32000000;" + + "max_bytes_for_level_multiplier=15;" + // Reduce level count + "max_bytes_for_level_base=128000000;" + + + // account db have no benefit in locality whatsoever, and have compression disabled. + "block_based_table_factory.block_size=4096;" + + + // Smaller + "write_buffer_size=16000000;" + + "max_write_buffer_number=4;" + + ""; + public string? FlatAccountDbAdditionalRocksDbOptions { get; set; } + + public string? FlatStorageDbRocksDbOptions { get; set; } = + // Keep last level bloom filter. Take up most index memory + "optimize_filters_for_hits=false;" + + + // Much like account kinda small. + "target_file_size_base=64000000;" + + + // Using 4kb size is faster, IO wise, but uses additional 500 MB of memory, which if put on block cache is much better. + "block_based_table_factory.block_size=8000;" + + + // Smaller + "write_buffer_size=32000000;" + + "max_write_buffer_number=4;" + + ""; + + public string? FlatStorageDbAdditionalRocksDbOptions { get; set; } + + const string? FlatDbCommonTrieOptions = + "level_compaction_dynamic_level_bytes=true;" + + "block_based_table_factory.block_restart_interval=8;" + + "block_based_table_factory.block_size=16000;" + + ""; + + // Only 1 gig in total, but almost 1/3rd of the writes. + public string? FlatStateTopNodesDbRocksDbOptions { get; set; } = + FlatDbCommonTrieOptions + + "write_buffer_size=64000000;" + + "max_write_buffer_number=4;" + + ""; + public string? FlatStateNodesDbAdditionalRocksDbOptions { get; set; } + + // So not written as much so lower buffer size + public string? FlatStateNodesDbRocksDbOptions { get; set; } = + FlatDbCommonTrieOptions + + "write_buffer_size=32000000;" + + "max_write_buffer_number=4;" + + ""; + public string? 
FlatStateTopNodesDbAdditionalRocksDbOptions { get; set; } + + // Most writes + public string? FlatStorageNodesDbRocksDbOptions { get; set; } = + FlatDbCommonTrieOptions + + // Slight increase to account for high writes + "max_bytes_for_level_base=350000000;" + + "write_buffer_size=64000000;" + + "max_write_buffer_number=8;" + + ""; + public string? FlatStorageNodesDbAdditionalRocksDbOptions { get; set; } + + public string? FlatFallbackNodesNodesDbRocksDbOptions { get; set; } = + FlatDbCommonTrieOptions + + // Fallback nodes is tiny. Like KB level small. This is generous. + "max_bytes_for_level_base=4000000;" + + ""; + public string? FlatFallbackNodesNodesDbAdditionalRocksDbOptions { get; set; } + + public string? PreimageDbRocksDbOptions { get; set; } = ""; + public string? PreimageDbAdditionalRocksDbOptions { get; set; } } diff --git a/src/Nethermind/Nethermind.Db.Rocks/Config/IDbConfig.cs b/src/Nethermind/Nethermind.Db.Rocks/Config/IDbConfig.cs index fa7c8e2023b..a68b3b348a0 100644 --- a/src/Nethermind/Nethermind.Db.Rocks/Config/IDbConfig.cs +++ b/src/Nethermind/Nethermind.Db.Rocks/Config/IDbConfig.cs @@ -102,4 +102,32 @@ public interface IDbConfig : IConfig string L1OriginDbRocksDbOptions { get; set; } string? L1OriginDbAdditionalRocksDbOptions { get; set; } + + bool? FlatDbVerifyChecksum { get; set; } + string FlatDbRocksDbOptions { get; set; } + string? FlatDbAdditionalRocksDbOptions { get; set; } + + string? FlatMetadataDbRocksDbOptions { get; set; } + string? FlatMetadataDbAdditionalRocksDbOptions { get; set; } + + string? FlatAccountDbRocksDbOptions { get; set; } + string? FlatAccountDbAdditionalRocksDbOptions { get; set; } + + string? FlatStorageDbRocksDbOptions { get; set; } + string? FlatStorageDbAdditionalRocksDbOptions { get; set; } + + string? FlatStateNodesDbRocksDbOptions { get; set; } + string? FlatStateNodesDbAdditionalRocksDbOptions { get; set; } + + string? FlatStateTopNodesDbRocksDbOptions { get; set; } + string? 
FlatStateTopNodesDbAdditionalRocksDbOptions { get; set; } + + string? FlatStorageNodesDbRocksDbOptions { get; set; } + string? FlatStorageNodesDbAdditionalRocksDbOptions { get; set; } + + string? FlatFallbackNodesNodesDbRocksDbOptions { get; set; } + string? FlatFallbackNodesNodesDbAdditionalRocksDbOptions { get; set; } + + string? PreimageDbRocksDbOptions { get; set; } + public string? PreimageDbAdditionalRocksDbOptions { get; set; } } diff --git a/src/Nethermind/Nethermind.Db.Rocks/DbOnTheRocks.cs b/src/Nethermind/Nethermind.Db.Rocks/DbOnTheRocks.cs index cca010c66f8..8590a662a8c 100644 --- a/src/Nethermind/Nethermind.Db.Rocks/DbOnTheRocks.cs +++ b/src/Nethermind/Nethermind.Db.Rocks/DbOnTheRocks.cs @@ -221,7 +221,6 @@ private RocksDb Init(string basePath, string dbPath, IDbConfig dbConfig, ILogMan CreateMarkerIfCorrupt(x); throw; } - } private void WarmupFile(string basePath, RocksDb db) @@ -438,7 +437,7 @@ private long GetMemtableSize() return 0; } - [GeneratedRegex("(?[^; ]+)\\=(?[^; ]+);", RegexOptions.Singleline | RegexOptions.NonBacktracking | RegexOptions.ExplicitCapture)] + [GeneratedRegex("(?[A-Za-z0-9_\\.]+)\\=(?[^; ]+);", RegexOptions.Singleline | RegexOptions.NonBacktracking | RegexOptions.ExplicitCapture)] private static partial Regex ExtractDbOptionsRegex(); public static IDictionary ExtractOptions(string dbOptions) @@ -532,7 +531,7 @@ protected virtual void BuildOptions(IRocksDbConfig dbConfig, Options optio Marshal.FreeHGlobal(optsPtr); } - if (dbConfig.WriteBufferSize is not null) + if (dbConfig.WriteBufferSize > 0) { _writeBufferSize = dbConfig.WriteBufferSize.Value; options.SetWriteBufferSize(dbConfig.WriteBufferSize.Value); @@ -1484,8 +1483,7 @@ private void ReleaseUnmanagedResources() public void Dispose() { - if (_isDisposing) return; - _isDisposing = true; + if (Interlocked.CompareExchange(ref _isDisposing, true, false)) return; if (_logger.IsInfo) _logger.Info($"Disposing DB {Name}"); diff --git 
a/src/Nethermind/Nethermind.Db.Rocks/HyperClockCacheWrapper.cs b/src/Nethermind/Nethermind.Db.Rocks/HyperClockCacheWrapper.cs index a47654fbd37..03d722ab29a 100644 --- a/src/Nethermind/Nethermind.Db.Rocks/HyperClockCacheWrapper.cs +++ b/src/Nethermind/Nethermind.Db.Rocks/HyperClockCacheWrapper.cs @@ -18,6 +18,7 @@ public HyperClockCacheWrapper(ulong capacity = 32_000_000) : base(ownsHandle: tr protected override bool ReleaseHandle() { + // Temporary disable to see if it fix crash RocksDbSharp.Native.Instance.rocksdb_cache_destroy(handle); return true; } diff --git a/src/Nethermind/Nethermind.Db/DbNames.cs b/src/Nethermind/Nethermind.Db/DbNames.cs index 2030be8e2bd..821a46eee2d 100644 --- a/src/Nethermind/Nethermind.Db/DbNames.cs +++ b/src/Nethermind/Nethermind.Db/DbNames.cs @@ -7,6 +7,7 @@ public static class DbNames { public const string Storage = "storage"; public const string State = "state"; + public const string Flat = "flat"; public const string Code = "code"; public const string Blocks = "blocks"; public const string Headers = "headers"; @@ -20,5 +21,6 @@ public static class DbNames public const string DiscoveryNodes = "discoveryNodes"; public const string DiscoveryV5Nodes = "discoveryV5Nodes"; public const string PeersDb = "peers"; + public const string Preimage = "preimage"; } } diff --git a/src/Nethermind/Nethermind.Db/FlatDbConfig.cs b/src/Nethermind/Nethermind.Db/FlatDbConfig.cs new file mode 100644 index 00000000000..12c903a3da9 --- /dev/null +++ b/src/Nethermind/Nethermind.Db/FlatDbConfig.cs @@ -0,0 +1,24 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using Nethermind.Core.Extensions; + +namespace Nethermind.Db; + +public class FlatDbConfig : IFlatDbConfig +{ + public bool Enabled { get; set; } = false; + public bool EnablePreimageRecording { get; set; } = false; + public bool ImportFromPruningTrieState { get; set; } = false; + public bool InlineCompaction { get; set; } = false; + public 
bool VerifyWithTrie { get; set; } = false; + public FlatLayout Layout { get; set; } = FlatLayout.Flat; + public int CompactSize { get; set; } = 32; + public int MaxInFlightCompactJob { get; set; } = 32; + public int MaxReorgDepth { get; set; } = 256; + public int MidCompactSize { get; set; } = 4; + public int MinReorgDepth { get; set; } = 128; + public int TrieWarmerWorkerCount { get; set; } = -1; + public long BlockCacheSizeBudget { get; set; } = 1.GiB(); + public long TrieCacheMemoryBudget { get; set; } = 512.MiB(); +} diff --git a/src/Nethermind/Nethermind.Db/FlatLayout.cs b/src/Nethermind/Nethermind.Db/FlatLayout.cs new file mode 100644 index 00000000000..fcf818f6e54 --- /dev/null +++ b/src/Nethermind/Nethermind.Db/FlatLayout.cs @@ -0,0 +1,11 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +namespace Nethermind.Db; + +public enum FlatLayout +{ + Flat, + FlatInTrie, + PreimageFlat, +} diff --git a/src/Nethermind/Nethermind.Db/IFlatDbConfig.cs b/src/Nethermind/Nethermind.Db/IFlatDbConfig.cs new file mode 100644 index 00000000000..1f10c45aacd --- /dev/null +++ b/src/Nethermind/Nethermind.Db/IFlatDbConfig.cs @@ -0,0 +1,51 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using Nethermind.Config; + +namespace Nethermind.Db; + +public interface IFlatDbConfig : IConfig +{ + [ConfigItem(Description = "Block cache size budget", DefaultValue = "1073741824")] + long BlockCacheSizeBudget { get; set; } + + [ConfigItem(Description = "Compact size", DefaultValue = "32")] + int CompactSize { get; set; } + + [ConfigItem(Description = "Enabled", DefaultValue = "false")] + bool Enabled { get; set; } + + [ConfigItem(Description = "Enable recording of preimages (address/slot hash to original bytes)", DefaultValue = "false")] + bool EnablePreimageRecording { get; set; } + + [ConfigItem(Description = "Import from pruning trie state db", DefaultValue = "false")] + 
bool ImportFromPruningTrieState { get; set; } + + [ConfigItem(Description = "Inline compaction", DefaultValue = "false")] + bool InlineCompaction { get; set; } + + [ConfigItem(Description = "Flat db layout", DefaultValue = "Flat")] + FlatLayout Layout { get; set; } + + [ConfigItem(Description = "Max in flight compact job", DefaultValue = "32")] + int MaxInFlightCompactJob { get; set; } + + [ConfigItem(Description = "Max reorg depth", DefaultValue = "256")] + int MaxReorgDepth { get; set; } + + [ConfigItem(Description = "Compact interval", DefaultValue = "4")] + int MidCompactSize { get; set; } + + [ConfigItem(Description = "Minimum reorg depth", DefaultValue = "128")] + int MinReorgDepth { get; set; } + + [ConfigItem(Description = "Trie cache memory target", DefaultValue = "536870912")] + long TrieCacheMemoryBudget { get; set; } + + [ConfigItem(Description = "Trie warmer worker count (-1 for processor count - 1, 0 to disable)", DefaultValue = "-1")] + int TrieWarmerWorkerCount { get; set; } + + [ConfigItem(Description = "Verify with trie", DefaultValue = "false")] + bool VerifyWithTrie { get; set; } +} diff --git a/src/Nethermind/Nethermind.Init/Modules/FlatRocksDbConfigAdjuster.cs b/src/Nethermind/Nethermind.Init/Modules/FlatRocksDbConfigAdjuster.cs new file mode 100644 index 00000000000..30122863ebb --- /dev/null +++ b/src/Nethermind/Nethermind.Init/Modules/FlatRocksDbConfigAdjuster.cs @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: 2026 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System; +using Nethermind.Core; +using Nethermind.Core.Extensions; +using Nethermind.Db; +using Nethermind.Db.Rocks; +using Nethermind.Db.Rocks.Config; +using Nethermind.Logging; +using Nethermind.State.Flat; + +namespace Nethermind.Init.Modules; + +/// +/// Adjust rocksdb config depending on the flatdb config +/// +internal class FlatRocksDbConfigAdjuster( + IRocksDbConfigFactory rocksDbConfigFactory, + IFlatDbConfig flatDbConfig, + IDisposableStack 
disposeStack, + ILogManager logManager) + : IRocksDbConfigFactory +{ + private readonly ILogger _logger = logManager.GetClassLogger(); + + public IRocksDbConfig GetForDatabase(string databaseName, string? columnName) + { + IRocksDbConfig config = rocksDbConfigFactory.GetForDatabase(databaseName, columnName); + if (databaseName == nameof(DbNames.Flat)) + { + string additionalConfig = ""; + if (flatDbConfig.Layout == FlatLayout.FlatInTrie) + { + // For flat in trie, add optimize filter for hits and turn on partitioned index, this reduces + // memory at expense of latency. + additionalConfig = config.RocksDbOptions + + "optimize_filters_for_hits=true;" + + "block_based_table_factory.partition_filters=true;" + + "block_based_table_factory.index_type=kTwoLevelIndexSearch;"; + } + + IntPtr? cacheHandle = null; + if (columnName == nameof(FlatDbColumns.Account)) + { + ulong cacheCapacity = (ulong)(flatDbConfig.BlockCacheSizeBudget * 0.3); + if (_logger.IsInfo) _logger.Info($"Setting {(cacheCapacity / (ulong)1.MiB()):N0} MB of block cache to account"); + HyperClockCacheWrapper cacheWrapper = new(cacheCapacity); + cacheHandle = cacheWrapper.Handle; + disposeStack.Push(cacheWrapper); + } + + if (columnName == nameof(FlatDbColumns.Storage)) + { + ulong cacheCapacity = (ulong)(flatDbConfig.BlockCacheSizeBudget * 0.7); + if (_logger.IsInfo) _logger.Info($"Setting {(cacheCapacity / (ulong)1.MiB()):N0} MB of block cache to storage"); + HyperClockCacheWrapper cacheWrapper = new(cacheCapacity); + cacheHandle = cacheWrapper.Handle; + disposeStack.Push(cacheWrapper); + } + + config = new AdjustedRocksdbConfig(config, additionalConfig, config.WriteBufferSize.GetValueOrDefault(), cacheHandle); + } + + return config; + } +} diff --git a/src/Nethermind/Nethermind.Init/Modules/FlatWorldStateModule.cs b/src/Nethermind/Nethermind.Init/Modules/FlatWorldStateModule.cs new file mode 100644 index 00000000000..c603ae8befe --- /dev/null +++ 
b/src/Nethermind/Nethermind.Init/Modules/FlatWorldStateModule.cs @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System; +using Autofac; +using Microsoft.AspNetCore.Http; +using Nethermind.Api.Steps; +using Nethermind.Blockchain; +using Nethermind.Blockchain.Find; +using Nethermind.Blockchain.FullPruning; +using Nethermind.Blockchain.Synchronization; +using Nethermind.Config; +using Nethermind.Core; +using Nethermind.Db; +using Nethermind.Db.Rocks.Config; +using Nethermind.Init.Steps; +using Nethermind.JsonRpc; +using Nethermind.JsonRpc.Modules.Admin; +using Nethermind.Logging; +using Nethermind.Monitoring.Config; +using Nethermind.State; +using Nethermind.State.Flat; +using Nethermind.State.Flat.Persistence; +using Nethermind.State.Flat.ScopeProvider; + +namespace Nethermind.Init.Modules; + +public class FlatWorldStateModule(IFlatDbConfig flatDbConfig) : Module +{ + protected override void Load(ContainerBuilder builder) + { + builder + + // Implementation of nethermind interfaces + .AddSingleton() + .OnActivate((worldStateManager, ctx) => + { + new TrieStoreBoundaryWatcher(worldStateManager, ctx.Resolve(), ctx.Resolve()); + }) + .AddSingleton() + + // Disable some pruning trie store specific components + .AddSingleton() + .AddSingleton(_ => throw new NotSupportedException($"{nameof(MainPruningTrieStoreFactory)} disabled.")) + .AddSingleton(_ => throw new NotSupportedException($"{nameof(PruningTrieStateFactory)} disabled.")) + + // The actual flatDb components + .AddSingleton((ctx) => new FlatDbManager( + ctx.Resolve(), + ctx.Resolve(), + ctx.Resolve(), + ctx.Resolve(), + ctx.Resolve(), + ctx.Resolve(), + ctx.Resolve(), + ctx.Resolve(), + ctx.Resolve().EnableDetailedMetric)) + .AddSingleton() + .AddSingleton() + .AddSingleton() + .AddSingleton() + .AddSingleton() + .AddSingleton(flatDbConfig.TrieWarmerWorkerCount == 0 + ? 
_ => new NoopTrieWarmer() + : ctx => ctx.Resolve()) + .AddSingleton() + .Add() + + // Persistences + .AddColumnDatabase(DbNames.Flat) + .AddSingleton() + .AddSingleton() + .AddDecorator() + + .AddSingleton() + .AddDatabase(DbNames.Preimage) + + .AddSingleton((flatDbConfig, exitSource, logManager, ctx) => + { + IPersistence persistence = flatDbConfig.Layout switch + { + FlatLayout.Flat => ctx.Resolve(), + FlatLayout.FlatInTrie => ctx.Resolve(), + FlatLayout.PreimageFlat => ctx.Resolve(), + _ => throw new NotSupportedException($"Unsupported layout {flatDbConfig.Layout}") + }; + + if (flatDbConfig.EnablePreimageRecording) + { + IDb preimageDb = ctx.ResolveKeyed(DbNames.Preimage); + persistence = new PreimageRecordingPersistence(persistence, preimageDb); + } + + return new CachedReaderPersistence(persistence, exitSource, logManager); + }) + ; + + if (flatDbConfig.ImportFromPruningTrieState) + { + builder + .AddSingleton() + .AddStep(typeof(ImportFlatDb)); + } + else + { + builder + .AddDecorator((ctx, syncConfig) => + { + ILogger logger = ctx.Resolve().GetClassLogger(); + if (syncConfig.FastSync || syncConfig.SnapSync) + { + if (logger.IsWarn) logger.Warn("Fast sync and snap sync turned off with FlatDB"); + syncConfig.FastSync = false; + syncConfig.SnapSync = false; + } + return syncConfig; + }); + } + } + + /// + /// Need to stub out, or it will register trie store specific module + /// + private class PruningTrieStateAdminRpcModuleStub : IPruningTrieStateAdminRpcModule + { + public ResultWrapper admin_prune() => ResultWrapper.Success(PruningStatus.Disabled); + + public ResultWrapper admin_verifyTrie(BlockParameter block) => ResultWrapper.Success("disable"); + } +} diff --git a/src/Nethermind/Nethermind.Init/Modules/NethermindModule.cs b/src/Nethermind/Nethermind.Init/Modules/NethermindModule.cs index 41df70db238..00828ee5e45 100644 --- a/src/Nethermind/Nethermind.Init/Modules/NethermindModule.cs +++ b/src/Nethermind/Nethermind.Init/Modules/NethermindModule.cs @@ -1,6 
+1,7 @@ // SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited // SPDX-License-Identifier: LGPL-3.0-only +using System; using System.IO.Abstractions; using Autofac; using Nethermind.Abi; @@ -15,6 +16,7 @@ using Nethermind.Core.Specs; using Nethermind.Core.Timers; using Nethermind.Crypto; +using Nethermind.Db; using Nethermind.Era1; using Nethermind.JsonRpc; using Nethermind.Logging; @@ -81,6 +83,9 @@ protected override void Load(ContainerBuilder builder) { builder.AddSingleton(NullBlobTxStorage.Instance); } + + if (configProvider.GetConfig().Enabled) + builder.AddModule(new FlatWorldStateModule(configProvider.GetConfig())); } // Just a wrapper to make it clear, these three are expected to be available at the time of configurations. diff --git a/src/Nethermind/Nethermind.Init/Modules/PrewarmerModule.cs b/src/Nethermind/Nethermind.Init/Modules/PrewarmerModule.cs index a5131853f84..48f3a30e016 100644 --- a/src/Nethermind/Nethermind.Init/Modules/PrewarmerModule.cs +++ b/src/Nethermind/Nethermind.Init/Modules/PrewarmerModule.cs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited // SPDX-License-Identifier: LGPL-3.0-only +using System; using Autofac; using Nethermind.Blockchain; using Nethermind.Config; @@ -41,6 +42,8 @@ protected override void Load(ContainerBuilder builder) // module, so singleton here is like scoped but exclude inner prewarmer lifetime. 
.AddSingleton() .AddScoped() + + // This class create the block processing env with worldstate that populate the cache .Add() // These are the actual decorated component that provide cached result diff --git a/src/Nethermind/Nethermind.Init/Nethermind.Init.csproj b/src/Nethermind/Nethermind.Init/Nethermind.Init.csproj index f1473282530..3f3637408f2 100644 --- a/src/Nethermind/Nethermind.Init/Nethermind.Init.csproj +++ b/src/Nethermind/Nethermind.Init/Nethermind.Init.csproj @@ -13,6 +13,7 @@ + <_Parameter1>Nethermind.Runner.Test diff --git a/src/Nethermind/Nethermind.Init/Steps/ImportFlatDb.cs b/src/Nethermind/Nethermind.Init/Steps/ImportFlatDb.cs new file mode 100644 index 00000000000..776e8af38f1 --- /dev/null +++ b/src/Nethermind/Nethermind.Init/Steps/ImportFlatDb.cs @@ -0,0 +1,73 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System; +using System.Threading; +using System.Threading.Tasks; +using Nethermind.Api.Steps; +using Nethermind.Blockchain; +using Nethermind.Config; +using Nethermind.Core; +using Nethermind.Db; +using Nethermind.Logging; +using Nethermind.Monitoring; +using Nethermind.State.Flat; +using Nethermind.State.Flat.Persistence; + +namespace Nethermind.Init.Steps; + +[RunnerStepDependencies( + dependencies: [typeof(InitializeBlockTree)], + dependents: [typeof(InitializeBlockchain)] +)] +public class ImportFlatDb( + IBlockTree blockTree, + IPersistence persistence, + Importer importer, + IProcessExitSource exitSource, + IFlatDbConfig flatDbConfig, + ILogManager logManager +) : IStep +{ + ILogger _logger = logManager.GetClassLogger(); + + public async Task Execute(CancellationToken cancellationToken) + { + // Validate that we're not using PreimageFlat layout + if (flatDbConfig.Layout == FlatLayout.PreimageFlat) + { + if (_logger.IsError) _logger.Error("Cannot import with FlatLayout.PreimageFlat. 
Use FlatLayout.Flat or FlatLayout.FlatInTrie instead."); + if (_logger.IsError) _logger.Error("PreimageFlat mode does not support importing from trie state because the importer uses hash-based raw operations."); + exitSource.Exit(1); + return; + } + + BlockHeader? head = blockTree.Head?.Header; + if (head is null) return; + + using (var reader = persistence.CreateReader()) + { + if (_logger.IsWarn) _logger.Warn($"Current state is {reader.CurrentState}"); + if (reader.CurrentState.BlockNumber > 0) + { + if (_logger.IsInfo) _logger.Info("Flat db already exist"); + return; + } + } + + if (_logger.IsInfo) _logger.Info($"Copying state {head.ToString(BlockHeader.Format.Short)} with state root {head.StateRoot}"); + + try + { + await importer.Copy(new StateId(head), cancellationToken); + } + catch (OperationCanceledException) + { + if (_logger.IsInfo) _logger.Info("Import cancelled by user"); + exitSource.Exit(1); + return; + } + + exitSource.Exit(0); + } +} diff --git a/src/Nethermind/Nethermind.Runner.Test/Ethereum/Steps/EthereumStepsLoaderTests.cs b/src/Nethermind/Nethermind.Runner.Test/Ethereum/Steps/EthereumStepsLoaderTests.cs index 5d849fd3eb3..c4abe9ab5b8 100644 --- a/src/Nethermind/Nethermind.Runner.Test/Ethereum/Steps/EthereumStepsLoaderTests.cs +++ b/src/Nethermind/Nethermind.Runner.Test/Ethereum/Steps/EthereumStepsLoaderTests.cs @@ -40,7 +40,7 @@ public void BuildInSteps_IsCorrect() steps.AddRange(LoadStepInfoFromAssembly(typeof(InitializeBlockTree).Assembly)); steps.AddRange(LoadStepInfoFromAssembly(typeof(EthereumRunner).Assembly)); - HashSet optionalSteps = [typeof(RunVerifyTrie), typeof(ExitOnInvalidBlock)]; + HashSet optionalSteps = [typeof(RunVerifyTrie), typeof(ExitOnInvalidBlock), typeof(ImportFlatDb)]; steps = steps.Where((s) => !optionalSteps.Contains(s.StepBaseType)).ToHashSet(); using IContainer container = new ContainerBuilder() diff --git a/src/Nethermind/Nethermind.Runner.Test/Module/FlatRocksDbConfigAdjusterTests.cs 
b/src/Nethermind/Nethermind.Runner.Test/Module/FlatRocksDbConfigAdjusterTests.cs new file mode 100644 index 00000000000..3e2e87f6dc5 --- /dev/null +++ b/src/Nethermind/Nethermind.Runner.Test/Module/FlatRocksDbConfigAdjusterTests.cs @@ -0,0 +1,95 @@ +// SPDX-FileCopyrightText: 2026 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System; +using FluentAssertions; +using Nethermind.Core; +using Nethermind.Db; +using Nethermind.Db.Rocks.Config; +using Nethermind.Init.Modules; +using Nethermind.Logging; +using Nethermind.State.Flat; +using NSubstitute; +using NUnit.Framework; + +namespace Nethermind.Runner.Test.Module; + +[TestFixture] +[Parallelizable(ParallelScope.Self)] +public class FlatRocksDbConfigAdjusterTests +{ + private IRocksDbConfigFactory _baseFactory = null!; + private IFlatDbConfig _flatDbConfig = null!; + private IDisposableStack _disposeStack = null!; + private IRocksDbConfig _baseConfig = null!; + + [SetUp] + public void SetUp() + { + _baseFactory = Substitute.For(); + _flatDbConfig = Substitute.For(); + _disposeStack = Substitute.For(); + _baseConfig = Substitute.For(); + + _baseConfig.RocksDbOptions.Returns("base_options=true;"); + _baseConfig.WriteBufferSize.Returns((ulong)64_000_000); + + _baseFactory.GetForDatabase(Arg.Any(), Arg.Any()).Returns(_baseConfig); + } + + [Test] + public void NonFlatDatabase_ReturnsBaseConfig() + { + _flatDbConfig.Layout.Returns(FlatLayout.Flat); + _flatDbConfig.BlockCacheSizeBudget.Returns(1_000_000_000L); + + var adjuster = new FlatRocksDbConfigAdjuster(_baseFactory, _flatDbConfig, _disposeStack, LimboLogs.Instance); + + IRocksDbConfig result = adjuster.GetForDatabase("State0", null); + + result.Should().BeSameAs(_baseConfig); + } + + [Test] + public void FlatDatabase_WithFlatLayout_DoesNotAddPartitionedIndexOptions() + { + _flatDbConfig.Layout.Returns(FlatLayout.Flat); + _flatDbConfig.BlockCacheSizeBudget.Returns(1_000_000_000L); + + var adjuster = new 
FlatRocksDbConfigAdjuster(_baseFactory, _flatDbConfig, _disposeStack, LimboLogs.Instance); + + IRocksDbConfig result = adjuster.GetForDatabase(nameof(DbNames.Flat), nameof(FlatDbColumns.Metadata)); + + result.RocksDbOptions.Should().NotContain("optimize_filters_for_hits"); + result.RocksDbOptions.Should().NotContain("partition_filters"); + result.RocksDbOptions.Should().NotContain("kTwoLevelIndexSearch"); + } + + [Test] + public void FlatDatabase_WithFlatInTrieLayout_AddsPartitionedIndexOptions() + { + _flatDbConfig.Layout.Returns(FlatLayout.FlatInTrie); + _flatDbConfig.BlockCacheSizeBudget.Returns(1_000_000_000L); + + var adjuster = new FlatRocksDbConfigAdjuster(_baseFactory, _flatDbConfig, _disposeStack, LimboLogs.Instance); + + IRocksDbConfig result = adjuster.GetForDatabase(nameof(DbNames.Flat), nameof(FlatDbColumns.Metadata)); + + result.RocksDbOptions.Should().Contain("optimize_filters_for_hits=true;"); + result.RocksDbOptions.Should().Contain("block_based_table_factory.partition_filters=true;"); + result.RocksDbOptions.Should().Contain("block_based_table_factory.index_type=kTwoLevelIndexSearch;"); + } + + [Test] + public void FlatDatabase_DelegatesToBaseFactoryWithCorrectParameters() + { + _flatDbConfig.Layout.Returns(FlatLayout.Flat); + _flatDbConfig.BlockCacheSizeBudget.Returns(1_000_000_000L); + + var adjuster = new FlatRocksDbConfigAdjuster(_baseFactory, _flatDbConfig, _disposeStack, LimboLogs.Instance); + + adjuster.GetForDatabase(nameof(DbNames.Flat), nameof(FlatDbColumns.Account)); + + _baseFactory.Received(1).GetForDatabase(nameof(DbNames.Flat), nameof(FlatDbColumns.Account)); + } +} diff --git a/src/Nethermind/Nethermind.Runner/packages.lock.json b/src/Nethermind/Nethermind.Runner/packages.lock.json index 858ddab75fd..b406492fc3a 100644 --- a/src/Nethermind/Nethermind.Runner/packages.lock.json +++ b/src/Nethermind/Nethermind.Runner/packages.lock.json @@ -885,7 +885,8 @@ "Nethermind.Network.Discovery": "[1.37.0-unstable, )", "Nethermind.Network.Dns": 
"[1.37.0-unstable, )", "Nethermind.Network.Enr": "[1.37.0-unstable, )", - "Nethermind.Specs": "[1.37.0-unstable, )" + "Nethermind.Specs": "[1.37.0-unstable, )", + "Nethermind.State.Flat": "[1.37.0-unstable, )" } }, "nethermind.init.snapshot": { @@ -1117,6 +1118,18 @@ "Nethermind.Trie": "[1.37.0-unstable, )" } }, + "nethermind.state.flat": { + "type": "Project", + "dependencies": { + "Nethermind.Core": "[1.37.0-unstable, )", + "Nethermind.Db": "[1.37.0-unstable, )", + "Nethermind.Evm": "[1.37.0-unstable, )", + "Nethermind.Serialization.Rlp": "[1.37.0-unstable, )", + "Nethermind.State": "[1.37.0-unstable, )", + "Nethermind.Trie": "[1.37.0-unstable, )", + "System.IO.Hashing": "[10.0.2, )" + } + }, "nethermind.synchronization": { "type": "Project", "dependencies": { @@ -1569,6 +1582,12 @@ "System.Security.Cryptography.ProtectedData": "10.0.1" } }, + "System.IO.Hashing": { + "type": "CentralTransitive", + "requested": "[10.0.2, )", + "resolved": "10.0.2", + "contentHash": "AKJknIFi9O3+rGExxTry188JPvUoZAPcCtS2qdqyFhIzsxQ1Ap94BeGDG0VzVEHakhmRxmJtVih6TsHoghIt/g==" + }, "System.Security.Cryptography.ProtectedData": { "type": "CentralTransitive", "requested": "[10.0.1, )", diff --git a/src/Nethermind/Nethermind.State.Flat.Test/FlatDbManagerTests.cs b/src/Nethermind/Nethermind.State.Flat.Test/FlatDbManagerTests.cs new file mode 100644 index 00000000000..1be8ff58074 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat.Test/FlatDbManagerTests.cs @@ -0,0 +1,161 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System.Threading; +using System.Threading.Tasks; +using Nethermind.Config; +using Nethermind.Core.Crypto; +using Nethermind.Db; +using Nethermind.Logging; +using Nethermind.State.Flat.Persistence; +using NSubstitute; +using NUnit.Framework; + +namespace Nethermind.State.Flat.Test; + +[TestFixture] +public class FlatDbManagerTests +{ + private IResourcePool _resourcePool = null!; + private 
IProcessExitSource _processExitSource = null!; + private ITrieNodeCache _trieNodeCache = null!; + private ISnapshotCompactor _snapshotCompactor = null!; + private ISnapshotRepository _snapshotRepository = null!; + private IPersistenceManager _persistenceManager = null!; + private IFlatDbConfig _config = null!; + private CancellationTokenSource _cts = null!; + + [SetUp] + public void SetUp() + { + _resourcePool = Substitute.For(); + _cts = new CancellationTokenSource(); + _processExitSource = Substitute.For(); + _processExitSource.Token.Returns(_cts.Token); + _trieNodeCache = Substitute.For(); + _snapshotCompactor = Substitute.For(); + _snapshotRepository = Substitute.For(); + _persistenceManager = Substitute.For(); + _config = new FlatDbConfig { CompactSize = 16, MaxInFlightCompactJob = 4, InlineCompaction = true }; + } + + [TearDown] + public void TearDown() + { + _cts.Cancel(); + _cts.Dispose(); + } + + private FlatDbManager CreateManager() => new( + _resourcePool, + _processExitSource, + _trieNodeCache, + _snapshotCompactor, + _snapshotRepository, + _persistenceManager, + _config, + LimboLogs.Instance, + enableDetailedMetrics: false); + + private static StateId CreateStateId(long blockNumber, byte rootByte = 0) + { + byte[] bytes = new byte[32]; + bytes[0] = rootByte; + return new StateId(blockNumber, new ValueHash256(bytes)); + } + + [Test] + public async Task HasStateForBlock_FoundInRepository_ReturnsTrue() + { + StateId stateId = CreateStateId(10); + _snapshotRepository.HasState(stateId).Returns(true); + _persistenceManager.GetCurrentPersistedStateId().Returns(CreateStateId(5)); + + await using FlatDbManager manager = CreateManager(); + bool result = manager.HasStateForBlock(stateId); + + Assert.That(result, Is.True); + } + + [Test] + public async Task HasStateForBlock_FoundInPersistence_ReturnsTrue() + { + StateId stateId = CreateStateId(10); + _snapshotRepository.HasState(stateId).Returns(false); + 
_persistenceManager.GetCurrentPersistedStateId().Returns(stateId); + + await using FlatDbManager manager = CreateManager(); + bool result = manager.HasStateForBlock(stateId); + + Assert.That(result, Is.True); + } + + [Test] + public async Task HasStateForBlock_NotFound_ReturnsFalse() + { + StateId stateId = CreateStateId(10); + _snapshotRepository.HasState(stateId).Returns(false); + _persistenceManager.GetCurrentPersistedStateId().Returns(CreateStateId(5)); + + await using FlatDbManager manager = CreateManager(); + bool result = manager.HasStateForBlock(stateId); + + Assert.That(result, Is.False); + } + + [Test] + public async Task AddSnapshot_BlockBelowPersistedState_ReturnsEarlyAndLogsWarning() + { + StateId persistedStateId = CreateStateId(100); + _persistenceManager.GetCurrentPersistedStateId().Returns(persistedStateId); + + ResourcePool realResourcePool = new(_config); + StateId snapshotFrom = CreateStateId(50); + StateId snapshotTo = CreateStateId(51); + Snapshot snapshot = realResourcePool.CreateSnapshot(snapshotFrom, snapshotTo, ResourcePool.Usage.MainBlockProcessing); + TransientResource transientResource = realResourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing); + + await using FlatDbManager manager = CreateManager(); + manager.AddSnapshot(snapshot, transientResource); + + _snapshotRepository.DidNotReceive().TryAddSnapshot(Arg.Any()); + } + + [Test] + public async Task AddSnapshot_ValidSnapshot_AddsToRepository() + { + StateId persistedStateId = CreateStateId(5); + _persistenceManager.GetCurrentPersistedStateId().Returns(persistedStateId); + _snapshotRepository.TryAddSnapshot(Arg.Any()).Returns(true); + + ResourcePool realResourcePool = new(_config); + StateId snapshotFrom = CreateStateId(10); + StateId snapshotTo = CreateStateId(11); + Snapshot snapshot = realResourcePool.CreateSnapshot(snapshotFrom, snapshotTo, ResourcePool.Usage.MainBlockProcessing); + TransientResource transientResource = 
realResourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing); + + await using FlatDbManager manager = CreateManager(); + manager.AddSnapshot(snapshot, transientResource); + + _snapshotRepository.Received(1).TryAddSnapshot(snapshot); + } + + [Test] + public async Task AddSnapshot_DuplicateSnapshot_DisposesSnapshotAndReturnsResource() + { + StateId persistedStateId = CreateStateId(5); + _persistenceManager.GetCurrentPersistedStateId().Returns(persistedStateId); + _snapshotRepository.TryAddSnapshot(Arg.Any()).Returns(false); + + ResourcePool realResourcePool = new(_config); + StateId snapshotFrom = CreateStateId(10); + StateId snapshotTo = CreateStateId(11); + Snapshot snapshot = realResourcePool.CreateSnapshot(snapshotFrom, snapshotTo, ResourcePool.Usage.MainBlockProcessing); + TransientResource transientResource = realResourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing); + + await using FlatDbManager manager = CreateManager(); + manager.AddSnapshot(snapshot, transientResource); + + _resourcePool.Received(1).ReturnCachedResource(ResourcePool.Usage.MainBlockProcessing, transientResource); + } +} diff --git a/src/Nethermind/Nethermind.State.Flat.Test/FlatOverridableWorldScopeTests.cs b/src/Nethermind/Nethermind.State.Flat.Test/FlatOverridableWorldScopeTests.cs new file mode 100644 index 00000000000..ca5a4c9ecae --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat.Test/FlatOverridableWorldScopeTests.cs @@ -0,0 +1,287 @@ +// SPDX-FileCopyrightText: 2026 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading; +using Autofac; +using Nethermind.Config; +using Nethermind.Core; +using Nethermind.Core.Crypto; +using Nethermind.Core.Test; +using Nethermind.Core.Test.Builders; +using Nethermind.Db; +using Nethermind.Evm.State; +using Nethermind.Init.Modules; +using Nethermind.Int256; +using Nethermind.Logging; +using 
Nethermind.State.Flat.Persistence; +using Nethermind.State.Flat.ScopeProvider; +using NSubstitute; +using NUnit.Framework; + +namespace Nethermind.State.Flat.Test; + +public class FlatOverridableWorldScopeTests +{ + private class TestContext : IDisposable + { + private readonly ContainerBuilder _containerBuilder; + private readonly CancellationTokenSource _cancellationTokenSource = new(); + + private IContainer? _container; + private IContainer Container => _container ??= _containerBuilder.Build(); + + public ResourcePool ResourcePool => field ??= Container.Resolve(); + public IFlatDbManager FlatDbManager => field ??= Container.Resolve(); + public FlatOverridableWorldScope OverridableScope => field ??= Container.Resolve(); + public List<(Snapshot Snapshot, TransientResource Resource)> FlatDbManagerAddSnapshotCalls { get; } = []; + + public TestContext(FlatDbConfig? config = null) + { + config ??= new FlatDbConfig(); + IPersistence.IPersistenceReader persistenceReader = Substitute.For(); + + _containerBuilder = new ContainerBuilder() + .AddModule(new FlatWorldStateModule(config)) + .AddSingleton(_ => persistenceReader) + .AddSingleton(ctx => + { + IFlatDbManager flatDbManager = Substitute.For(); + flatDbManager.When(it => it.AddSnapshot(Arg.Any(), Arg.Any())) + .Do(c => + { + Snapshot snapshot = (Snapshot)c[0]; + TransientResource transientResource = (TransientResource)c[1]; + FlatDbManagerAddSnapshotCalls.Add((snapshot, transientResource)); + }); + + flatDbManager.GatherReadOnlySnapshotBundle(Arg.Any()) + .Returns(_ => + { + SnapshotPooledList snapshotList = new(0); + return new ReadOnlySnapshotBundle(snapshotList, Substitute.For(), false); + }); + + flatDbManager.HasStateForBlock(Arg.Any()) + .Returns(false); + + return flatDbManager; + }) + .Bind() + .AddSingleton(_ => new CancellationTokenSourceProcessExitSource(_cancellationTokenSource)) + .AddSingleton(LimboLogs.Instance) + .AddSingleton(config) + .AddSingleton(_ => Substitute.For()) + .AddSingleton(_ => new 
TrieStoreScopeProvider.KeyValueWithBatchingBackedCodeDb(new TestMemDb())); + + // Register keyed IDb for code database + _containerBuilder.RegisterInstance(new TestMemDb()).Keyed(DbNames.Code); + } + + public void Dispose() + { + _cancellationTokenSource.Cancel(); + + foreach ((Snapshot snapshot, TransientResource resource) in FlatDbManagerAddSnapshotCalls) + { + snapshot.Dispose(); + ResourcePool.ReturnCachedResource(ResourcePool.Usage.MainBlockProcessing, resource); + } + + _container?.Dispose(); + _cancellationTokenSource.Dispose(); + } + + private class CancellationTokenSourceProcessExitSource(CancellationTokenSource cancellationTokenSource) : IProcessExitSource + { + public CancellationToken Token => cancellationTokenSource.Token; + public void Exit(int exitCode) => throw new NotImplementedException(); + } + } + + [Test] + public void CommitThroughOverridableScope_StoresSnapshotLocally_ReadableWithinOverridableScope() + { + using TestContext ctx = new(); + FlatOverridableWorldScope overridableScope = ctx.OverridableScope; + + Address testAddress = TestItem.AddressA; + Account testAccount = TestItem.GenerateRandomAccount(); + UInt256 storageIndex1 = 42; + UInt256 storageIndex2 = 100; + byte[] storageValue1 = [1, 2, 3, 4]; + byte[] storageValue2 = [5, 6, 7, 8, 9, 10]; + + // Write account and storage, then commit + BlockHeader? 
baseBlock = null; + using (IWorldStateScopeProvider.IScope scope = overridableScope.WorldState.BeginScope(null)) + { + using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1)) + { + writeBatch.Set(testAddress, testAccount); + + using (IWorldStateScopeProvider.IStorageWriteBatch storageBatch = writeBatch.CreateStorageWriteBatch(testAddress, 2)) + { + storageBatch.Set(storageIndex1, storageValue1); + storageBatch.Set(storageIndex2, storageValue2); + } + } + scope.Commit(1); + baseBlock = Build.A.BlockHeader.WithNumber(1).WithStateRoot(scope.RootHash).TestObject; + } + + // Verify account readable within new scope + using (IWorldStateScopeProvider.IScope scope = overridableScope.WorldState.BeginScope(baseBlock)) + { + Account? readAccount = scope.Get(testAddress); + Assert.That(readAccount, Is.Not.Null); + Assert.That(readAccount!.Balance, Is.EqualTo(testAccount.Balance)); + } + + // Verify account readable through GlobalStateReader + Assert.That(overridableScope.GlobalStateReader.TryGetAccount(baseBlock, testAddress, out AccountStruct acc), Is.True); + Assert.That(acc.Balance, Is.EqualTo(testAccount.Balance)); + + // Verify storage readable through GlobalStateReader + ReadOnlySpan<byte> readValue1 = overridableScope.GlobalStateReader.GetStorage(baseBlock, testAddress, storageIndex1); + ReadOnlySpan<byte> readValue2 = overridableScope.GlobalStateReader.GetStorage(baseBlock, testAddress, storageIndex2); + Assert.That(readValue1.ToArray(), Is.EqualTo(storageValue1), "Storage slot 1 should be readable"); + Assert.That(readValue2.ToArray(), Is.EqualTo(storageValue2), "Storage slot 2 should be readable"); + + // Verify non-existent slot returns zeros + ReadOnlySpan<byte> nonExistent = overridableScope.GlobalStateReader.GetStorage(baseBlock, testAddress, 999); + Assert.That(nonExistent.ToArray().All(b => b == 0), Is.True, "Non-existent storage slot should return zeros"); + } + + [Test] + public void
CommitThroughOverridableScope_DoesNotCallMainFlatDbManager() + { + using TestContext ctx = new(); + FlatOverridableWorldScope overridableScope = ctx.OverridableScope; + + Address testAddress = TestItem.AddressA; + Account testAccount = TestItem.GenerateRandomAccount(); + + BlockHeader? baseBlock = null; + using (IWorldStateScopeProvider.IScope scope = overridableScope.WorldState.BeginScope(baseBlock)) + { + using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1)) + { + writeBatch.Set(testAddress, testAccount); + } + scope.Commit(1); + } + + // The main FlatDbManager should NOT receive any AddSnapshot calls + // because commits go to FlatOverridableWorldScope's local _snapshots dictionary + Assert.That(ctx.FlatDbManagerAddSnapshotCalls, Is.Empty); + } + + [Test] + public void MultipleCommits_CreateChainedSnapshots_AllReadable() + { + using TestContext ctx = new(); + FlatOverridableWorldScope overridableScope = ctx.OverridableScope; + + Address addressA = TestItem.AddressA; + Address addressB = TestItem.AddressB; + Address addressC = TestItem.AddressC; + Account accountA = TestItem.GenerateRandomAccount(); + Account accountB = TestItem.GenerateRandomAccount(); + Account accountC = TestItem.GenerateRandomAccount(); + + // Commit block 1 with account A + BlockHeader? block1 = null; + using (IWorldStateScopeProvider.IScope scope = overridableScope.WorldState.BeginScope(null)) + { + using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1)) + { + writeBatch.Set(addressA, accountA); + } + scope.Commit(1); + block1 = Build.A.BlockHeader.WithNumber(1).WithStateRoot(scope.RootHash).TestObject; + } + + // Commit block 2 with account B (building on block 1) + BlockHeader? 
block2 = null; + using (IWorldStateScopeProvider.IScope scope = overridableScope.WorldState.BeginScope(block1)) + { + using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1)) + { + writeBatch.Set(addressB, accountB); + } + scope.Commit(2); + block2 = Build.A.BlockHeader.WithNumber(2).WithStateRoot(scope.RootHash).TestObject; + } + + // Commit block 3 with account C (building on block 2) + BlockHeader? block3 = null; + using (IWorldStateScopeProvider.IScope scope = overridableScope.WorldState.BeginScope(block2)) + { + using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1)) + { + writeBatch.Set(addressC, accountC); + } + scope.Commit(3); + block3 = Build.A.BlockHeader.WithNumber(3).WithStateRoot(scope.RootHash).TestObject; + } + + // Verify final state (block 3) sees all three accounts + Assert.That(overridableScope.GlobalStateReader.TryGetAccount(block3, addressA, out AccountStruct accA3), Is.True, "Block 3 should see account A"); + Assert.That(accA3.Balance, Is.EqualTo(accountA.Balance)); + Assert.That(overridableScope.GlobalStateReader.TryGetAccount(block3, addressB, out AccountStruct accB3), Is.True, "Block 3 should see account B"); + Assert.That(accB3.Balance, Is.EqualTo(accountB.Balance)); + Assert.That(overridableScope.GlobalStateReader.TryGetAccount(block3, addressC, out AccountStruct accC3), Is.True, "Block 3 should see account C"); + Assert.That(accC3.Balance, Is.EqualTo(accountC.Balance)); + + // Verify intermediate state (block 2) sees A+B but not C + Assert.That(overridableScope.GlobalStateReader.TryGetAccount(block2, addressA, out AccountStruct accA2), Is.True, "Block 2 should see account A"); + Assert.That(accA2.Balance, Is.EqualTo(accountA.Balance)); + Assert.That(overridableScope.GlobalStateReader.TryGetAccount(block2, addressB, out AccountStruct accB2), Is.True, "Block 2 should see account B"); + Assert.That(accB2.Balance, Is.EqualTo(accountB.Balance)); + 
Assert.That(overridableScope.GlobalStateReader.TryGetAccount(block2, addressC, out _), Is.False, "Block 2 should NOT see account C"); + + // Verify initial state (block 1) sees only A + Assert.That(overridableScope.GlobalStateReader.TryGetAccount(block1, addressA, out AccountStruct accA1), Is.True, "Block 1 should see account A"); + Assert.That(accA1.Balance, Is.EqualTo(accountA.Balance)); + Assert.That(overridableScope.GlobalStateReader.TryGetAccount(block1, addressB, out _), Is.False, "Block 1 should NOT see account B"); + Assert.That(overridableScope.GlobalStateReader.TryGetAccount(block1, addressC, out _), Is.False, "Block 1 should NOT see account C"); + + // Verify no calls to main FlatDbManager + Assert.That(ctx.FlatDbManagerAddSnapshotCalls, Is.Empty); + } + + [Test] + public void ResetOverrides_DisposesAllLocalSnapshots() + { + using TestContext ctx = new(); + FlatOverridableWorldScope overridableScope = ctx.OverridableScope; + + Address testAddress = TestItem.AddressA; + Account testAccount = TestItem.GenerateRandomAccount(); + + // Commit multiple states + BlockHeader? 
block1 = null; + using (IWorldStateScopeProvider.IScope scope = overridableScope.WorldState.BeginScope(null)) + { + using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1)) + { + writeBatch.Set(testAddress, testAccount); + } + scope.Commit(1); + block1 = Build.A.BlockHeader.WithNumber(1).WithStateRoot(scope.RootHash).TestObject; + } + + // Verify state exists before reset + Assert.That(overridableScope.GlobalStateReader.TryGetAccount(block1, testAddress, out _), Is.True, "Should see account before reset"); + + // Reset overrides + overridableScope.ResetOverrides(); + + // After reset, the local snapshots are cleared, so state falls through to main FlatDbManager + // which is mocked to return empty/not found + Assert.That(overridableScope.GlobalStateReader.TryGetAccount(block1, testAddress, out _), Is.False, "Should NOT see account after reset"); + } +} diff --git a/src/Nethermind/Nethermind.State.Flat.Test/FlatTrieVerifierTests.cs b/src/Nethermind/Nethermind.State.Flat.Test/FlatTrieVerifierTests.cs new file mode 100644 index 00000000000..381201f1fa4 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat.Test/FlatTrieVerifierTests.cs @@ -0,0 +1,392 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System; +using System.Collections.Generic; +using System.Threading; +using Nethermind.Core; +using Nethermind.Core.Crypto; +using Nethermind.Core.Extensions; +using Nethermind.Core.Test; +using Nethermind.Core.Test.Builders; +using Nethermind.Db; +using Nethermind.Int256; +using Nethermind.Logging; +using Nethermind.Serialization.Rlp; +using Nethermind.State.Flat.Persistence; +using Nethermind.Trie; +using Nethermind.Trie.Pruning; +using NUnit.Framework; + +namespace Nethermind.State.Flat.Test; + +/// +/// Tests for FlatTrieVerifier which handles both hashed mode (single-pass co-iteration) +/// and preimage mode (two-pass verification). 
+/// +[TestFixture(FlatLayout.Flat)] +[TestFixture(FlatLayout.PreimageFlat)] +public class FlatTrieVerifierTests(FlatLayout layout) +{ + private MemDb _trieDb = null!; + private RawScopedTrieStore _trieStore = null!; + private StateTree _stateTree = null!; + private ILogManager _logManager = null!; + private TestMemColumnsDb _columnsDb = null!; + private IPersistence _persistence = null!; + + [SetUp] + public void SetUp() + { + _trieDb = new MemDb(); + _trieStore = new RawScopedTrieStore(_trieDb); + _stateTree = new StateTree(_trieStore, LimboLogs.Instance); + _logManager = LimboLogs.Instance; + + _columnsDb = new TestMemColumnsDb(); + _persistence = layout == FlatLayout.PreimageFlat + ? new PreimageRocksdbPersistence(_columnsDb) + : new RocksDbPersistence(_columnsDb); + } + + [TearDown] + public void TearDown() + { + _trieDb.Dispose(); + _columnsDb.Dispose(); + } + + private StateId GetCurrentState() + { + using IPersistence.IPersistenceReader reader = _persistence.CreateReader(); + return reader.CurrentState; + } + + private void WriteAccountToFlat(Address address, Account account, StateId toState) + { + StateId fromState = GetCurrentState(); + using IPersistence.IWriteBatch batch = _persistence.CreateWriteBatch(fromState, toState, WriteFlags.DisableWAL); + batch.SetAccount(address, account); + } + + private void WriteAccountsToFlat((Address address, Account account)[] accounts, StateId toState) + { + StateId fromState = GetCurrentState(); + using IPersistence.IWriteBatch batch = _persistence.CreateWriteBatch(fromState, toState, WriteFlags.DisableWAL); + foreach ((Address address, Account account) in accounts) + { + batch.SetAccount(address, account); + } + } + + private void WriteStorageDirectToDb(Address address, UInt256 slot, byte[] value) + { + TestMemDb storageDb = (TestMemDb)_columnsDb.GetColumnDb(FlatDbColumns.Storage); + + ValueHash256 addrHash; + ValueHash256 slotHash; + + if (layout == FlatLayout.PreimageFlat) + { + addrHash = 
CreatePreimageAddressKey(address); + slotHash = ValueKeccak.Zero; + slot.ToBigEndian(slotHash.BytesAsSpan); + } + else + { + addrHash = ValueKeccak.Compute(address.Bytes); + Span<byte> slotBytes = stackalloc byte[32]; + slot.ToBigEndian(slotBytes); + slotHash = ValueKeccak.Compute(slotBytes); + } + + byte[] storageKey = new byte[52]; + addrHash.Bytes[..4].CopyTo(storageKey.AsSpan()[..4]); + slotHash.Bytes.CopyTo(storageKey.AsSpan()[4..36]); + addrHash.Bytes[4..20].CopyTo(storageKey.AsSpan()[36..52]); + + storageDb.Set(storageKey, ((ReadOnlySpan<byte>)value).WithoutLeadingZeros().ToArray()); + } + + private void CorruptAccountInFlat(Address address, Account corruptedAccount) + { + TestMemDb accountDb = (TestMemDb)_columnsDb.GetColumnDb(FlatDbColumns.Account); + ValueHash256 addrKey = layout == FlatLayout.PreimageFlat + ? CreatePreimageAddressKey(address) + : ValueKeccak.Compute(address.Bytes); + + using var stream = AccountDecoder.Slim.EncodeToNewNettyStream(corruptedAccount); + accountDb.Set(addrKey.BytesAsSpan[..20], stream.AsSpan().ToArray()); + } + + private static ValueHash256 CreatePreimageAddressKey(Address address) + { + ValueHash256 fakeHash = ValueKeccak.Zero; + address.Bytes.CopyTo(fakeHash.BytesAsSpan); + return fakeHash; + } + + private StorageTree CreateStorageTree(Address address, (UInt256 slot, byte[] value)[] slots) + { + Hash256 addressHash = Keccak.Compute(address.Bytes); + IScopedTrieStore storageTrieStore = (IScopedTrieStore)_trieStore.GetStorageTrieNodeResolver(addressHash); + StorageTree storageTree = new StorageTree(storageTrieStore, _logManager); + + foreach ((UInt256 slot, byte[] value) in slots) + { + storageTree.Set(slot, value); + } + storageTree.Commit(); + return storageTree; + } + + [Test] + public void Verify_EmptyState_Succeeds() + { + Hash256 stateRoot = Keccak.EmptyTreeHash; + + using IPersistence.IPersistenceReader reader = _persistence.CreateReader(); + FlatTrieVerifier verifier = new FlatTrieVerifier(_logManager); + verifier.Verify(reader,
_trieStore, stateRoot, CancellationToken.None); + + Assert.That(verifier.Stats.AccountCount, Is.EqualTo(0)); + Assert.That(verifier.Stats.MismatchedAccount, Is.EqualTo(0)); + Assert.That(verifier.Stats.MissingInFlat, Is.EqualTo(0)); + Assert.That(verifier.Stats.MissingInTrie, Is.EqualTo(0)); + } + + [Test] + public void Verify_SingleAccount_Matches() + { + Address address = TestItem.AddressA; + Account account = new Account(1, 100); + + _stateTree.Set(address, account); + _stateTree.Commit(); + Hash256 stateRoot = _stateTree.RootHash; + + StateId toState = new StateId(1, stateRoot); + WriteAccountToFlat(address, account, toState); + + using IPersistence.IPersistenceReader reader = _persistence.CreateReader(); + FlatTrieVerifier verifier = new FlatTrieVerifier(_logManager); + verifier.Verify(reader, _trieStore, stateRoot, CancellationToken.None); + + Assert.That(verifier.Stats.AccountCount, Is.EqualTo(1)); + Assert.That(verifier.Stats.MismatchedAccount, Is.EqualTo(0)); + Assert.That(verifier.Stats.MissingInFlat, Is.EqualTo(0)); + Assert.That(verifier.Stats.MissingInTrie, Is.EqualTo(0)); + } + + [Test] + public void Verify_MultipleAccounts_AllMatch() + { + Address addressA = TestItem.AddressA; + Address addressB = TestItem.AddressB; + Address addressC = TestItem.AddressC; + + Account accountA = new Account(1, 100); + Account accountB = new Account(2, 200); + Account accountC = new Account(3, 300); + + _stateTree.Set(addressA, accountA); + _stateTree.Set(addressB, accountB); + _stateTree.Set(addressC, accountC); + _stateTree.Commit(); + Hash256 stateRoot = _stateTree.RootHash; + + StateId toState = new StateId(1, stateRoot); + WriteAccountsToFlat([(addressA, accountA), (addressB, accountB), (addressC, accountC)], toState); + + using IPersistence.IPersistenceReader reader = _persistence.CreateReader(); + FlatTrieVerifier verifier = new FlatTrieVerifier(_logManager); + verifier.Verify(reader, _trieStore, stateRoot, CancellationToken.None); + + 
Assert.That(verifier.Stats.AccountCount, Is.EqualTo(3)); + Assert.That(verifier.Stats.MismatchedAccount, Is.EqualTo(0)); + Assert.That(verifier.Stats.MissingInFlat, Is.EqualTo(0)); + Assert.That(verifier.Stats.MissingInTrie, Is.EqualTo(0)); + } + + [TestCase(1UL, 100UL, 1UL, 200UL, Description = "Mismatched balance")] + [TestCase(5UL, 100UL, 10UL, 100UL, Description = "Mismatched nonce")] + public void Verify_MismatchedAccount_DetectsMismatch(ulong trieNonce, ulong trieBalance, ulong flatNonce, ulong flatBalance) + { + Address address = TestItem.AddressA; + Account trieAccount = new Account(trieNonce, trieBalance); + Account flatAccount = new Account(flatNonce, flatBalance); + + _stateTree.Set(address, trieAccount); + _stateTree.Commit(); + Hash256 stateRoot = _stateTree.RootHash; + + StateId toState = new StateId(1, stateRoot); + WriteAccountToFlat(address, trieAccount, toState); + CorruptAccountInFlat(address, flatAccount); + + using IPersistence.IPersistenceReader reader = _persistence.CreateReader(); + FlatTrieVerifier verifier = new FlatTrieVerifier(_logManager); + verifier.Verify(reader, _trieStore, stateRoot, CancellationToken.None); + + Assert.That(verifier.Stats.AccountCount, Is.EqualTo(1)); + Assert.That(verifier.Stats.MismatchedAccount, Is.EqualTo(1)); + } + + [Test] + public void Verify_AccountInTrieNotInFlat_DetectsMissingInFlat() + { + Address address = TestItem.AddressA; + Account account = new Account(1, 100); + + // Add to trie but not to flat + _stateTree.Set(address, account); + _stateTree.Commit(); + Hash256 stateRoot = _stateTree.RootHash; + + using IPersistence.IPersistenceReader reader = _persistence.CreateReader(); + FlatTrieVerifier verifier = new FlatTrieVerifier(_logManager); + verifier.Verify(reader, _trieStore, stateRoot, CancellationToken.None); + + Assert.That(verifier.Stats.AccountCount, Is.EqualTo(1)); + Assert.That(verifier.Stats.MissingInFlat, Is.EqualTo(1)); + Assert.That(verifier.Stats.MissingInTrie, Is.EqualTo(0)); + } + + 
[Test] + public void Verify_AccountInFlatNotInTrie_DetectsMissingInTrie() + { + Address address = TestItem.AddressA; + Account account = new Account(1, 100); + + // Empty trie + Hash256 stateRoot = Keccak.EmptyTreeHash; + + // Add to flat only + StateId toState = new StateId(1, stateRoot); + WriteAccountToFlat(address, account, toState); + + using IPersistence.IPersistenceReader reader = _persistence.CreateReader(); + FlatTrieVerifier verifier = new FlatTrieVerifier(_logManager); + verifier.Verify(reader, _trieStore, stateRoot, CancellationToken.None); + + Assert.That(verifier.Stats.AccountCount, Is.EqualTo(1)); + Assert.That(verifier.Stats.MissingInFlat, Is.EqualTo(0)); + Assert.That(verifier.Stats.MissingInTrie, Is.EqualTo(1)); + } + + [Test] + public void Verify_FlatHasExtraAccounts_ReportsMissing() + { + // Trie has 2 accounts, flat has 3 (1 extra) + Address addressA = TestItem.AddressA; + Address addressB = TestItem.AddressB; + Address addressExtra = TestItem.AddressC; + + Account accountA = new Account(1, 100); + Account accountB = new Account(2, 200); + Account accountExtra = new Account(3, 300); + + _stateTree.Set(addressA, accountA); + _stateTree.Set(addressB, accountB); + // Note: addressExtra NOT added to trie + _stateTree.Commit(); + Hash256 stateRoot = _stateTree.RootHash; + + StateId toState = new StateId(1, stateRoot); + WriteAccountsToFlat([(addressA, accountA), (addressB, accountB), (addressExtra, accountExtra)], toState); + + using IPersistence.IPersistenceReader reader = _persistence.CreateReader(); + FlatTrieVerifier verifier = new FlatTrieVerifier(_logManager); + verifier.Verify(reader, _trieStore, stateRoot, CancellationToken.None); + + Assert.That(verifier.Stats.AccountCount, Is.EqualTo(3)); + Assert.That(verifier.Stats.MismatchedAccount, Is.EqualTo(0)); + Assert.That(verifier.Stats.MissingInTrie, Is.EqualTo(1)); + } + + [Test] + public void Verify_Storage_AllMatch() + { + Address address = TestItem.AddressA; + StorageTree storageTree = 
CreateStorageTree(address, [((UInt256)1, [0x11]), ((UInt256)2, [0x22])]); + Account account = new Account(1, 100, storageTree.RootHash, Keccak.Compute([1])); + + _stateTree.Set(address, account); + _stateTree.Commit(); + Hash256 stateRoot = _stateTree.RootHash; + + StateId toState = new StateId(1, stateRoot); + WriteAccountToFlat(address, account, toState); + WriteStorageDirectToDb(address, 1, [0x11]); + WriteStorageDirectToDb(address, 2, [0x22]); + + using IPersistence.IPersistenceReader reader = _persistence.CreateReader(); + FlatTrieVerifier verifier = new FlatTrieVerifier(_logManager); + verifier.Verify(reader, _trieStore, stateRoot, CancellationToken.None); + + Assert.That(verifier.Stats.AccountCount, Is.EqualTo(1)); + Assert.That(verifier.Stats.SlotCount, Is.EqualTo(2)); + Assert.That(verifier.Stats.MismatchedAccount, Is.EqualTo(0)); + Assert.That(verifier.Stats.MismatchedSlot, Is.EqualTo(0)); + } + + [Test] + public void Verify_Storage_Mismatch() + { + Address address = TestItem.AddressA; + StorageTree storageTree = CreateStorageTree(address, [((UInt256)1, [0x11])]); + Account account = new Account(1, 100, storageTree.RootHash, Keccak.Compute([1])); + + _stateTree.Set(address, account); + _stateTree.Commit(); + Hash256 stateRoot = _stateTree.RootHash; + + StateId toState = new StateId(1, stateRoot); + WriteAccountToFlat(address, account, toState); + WriteStorageDirectToDb(address, 1, [0xFF]); // Wrong value + + using IPersistence.IPersistenceReader reader = _persistence.CreateReader(); + FlatTrieVerifier verifier = new FlatTrieVerifier(_logManager); + verifier.Verify(reader, _trieStore, stateRoot, CancellationToken.None); + + Assert.That(verifier.Stats.AccountCount, Is.EqualTo(1)); + Assert.That(verifier.Stats.SlotCount, Is.EqualTo(1)); + Assert.That(verifier.Stats.MismatchedSlot, Is.EqualTo(1)); + } + + [Test] + public void Verify_MixedScenario_DetectsAllIssues() + { + // Account A: in both, matches + Address addressA = TestItem.AddressA; + Account accountA 
= new Account(1, 100); + + // Account B: in trie only (missing in flat) + Address addressB = TestItem.AddressB; + Account accountB = new Account(2, 200); + + // Account C: mismatched + Address addressC = TestItem.AddressC; + Account trieAccountC = new Account(3, 300); + Account flatAccountC = new Account(3, 999); + + _stateTree.Set(addressA, accountA); + _stateTree.Set(addressB, accountB); + _stateTree.Set(addressC, trieAccountC); + _stateTree.Commit(); + Hash256 stateRoot = _stateTree.RootHash; + + StateId toState = new StateId(1, stateRoot); + WriteAccountsToFlat([(addressA, accountA), (addressC, trieAccountC)], toState); + // Note: addressB not added to flat + CorruptAccountInFlat(addressC, flatAccountC); + + using IPersistence.IPersistenceReader reader = _persistence.CreateReader(); + FlatTrieVerifier verifier = new FlatTrieVerifier(_logManager); + verifier.Verify(reader, _trieStore, stateRoot, CancellationToken.None); + + Assert.That(verifier.Stats.AccountCount, Is.EqualTo(3)); + Assert.That(verifier.Stats.MismatchedAccount, Is.EqualTo(1)); // Account C mismatched + Assert.That(verifier.Stats.MissingInFlat, Is.EqualTo(1)); // Account B missing in flat + } +} diff --git a/src/Nethermind/Nethermind.State.Flat.Test/FlatWorldStateScopeProviderTests.cs b/src/Nethermind/Nethermind.State.Flat.Test/FlatWorldStateScopeProviderTests.cs new file mode 100644 index 00000000000..e0291d3d6df --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat.Test/FlatWorldStateScopeProviderTests.cs @@ -0,0 +1,764 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System; +using System.Threading; +using Autofac; +using Nethermind.Blockchain.Synchronization; +using Nethermind.Config; +using Nethermind.Core; +using Nethermind.Core.Collections; +using Nethermind.Core.Crypto; +using Nethermind.Core.Test; +using Nethermind.Core.Test.Builders; +using Nethermind.Db; +using Nethermind.Evm.State; +using Nethermind.Init.Modules; 
+using Nethermind.Int256; +using Nethermind.Logging; +using Nethermind.State.Flat.Persistence; +using Nethermind.State.Flat.ScopeProvider; +using Nethermind.Trie; +using Nethermind.Trie.Pruning; +using NSubstitute; +using NUnit.Framework; + +namespace Nethermind.State.Flat.Test; + +public class FlatWorldStateScopeProviderTests +{ + + private class TestContext : IDisposable + { + private readonly ContainerBuilder _containerBuilder; + private readonly CancellationTokenSource _cancellationTokenSource = new CancellationTokenSource(); + + private IContainer? _container; + private IContainer Container => _container ??= _containerBuilder.Build(); + + public ResourcePool ResourcePool => field ??= Container.Resolve(); + public SnapshotPooledList ReadOnlySnapshots = new SnapshotPooledList(0); + public IPersistence.IPersistenceReader PersistenceReader => field ??= Container.Resolve(); + public Snapshot? LastCommittedSnapshot { get; set; } + public TransientResource? LastCreatedCachedResource { get; set; } + + public TestContext(FlatDbConfig? 
config = null) + { + config ??= new FlatDbConfig(); + + _containerBuilder = new ContainerBuilder() + .AddModule(new FlatWorldStateModule(config)) + .AddSingleton(_ => Substitute.For()) + .AddSingleton((ctx) => + { + ResourcePool resourcePool = ctx.Resolve(); + IFlatDbManager flatDiff = Substitute.For(); + flatDiff.When(it => it.AddSnapshot(Arg.Any(), Arg.Any())) + .Do(c => + { + Snapshot snapshot = (Snapshot)c[0]; + TransientResource transientResource = (TransientResource)c[1]; + + if (LastCommittedSnapshot is not null) + { + LastCommittedSnapshot.Dispose(); + } + LastCommittedSnapshot = snapshot; + + if (LastCreatedCachedResource is not null) + { + resourcePool.ReturnCachedResource(ResourcePool.Usage.MainBlockProcessing, transientResource); + } + LastCreatedCachedResource = transientResource; + }); + + return flatDiff; + }) + .Bind() + .AddSingleton(_ => new CancellationTokenSourceProcessExitSource(_cancellationTokenSource)) + .AddSingleton(LimboLogs.Instance) + .AddSingleton(config) + .AddSingleton(_ => new TrieStoreScopeProvider.KeyValueWithBatchingBackedCodeDb(new TestMemDb())) + ; + + // Externally owned because snapshot bundle take ownership + _containerBuilder.RegisterType() + .WithParameter(TypedParameter.From(false)) // recordDetailedMetrics + .WithParameter(TypedParameter.From(ReadOnlySnapshots)) + .ExternallyOwned(); + + ConfigureSnapshotBundle(); + ConfigureFlatWorldStateScope(); + } + + private void ConfigureSnapshotBundle() + { + _containerBuilder.RegisterType() + .SingleInstance() + .WithParameter(TypedParameter.From(ResourcePool.Usage.MainBlockProcessing)) + .ExternallyOwned(); + ; + } + + private void ConfigureFlatWorldStateScope() + { + _containerBuilder.RegisterType() + .SingleInstance() + .WithParameter(TypedParameter.From(new StateId(0, Keccak.EmptyTreeHash))) + ; + } + + public FlatWorldStateScope Scope => Container.Resolve(); + + public void Dispose() + { + _cancellationTokenSource.Cancel(); + + LastCommittedSnapshot?.Dispose(); + if 
(LastCreatedCachedResource is not null) ResourcePool.ReturnCachedResource(ResourcePool.Usage.MainBlockProcessing, LastCreatedCachedResource); + + _container?.Dispose(); + _cancellationTokenSource.Dispose(); + } + + public class CancellationTokenSourceProcessExitSource(CancellationTokenSource cancellationTokenSource) : IProcessExitSource + { + public CancellationToken Token => cancellationTokenSource.Token; + + public void Exit(int exitCode) => throw new NotImplementedException(); + } + + public void AddSnapshot(Action populator) + { + SnapshotContent snapshotContent = ResourcePool.GetSnapshotContent(ResourcePool.Usage.MainBlockProcessing); + populator(snapshotContent); + + ReadOnlySnapshots.Add(new Snapshot( + StateId.PreGenesis, + StateId.PreGenesis, + snapshotContent, + ResourcePool, + ResourcePool.Usage.MainBlockProcessing)); + } + } + + + #region Account and Slot Layering Tests + + [Test] + public void TestAccountAndSlotShadowingInSnapshots() + { + using TestContext ctx = new TestContext(); + + Address testAddress = TestItem.AddressA; + UInt256 slotIndex = 1; + + Account olderAccount = TestItem.GenerateRandomAccount(); + byte[] olderSlotValue = { 0x01, 0x02 }; + + Account newerAccount = TestItem.GenerateRandomAccount(); + byte[] newerSlotValue = { 0x03, 0x04, 0x05 }; + + // Layer 1: Older snapshot + ctx.AddSnapshot(content => + { + content.Accounts[testAddress] = olderAccount; + content.Storages[(testAddress, slotIndex)] = SlotValue.FromSpanWithoutLeadingZero(olderSlotValue); + }); + + // Layer 2: Newer snapshot (shadowing Layer 1) + ctx.AddSnapshot(content => + { + content.Accounts[testAddress] = newerAccount; + content.Storages[(testAddress, slotIndex)] = SlotValue.FromSpanWithoutLeadingZero(newerSlotValue); + }); + + // Layer 3: Another newer snapshot, but only for account + Account newestAccount = TestItem.GenerateRandomAccount(); + ctx.AddSnapshot(content => content.Accounts[testAddress] = newestAccount); + + // Verify account shadowed by newest snapshot 
(newestAccount) + Assert.That(ctx.Scope.Get(testAddress), Is.EqualTo(newestAccount)); + + // Verify slot shadowed by Layer 2 snapshot (newerSlotValue) + IWorldStateScopeProvider.IStorageTree storageTree = ctx.Scope.CreateStorageTree(testAddress); + Assert.That(storageTree.Get(slotIndex), Is.EqualTo(newerSlotValue)); + } + + [Test] + public void TestAccountAndSlotFromPersistence() + { + using TestContext ctx = new TestContext(); + + Address testAddress = TestItem.AddressA; + UInt256 slotIndex = 1; + Account persistedAccount = TestItem.GenerateRandomAccount(); + byte[] persistedSlotValue = { 0xDE, 0xAD, 0xBE, 0xEF }; + + // Setup Persistence Reader + ctx.PersistenceReader.GetAccount(testAddress).Returns(persistedAccount); + SlotValue outValue = SlotValue.FromSpanWithoutLeadingZero(persistedSlotValue); + ctx.PersistenceReader.TryGetSlot(testAddress, slotIndex, ref Arg.Any()) + .Returns(x => + { + x[2] = outValue; + return true; + }); + + // Verify both are retrieved from persistence + Assert.That(ctx.Scope.Get(testAddress), Is.EqualTo(persistedAccount)); + + IWorldStateScopeProvider.IStorageTree storageTree = ctx.Scope.CreateStorageTree(testAddress); + Assert.That(storageTree.Get(slotIndex), Is.EqualTo(persistedSlotValue)); + } + + [Test] + public void TestAccountAndSlotFromWrittenBatch() + { + using TestContext ctx = new TestContext(); + FlatWorldStateScope scope = ctx.Scope; + + Address testAddress = TestItem.AddressA; + UInt256 slotIndex = 1; + Account testAccount = TestItem.GenerateRandomAccount(); + byte[] writtenSlotValue = { 0xFF, 0xFF }; + + Account persistenceAccount = TestItem.GenerateRandomAccount(); + ctx.PersistenceReader.GetAccount(testAddress).Returns(persistenceAccount); + + // Add dummy snapshot + ctx.AddSnapshot(content => { }); + + // Write directly to write batch + using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1)) + { + writeBatch.Set(testAddress, testAccount); + IWorldStateScopeProvider.IStorageWriteBatch 
storageBatch = writeBatch.CreateStorageWriteBatch(testAddress, 1); + storageBatch.Set(slotIndex, writtenSlotValue); + storageBatch.Dispose(); + } + + // Verify written items shadow everything else + Account? resultAccount = scope.Get(testAddress); + Assert.That(resultAccount!.Balance, Is.EqualTo(testAccount.Balance)); + Assert.That(resultAccount!.Nonce, Is.EqualTo(testAccount.Nonce)); + + IWorldStateScopeProvider.IStorageTree storageTree = scope.CreateStorageTree(testAddress); + Assert.That(storageTree.Get(slotIndex), Is.EqualTo(writtenSlotValue)); + } + + [Test] + public void TestAccountAndSlotAfterCommit() + { + using TestContext ctx = new TestContext(); + FlatWorldStateScope scope = ctx.Scope; + + Address testAddress = TestItem.AddressA; + UInt256 slotIndex = 1; + Account testAccount = TestItem.GenerateRandomAccount(); + byte[] slotValue = { 0xCA, 0xFE }; + + // Write both + using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1)) + { + writeBatch.Set(testAddress, testAccount); + IWorldStateScopeProvider.IStorageWriteBatch storageBatch = writeBatch.CreateStorageWriteBatch(testAddress, 1); + storageBatch.Set(slotIndex, slotValue); + storageBatch.Dispose(); + } + + // Commit both + scope.Commit(1); + + // Verify in snapshot + Assert.That(ctx.LastCommittedSnapshot, Is.Not.Null); + ctx.LastCommittedSnapshot!.TryGetAccount(testAddress, out Account? committedAccount); + Assert.That(committedAccount!.Balance, Is.EqualTo(testAccount.Balance)); + Assert.That(committedAccount!.Nonce, Is.EqualTo(testAccount.Nonce)); + + ctx.LastCommittedSnapshot!.TryGetStorage(testAddress, slotIndex, out SlotValue? 
[Test]
public void TestSelfDestructBlocksEarlierAccountAndSlot()
{
    using TestContext ctx = new TestContext();
    FlatWorldStateScope scope = ctx.Scope;

    Address address = TestItem.AddressA;
    UInt256 slotIndex = 1;
    byte[] preexistingSlotValue = { 0x01, 0x02, 0x03 };
    Account preexistingAccount = TestItem.GenerateRandomAccount();

    // Oldest layer holds both an account and a storage slot for the address.
    ctx.AddSnapshot(content =>
    {
        content.Accounts[address] = preexistingAccount;
        content.Storages[(address, slotIndex)] = SlotValue.FromSpanWithoutLeadingZero(preexistingSlotValue);
    });

    // Middle layer records a SELFDESTRUCT for the address.
    // isNewAccount = false means pre-existing storage had to be cleared.
    ctx.AddSnapshot(content => content.SelfDestructedStorageAddresses[address] = false);

    // Newest layer is empty — nothing written after the selfdestruct.
    ctx.AddSnapshot(content => { });

    // The selfdestruct marker must block the older slot value, so reads
    // resolve to zero instead of the pre-existing data.
    IWorldStateScopeProvider.IStorageTree storageTree = scope.CreateStorageTree(address);
    Assert.That(storageTree.Get(slotIndex), Is.EqualTo(StorageTree.ZeroBytes));
}
IWorldStateScopeProvider.IStorageTree storageTree = scope.CreateStorageTree(testAddress); + + // slot1 should return zero (blocked by selfdestruct) + Assert.That(storageTree.Get(slot1), Is.EqualTo(StorageTree.ZeroBytes)); + + // slot2 should return the value (written after selfdestruct) + Assert.That(storageTree.Get(slot2), Is.EqualTo(slot2AfterValue)); + } + + #endregion + + #region Storage Root Tests + + [Test] + public void TestStorageRootAfterSingleSlotSet() + { + using TestContext ctx = new TestContext(); + FlatWorldStateScope scope = ctx.Scope; + + Address testAddress = TestItem.AddressA; + UInt256 slotIndex = 1; + byte[] slotValue = { 0xAB, 0xCD }; + + Account initialAccount = TestItem.GenerateRandomAccount(); + ctx.PersistenceReader.GetAccount(testAddress).Returns(initialAccount); + + // Set a single slot + using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1)) + { + IWorldStateScopeProvider.IStorageWriteBatch storageBatch = writeBatch.CreateStorageWriteBatch(testAddress, 1); + storageBatch.Set(slotIndex, slotValue); + storageBatch.Dispose(); + } + + // Commit to update storage root + scope.Commit(1); + + // Compute expected storage root using standalone StorageTree + TestMemDb testDb = new TestMemDb(); + RawScopedTrieStore trieStore = new RawScopedTrieStore(testDb); + StorageTree expectedTree = new StorageTree(trieStore, LimboLogs.Instance); + expectedTree.Set(slotIndex, slotValue); + expectedTree.UpdateRootHash(); + Hash256 expectedRoot = expectedTree.RootHash; + + // Verify actual storage root matches expected + Account? 
resultAccount = scope.Get(testAddress); + Assert.That(resultAccount, Is.Not.Null); + Assert.That(resultAccount!.StorageRoot, Is.EqualTo(expectedRoot)); + } + + [Test] + public void TestStorageRootAfterMultipleSlotsSingleCommit() + { + using TestContext ctx = new TestContext(); + FlatWorldStateScope scope = ctx.Scope; + + Address testAddress = TestItem.AddressA; + UInt256 slot1 = 1; + UInt256 slot2 = 2; + UInt256 slot3 = 100; + byte[] value1 = { 0x01, 0x02 }; + byte[] value2 = { 0xAA, 0xBB, 0xCC }; + byte[] value3 = { 0xFF }; + + Account initialAccount = TestItem.GenerateRandomAccount(); + ctx.PersistenceReader.GetAccount(testAddress).Returns(initialAccount); + + // Set multiple slots in single commit + using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1)) + { + IWorldStateScopeProvider.IStorageWriteBatch storageBatch = writeBatch.CreateStorageWriteBatch(testAddress, 3); + storageBatch.Set(slot1, value1); + storageBatch.Set(slot2, value2); + storageBatch.Set(slot3, value3); + storageBatch.Dispose(); + } + + scope.Commit(1); + + // Compute expected storage root + TestMemDb testDb = new TestMemDb(); + RawScopedTrieStore trieStore = new RawScopedTrieStore(testDb); + StorageTree expectedTree = new StorageTree(trieStore, LimboLogs.Instance); + expectedTree.Set(slot1, value1); + expectedTree.Set(slot2, value2); + expectedTree.Set(slot3, value3); + expectedTree.UpdateRootHash(); + Hash256 expectedRoot = expectedTree.RootHash; + + // Verify + Account? 
resultAccount = scope.Get(testAddress); + Assert.That(resultAccount!.StorageRoot, Is.EqualTo(expectedRoot)); + } + + [Test] + public void TestStorageRootAfterMultipleCommits() + { + using TestContext ctx = new TestContext(); + FlatWorldStateScope scope = ctx.Scope; + + Address testAddress = TestItem.AddressA; + UInt256 slot1 = 1; + UInt256 slot2 = 2; + byte[] value1 = { 0x11 }; + byte[] value2 = { 0x22 }; + + Account initialAccount = TestItem.GenerateRandomAccount(); + ctx.PersistenceReader.GetAccount(testAddress).Returns(initialAccount); + + // First commit - set slot1 + using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1)) + { + IWorldStateScopeProvider.IStorageWriteBatch storageBatch = writeBatch.CreateStorageWriteBatch(testAddress, 1); + storageBatch.Set(slot1, value1); + storageBatch.Dispose(); + } + scope.Commit(1); + + // Second commit - set slot2 + using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1)) + { + IWorldStateScopeProvider.IStorageWriteBatch storageBatch = writeBatch.CreateStorageWriteBatch(testAddress, 1); + storageBatch.Set(slot2, value2); + storageBatch.Dispose(); + } + scope.Commit(2); + + // Compute expected storage root with both slots + TestMemDb testDb = new TestMemDb(); + RawScopedTrieStore trieStore = new RawScopedTrieStore(testDb); + StorageTree expectedTree = new StorageTree(trieStore, LimboLogs.Instance); + expectedTree.Set(slot1, value1); + expectedTree.Set(slot2, value2); + expectedTree.UpdateRootHash(); + Hash256 expectedRoot = expectedTree.RootHash; + + // Verify + Account? 
resultAccount = scope.Get(testAddress); + Assert.That(resultAccount!.StorageRoot, Is.EqualTo(expectedRoot)); + } + + [Test] + public void TestStorageRootAfterSelfDestructAndNewSlots() + { + using TestContext ctx = new TestContext(); + FlatWorldStateScope scope = ctx.Scope; + + Address testAddress = TestItem.AddressA; + UInt256 slot1 = 1; + UInt256 slot2 = 2; + byte[] value1 = { 0xAA }; + byte[] value2 = { 0xBB }; + + Account initialAccount = TestItem.GenerateRandomAccount(); + ctx.PersistenceReader.GetAccount(testAddress).Returns(initialAccount); + + // Set initial slot + using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1)) + { + IWorldStateScopeProvider.IStorageWriteBatch storageBatch = writeBatch.CreateStorageWriteBatch(testAddress, 1); + storageBatch.Set(slot1, value1); + storageBatch.Dispose(); + } + scope.Commit(1); + + // SelfDestruct - should clear storage + using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1)) + { + IWorldStateScopeProvider.IStorageWriteBatch storageBatch = writeBatch.CreateStorageWriteBatch(testAddress, 0); + storageBatch.Clear(); + storageBatch.Dispose(); + } + scope.Commit(2); + + // Set new slot after selfdestruct + using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1)) + { + IWorldStateScopeProvider.IStorageWriteBatch storageBatch = writeBatch.CreateStorageWriteBatch(testAddress, 1); + storageBatch.Set(slot2, value2); + storageBatch.Dispose(); + } + scope.Commit(3); + + // Expected: only slot2 should exist (storage was cleared) + TestMemDb testDb = new TestMemDb(); + RawScopedTrieStore trieStore = new RawScopedTrieStore(testDb); + StorageTree expectedTree = new StorageTree(trieStore, LimboLogs.Instance); + expectedTree.Set(slot2, value2); + expectedTree.UpdateRootHash(); + Hash256 expectedRoot = expectedTree.RootHash; + + // Verify + Account? 
resultAccount = scope.Get(testAddress); + Assert.That(resultAccount!.StorageRoot, Is.EqualTo(expectedRoot)); + } + + [Test] + public void TestEmptyStorageRootWhenNoSlots() + { + using TestContext ctx = new TestContext(); + FlatWorldStateScope scope = ctx.Scope; + + Address testAddress = TestItem.AddressA; + + Account initialAccount = new Account(0, 0); + ctx.PersistenceReader.GetAccount(testAddress).Returns(initialAccount); + + // Don't set any slots, just get the account + Account? resultAccount = scope.Get(testAddress); + + // Verify storage root is EmptyTreeHash + Assert.That(resultAccount, Is.Not.Null); + Assert.That(resultAccount!.StorageRoot, Is.EqualTo(Keccak.EmptyTreeHash)); + } + + #endregion + + #region Account Snapshot Commit Tests + + [Test] + public void TestMultipleAccountsAndSlotsCommittedInSnapshot() + { + using TestContext ctx = new TestContext(); + FlatWorldStateScope scope = ctx.Scope; + + Address addr1 = TestItem.AddressA; + Address addr2 = TestItem.AddressB; + Account acc1 = new Account(100, 1000); + Account acc2 = new Account(200, 2000); + UInt256 slot1 = 1; + byte[] val1 = { 0x01 }; + + // Set multiple items + using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(2)) + { + writeBatch.Set(addr1, acc1); + writeBatch.Set(addr2, acc2); + IWorldStateScopeProvider.IStorageWriteBatch storageBatch = writeBatch.CreateStorageWriteBatch(addr1, 1); + storageBatch.Set(slot1, val1); + storageBatch.Dispose(); + } + + scope.Commit(1); + + // Verify all committed to snapshot + Assert.That(ctx.LastCommittedSnapshot, Is.Not.Null); + ctx.LastCommittedSnapshot!.TryGetAccount(addr1, out Account? committedAcc1); + Assert.That(committedAcc1!.Balance, Is.EqualTo(acc1.Balance)); + + ctx.LastCommittedSnapshot!.TryGetAccount(addr2, out Account? committedAcc2); + Assert.That(committedAcc2!.Balance, Is.EqualTo(acc2.Balance)); + + ctx.LastCommittedSnapshot!.TryGetStorage(addr1, slot1, out SlotValue? 
[Test]
public void TestMultipleCommitsAccumulateData()
{
    using TestContext ctx = new TestContext();
    FlatWorldStateScope scope = ctx.Scope;

    Address addr1 = TestItem.AddressA;
    Address addr2 = TestItem.AddressB;
    Account acc1 = new Account(100, 1000);
    Account acc2 = new Account(200, 2000);

    // Commit 1: only addr1
    using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1))
    {
        writeBatch.Set(addr1, acc1);
    }
    scope.Commit(1);

    // Commit 2: only addr2
    using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1))
    {
        writeBatch.Set(addr2, acc2);
    }
    scope.Commit(2);

    // Verify the scope sees both: earlier commits are not discarded by later ones.
    Assert.That(scope.Get(addr1), Is.EqualTo(acc1));
    Assert.That(scope.Get(addr2), Is.EqualTo(acc2));
}

#endregion

#region Comprehensive Selfdestruct Blocking Tests

[Test]
public void TestSelfDestructBlocksPersistenceAndAllSnapshotLayers()
{
    using TestContext ctx = new TestContext();
    FlatWorldStateScope scope = ctx.Scope;

    Address addr = TestItem.AddressA;
    UInt256 slot = 1;
    byte[] persistedVal = { 0xDE, 0xAD };
    byte[] snapshotVal = { 0x01, 0x02 };

    // Persistence setup: both an account and a slot value exist below the snapshots.
    ctx.PersistenceReader.GetAccount(addr).Returns(TestItem.GenerateRandomAccount());
    SlotValue outVal = SlotValue.FromSpanWithoutLeadingZero(persistedVal);
    // NOTE(review): generic argument was lost in transit; assumes the ref
    // parameter of TryGetSlot is SlotValue — confirm against IPersistence.
    ctx.PersistenceReader.TryGetSlot(addr, slot, ref Arg.Any<SlotValue>())
        .Returns(x => { x[2] = outVal; return true; });

    // Snapshot setup: a value layer, then a SELFDESTRUCT layer, then an empty top layer.
    ctx.AddSnapshot(content => content.Storages[(addr, slot)] = SlotValue.FromSpanWithoutLeadingZero(snapshotVal));
    ctx.AddSnapshot(content => content.SelfDestructedStorageAddresses[addr] = true);
    ctx.AddSnapshot(content => { });

    // Both the older snapshot value and the persisted value must be blocked.
    IWorldStateScopeProvider.IStorageTree storageTree = scope.CreateStorageTree(addr);
    Assert.That(storageTree.Get(slot), Is.EqualTo(StorageTree.ZeroBytes));
}

[Test]
+ public void TestStorageNodeLookupWithoutSelfDestructFallsThroughToReadOnlyBundle() + { + // This test verifies the fix for the bug where storage node lookup would exit early + // when selfDestructStateIdx == -1 (no self-destruct) and local _snapshots exist but + // don't contain the storage node. Before the fix, the condition `i >= currentBundleSelfDestructIdx` + // was always true when selfDestructStateIdx == -1, causing early exit. + + using TestContext ctx = new TestContext(); + FlatWorldStateScope scope = ctx.Scope; + + Address addr1 = TestItem.AddressA; + Address addr2 = TestItem.AddressB; + Hash256 addr1Hash = Keccak.Compute(addr1.Bytes); + UInt256 slot1 = 1; + byte[] value1 = { 0x01 }; + + Account acc1 = TestItem.GenerateRandomAccount(); + ctx.PersistenceReader.GetAccount(addr1).Returns(acc1); + + // Add storage slot AND trie node for addr1 to ReadOnlySnapshots + ctx.AddSnapshot(content => + { + content.Storages[(addr1, slot1)] = SlotValue.FromSpanWithoutLeadingZero(value1); + + // Also add a storage trie node for addr1 at root path + TrieNode storageNode = new TrieNode(NodeType.Leaf, Keccak.Zero); + content.StorageNodes[(addr1Hash, TreePath.Empty)] = storageNode; + }); + + // Create local commits for addr2 (NOT addr1) - this creates local _snapshots + Account acc2 = TestItem.GenerateRandomAccount(); + using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1)) + { + writeBatch.Set(addr2, acc2); + } + scope.Commit(1); + + // Now lookup storage for addr1 - should fall through local _snapshots to ReadOnlySnapshots + // Before the fix: would fail because DoTryFindStorageNodeExternal exited early + // After the fix: properly falls through and finds storage in ReadOnlySnapshots + IWorldStateScopeProvider.IStorageTree storageTree = scope.CreateStorageTree(addr1); + Assert.That(storageTree.Get(slot1), Is.EqualTo(value1)); + } + + [Test] + public void TestSelfDestructInLocalSnapshotsStopsAtExpectedSnapshot() + { + // This test 
verifies that when self-destruct is in local _snapshots (SnapshotBundle), + // the storage lookup correctly: + // 1. Finds storage added AFTER self-destruct (in newer snapshots) + // 2. Finds storage added AT the same commit as self-destruct + // 3. Returns null for storage that existed BEFORE self-destruct (blocked by self-destruct) + + using TestContext ctx = new TestContext(); + FlatWorldStateScope scope = ctx.Scope; + + Address addr = TestItem.AddressA; + UInt256 slotBefore = 1; + UInt256 slotAtSelfDestruct = 2; + UInt256 slotAfter = 3; + byte[] valueBefore = { 0x01 }; + byte[] valueAtSelfDestruct = { 0x02 }; + byte[] valueAfter = { 0x03 }; + + Account acc = TestItem.GenerateRandomAccount(); + ctx.PersistenceReader.GetAccount(addr).Returns(acc); + + // Commit 1: Set slot BEFORE self-destruct + using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1)) + { + IWorldStateScopeProvider.IStorageWriteBatch storageBatch = writeBatch.CreateStorageWriteBatch(addr, 1); + storageBatch.Set(slotBefore, valueBefore); + storageBatch.Dispose(); + } + scope.Commit(1); + + // Commit 2: Self-destruct AND set new slot in same commit + using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1)) + { + IWorldStateScopeProvider.IStorageWriteBatch storageBatch = writeBatch.CreateStorageWriteBatch(addr, 1); + storageBatch.Clear(); + storageBatch.Set(slotAtSelfDestruct, valueAtSelfDestruct); + storageBatch.Dispose(); + } + scope.Commit(2); + + // Commit 3: Set slot AFTER self-destruct + using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1)) + { + IWorldStateScopeProvider.IStorageWriteBatch storageBatch = writeBatch.CreateStorageWriteBatch(addr, 1); + storageBatch.Set(slotAfter, valueAfter); + storageBatch.Dispose(); + } + scope.Commit(3); + + // Verify storage behavior: + // - slotBefore should be blocked by self-destruct (return zero) + // - slotAtSelfDestruct should be found (set in 
same commit as self-destruct) + // - slotAfter should be found (added after self-destruct) + IWorldStateScopeProvider.IStorageTree storageTree = scope.CreateStorageTree(addr); + Assert.That(storageTree.Get(slotBefore), Is.EqualTo(StorageTree.ZeroBytes), "Slot before self-destruct should be zero"); + Assert.That(storageTree.Get(slotAtSelfDestruct), Is.EqualTo(valueAtSelfDestruct), "Slot at self-destruct should be found"); + Assert.That(storageTree.Get(slotAfter), Is.EqualTo(valueAfter), "Slot after self-destruct should be found"); + } + + #endregion + +} diff --git a/src/Nethermind/Nethermind.State.Flat.Test/MpmcRingBufferTests.cs b/src/Nethermind/Nethermind.State.Flat.Test/MpmcRingBufferTests.cs new file mode 100644 index 00000000000..90796eb6f41 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat.Test/MpmcRingBufferTests.cs @@ -0,0 +1,147 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using FluentAssertions; +using NUnit.Framework; + +namespace Nethermind.State.Flat.Test; + +public class MpmcRingBufferTests +{ + [Test] + public void SmokeTest() + { + MpmcRingBuffer jobQueue = new MpmcRingBuffer(16); + + jobQueue.TryEnqueue(1); + jobQueue.TryEnqueue(2); + jobQueue.TryEnqueue(3); + jobQueue.TryEnqueue(4); + jobQueue.TryEnqueue(5); + + jobQueue.TryDequeue(out int j).Should().BeTrue(); + j.Should().Be(1); + jobQueue.TryDequeue(out j).Should().BeTrue(); + j.Should().Be(2); + jobQueue.TryDequeue(out j).Should().BeTrue(); + j.Should().Be(3); + jobQueue.TryDequeue(out j).Should().BeTrue(); + j.Should().Be(4); + jobQueue.TryDequeue(out j).Should().BeTrue(); + j.Should().Be(5); + } + + [Test] + public void RollingSmokeTest() + { + MpmcRingBuffer jobQueue = new MpmcRingBuffer(16); + + jobQueue.TryEnqueue(1); + jobQueue.TryEnqueue(2); + jobQueue.TryEnqueue(3); + jobQueue.TryEnqueue(4); + jobQueue.TryEnqueue(5); + + int j = 0; + for 
[Test]
public void SmokeTestFullAndRolling()
{
    MpmcRingBuffer<int> jobQueue = new MpmcRingBuffer<int>(16);

    // Fill to capacity; the next enqueue must be rejected.
    for (int i = 0; i < 16; i++)
    {
        Assert.That(jobQueue.TryEnqueue(1), Is.True);
    }
    Assert.That(jobQueue.TryEnqueue(1), Is.False);

    // Drain completely; the next dequeue must be rejected.
    for (int i = 0; i < 16; i++)
    {
        Assert.That(jobQueue.TryDequeue(out _), Is.True);
    }
    Assert.That(jobQueue.TryDequeue(out _), Is.False);

    // Repeat the full cycle to exercise index wrap-around of the ring.
    for (int i = 0; i < 16; i++)
    {
        Assert.That(jobQueue.TryEnqueue(1), Is.True);
    }
    Assert.That(jobQueue.TryEnqueue(1), Is.False);

    for (int i = 0; i < 16; i++)
    {
        Assert.That(jobQueue.TryDequeue(out _), Is.True);
    }
    Assert.That(jobQueue.TryDequeue(out _), Is.False);
}

[Test]
public async Task HighConcurrency_StressTest_NoDataLoss()
{
    const int Capacity = 1024;
    const int ItemsToProduce = 1_000_000;
    const int ProducerCount = 4;
    const int ConsumerCount = 4;

    MpmcRingBuffer<int> buffer = new MpmcRingBuffer<int>(Capacity);
    int[] consumedCounts = new int[ItemsToProduce];
    long totalConsumed = 0;

    // Shared countdown of items left to produce; each producer claims an item via CAS.
    long itemLeftToProduce = ItemsToProduce;

    // Producer tasks (multiple producers)
    Task[] producers = Enumerable.Range(0, ProducerCount).Select(_ => Task.Run(() =>
    {
        while (true)
        {
            long remaining = Interlocked.Read(ref itemLeftToProduce);
            if (remaining == 0) break;
            // Lost the race for this item — retry with a fresh read.
            if (Interlocked.CompareExchange(ref itemLeftToProduce, remaining - 1, remaining) != remaining) continue;

            while (!buffer.TryEnqueue((int)remaining - 1))
            {
                Thread.SpinWait(10); // Wait for space
            }
        }
    })).ToArray();

    // Consumer tasks (multiple consumers)
    Task[] consumers = Enumerable.Range(0, ConsumerCount).Select(_ => Task.Run(() =>
    {
        while (Interlocked.Read(ref totalConsumed) < ItemsToProduce)
        {
            if (buffer.TryDequeue(out int item))
            {
                // Track that this specific item was hit
                Interlocked.Increment(ref consumedCounts[item]);
                Interlocked.Increment(ref totalConsumed);
            }
            else
            {
                Thread.SpinWait(10);
            }
        }
    })).ToArray();

    await Task.WhenAll(producers);
    await Task.WhenAll(consumers);

    // Assertions — actual value first, expected second (NUnit convention;
    // the original had the arguments swapped, which garbles failure messages).
    Assert.That(Interlocked.Read(ref totalConsumed), Is.EqualTo((long)ItemsToProduce));

    // Every item must have been consumed exactly once — no loss, no duplication.
    for (int i = 0; i < ItemsToProduce; i++)
    {
        Assert.That(consumedCounts[i], Is.EqualTo(1), $"Item {i} was consumed {consumedCounts[i]} times!");
    }
}
public void Add_MultipleItems_ShouldAllBeFound()
{
    // Arrange
    using Nethermind.State.Flat.Persistence.BloomFilter.BloomFilter bloom = new(capacity: 100, bitsPerKey: 10);
    ulong[] hashes = { 1, 2, 3, 100, 1000, 99999 };

    // Act
    foreach (ulong hash in hashes)
    {
        bloom.Add(hash);
    }

    // Assert — a bloom filter has no false negatives, so every added hash must be reported.
    foreach (ulong hash in hashes)
    {
        bloom.MightContain(hash).Should().BeTrue($"hash {hash} should be found");
    }
}

#endregion

#region Concurrency Tests

[Test]
public void Add_Concurrent_ShouldBeThreadSafe()
{
    // Arrange
    using Nethermind.State.Flat.Persistence.BloomFilter.BloomFilter bloom = new(capacity: 1000, bitsPerKey: 10);
    int threadsCount = 10;
    int itemsPerThread = 50;
    using Barrier barrier = new(threadsCount);
    // Generic argument restored (lost in transit): the bag holds the ulong hashes added.
    System.Collections.Concurrent.ConcurrentBag<ulong> addedHashes = new();

    // Act - Multiple threads adding concurrently
    Task[] tasks = Enumerable.Range(0, threadsCount).Select(threadId => Task.Run(() =>
    {
        barrier.SignalAndWait(); // Sync start so all threads hit the filter simultaneously
        for (int i = 0; i < itemsPerThread; i++)
        {
            ulong hash = (ulong)(threadId * itemsPerThread + i);
            bloom.Add(hash);
            addedHashes.Add(hash);
        }
    })).ToArray();

    Task.WaitAll(tasks);

    // Assert - All items should be found
    foreach (ulong hash in addedHashes)
    {
        bloom.MightContain(hash).Should().BeTrue($"hash {hash} should be found");
    }
}

[Test]
public void Add_ConcurrentWithMightContain_ShouldWork()
{
    // Arrange
    using Nethermind.State.Flat.Persistence.BloomFilter.BloomFilter bloom = new(capacity: 10000, bitsPerKey: 10);
    int duration = 1000; // ms
    CancellationTokenSource cts = new(duration);

    // Act - Some threads adding, others querying
    Task[] writerTasks = Enumerable.Range(0, 3).Select(threadId => Task.Run(() =>
    {
        ulong hash = (ulong)(threadId * 1000000);
        while (!cts.Token.IsCancellationRequested)
        {
            bloom.Add(hash++);
        }
    })).ToArray();

    Task[] readerTasks = Enumerable.Range(0, 3).Select(_ => Task.Run(() =>
    {
hash = 0; + while (!cts.Token.IsCancellationRequested) + { + bloom.MightContain(hash++); + Thread.Yield(); + } + })).ToArray(); + + Task.WaitAll(writerTasks.Concat(readerTasks).ToArray()); + + // Assert - No exceptions thrown + Assert.Pass("Concurrent operations completed without exceptions"); + } + + #endregion + + #region Edge Cases + + [Test] + public void Dispose_MultipleTimes_ShouldNotThrow() + { + // Arrange + Nethermind.State.Flat.Persistence.BloomFilter.BloomFilter bloom = new(capacity: 100, bitsPerKey: 10); + + // Act & Assert + bloom.Dispose(); + Assert.DoesNotThrow(() => bloom.Dispose()); + } + + [Test] + public void MightContain_BeforeAnyAdds_ShouldReturnFalse() + { + // Arrange + using Nethermind.State.Flat.Persistence.BloomFilter.BloomFilter bloom = new(capacity: 100, bitsPerKey: 10); + + // Act & Assert + // Empty bloom filter should generally return false (though false positives are theoretically possible) + bool result = bloom.MightContain(99999); + result.Should().BeFalse("empty bloom filter should return false for items not added"); + } + + [Test] + public void Add_LargeNumberOfItems_ShouldWork() + { + // Arrange + int totalItems = 500; + using Nethermind.State.Flat.Persistence.BloomFilter.BloomFilter bloom = new(capacity: totalItems, bitsPerKey: 10); + + // Act + for (ulong i = 0; i < (ulong)totalItems; i++) + { + bloom.Add(i); + } + + // Assert - Verify count + bloom.Count.Should().Be(totalItems); + + // Verify sample of items can be found + for (ulong i = 0; i < 50; i++) + { + bloom.MightContain(i).Should().BeTrue($"hash {i} should be found"); + } + } + + #endregion +} diff --git a/src/Nethermind/Nethermind.State.Flat.Test/Persistence/PreimageRecordingPersistenceTests.cs b/src/Nethermind/Nethermind.State.Flat.Test/Persistence/PreimageRecordingPersistenceTests.cs new file mode 100644 index 00000000000..3b752372472 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat.Test/Persistence/PreimageRecordingPersistenceTests.cs @@ -0,0 +1,212 @@ +// 
SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using FluentAssertions; +using Nethermind.Core; +using Nethermind.Core.Crypto; +using Nethermind.Core.Test.Builders; +using Nethermind.Db; +using Nethermind.Int256; +using Nethermind.State.Flat.Persistence; +using Nethermind.Trie; +using NSubstitute; +using NUnit.Framework; + +namespace Nethermind.State.Flat.Test.Persistence; + +[TestFixture] +public class PreimageRecordingPersistenceTests +{ + private const int PreimageLookupSize = 12; + + private IPersistence _innerPersistence = null!; + private MemDb _preimageDb = null!; + private PreimageRecordingPersistence _sut = null!; + + [SetUp] + public void SetUp() + { + _innerPersistence = Substitute.For(); + _preimageDb = new MemDb(); + _sut = new PreimageRecordingPersistence(_innerPersistence, _preimageDb); + } + + [TearDown] + public void TearDown() => _preimageDb.Dispose(); + + [Test] + public void PassThroughOperations_DelegateToInnerPersistence() + { + // CreateReader + IPersistence.IPersistenceReader expectedReader = Substitute.For(); + _innerPersistence.CreateReader().Returns(expectedReader); + _sut.CreateReader().Should().BeSameAs(expectedReader); + + // CreateWriteBatch + StateId from = StateId.PreGenesis; + StateId to = new StateId(1, TestItem.KeccakA); + IPersistence.IWriteBatch innerBatch = Substitute.For(); + _innerPersistence.CreateWriteBatch(from, to, WriteFlags.None).Returns(innerBatch); + using IPersistence.IWriteBatch batch = _sut.CreateWriteBatch(from, to, WriteFlags.None); + _innerPersistence.Received(1).CreateWriteBatch(from, to, WriteFlags.None); + } + + [Test] + public void SetAccount_SetStorage_SelfDestruct_RecordPreimages() + { + StateId from = StateId.PreGenesis; + StateId to = new StateId(1, TestItem.KeccakA); + IPersistence.IWriteBatch innerBatch = Substitute.For(); + _innerPersistence.CreateWriteBatch(from, to, WriteFlags.None).Returns(innerBatch); + + Address addressA = TestItem.AddressA; 
+ Address addressB = TestItem.AddressB; + Account account = TestItem.GenerateIndexedAccount(0); + UInt256 slot = 42; + SlotValue? value = SlotValue.FromSpanWithoutLeadingZero([0x01, 0x02, 0x03]); + + using (IPersistence.IWriteBatch batch = _sut.CreateWriteBatch(from, to, WriteFlags.None)) + { + batch.SetAccount(addressA, account); + batch.SetStorage(addressA, slot, value); + batch.SelfDestruct(addressB); + } + + // Verify inner batch calls + innerBatch.Received(1).SetAccount(addressA, account); + innerBatch.Received(1).SetStorage(addressA, slot, Arg.Is(v => v != null)); + innerBatch.Received(1).SelfDestruct(addressB); + + // Verify address preimages + ValueHash256 addressAPath = addressA.ToAccountPath; + _preimageDb.Get(addressAPath.BytesAsSpan[..PreimageLookupSize]).Should().BeEquivalentTo(addressA.Bytes); + + ValueHash256 addressBPath = addressB.ToAccountPath; + _preimageDb.Get(addressBPath.BytesAsSpan[..PreimageLookupSize]).Should().BeEquivalentTo(addressB.Bytes); + + // Verify slot preimage + ValueHash256 slotHash = ValueKeccak.Zero; + StorageTree.ComputeKeyWithLookup(slot, ref slotHash); + _preimageDb.Get(slotHash.BytesAsSpan[..PreimageLookupSize]).Should().BeEquivalentTo(slot.ToBigEndian()); + } + + [Test] + public void TrieAndRawOperations_WithoutPreimage_DelegateAsRaw() + { + StateId from = StateId.PreGenesis; + StateId to = new StateId(1, TestItem.KeccakA); + IPersistence.IWriteBatch innerBatch = Substitute.For(); + _innerPersistence.CreateWriteBatch(from, to, WriteFlags.None).Returns(innerBatch); + + TreePath path = TreePath.FromHexString("1234"); + TrieNode node = new TrieNode(NodeType.Leaf, [0xc1, 0x01]); + Hash256 addrHash = TestItem.KeccakA; + Hash256 slotHash = TestItem.KeccakB; + Account account = TestItem.GenerateIndexedAccount(0); + SlotValue? 
value = SlotValue.FromSpanWithoutLeadingZero([0xff]); + + using (IPersistence.IWriteBatch batch = _sut.CreateWriteBatch(from, to, WriteFlags.None)) + { + batch.SetStateTrieNode(path, node); + batch.SetStorageTrieNode(addrHash, path, node); + batch.SetStorageRaw(addrHash, slotHash, value); + batch.SetAccountRaw(addrHash, account); + } + + // Verify trie operations delegated + innerBatch.Received(1).SetStateTrieNode(path, node); + innerBatch.Received(1).SetStorageTrieNode(addrHash, path, node); + + // Without preimage, raw operations stay raw + innerBatch.Received(1).SetStorageRaw(addrHash, slotHash, Arg.Is(v => v != null)); + innerBatch.Received(1).SetAccountRaw(addrHash, account); + + // No preimages should be recorded for trie/raw operations + _preimageDb.Keys.Should().BeEmpty(); + } + + [Test] + public void RawOperations_WithPreimage_TranslatedToNonRaw() + { + Address address = TestItem.AddressA; + UInt256 slot = 42; + Account account = TestItem.GenerateIndexedAccount(0); + SlotValue? 
value = SlotValue.FromSpanWithoutLeadingZero([0xff]); + + // Pre-populate preimage database with address and slot preimages + ValueHash256 addrHash = address.ToAccountPath; + _preimageDb.Set(addrHash.BytesAsSpan[..PreimageLookupSize], address.Bytes); + + ValueHash256 slotHash = ValueKeccak.Zero; + StorageTree.ComputeKeyWithLookup(slot, ref slotHash); + _preimageDb.Set(slotHash.BytesAsSpan[..PreimageLookupSize], slot.ToBigEndian()); + + StateId from = StateId.PreGenesis; + StateId to = new StateId(1, TestItem.KeccakA); + IPersistence.IWriteBatch innerBatch = Substitute.For(); + _innerPersistence.CreateWriteBatch(from, to, WriteFlags.None).Returns(innerBatch); + + using (IPersistence.IWriteBatch batch = _sut.CreateWriteBatch(from, to, WriteFlags.None)) + { + batch.SetStorageRaw(new Hash256(addrHash), new Hash256(slotHash), value); + batch.SetAccountRaw(new Hash256(addrHash), account); + } + + // With preimage available, raw operations are translated to non-raw + innerBatch.Received(1).SetStorage(address, slot, Arg.Is(v => v != null)); + innerBatch.Received(1).SetAccount(address, account); + + // Raw operations should NOT be called + innerBatch.DidNotReceive().SetStorageRaw(Arg.Any(), Arg.Any(), Arg.Any()); + innerBatch.DidNotReceive().SetAccountRaw(Arg.Any(), Arg.Any()); + } + + [Test] + public void SetStorageRaw_WithOnlyAddressPreimage_FallsBackToRaw() + { + Address address = TestItem.AddressA; + UInt256 slot = 42; + SlotValue? 
value = SlotValue.FromSpanWithoutLeadingZero([0xff]); + + // Pre-populate only address preimage (missing slot preimage) + ValueHash256 addrHash = address.ToAccountPath; + _preimageDb.Set(addrHash.BytesAsSpan[..PreimageLookupSize], address.Bytes); + + ValueHash256 slotHash = ValueKeccak.Zero; + StorageTree.ComputeKeyWithLookup(slot, ref slotHash); + // Note: NOT setting slot preimage + + StateId from = StateId.PreGenesis; + StateId to = new StateId(1, TestItem.KeccakA); + IPersistence.IWriteBatch innerBatch = Substitute.For(); + _innerPersistence.CreateWriteBatch(from, to, WriteFlags.None).Returns(innerBatch); + + using (IPersistence.IWriteBatch batch = _sut.CreateWriteBatch(from, to, WriteFlags.None)) + { + batch.SetStorageRaw(new Hash256(addrHash), new Hash256(slotHash), value); + } + + // Without slot preimage, storage stays raw + innerBatch.Received(1).SetStorageRaw(new Hash256(addrHash), new Hash256(slotHash), Arg.Is(v => v != null)); + innerBatch.DidNotReceive().SetStorage(Arg.Any
(), Arg.Any(), Arg.Any()); + } + + [Test] + public void Dispose_DisposesPreimageBatchAndInnerBatch() + { + StateId from = StateId.PreGenesis; + StateId to = new StateId(1, TestItem.KeccakA); + IPersistence.IWriteBatch innerBatch = Substitute.For(); + _innerPersistence.CreateWriteBatch(from, to, WriteFlags.None).Returns(innerBatch); + + IPersistence.IWriteBatch batch = _sut.CreateWriteBatch(from, to, WriteFlags.None); + batch.SetAccount(TestItem.AddressA, TestItem.GenerateIndexedAccount(0)); + batch.Dispose(); + + innerBatch.Received(1).Dispose(); + + // Preimages should be flushed after dispose + ValueHash256 addressPath = TestItem.AddressA.ToAccountPath; + _preimageDb.Get(addressPath.BytesAsSpan[..PreimageLookupSize]).Should().NotBeNull(); + } +} diff --git a/src/Nethermind/Nethermind.State.Flat.Test/PersistenceManagerTests.cs b/src/Nethermind/Nethermind.State.Flat.Test/PersistenceManagerTests.cs new file mode 100644 index 00000000000..8d647074d49 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat.Test/PersistenceManagerTests.cs @@ -0,0 +1,528 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System.Collections.Generic; +using System.Linq; +using Nethermind.Core; +using Nethermind.Core.Collections; +using Nethermind.Core.Crypto; +using Nethermind.Core.Test.Builders; +using Nethermind.Db; +using Nethermind.Int256; +using Nethermind.Logging; +using Nethermind.State.Flat.Persistence; +using Nethermind.Trie; +using Nethermind.Trie.Pruning; +using NSubstitute; +using NUnit.Framework; + +namespace Nethermind.State.Flat.Test; + +[TestFixture] +public class PersistenceManagerTests +{ + private PersistenceManager _persistenceManager = null!; + private FlatDbConfig _config = null!; + private TestFinalizedStateProvider _finalizedStateProvider = null!; + private SnapshotRepository _snapshotRepository = null!; + private IPersistence _persistence = null!; + private ResourcePool _resourcePool = null!; + 
private StateId Block0 = new StateId(0, Keccak.EmptyTreeHash); + + [SetUp] + public void SetUp() + { + _config = new FlatDbConfig + { + CompactSize = 16, + MinReorgDepth = 64, + MaxReorgDepth = 256 + }; + + _resourcePool = new ResourcePool(_config); + _finalizedStateProvider = new TestFinalizedStateProvider(); + _snapshotRepository = new SnapshotRepository(LimboLogs.Instance); + _persistence = Substitute.For(); + + IPersistence.IPersistenceReader persistenceReader = Substitute.For(); + persistenceReader.CurrentState.Returns(Block0); + _persistence.CreateReader().Returns(persistenceReader); + + _persistenceManager = new PersistenceManager( + _config, + _finalizedStateProvider, + _persistence, + _snapshotRepository, + LimboLogs.Instance); + } + + [TearDown] + public void TearDown() + { + } + + private StateId CreateStateId(long blockNumber, byte rootByte = 0) + { + byte[] bytes = new byte[32]; + bytes[0] = rootByte; + return new StateId(blockNumber, new ValueHash256(bytes)); + } + + private Snapshot CreateSnapshot(StateId from, StateId to, bool compacted = false) + { + Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv); + snapshot.Content.Accounts[TestItem.AddressA] = new Account(1, 100); + + if (compacted) + { + _snapshotRepository.TryAddCompactedSnapshot(snapshot); + } + else + { + _snapshotRepository.TryAddSnapshot(snapshot); + } + + // AddStateId is needed for GetStatesAtBlockNumber to work + _snapshotRepository.AddStateId(to); + + return snapshot; + } + + private Snapshot CreateSnapshotWithSelfDestruct(StateId from, StateId to) + { + Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv); + snapshot.Content.SelfDestructedStorageAddresses[TestItem.AddressA] = false; // false = should be processed + return snapshot; + } + + #region Basic Behavior Tests + + [Test] + public void DetermineSnapshotToPersist_InsufficientInMemoryDepth_ReturnsNull() + { + // Setup: persisted 
at Block0 (0), latest at 60, after persist would be < 64 minimum + StateId persisted = Block0; + StateId latest = CreateStateId(60); + _finalizedStateProvider.SetFinalizedBlockNumber(100); + + Snapshot? result = _persistenceManager.DetermineSnapshotToPersist(latest); + + Assert.That(result, Is.Null); + } + + [TestCase(true, TestName = "DetermineSnapshotToPersist_SufficientDepthAndFinalized_ReturnsCompactedSnapshot")] + [TestCase(false, TestName = "DetermineSnapshotToPersist_SufficientDepthAndFinalized_FallsBackToUncompacted")] + public void DetermineSnapshotToPersist_SufficientDepthAndFinalized(bool useCompacted) + { + // Setup: persisted at Block0, latest at 100, finalized at 100 + StateId persisted = Block0; + StateId latest = CreateStateId(100); + + // Vary target block and compaction based on parameter + int targetBlock = useCompacted ? 16 : 1; // compacted uses 16, fallback uses 1 + StateId target = CreateStateId(targetBlock); + + _finalizedStateProvider.SetFinalizedBlockNumber(100); + _finalizedStateProvider.SetFinalizedStateRootAt(targetBlock, new Hash256(target.StateRoot.Bytes)); + + // Create snapshot (compacted or not based on parameter) + using Snapshot expectedSnapshot = CreateSnapshot(persisted, target, compacted: useCompacted); + + Snapshot? result = _persistenceManager.DetermineSnapshotToPersist(latest); + + Assert.That(result, Is.Not.Null); + Assert.That(result!.From, Is.EqualTo(persisted)); + Assert.That(result.To, Is.EqualTo(target)); + + result.Dispose(); + } + + #endregion + + #region Unfinalized State Tests + + [Test] + public void DetermineSnapshotToPersist_UnfinalizedButBelowForceLimit_ReturnsNull() + { + // Setup: persisted at Block0, latest at 150, finalized at 10 (way behind) + // After persist would be at 16, which is > finalized + // But in-memory depth is 150 (< 256 forced boundary) + StateId persisted = Block0; + StateId latest = CreateStateId(150); + _finalizedStateProvider.SetFinalizedBlockNumber(10); + + Snapshot? 
result = _persistenceManager.DetermineSnapshotToPersist(latest); + + Assert.That(result, Is.Null); + } + + [TestCase(true, TestName = "DetermineSnapshotToPersist_UnfinalizedAndAboveForceLimit_ForcePersistsCompacted")] + [TestCase(false, TestName = "DetermineSnapshotToPersist_UnfinalizedAndAboveForceLimit_FallsBackToUncompacted")] + public void DetermineSnapshotToPersist_UnfinalizedAndAboveForceLimit(bool useCompacted) + { + // Setup: persisted at Block0, latest at 300, finalized at 10 + // In-memory depth is ~301 (> 256 forced boundary) + StateId persisted = Block0; + StateId latest = CreateStateId(300); + + // Vary target block and compaction based on parameter + int targetBlock = useCompacted ? 16 : 1; // compacted uses 16, fallback uses 1 + StateId target = CreateStateId(targetBlock); + + _finalizedStateProvider.SetFinalizedBlockNumber(10); + + // Create snapshot (compacted or not based on parameter) + using Snapshot expectedSnapshot = CreateSnapshot(persisted, target, compacted: useCompacted); + + Snapshot? result = _persistenceManager.DetermineSnapshotToPersist(latest); + + Assert.That(result, Is.Not.Null); + Assert.That(result!.From, Is.EqualTo(persisted)); + Assert.That(result.To, Is.EqualTo(target)); + + result.Dispose(); + } + + #endregion + + #region Edge Cases + + [Test] + public void DetermineSnapshotToPersist_NoSnapshotAvailable_ReturnsNull() + { + // Setup: sufficient depth but no snapshots in repository + StateId persisted = Block0; + StateId latest = CreateStateId(100); + _finalizedStateProvider.SetFinalizedBlockNumber(100); + _finalizedStateProvider.SetFinalizedStateRootAt(16, new Hash256(CreateStateId(16).StateRoot.Bytes)); + + // Don't create any snapshots + + Snapshot? 
result = _persistenceManager.DetermineSnapshotToPersist(latest); + + Assert.That(result, Is.Null); + } + + [Test] + public void DetermineSnapshotToPersist_SnapshotWithWrongFromState_ReturnsNull() + { + // Setup: snapshot exists but doesn't start from current persisted state + StateId persisted = Block0; + StateId latest = CreateStateId(100); + StateId wrongFrom = CreateStateId(5); + StateId target = CreateStateId(16); + _finalizedStateProvider.SetFinalizedBlockNumber(100); + _finalizedStateProvider.SetFinalizedStateRootAt(16, new Hash256(target.StateRoot.Bytes)); + + // Create snapshot with wrong "from" state + using Snapshot wrongSnapshot = CreateSnapshot(wrongFrom, target, compacted: true); + + Snapshot? result = _persistenceManager.DetermineSnapshotToPersist(latest); + + Assert.That(result, Is.Null); + } + + [Test] + public void DetermineSnapshotToPersist_MultipleStatesAtBlock_SelectsCorrectOne() + { + // Setup: multiple state roots at same block number (reorg scenario) + StateId persisted = Block0; + StateId latest = CreateStateId(100); + StateId target1 = CreateStateId(16, rootByte: 1); + StateId target2 = CreateStateId(16, rootByte: 2); // Different root + _finalizedStateProvider.SetFinalizedBlockNumber(100); + _finalizedStateProvider.SetFinalizedStateRootAt(16, new Hash256(target2.StateRoot.Bytes)); // target2 is finalized + + // Create both snapshots + using Snapshot snapshot1 = CreateSnapshot(persisted, target1, compacted: true); + using Snapshot snapshot2 = CreateSnapshot(persisted, target2, compacted: true); + + Snapshot? 
result = _persistenceManager.DetermineSnapshotToPersist(latest); + + Assert.That(result, Is.Not.Null); + Assert.That(result!.To.StateRoot.Bytes.ToArray(), Is.EqualTo(target2.StateRoot.Bytes.ToArray())); // Should select finalized one + + result.Dispose(); + } + + [Test] + public void DetermineSnapshotToPersist_ExactlyAtMinimumBoundary_ReturnsNull() + { + // Setup: persisted at Block0 (0), latest at 79 + // After persist would be at 15, leaving depth of 64 (exactly at minimum boundary) + StateId persisted = Block0; + StateId latest = CreateStateId(79); + _finalizedStateProvider.SetFinalizedBlockNumber(100); + + Snapshot? result = _persistenceManager.DetermineSnapshotToPersist(latest); + + Assert.That(result, Is.Null); + } + + [Test] + public void DetermineSnapshotToPersist_OneAboveMinimumBoundary_ReturnsSnapshot() + { + // Setup: persisted at Block0 (0), latest at 80 + // After persist would be at 15, leaving depth of 65 (one above minimum boundary) + StateId persisted = Block0; + StateId latest = CreateStateId(80); + StateId target = CreateStateId(16); + _finalizedStateProvider.SetFinalizedBlockNumber(100); + _finalizedStateProvider.SetFinalizedStateRootAt(16, new Hash256(target.StateRoot.Bytes)); + + using Snapshot expectedSnapshot = CreateSnapshot(persisted, target, compacted: true); + + Snapshot? 
result = _persistenceManager.DetermineSnapshotToPersist(latest); + + Assert.That(result, Is.Not.Null); + + result!.Dispose(); + } + + #endregion + + #region PersistSnapshot Tests + + [Test] + public void PersistSnapshot_WithAccountsStorageAndTrieNodes_WritesToBatch() + { + // Arrange + StateId from = Block0; + StateId to = CreateStateId(16); + using Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv); + + // Add accounts + snapshot.Content.Accounts[TestItem.AddressA] = new Account(1, 100); + snapshot.Content.Accounts[TestItem.AddressB] = new Account(2, 200); + + // Add storage + snapshot.Content.Storages[(TestItem.AddressA, (UInt256)1)] = SlotValue.FromSpanWithoutLeadingZero([42]); + snapshot.Content.Storages[(TestItem.AddressA, (UInt256)2)] = SlotValue.FromSpanWithoutLeadingZero([99]); + + // Add trie nodes + TreePath path = TreePath.Empty; + TrieNode node = new TrieNode(NodeType.Leaf, Keccak.Zero); + snapshot.Content.StateNodes[path] = node; + + IPersistence.IWriteBatch writeBatch = Substitute.For(); + _persistence.CreateWriteBatch(from, to).Returns(writeBatch); + + // Act + _persistenceManager.PersistSnapshot(snapshot); + + // Assert + writeBatch.Received().SetAccount(TestItem.AddressA, Arg.Any()); + writeBatch.Received().SetAccount(TestItem.AddressB, Arg.Any()); + writeBatch.Received().SetStorage(TestItem.AddressA, (UInt256)1, Arg.Any()); + writeBatch.Received().SetStorage(TestItem.AddressA, (UInt256)2, Arg.Any()); + writeBatch.Received().SetStateTrieNode(Arg.Any(), Arg.Any()); + Assert.That(node.IsPersisted, Is.True); + } + + [Test] + public void PersistSnapshot_WithSelfDestructedAddresses_CallsSelfDestruct() + { + // Arrange + StateId from = Block0; + StateId to = CreateStateId(16); + using Snapshot snapshot = CreateSnapshotWithSelfDestruct(from, to); + + IPersistence.IWriteBatch writeBatch = Substitute.For(); + _persistence.CreateWriteBatch(from, to).Returns(writeBatch); + + // Act + 
_persistenceManager.PersistSnapshot(snapshot); + + // Assert + writeBatch.Received().SelfDestruct(TestItem.AddressA); + } + + [Test] + public void PersistSnapshot_EmptySnapshot_CreatesWriteBatch() + { + // Arrange + StateId from = Block0; + StateId to = CreateStateId(16); + using Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv); + + IPersistence.IWriteBatch writeBatch = Substitute.For(); + _persistence.CreateWriteBatch(from, to).Returns(writeBatch); + + // Act + _persistenceManager.PersistSnapshot(snapshot); + + // Assert + _persistence.Received(1).CreateWriteBatch(from, to); + } + + #endregion + + #region AddToPersistence Tests + + [Test] + public void AddToPersistence_WithAvailableSnapshot_PersistsAndUpdatesState() + { + // Arrange + StateId from = Block0; + StateId to = CreateStateId(16); + StateId latest = CreateStateId(100); + + // Create a snapshot that should be persisted + using Snapshot snapshot = CreateSnapshot(from, to, compacted: true); + + _finalizedStateProvider.SetFinalizedBlockNumber(100); + _finalizedStateProvider.SetFinalizedStateRootAt(16, new Hash256(to.StateRoot.Bytes)); + + IPersistence.IWriteBatch writeBatch = Substitute.For(); + _persistence.CreateWriteBatch(Arg.Any(), Arg.Any()).Returns(writeBatch); + + // Act + _persistenceManager.AddToPersistence(latest); + + // Assert + // Verify write batch was created (persistence happened) + _persistence.Received().CreateWriteBatch(from, to); + + // Verify current persisted state was updated + Assert.That(_persistenceManager.GetCurrentPersistedStateId(), Is.EqualTo(to)); + } + + #endregion + + #region FlushToPersistence Tests + + [Test] + public void FlushToPersistence_NoSnapshots_ReturnsCurrentPersistedState() + { + // Arrange - no snapshots added + StateId persisted = Block0; + + // Act + StateId result = _persistenceManager.FlushToPersistence(); + + // Assert + Assert.That(result, Is.EqualTo(persisted)); + } + + [Test] + public void 
FlushToPersistence_WithFinalizedSnapshots_PersistsFinalizedFirst() + { + // Arrange + StateId state16 = CreateStateId(16); + StateId state32 = CreateStateId(32); + + _finalizedStateProvider.SetFinalizedBlockNumber(100); + _finalizedStateProvider.SetFinalizedStateRootAt(16, new Hash256(state16.StateRoot.Bytes)); + _finalizedStateProvider.SetFinalizedStateRootAt(32, new Hash256(state32.StateRoot.Bytes)); + + using Snapshot snapshot1 = CreateSnapshot(Block0, state16, compacted: true); + using Snapshot snapshot2 = CreateSnapshot(state16, state32, compacted: true); + + IPersistence.IWriteBatch writeBatch = Substitute.For(); + _persistence.CreateWriteBatch(Arg.Any(), Arg.Any()).Returns(writeBatch); + + // Act + StateId result = _persistenceManager.FlushToPersistence(); + + // Assert + Assert.That(result, Is.EqualTo(state32)); + _persistence.Received().CreateWriteBatch(Block0, state16); + _persistence.Received().CreateWriteBatch(state16, state32); + } + + [Test] + public void FlushToPersistence_WithUnfinalizedSnapshots_FallsBackToFirstAvailable() + { + // Arrange - no finalization info available + StateId state16 = CreateStateId(16); + _finalizedStateProvider.SetFinalizedBlockNumber(0); // Nothing finalized + + using Snapshot snapshot = CreateSnapshot(Block0, state16, compacted: true); + + IPersistence.IWriteBatch writeBatch = Substitute.For(); + _persistence.CreateWriteBatch(Arg.Any(), Arg.Any()).Returns(writeBatch); + + // Act + StateId result = _persistenceManager.FlushToPersistence(); + + // Assert + Assert.That(result, Is.EqualTo(state16)); + _persistence.Received().CreateWriteBatch(Block0, state16); + } + + [Test] + public void FlushToPersistence_PrefersFinalizedOverUnfinalized() + { + // Arrange - two snapshots at same block, one finalized + StateId finalizedState = CreateStateId(16, rootByte: 1); + StateId unfinalizedState = CreateStateId(16, rootByte: 2); + + _finalizedStateProvider.SetFinalizedBlockNumber(100); + 
_finalizedStateProvider.SetFinalizedStateRootAt(16, new Hash256(finalizedState.StateRoot.Bytes)); + + // Create both snapshots + using Snapshot finalizedSnapshot = CreateSnapshot(Block0, finalizedState, compacted: true); + using Snapshot unfinalizedSnapshot = CreateSnapshot(Block0, unfinalizedState, compacted: true); + + IPersistence.IWriteBatch writeBatch = Substitute.For(); + _persistence.CreateWriteBatch(Arg.Any(), Arg.Any()).Returns(writeBatch); + + // Act + StateId result = _persistenceManager.FlushToPersistence(); + + // Assert - should persist finalized state + Assert.That(result.StateRoot.Bytes.ToArray(), Is.EqualTo(finalizedState.StateRoot.Bytes.ToArray())); + } + + [Test] + public void FlushToPersistence_PersistsMultipleSnapshots_InOrder() + { + // Arrange + StateId state1 = CreateStateId(1); + StateId state2 = CreateStateId(2); + StateId state3 = CreateStateId(3); + + // No finalization - will use first available + _finalizedStateProvider.SetFinalizedBlockNumber(0); + + using Snapshot snapshot1 = CreateSnapshot(Block0, state1, compacted: false); + using Snapshot snapshot2 = CreateSnapshot(state1, state2, compacted: false); + using Snapshot snapshot3 = CreateSnapshot(state2, state3, compacted: false); + + IPersistence.IWriteBatch writeBatch = Substitute.For(); + _persistence.CreateWriteBatch(Arg.Any(), Arg.Any()).Returns(writeBatch); + + // Act + StateId result = _persistenceManager.FlushToPersistence(); + + // Assert + Assert.That(result, Is.EqualTo(state3)); + Received.InOrder(() => + { + _persistence.CreateWriteBatch(Block0, state1); + _persistence.CreateWriteBatch(state1, state2); + _persistence.CreateWriteBatch(state2, state3); + }); + } + + #endregion + + #region Helper Classes + + private class TestFinalizedStateProvider : IFinalizedStateProvider + { + private long _finalizedBlockNumber; + private readonly Dictionary _finalizedStateRoots = new(); + + public long FinalizedBlockNumber => _finalizedBlockNumber; + + public void 
SetFinalizedBlockNumber(long blockNumber) => _finalizedBlockNumber = blockNumber; + + public void SetFinalizedStateRootAt(long blockNumber, Hash256 stateRoot) => _finalizedStateRoots[blockNumber] = stateRoot; + + public Hash256? GetFinalizedStateRootAt(long blockNumber) => + _finalizedStateRoots.TryGetValue(blockNumber, out Hash256? root) ? root : null; + } + + #endregion +} diff --git a/src/Nethermind/Nethermind.State.Flat.Test/PersistenceScenario.cs b/src/Nethermind/Nethermind.State.Flat.Test/PersistenceScenario.cs new file mode 100644 index 00000000000..8c772d79d1d --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat.Test/PersistenceScenario.cs @@ -0,0 +1,878 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + + +using System; +using System.Collections.Generic; +using Autofac; +using Nethermind.Api; +using Nethermind.Config; +using Nethermind.Core; +using Nethermind.Core.Crypto; +using Nethermind.Core.Extensions; +using Nethermind.Core.Test.Builders; +using Nethermind.Core.Test.IO; +using Nethermind.Db; +using Nethermind.Init.Modules; +using Nethermind.Int256; +using Nethermind.Logging; +using Nethermind.Serialization.Rlp; +using Nethermind.Specs.ChainSpecStyle; +using Nethermind.State.Flat.Persistence; +using Nethermind.Trie; +using NSubstitute; +using NUnit.Framework; + +namespace Nethermind.State.Flat.Test; + +[TestFixtureSource(nameof(TestConfigs))] +public class PersistenceScenario(PersistenceScenario.TestConfiguration configuration) +{ + private TempPath _tmpDirectory = null!; + private IContainer _container = null!; + private IPersistence _persistence = null!; + + // Helper method to convert TryGetSlot to GetSlot-like behavior + private static byte[]? 
GetSlot(IPersistence.IPersistenceReader reader, Address address, in UInt256 slot) + { + SlotValue slotValue = default; + if (reader.TryGetSlot(address, in slot, ref slotValue)) + { + return slotValue.ToEvmBytes(); + } + return null; + } + + public record TestConfiguration(FlatDbConfig FlatDbConfig, string Name) + { + public override string ToString() => Name; + } + + public static IEnumerable TestConfigs() + { + yield return new TestConfiguration(new FlatDbConfig() + { + Enabled = true, + Layout = FlatLayout.Flat + }, "Flat"); + yield return new TestConfiguration(new FlatDbConfig() + { + Enabled = true, + Layout = FlatLayout.FlatInTrie + }, "FlatInTrie"); + yield return new TestConfiguration(new FlatDbConfig() + { + Enabled = true, + Layout = FlatLayout.PreimageFlat + }, "PreimageFlat"); + } + + [SetUp] + public void Setup() + { + _tmpDirectory = TempPath.GetTempDirectory(); + _container = new ContainerBuilder() + .AddModule( + new NethermindModule( + new ChainSpec(), + new ConfigProvider( + configuration.FlatDbConfig, + new InitConfig() + { + BaseDbPath = _tmpDirectory.Path, + }), + LimboLogs.Instance)) + .AddSingleton(Substitute.For()) + .Build(); + + _persistence = _container.Resolve(); + } + + [TearDown] + public void TearDown() + { + _container.Dispose(); + _tmpDirectory.Dispose(); + } + + [Test] + public void TestCanWriteAccount() + { + Account acc = TestItem.GenerateIndexedAccount(0); + Address address = TestItem.AddressA; + + using (IPersistence.IPersistenceReader reader = _persistence.CreateReader()) + { + Assert.That(reader.GetAccount(address), Is.Null); + } + + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + writer.SetAccount(address, acc); + } + + using (IPersistence.IPersistenceReader reader = _persistence.CreateReader()) + { + Assert.That(reader.GetAccount(address), Is.EqualTo(acc)); + } + } + + [Test] + public void TestCanAccountSnapshot() + { + Address address = 
TestItem.AddressA; + + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + writer.SetAccount(address, TestItem.GenerateIndexedAccount(0)); + } + + using IPersistence.IPersistenceReader reader1 = _persistence.CreateReader(); + + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + writer.SetAccount(address, TestItem.GenerateIndexedAccount(1)); + } + + using IPersistence.IPersistenceReader reader2 = _persistence.CreateReader(); + + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + writer.SetAccount(address, TestItem.GenerateIndexedAccount(2)); + } + + using IPersistence.IPersistenceReader reader3 = _persistence.CreateReader(); + + Assert.That(reader1.GetAccount(address), Is.EqualTo(TestItem.GenerateIndexedAccount(0))); + Assert.That(reader2.GetAccount(address), Is.EqualTo(TestItem.GenerateIndexedAccount(1))); + Assert.That(reader3.GetAccount(address), Is.EqualTo(TestItem.GenerateIndexedAccount(2))); + } + + [Test] + public void TestSelfDestructAccount() + { + Account acc = TestItem.GenerateIndexedAccount(0); + Account acc2 = TestItem.GenerateIndexedAccount(1); + Address address = TestItem.AddressA; + Address address2 = TestItem.AddressB; + + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + writer.SetAccount(address, acc); + writer.SetStorage(address, UInt256.MinValue, SlotValue.FromSpanWithoutLeadingZero([1])); + writer.SetStorage(address, 123, SlotValue.FromSpanWithoutLeadingZero([2])); + writer.SetStorage(address, UInt256.MaxValue, SlotValue.FromSpanWithoutLeadingZero([3])); + + writer.SetAccount(address2, acc2); + writer.SetStorage(address2, UInt256.MinValue, SlotValue.FromSpanWithoutLeadingZero([1])); + writer.SetStorage(address2, 
123, SlotValue.FromSpanWithoutLeadingZero([2])); + writer.SetStorage(address2, UInt256.MaxValue, SlotValue.FromSpanWithoutLeadingZero([3])); + } + + using (IPersistence.IPersistenceReader reader = _persistence.CreateReader()) + { + Assert.That(GetSlot(reader, address, UInt256.MinValue), Is.EqualTo([1])); + Assert.That(GetSlot(reader, address, 123), Is.EqualTo([2])); + Assert.That(GetSlot(reader, address, UInt256.MaxValue), Is.EqualTo([3])); + } + + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + writer.SelfDestruct(address); + } + + using (IPersistence.IPersistenceReader reader = _persistence.CreateReader()) + { + Assert.That(GetSlot(reader, address, UInt256.MinValue), Is.Null); + Assert.That(GetSlot(reader, address, 123), Is.Null); + Assert.That(GetSlot(reader, address, UInt256.MaxValue), Is.Null); + + Assert.That(GetSlot(reader, address2, UInt256.MinValue), Is.EqualTo([1])); + Assert.That(GetSlot(reader, address2, 123), Is.EqualTo([2])); + Assert.That(GetSlot(reader, address2, UInt256.MaxValue), Is.EqualTo([3])); + } + } + + [Test] + public void TestCanWriteAndReadStorage() + { + Account acc = TestItem.GenerateIndexedAccount(0); + Address address = TestItem.AddressA; + + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + writer.SetAccount(address, acc); + } + + // Initially, slots should be null + using (IPersistence.IPersistenceReader reader = _persistence.CreateReader()) + { + Assert.That(GetSlot(reader, address, UInt256.MinValue), Is.Null); + Assert.That(GetSlot(reader, address, UInt256.MaxValue), Is.Null); + } + + // Write various storage slots + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + writer.SetStorage(address, UInt256.MinValue, SlotValue.FromSpanWithoutLeadingZero([1, 2, 3])); + 
writer.SetStorage(address, 42, SlotValue.FromSpanWithoutLeadingZero([0x42])); + writer.SetStorage(address, 12345, SlotValue.FromSpanWithoutLeadingZero([0x10, 0x20, 0x30, 0x40])); + writer.SetStorage(address, UInt256.MaxValue, SlotValue.FromSpanWithoutLeadingZero([0xff, 0xfe, 0xfd])); + } + + // Verify all slots can be read back + using (IPersistence.IPersistenceReader reader = _persistence.CreateReader()) + { + Assert.That(GetSlot(reader, address, UInt256.MinValue), Is.EqualTo([1, 2, 3])); + Assert.That(GetSlot(reader, address, 42), Is.EqualTo([0x42])); + Assert.That(GetSlot(reader, address, 12345), Is.EqualTo([0x10, 0x20, 0x30, 0x40])); + Assert.That(GetSlot(reader, address, UInt256.MaxValue), Is.EqualTo([0xff, 0xfe, 0xfd])); + } + } + + [Test] + public void TestCanStorageSnapshot() + { + Account acc = TestItem.GenerateIndexedAccount(0); + Address address = TestItem.AddressA; + UInt256 slot = 100; + + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + writer.SetAccount(address, acc); + writer.SetStorage(address, slot, SlotValue.FromSpanWithoutLeadingZero([1])); + } + + using IPersistence.IPersistenceReader reader1 = _persistence.CreateReader(); + + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + writer.SetStorage(address, slot, SlotValue.FromSpanWithoutLeadingZero([2])); + } + + using IPersistence.IPersistenceReader reader2 = _persistence.CreateReader(); + + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + writer.SetStorage(address, slot, SlotValue.FromSpanWithoutLeadingZero([3])); + } + + using IPersistence.IPersistenceReader reader3 = _persistence.CreateReader(); + + Assert.That(GetSlot(reader1, address, slot), Is.EqualTo([1])); + Assert.That(GetSlot(reader2, address, slot), Is.EqualTo([2])); + 
Assert.That(GetSlot(reader3, address, slot), Is.EqualTo([3])); + } + + [Test] + public void TestRemoveAccount() + { + Account acc = TestItem.GenerateIndexedAccount(0); + Address address = TestItem.AddressA; + + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + writer.SetAccount(address, acc); + writer.SetStorage(address, 1, SlotValue.FromSpanWithoutLeadingZero([0x01])); + writer.SetStorage(address, 2, SlotValue.FromSpanWithoutLeadingZero([0x02])); + } + + // Verify account and storage exist + using (IPersistence.IPersistenceReader reader = _persistence.CreateReader()) + { + Assert.That(reader.GetAccount(address), Is.EqualTo(acc)); + Assert.That(GetSlot(reader, address, 1), Is.EqualTo([0x01])); + } + + // Remove account + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + writer.SetAccount(address, null); + } + + // Verify account is removed (storage should remain unless explicitly removed) + using (IPersistence.IPersistenceReader reader = _persistence.CreateReader()) + { + Assert.That(reader.GetAccount(address), Is.Null); + } + } + + [Test] + public void TestRawOperations() + { + if (configuration.FlatDbConfig.Layout == FlatLayout.PreimageFlat) Assert.Ignore("Preimage mode does not support raw operation"); + + Account acc = TestItem.GenerateIndexedAccount(0); + Hash256 addrHash = new Hash256(TestItem.AddressA.ToAccountPath.Bytes); + Hash256 slotHash = Keccak.Compute([1, 2, 3]); + + // Test raw account operations + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + writer.SetAccountRaw(addrHash, acc); + } + + using (IPersistence.IPersistenceReader reader = _persistence.CreateReader()) + { + byte[]? 
rawAccount = reader.GetAccountRaw(addrHash); + Assert.That(rawAccount, Is.Not.Null); + + // Decode and verify + Rlp.ValueDecoderContext ctx = new Rlp.ValueDecoderContext(rawAccount); + Assert.That(AccountDecoder.Instance.Decode(ref ctx), Is.EqualTo(acc)); + } + + // Test raw storage operations + byte[] storageValue = Bytes.FromHexString("0x000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); + + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + writer.SetStorageRaw(addrHash, slotHash, SlotValue.FromBytes(storageValue)); + } + + using (IPersistence.IPersistenceReader reader = _persistence.CreateReader()) + { + SlotValue rawValue = default; + Assert.That(reader.TryGetStorageRaw(addrHash, slotHash, ref rawValue), Is.EqualTo(storageValue is not null)); + if (storageValue is not null) + { + Assert.That(rawValue.ToEvmBytes(), Is.EqualTo(storageValue.WithoutLeadingZeros().ToArray())); + } + } + } + + [Test] + public void TestConcurrentSnapshots() + { + Account acc = TestItem.GenerateIndexedAccount(0); + Address address = TestItem.AddressA; + UInt256 slot1 = 100; + UInt256 slot2 = 200; + + // Initial state + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + writer.SetAccount(address, acc); + writer.SetStorage(address, slot1, SlotValue.FromSpanWithoutLeadingZero([1])); + writer.SetStorage(address, slot2, SlotValue.FromSpanWithoutLeadingZero([10])); + } + + using IPersistence.IPersistenceReader reader1 = _persistence.CreateReader(); + + // Modify account and slot1 + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + writer.SetAccount(address, TestItem.GenerateIndexedAccount(1)); + writer.SetStorage(address, slot1, SlotValue.FromSpanWithoutLeadingZero([2])); + } + + using IPersistence.IPersistenceReader reader2 
= _persistence.CreateReader(); + + // Modify slot2 + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + writer.SetStorage(address, slot2, SlotValue.FromSpanWithoutLeadingZero([20])); + } + + using IPersistence.IPersistenceReader reader3 = _persistence.CreateReader(); + + // Verify reader1 sees initial state + Assert.That(reader1.GetAccount(address), Is.EqualTo(acc)); + Assert.That(GetSlot(reader1, address, slot1), Is.EqualTo([1])); + Assert.That(GetSlot(reader1, address, slot2), Is.EqualTo([10])); + + // Verify reader2 sees second state + Assert.That(reader2.GetAccount(address), Is.EqualTo(TestItem.GenerateIndexedAccount(1))); + Assert.That(GetSlot(reader2, address, slot1), Is.EqualTo([2])); + Assert.That(GetSlot(reader2, address, slot2), Is.EqualTo([10])); + + // Verify reader3 sees final state + Assert.That(reader3.GetAccount(address), Is.EqualTo(TestItem.GenerateIndexedAccount(1))); + Assert.That(GetSlot(reader3, address, slot1), Is.EqualTo([2])); + Assert.That(GetSlot(reader3, address, slot2), Is.EqualTo([20])); + } + + [Test] + public void TestStorageAcrossMultipleAccounts() + { + Address addr1 = TestItem.AddressA; + Address addr2 = TestItem.AddressB; + Address addr3 = TestItem.AddressC; + UInt256 slot = 42; + + // Write same slot number for different accounts + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + writer.SetAccount(addr1, TestItem.GenerateIndexedAccount(0)); + writer.SetAccount(addr2, TestItem.GenerateIndexedAccount(1)); + writer.SetAccount(addr3, TestItem.GenerateIndexedAccount(2)); + + writer.SetStorage(addr1, slot, SlotValue.FromSpanWithoutLeadingZero([0x11])); + writer.SetStorage(addr2, slot, SlotValue.FromSpanWithoutLeadingZero([0x22])); + writer.SetStorage(addr3, slot, SlotValue.FromSpanWithoutLeadingZero([0x33])); + } + + // Verify each account has its own isolated storage 
+ using (IPersistence.IPersistenceReader reader = _persistence.CreateReader()) + { + Assert.That(GetSlot(reader, addr1, slot), Is.EqualTo([0x11])); + Assert.That(GetSlot(reader, addr2, slot), Is.EqualTo([0x22])); + Assert.That(GetSlot(reader, addr3, slot), Is.EqualTo([0x33])); + } + + // Modify storage for addr2 only + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + writer.SetStorage(addr2, slot, SlotValue.FromSpanWithoutLeadingZero([0xff])); + } + + // Verify only addr2's storage changed + using (IPersistence.IPersistenceReader reader = _persistence.CreateReader()) + { + Assert.That(GetSlot(reader, addr1, slot), Is.EqualTo([0x11])); + Assert.That(GetSlot(reader, addr2, slot), Is.EqualTo([0xff])); + Assert.That(GetSlot(reader, addr3, slot), Is.EqualTo([0x33])); + } + } + + [Test] + public void TestCanWriteAndReadTrieNodes() + { + // State trie nodes with various path lengths + TreePath stateShortPath = TreePath.FromHexString("12345"); // <=5 nibbles -> stateTopNodes + TreePath stateMediumPath = TreePath.FromHexString("123456789abc"); // >5 nibbles -> stateNodes + TreePath stateLongPath = TreePath.FromHexString("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"); + + byte[] stateShortRlp = [0xc1, 0x01]; + byte[] stateMediumRlp = [0xc1, 0x02]; + byte[] stateLongRlp = [0xc1, 0x03]; + + // Storage trie nodes for different accounts + Hash256 account1 = TestItem.KeccakA; + Hash256 account2 = TestItem.KeccakB; + TreePath storageShortPath = TreePath.FromHexString("abcd"); + TreePath storageLongPath = TreePath.FromHexString("abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789"); + + byte[] storage1ShortRlp = [0xc1, 0xaa]; + byte[] storage1LongRlp = [0xc1, 0xab]; + byte[] storage2ShortRlp = [0xc1, 0xbb]; + + // Write all trie nodes + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) 
+ { + // State trie nodes (address=null) + writer.SetStateTrieNode(in stateShortPath, new TrieNode(NodeType.Leaf, stateShortRlp)); + writer.SetStateTrieNode(in stateMediumPath, new TrieNode(NodeType.Leaf, stateMediumRlp)); + writer.SetStateTrieNode(in stateLongPath, new TrieNode(NodeType.Leaf, stateLongRlp)); + + // Storage trie nodes (with account address) + writer.SetStorageTrieNode(account1, in storageShortPath, new TrieNode(NodeType.Leaf, storage1ShortRlp)); + writer.SetStorageTrieNode(account1, in storageLongPath, new TrieNode(NodeType.Leaf, storage1LongRlp)); + writer.SetStorageTrieNode(account2, in storageShortPath, new TrieNode(NodeType.Leaf, storage2ShortRlp)); + } + + // Verify all nodes + using (IPersistence.IPersistenceReader reader = _persistence.CreateReader()) + { + // State trie nodes + Assert.That(reader.TryLoadStateRlp(in stateShortPath, ReadFlags.None), Is.EqualTo(stateShortRlp)); + Assert.That(reader.TryLoadStateRlp(in stateMediumPath, ReadFlags.None), Is.EqualTo(stateMediumRlp)); + Assert.That(reader.TryLoadStateRlp(in stateLongPath, ReadFlags.None), Is.EqualTo(stateLongRlp)); + + // Storage trie nodes - verify account isolation + Assert.That(reader.TryLoadStorageRlp(account1, in storageShortPath, ReadFlags.None), Is.EqualTo(storage1ShortRlp)); + Assert.That(reader.TryLoadStorageRlp(account1, in storageLongPath, ReadFlags.None), Is.EqualTo(storage1LongRlp)); + Assert.That(reader.TryLoadStorageRlp(account2, in storageShortPath, ReadFlags.None), Is.EqualTo(storage2ShortRlp)); + + // State and storage at same path are separate + Assert.That(reader.TryLoadStateRlp(in storageShortPath, ReadFlags.None), Is.Null); + } + } + + [Test] + public void TestTrieNodeSnapshot() + { + TreePath path = TreePath.FromHexString("abcdef"); + + byte[] rlpData1 = [0xc1, 0x01]; + byte[] rlpData2 = [0xc1, 0x02]; + byte[] rlpData3 = [0xc1, 0x03]; + + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, 
WriteFlags.None)) + { + writer.SetStateTrieNode(in path, new TrieNode(NodeType.Leaf, rlpData1)); + } + using IPersistence.IPersistenceReader reader1 = _persistence.CreateReader(); + + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + writer.SetStateTrieNode(in path, new TrieNode(NodeType.Leaf, rlpData2)); + } + using IPersistence.IPersistenceReader reader2 = _persistence.CreateReader(); + + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + writer.SetStateTrieNode(in path, new TrieNode(NodeType.Leaf, rlpData3)); + } + using IPersistence.IPersistenceReader reader3 = _persistence.CreateReader(); + + Assert.That(reader1.TryLoadStateRlp(in path, ReadFlags.None), Is.EqualTo(rlpData1)); + Assert.That(reader2.TryLoadStateRlp(in path, ReadFlags.None), Is.EqualTo(rlpData2)); + Assert.That(reader3.TryLoadStateRlp(in path, ReadFlags.None), Is.EqualTo(rlpData3)); + } + + [Test] + public void TestTrieNodeBoundaryPathLengths() + { + // Test boundary conditions for path length thresholds: + // StateNodesTop: 0-5, StateNodes: 6-15, FallbackNodes: 16+ + // StorageNodes: 0-15, FallbackNodes: 16+ + + // State trie boundary paths + TreePath statePath5 = TreePath.FromHexString("12345"); // exactly 5 -> StateNodesTop + TreePath statePath6 = TreePath.FromHexString("123456"); // exactly 6 -> StateNodes + TreePath statePath15 = TreePath.FromHexString("123456789abcdef"); // exactly 15 -> StateNodes + TreePath statePath16 = TreePath.FromHexString("123456789abcdef0"); // exactly 16 -> FallbackNodes + + // Storage trie boundary paths + Hash256 account = TestItem.KeccakA; + TreePath storagePath15 = TreePath.FromHexString("abcdef123456789"); // exactly 15 -> StorageNodes + TreePath storagePath16 = TreePath.FromHexString("abcdef1234567890"); // exactly 16 -> FallbackNodes + + byte[] rlp5 = [0xc1, 0x05]; + byte[] rlp6 = [0xc1, 
0x06]; + byte[] rlp15 = [0xc1, 0x0f]; + byte[] rlp16 = [0xc1, 0x10]; + byte[] storageRlp15 = [0xc1, 0x1f]; + byte[] storageRlp16 = [0xc1, 0x20]; + + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + writer.SetStateTrieNode(in statePath5, new TrieNode(NodeType.Leaf, rlp5)); + writer.SetStateTrieNode(in statePath6, new TrieNode(NodeType.Leaf, rlp6)); + writer.SetStateTrieNode(in statePath15, new TrieNode(NodeType.Leaf, rlp15)); + writer.SetStateTrieNode(in statePath16, new TrieNode(NodeType.Leaf, rlp16)); + writer.SetStorageTrieNode(account, in storagePath15, new TrieNode(NodeType.Leaf, storageRlp15)); + writer.SetStorageTrieNode(account, in storagePath16, new TrieNode(NodeType.Leaf, storageRlp16)); + } + + using (IPersistence.IPersistenceReader reader = _persistence.CreateReader()) + { + Assert.That(reader.TryLoadStateRlp(in statePath5, ReadFlags.None), Is.EqualTo(rlp5)); + Assert.That(reader.TryLoadStateRlp(in statePath6, ReadFlags.None), Is.EqualTo(rlp6)); + Assert.That(reader.TryLoadStateRlp(in statePath15, ReadFlags.None), Is.EqualTo(rlp15)); + Assert.That(reader.TryLoadStateRlp(in statePath16, ReadFlags.None), Is.EqualTo(rlp16)); + Assert.That(reader.TryLoadStorageRlp(account, in storagePath15, ReadFlags.None), Is.EqualTo(storageRlp15)); + Assert.That(reader.TryLoadStorageRlp(account, in storagePath16, ReadFlags.None), Is.EqualTo(storageRlp16)); + } + } + + [Test] + public void TestSelfDestructTrieNodes() + { + // Test that SelfDestruct removes storage trie nodes for an account + // This tests both shortened storage nodes (path ≤15) and fallback storage nodes (path >15) + + // SelfDestruct takes Address, but SetTrieNodes/TryLoadRlp take Hash256 (keccak of address) + Address address1 = TestItem.AddressA; + Address address2 = TestItem.AddressB; + Hash256 account1Hash = Keccak.Compute(address1.Bytes); + Hash256 account2Hash = Keccak.Compute(address2.Bytes); + + // Various path 
lengths to test both StorageNodes and FallbackNodes columns + TreePath shortPath = TreePath.FromHexString("abcd"); // 4 nibbles -> StorageNodes + TreePath mediumPath = TreePath.FromHexString("123456789abcdef"); // 15 nibbles -> StorageNodes + TreePath longPath = TreePath.FromHexString("0123456789abcdef0123456789abcdef01234567"); // 40 nibbles -> FallbackNodes + + byte[] rlpShort = [0xc1, 0x01]; + byte[] rlpMedium = [0xc1, 0x02]; + byte[] rlpLong = [0xc1, 0x03]; + + // Write trie nodes for both accounts + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + // Account 1 storage trie nodes + writer.SetStorageTrieNode(account1Hash, in shortPath, new TrieNode(NodeType.Leaf, rlpShort)); + writer.SetStorageTrieNode(account1Hash, in mediumPath, new TrieNode(NodeType.Leaf, rlpMedium)); + writer.SetStorageTrieNode(account1Hash, in longPath, new TrieNode(NodeType.Leaf, rlpLong)); + + // Account 2 storage trie nodes (same paths, different account) + writer.SetStorageTrieNode(account2Hash, in shortPath, new TrieNode(NodeType.Leaf, rlpShort)); + writer.SetStorageTrieNode(account2Hash, in mediumPath, new TrieNode(NodeType.Leaf, rlpMedium)); + writer.SetStorageTrieNode(account2Hash, in longPath, new TrieNode(NodeType.Leaf, rlpLong)); + } + + // Verify all nodes exist + using (IPersistence.IPersistenceReader reader = _persistence.CreateReader()) + { + Assert.That(reader.TryLoadStorageRlp(account1Hash, in shortPath, ReadFlags.None), Is.EqualTo(rlpShort)); + Assert.That(reader.TryLoadStorageRlp(account1Hash, in mediumPath, ReadFlags.None), Is.EqualTo(rlpMedium)); + Assert.That(reader.TryLoadStorageRlp(account1Hash, in longPath, ReadFlags.None), Is.EqualTo(rlpLong)); + Assert.That(reader.TryLoadStorageRlp(account2Hash, in shortPath, ReadFlags.None), Is.EqualTo(rlpShort)); + Assert.That(reader.TryLoadStorageRlp(account2Hash, in mediumPath, ReadFlags.None), Is.EqualTo(rlpMedium)); + 
Assert.That(reader.TryLoadStorageRlp(account2Hash, in longPath, ReadFlags.None), Is.EqualTo(rlpLong)); + } + + // SelfDestruct account1 (uses Address, internally converts to hash) + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + writer.SelfDestruct(address1); + } + + // Verify account1's trie nodes are deleted, account2's remain + using (IPersistence.IPersistenceReader reader = _persistence.CreateReader()) + { + // Account 1 nodes should be gone + Assert.That(reader.TryLoadStorageRlp(account1Hash, in shortPath, ReadFlags.None), Is.Null); + Assert.That(reader.TryLoadStorageRlp(account1Hash, in mediumPath, ReadFlags.None), Is.Null); + Assert.That(reader.TryLoadStorageRlp(account1Hash, in longPath, ReadFlags.None), Is.Null); + + // Account 2 nodes should still exist + Assert.That(reader.TryLoadStorageRlp(account2Hash, in shortPath, ReadFlags.None), Is.EqualTo(rlpShort)); + Assert.That(reader.TryLoadStorageRlp(account2Hash, in mediumPath, ReadFlags.None), Is.EqualTo(rlpMedium)); + Assert.That(reader.TryLoadStorageRlp(account2Hash, in longPath, ReadFlags.None), Is.EqualTo(rlpLong)); + } + } + + [Test] + public void TestSelfDestructTrieNodesWithSimilarAddressHashPrefix() + { + // Test that SelfDestruct correctly differentiates accounts even when their hashes + // might share the first 4 bytes (the prefix used in storage key encoding). + // The storage key uses first 4 bytes of hash as prefix, remaining 16 bytes at end. + // This tests that the suffix comparison works correctly. 
+ + // Create two hashes that share the same first 4 bytes but differ in later bytes + // We bypass Address->Hash256 conversion to directly test the hash-based logic + byte[] hash1Bytes = new byte[32]; + byte[] hash2Bytes = new byte[32]; + // Same prefix (first 4 bytes) + hash1Bytes[0] = 0xAA; hash1Bytes[1] = 0xBB; hash1Bytes[2] = 0xCC; hash1Bytes[3] = 0xDD; + hash2Bytes[0] = 0xAA; hash2Bytes[1] = 0xBB; hash2Bytes[2] = 0xCC; hash2Bytes[3] = 0xDD; + // Different suffix (bytes 4-19 are used in the key suffix check) + hash1Bytes[4] = 0x11; + hash2Bytes[4] = 0x22; + + Hash256 account1Hash = new Hash256(hash1Bytes); + Hash256 account2Hash = new Hash256(hash2Bytes); + + TreePath shortPath = TreePath.FromHexString("1234"); // -> StorageNodes + TreePath longPath = TreePath.FromHexString("0123456789abcdef0123456789abcdef01234567"); // -> FallbackNodes + + byte[] rlp1 = [0xc1, 0x11]; + byte[] rlp2 = [0xc1, 0x22]; + + // Write trie nodes using the hashes directly + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + writer.SetStorageTrieNode(account1Hash, in shortPath, new TrieNode(NodeType.Leaf, rlp1)); + writer.SetStorageTrieNode(account1Hash, in longPath, new TrieNode(NodeType.Leaf, rlp1)); + writer.SetStorageTrieNode(account2Hash, in shortPath, new TrieNode(NodeType.Leaf, rlp2)); + writer.SetStorageTrieNode(account2Hash, in longPath, new TrieNode(NodeType.Leaf, rlp2)); + } + + // Verify all nodes exist before SelfDestruct + using (IPersistence.IPersistenceReader reader = _persistence.CreateReader()) + { + Assert.That(reader.TryLoadStorageRlp(account1Hash, in shortPath, ReadFlags.None), Is.EqualTo(rlp1)); + Assert.That(reader.TryLoadStorageRlp(account1Hash, in longPath, ReadFlags.None), Is.EqualTo(rlp1)); + Assert.That(reader.TryLoadStorageRlp(account2Hash, in shortPath, ReadFlags.None), Is.EqualTo(rlp2)); + Assert.That(reader.TryLoadStorageRlp(account2Hash, in longPath, ReadFlags.None), 
Is.EqualTo(rlp2)); + } + + // SelfDestruct account1 using an address that hashes to account1Hash + // Note: We use AddressC since we need a real Address for SelfDestruct + // This tests the general SelfDestruct flow; the prefix collision test above + // verifies the data is correctly written with similar prefixes + Address address1 = TestItem.AddressC; + Hash256 address1Hash = Keccak.Compute(address1.Bytes); + + // Write and then delete using the real address flow + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + writer.SetStorageTrieNode(address1Hash, in shortPath, new TrieNode(NodeType.Leaf, rlp1)); + writer.SetStorageTrieNode(address1Hash, in longPath, new TrieNode(NodeType.Leaf, rlp1)); + } + + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + writer.SelfDestruct(address1); + } + + // Verify address1's trie nodes are deleted + using (IPersistence.IPersistenceReader reader = _persistence.CreateReader()) + { + Assert.That(reader.TryLoadStorageRlp(address1Hash, in shortPath, ReadFlags.None), Is.Null); + Assert.That(reader.TryLoadStorageRlp(address1Hash, in longPath, ReadFlags.None), Is.Null); + + // The manually created hashes should still exist (they weren't self-destructed) + Assert.That(reader.TryLoadStorageRlp(account1Hash, in shortPath, ReadFlags.None), Is.EqualTo(rlp1)); + Assert.That(reader.TryLoadStorageRlp(account2Hash, in shortPath, ReadFlags.None), Is.EqualTo(rlp2)); + } + } + + [Test] + public void TestAccountIterator_EnumeratesAllAccounts() + { + // Write multiple accounts + Address addr1 = TestItem.AddressA; + Address addr2 = TestItem.AddressB; + Address addr3 = TestItem.AddressC; + + Account acc1 = TestItem.GenerateIndexedAccount(1); + Account acc2 = TestItem.GenerateIndexedAccount(2); + Account acc3 = TestItem.GenerateIndexedAccount(3); + + using (IPersistence.IWriteBatch writer 
= _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + writer.SetAccount(addr1, acc1); + writer.SetAccount(addr2, acc2); + writer.SetAccount(addr3, acc3); + } + + // Use iterator to enumerate accounts + using IPersistence.IPersistenceReader reader = _persistence.CreateReader(); + using IPersistence.IFlatIterator iterator = reader.CreateAccountIterator(); + + int count = 0; + while (iterator.MoveNext()) + { + count++; + } + + // All layouts should find 3 accounts + Assert.That(count, Is.EqualTo(3)); + } + + [Test] + public void TestAccountIterator_EmptyState_ReturnsNoAccounts() + { + using IPersistence.IPersistenceReader reader = _persistence.CreateReader(); + using IPersistence.IFlatIterator iterator = reader.CreateAccountIterator(); + + int count = 0; + while (iterator.MoveNext()) + { + count++; + } + + Assert.That(count, Is.EqualTo(0)); + } + + [Test] + public void TestStorageIterator_EnumeratesAccountStorage() + { + // PreimageFlat uses raw address, others use hashed address paths + if (configuration.FlatDbConfig.Layout == FlatLayout.PreimageFlat) + Assert.Ignore("Preimage mode uses raw address format which differs from hashed mode"); + + // Write account with storage + Address addr = TestItem.AddressA; + Account acc = TestItem.GenerateIndexedAccount(0); + + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + writer.SetAccount(addr, acc); + writer.SetStorage(addr, 1, SlotValue.FromSpanWithoutLeadingZero([0x11])); + writer.SetStorage(addr, 42, SlotValue.FromSpanWithoutLeadingZero([0x42])); + writer.SetStorage(addr, 100, SlotValue.FromSpanWithoutLeadingZero([0x64])); + } + + // Use iterator to enumerate storage + using IPersistence.IPersistenceReader reader = _persistence.CreateReader(); + + // Storage keys are written using addr.ToAccountPath (Keccak hash of address) + ValueHash256 accountKey = addr.ToAccountPath; + + using 
IPersistence.IFlatIterator iterator = reader.CreateStorageIterator(accountKey); + + int count = 0; + while (iterator.MoveNext()) + { + count++; + } + + // Should find 3 storage slots + Assert.That(count, Is.EqualTo(3)); + } + + [Test] + public void TestStorageIterator_NoStorage_ReturnsEmpty() + { + if (configuration.FlatDbConfig.Layout == FlatLayout.PreimageFlat) + Assert.Ignore("Preimage mode uses raw address format which differs from hashed mode"); + + // Write account without storage + Address addr = TestItem.AddressA; + Account acc = TestItem.GenerateIndexedAccount(0); + + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + writer.SetAccount(addr, acc); + } + + using IPersistence.IPersistenceReader reader = _persistence.CreateReader(); + + ValueHash256 accountKey = addr.ToAccountPath; + + using IPersistence.IFlatIterator iterator = reader.CreateStorageIterator(accountKey); + + int count = 0; + while (iterator.MoveNext()) + { + count++; + } + + Assert.That(count, Is.EqualTo(0)); + } + + [Test] + public void TestStorageIterator_IsolatesAccountStorage() + { + if (configuration.FlatDbConfig.Layout == FlatLayout.PreimageFlat) + Assert.Ignore("Preimage mode uses raw address format which differs from hashed mode"); + + // Write storage for two accounts + Address addr1 = TestItem.AddressA; + Address addr2 = TestItem.AddressB; + + using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None)) + { + writer.SetAccount(addr1, TestItem.GenerateIndexedAccount(0)); + writer.SetStorage(addr1, 1, SlotValue.FromSpanWithoutLeadingZero([0x11])); + writer.SetStorage(addr1, 2, SlotValue.FromSpanWithoutLeadingZero([0x22])); + + writer.SetAccount(addr2, TestItem.GenerateIndexedAccount(1)); + writer.SetStorage(addr2, 10, SlotValue.FromSpanWithoutLeadingZero([0xaa])); + writer.SetStorage(addr2, 20, 
SlotValue.FromSpanWithoutLeadingZero([0xbb])); + writer.SetStorage(addr2, 30, SlotValue.FromSpanWithoutLeadingZero([0xcc])); + } + + using IPersistence.IPersistenceReader reader = _persistence.CreateReader(); + + // Count storage for addr1 using proper address hash + ValueHash256 accountKey1 = addr1.ToAccountPath; + using IPersistence.IFlatIterator iterator1 = reader.CreateStorageIterator(accountKey1); + int count1 = 0; + while (iterator1.MoveNext()) count1++; + + // Count storage for addr2 using proper address hash + ValueHash256 accountKey2 = addr2.ToAccountPath; + using IPersistence.IFlatIterator iterator2 = reader.CreateStorageIterator(accountKey2); + int count2 = 0; + while (iterator2.MoveNext()) count2++; + + Assert.That(count1, Is.EqualTo(2)); + Assert.That(count2, Is.EqualTo(3)); + } + + [Test] + public void TestIsPreimageMode_ReturnsCorrectValue() + { + using IPersistence.IPersistenceReader reader = _persistence.CreateReader(); + + // PreimageFlat layout should return true, others false + bool expected = configuration.FlatDbConfig.Layout == FlatLayout.PreimageFlat; + Assert.That(reader.IsPreimageMode, Is.EqualTo(expected)); + } +} diff --git a/src/Nethermind/Nethermind.State.Flat.Test/ResourcePoolTests.cs b/src/Nethermind/Nethermind.State.Flat.Test/ResourcePoolTests.cs new file mode 100644 index 00000000000..a49df0cd846 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat.Test/ResourcePoolTests.cs @@ -0,0 +1,162 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System.Collections.Generic; +using Nethermind.Core; +using Nethermind.Core.Crypto; +using Nethermind.Db; +using NUnit.Framework; + +namespace Nethermind.State.Flat.Test; + +[TestFixture] +public class ResourcePoolTests +{ + private ResourcePool _resourcePool; + private FlatDbConfig _config; + + [SetUp] + public void SetUp() + { + _config = new FlatDbConfig { CompactSize = 2 }; // Small compact size for testing limits + 
_resourcePool = new ResourcePool(_config); + } + + [Test] + public void Test_GetSnapshotContent_ReturnsNewInstance_WhenPoolEmpty() + { + SnapshotContent content = _resourcePool.GetSnapshotContent(ResourcePool.Usage.MainBlockProcessing); + Assert.That(content, Is.Not.Null); + Assert.That(content.Accounts, Is.Empty); + } + + [Test] + public void Test_ReturnSnapshotContent_RecyclesInstance() + { + ResourcePool.Usage usage = ResourcePool.Usage.MainBlockProcessing; + SnapshotContent content1 = _resourcePool.GetSnapshotContent(usage); + + content1.Accounts[new AddressAsKey(new Address("0x1234567890123456789012345678901234567890"))] = new Account(1, 2); + Assert.That(content1.Accounts, Is.Not.Empty); + + _resourcePool.ReturnSnapshotContent(usage, content1); + + SnapshotContent content2 = _resourcePool.GetSnapshotContent(usage); + + // Should be the same instance (LIFO) + Assert.That(content2, Is.SameAs(content1)); + // Should have been reset + Assert.That(content2.Accounts, Is.Empty); + } + + [Test] + public void Test_SnapshotContentPool_RespectsCapacity() + { + // For MainBlockProcessing: capacity = config.CompactSize + 8 = 2 + 8 = 10 + ResourcePool.Usage usage = ResourcePool.Usage.MainBlockProcessing; + int capacity = _config.CompactSize + 8; + List items = new List(); + + for (int i = 0; i < capacity + 5; i++) + { + items.Add(_resourcePool.GetSnapshotContent(usage)); + } + + foreach (SnapshotContent item in items) + { + _resourcePool.ReturnSnapshotContent(usage, item); + } + + // Now if we get 'capacity' items, they should be from the pool + for (int i = 0; i < capacity; i++) + { + SnapshotContent content = _resourcePool.GetSnapshotContent(usage); + Assert.That(items.Contains(content), Is.True, $"Item {i} should be from recycled items"); + } + + // The next one should be a new instance because pool is empty + SnapshotContent newContent = _resourcePool.GetSnapshotContent(usage); + Assert.That(items.Contains(newContent), Is.False, "Should be a new instance"); + } + + 
[Test] + public void Test_GetCachedResource_ReturnsNewInstance_WhenPoolEmpty() + { + TransientResource resource = _resourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing); + Assert.That(resource, Is.Not.Null); + Assert.That(resource.size.PrewarmedAddressSize, Is.EqualTo(1024)); + Assert.That(resource.size.NodesCacheSize, Is.EqualTo(1024)); + } + + [Test] + public void Test_ReturnCachedResource_RecyclesInstance() + { + ResourcePool.Usage usage = ResourcePool.Usage.MainBlockProcessing; + TransientResource resource1 = _resourcePool.GetCachedResource(usage); + + _resourcePool.ReturnCachedResource(usage, resource1); + + TransientResource resource2 = _resourcePool.GetCachedResource(usage); + + // Should be the same instance + Assert.That(resource2, Is.SameAs(resource1)); + } + + [Test] + public void Test_CachedResourcePool_RespectsCapacity() + { + // For MainBlockProcessing: capacity = 2 + ResourcePool.Usage usage = ResourcePool.Usage.MainBlockProcessing; + + TransientResource r1 = _resourcePool.GetCachedResource(usage); + TransientResource r2 = _resourcePool.GetCachedResource(usage); + TransientResource r3 = _resourcePool.GetCachedResource(usage); + + _resourcePool.ReturnCachedResource(usage, r1); + _resourcePool.ReturnCachedResource(usage, r2); + _resourcePool.ReturnCachedResource(usage, r3); // This one should be disposed + + TransientResource p1 = _resourcePool.GetCachedResource(usage); + TransientResource p2 = _resourcePool.GetCachedResource(usage); + TransientResource p3 = _resourcePool.GetCachedResource(usage); + + Assert.That(p1, Is.SameAs(r2)); // LIFO + Assert.That(p2, Is.SameAs(r1)); + Assert.That(p3, Is.Not.SameAs(r3)); + } + + [Test] + public void Test_CreateSnapshot_UsesPool() + { + StateId from = new StateId(1, Keccak.Zero); + StateId to = new StateId(2, Keccak.Zero); + ResourcePool.Usage usage = ResourcePool.Usage.MainBlockProcessing; + + SnapshotContent content; + using (Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, usage)) + 
{ + Assert.That(snapshot.From, Is.EqualTo(from)); + Assert.That(snapshot.To, Is.EqualTo(to)); + Assert.That(snapshot.Content, Is.Not.Null); + content = snapshot.Content; + } + + SnapshotContent recycledContent = _resourcePool.GetSnapshotContent(usage); + Assert.That(recycledContent, Is.SameAs(content)); + } + + [Test] + public void Test_DifferentUsages_HaveIndependentPools() + { + SnapshotContent contentMain = _resourcePool.GetSnapshotContent(ResourcePool.Usage.MainBlockProcessing); + SnapshotContent contentCompactor = _resourcePool.GetSnapshotContent(ResourcePool.Usage.Compactor); + + _resourcePool.ReturnSnapshotContent(ResourcePool.Usage.MainBlockProcessing, contentMain); + + SnapshotContent contentCompactor2 = _resourcePool.GetSnapshotContent(ResourcePool.Usage.Compactor); + Assert.That(contentCompactor2, Is.Not.SameAs(contentMain)); + + SnapshotContent contentMain2 = _resourcePool.GetSnapshotContent(ResourcePool.Usage.MainBlockProcessing); + Assert.That(contentMain2, Is.SameAs(contentMain)); + } +} diff --git a/src/Nethermind/Nethermind.State.Flat.Test/SnapshotCompactorTests.cs b/src/Nethermind/Nethermind.State.Flat.Test/SnapshotCompactorTests.cs new file mode 100644 index 00000000000..f58f4773b2d --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat.Test/SnapshotCompactorTests.cs @@ -0,0 +1,489 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System.Collections.Generic; +using System.Linq; +using Nethermind.Core; +using Nethermind.Core.Collections; +using Nethermind.Core.Crypto; +using Nethermind.Core.Test.Builders; +using Nethermind.Db; +using Nethermind.Int256; +using Nethermind.Logging; +using Nethermind.Trie; +using NUnit.Framework; + +namespace Nethermind.State.Flat.Test; + +[TestFixture] +public class SnapshotCompactorTests +{ + private SnapshotCompactor _compactor = null!; + private ResourcePool _resourcePool = null!; + private FlatDbConfig _config = null!; + private SnapshotRepository 
_snapshotRepository; + + [SetUp] + public void SetUp() + { + _config = new FlatDbConfig { CompactSize = 16 }; + _resourcePool = new ResourcePool(_config); + _snapshotRepository = new SnapshotRepository(LimboLogs.Instance); + _compactor = new SnapshotCompactor(_config, _resourcePool, _snapshotRepository, LimboLogs.Instance); + } + + private static StateId CreateStateId(long blockNumber, byte rootByte = 0) + { + byte[] bytes = new byte[32]; + bytes[0] = rootByte; + return new StateId(blockNumber, new ValueHash256(bytes)); + } + + private void BuildSnapshotChain(long startBlock, long endBlock) + { + for (long i = startBlock; i < endBlock; i++) + { + StateId from = CreateStateId(i); + StateId to = CreateStateId(i + 1); + Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv); + + bool added = _snapshotRepository.TryAddSnapshot(snapshot); + Assert.That(added, Is.True, $"Failed to add snapshot {i}->{i + 1}"); + _snapshotRepository.AddStateId(to); + } + } + + private static void AssertSlotValueEqual(SlotValue? expected, SlotValue? actual) + { + Assert.That(actual, Is.Not.Null); + Assert.That(actual!.Value.AsReadOnlySpan.ToArray(), Is.EqualTo(expected!.Value.AsReadOnlySpan.ToArray())); + } + + private static void AssertAccountSame(Account expected, Account? 
actual) + { + Assert.That(actual, Is.Not.Null); + Assert.That(actual!.Nonce, Is.EqualTo(expected.Nonce)); + Assert.That(actual!.Balance, Is.EqualTo(expected.Balance)); + } + + [Test] + public void CompactSnapshotBundle_SingleSnapshot_ReturnsCorrectStateIds() + { + StateId from = new StateId(0, Keccak.Zero); + StateId to = new StateId(1, Keccak.Zero); + + using Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv); + Address address = new Address("0x1234567890123456789012345678901234567890"); + snapshot.Content.Accounts[address] = new Account(1, 100); + + SnapshotPooledList snapshots = new SnapshotPooledList(1); + snapshots.Add(snapshot); + + using Snapshot compacted = _compactor.CompactSnapshotBundle(snapshots); + + Assert.That(compacted.From.BlockNumber, Is.EqualTo(0)); + Assert.That(compacted.To.BlockNumber, Is.EqualTo(1)); + } + + [Test] + public void CompactSnapshotBundle_SingleSnapshot_PreservesAllDataTypes() + { + StateId from = new StateId(0, Keccak.Zero); + StateId to = new StateId(1, Keccak.Zero); + + using Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv); + Address address1 = new Address("0x1111111111111111111111111111111111111111"); + Address address2 = new Address("0x2222222222222222222222222222222222222222"); + UInt256 storageIndex1 = new UInt256(1); + UInt256 storageIndex2 = new UInt256(2); + TreePath statePath1 = TreePath.FromHexString("abcd"); + TreePath statePath2 = TreePath.FromHexString("ef01"); + TreePath storageNodePath1 = TreePath.FromHexString("1234"); + TreePath storageNodePath2 = TreePath.FromHexString("5678"); + Hash256 storageNodeHash1 = Keccak.Zero; + Hash256 storageNodeHash2 = Keccak.Zero; + SlotValue slotValue1 = new SlotValue(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100 }); + SlotValue slotValue2 = new SlotValue(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 200 }); + + // Add accounts + snapshot.Content.Accounts[address1] = new Account(1, 100); + snapshot.Content.Accounts[address2] = new Account(2, 200); + + // Add storage values + snapshot.Content.Storages[(address1, storageIndex1)] = slotValue1; + snapshot.Content.Storages[(address2, storageIndex2)] = slotValue2; + + // Add state nodes + snapshot.Content.StateNodes[statePath1] = new TrieNode(NodeType.Leaf, storageNodeHash1); + snapshot.Content.StateNodes[statePath2] = new TrieNode(NodeType.Branch, storageNodeHash2); + + // Add storage nodes + Hash256 address1Hash = address1.ToAccountPath.ToCommitment(); + Hash256 address2Hash = address2.ToAccountPath.ToCommitment(); + snapshot.Content.StorageNodes[(address1Hash, storageNodePath1)] = new TrieNode(NodeType.Leaf, storageNodeHash1); + snapshot.Content.StorageNodes[(address2Hash, storageNodePath2)] = new TrieNode(NodeType.Branch, storageNodeHash2); + + SnapshotPooledList snapshots = new SnapshotPooledList(1); + snapshots.Add(snapshot); + + using Snapshot compacted = _compactor.CompactSnapshotBundle(snapshots); + + // Verify all data types are preserved + Assert.That(compacted.AccountsCount, Is.EqualTo(2)); + AssertAccountSame(new Account(1, 100), compacted.Content.Accounts[address1]); + AssertAccountSame(new Account(2, 200), compacted.Content.Accounts[address2]); + + Assert.That(compacted.StoragesCount, Is.EqualTo(2)); + AssertSlotValueEqual(slotValue1, compacted.Content.Storages[(address1, storageIndex1)]); + AssertSlotValueEqual(slotValue2, compacted.Content.Storages[(address2, storageIndex2)]); + + Assert.That(compacted.StateNodesCount, Is.EqualTo(2)); + Assert.That(compacted.Content.StateNodes[statePath1].Keccak, Is.EqualTo(storageNodeHash1)); + Assert.That(compacted.Content.StateNodes[statePath2].Keccak, Is.EqualTo(storageNodeHash2)); + + Assert.That(compacted.StorageNodesCount, Is.EqualTo(2)); + } + + [Test] + public void 
CompactSnapshotBundle_MultipleSnapshots_MergesAllDataTypes() + { + Address address1 = new Address("0x1111111111111111111111111111111111111111"); + Address address2 = new Address("0x2222222222222222222222222222222222222222"); + UInt256 storageIndex1 = new UInt256(1); + UInt256 storageIndex2 = new UInt256(2); + TreePath statePath1 = TreePath.FromHexString("abcd"); + TreePath statePath2 = TreePath.FromHexString("ef01"); + TreePath storageNodePath1 = TreePath.FromHexString("1234"); + TreePath storageNodePath2 = TreePath.FromHexString("5678"); + SlotValue slotValue1 = new SlotValue(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100 }); + SlotValue slotValue2 = new SlotValue(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 200 }); + + // First snapshot + StateId from0 = new StateId(0, Keccak.Zero); + StateId to0 = new StateId(1, Keccak.Zero); + using Snapshot snapshot0 = _resourcePool.CreateSnapshot(from0, to0, ResourcePool.Usage.ReadOnlyProcessingEnv); + snapshot0.Content.Accounts[address1] = new Account(1, 100); + snapshot0.Content.Storages[(address1, storageIndex1)] = slotValue1; + snapshot0.Content.StateNodes[statePath1] = new TrieNode(NodeType.Leaf, Keccak.Zero); + Hash256 address1Hash = address1.ToAccountPath.ToCommitment(); + snapshot0.Content.StorageNodes[(address1Hash, storageNodePath1)] = new TrieNode(NodeType.Leaf, Keccak.Zero); + + // Second snapshot with different items + StateId from1 = new StateId(1, Keccak.Zero); + StateId to1 = new StateId(2, Keccak.Zero); + using Snapshot snapshot1 = _resourcePool.CreateSnapshot(from1, to1, ResourcePool.Usage.ReadOnlyProcessingEnv); + snapshot1.Content.Accounts[address2] = new Account(2, 200); + snapshot1.Content.Storages[(address2, storageIndex2)] = slotValue2; + snapshot1.Content.StateNodes[statePath2] = new TrieNode(NodeType.Branch, Keccak.Zero); + Hash256 address2Hash = 
address2.ToAccountPath.ToCommitment(); + snapshot1.Content.StorageNodes[(address2Hash, storageNodePath2)] = new TrieNode(NodeType.Branch, Keccak.Zero); + + SnapshotPooledList snapshots = new SnapshotPooledList(2); + snapshots.Add(snapshot0); + snapshots.Add(snapshot1); + + using Snapshot compacted = _compactor.CompactSnapshotBundle(snapshots); + + // Verify all items from both snapshots are merged + Assert.That(compacted.AccountsCount, Is.EqualTo(2)); + Assert.That(compacted.StoragesCount, Is.EqualTo(2)); + Assert.That(compacted.StateNodesCount, Is.EqualTo(2)); + Assert.That(compacted.StorageNodesCount, Is.EqualTo(2)); + } + + [Test] + public void CompactSnapshotBundle_MultipleSnapshots_LatestValueOverridesForAllDataTypes() + { + Address address = new Address("0x1111111111111111111111111111111111111111"); + UInt256 storageIndex = new UInt256(1); + TreePath statePath = TreePath.FromHexString("abcd"); + TreePath storageNodePath = TreePath.FromHexString("1234"); + SlotValue slotValue1 = new SlotValue(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100 }); + SlotValue slotValue2 = new SlotValue(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 200 }); + + // First snapshot with initial values + StateId from0 = new StateId(0, Keccak.Zero); + StateId to0 = new StateId(1, Keccak.Zero); + using Snapshot snapshot0 = _resourcePool.CreateSnapshot(from0, to0, ResourcePool.Usage.ReadOnlyProcessingEnv); + snapshot0.Content.Accounts[address] = new Account(1, 100); + snapshot0.Content.Storages[(address, storageIndex)] = slotValue1; + snapshot0.Content.StateNodes[statePath] = new TrieNode(NodeType.Leaf, Keccak.Zero); + Hash256 addressHash = address.ToAccountPath.ToCommitment(); + snapshot0.Content.StorageNodes[(addressHash, storageNodePath)] = new TrieNode(NodeType.Leaf, Keccak.Zero); + + // Second snapshot with updated values for same keys + StateId from1 = new 
StateId(1, Keccak.Zero); + StateId to1 = new StateId(2, Keccak.Zero); + using Snapshot snapshot1 = _resourcePool.CreateSnapshot(from1, to1, ResourcePool.Usage.ReadOnlyProcessingEnv); + snapshot1.Content.Accounts[address] = new Account(2, 200); + snapshot1.Content.Storages[(address, storageIndex)] = slotValue2; + snapshot1.Content.StateNodes[statePath] = new TrieNode(NodeType.Branch, Keccak.Zero); + snapshot1.Content.StorageNodes[(addressHash, storageNodePath)] = new TrieNode(NodeType.Branch, Keccak.Zero); + + SnapshotPooledList snapshots = new SnapshotPooledList(2); + snapshots.Add(snapshot0); + snapshots.Add(snapshot1); + + using Snapshot compacted = _compactor.CompactSnapshotBundle(snapshots); + + // Verify latest values override earlier ones + Assert.That(compacted.AccountsCount, Is.EqualTo(1)); + AssertAccountSame(new Account(2, 200), compacted.Content.Accounts[address]); + + Assert.That(compacted.StoragesCount, Is.EqualTo(1)); + AssertSlotValueEqual(slotValue2, compacted.Content.Storages[(address, storageIndex)]); + + Assert.That(compacted.StateNodesCount, Is.EqualTo(1)); + Assert.That(compacted.Content.StateNodes[statePath].NodeType, Is.EqualTo(NodeType.Branch)); + Assert.That(compacted.StorageNodesCount, Is.EqualTo(1)); + } + + [Test] + public void CompactSnapshotBundle_SelfDestructedAddress_RemovesStorageAndNodes() + { + Address address = new Address("0x1111111111111111111111111111111111111111"); + UInt256 storageIndex = new UInt256(1); + TreePath storagePath = TreePath.FromHexString("1234"); + Hash256 storageHash = Keccak.Zero; + SlotValue slotValue = new SlotValue(new byte[32]); + + StateId from0 = new StateId(0, Keccak.Zero); + StateId to0 = new StateId(1, Keccak.Zero); + using Snapshot snapshot0 = _resourcePool.CreateSnapshot(from0, to0, ResourcePool.Usage.ReadOnlyProcessingEnv); + snapshot0.Content.Accounts[address] = new Account(1, 100); + snapshot0.Content.Storages[(address, storageIndex)] = slotValue; + snapshot0.Content.StorageNodes[(address.ToAccountPath.ToCommitment(), storagePath)]
= new TrieNode(NodeType.Leaf, storageHash); + + StateId from1 = new StateId(1, Keccak.Zero); + StateId to1 = new StateId(2, Keccak.Zero); + using Snapshot snapshot1 = _resourcePool.CreateSnapshot(from1, to1, ResourcePool.Usage.ReadOnlyProcessingEnv); + snapshot1.Content.SelfDestructedStorageAddresses[address] = false; + + SnapshotPooledList snapshots = new SnapshotPooledList(2); + snapshots.Add(snapshot0); + snapshots.Add(snapshot1); + + using Snapshot compacted = _compactor.CompactSnapshotBundle(snapshots); + + // Self-destructed address should be tracked, and its storage cleared + Assert.That(compacted.Content.SelfDestructedStorageAddresses.Count, Is.GreaterThan(0)); + Assert.That(compacted.StoragesCount, Is.EqualTo(0)); + Assert.That(compacted.StorageNodesCount, Is.EqualTo(0)); + } + + [Test] + public void CompactSnapshotBundle_NewAccountSelfDestruct_MarkedAsTrue() + { + Address address = new Address("0x1111111111111111111111111111111111111111"); + + StateId from0 = new StateId(0, Keccak.Zero); + StateId to0 = new StateId(1, Keccak.Zero); + using Snapshot snapshot0 = _resourcePool.CreateSnapshot(from0, to0, ResourcePool.Usage.ReadOnlyProcessingEnv); + + StateId from1 = new StateId(1, Keccak.Zero); + StateId to1 = new StateId(2, Keccak.Zero); + using Snapshot snapshot1 = _resourcePool.CreateSnapshot(from1, to1, ResourcePool.Usage.ReadOnlyProcessingEnv); + snapshot1.Content.SelfDestructedStorageAddresses[address] = true; + + SnapshotPooledList snapshots = new SnapshotPooledList(2); + snapshots.Add(snapshot0); + snapshots.Add(snapshot1); + + using Snapshot compacted = _compactor.CompactSnapshotBundle(snapshots); + + // New account marked as self-destructed should be tracked + Assert.That(compacted.Content.SelfDestructedStorageAddresses.Count, Is.GreaterThan(0)); + // Verify at least one entry has true value + Assert.That(compacted.Content.SelfDestructedStorageAddresses.Values.Any(v => v), Is.True); + } + + [Test] + public void 
CompactSnapshotBundle_UsesCompactorUsageAtBoundary() + { + StateId from = new StateId(0, Keccak.Zero); + StateId to = new StateId(16, Keccak.Zero); + + using Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv); + + SnapshotPooledList snapshots = new SnapshotPooledList(1); + snapshots.Add(snapshot); + + using Snapshot compacted = _compactor.CompactSnapshotBundle(snapshots); + + Assert.That(compacted.Usage, Is.EqualTo(ResourcePool.Usage.Compactor)); + } + + [Test] + public void CompactSnapshotBundle_UsesMidCompactorUsageNonBoundary() + { + StateId from = new StateId(0, Keccak.Zero); + StateId to = new StateId(15, Keccak.Zero); + + using Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv); + + SnapshotPooledList snapshots = new SnapshotPooledList(1); + snapshots.Add(snapshot); + + using Snapshot compacted = _compactor.CompactSnapshotBundle(snapshots); + + Assert.That(compacted.Usage, Is.EqualTo(ResourcePool.Usage.MidCompactor)); + } + + #region GetSnapshotsToCompact Tests + + [Test] + public void Debug_AssembleSnapshotsUntil_Works() + { + BuildSnapshotChain(0, 4); + + StateId target = CreateStateId(4); + SnapshotPooledList assembled = _snapshotRepository.AssembleSnapshotsUntil(target, 0, 10); + + Assert.That(assembled.Count, Is.EqualTo(4)); + + foreach (Snapshot s in assembled) s.Dispose(); + assembled.Dispose(); + } + + [Test] + public void GetSnapshotsToCompact_CompactSizeDisabled_ReturnsEmpty() + { + FlatDbConfig config = new FlatDbConfig { CompactSize = 0 }; + SnapshotCompactor compactor = new SnapshotCompactor(config, _resourcePool, _snapshotRepository, LimboLogs.Instance); + + StateId from = new StateId(0, Keccak.Zero); + StateId to = new StateId(16, Keccak.Zero); + using Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv); + + using SnapshotPooledList snapshots = compactor.GetSnapshotsToCompact(snapshot); + + 
Assert.That(snapshots.Count, Is.EqualTo(0)); + } + + [Test] + public void GetSnapshotsToCompact_BlockZero_ReturnsEmpty() + { + StateId from = new StateId(0, Keccak.Zero); + StateId to = new StateId(0, Keccak.Zero); + using Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv); + + using SnapshotPooledList snapshots = _compactor.GetSnapshotsToCompact(snapshot); + + Assert.That(snapshots.Count, Is.EqualTo(0)); + } + + [Test] + public void GetSnapshotsToCompact_NotCompactionBlock_ReturnsEmpty() + { + StateId from = new StateId(0, Keccak.Zero); + StateId to = new StateId(5, Keccak.Zero); + using Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv); + + using SnapshotPooledList snapshots = _compactor.GetSnapshotsToCompact(snapshot); + + Assert.That(snapshots.Count, Is.EqualTo(0)); + } + + [Test] + public void GetSnapshotsToCompact_FullCompaction_ReturnsMultipleSnapshots() + { + // Build chain of 15 snapshots (0->1, 1->2, ..., 14->15) + BuildSnapshotChain(0, 15); + + // Add the 16th snapshot (15->16) separately + StateId targetFrom = CreateStateId(15); + StateId targetTo = CreateStateId(16); + Snapshot targetSnapshot = _resourcePool.CreateSnapshot(targetFrom, targetTo, ResourcePool.Usage.ReadOnlyProcessingEnv); + _snapshotRepository.TryAddSnapshot(targetSnapshot); + _snapshotRepository.AddStateId(targetTo); + + using SnapshotPooledList snapshots = _compactor.GetSnapshotsToCompact(targetSnapshot); + + Assert.That(snapshots.Count, Is.EqualTo(16)); + } + + [Test] + public void GetSnapshotsToCompact_MidCompaction_ReturnsMultipleSnapshots() + { + FlatDbConfig config = new FlatDbConfig { CompactSize = 16, MidCompactSize = 8 }; + SnapshotCompactor compactor = new SnapshotCompactor(config, _resourcePool, _snapshotRepository, LimboLogs.Instance); + + // Build chain of 7 snapshots (0->1, 1->2, ..., 6->7) + BuildSnapshotChain(0, 7); + + // Add the 8th snapshot (7->8) separately + StateId 
targetFrom = CreateStateId(7); + StateId targetTo = CreateStateId(8); + Snapshot targetSnapshot = _resourcePool.CreateSnapshot(targetFrom, targetTo, ResourcePool.Usage.ReadOnlyProcessingEnv); + _snapshotRepository.TryAddSnapshot(targetSnapshot); + _snapshotRepository.AddStateId(targetTo); + + using SnapshotPooledList snapshots = compactor.GetSnapshotsToCompact(targetSnapshot); + + Assert.That(snapshots.Count, Is.EqualTo(8)); + } + + [Test] + public void GetSnapshotsToCompact_SingleSnapshot_ReturnsEmpty() + { + StateId from = new StateId(0, Keccak.Zero); + StateId to = new StateId(16, Keccak.Zero); + Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv); + _snapshotRepository.TryAddSnapshot(snapshot); + _snapshotRepository.AddStateId(to); + + using Snapshot targetSnapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv); + + using SnapshotPooledList snapshots = _compactor.GetSnapshotsToCompact(targetSnapshot); + + Assert.That(snapshots.Count, Is.EqualTo(0)); + } + + [Test] + public void GetSnapshotsToCompact_IncompleteChain_ReturnsEmpty() + { + // Missing 1 + for (long i = 2; i < 16; i++) + { + StateId from = new StateId(i, Keccak.Zero); + StateId to = new StateId(i + 1, Keccak.Zero); + Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv); + _snapshotRepository.TryAddSnapshot(snapshot); + _snapshotRepository.AddStateId(to); + } + + StateId targetFrom = new StateId(15, Keccak.Zero); + StateId targetTo = new StateId(16, Keccak.Zero); + using Snapshot targetSnapshot = _resourcePool.CreateSnapshot(targetFrom, targetTo, ResourcePool.Usage.ReadOnlyProcessingEnv); + + using SnapshotPooledList snapshots = _compactor.GetSnapshotsToCompact(targetSnapshot); + + Assert.That(snapshots.Count, Is.EqualTo(0)); + } + + #endregion + + #region DoCompactSnapshot Tests + + [Test] + public void DoCompactSnapshot_ValidChain_CreatesCompactedSnapshot() + { + // 
Build chain of 15 snapshots (0->1, 1->2, ..., 14->15) + BuildSnapshotChain(0, 15); + + // Add the 16th snapshot (15->16) separately + StateId targetFrom = CreateStateId(15); + StateId targetTo = CreateStateId(16); + Snapshot targetSnapshot = _resourcePool.CreateSnapshot(targetFrom, targetTo, ResourcePool.Usage.ReadOnlyProcessingEnv); + targetSnapshot.Content.Accounts[TestItem.AddressB] = new Account((UInt256)20, (UInt256)2000); + _snapshotRepository.TryAddSnapshot(targetSnapshot); + _snapshotRepository.AddStateId(targetTo); + + _compactor.DoCompactSnapshot(targetSnapshot.To); + + Assert.That(_snapshotRepository.CompactedSnapshotCount, Is.EqualTo(1)); + } + + #endregion + +} diff --git a/src/Nethermind/Nethermind.State.Flat.Test/SnapshotRepositoryTests.cs b/src/Nethermind/Nethermind.State.Flat.Test/SnapshotRepositoryTests.cs new file mode 100644 index 00000000000..587df139881 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat.Test/SnapshotRepositoryTests.cs @@ -0,0 +1,356 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System.Collections.Generic; +using Nethermind.Core; +using Nethermind.Core.Collections; +using Nethermind.Core.Crypto; +using Nethermind.Core.Test.Builders; +using Nethermind.Db; +using Nethermind.Logging; +using NUnit.Framework; + +namespace Nethermind.State.Flat.Test; + +[TestFixture] +public class SnapshotRepositoryTests +{ + private SnapshotRepository _repository = null!; + private ResourcePool _resourcePool = null!; + private FlatDbConfig _config = null!; + + [SetUp] + public void SetUp() + { + _config = new FlatDbConfig { CompactSize = 16 }; + _resourcePool = new ResourcePool(_config); + _repository = new SnapshotRepository(LimboLogs.Instance); + } + + private StateId CreateStateId(long blockNumber, byte rootByte = 0) + { + byte[] bytes = new byte[32]; + bytes[0] = rootByte; + return new StateId(blockNumber, new ValueHash256(bytes)); + } + + private Snapshot 
CreateSnapshot(StateId from, StateId to, bool withData = false) + { + Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv); + if (withData) + { + snapshot.Content.Accounts[TestItem.AddressA] = new Account(1, 100); + } + return snapshot; + } + + private Snapshot AddSnapshotToRepository(long fromBlock, long toBlock, bool compacted = false, bool withData = false) + { + StateId from = CreateStateId(fromBlock); + StateId to = CreateStateId(toBlock); + Snapshot snapshot = CreateSnapshot(from, to, withData); + + bool added = compacted + ? _repository.TryAddCompactedSnapshot(snapshot) + : _repository.TryAddSnapshot(snapshot); + + Assert.That(added, Is.True, $"Failed to add snapshot {fromBlock}->{toBlock}"); + + if (!compacted) + { + _repository.AddStateId(to); + } + + return snapshot; + } + + private List BuildSnapshotChain(long startBlock, long endBlock) + { + List snapshots = new List(); + for (long i = startBlock; i < endBlock; i++) + { + snapshots.Add(AddSnapshotToRepository(i, i + 1)); + } + return snapshots; + } + + #region Snapshot Addition and Removal + + [Test] + public void TryAddSnapshot_NewAndDuplicate_BehavesCorrectly() + { + StateId from = CreateStateId(0); + StateId to = CreateStateId(1); + Snapshot snapshot1 = CreateSnapshot(from, to); + Snapshot snapshot2 = CreateSnapshot(from, to); + + bool added1 = _repository.TryAddSnapshot(snapshot1); + bool added2 = _repository.TryAddSnapshot(snapshot2); + + Assert.That(added1, Is.True); + Assert.That(added2, Is.False); + + snapshot2.Dispose(); + } + + [Test] + public void TryAddCompactedSnapshot_NewAndDuplicate_BehavesCorrectly() + { + StateId from = CreateStateId(0); + StateId to = CreateStateId(1); + Snapshot snapshot1 = CreateSnapshot(from, to); + Snapshot snapshot2 = CreateSnapshot(from, to); + + bool added1 = _repository.TryAddCompactedSnapshot(snapshot1); + bool added2 = _repository.TryAddCompactedSnapshot(snapshot2); + + Assert.That(added1, Is.True); + 
Assert.That(added2, Is.False); + + snapshot2.Dispose(); + } + + [Test] + public void AddAndRemoveSnapshot_CannotLeaseAfterRemoval() + { + StateId from = CreateStateId(0); + StateId to = CreateStateId(1); + Snapshot snapshot = CreateSnapshot(from, to); + _repository.AddStateId(to); + + _repository.TryAddSnapshot(snapshot); + bool leasedBefore = _repository.TryLeaseState(to, out Snapshot? leasedSnapshot); + leasedSnapshot?.Dispose(); + + _repository.RemoveAndReleaseKnownState(to); + bool leasedAfter = _repository.TryLeaseState(to, out _); + + Assert.That(leasedBefore, Is.True); + Assert.That(leasedAfter, Is.False); + } + + [Test] + public void RemoveSnapshot_WithActiveLeases_DisposesWhenAllReleased() + { + AddSnapshotToRepository(0, 1); + StateId to = CreateStateId(1); + + bool leased1 = _repository.TryLeaseState(to, out Snapshot? snapshot1); + bool leased2 = _repository.TryLeaseState(to, out Snapshot? snapshot2); + + Assert.That(leased1, Is.True); + Assert.That(leased2, Is.True); + + _repository.RemoveAndReleaseKnownState(to); + + snapshot1!.Dispose(); + snapshot2!.Dispose(); + + bool leasedAfter = _repository.TryLeaseState(to, out _); + Assert.That(leasedAfter, Is.False); + } + + #endregion + + #region Lease Operations + + [Test] + public void TryLeaseState_ExistingAndNonExistent() + { + AddSnapshotToRepository(0, 1); + + StateId existing = CreateStateId(1); + bool leasedExisting = _repository.TryLeaseState(existing, out Snapshot? snapshot); + Assert.That(leasedExisting, Is.True); + Assert.That(snapshot, Is.Not.Null); + snapshot!.Dispose(); + + StateId nonExistent = CreateStateId(999); + bool leasedNonExistent = _repository.TryLeaseState(nonExistent, out Snapshot? 
nonExistentSnapshot); + Assert.That(leasedNonExistent, Is.False); + Assert.That(nonExistentSnapshot, Is.Null); + } + + [Test] + public void TryLeaseState_MultipleLeases_AllSucceed() + { + AddSnapshotToRepository(0, 1); + + StateId to = CreateStateId(1); + bool leased1 = _repository.TryLeaseState(to, out Snapshot? snapshot1); + bool leased2 = _repository.TryLeaseState(to, out Snapshot? snapshot2); + bool leased3 = _repository.TryLeaseState(to, out Snapshot? snapshot3); + + Assert.That(leased1, Is.True); + Assert.That(leased2, Is.True); + Assert.That(leased3, Is.True); + + Assert.That(snapshot1, Is.SameAs(snapshot2)); + Assert.That(snapshot2, Is.SameAs(snapshot3)); + + snapshot1!.Dispose(); + snapshot2!.Dispose(); + snapshot3!.Dispose(); + } + + [Test] + public void TryLeaseCompactedState_ExistingAndNonExistent() + { + AddSnapshotToRepository(0, 1, compacted: true); + + StateId existing = CreateStateId(1); + bool leasedExisting = _repository.TryLeaseCompactedState(existing, out Snapshot? snapshot); + Assert.That(leasedExisting, Is.True); + Assert.That(snapshot, Is.Not.Null); + snapshot!.Dispose(); + + StateId nonExistent = CreateStateId(999); + bool leasedNonExistent = _repository.TryLeaseCompactedState(nonExistent, out Snapshot? nonExistentSnapshot); + Assert.That(leasedNonExistent, Is.False); + Assert.That(nonExistentSnapshot, Is.Null); + } + + [Test] + public void TryLeaseCompactedState_MultipleLeases_AllSucceed() + { + AddSnapshotToRepository(0, 1, compacted: true); + + StateId to = CreateStateId(1); + bool leased1 = _repository.TryLeaseCompactedState(to, out Snapshot? snapshot1); + bool leased2 = _repository.TryLeaseCompactedState(to, out Snapshot? 
snapshot2); + + Assert.That(leased1, Is.True); + Assert.That(leased2, Is.True); + + snapshot1!.Dispose(); + snapshot2!.Dispose(); + } + + #endregion + + #region Query Operations + + [Test] + public void HasState_ExistingAndNonExistent() + { + AddSnapshotToRepository(0, 1); + StateId existing = CreateStateId(1); + StateId nonExistent = CreateStateId(999); + + bool hasExisting = _repository.HasState(existing); + bool hasNonExistent = _repository.HasState(nonExistent); + + Assert.That(hasExisting, Is.True); + Assert.That(hasNonExistent, Is.False); + } + + [Test] + public void GetSnapshotBeforeStateId_EmptyRepository() + { + StateId target = CreateStateId(10); + + ArrayPoolList states = _repository.GetSnapshotBeforeStateId(target); + + Assert.That(states.Count, Is.EqualTo(0)); + states.Dispose(); + } + + [Test] + public void GetSnapshotBeforeStateId_NoStatesBeforeTarget() + { + StateId state10 = CreateStateId(10); + _repository.AddStateId(state10); + + StateId target = CreateStateId(5); + ArrayPoolList states = _repository.GetSnapshotBeforeStateId(target); + + Assert.That(states.Count, Is.EqualTo(0)); + states.Dispose(); + } + + [Test] + public void GetSnapshotBeforeStateId_StatesBeforeTarget() + { + StateId state1 = CreateStateId(1); + StateId state3 = CreateStateId(3); + StateId state5 = CreateStateId(5); + StateId state7 = CreateStateId(7); + StateId state10 = CreateStateId(10); + + _repository.AddStateId(state1); + _repository.AddStateId(state3); + _repository.AddStateId(state5); + _repository.AddStateId(state7); + _repository.AddStateId(state10); + + StateId target = CreateStateId(6); + ArrayPoolList states = _repository.GetSnapshotBeforeStateId(target); + + Assert.That(states.Count, Is.EqualTo(3)); + states.Dispose(); + } + + #endregion + + #region AssembleSnapshotsUntil + + [Test] + public void AssembleSnapshotsUntil_EmptyRepository() + { + StateId target = CreateStateId(10); + + using SnapshotPooledList assembled = _repository.AssembleSnapshotsUntil(target, 0, 
10); + + Assert.That(assembled.Count, Is.EqualTo(0)); + } + + [Test] + public void AssembleSnapshotsUntil_SingleSnapshot() + { + AddSnapshotToRepository(0, 1); + + StateId target = CreateStateId(1); + using SnapshotPooledList assembled = _repository.AssembleSnapshotsUntil(target, 0, 10); + + Assert.That(assembled.Count, Is.EqualTo(1)); + Assert.That(assembled[0].To, Is.EqualTo(target)); + } + + [Test] + public void AssembleSnapshotsUntil_LinearChain() + { + BuildSnapshotChain(0, 4); + + StateId target = CreateStateId(4); + using SnapshotPooledList assembled = _repository.AssembleSnapshotsUntil(target, 0, 10); + + Assert.That(assembled.Count, Is.EqualTo(4)); + } + + [Test] + public void AssembleSnapshotsUntil_StopsAtStartingBlock() + { + BuildSnapshotChain(0, 5); + + StateId target = CreateStateId(4); + using SnapshotPooledList assembled = _repository.AssembleSnapshotsUntil(target, 2, 10); + + Assert.That(assembled.Count, Is.EqualTo(2)); + } + + [Test] + public void AssembleSnapshotsUntil_PrefersCompacted() + { + StateId from = CreateStateId(0); + StateId to = CreateStateId(1); + + Snapshot compacted = CreateSnapshot(from, to); + _repository.TryAddCompactedSnapshot(compacted); + + using SnapshotPooledList assembled = _repository.AssembleSnapshotsUntil(to, 0, 10); + + Assert.That(assembled.Count, Is.EqualTo(1)); + } + + #endregion +} diff --git a/src/Nethermind/Nethermind.State.Flat.Test/SpmcRingBufferTests.cs b/src/Nethermind/Nethermind.State.Flat.Test/SpmcRingBufferTests.cs new file mode 100644 index 00000000000..e02435042a9 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat.Test/SpmcRingBufferTests.cs @@ -0,0 +1,139 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using FluentAssertions; +using NUnit.Framework; + +namespace Nethermind.State.Flat.Test; + +public class SpmcRingBufferTests +{ + [Test] + public void SmokeTest() 
+ { + SpmcRingBuffer jobQueue = new SpmcRingBuffer(16); + + jobQueue.TryEnqueue(1); + jobQueue.TryEnqueue(2); + jobQueue.TryEnqueue(3); + jobQueue.TryEnqueue(4); + jobQueue.TryEnqueue(5); + + jobQueue.TryDequeue(out int j).Should().BeTrue(); + j.Should().Be(1); + jobQueue.TryDequeue(out j).Should().BeTrue(); + j.Should().Be(2); + jobQueue.TryDequeue(out j).Should().BeTrue(); + j.Should().Be(3); + jobQueue.TryDequeue(out j).Should().BeTrue(); + j.Should().Be(4); + jobQueue.TryDequeue(out j).Should().BeTrue(); + j.Should().Be(5); + } + + [Test] + public void RollingSmokeTest() + { + SpmcRingBuffer jobQueue = new SpmcRingBuffer(16); + + jobQueue.TryEnqueue(1); + jobQueue.TryEnqueue(2); + jobQueue.TryEnqueue(3); + jobQueue.TryEnqueue(4); + jobQueue.TryEnqueue(5); + + int j = 0; + for (int i = 0; i < 100; i++) + { + jobQueue.TryDequeue(out j).Should().BeTrue(); + j.Should().Be(i + 1); + jobQueue.TryEnqueue(i + 5 + 1).Should().BeTrue(); + } + } + + [Test] + public void SmokeTestFullAndRolling() + { + SpmcRingBuffer jobQueue = new SpmcRingBuffer(16); + + for (int i = 0; i < 16; i++) + { + Assert.That(jobQueue.TryEnqueue(1), Is.True); + } + Assert.That(jobQueue.TryEnqueue(1), Is.False); + + for (int i = 0; i < 16; i++) + { + Assert.That(jobQueue.TryDequeue(out _), Is.True); + } + Assert.That(jobQueue.TryDequeue(out _), Is.False); + + for (int i = 0; i < 16; i++) + { + Assert.That(jobQueue.TryEnqueue(1), Is.True); + } + Assert.That(jobQueue.TryEnqueue(1), Is.False); + + for (int i = 0; i < 16; i++) + { + Assert.That(jobQueue.TryDequeue(out _), Is.True); + } + Assert.That(jobQueue.TryDequeue(out _), Is.False); + } + + [Test] + public async Task HighConcurrency_StressTest_NoDataLoss() + { + int Capacity = 1024; + int ItemsToProduce = 1_000_000; + int ConsumerCount = 4; + + SpmcRingBuffer buffer = new SpmcRingBuffer(Capacity); + int[] consumedCounts = new int[ItemsToProduce]; + long totalConsumed = 0; + + // Producer Task (Single Producer) + Task producer = Task.Run(() => + { 
+ for (int i = 0; i < ItemsToProduce; i++) + { + while (!buffer.TryEnqueue(i)) + { + Thread.SpinWait(10); // Wait for space + } + } + }); + + // Consumer Tasks (Multiple Consumers) + Task[] consumers = Enumerable.Range(0, ConsumerCount).Select(_ => Task.Run(() => + { + while (Interlocked.Read(ref totalConsumed) < ItemsToProduce) + { + if (buffer.TryDequeue(out int item)) + { + // Track that this specific item was hit + Interlocked.Increment(ref consumedCounts[item]); + Interlocked.Increment(ref totalConsumed); + } + else + { + Thread.SpinWait(10); + } + } + })).ToArray(); + + await Task.WhenAll(producer); + await Task.WhenAll(consumers); + + // Assertions + Assert.That(ItemsToProduce, Is.EqualTo(Interlocked.Read(ref totalConsumed))); + + for (int i = 0; i < ItemsToProduce; i++) + { + Assert.That(consumedCounts[i] == 1, $"Item {i} was consumed {consumedCounts[i]} times!"); + } + } +} diff --git a/src/Nethermind/Nethermind.State.Flat.Test/TrieNodeCacheTests.cs b/src/Nethermind/Nethermind.State.Flat.Test/TrieNodeCacheTests.cs new file mode 100644 index 00000000000..77737d8747f --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat.Test/TrieNodeCacheTests.cs @@ -0,0 +1,500 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using Nethermind.Core.Crypto; +using Nethermind.Db; +using Nethermind.Logging; +using Nethermind.Trie; +using NUnit.Framework; + +namespace Nethermind.State.Flat.Test; + +[TestFixture] +public class TrieNodeCacheTests +{ + private TrieNodeCache _cache = null!; + private FlatDbConfig _config = null!; + private ResourcePool _resourcePool = null!; + + [SetUp] + public void SetUp() + { + _config = new FlatDbConfig { TrieCacheMemoryBudget = 1024 * 1024 }; + _cache = new TrieNodeCache(_config, LimboLogs.Instance); + _resourcePool = new ResourcePool(_config); + } + + [Test] + public void TryGet_ReturnsNotFound_WhenCacheEmpty() + { + TreePath path = TreePath.FromHexString("1234"); + Hash256 hash = 
Keccak.Compute([1, 2, 3]); + + bool found = _cache.TryGet(null, in path, hash, out TrieNode? node); + + Assert.That(found, Is.False); + Assert.That(node, Is.Null); + } + + [Test] + public void TryGet_ReturnsNotFound_WithStorageAddress_WhenCacheEmpty() + { + Hash256 address = Keccak.Compute([0xaa, 0xbb]); + TreePath path = TreePath.FromHexString("abcd"); + Hash256 hash = Keccak.Compute([4, 5, 6]); + + bool found = _cache.TryGet(address, in path, hash, out TrieNode? node); + + Assert.That(found, Is.False); + Assert.That(node, Is.Null); + } + + [Test] + public void Constructor_WithZeroMemoryTarget_DoesNotThrow() + { + FlatDbConfig config = new FlatDbConfig { TrieCacheMemoryBudget = 0 }; + Assert.DoesNotThrow(() => new TrieNodeCache(config, LimboLogs.Instance)); + } + + [Test] + public void Constructor_WithSmallMemoryTarget_UseMinimumBucketSize() + { + FlatDbConfig config = new FlatDbConfig { TrieCacheMemoryBudget = 1 }; + Assert.DoesNotThrow(() => new TrieNodeCache(config, LimboLogs.Instance)); + } + + [Test] + public void Add_ThenTryGet_ReturnsNode() + { + TreePath path = TreePath.FromHexString("abcd"); + Hash256 hash = Keccak.Compute([1, 2, 3]); + TrieNode trieNode = new TrieNode(NodeType.Leaf, hash); + + TransientResource transientResource = _resourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing); + transientResource.Nodes.Set(null, in path, trieNode); + + _cache.Add(transientResource); + + bool found = _cache.TryGet(null, in path, hash, out TrieNode? 
retrievedNode); + + Assert.That(found, Is.True); + Assert.That(retrievedNode!.Keccak, Is.EqualTo(hash)); + } + + [Test] + public void Add_WithStorageAddress_ThenTryGet_ReturnsNode() + { + Hash256 address = Keccak.Compute([0xaa, 0xbb]); + TreePath path = TreePath.FromHexString("1234"); + Hash256 hash = Keccak.Compute([3, 4, 5]); + TrieNode trieNode = new TrieNode(NodeType.Branch, hash); + + TransientResource transientResource = _resourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing); + transientResource.Nodes.Set(address, in path, trieNode); + + _cache.Add(transientResource); + + bool found = _cache.TryGet(address, in path, hash, out TrieNode? retrievedNode); + + Assert.That(found, Is.True); + Assert.That(retrievedNode!.Keccak, Is.EqualTo(hash)); + } + + [Test] + public void Add_WithZeroMemoryTarget_DoesNotCacheNodes() + { + FlatDbConfig zeroConfig = new FlatDbConfig { TrieCacheMemoryBudget = 0 }; + TrieNodeCache zeroCache = new TrieNodeCache(zeroConfig, LimboLogs.Instance); + ResourcePool zeroResourcePool = new ResourcePool(zeroConfig); + + TreePath path = TreePath.FromHexString("abcd"); + Hash256 hash = Keccak.Compute([1, 2, 3]); + TrieNode trieNode = new TrieNode(NodeType.Leaf, hash); + + TransientResource transientResource = zeroResourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing); + transientResource.Nodes.Set(null, in path, trieNode); + + zeroCache.Add(transientResource); + + bool found = zeroCache.TryGet(null, in path, hash, out TrieNode? 
retrievedNode); + + Assert.That(found, Is.False); + Assert.That(retrievedNode, Is.Null); + } + + [Test] + public void Add_MultipleNodes_AllRetrievable() + { + TransientResource transientResource = _resourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing); + + TreePath path1 = TreePath.FromHexString("1111"); + TreePath path2 = TreePath.FromHexString("2222"); + TreePath path3 = TreePath.FromHexString("3333"); + Hash256 hash1 = Keccak.Compute([1]); + Hash256 hash2 = Keccak.Compute([2]); + Hash256 hash3 = Keccak.Compute([3]); + + transientResource.Nodes.Set(null, in path1, new TrieNode(NodeType.Leaf, hash1)); + transientResource.Nodes.Set(null, in path2, new TrieNode(NodeType.Branch, hash2)); + transientResource.Nodes.Set(null, in path3, new TrieNode(NodeType.Extension, hash3)); + + _cache.Add(transientResource); + + Assert.That(_cache.TryGet(null, in path1, hash1, out _), Is.True); + Assert.That(_cache.TryGet(null, in path2, hash2, out _), Is.True); + Assert.That(_cache.TryGet(null, in path3, hash3, out _), Is.True); + } + + [Test] + public void Add_MixedStateAndStorageNodes_AllRetrievable() + { + TransientResource transientResource = _resourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing); + + Hash256 storageAddress = Keccak.Compute([0xaa]); + TreePath statePath = TreePath.FromHexString("1111"); + TreePath storagePath = TreePath.FromHexString("2222"); + Hash256 stateHash = Keccak.Compute([1]); + Hash256 storageHash = Keccak.Compute([2]); + + transientResource.Nodes.Set(null, in statePath, new TrieNode(NodeType.Leaf, stateHash)); + transientResource.Nodes.Set(storageAddress, in storagePath, new TrieNode(NodeType.Leaf, storageHash)); + + _cache.Add(transientResource); + + Assert.That(_cache.TryGet(null, in statePath, stateHash, out _), Is.True); + Assert.That(_cache.TryGet(storageAddress, in storagePath, storageHash, out _), Is.True); + } + + [Test] + public void TryGet_ReturnsNotFound_WhenHashDoesNotMatch() + { + TreePath path = 
TreePath.FromHexString("abcd"); + Hash256 storedHash = Keccak.Compute([1, 2, 3]); + Hash256 queryHash = Keccak.Compute([4, 5, 6]); + TrieNode trieNode = new TrieNode(NodeType.Leaf, storedHash); + + TransientResource transientResource = _resourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing); + transientResource.Nodes.Set(null, in path, trieNode); + + _cache.Add(transientResource); + + bool found = _cache.TryGet(null, in path, queryHash, out TrieNode? retrievedNode); + + Assert.That(found, Is.False); + Assert.That(retrievedNode, Is.Null); + } + + [Test] + public void Add_OverwritesExistingNode_OnCollision() + { + TreePath path = TreePath.FromHexString("abcd"); + Hash256 hash1 = Keccak.Compute([1, 2, 3]); + Hash256 hash2 = Keccak.Compute([4, 5, 6]); + + TransientResource transientResource1 = _resourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing); + transientResource1.Nodes.Set(null, in path, new TrieNode(NodeType.Leaf, hash1)); + + _cache.Add(transientResource1); + + TransientResource transientResource2 = _resourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing); + transientResource2.Nodes.Set(null, in path, new TrieNode(NodeType.Leaf, hash2)); + + _cache.Add(transientResource2); + + Assert.That(_cache.TryGet(null, in path, hash1, out _), Is.False); + Assert.That(_cache.TryGet(null, in path, hash2, out _), Is.True); + } + + [Test] + public void Sharding_DifferentFirstBytes_GoToDifferentShards() + { + TreePath path1 = TreePath.FromHexString("1000"); + TreePath path2 = TreePath.FromHexString("2000"); + Hash256 hash1 = Keccak.Compute([1]); + Hash256 hash2 = Keccak.Compute([2]); + + TransientResource transientResource = _resourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing); + transientResource.Nodes.Set(null, in path1, new TrieNode(NodeType.Leaf, hash1)); + transientResource.Nodes.Set(null, in path2, new TrieNode(NodeType.Leaf, hash2)); + + _cache.Add(transientResource); + + Assert.That(_cache.TryGet(null, 
in path1, hash1, out _), Is.True); + Assert.That(_cache.TryGet(null, in path2, hash2, out _), Is.True); + } + + [Test] + public void Sharding_StorageNodes_ShardByAddressFirstByte() + { + Hash256 address1 = new Hash256("0x1000000000000000000000000000000000000000000000000000000000000000"); + Hash256 address2 = new Hash256("0x2000000000000000000000000000000000000000000000000000000000000000"); + TreePath path = TreePath.FromHexString("abcd"); + Hash256 hash1 = Keccak.Compute([1]); + Hash256 hash2 = Keccak.Compute([2]); + + TransientResource transientResource = _resourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing); + transientResource.Nodes.Set(address1, in path, new TrieNode(NodeType.Leaf, hash1)); + transientResource.Nodes.Set(address2, in path, new TrieNode(NodeType.Leaf, hash2)); + + _cache.Add(transientResource); + + Assert.That(_cache.TryGet(address1, in path, hash1, out _), Is.True); + Assert.That(_cache.TryGet(address2, in path, hash2, out _), Is.True); + } + + [Test] + public void Clear_RemovesAllCachedNodes() + { + // Add multiple nodes across different shards + TransientResource transientResource = _resourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing); + + TreePath path1 = TreePath.FromHexString("1000"); + TreePath path2 = TreePath.FromHexString("2000"); + TreePath path3 = TreePath.FromHexString("3000"); + Hash256 hash1 = Keccak.Compute([1]); + Hash256 hash2 = Keccak.Compute([2]); + Hash256 hash3 = Keccak.Compute([3]); + + transientResource.Nodes.Set(null, in path1, new TrieNode(NodeType.Leaf, hash1)); + transientResource.Nodes.Set(null, in path2, new TrieNode(NodeType.Branch, hash2)); + transientResource.Nodes.Set(null, in path3, new TrieNode(NodeType.Extension, hash3)); + + _cache.Add(transientResource); + + // Verify nodes are cached + Assert.That(_cache.TryGet(null, in path1, hash1, out _), Is.True); + Assert.That(_cache.TryGet(null, in path2, hash2, out _), Is.True); + Assert.That(_cache.TryGet(null, in path3, hash3, 
out _), Is.True); + + // Clear the cache + _cache.Clear(); + + // Verify all nodes are removed + Assert.That(_cache.TryGet(null, in path1, hash1, out _), Is.False); + Assert.That(_cache.TryGet(null, in path2, hash2, out _), Is.False); + Assert.That(_cache.TryGet(null, in path3, hash3, out _), Is.False); + } + + [Test] + public void Clear_RemovesStateAndStorageNodes() + { + TransientResource transientResource = _resourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing); + + Hash256 storageAddress = Keccak.Compute([0xaa]); + TreePath statePath = TreePath.FromHexString("1111"); + TreePath storagePath = TreePath.FromHexString("2222"); + Hash256 stateHash = Keccak.Compute([1]); + Hash256 storageHash = Keccak.Compute([2]); + + transientResource.Nodes.Set(null, in statePath, new TrieNode(NodeType.Leaf, stateHash)); + transientResource.Nodes.Set(storageAddress, in storagePath, new TrieNode(NodeType.Leaf, storageHash)); + + _cache.Add(transientResource); + + // Verify nodes are cached + Assert.That(_cache.TryGet(null, in statePath, stateHash, out _), Is.True); + Assert.That(_cache.TryGet(storageAddress, in storagePath, storageHash, out _), Is.True); + + // Clear the cache + _cache.Clear(); + + // Verify all nodes are removed + Assert.That(_cache.TryGet(null, in statePath, stateHash, out _), Is.False); + Assert.That(_cache.TryGet(storageAddress, in storagePath, storageHash, out _), Is.False); + } +} + +[TestFixture] +public class ChildCacheTests +{ + private TrieNodeCache.ChildCache _cache = null!; + + [SetUp] + public void SetUp() + { + _cache = new TrieNodeCache.ChildCache(1024); + } + + [Test] + public void TryGet_ReturnsNotFound_WhenCacheEmpty() + { + TreePath path = TreePath.FromHexString("1234"); + Hash256 hash = Keccak.Compute([1, 2, 3]); + + bool found = _cache.TryGet(null, in path, hash, out TrieNode? 
node); + + Assert.That(found, Is.False); + Assert.That(node, Is.Null); + } + + [Test] + public void Set_ThenTryGet_ReturnsNode() + { + TreePath path = TreePath.FromHexString("abcd"); + Hash256 hash = Keccak.Compute([1, 2, 3]); + TrieNode trieNode = new TrieNode(NodeType.Leaf, hash); + + _cache.Set(null, in path, trieNode); + + bool found = _cache.TryGet(null, in path, hash, out TrieNode? retrievedNode); + + Assert.That(found, Is.True); + Assert.That(retrievedNode, Is.SameAs(trieNode)); + } + + [Test] + public void Set_WithStorageAddress_ThenTryGet_ReturnsNode() + { + Hash256 address = Keccak.Compute([0xaa, 0xbb]); + TreePath path = TreePath.FromHexString("1234"); + Hash256 hash = Keccak.Compute([3, 4, 5]); + TrieNode trieNode = new TrieNode(NodeType.Branch, hash); + + _cache.Set(address, in path, trieNode); + + bool found = _cache.TryGet(address, in path, hash, out TrieNode? retrievedNode); + + Assert.That(found, Is.True); + Assert.That(retrievedNode, Is.SameAs(trieNode)); + } + + [Test] + public void TryGet_ReturnsNotFound_WhenHashMismatch() + { + TreePath path = TreePath.FromHexString("abcd"); + Hash256 storedHash = Keccak.Compute([1, 2, 3]); + Hash256 queryHash = Keccak.Compute([4, 5, 6]); + TrieNode trieNode = new TrieNode(NodeType.Leaf, storedHash); + + _cache.Set(null, in path, trieNode); + + bool found = _cache.TryGet(null, in path, queryHash, out TrieNode? 
retrievedNode); + + Assert.That(found, Is.False); + Assert.That(retrievedNode, Is.Null); + } + + [Test] + public void GetOrAdd_ReturnsExistingNode_WhenPresent() + { + TreePath path = TreePath.FromHexString("abcd"); + Hash256 hash = Keccak.Compute([1, 2, 3]); + TrieNode existingNode = new TrieNode(NodeType.Leaf, hash); + TrieNode newNode = new TrieNode(NodeType.Leaf, hash); + + _cache.Set(null, in path, existingNode); + TrieNode result = _cache.GetOrAdd(null, in path, newNode); + + Assert.That(result, Is.SameAs(existingNode)); + } + + [Test] + public void GetOrAdd_AddsAndReturnsNewNode_WhenNotPresent() + { + TreePath path = TreePath.FromHexString("abcd"); + Hash256 hash = Keccak.Compute([1, 2, 3]); + TrieNode newNode = new TrieNode(NodeType.Leaf, hash); + + TrieNode result = _cache.GetOrAdd(null, in path, newNode); + + Assert.That(result, Is.SameAs(newNode)); + Assert.That(_cache.Count, Is.EqualTo(1)); + } + + [Test] + public void GetOrAdd_WithStorageAddress_ReturnsExistingNode() + { + Hash256 address = Keccak.Compute([0xaa, 0xbb]); + TreePath path = TreePath.FromHexString("1234"); + Hash256 hash = Keccak.Compute([1, 2, 3]); + TrieNode existingNode = new TrieNode(NodeType.Branch, hash); + TrieNode newNode = new TrieNode(NodeType.Branch, hash); + + _cache.Set(address, in path, existingNode); + TrieNode result = _cache.GetOrAdd(address, in path, newNode); + + Assert.That(result, Is.SameAs(existingNode)); + } + + [Test] + public void Reset_ClearsCache() + { + TreePath path = TreePath.FromHexString("abcd"); + Hash256 hash = Keccak.Compute([1, 2, 3]); + TrieNode trieNode = new TrieNode(NodeType.Leaf, hash); + + _cache.Set(null, in path, trieNode); + Assert.That(_cache.Count, Is.EqualTo(1)); + + _cache.Reset(); + + Assert.That(_cache.Count, Is.EqualTo(0)); + bool found = _cache.TryGet(null, in path, hash, out _); + Assert.That(found, Is.False); + } + + [Test] + public void Count_IncrementsOnSet() + { + Assert.That(_cache.Count, Is.EqualTo(0)); + + TreePath path1 = 
TreePath.FromHexString("1111"); + TreePath path2 = TreePath.FromHexString("2222"); + Hash256 hash1 = Keccak.Compute([1]); + Hash256 hash2 = Keccak.Compute([2]); + + _cache.Set(null, in path1, new TrieNode(NodeType.Leaf, hash1)); + Assert.That(_cache.Count, Is.EqualTo(1)); + + _cache.Set(null, in path2, new TrieNode(NodeType.Leaf, hash2)); + Assert.That(_cache.Count, Is.EqualTo(2)); + } + + [Test] + public void Capacity_ReturnsExpectedValue() + { + TrieNodeCache.ChildCache smallCache = new TrieNodeCache.ChildCache(16); + Assert.That(smallCache.Capacity, Is.GreaterThan(0)); + } + + [Test] + public void Reset_ResizesCache_WhenCountExceedsCapacity() + { + TrieNodeCache.ChildCache smallCache = new TrieNodeCache.ChildCache(16); + int initialCapacity = smallCache.Capacity; + + for (int i = 0; i < initialCapacity * 3; i++) + { + TreePath path = TreePath.FromHexString(i.ToString("x8")); + Hash256 hash = Keccak.Compute([(byte)i]); + smallCache.Set(null, in path, new TrieNode(NodeType.Leaf, hash)); + } + + smallCache.Reset(); + + Assert.That(smallCache.Count, Is.EqualTo(0)); + Assert.That(smallCache.Capacity, Is.GreaterThanOrEqualTo(initialCapacity)); + } + + [Test] + public void StateNodes_AndStorageNodes_AreSeparate() + { + TreePath path = TreePath.FromHexString("abcd"); + Hash256 stateHash = Keccak.Compute([1, 2, 3]); + Hash256 storageHash = Keccak.Compute([4, 5, 6]); + Hash256 storageAddress = Keccak.Compute([0xaa]); + TrieNode stateNode = new TrieNode(NodeType.Leaf, stateHash); + TrieNode storageNode = new TrieNode(NodeType.Branch, storageHash); + + _cache.Set(null, in path, stateNode); + _cache.Set(storageAddress, in path, storageNode); + + bool foundState = _cache.TryGet(null, in path, stateHash, out TrieNode? retrievedState); + bool foundStorage = _cache.TryGet(storageAddress, in path, storageHash, out TrieNode? 
retrievedStorage); + + Assert.That(foundState, Is.True); + Assert.That(foundStorage, Is.True); + Assert.That(retrievedState, Is.SameAs(stateNode)); + Assert.That(retrievedStorage, Is.SameAs(storageNode)); + } +} diff --git a/src/Nethermind/Nethermind.State.Flat.Test/TrieWarmerTests.cs b/src/Nethermind/Nethermind.State.Flat.Test/TrieWarmerTests.cs new file mode 100644 index 00000000000..fc6f52d5db8 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat.Test/TrieWarmerTests.cs @@ -0,0 +1,91 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System.Threading; +using System.Threading.Tasks; +using Nethermind.Config; +using Nethermind.Core; +using Nethermind.Db; +using Nethermind.Int256; +using Nethermind.Logging; +using Nethermind.State.Flat.ScopeProvider; +using NSubstitute; +using NUnit.Framework; + +namespace Nethermind.State.Flat.Test; + +[TestFixture] +public class TrieWarmerTests +{ + private IProcessExitSource _processExitSource = null!; + private CancellationTokenSource _cts = null!; + private ILogManager _logManager = null!; + private FlatDbConfig _config = null!; + + [SetUp] + public void SetUp() + { + _cts = new CancellationTokenSource(); + _processExitSource = Substitute.For(); + _processExitSource.Token.Returns(_cts.Token); + _logManager = LimboLogs.Instance; + _config = new FlatDbConfig { TrieWarmerWorkerCount = 2 }; + } + + [TearDown] + public void TearDown() => _cts?.Dispose(); + + [Test] + public async Task PushAddressJob_CallsWarmUpStateTrie() + { + TrieWarmer warmer = new TrieWarmer(_processExitSource, _logManager, _config); + + ITrieWarmer.IAddressWarmer addressWarmer = Substitute.For(); + Address address = new Address("0x1234567890123456789012345678901234567890"); + + warmer.PushAddressJob(addressWarmer, address, sequenceId: 1); + + await Task.Delay(200); + + addressWarmer.Received().WarmUpStateTrie(address, 1); + + _cts.Cancel(); + await warmer.DisposeAsync(); + } + + [Test] + 
public async Task PushSlotJob_CallsWarmUpStorageTrie() + { + TrieWarmer warmer = new TrieWarmer(_processExitSource, _logManager, _config); + + ITrieWarmer.IStorageWarmer storageWarmer = Substitute.For(); + UInt256 index = 42; + + warmer.PushSlotJob(storageWarmer, index, sequenceId: 5); + + await Task.Delay(200); + + storageWarmer.Received().WarmUpStorageTrie(index, 5); + + _cts.Cancel(); + await warmer.DisposeAsync(); + } + + [Test] + public async Task PushAddressJob_PassesCorrectSequenceId() + { + TrieWarmer warmer = new TrieWarmer(_processExitSource, _logManager, _config); + + ITrieWarmer.IAddressWarmer addressWarmer = Substitute.For(); + Address address = new Address("0x1111111111111111111111111111111111111111"); + + warmer.PushAddressJob(addressWarmer, address, sequenceId: 999); + + await Task.Delay(200); + + addressWarmer.Received().WarmUpStateTrie(address, 999); + + _cts.Cancel(); + await warmer.DisposeAsync(); + } +} diff --git a/src/Nethermind/Nethermind.State.Flat/FlatDbColumns.cs b/src/Nethermind/Nethermind.State.Flat/FlatDbColumns.cs new file mode 100644 index 00000000000..12dddcbc57f --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/FlatDbColumns.cs @@ -0,0 +1,15 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +namespace Nethermind.State.Flat; + +public enum FlatDbColumns +{ + Metadata, + Account, + Storage, + StateNodes, + StateTopNodes, + StorageNodes, + FallbackNodes, +} diff --git a/src/Nethermind/Nethermind.State.Flat/FlatDbManager.cs b/src/Nethermind/Nethermind.State.Flat/FlatDbManager.cs new file mode 100644 index 00000000000..6bc14238de0 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/FlatDbManager.cs @@ -0,0 +1,411 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System.Collections.Concurrent; +using System.Diagnostics; +using System.Threading.Channels; +using Nethermind.Config; +using 
Nethermind.Core.Collections; +using Nethermind.Core.Crypto; +using Nethermind.Db; +using Nethermind.Logging; +using Nethermind.State.Flat.Persistence; +using Nethermind.Trie.Pruning; + +namespace Nethermind.State.Flat; + +/// +/// The main top level FlatDb orchestrator. +/// +public class FlatDbManager : IFlatDbManager, IAsyncDisposable +{ + private static readonly TimeSpan GatherGiveUpDeadline = TimeSpan.FromSeconds(5); + + private readonly ILogger _logger; + private readonly IPersistenceManager _persistenceManager; + private readonly ISnapshotCompactor _snapshotCompactor; + private readonly ISnapshotRepository _snapshotRepository; + private readonly ITrieNodeCache _trieNodeCache; + private readonly IResourcePool _resourcePool; + + // Cache for assembling `ReadOnlySnapshotBundle`. Its not actually slow, but its called 1.8k per sec so caching + // it save a decent amount of CPU. + private readonly ConcurrentDictionary _readonlySnapshotBundleCache = new(); + + // First it go to here + private readonly Task _compactorTask; + private readonly Channel _compactorJobs; + + // And here in parallel. + // The node cache is kinda important for performance, so we want it populated as quickly as possible. + private readonly Task _populateTrieNodeCacheTask; + private readonly Channel _populateTrieNodeCacheJobs; + + // Then eventually a compacted snapshot will be sent here where this will decide what to persist exactly + private readonly Task _persistenceTask; + private readonly Channel _persistenceJobs; + + private readonly int _compactSize; + + // For debugging. Do the compaction synchronously + private readonly bool _inlineCompaction; + private readonly CancellationTokenSource _cancelTokenSource; + private int _isDisposed = 0; + private readonly bool _enableDetailedMetrics; + + public event EventHandler? 
ReorgBoundaryReached; + + public FlatDbManager( + IResourcePool resourcePool, + IProcessExitSource processExitSource, + ITrieNodeCache trieNodeCache, + ISnapshotCompactor snapshotCompactor, + ISnapshotRepository snapshotRepository, + IPersistenceManager persistenceManager, + IFlatDbConfig config, + ILogManager logManager, + bool enableDetailedMetrics) + { + _trieNodeCache = trieNodeCache; + _snapshotCompactor = snapshotCompactor; + _snapshotRepository = snapshotRepository; + _resourcePool = resourcePool; + _persistenceManager = persistenceManager; + _logger = logManager.GetClassLogger(); + _enableDetailedMetrics = enableDetailedMetrics; + + _compactSize = config.CompactSize; + _inlineCompaction = config.InlineCompaction; + + _cancelTokenSource = CancellationTokenSource.CreateLinkedTokenSource(processExitSource.Token); + + _compactorJobs = Channel.CreateBounded(config.MaxInFlightCompactJob); + _populateTrieNodeCacheJobs = Channel.CreateBounded(1); + _persistenceJobs = Channel.CreateBounded(config.MaxInFlightCompactJob); + + _compactorTask = RunCompactor(_cancelTokenSource.Token); + _populateTrieNodeCacheTask = RunTrieCachePopulator(_cancelTokenSource.Token); + _persistenceTask = RunPersistence(_cancelTokenSource.Token); + } + + private async Task RunCompactor(CancellationToken cancellationToken) + { + try + { + await foreach (StateId stateId in _compactorJobs.Reader.ReadAllAsync(cancellationToken)) + { + await NotifyWhenSlow($"Compacting {stateId}", async () => + { + await RunCompactJob(stateId, cancellationToken); + }); + } + } + catch (OperationCanceledException) + { + } + } + + private async Task RunCompactJobSync(StateId stateId, TransientResource transientResource, CancellationToken cancellationToken) + { + PopulateTrieNodeCache(transientResource); + await RunCompactJob(stateId, cancellationToken); + } + + private async Task RunCompactJob(StateId stateId, CancellationToken cancellationToken) + { + // We do this async because of the lock + 
_snapshotRepository.AddStateId(stateId); + + if (_snapshotCompactor.DoCompactSnapshot(stateId)) + { + ClearReadOnlyBundleCache(); + } + + if (stateId.BlockNumber % _compactSize == 0) + { + // Trigger persistence job. + await _persistenceJobs.Writer.WriteAsync(stateId, cancellationToken); + } + } + + private async Task RunPersistence(CancellationToken cancellationToken) + { + try + { + await foreach (StateId stateId in _persistenceJobs.Reader.ReadAllAsync(cancellationToken)) + { + await NotifyWhenSlow($"Persisting {stateId}", () => + { + PersistIfNeeded(stateId); + return Task.CompletedTask; + }); + } + } + catch (OperationCanceledException) + { + } + } + + private void PersistIfNeeded(in StateId latestSnapshot) + { + _persistenceManager.AddToPersistence(latestSnapshot); + + StateId currentPersistedStateId = _persistenceManager.GetCurrentPersistedStateId(); + if (currentPersistedStateId == StateId.PreGenesis) return; + + _snapshotRepository.RemoveStatesUntil(currentPersistedStateId); + ClearReadOnlyBundleCache(); + ReorgBoundaryReached?.Invoke(this, new ReorgBoundaryReached(currentPersistedStateId.BlockNumber)); + } + + private async Task RunTrieCachePopulator(CancellationToken cancellationToken) + { + try + { + await foreach (TransientResource cachedResource in _populateTrieNodeCacheJobs.Reader.ReadAllAsync(cancellationToken)) + { + await NotifyWhenSlow("Populating trie node cache", () => + { + PopulateTrieNodeCache(cachedResource); + return Task.CompletedTask; + }); + } + } + catch (OperationCanceledException) + { + } + } + + private void PopulateTrieNodeCache(TransientResource transientResource) + { + _trieNodeCache.Add(transientResource); + _resourcePool.ReturnCachedResource(ResourcePool.Usage.MainBlockProcessing, transientResource); + } + + private async Task NotifyWhenSlow(string name, Func closure) + { + TimeSpan slowTime = TimeSpan.FromSeconds(2); + + Task jobTask = Task.Run(async () => + { + long sw = Stopwatch.GetTimestamp(); + try + { + await closure(); + 
} + catch (OperationCanceledException) + { + throw; + } + catch (Exception ex) + { + if (_logger.IsError) _logger.Error($"Error on {name}", ex); + } + if (_logger.IsTrace) _logger.Trace($"{name} took {Stopwatch.GetElapsedTime(sw)}"); + }); + + _ = Task.Run(async () => + { + long sw = Stopwatch.GetTimestamp(); + while (true) + { + Task delayTask = Task.Delay(slowTime); + if (await Task.WhenAny(jobTask, delayTask) == jobTask) break; + if (_logger.IsWarn) _logger.Warn($"Slow task \"{name}\". Took {Stopwatch.GetElapsedTime(sw)}"); + } + }); + + await jobTask; + } + + public SnapshotBundle GatherSnapshotBundle(in StateId baseBlock, ResourcePool.Usage usage) + { + if (_logger.IsTrace) _logger.Trace($"Gathering {baseBlock}."); + return new SnapshotBundle( + GatherReadOnlySnapshotBundle(baseBlock), + _trieNodeCache, + _resourcePool, + usage: usage); + } + + public ReadOnlySnapshotBundle GatherReadOnlySnapshotBundle(in StateId baseBlock) + { + // Note to self: The current verdict on trying to use a linked list of snapshots is that it is error prone and + // hard to pull of due to the constantly moving chain making invalidation hard. + if (_logger.IsTrace) _logger.Trace($"Gathering {baseBlock}."); + + if (baseBlock == StateId.PreGenesis) + { + // Special case for pregenesis. Note: nethermind always tries to generate genesis. + return new ReadOnlySnapshotBundle(new SnapshotPooledList(0), new NoopPersistenceReader(), _enableDetailedMetrics); + } + + long sw = 0; + int attempt = 0; + while (true) + { + // Fastpath: Share a recently created ReadOnlySnapshotBundle + if (_readonlySnapshotBundleCache.TryGetValue(baseBlock, out ReadOnlySnapshotBundle? 
bundle) && bundle.TryLease()) return bundle; + + if (attempt == 1) sw = Stopwatch.GetTimestamp(); + if (attempt != 0) + { + if (Stopwatch.GetElapsedTime(sw) > GatherGiveUpDeadline) + { + throw new InvalidOperationException($"Unable to gather {nameof(ReadOnlySnapshotBundle)} for block {baseBlock} in {Stopwatch.GetElapsedTime(sw)}"); + } + + int delayMs = Math.Min(1 << attempt, 100); // 1, 2, 4, 8, 16, 32, 64, 100ms max + Thread.Sleep(delayMs); + } + + IPersistence.IPersistenceReader persistenceReader = _persistenceManager.LeaseReader(); + SnapshotPooledList snapshots; + try + { + snapshots = _snapshotRepository.AssembleSnapshots( + baseBlock, + persistenceReader.CurrentState, + estimatedSize: Math.Max(1, _snapshotRepository.SnapshotCount / _compactSize)); + } + catch (Exception) + { + persistenceReader.Dispose(); + throw; + } + + + if (snapshots.Count == 0) + { + if (persistenceReader.CurrentState != baseBlock) + { + persistenceReader.Dispose(); + throw new InvalidOperationException($"Unable to gather snapshots for state {baseBlock}."); + } + } + else + { + if (snapshots[0].From != persistenceReader.CurrentState) + { + // Cannot assemble snapshot that reaches the persisted state snapshot. It could be that the snapshots was removed + // concurrently. We will retry. + snapshots.Dispose(); + persistenceReader.Dispose(); + attempt++; + continue; + } + } + + if (_logger.IsTrace) _logger.Trace($"Gathered {baseBlock}. Got {snapshots.Count} known states, Reader state: {persistenceReader.CurrentState}. 
Persistence state: {_persistenceManager.GetCurrentPersistedStateId()}"); + + ReadOnlySnapshotBundle res = new(snapshots, persistenceReader, _enableDetailedMetrics); + + res.TryLease(); + if (!_readonlySnapshotBundleCache.TryAdd(baseBlock, res)) + { + res.Dispose(); + } + + Metrics.SnapshotBundleSize = snapshots.Count; + return res; + } + } + + public void AddSnapshot(Snapshot snapshot, TransientResource transientResource) + { + StateId startingBlock = snapshot.From; + StateId endBlock = snapshot.To; + + if (_logger.IsTrace) _logger.Trace($"Registering {startingBlock.BlockNumber} to {endBlock.BlockNumber}"); + StateId persistedStateId = _persistenceManager.GetCurrentPersistedStateId(); + if (endBlock.BlockNumber <= persistedStateId.BlockNumber) + { + if (_logger.IsWarn) _logger.Warn($"Cannot register snapshot earlier than bigcache. Snapshot number {endBlock.BlockNumber}, bigcache number: {persistedStateId}"); + return; + } + + if (!_snapshotRepository.TryAddSnapshot(snapshot)) + { + if (_logger.IsWarn) _logger.Warn($"State {snapshot.To} already added"); + _resourcePool.ReturnCachedResource(ResourcePool.Usage.MainBlockProcessing, transientResource); + snapshot.Dispose(); + return; + } + + if (_inlineCompaction) + { + RunCompactJobSync(endBlock, transientResource, _cancelTokenSource.Token).Wait(); + } + else + { + if (!_populateTrieNodeCacheJobs.Writer.TryWrite(transientResource)) + { + // Ignore it, just dispose + transientResource.Dispose(); + } + + if (!_compactorJobs.Writer.TryWrite(endBlock)) + { + if (_cancelTokenSource.Token.IsCancellationRequested) return; // When cancelled the queue stop + + if (_logger.IsWarn) _logger.Warn("Compactor job stall! 
Insufficient reorg depth or too slow persistence!"); + _compactorJobs.Writer.WriteAsync(endBlock).AsTask().Wait(); + } + } + } + + private void ClearReadOnlyBundleCache() + { + using ArrayPoolListRef statesToRemove = new(); + statesToRemove.AddRange(_readonlySnapshotBundleCache.Keys); + + foreach (StateId stateId in statesToRemove) + { + if (_readonlySnapshotBundleCache.TryRemove(stateId, out ReadOnlySnapshotBundle? bundle)) + { + bundle.Dispose(); + } + } + } + + public void FlushCache(CancellationToken cancellationToken) + { + if (_logger.IsInfo) _logger.Info("FlatDbManager FlushCache started."); + + StateId persistedState = _persistenceManager.FlushToPersistence(); + + if (cancellationToken.IsCancellationRequested) return; + + _snapshotRepository.RemoveStatesUntil(persistedState); + + ClearReadOnlyBundleCache(); + _trieNodeCache.Clear(); + + if (_logger.IsInfo) _logger.Info($"FlatDbManager FlushCache completed. Persisted to {persistedState}."); + } + + public bool HasStateForBlock(in StateId stateId) + { + if (_snapshotRepository.HasState(stateId)) return true; + if (_persistenceManager.GetCurrentPersistedStateId() == stateId) return true; + return false; + } + + public async ValueTask DisposeAsync() + { + if (Interlocked.CompareExchange(ref _isDisposed, 1, 0) == 1) return; + + ClearReadOnlyBundleCache(); + _cancelTokenSource.Cancel(); + + _compactorJobs.Writer.Complete(); + _populateTrieNodeCacheJobs.Writer.Complete(); + _persistenceJobs.Writer.Complete(); + + await _compactorTask; + await _populateTrieNodeCacheTask; + await _persistenceTask; + + _cancelTokenSource.Dispose(); + } +} diff --git a/src/Nethermind/Nethermind.State.Flat/FlatStateReader.cs b/src/Nethermind/Nethermind.State.Flat/FlatStateReader.cs new file mode 100644 index 00000000000..884e2c9cc64 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/FlatStateReader.cs @@ -0,0 +1,72 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using 
Autofac.Features.AttributeFilters; +using Nethermind.Core; +using Nethermind.Core.Crypto; +using Nethermind.Db; +using Nethermind.Int256; +using Nethermind.Logging; +using Nethermind.State.Flat.ScopeProvider; +using Nethermind.Trie; + +namespace Nethermind.State.Flat; + +public class FlatStateReader( + [KeyFilter(DbNames.Code)] IDb codeDb, + IFlatDbManager flatDbManager, + ILogManager logManager +) : IStateReader +{ + public bool TryGetAccount(BlockHeader? baseBlock, Address address, out AccountStruct account) + { + using ReadOnlySnapshotBundle? reader = flatDbManager.GatherReadOnlySnapshotBundle(new StateId(baseBlock)); + if (reader is null) + { + account = default; + return false; + } + + if (reader.GetAccount(address) is { } accountCls) + { + account = accountCls.ToStruct(); + return true; + } + + account = default; + return false; + } + + public ReadOnlySpan GetStorage(BlockHeader? baseBlock, Address address, in UInt256 index) + { + using ReadOnlySnapshotBundle? reader = flatDbManager.GatherReadOnlySnapshotBundle(new StateId(baseBlock)); + if (reader is null) + { + return Array.Empty(); + } + + return reader.GetSlot(address, index, reader.DetermineSelfDestructSnapshotIdx(address)) ?? []; + } + + public byte[]? GetCode(Hash256 codeHash) => codeHash == Keccak.OfAnEmptyString ? [] : codeDb[codeHash.Bytes]; + + public byte[]? GetCode(in ValueHash256 codeHash) => codeHash == Keccak.OfAnEmptyString.ValueHash256 ? [] : codeDb[codeHash.Bytes]; + + public void RunTreeVisitor(ITreeVisitor treeVisitor, BlockHeader? baseBlock, VisitingOptions? visitingOptions = null) where TCtx : struct, INodeContext + { + StateId stateId = new(baseBlock); + + using ReadOnlySnapshotBundle? 
reader = flatDbManager.GatherReadOnlySnapshotBundle(stateId); + if (reader is null) + { + throw new InvalidOperationException($"State at {baseBlock} not found"); + } + + ReadOnlyStateTrieStoreAdapter trieStoreAdapter = new(reader); + + PatriciaTree patriciaTree = new PatriciaTree(trieStoreAdapter, logManager); + patriciaTree.Accept(treeVisitor, stateId.StateRoot.ToCommitment(), visitingOptions); + } + + public bool HasStateForBlock(BlockHeader? baseBlock) => flatDbManager.HasStateForBlock(new StateId(baseBlock)); +} diff --git a/src/Nethermind/Nethermind.State.Flat/FlatTrieVerifier.cs b/src/Nethermind/Nethermind.State.Flat/FlatTrieVerifier.cs new file mode 100644 index 00000000000..7fb4bdadf91 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/FlatTrieVerifier.cs @@ -0,0 +1,880 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System; +using System.Buffers.Binary; +using System.Collections.Concurrent; +using System.Collections.Generic; +using System.Threading; +using System.Threading.Channels; +using System.Threading.Tasks; +using Nethermind.Core; +using Nethermind.Core.Crypto; +using Nethermind.Core.Extensions; +using Nethermind.Int256; +using Nethermind.Logging; +using Nethermind.Serialization.Rlp; +using Nethermind.State.Flat.Persistence; +using Nethermind.State.Flat.ScopeProvider; +using Nethermind.Trie; +using Nethermind.Trie.Pruning; + +namespace Nethermind.State.Flat; + +/// +/// Verifier for flat DB against trie state. +/// - Hashed mode: Single-pass co-iteration (flat and trie share same sort order) +/// - Preimage mode: Two-pass verification using PatriciaTree.Get() directly +/// +public class FlatTrieVerifier +{ + private const int StorageChannelCapacity = 4; + private const int FlatKeyLength = 20; + private const int PartitionCount = 8; + + private readonly IFlatDbManager? _flatDbManager; + private readonly IPersistence? 
_persistence; + private readonly ILogManager _logManager; + private readonly ILogger _logger; + + private long _accountCount; + private long _slotCount; + private long _mismatchedAccount; + private long _mismatchedSlot; + private long _missingInFlat; + private long _missingInTrie; + + public FlatTrieVerifier(IFlatDbManager flatDbManager, IPersistence persistence, ILogManager logManager) + { + _flatDbManager = flatDbManager; + _persistence = persistence; + _logManager = logManager; + _logger = logManager.GetClassLogger(); + } + + // Internal constructor for testing + internal FlatTrieVerifier(ILogManager logManager) + { + _logManager = logManager; + _logger = logManager.GetClassLogger(); + } + + public VerificationStats Stats => new( + Interlocked.Read(ref _accountCount), + Interlocked.Read(ref _slotCount), + Interlocked.Read(ref _mismatchedAccount), + Interlocked.Read(ref _mismatchedSlot), + Interlocked.Read(ref _missingInFlat), + Interlocked.Read(ref _missingInTrie)); + + public bool Verify(BlockHeader stateAtBlock, CancellationToken cancellationToken) + { + ArgumentNullException.ThrowIfNull(_persistence); + ArgumentNullException.ThrowIfNull(_flatDbManager); + + StateId stateId = new StateId(stateAtBlock); + using IPersistence.IPersistenceReader reader = _persistence.CreateReader(); + if (reader.CurrentState != stateId) + { + _logger.Warn($"With flat, only the persisted state can be verified. 
Will use current persisted state: {reader.CurrentState}"); + stateId = reader.CurrentState; + } + + using ReadOnlySnapshotBundle bundle = _flatDbManager.GatherReadOnlySnapshotBundle(stateId); + ReadOnlyStateTrieStoreAdapter trieStore = new(bundle); + + return VerifyCore(reader, trieStore, stateId.StateRoot.ToCommitment(), cancellationToken); + } + + // Internal method for testing with direct components + internal void Verify(IPersistence.IPersistenceReader reader, IScopedTrieStore trieStore, Hash256 stateRoot, CancellationToken cancellationToken) + { + VerifyCore(reader, trieStore, stateRoot, cancellationToken); + } + + private bool VerifyCore(IPersistence.IPersistenceReader reader, IScopedTrieStore trieStore, Hash256 stateRoot, CancellationToken cancellationToken) + { + HashVerifyingTrieStore verifyingTrieStore = new(trieStore, null, _logger); + VisitorProgressTracker progressTracker = new("Verify flat", _logManager, printNodes: false); + + Channel channel = Channel.CreateBounded( + new BoundedChannelOptions(StorageChannelCapacity) + { + FullMode = BoundedChannelFullMode.Wait + }); + + int workerCount = Math.Max(1, Environment.ProcessorCount - 1); + Task[] workers = new Task[workerCount]; + for (int i = 0; i < workerCount; i++) + { + workers[i] = Task.Run(() => ProcessStorageQueue(channel.Reader, reader, verifyingTrieStore, cancellationToken)); + } + + try + { + if (reader.IsPreimageMode) + { + VerifyPreimageMode(reader, verifyingTrieStore, stateRoot, channel.Writer, progressTracker, cancellationToken); + } + else + { + VerifyHashedMode(reader, verifyingTrieStore, stateRoot, channel.Writer, progressTracker, cancellationToken); + } + } + finally + { + channel.Writer.Complete(); + + while (!Task.WaitAll(workers, TimeSpan.FromSeconds(30))) + { + if (_logger.IsInfo) _logger.Info($"Waiting for storage verification workers... 
{Stats}"); + } + + progressTracker.Finish(); + } + + if (verifyingTrieStore.HashMismatchCount > 0) + { + if (_logger.IsError) _logger.Error($"Hash verification found {verifyingTrieStore.HashMismatchCount} mismatches"); + } + + bool isOk = Stats.MismatchedAccount == 0 && Stats.MismatchedSlot == 0 && + Stats.MissingInFlat == 0 && Stats.MissingInTrie == 0 && + verifyingTrieStore.HashMismatchCount == 0; + + if (!isOk) + { + if (_logger.IsWarn) _logger.Warn( + $"Verification failed: {Stats.MismatchedAccount} mismatched accounts, {Stats.MismatchedSlot} mismatched slots, " + + $"{Stats.MissingInFlat} missing in flat, {Stats.MissingInTrie} missing in trie"); + } + + if (_logger.IsInfo) _logger.Info($"Verification complete. {Stats}"); + + return isOk; + } + + /// + /// Get partition bounds for parallel verification. + /// Divides the 256-bit key space into 8 equal ranges based on the first byte. + /// + private static (ValueHash256 start, ValueHash256 end) GetPartitionBounds(int partition) + { + byte startByte = (byte)(partition * 32); + ValueHash256 start = default; + start.BytesAsSpan[0] = startByte; + + ValueHash256 end; + if (partition < PartitionCount - 1) + { + end = default; + end.BytesAsSpan[0] = (byte)((partition + 1) * 32); + } + else + { + // Last partition ends at 0xFF...FF (inclusive via iterator semantics) + end = new ValueHash256(Bytes.FromHexString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")); + } + return (start, end); + } + + /// + /// Hashed mode: Parallelized single-pass co-iteration across partitions. 
+ /// + private void VerifyHashedMode( + IPersistence.IPersistenceReader reader, + IScopedTrieStore trieStore, + Hash256 stateRoot, + ChannelWriter storageWriter, + VisitorProgressTracker progressTracker, + CancellationToken cancellationToken) + { + Task[] partitionTasks = new Task[PartitionCount]; + for (int i = 0; i < PartitionCount; i++) + { + int partition = i; + partitionTasks[i] = Task.Run(() => VerifyHashedModePartition(partition, reader, trieStore, stateRoot, storageWriter, progressTracker, cancellationToken), cancellationToken); + } + Task.WaitAll(partitionTasks, cancellationToken); + } + + /// + /// Verifies a single partition in hashed mode. + /// + private void VerifyHashedModePartition( + int partition, + IPersistence.IPersistenceReader reader, + IScopedTrieStore trieStore, + Hash256 stateRoot, + ChannelWriter storageWriter, + VisitorProgressTracker progressTracker, + CancellationToken cancellationToken) + { + (ValueHash256 startKey, ValueHash256 endKey) = GetPartitionBounds(partition); + + using IPersistence.IFlatIterator flatIter = reader.CreateAccountIterator(startKey, endKey); + TrieLeafIterator trieIter = new(trieStore, stateRoot, LogTrieNodeException, startKey, endKey); + + bool hasFlat = flatIter.MoveNext(); + bool hasTrie = trieIter.MoveNext(); + + TreePath progressPath = TreePath.Empty; + + while (hasFlat || hasTrie) + { + cancellationToken.ThrowIfCancellationRequested(); + + int cmp = CompareHashedKeys( + hasFlat ? flatIter.CurrentKey : default, + hasTrie ? 
trieIter.CurrentPath : default, + hasFlat, + hasTrie); + + if (cmp == 0) + { + Interlocked.Increment(ref _accountCount); + if (trieIter.CurrentPath.Truncate(VisitorProgressTracker.Level3Depth) != progressPath) + { + progressPath = trieIter.CurrentPath.Truncate(VisitorProgressTracker.Level3Depth); + progressTracker.OnNodeVisited(progressPath, isStorage: false); + } + VerifyAccountMatch(flatIter.CurrentValue, trieIter.CurrentLeaf!, flatIter.CurrentKey, trieIter.CurrentPath, reader.IsPreimageMode, trieStore, storageWriter, cancellationToken); + hasFlat = flatIter.MoveNext(); + hasTrie = trieIter.MoveNext(); + } + else if (cmp < 0 || !hasTrie) + { + Interlocked.Increment(ref _accountCount); + Interlocked.Increment(ref _missingInTrie); + if (_logger.IsWarn) _logger.Warn($"Account in flat not found in trie. FlatKey: {flatIter.CurrentKey}"); + DiagnoseTriePath(trieStore, stateRoot, flatIter.CurrentKey); + hasFlat = flatIter.MoveNext(); + } + else + { + Interlocked.Increment(ref _accountCount); + Interlocked.Increment(ref _missingInFlat); + if (trieIter.CurrentPath.Truncate(VisitorProgressTracker.Level3Depth) != progressPath) + { + progressPath = trieIter.CurrentPath.Truncate(VisitorProgressTracker.Level3Depth); + progressTracker.OnNodeVisited(progressPath, isStorage: false); + } + if (_logger.IsWarn) _logger.Warn($"Account in trie not found in flat. TriePath: {trieIter.CurrentPath}"); + hasTrie = trieIter.MoveNext(); + } + } + } + + /// + /// Preimage mode: Two-pass verification using PatriciaTree.Get() directly for RLP lookup. + /// Pass 1: Iterate flat sequentially (can't partition by hash since flat uses raw addresses), lookup each in trie + /// Pass 2: Iterate trie partitions in parallel, check against seen set - detects entries missing in flat + /// Note: Flat iteration is not parallelized because addresses don't partition by hash. 
+ /// + private void VerifyPreimageMode( + IPersistence.IPersistenceReader reader, + IScopedTrieStore trieStore, + Hash256 stateRoot, + ChannelWriter storageWriter, + VisitorProgressTracker progressTracker, + CancellationToken cancellationToken) + { + // PatriciaTree for direct RLP lookup (thread-safe for reads) + PatriciaTree? tree = stateRoot != Keccak.EmptyTreeHash ? new(trieStore, _logManager) : null; + + // Thread-safe set of verified trie paths to avoid double-counting in pass 2 + ConcurrentDictionary verifiedTriePaths = new(); + + TreePath progressPath = TreePath.Empty; + + // Pass 1: Flat -> Trie (sequential - can't partition raw addresses by hash) + using (IPersistence.IFlatIterator flatIter = reader.CreateAccountIterator(ValueKeccak.Zero, ValueKeccak.MaxValue)) + { + while (flatIter.MoveNext()) + { + cancellationToken.ThrowIfCancellationRequested(); + Interlocked.Increment(ref _accountCount); + + // In preimage mode, flat key contains raw address bytes + ValueHash256 flatKey = flatIter.CurrentKey; + Hash256 trieHash = Keccak.Compute(flatKey.Bytes[..20]); + ulong hashKey = BinaryPrimitives.ReadUInt64LittleEndian(trieHash.Bytes); + + // Direct RLP lookup using PatriciaTree.Get() + ReadOnlySpan trieAccountRlp = tree is not null ? tree.Get(trieHash.Bytes, stateRoot) : []; + + if (trieAccountRlp.IsEmpty) + { + Interlocked.Increment(ref _missingInTrie); + if (_logger.IsWarn) _logger.Warn($"Account in flat not found in trie. 
Address: {new Address(flatKey.Bytes[..20].ToArray())}"); + DiagnoseTriePath(trieStore, stateRoot, flatKey); + continue; + } + + verifiedTriePaths.TryAdd(hashKey, 0); + TreePath triePath = TreePath.FromPath(trieHash.Bytes); + if (triePath.Truncate(VisitorProgressTracker.Level3Depth) != progressPath) + { + progressPath = triePath.Truncate(VisitorProgressTracker.Level3Depth); + progressTracker.OnNodeVisited(progressPath, isStorage: false); + } + + VerifyAccountMatchPreimageWithRlp(flatIter.CurrentValue, trieAccountRlp, flatKey, trieHash.ValueHash256, storageWriter, cancellationToken); + } + } + + // Pass 2: Trie -> Flat (parallelized across partitions - trie uses hashes which partition evenly) + Task[] pass2Tasks = new Task[PartitionCount]; + for (int i = 0; i < PartitionCount; i++) + { + int partition = i; + pass2Tasks[i] = Task.Run(() => + VerifyPreimageModePass2Partition(partition, trieStore, stateRoot, progressTracker, verifiedTriePaths, cancellationToken), + cancellationToken); + } + Task.WaitAll(pass2Tasks, cancellationToken); + } + + /// + /// Pass 2 of preimage mode verification for a single partition. 
+ /// + private void VerifyPreimageModePass2Partition( + int partition, + IScopedTrieStore trieStore, + Hash256 stateRoot, + VisitorProgressTracker progressTracker, + ConcurrentDictionary verifiedTriePaths, + CancellationToken cancellationToken) + { + (ValueHash256 startKey, ValueHash256 endKey) = GetPartitionBounds(partition); + TreePath progressPath = TreePath.Empty; + + TrieLeafIterator trieIter = new(trieStore, stateRoot, LogTrieNodeException, startKey, endKey); + while (trieIter.MoveNext()) + { + cancellationToken.ThrowIfCancellationRequested(); + + ulong triePathKey = BinaryPrimitives.ReadUInt64LittleEndian(trieIter.CurrentPath.Path.Bytes); + + if (verifiedTriePaths.ContainsKey(triePathKey)) + continue; + + Interlocked.Increment(ref _accountCount); + Interlocked.Increment(ref _missingInFlat); + if (trieIter.CurrentPath.Truncate(VisitorProgressTracker.Level3Depth) != progressPath) + { + progressPath = trieIter.CurrentPath.Truncate(VisitorProgressTracker.Level3Depth); + progressTracker.OnNodeVisited(progressPath, isStorage: false); + } + if (_logger.IsWarn) _logger.Warn($"Account in trie not found in flat. TriePath: {trieIter.CurrentPath}"); + } + } + + private static int CompareHashedKeys(in ValueHash256 flatKey, in TreePath triePath, bool hasFlat, bool hasTrie) => + (hasFlat, hasTrie) switch + { + (false, false) => 0, + (false, true) => 1, + (true, false) => -1, + _ => Bytes.BytesComparer.Compare(flatKey.Bytes[..FlatKeyLength], triePath.Path.Bytes[..FlatKeyLength]) + }; + + private void VerifyAccountMatch( + ReadOnlySpan flatAccountRlp, + TrieNode trieLeaf, + in ValueHash256 flatKey, + in TreePath triePath, + bool isPreimageMode, + IScopedTrieStore trieStore, + ChannelWriter storageWriter, + CancellationToken cancellationToken) + { + ReadOnlySpan trieAccountRlp = trieLeaf.Value.Span; + + Rlp.ValueDecoderContext flatCtx = new(flatAccountRlp); + Account? 
flatAccount = AccountDecoder.Slim.Decode(ref flatCtx); + + Rlp.ValueDecoderContext trieCtx = new(trieAccountRlp); + Account? trieAccount = AccountDecoder.Instance.Decode(ref trieCtx); + + if (flatAccount != trieAccount) + { + Interlocked.Increment(ref _mismatchedAccount); + if (_logger.IsWarn) _logger.Warn($"Mismatched account. Path: {triePath}. Flat: {flatAccount}, Trie: {trieAccount}"); + } + + if (trieAccount is not null && trieAccount.StorageRoot != Keccak.EmptyTreeHash) + { + Hash256 fullPath = triePath.Path.ToCommitment(); + StorageVerificationJob job = new(flatKey, fullPath, trieAccount.StorageRoot, isPreimageMode); + storageWriter.WriteAsync(job, cancellationToken).AsTask().Wait(cancellationToken); + } + } + + private void VerifyAccountMatchPreimageWithRlp( + ReadOnlySpan flatAccountRlp, + ReadOnlySpan trieAccountRlp, + in ValueHash256 flatKey, + in ValueHash256 trieHash, + ChannelWriter storageWriter, + CancellationToken cancellationToken) + { + Rlp.ValueDecoderContext flatCtx = new(flatAccountRlp); + Account? flatAccount = AccountDecoder.Slim.Decode(ref flatCtx); + + Rlp.ValueDecoderContext trieCtx = new(trieAccountRlp); + Account? trieAccount = AccountDecoder.Instance.Decode(ref trieCtx); + + if (flatAccount != trieAccount) + { + Interlocked.Increment(ref _mismatchedAccount); + if (_logger.IsWarn) _logger.Warn($"Mismatched account. Hash: {trieHash}. 
Flat: {flatAccount}, Trie: {trieAccount}"); + } + + if (trieAccount is not null && trieAccount.StorageRoot != Keccak.EmptyTreeHash) + { + Hash256 fullPath = trieHash.ToCommitment(); + StorageVerificationJob job = new(flatKey, fullPath, trieAccount.StorageRoot, true); + storageWriter.WriteAsync(job, cancellationToken).AsTask().Wait(cancellationToken); + } + } + + private async Task ProcessStorageQueue( + ChannelReader channelReader, + IPersistence.IPersistenceReader reader, + IScopedTrieStore trieStore, + CancellationToken cancellationToken) + { + await foreach (StorageVerificationJob job in channelReader.ReadAllAsync(cancellationToken)) + { + if (job.IsPreimageMode) + { + VerifyStoragePreimage(job, reader, trieStore, cancellationToken); + } + else + { + VerifyStorageHashed(job, reader, trieStore, cancellationToken); + } + } + } + + private void VerifyStorageHashed( + StorageVerificationJob job, + IPersistence.IPersistenceReader reader, + IScopedTrieStore trieStore, + CancellationToken cancellationToken) + { + using IPersistence.IFlatIterator flatIter = reader.CreateStorageIterator(job.FlatAccountKey, ValueKeccak.Zero, ValueKeccak.MaxValue); + IScopedTrieStore storageTrieStore = (IScopedTrieStore)trieStore.GetStorageTrieNodeResolver(job.TrieAccountPath); + TrieLeafIterator trieIter = new(storageTrieStore, job.StorageRoot, LogTrieNodeException); + + bool hasFlat = flatIter.MoveNext(); + bool hasTrie = trieIter.MoveNext(); + + while (hasFlat || hasTrie) + { + cancellationToken.ThrowIfCancellationRequested(); + + int cmp = CompareStorageKeys( + hasFlat ? flatIter.CurrentKey : default, + hasTrie ? 
trieIter.CurrentPath : default, + hasFlat, + hasTrie); + + if (cmp == 0) + { + Interlocked.Increment(ref _slotCount); + VerifySlotMatch(flatIter.CurrentValue, trieIter.CurrentLeaf!, job.FlatAccountKey, flatIter.CurrentKey); + hasFlat = flatIter.MoveNext(); + hasTrie = trieIter.MoveNext(); + } + else if (cmp < 0 || !hasTrie) + { + Interlocked.Increment(ref _slotCount); + if (!IsZeroValue(flatIter.CurrentValue)) + { + Interlocked.Increment(ref _missingInTrie); + if (_logger.IsWarn) _logger.Warn($"Storage slot in flat not in trie. Account: {job.FlatAccountKey}, Slot: {flatIter.CurrentKey}"); + DiagnoseTriePath(storageTrieStore, job.StorageRoot, flatIter.CurrentKey); + } + hasFlat = flatIter.MoveNext(); + } + else + { + Interlocked.Increment(ref _slotCount); + Interlocked.Increment(ref _missingInFlat); + if (_logger.IsWarn) _logger.Warn($"Storage slot in trie not in flat. Account: {job.FlatAccountKey}, TriePath: {trieIter.CurrentPath}"); + hasTrie = trieIter.MoveNext(); + } + } + } + + private void VerifyStoragePreimage( + StorageVerificationJob job, + IPersistence.IPersistenceReader reader, + IScopedTrieStore trieStore, + CancellationToken cancellationToken) + { + IScopedTrieStore storageTrieStore = (IScopedTrieStore)trieStore.GetStorageTrieNodeResolver(job.TrieAccountPath); + PatriciaTree storageTree = new(storageTrieStore, _logManager); + + HashSet verifiedSlots = []; + + // Pass 1: Flat -> Trie + using (IPersistence.IFlatIterator flatIter = reader.CreateStorageIterator(job.FlatAccountKey, ValueKeccak.Zero, ValueKeccak.MaxValue)) + { + while (flatIter.MoveNext()) + { + cancellationToken.ThrowIfCancellationRequested(); + Interlocked.Increment(ref _slotCount); + + // In preimage mode, flat key is raw slot bytes (big-endian UInt256) + ValueHash256 flatSlotKey = flatIter.CurrentKey; + Hash256 slotHash = Keccak.Compute(flatSlotKey.Bytes); + ulong hashKey = BinaryPrimitives.ReadUInt64LittleEndian(slotHash.Bytes); + + // Direct RLP lookup using PatriciaTree.Get() + 
ReadOnlySpan trieValueRlp = storageTree.Get(slotHash.Bytes, job.StorageRoot); + + if (trieValueRlp.IsEmpty) + { + if (!IsZeroValue(flatIter.CurrentValue)) + { + Interlocked.Increment(ref _missingInTrie); + if (_logger.IsWarn) _logger.Warn($"Storage slot in flat not in trie. Account: {job.FlatAccountKey}, Slot: {flatSlotKey}"); + } + continue; + } + + verifiedSlots.Add(hashKey); + VerifySlotMatchPreimageWithRlp(flatIter.CurrentValue, trieValueRlp, job.FlatAccountKey, flatSlotKey); + } + } + + // Pass 2: Trie -> Flat (check for entries in trie not in flat) + TrieLeafIterator trieIter = new(storageTrieStore, job.StorageRoot, LogTrieNodeException); + while (trieIter.MoveNext()) + { + cancellationToken.ThrowIfCancellationRequested(); + + ulong triePathKey = BinaryPrimitives.ReadUInt64LittleEndian(trieIter.CurrentPath.Path.Bytes); + + if (verifiedSlots.Contains(triePathKey)) + continue; + + Interlocked.Increment(ref _slotCount); + Interlocked.Increment(ref _missingInFlat); + if (_logger.IsWarn) _logger.Warn($"Storage slot in trie not in flat. Account: {job.FlatAccountKey}, TriePath: {trieIter.CurrentPath}"); + } + } + + private static int CompareStorageKeys(in ValueHash256 flatKey, in TreePath triePath, bool hasFlat, bool hasTrie) => + (hasFlat, hasTrie) switch + { + (false, false) => 0, + (false, true) => 1, + (true, false) => -1, + _ => Bytes.BytesComparer.Compare(flatKey.Bytes, triePath.Path.Bytes) + }; + + private void VerifySlotMatch(ReadOnlySpan flatValue, TrieNode trieLeaf, in ValueHash256 accountKey, in ValueHash256 slotKey) + { + ReadOnlySpan trieValue = trieLeaf.Value.Span; + if (trieValue.IsEmpty) + { + if (IsZeroValue(flatValue)) return; + Interlocked.Increment(ref _mismatchedSlot); + if (_logger.IsWarn) _logger.Warn($"Mismatched slot (trie empty). 
Account: {accountKey}, Slot: {slotKey}"); + return; + } + + Rlp.ValueDecoderContext ctx = new(trieValue); + byte[] decodedTrieValue = ctx.DecodeByteArray(); + + ReadOnlySpan flatTrimmed = flatValue.WithoutLeadingZeros(); + ReadOnlySpan trieTrimmed = decodedTrieValue.AsSpan().WithoutLeadingZeros(); + + if (!Bytes.AreEqual(flatTrimmed, trieTrimmed)) + { + Interlocked.Increment(ref _mismatchedSlot); + if (_logger.IsWarn) _logger.Warn($"Mismatched slot. Account: {accountKey}, Slot: {slotKey}. Flat: {flatTrimmed.ToHexString()}, Trie: {trieTrimmed.ToHexString()}"); + } + } + + private void VerifySlotMatchPreimageWithRlp(ReadOnlySpan flatValue, ReadOnlySpan trieValueRlp, in ValueHash256 accountKey, in ValueHash256 slotKey) + { + // Decode RLP to get the actual value + Rlp.ValueDecoderContext ctx = new(trieValueRlp); + byte[] decodedTrieValue = ctx.DecodeByteArray(); + + ReadOnlySpan flatTrimmed = flatValue.WithoutLeadingZeros(); + ReadOnlySpan trieTrimmed = decodedTrieValue.AsSpan().WithoutLeadingZeros(); + + if (!Bytes.AreEqual(flatTrimmed, trieTrimmed)) + { + Interlocked.Increment(ref _mismatchedSlot); + if (_logger.IsWarn) _logger.Warn($"Mismatched slot. Account: {accountKey}, Slot: {slotKey}. Flat: {flatTrimmed.ToHexString()}, Trie: {trieTrimmed.ToHexString()}"); + } + } + + private static bool IsZeroValue(ReadOnlySpan value) => + value.IsEmpty || value.WithoutLeadingZeros().IsEmpty; + + private void LogTrieNodeException(TrieNodeException ex) => + _logger.Warn($"TrieLeafIterator encountered exception: {ex.Message}"); + + /// + /// Diagnostic traversal when flat entry exists but trie lookup fails. + /// Walks the trie path showing node type, hash, and whether RLP is available/valid. + /// + private void DiagnoseTriePath( + IScopedTrieStore trieStore, + Hash256 stateRoot, + in ValueHash256 flatKey) + { + if (_logger.IsInfo) _logger.Info($"=== Diagnosing trie path for flat key {flatKey} ==="); + + TreePath currentPath = TreePath.Empty; + TrieNode? 
currentNode = trieStore.FindCachedOrUnknown(currentPath, stateRoot); + Hash256? expectedHash = stateRoot; + + while (currentNode is not null) + { + bool isInline = expectedHash is null; + + // Resolve the node (loads RLP for non-inline, no-op for already resolved inline nodes) + try + { + currentNode.ResolveNode(trieStore, currentPath); + } + catch (TrieNodeException ex) + { + if (_logger.IsWarn) _logger.Warn($" Path: {currentPath} | Failed to resolve: {ex.Message}"); + ScanRemainingPathWithZeroHash(trieStore, currentPath, flatKey); + return; + } + + // Verify hash only for non-inline nodes + if (!isInline) + { + bool hashValid = currentNode.Keccak == expectedHash; + string branchChildren = ""; + if (currentNode.IsBranch) + { + // Plot its child information, is it exist, is it in DB or missing. + branchChildren = $"| Branch children: {string.Join("", Enumerable.Range(0, 16).Select((idx) => + { + TreePath childPath = currentPath.Append(idx); + TrieNode? child = currentNode.GetChildWithChildPath(trieStore, ref childPath, idx); + if (child is null) return "."; + + try + { + child.ResolveNode(trieStore, childPath); + return "X"; + } + catch (TrieNodeException) + { + return "E"; + } + }))}"; + } + if (_logger.IsInfo) _logger.Info($" Path: {currentPath} | Type: {currentNode.NodeType} | Hash: {expectedHash!.ToShortString()} | HashValid: {hashValid} {branchChildren}"); + } + else + { + if (_logger.IsInfo) _logger.Info($" Path: {currentPath} | Type: {currentNode.NodeType} | Inline node (no hash)"); + } + + // Navigate based on node type + TreePath fullPath = new TreePath(flatKey, 64); + switch (currentNode.NodeType) + { + case NodeType.Branch: + int nibble = fullPath[currentPath.Length]; + + if (currentNode.IsChildNull(nibble)) + { + TreePath nullChildPath = currentPath.Append(nibble); + if (_logger.IsWarn) _logger.Warn($" -> Branch child {nibble:X} is null"); + if (_logger.IsWarn) _logger.Warn($" -> Remaining nibbles: {64 - nullChildPath.Length}"); + 
ScanRemainingPathWithZeroHash(trieStore, nullChildPath, flatKey); + return; + } + + // Get next hash (null for inline nodes) and then get the actual child node + expectedHash = currentNode.GetChildHash(nibble); + currentPath.AppendMut(nibble); + currentNode = currentNode.GetChildWithChildPath(trieStore, ref currentPath, nibble); + break; + + case NodeType.Extension: + byte[] key = currentNode.Key!; + if (_logger.IsInfo) _logger.Info($" -> Extension key: {TreePath.FromNibble(key ?? [])}"); + + // Check if path matches (only if we haven't passed target) + if (currentPath.Length < 64) + { + for (int i = 0; i < key!.Length && currentPath.Length + i < 64; i++) + { + if (key[i] != fullPath[currentPath.Length + i]) + { + if (_logger.IsWarn) _logger.Warn($" -> Extension key mismatch at position {i}: expected {fullPath[currentPath.Length + i]:X}, got {key[i]:X}"); + ScanRemainingPathWithZeroHash(trieStore, currentPath, flatKey); + return; + } + } + } + + // Get next hash (null for inline nodes) and then get the actual child node + expectedHash = currentNode.GetChildHash(0); + currentPath.AppendMut(key); + currentNode = currentNode.GetChildWithChildPath(trieStore, ref currentPath, 0); + break; + + case NodeType.Leaf: + if (_logger.IsInfo) _logger.Info($" -> Found leaf with key: {TreePath.FromNibble(currentNode.Key ?? [])}"); + if (_logger.IsInfo) _logger.Info($" -> Full leaf path: {currentPath.Append(currentNode.Key ?? [])}"); + return; + + default: + if (_logger.IsWarn) _logger.Warn($" -> Unknown node type: {currentNode.NodeType}"); + ScanRemainingPathWithZeroHash(trieStore, currentPath, flatKey); + return; + } + } + + if (_logger.IsInfo) _logger.Info($" -> Traversal ended with null node at path {currentPath}"); + + // Continue scanning remaining path with zero hash to see what's stored + ScanRemainingPathWithZeroHash(trieStore, currentPath, flatKey); + } + + /// + /// Scans remaining path nibbles using Keccak.Zero to see what data is stored + /// at each position. 
This helps diagnose cases where hash-based lookup fails. + /// + private void ScanRemainingPathWithZeroHash( + IScopedTrieStore trieStore, + TreePath currentPath, + in ValueHash256 flatKey) + { + const int lookupLimit = 16; // Limit lookup, make it less verbose. + if (currentPath.Length >= lookupLimit) + return; + + if (_logger.IsInfo) _logger.Info($" -> Scanning remaining path with zero hash..."); + + TreePath fullPath = new TreePath(flatKey, 64); + while (currentPath.Length < lookupLimit) + { + int nibble = fullPath[currentPath.Length]; + currentPath = currentPath.Append(nibble); + + byte[]? zeroHashRlp = trieStore.TryLoadRlp(currentPath, Keccak.Zero, ReadFlags.None); + if (zeroHashRlp is not null) + { + Hash256 actualHash = Keccak.Compute(zeroHashRlp); + if (_logger.IsInfo) _logger.Info($" Path: {currentPath} | ZeroHash lookup found data | ActualHash: {actualHash.ToShortString()}"); + + // Try to decode and show node info + TrieNode node = new(NodeType.Unknown, actualHash, zeroHashRlp); + try + { + node.ResolveNode(trieStore, currentPath); + if (_logger.IsInfo) _logger.Info($" -> Type: {node.NodeType}, Key: {node.Key?.ToHexString() ?? 
"null"}"); + } + catch (TrieNodeException ex) + { + if (_logger.IsWarn) _logger.Warn($" -> Failed to resolve: {ex.Message}"); + } + } + else + { + if (_logger.IsInfo) _logger.Info($" Path: {currentPath} | ZeroHash lookup: nothing"); + } + } + } + + private readonly record struct StorageVerificationJob( + ValueHash256 FlatAccountKey, + Hash256 TrieAccountPath, + Hash256 StorageRoot, + bool IsPreimageMode); + + public readonly record struct VerificationStats( + long AccountCount, + long SlotCount, + long MismatchedAccount, + long MismatchedSlot, + long MissingInFlat, + long MissingInTrie) + { + public override string ToString() => + $"Accounts={AccountCount}, Slots={SlotCount}, MismatchedAccounts={MismatchedAccount}, " + + $"MismatchedSlots={MismatchedSlot}, MissingInFlat={MissingInFlat}, MissingInTrie={MissingInTrie}"; + } + + /// + /// Wrapper around IScopedTrieStore that verifies hashes of loaded RLP data. + /// + private sealed class HashVerifyingTrieStore(IScopedTrieStore inner, Hash256? address, ILogger logger) : IScopedTrieStore + { + private long _hashMismatchCount; + + public long HashMismatchCount => Interlocked.Read(ref _hashMismatchCount); + + public TrieNode FindCachedOrUnknown(in TreePath path, Hash256 hash) => inner.FindCachedOrUnknown(path, hash); + + public byte[]? LoadRlp(in TreePath path, Hash256 hash, ReadFlags flags = ReadFlags.None) + { + byte[]? rlp = inner.LoadRlp(path, hash, flags); + if (rlp is not null) + { + VerifyHash(rlp, hash, path); + } + return rlp; + } + + public byte[]? TryLoadRlp(in TreePath path, Hash256 hash, ReadFlags flags = ReadFlags.None) + { + byte[]? 
rlp = inner.TryLoadRlp(path, hash, flags); + if (rlp is not null && hash != Keccak.Zero) + { + VerifyHash(rlp, hash, path); + } + return rlp; + } + + private void VerifyHash(byte[] rlp, Hash256 expectedHash, in TreePath path) + { + Hash256 computed = Keccak.Compute(rlp); + if (computed != expectedHash) + { + Interlocked.Increment(ref _hashMismatchCount); + if (address is null) + { + if (logger.IsError) logger.Error( + $"Hash mismatch at path {path}: expected {expectedHash.ToShortString()}, computed {computed.ToShortString()}"); + } + else + { + if (logger.IsError) logger.Error( + $"Hash mismatch at path {address}:{path}: expected {expectedHash.ToShortString()}, computed {computed.ToShortString()}"); + } + } + } + + public ITrieNodeResolver GetStorageTrieNodeResolver(Hash256? address) => + address is null + ? this + : new HashVerifyingTrieStore((IScopedTrieStore)inner.GetStorageTrieNodeResolver(address), address, logger); + + public INodeStorage.KeyScheme Scheme => inner.Scheme; + + public ICommitter BeginCommit(TrieNode? root, WriteFlags writeFlags = WriteFlags.None) => + inner.BeginCommit(root, writeFlags); + + public bool IsPersisted(in TreePath path, in ValueHash256 keccak) => inner.IsPersisted(path, keccak); + } +} diff --git a/src/Nethermind/Nethermind.State.Flat/IFlatDbManager.cs b/src/Nethermind/Nethermind.State.Flat/IFlatDbManager.cs new file mode 100644 index 00000000000..8ed5da6714d --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/IFlatDbManager.cs @@ -0,0 +1,22 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using Nethermind.State.Flat.Persistence; +using Nethermind.Trie.Pruning; + +namespace Nethermind.State.Flat; + +public interface IFlatDbManager : IFlatCommitTarget +{ + event EventHandler? 
/// <summary>
/// Entry point for the flat state database: hands out snapshot bundles for a base block,
/// flushes the in-memory cache, and reports state availability.
/// </summary>
public interface IFlatDbManager : IFlatCommitTarget
{
    /// <summary>Raised when the reorg boundary moves. NOTE(review): the handler's event-args
    /// type argument appears to have been lost in transit — confirm against the implementation.</summary>
    event EventHandler? ReorgBoundaryReached;

    /// <summary>Gathers a writable snapshot bundle rooted at <paramref name="baseBlock"/>.</summary>
    SnapshotBundle GatherSnapshotBundle(in StateId baseBlock, ResourcePool.Usage usage);

    /// <summary>Gathers a read-only snapshot bundle rooted at <paramref name="baseBlock"/>.</summary>
    ReadOnlySnapshotBundle GatherReadOnlySnapshotBundle(in StateId baseBlock);

    /// <summary>Flushes the cache, honouring <paramref name="cancellationToken"/>.</summary>
    void FlushCache(CancellationToken cancellationToken);

    /// <summary>True when state for <paramref name="stateId"/> is available.</summary>
    bool HasStateForBlock(in StateId stateId);
}

// Used by overridable world state env that has its own snapshot repositories.
public interface IFlatCommitTarget
{
    /// <summary>Commits a snapshot together with its transient resources.</summary>
    void AddSnapshot(Snapshot snapshot, TransientResource transientResource);
}

/// <summary>
/// Manages the durable (on-disk) side of the flat state: leasing readers, tracking the
/// currently persisted state id, and scheduling/forcing persistence of snapshots.
/// </summary>
public interface IPersistenceManager
{
    /// <summary>Leases a reader against the current persisted state; caller releases it.</summary>
    IPersistence.IPersistenceReader LeaseReader();

    /// <summary>Returns the id of the latest fully persisted state.</summary>
    StateId GetCurrentPersistedStateId();

    /// <summary>Queues everything up to <paramref name="latestSnapshot"/> for persistence.</summary>
    void AddToPersistence(StateId latestSnapshot);

    /// <summary>Forces persistence and returns the resulting persisted state id.</summary>
    StateId FlushToPersistence();
}

/// <summary>
/// Pools the heavyweight objects used during block processing (snapshot contents and
/// transient resources), keyed by a <see cref="ResourcePool.Usage"/> category.
/// Every Get* has a matching Return* that hands the object back to the pool.
/// </summary>
public interface IResourcePool
{
    SnapshotContent GetSnapshotContent(ResourcePool.Usage usage);
    void ReturnSnapshotContent(ResourcePool.Usage usage, SnapshotContent snapshotContent);
    TransientResource GetCachedResource(ResourcePool.Usage usage);
    void ReturnCachedResource(ResourcePool.Usage usage, TransientResource transientResource);

    /// <summary>Creates a snapshot covering the range <paramref name="from"/> → <paramref name="to"/>.</summary>
    Snapshot CreateSnapshot(in StateId from, in StateId to, ResourcePool.Usage usage);
}

/// <summary>
/// Strategy for merging runs of snapshots into a single compacted snapshot.
/// </summary>
public interface ISnapshotCompactor
{
    /// <summary>Decides whether a compaction should run at <paramref name="stateId"/>.</summary>
    bool DoCompactSnapshot(in StateId stateId);

    /// <summary>Selects the snapshots that should be merged together with <paramref name="snapshot"/>.</summary>
    SnapshotPooledList GetSnapshotsToCompact(Snapshot snapshot);

    /// <summary>Merges <paramref name="snapshots"/> into one compacted snapshot.</summary>
    Snapshot CompactSnapshotBundle(SnapshotPooledList snapshots);
}

/// <summary>
/// In-memory registry of snapshots (plain and compacted) keyed by <see cref="StateId"/>.
/// "Lease" methods hand out a snapshot the caller must later release.
/// </summary>
public interface ISnapshotRepository
{
    int SnapshotCount { get; }
    int CompactedSnapshotCount { get; }

    void AddStateId(in StateId stateId);
    bool TryAddSnapshot(Snapshot snapshot);
    bool TryAddCompactedSnapshot(Snapshot snapshot);
    bool TryLeaseState(in StateId stateId, [NotNullWhen(true)] out Snapshot? entry);
    bool TryLeaseCompactedState(in StateId stateId, [NotNullWhen(true)] out Snapshot? entry);
    bool RemoveAndReleaseCompactedKnownState(in StateId stateId);
    bool HasState(in StateId stateId);

    /// <summary>Collects the snapshot chain from <paramref name="stateId"/> back to <paramref name="targetStateId"/>.</summary>
    SnapshotPooledList AssembleSnapshots(in StateId stateId, in StateId targetStateId, int estimatedSize);

    /// <summary>Collects snapshots from <paramref name="stateId"/> back until <paramref name="minBlockNumber"/>.</summary>
    SnapshotPooledList AssembleSnapshotsUntil(in StateId stateId, long minBlockNumber, int estimatedSize);

    StateId? GetLastSnapshotId();

    /// <summary>All state ids recorded at <paramref name="blockNumber"/> (several on forks).
    /// NOTE(review): the element type argument of ArrayPoolList appears lost in transit — confirm.</summary>
    ArrayPoolList GetStatesAtBlockNumber(long blockNumber);

    /// <summary>Drops states at or below the persisted boundary.</summary>
    void RemoveStatesUntil(in StateId currentPersistedStateId);
}

/// <summary>
/// Cache of resolved trie nodes keyed by owner address (null = state trie), path and hash.
/// </summary>
public interface ITrieNodeCache
{
    bool TryGet(Hash256? address, in TreePath path, Hash256 hash, [NotNullWhen(true)] out TrieNode? node);

    /// <summary>Absorbs the nodes held by <paramref name="transientResource"/> into the cache.</summary>
    void Add(TransientResource transientResource);

    void Clear();
}
/// <summary>
/// Imports state from trie-based persistence to flat persistence by walking the state trie
/// at a given root and re-emitting every node (plus flattened leaf data) into the flat DB.
///
/// This importer uses SetAccountRaw/SetStorageRaw with hash-based keys. For PreimageFlat mode,
/// wrap the persistence with PreimageRecordingPersistence and provide a previously recorded
/// preimage database - it will automatically translate raw operations to preimage-keyed operations.
/// </summary>
/// <remarks>
/// Pipeline: one visitor task walks the trie and pushes entries into a bounded channel;
/// several ingest tasks drain the channel and write batches. Generic type arguments in this
/// block were reconstructed from usage (the channel carries <c>Entry</c> items).
/// </remarks>
public class Importer(
    INodeStorage nodeStorage,
    IPersistence persistence,
    ILogManager logManager
)
{
    private readonly ILogger _logger = logManager.GetClassLogger();
    private readonly AccountDecoder _accountDecoder = AccountDecoder.Instance;

    // Total nodes written across all ingest tasks; updated via Interlocked.
    private long _totalNodes = 0;

    // Entries per write batch before the batch is rotated.
    private const int BatchSize = 128_000;
    // Entries between full persistence flushes (WAL is disabled, so flush bounds data loss window).
    private const int FlushInterval = 50_000_000;
    // Entries between cancellation checks on the hot path.
    private const int CheckCancelInterval = 100_000;

    // One trie node observed by the visitor; address is null for state-trie nodes.
    private record struct Entry(Hash256? address, TreePath path, TrieNode node);

    /// <summary>
    /// Copies the whole state at <paramref name="to"/> from the node storage into flat
    /// persistence, then bumps the persisted state id from the reader's current state to
    /// <paramref name="to"/> and flushes.
    /// </summary>
    public async Task Copy(StateId to, CancellationToken cancellationToken = default)
    {
        StateId from;
        using (IPersistence.IPersistenceReader reader = persistence.CreateReader())
        {
            from = reader.CurrentState;
        }

        ITrieStore trieStore = new RawTrieStore(nodeStorage);
        PatriciaTree tree = new(trieStore, logManager)
        {
            RootHash = to.StateRoot.ToHash256()
        };

        Channel<Entry> channel = Channel.CreateBounded<Entry>(2_000_000);
        if (_logger.IsWarn) _logger.Warn("Starting import");

        int maxConcurrency = 8;
        VisitorProgressTracker progressTracker = new("Flat Import", logManager);

        // Producer: walk the trie, pushing every node into the channel.
        Task visitTask = Task.Run(() =>
        {
            Visitor visitor = new(channel.Writer, progressTracker, cancellationToken);
            try
            {
                tree.Accept(visitor, to.StateRoot.ToHash256(), new VisitingOptions()
                {
                    MaxDegreeOfParallelism = Math.Min(4, Environment.ProcessorCount), // Tend to be faster with low thread
                });
            }
            finally
            {
                visitor.Finish();
                channel.Writer.Complete();
            }
        }, cancellationToken);

        // Consumers: drain the channel concurrently into flat persistence.
        int concurrentIngestCount = Math.Min(Environment.ProcessorCount, maxConcurrency);
        using ArrayPoolList<Task> tasks = new(concurrentIngestCount + 1);
        tasks.Add(visitTask);
        tasks.AddRange(Enumerable.Range(0, concurrentIngestCount).Select(_ => Task.Run(async () =>
        {
            await IngestLogic(from, channel.Reader, cancellationToken);
        }, cancellationToken)));

        await Task.WhenAll(tasks.AsSpan());

        // Finally, we increment the state id: an empty from→to batch records the transition.
        IPersistence.IWriteBatch writeBatch = persistence.CreateWriteBatch(from, to);
        writeBatch.Dispose();
        persistence.Flush();

        if (_logger.IsInfo) _logger.Info($"Flat db copy completed. Wrote {_totalNodes} nodes.");
    }

    // Single consumer loop: writes trie nodes (and, for leaves, flattened account/slot data),
    // rotating write batches every BatchSize entries and flushing every FlushInterval.
    private async Task IngestLogic(StateId from, ChannelReader<Entry> channelReader, CancellationToken cancellationToken = default)
    {
        if (_logger.IsInfo) _logger.Info($"Ingest thread started");

        int currentItemSize = 0;
        IPersistence.IWriteBatch writeBatch = persistence.CreateWriteBatch(from, from, WriteFlags.DisableWAL); // It writes from initial state to initial state.
        await foreach ((Hash256? address, TreePath path, TrieNode node) in channelReader.ReadAllAsync(cancellationToken))
        {
            // Write it
            Metrics.ImporterEntriesCount++;

            if (address is null)
            {
                writeBatch.SetStateTrieNode(path, node);
            }
            else
            {
                writeBatch.SetStorageTrieNode(address, path, node);
            }

            if (node.IsLeaf)
            {
                // Leaves additionally populate the flat (non-trie) layout.
                ValueHash256 fullPath = path.Append(node.Key).Path;
                if (address is null)
                {
                    Account acc = _accountDecoder.Decode(node.Value.Span)!;
                    writeBatch.SetAccountRaw(fullPath.ToHash256(), acc);
                }
                else
                {
                    ReadOnlySpan<byte> value = node.Value.Span;
                    byte[] toWrite;

                    if (value.IsEmpty)
                    {
                        toWrite = StorageTree.ZeroBytes;
                    }
                    else
                    {
                        Rlp.ValueDecoderContext rlp = value.AsRlpValueContext();
                        toWrite = rlp.DecodeByteArray();
                    }

                    writeBatch.SetStorageRaw(address, fullPath.ToHash256(), SlotValue.FromSpanWithoutLeadingZero(toWrite));
                }
            }

            long theTotalNode = Interlocked.Increment(ref _totalNodes);
            if (theTotalNode % CheckCancelInterval == 0)
            {
                cancellationToken.ThrowIfCancellationRequested();
            }

            if (theTotalNode % FlushInterval == 0)
            {
                writeBatch.Dispose();
                persistence.Flush();
                writeBatch = persistence.CreateWriteBatch(from, from, WriteFlags.DisableWAL); // It writes from initial state to initial state.
                currentItemSize = 0;
            }

            currentItemSize++;
            if (currentItemSize > BatchSize)
            {
                writeBatch.Dispose();
                writeBatch = persistence.CreateWriteBatch(from, from, WriteFlags.DisableWAL);
                currentItemSize = 0;
            }
        }

        writeBatch.Dispose();
    }

    // Trie visitor that forwards every node into the channel, spinning when the channel is full.
    private class Visitor(ChannelWriter<Entry> channelWriter, VisitorProgressTracker progressTracker, CancellationToken cancellationToken = default) : ITreeVisitor<TreePathContextWithStorage>
    {
        public bool IsFullDbScan => true;
        public bool ExpectAccounts => true;

        public bool ShouldVisit(in TreePathContextWithStorage nodeContext, in ValueHash256 nextNode) =>
            !cancellationToken.IsCancellationRequested;

        public void VisitTree(in TreePathContextWithStorage nodeContext, in ValueHash256 rootHash) { }

        public void VisitMissingNode(in TreePathContextWithStorage nodeContext, in ValueHash256 nodeHash) =>
            throw new TrieException("Missing node is not expected");

        // Blocking write via spin: the channel is bounded so producers back off when ingest lags.
        private void Write(in TreePathContextWithStorage nodeContext, TrieNode node, bool isLeaf)
        {
            SpinWait sw = new();
            while (!channelWriter.TryWrite(new Entry(nodeContext.Storage, nodeContext.Path, node)))
            {
                cancellationToken.ThrowIfCancellationRequested();
                sw.SpinOnce();
            }

            progressTracker.OnNodeVisited(nodeContext.Path, isStorage: nodeContext.Storage is not null, isLeaf);
        }

        public void VisitBranch(in TreePathContextWithStorage nodeContext, TrieNode node) =>
            Write(nodeContext, node, isLeaf: false);

        public void VisitExtension(in TreePathContextWithStorage nodeContext, TrieNode node) =>
            Write(nodeContext, node, isLeaf: false);

        public void VisitLeaf(in TreePathContextWithStorage nodeContext, TrieNode node) =>
            Write(nodeContext, node, isLeaf: true);

        public void VisitAccount(in TreePathContextWithStorage nodeContext, TrieNode node, in AccountStruct account) { }

        public void Finish() => progressTracker.Finish();
    }
}
bytes")] + public static long SnapshotMemory { get; set; } + + [GaugeMetric] + [Description("Estimated memory used by compacted snapshot dictionaries in bytes")] + public static long CompactedSnapshotMemory { get; set; } + + [GaugeMetric] + [Description("Total estimated snapshot memory in bytes")] + public static long TotalSnapshotMemory { get; set; } + + [DetailedMetric] + [Description("Active pooled resources by category and type")] + [KeyIsLabel("category", "resource_type")] + public static ConcurrentDictionary ActivePooledResource { get; } = new(); + + [DetailedMetric] + [Description("Cached pooled resources by category and type")] + [KeyIsLabel("category", "resource_type")] + public static ConcurrentDictionary CachedPooledResource { get; } = new(); + + [DetailedMetric] + [Description("Created pooled resources by category and type")] + [KeyIsLabel("category", "resource_type")] + public static ConcurrentDictionary CreatedPooledResource { get; } = new(); + + [DetailedMetric] + [Description("Readonly snapshot bundle times")] + [ExponentialPowerHistogramMetric(Start = 1, Factor = 1.5, Count = 30, LabelNames = ["type"])] + public static IMetricObserver ReadOnlySnapshotBundleTimes { get; set; } = new NoopMetricObserver(); + + [DetailedMetric] + [Description("Time spend compacting snapshots")] + [ExponentialPowerHistogramMetric(Start = 1, Factor = 1.5, Count = 1, LabelNames = [])] + public static IMetricObserver CompactTime { get; set; } = new NoopMetricObserver(); + + [DetailedMetric] + [Description("Time spend compaction snapshots for mid compaction")] + [ExponentialPowerHistogramMetric(Start = 1, Factor = 1.5, Count = 1, LabelNames = [])] + public static IMetricObserver MidCompactTime { get; set; } = new NoopMetricObserver(); +} diff --git a/src/Nethermind/Nethermind.State.Flat/MpmcRingBuffer.cs b/src/Nethermind/Nethermind.State.Flat/MpmcRingBuffer.cs new file mode 100644 index 00000000000..0ed18b5d1a3 --- /dev/null +++ 
/// <summary>
/// Bounded lock-free multi-producer multi-consumer ring buffer (Vyukov-style sequence-number
/// queue). The original summary referenced a single-consumer sibling type whose name was lost
/// in transit; this variant is slightly slower on enqueue due to the interlocked tail update.
/// </summary>
/// <typeparam name="T">Element type stored in the buffer.</typeparam>
/// <remarks>
/// Each slot carries a sequence number: producers may write slot i when _sequences[i] == tail,
/// and publish by setting it to tail + 1; consumers may read when _sequences[i] == head + 1 and
/// recycle the slot by setting it to head + capacity. Head and tail are padded onto separate
/// cache lines to avoid false sharing.
/// </remarks>
public sealed class MpmcRingBuffer<T>
{
    private readonly T[] _entries;
    private readonly long[] _sequences;
    private readonly int _mask;       // capacity - 1; valid because capacity is a power of two
    private readonly int _capacity;

    /// <summary>
    /// Approximate number of queued items. Head and tail are read without mutual consistency,
    /// so the value is only an estimate and is clamped at zero.
    /// </summary>
    public long EstimatedJobCount
    {
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        get
        {
            long tail = Volatile.Read(ref _tail);
            long head = Volatile.Read(ref _head);

            long count = tail - head;
            return count < 0 ? 0 : count; // clamp just in case of a race
        }
    }

#pragma warning disable CS0169 // Field is never used
    // --- head (consumers) + padding ---
    private long _head;
    private long _p1, _p2, _p3, _p4, _p5, _p6, _p7;

    // --- tail (producers) + padding ---
    private long _tail;
    private long _p8, _p9, _p10, _p11, _p12, _p13, _p14;
#pragma warning restore CS0169 // Field is never used

    /// <summary>Creates a buffer with the given capacity.</summary>
    /// <param name="capacityPowerOfTwo">Capacity; must be a positive power of two.</param>
    /// <exception cref="ArgumentException">Thrown when the capacity is not a power of two.</exception>
    public MpmcRingBuffer(int capacityPowerOfTwo)
    {
        if (capacityPowerOfTwo <= 0 || (capacityPowerOfTwo & (capacityPowerOfTwo - 1)) != 0)
            throw new ArgumentException("Capacity must be power of two.");

        _capacity = capacityPowerOfTwo;
        _mask = capacityPowerOfTwo - 1;
        _entries = new T[capacityPowerOfTwo];
        _sequences = new long[capacityPowerOfTwo];

        // Slot i is initially available for the producer whose tail equals i.
        for (int i = 0; i < capacityPowerOfTwo; i++)
            _sequences[i] = i;
    }

    /// <summary>
    /// Attempts to enqueue <paramref name="item"/>. Returns false when the buffer is full.
    /// Safe to call from multiple producer threads concurrently.
    /// </summary>
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    public bool TryEnqueue(in T item)
    {
        while (true)
        {
            long tail = Volatile.Read(ref _tail);
            int index = (int)(tail & _mask);
            long seq = Volatile.Read(ref _sequences[index]);

            if (seq == tail)
            {
                // Interlocked exchange for multiple producer.
                if (Interlocked.CompareExchange(ref _tail, tail + 1, tail) == tail)
                {
                    // Success
                    _entries[index] = item;

                    // Mark as ready for consumer (tail + 1)
                    Volatile.Write(ref _sequences[index], tail + 1);
                    return true;
                }
            }
            else if (seq < tail)
            {
                // Slot hasn't been consumed yet from the previous lap
                return false;
            }

            // If seq > tail, another producer won the race; loop and retry
            Thread.SpinWait(1);
        }
    }

    /// <summary>
    /// Attempts to dequeue an item. Returns false when the buffer is empty.
    /// Safe to call from multiple consumer threads concurrently.
    /// </summary>
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    public bool TryDequeue(out T item)
    {
        while (true)
        {
            long head = Volatile.Read(ref _head);
            int index = (int)(head & _mask);
            long seq = Volatile.Read(ref _sequences[index]);
            long expectedSeq = head + 1;

            // If seq == expectedSeq, the producer has finished writing
            if (seq == expectedSeq)
            {
                if (Interlocked.CompareExchange(ref _head, head + 1, head) == head)
                {
                    item = _entries[index];
                    // Mark as ready for the producer's next lap (head + capacity)
                    Volatile.Write(ref _sequences[index], head + _capacity);
                    return true;
                }
            }
            else if (seq < expectedSeq)
            {
                // Producer hasn't filled this slot yet
                item = default!;
                return false;
            }

            // If seq > expectedSeq, another consumer won the race; loop and retry
            Thread.SpinWait(1);
        }
    }
}
a/src/Nethermind/Nethermind.State.Flat/NodeHashMismatchException.cs b/src/Nethermind/Nethermind.State.Flat/NodeHashMismatchException.cs new file mode 100644 index 00000000000..f7964b0f64a --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/NodeHashMismatchException.cs @@ -0,0 +1,8 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +namespace Nethermind.State.Flat; + +public class NodeHashMismatchException(string message) : Exception(message) +{ +} diff --git a/src/Nethermind/Nethermind.State.Flat/Persistence/BaseFlatPersistence.cs b/src/Nethermind/Nethermind.State.Flat/Persistence/BaseFlatPersistence.cs new file mode 100644 index 00000000000..084dcb18bb7 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/Persistence/BaseFlatPersistence.cs @@ -0,0 +1,257 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; +using Nethermind.Core; +using Nethermind.Core.Crypto; +using Nethermind.Core.Extensions; + +namespace Nethermind.State.Flat.Persistence; + +/// +/// Common persistence logic for flat state storage. Uses 2 database columns: +/// - State: Account data keyed by truncated address hash (20 bytes) +/// - Storage: Contract storage keyed by split address hash + slot hash (52 bytes) +/// +/// For storage, the address hash is split: first 4 bytes as prefix, remaining 16 bytes as suffix. +/// This helps RocksDB's comparator skip bytes during comparison and enables index shortening, +/// reducing memory usage. The tradeoff is that SelfDestruct must verify the 16-byte suffix. 
+/// +/// +/// ┌─────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ +/// │ State Key (Account) Total: 20 bytes │ +/// ├─────────────────────────────────────────────────────────────────────────────────────────────────────────────┤ +/// │ Bytes 0-19 │ +/// │ AddressHash[0..20] │ +/// └─────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ +/// +/// ┌─────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ +/// │ Storage Key Total: 52 bytes │ +/// ├──────────────────────────┬────────────────────────────────────────┬─────────────────────────────────────────┤ +/// │ Bytes 0-3 │ Bytes 4-35 │ Bytes 36-51 │ +/// │ AddressHash[0..4] │ SlotHash[0..32] │ AddressHash[4..20] │ +/// └──────────────────────────┴────────────────────────────────────────┴─────────────────────────────────────────┘ +/// +/// +public static class BaseFlatPersistence +{ + private const int AccountKeyLength = 20; + + private const int StoragePrefixPortion = BasePersistence.StoragePrefixPortion; + private const int StorageSlotKeySize = 32; + private const int StoragePostfixPortion = 16; + private const int StorageKeyLength = StoragePrefixPortion + StorageSlotKeySize + StoragePostfixPortion; + + private static ReadOnlySpan EncodeAccountKeyHashed(Span buffer, in ValueHash256 address) + { + address.Bytes[..AccountKeyLength].CopyTo(buffer); + return buffer[..AccountKeyLength]; + } + + private static ReadOnlySpan EncodeStorageKeyHashedWithShortPrefix(Span buffer, in ValueHash256 addrHash, in ValueHash256 slotHash) + { + // So we store the key with only a small part of the addr early then put the rest at the end. + // This helps with rocksdb comparator skipping 16 bytes during comparison, and with index shortening, which reduces + // memory usage. The downside is that during selfdestruct, it will need to double-check the 16 byte postfix. 
+ // <4-byte-address><32-byte-slot><16-byte-address> + addrHash.Bytes[..StoragePrefixPortion].CopyTo(buffer); + slotHash.Bytes.CopyTo(buffer[StoragePrefixPortion..(StoragePrefixPortion + StorageSlotKeySize)]); + addrHash.Bytes[StoragePrefixPortion..(StoragePrefixPortion + StoragePostfixPortion)].CopyTo(buffer[(StoragePrefixPortion + StorageSlotKeySize)..]); + + return buffer[..StorageKeyLength]; + } + + public readonly struct Reader( + ISortedKeyValueStore state, + ISortedKeyValueStore storage, + bool isPreimageMode = false + ) : BasePersistence.IHashedFlatReader + { + public bool IsPreimageMode => isPreimageMode; + + public int GetAccount(in ValueHash256 address, Span outBuffer) + { + ReadOnlySpan key = EncodeAccountKeyHashed(stackalloc byte[AccountKeyLength], address); + return state.Get(key, outBuffer); + } + + public bool TryGetStorage(in ValueHash256 address, in ValueHash256 slot, ref SlotValue outValue) + { + ReadOnlySpan storageKey = EncodeStorageKeyHashedWithShortPrefix(stackalloc byte[StorageKeyLength], address, slot); + + Span buffer = stackalloc byte[40]; + int resultSize = GetStorageBuffer(storageKey, buffer); + if (resultSize == 0) return false; + + Span value = buffer[..resultSize]; + + // AI said: Use Unsafe to bypass the 'Slice' bounds check and property access + // This writes the variable-length DB value into the end of the 32-byte struct + int len = value.Length; + if (len == SlotValue.ByteCount) + { + outValue = Unsafe.As(ref MemoryMarshal.GetReference(value)); + } + else + { + ref byte destBase = ref Unsafe.As(ref outValue); + + // Zero-initialize the leading bytes before copying the value + Unsafe.InitBlockUnaligned(ref destBase, 0, (uint)(SlotValue.ByteCount - len)); + + ref byte destPtr = ref Unsafe.Add(ref destBase, SlotValue.ByteCount - len); + + Unsafe.CopyBlockUnaligned( + ref destPtr, + ref MemoryMarshal.GetReference(value), + (uint)len); + } + + return true; + } + + private int GetStorageBuffer(ReadOnlySpan key, Span outBuffer) => 
storage.Get(key, outBuffer); + + public IPersistence.IFlatIterator CreateAccountIterator(in ValueHash256 startKey, in ValueHash256 endKey) + { + // Need to copy to arrays since spans from Value.Bytes might not survive the call + byte[] start = new byte[AccountKeyLength]; + startKey.Bytes[..AccountKeyLength].CopyTo(start); + + byte[] end = new byte[AccountKeyLength]; + endKey.Bytes[..AccountKeyLength].CopyTo(end); + + return new AccountIterator(state.GetViewBetween(start, end)); + } + + [SkipLocalsInit] + public IPersistence.IFlatIterator CreateStorageIterator(in ValueHash256 accountKey, in ValueHash256 startSlotKey, in ValueHash256 endSlotKey) + { + // Storage key layout: <4-byte-addr><32-byte-slot><16-byte-addr> + // We need to iterate all keys with the same 4-byte prefix and 16-byte suffix + Span firstKey = stackalloc byte[StorageKeyLength]; + Span lastKey = stackalloc byte[StorageKeyLength + 1]; + EncodeStorageKeyHashedWithShortPrefix(firstKey, accountKey, startSlotKey); + EncodeStorageKeyHashedWithShortPrefix(lastKey[..StorageKeyLength], accountKey, endSlotKey); + lastKey[StorageKeyLength] = 0; // Exclusive upper bound + + return new StorageIterator( + storage.GetViewBetween(firstKey, lastKey), + accountKey.Bytes[StoragePrefixPortion..(StoragePrefixPortion + StoragePostfixPortion)].ToArray()); + } + } + + public struct AccountIterator(ISortedView view) : IPersistence.IFlatIterator + { + private ValueHash256 _currentKey = default; + private byte[]? 
_currentValue = null; + + public bool MoveNext() + { + if (!view.MoveNext()) return false; + + // Account keys are 20 bytes (truncated hash) + if (view.CurrentKey.Length != AccountKeyLength) return MoveNext(); + + // Build 32-byte ValueHash256 from 20-byte key (zero-padded) + _currentKey = ValueKeccak.Zero; + view.CurrentKey.CopyTo(_currentKey.BytesAsSpan); + _currentValue = view.CurrentValue.ToArray(); + return true; + } + + public ValueHash256 CurrentKey => _currentKey; + public ReadOnlySpan CurrentValue => _currentValue; + + public void Dispose() => view.Dispose(); + } + + public struct StorageIterator(ISortedView view, byte[] addressSuffix) : IPersistence.IFlatIterator + { + // 16-byte suffix to match + private ValueHash256 _currentKey = default; + private byte[]? _currentValue = null; + + public bool MoveNext() + { + while (view.MoveNext()) + { + // Storage keys are 52 bytes: <4-byte-addr><32-byte-slot><16-byte-addr> + if (view.CurrentKey.Length != StorageKeyLength) continue; + + // Verify the 16-byte address suffix matches + if (!Bytes.AreEqual(view.CurrentKey[(StoragePrefixPortion + StorageSlotKeySize)..], addressSuffix)) + continue; + + // Extract the 32-byte slot hash from the middle of the key + _currentKey = new ValueHash256(view.CurrentKey.Slice(StoragePrefixPortion, StorageSlotKeySize)); + _currentValue = view.CurrentValue.ToArray(); + return true; + } + return false; + } + + public ValueHash256 CurrentKey => _currentKey; + public ReadOnlySpan CurrentValue => _currentValue; + + public void Dispose() => view.Dispose(); + } + + public readonly struct WriteBatch( + ISortedKeyValueStore storageSnap, + IWriteOnlyKeyValueStore state, + IWriteOnlyKeyValueStore storage, + WriteFlags flags + ) : BasePersistence.IHashedFlatWriteBatch + { + [SkipLocalsInit] + public void SelfDestruct(in ValueHash256 accountPath) + { + Span firstKey = stackalloc byte[StoragePrefixPortion]; // Because slot 0 is a thing, it's just the address prefix. 
+ Span lastKey = stackalloc byte[StorageKeyLength + 1]; // The +1 is because the upper bound is exclusive + BasePersistence.CreateStorageRange(accountPath.Bytes, firstKey, lastKey); + + using ISortedView storageReader = storageSnap.GetViewBetween(firstKey, lastKey); + IWriteOnlyKeyValueStore storageWriter = storage; + while (storageReader.MoveNext()) + { + // FlatInTrie + if (storageReader.CurrentKey.Length != StorageKeyLength) continue; + + // If we have a storage prefix portion, we need to double-check that the last 16 bytes match. + if (Bytes.AreEqual(storageReader.CurrentKey[(StoragePrefixPortion + StorageSlotKeySize)..], accountPath.Bytes[StoragePrefixPortion..(StoragePrefixPortion + StoragePostfixPortion)])) + { + storageWriter.Remove(storageReader.CurrentKey); + } + } + } + + public void RemoveAccount(in ValueHash256 addrHash) + { + ReadOnlySpan key = addrHash.Bytes[..AccountKeyLength]; + state.Remove(key); + } + + public void SetStorage(in ValueHash256 addrHash, in ValueHash256 slotHash, in SlotValue? 
slot) + { + ReadOnlySpan theKey = EncodeStorageKeyHashedWithShortPrefix(stackalloc byte[StorageKeyLength], addrHash, slotHash); + + if (slot.HasValue) + { + ReadOnlySpan withoutLeadingZeros = slot.Value.AsSpan.WithoutLeadingZeros(); + storage.PutSpan(theKey, withoutLeadingZeros, flags); + } + else + { + storage.Remove(theKey); + } + } + + public void SetAccount(in ValueHash256 addrHash, ReadOnlySpan account) + { + ReadOnlySpan key = addrHash.Bytes[..AccountKeyLength]; + state.PutSpan(key, account, flags); + } + } +} diff --git a/src/Nethermind/Nethermind.State.Flat/Persistence/BasePersistence.cs b/src/Nethermind/Nethermind.State.Flat/Persistence/BasePersistence.cs new file mode 100644 index 00000000000..01f9fd71e5d --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/Persistence/BasePersistence.cs @@ -0,0 +1,267 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System; +using Nethermind.Core; +using Nethermind.Core.Crypto; +using Nethermind.Int256; +using Nethermind.Serialization.Rlp; +using Nethermind.Trie; + +namespace Nethermind.State.Flat.Persistence; + +/// +/// A collection of classes to make combining persistence code easier. +/// Implementation accept generic of dependencies and the dependencies must be struct. This allow better inlining and devirtualized calls. +/// +/// implementation is expected to create and +/// passing in the flat and trie implementations along with +/// some dispose logic. +/// +/// Flat implementation is largely expected to implement and +/// which then get wrapped into and with +/// and . This allow preimage variation which does not hash the keys. 
public static class BasePersistence
{
    /// <summary>Number of leading bytes of the hashed account address used as the storage-key prefix.</summary>
    public const int StoragePrefixPortion = 4;

    /// <summary>
    /// Builds the inclusive [firstKey, lastKey] range that covers every storage entry of the given
    /// account: both keys carry the account's <see cref="StoragePrefixPortion"/>-byte prefix, the
    /// remainder is 0x00 for the range start and 0xff for the range end.
    /// </summary>
    internal static void CreateStorageRange(
        ReadOnlySpan<byte> accountPath,
        Span<byte> firstKey,
        Span<byte> lastKey)
    {
        accountPath[..StoragePrefixPortion].CopyTo(firstKey);
        accountPath[..StoragePrefixPortion].CopyTo(lastKey);
        firstKey[StoragePrefixPortion..].Clear();
        lastKey[StoragePrefixPortion..].Fill(0xff);
    }

    /// <summary>Flat reader keyed by already-hashed account/slot paths.</summary>
    public interface IHashedFlatReader
    {
        public int GetAccount(in ValueHash256 address, Span<byte> outBuffer);
        public bool TryGetStorage(in ValueHash256 address, in ValueHash256 slot, ref SlotValue outValue);
        public IPersistence.IFlatIterator CreateAccountIterator(in ValueHash256 startKey, in ValueHash256 endKey);
        public IPersistence.IFlatIterator CreateStorageIterator(in ValueHash256 accountKey, in ValueHash256 startSlotKey, in ValueHash256 endSlotKey);
        public bool IsPreimageMode { get; }
    }

    /// <summary>Flat write batch keyed by already-hashed account/slot paths.</summary>
    public interface IHashedFlatWriteBatch
    {
        public void SelfDestruct(in ValueHash256 address);

        public void RemoveAccount(in ValueHash256 address);

        public void SetAccount(in ValueHash256 address, ReadOnlySpan<byte> value);

        public void SetStorage(in ValueHash256 address, in ValueHash256 slotHash, in SlotValue? value);
    }

    /// <summary>Flat reader keyed by plain (unhashed) addresses and slot indices.</summary>
    public interface IFlatReader
    {
        public Account? GetAccount(Address address);
        public bool TryGetSlot(Address address, in UInt256 slot, ref SlotValue outValue);
        public byte[]? GetAccountRaw(Hash256 addrHash);
        public bool TryGetSlotRaw(in ValueHash256 address, in ValueHash256 slotHash, ref SlotValue outValue);
        public IPersistence.IFlatIterator CreateAccountIterator(in ValueHash256 startKey, in ValueHash256 endKey);
        public IPersistence.IFlatIterator CreateStorageIterator(in ValueHash256 accountKey, in ValueHash256 startSlotKey, in ValueHash256 endSlotKey);
        public bool IsPreimageMode { get; }
    }

    /// <summary>Flat write batch keyed by plain (unhashed) addresses and slot indices.</summary>
    public interface IFlatWriteBatch
    {
        public void SelfDestruct(Address addr);

        public void SetAccount(Address addr, Account? account);

        public void SetStorage(Address addr, in UInt256 slot, in SlotValue? value);

        public void SetStorageRaw(Hash256 addrHash, Hash256 slotHash, in SlotValue? value);

        public void SetAccountRaw(Hash256 addrHash, Account account);
    }

    /// <summary>Trie node reader, split by state vs storage trie.</summary>
    public interface ITrieReader
    {
        public byte[]? TryLoadStateRlp(in TreePath path, ReadFlags flags);
        public byte[]? TryLoadStorageRlp(Hash256 address, in TreePath path, ReadFlags flags);
    }

    /// <summary>Trie node write batch, split by state vs storage trie.</summary>
    public interface ITrieWriteBatch
    {
        public void SelfDestruct(in ValueHash256 address);
        public void SetStateTrieNode(in TreePath path, TrieNode tnValue);
        public void SetStorageTrieNode(Hash256 address, in TreePath path, TrieNode tnValue);
    }

    /// <summary>
    /// Adapts an <see cref="IHashedFlatWriteBatch"/> into an <see cref="IFlatWriteBatch"/> by hashing
    /// addresses/slots and RLP-encoding accounts on the way in.
    /// </summary>
    public struct ToHashedWriteBatch<TWriteBatch>(
        TWriteBatch flatWriteBatch,
        bool useFlatAccount = true
    ) : IFlatWriteBatch
        where TWriteBatch : struct, IHashedFlatWriteBatch
    {
        // Slim encoding omits fields derivable from the flat layout; full encoding kept for compatibility.
        private readonly AccountDecoder _accountDecoder = useFlatAccount ? AccountDecoder.Slim : AccountDecoder.Instance;
        private TWriteBatch _flatWriteBatch = flatWriteBatch;

        public void SelfDestruct(Address addr) => _flatWriteBatch.SelfDestruct(addr.ToAccountPath);

        public void SetAccount(Address addr, Account? account)
        {
            if (account is null)
            {
                _flatWriteBatch.RemoveAccount(addr.ToAccountPath);
                return;
            }

            using NettyRlpStream stream = _accountDecoder.EncodeToNewNettyStream(account);
            _flatWriteBatch.SetAccount(addr.ToAccountPath, stream.AsSpan());
        }

        public void SetStorage(Address addr, in UInt256 slot, in SlotValue? value)
        {
            ValueHash256 hashBuffer = ValueKeccak.Zero;
            StorageTree.ComputeKeyWithLookup(slot, ref hashBuffer);
            _flatWriteBatch.SetStorage(addr.ToAccountPath, hashBuffer, value);
        }

        // Fix: parameter was declared `Hash256? addrHash`, contradicting the non-nullable
        // IFlatWriteBatch.SetStorageRaw signature; a null argument would have failed on the
        // ValueHash256 conversion at runtime instead of being rejected by the compiler.
        public void SetStorageRaw(Hash256 addrHash, Hash256 slotHash, in SlotValue? value) =>
            _flatWriteBatch.SetStorage(addrHash, slotHash, value);

        public void SetAccountRaw(Hash256 addrHash, Account account)
        {
            using NettyRlpStream stream = _accountDecoder.EncodeToNewNettyStream(account);
            _flatWriteBatch.SetAccount(addrHash, stream.AsSpan());
        }
    }

    /// <summary>
    /// Adapts an <see cref="IHashedFlatReader"/> into an <see cref="IFlatReader"/> by hashing
    /// addresses/slots and RLP-decoding accounts on the way out.
    /// </summary>
    public struct ToHashedFlatReader<TFlatReader>(
        TFlatReader flatReader,
        bool useFlatAccount = true
    ) : IFlatReader
        where TFlatReader : struct, IHashedFlatReader
    {
        private readonly AccountDecoder _accountDecoder = useFlatAccount ? AccountDecoder.Slim : AccountDecoder.Instance;
        // Upper bound for an encoded account; stack-allocated per lookup to avoid heap churn.
        private readonly int _accountSpanBufferSize = 256;
        private TFlatReader _flatReader = flatReader;

        public Account? GetAccount(Address address)
        {
            Span<byte> valueBuffer = stackalloc byte[_accountSpanBufferSize];
            int responseSize = _flatReader.GetAccount(address.ToAccountPath, valueBuffer);
            if (responseSize == 0)
            {
                return null;
            }

            Rlp.ValueDecoderContext ctx = new(valueBuffer[..responseSize]);
            return _accountDecoder.Decode(ref ctx);
        }

        public bool TryGetSlot(Address address, in UInt256 slot, ref SlotValue outValue)
        {
            ValueHash256 slotHash = ValueKeccak.Zero;
            StorageTree.ComputeKeyWithLookup(slot, ref slotHash);

            return TryGetSlotRaw(address.ToAccountPath, slotHash, ref outValue);
        }

        public byte[]? GetAccountRaw(Hash256 addrHash)
        {
            Span<byte> valueBuffer = stackalloc byte[_accountSpanBufferSize];
            int responseSize = _flatReader.GetAccount(addrHash.ValueHash256, valueBuffer);
            return responseSize == 0 ? null : valueBuffer[..responseSize].ToArray();
        }

        public bool TryGetSlotRaw(in ValueHash256 address, in ValueHash256 slotHash, ref SlotValue outValue) =>
            _flatReader.TryGetStorage(address, slotHash, ref outValue);

        public IPersistence.IFlatIterator CreateAccountIterator(in ValueHash256 startKey, in ValueHash256 endKey) =>
            _flatReader.CreateAccountIterator(startKey, endKey);

        public IPersistence.IFlatIterator CreateStorageIterator(in ValueHash256 accountKey, in ValueHash256 startSlotKey, in ValueHash256 endSlotKey) =>
            _flatReader.CreateStorageIterator(accountKey, startSlotKey, endSlotKey);

        public bool IsPreimageMode => _flatReader.IsPreimageMode;
    }

    /// <summary>
    /// Combines a flat reader and a trie reader into a full <see cref="IPersistence.IPersistenceReader"/>;
    /// the supplied disposer releases whatever snapshot backs both readers.
    /// </summary>
    public class Reader<TFlatReader, TTrieReader>(
        TFlatReader flatReader,
        TTrieReader trieReader,
        StateId currentState,
        IDisposable disposer)
        : IPersistence.IPersistenceReader
        where TFlatReader : struct, IFlatReader
        where TTrieReader : struct, ITrieReader
    {
        private TTrieReader _trieReader = trieReader;
        private TFlatReader _flatReader = flatReader;

        public StateId CurrentState { get; } = currentState;

        public void Dispose() => disposer.Dispose();

        public Account? GetAccount(Address address) =>
            _flatReader.GetAccount(address);

        public bool TryGetSlot(Address address, in UInt256 slot, ref SlotValue outValue) =>
            _flatReader.TryGetSlot(address, in slot, ref outValue);

        public byte[]? TryLoadStateRlp(in TreePath path, ReadFlags flags) =>
            _trieReader.TryLoadStateRlp(path, flags);

        public byte[]? TryLoadStorageRlp(Hash256 address, in TreePath path, ReadFlags flags) =>
            _trieReader.TryLoadStorageRlp(address, path, flags);

        public byte[]? GetAccountRaw(Hash256 addrHash) =>
            _flatReader.GetAccountRaw(addrHash);

        public bool TryGetStorageRaw(Hash256 addrHash, Hash256 slotHash, ref SlotValue value) =>
            _flatReader.TryGetSlotRaw(addrHash, slotHash, ref value);

        public IPersistence.IFlatIterator CreateAccountIterator(in ValueHash256 startKey, in ValueHash256 endKey) =>
            _flatReader.CreateAccountIterator(startKey, endKey);

        public IPersistence.IFlatIterator CreateStorageIterator(in ValueHash256 accountKey, in ValueHash256 startSlotKey, in ValueHash256 endSlotKey) =>
            _flatReader.CreateStorageIterator(accountKey, startSlotKey, endSlotKey);

        public bool IsPreimageMode => _flatReader.IsPreimageMode;
    }

    /// <summary>
    /// Combines a flat write batch and a trie write batch into a full <see cref="IPersistence.IWriteBatch"/>;
    /// the supplied disposer commits/releases the underlying batch resources.
    /// </summary>
    public class WriteBatch<TFlatWriteBatch, TTrieWriteBatch>(
        in TFlatWriteBatch flatWriteBatch,
        TTrieWriteBatch trieWriteBatch,
        IDisposable disposer)
        : IPersistence.IWriteBatch
        where TFlatWriteBatch : struct, IFlatWriteBatch
        where TTrieWriteBatch : struct, ITrieWriteBatch
    {
        private TFlatWriteBatch _flatWriter = flatWriteBatch;
        private TTrieWriteBatch _trieWriteBatch = trieWriteBatch;

        public void Dispose() => disposer.Dispose();

        // Self-destruct must hit both representations: flat entries and the storage trie nodes.
        public void SelfDestruct(Address addr)
        {
            _flatWriter.SelfDestruct(addr);
            _trieWriteBatch.SelfDestruct(addr.ToAccountPath);
        }

        public void SetAccount(Address addr, Account? account) =>
            _flatWriter.SetAccount(addr, account);

        public void SetStorage(Address addr, in UInt256 slot, in SlotValue? value) =>
            _flatWriter.SetStorage(addr, slot, value);

        public void SetStateTrieNode(in TreePath path, TrieNode tnValue) =>
            _trieWriteBatch.SetStateTrieNode(path, tnValue);

        public void SetStorageTrieNode(Hash256 address, in TreePath path, TrieNode tnValue) =>
            _trieWriteBatch.SetStorageTrieNode(address, path, tnValue);

        public void SetStorageRaw(Hash256 addrHash, Hash256 slotHash, in SlotValue? value) =>
            _flatWriter.SetStorageRaw(addrHash, slotHash, value);

        public void SetAccountRaw(Hash256 addrHash, Account account) =>
            _flatWriter.SetAccountRaw(addrHash, account);
    }
}
+/// +/// +/// === Main Columns (optimized for short paths) === +/// +/// ┌─────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ +/// │ StateNodesTop (path length 0-5) Total: 3 bytes │ +/// ├──────────────┬──────────────┬──────────────────────────────────────────────────────────────────────────────┤ +/// │ Byte 0 │ Byte 1 │ Byte 2 │ +/// │ Path[0] │ Path[1] │ Path[2] upper 4 bits | Length lower 4 bits │ +/// └──────────────┴──────────────┴──────────────────────────────────────────────────────────────────────────────┘ +/// +/// ┌─────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ +/// │ StateNodes (path length 6-15) Total: 8 bytes │ +/// ├────────────────────────────────────────┬────────────────────────────────────────────────────────────────────┤ +/// │ Bytes 0-6 │ Byte 7 │ +/// │ Path[0..7] │ Path[7] upper 4 bits | Length lower 4 bits │ +/// └────────────────────────────────────────┴────────────────────────────────────────────────────────────────────┘ +/// +/// ┌─────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ +/// │ StorageNodes (path length 0-15) Total: 28 bytes │ +/// ├──────────────────────────┬───────────────────────────────────┬──────────────────────────────────────────────┤ +/// │ Bytes 0-3 │ Bytes 4-11 │ Bytes 12-27 │ +/// │ Address[0..4] │ Path via EncodeWith8Byte │ Address[4..20] │ +/// └──────────────────────────┴───────────────────────────────────┴──────────────────────────────────────────────┘ +/// +/// === FallbackNodes Column (for long paths, prefix-partitioned) === +/// +/// ┌─────────────────────────────────────────────────────────────────────────────────────────────────────────────┐ +/// │ State nodes (path length 16+) Total: 34 bytes │ +/// ├──────────────┬──────────────────────────────────────────────────────────────────┬───────────────────────────┤ +/// │ Byte 0 │ 
public static class BaseTriePersistence
{
    private const int StorageHashPrefixLength = 20; // Only a 20-byte prefix of the hashed address is persisted; keeps the RocksDB index small.
    private const int FullPathLength = 32;
    private const int PathLengthLength = 1;

    private const int ShortenedPathThreshold = 15; // Must be odd
    private const int ShortenedPathLength = 8; // ceil(ShortenedPathThreshold / 2)

    // Note: splitting the storage tree has been shown not to improve block cache hit rate.
    private const int StateNodesTopThreshold = 5;
    private const int StateNodesTopPathLength = 3;

    private const int FullStateNodesKeyLength = 1 + FullPathLength + PathLengthLength;

    private const int StoragePrefixPortion = BasePersistence.StoragePrefixPortion;
    private const int ShortenedStorageNodesKeyLength = StoragePrefixPortion + ShortenedPathLength + (StorageHashPrefixLength - StoragePrefixPortion);
    private const int FullStorageNodesKeyLength = 1 + StorageHashPrefixLength + FullPathLength + PathLengthLength;

    private static ReadOnlySpan<byte> EncodeStateTopNodeKey(Span<byte> key, in TreePath path)
    {
        // Layout: <3-byte-path>, with the path length packed into the low 4 bits of the final byte
        // (the high 4 bits of that byte still hold path data).
        path.Path.Bytes[0..StateNodesTopPathLength].CopyTo(key);
        int last = StateNodesTopPathLength - 1;
        key[last] = (byte)((key[last] & 0xf0) | ((byte)path.Length & 0x0f));
        return key[..StateNodesTopPathLength];
    }

    private static ReadOnlySpan<byte> EncodeShortenedStateNodeKey(Span<byte> key, in TreePath path)
    {
        // Layout: <8-byte-path>; the length occupies the low 4 bits of the last byte (see EncodeWith8Byte).
        path.EncodeWith8Byte(key);
        return key[..ShortenedPathLength];
    }

    private static ReadOnlySpan<byte> EncodeFullStateNodeKey(Span<byte> key, in TreePath path)
    {
        // Layout: <0x00 partition byte><32-byte full path><1-byte length>
        key[0] = 0;
        path.Path.Bytes.CopyTo(key[1..]);
        key[1 + FullPathLength] = (byte)path.Length;
        return key[..FullStateNodesKeyLength];
    }

    internal static ReadOnlySpan<byte> EncodeShortenedStorageNodeKey(Span<byte> key, Hash256 addr, in TreePath path)
    {
        // Layout: <4-byte address prefix><8-byte packed path><16-byte address remainder>
        addr.Bytes[..StoragePrefixPortion].CopyTo(key);
        path.EncodeWith8Byte(key[StoragePrefixPortion..]);
        addr.Bytes[StoragePrefixPortion..StorageHashPrefixLength].CopyTo(key[(StoragePrefixPortion + ShortenedPathLength)..]);
        return key[..ShortenedStorageNodesKeyLength];
    }

    private static ReadOnlySpan<byte> EncodeFullStorageNodeKey(Span<byte> key, Hash256 address, in TreePath path)
    {
        // Layout: <0x01 partition byte><4-byte address prefix><32-byte full path><1-byte length><16-byte address remainder>
        key[0] = 1;
        address.Bytes[..StoragePrefixPortion].CopyTo(key[1..]);
        path.Path.Bytes.CopyTo(key[(1 + StoragePrefixPortion)..]);
        key[1 + StoragePrefixPortion + FullPathLength] = (byte)path.Length;
        address.Bytes[StoragePrefixPortion..StorageHashPrefixLength].CopyTo(key[(1 + StoragePrefixPortion + FullPathLength + PathLengthLength)..]);
        return key[..FullStorageNodesKeyLength];
    }

    /// <summary>
    /// Write batch routing each trie node to the column matching its path length, and able to
    /// range-delete an account's storage nodes on self-destruct.
    /// </summary>
    public readonly struct WriteBatch(
        ISortedKeyValueStore storageNodesSnap,
        ISortedKeyValueStore fallbackNodesSnap,
        IWriteOnlyKeyValueStore stateTopNodes,
        IWriteOnlyKeyValueStore stateNodes,
        IWriteOnlyKeyValueStore storageNodes,
        IWriteOnlyKeyValueStore fallbackNodes,
        WriteFlags flags
    ) : BasePersistence.ITrieWriteBatch
    {

        [SkipLocalsInit]
        public void SelfDestruct(in ValueHash256 accountPath)
        {
            Span<byte> startBuf = stackalloc byte[1 + StoragePrefixPortion];
            Span<byte> endBuf = stackalloc byte[FullStorageNodesKeyLength + 1];

            // Technically, this is kinda not needed for nodes as it's always traversed so orphaned trie just get skipped.
            {
                Span<byte> rangeStart = startBuf[..StoragePrefixPortion];
                Span<byte> rangeEnd = endBuf[..(ShortenedStorageNodesKeyLength + 1)];
                BasePersistence.CreateStorageRange(accountPath.Bytes, rangeStart, rangeEnd);

                using ISortedView cursor = storageNodesSnap.GetViewBetween(rangeStart, rangeEnd);
                while (cursor.MoveNext())
                {
                    // The 4-byte prefix only narrows the scan; confirm the full address suffix matches.
                    if (Bytes.AreEqual(cursor.CurrentKey[(StoragePrefixPortion + ShortenedPathLength)..], accountPath.Bytes[StoragePrefixPortion..(StorageHashPrefixLength)]))
                    {
                        storageNodes.Remove(cursor.CurrentKey);
                    }
                }
            }

            {
                Span<byte> rangeStart = startBuf;
                Span<byte> rangeEnd = endBuf;
                // Same sweep over the fallback column; its keys additionally start with the 0x01 storage partition byte.
                rangeStart[0] = 1;
                rangeEnd[0] = 1;
                BasePersistence.CreateStorageRange(accountPath.Bytes, rangeStart[1..], rangeEnd[1..]);
                using ISortedView cursor = fallbackNodesSnap.GetViewBetween(rangeStart, rangeEnd);
                while (cursor.MoveNext())
                {
                    // Confirm the trailing address remainder before deleting.
                    if (Bytes.AreEqual(cursor.CurrentKey[(1 + StoragePrefixPortion + FullPathLength + PathLengthLength)..], accountPath.Bytes[StoragePrefixPortion..(StorageHashPrefixLength)]))
                    {
                        fallbackNodes.Remove(cursor.CurrentKey);
                    }
                }
            }
        }

        public void SetStateTrieNode(in TreePath path, TrieNode tn)
        {
            if (path.Length <= StateNodesTopThreshold)
            {
                stateTopNodes.PutSpan(EncodeStateTopNodeKey(stackalloc byte[StateNodesTopPathLength], path), tn.FullRlp.Span, flags);
            }
            else if (path.Length <= ShortenedPathThreshold)
            {
                stateNodes.PutSpan(EncodeShortenedStateNodeKey(stackalloc byte[ShortenedPathLength], path), tn.FullRlp.Span, flags);
            }
            else
            {
                fallbackNodes.PutSpan(EncodeFullStateNodeKey(stackalloc byte[FullStateNodesKeyLength], in path), tn.FullRlp.Span, flags);
            }
        }

        public void SetStorageTrieNode(Hash256 address, in TreePath path, TrieNode tn)
        {
            if (path.Length <= ShortenedPathThreshold)
            {
                storageNodes.PutSpan(EncodeShortenedStorageNodeKey(stackalloc byte[ShortenedStorageNodesKeyLength], address, path), tn.FullRlp.Span, flags);
            }
            else
            {
                fallbackNodes.PutSpan(EncodeFullStorageNodeKey(stackalloc byte[FullStorageNodesKeyLength], address, in path), tn.FullRlp.Span, flags);
            }
        }
    }


    /// <summary>
    /// Reader counterpart of <see cref="WriteBatch"/>: resolves a node's column from its path length
    /// and re-derives the same encoded key for the lookup.
    /// </summary>
    public readonly struct Reader(
        IReadOnlyKeyValueStore stateTopNodes,
        IReadOnlyKeyValueStore stateNodes,
        IReadOnlyKeyValueStore storageNodes,
        IReadOnlyKeyValueStore fallbackNodes
    ) : BasePersistence.ITrieReader
    {
        public byte[]? TryLoadStateRlp(in TreePath path, ReadFlags flags)
        {
            if (path.Length <= StateNodesTopThreshold)
            {
                return stateTopNodes.Get(EncodeStateTopNodeKey(stackalloc byte[StateNodesTopPathLength], in path), flags: flags);
            }

            if (path.Length <= ShortenedPathThreshold)
            {
                return stateNodes.Get(EncodeShortenedStateNodeKey(stackalloc byte[ShortenedPathLength], in path), flags: flags);
            }

            return fallbackNodes.Get(EncodeFullStateNodeKey(stackalloc byte[FullStateNodesKeyLength], in path), flags: flags);
        }

        public byte[]? TryLoadStorageRlp(Hash256 address, in TreePath path, ReadFlags flags)
        {
            if (path.Length <= ShortenedPathThreshold)
            {
                return storageNodes.Get(EncodeShortenedStorageNodeKey(stackalloc byte[ShortenedStorageNodesKeyLength], address, in path), flags: flags);
            }

            return fallbackNodes.Get(EncodeFullStorageNodeKey(stackalloc byte[FullStorageNodesKeyLength], address, in path), flags: flags);
        }
    }
}
public sealed unsafe class BloomFilter : IDisposable
{
    // ---- constants ----
    private const int CacheLineBytes = 64; // one probe block = one cache line = 512 bits

    // RocksDB golden-ratio multipliers used to derive successive probe bits from one hash
    private const uint Mul32 = 0x9E3779B9u;
    private const uint Mul8 = 0xAB25F4C1u;

    // Linux THP constants
    private const int MADV_HUGEPAGE = 14;
    private const nuint HugePageSize = 2 * 1024 * 1024; // 2MB

    [DllImport("libc", EntryPoint = "madvise", SetLastError = true)]
    private static extern int Madvise(void* addr, nuint length, int advice);

    /// <summary>Design capacity (number of keys) the filter was sized for.</summary>
    public long Capacity { get; }

    /// <summary>Bits allocated per expected key.</summary>
    public double BitsPerKey { get; }

    /// <summary>Number of probe bits set/checked per key.</summary>
    public int K { get; }

    /// <summary>Number of keys added so far (including <c>initialCount</c>).</summary>
    public long Count => Volatile.Read(ref _count);

    // Total bloom data bytes (no header), always a multiple of 64 bytes
    public long DataBytes { get; }
    public long NumBlocks { get; } // number of 64B cache lines

    private long _count;

    // 64-byte aligned base address for AVX loads (and cacheline semantics)
    private byte* _data;
    private nuint _dataSize;
    private int _disposed;

    /// <summary>
    /// Allocates and zeroes a filter sized for <paramref name="capacity"/> keys at
    /// <paramref name="bitsPerKey"/> bits each, rounded up to whole cache lines.
    /// </summary>
    /// <exception cref="ArgumentOutOfRangeException">Capacity is non-positive or bitsPerKey is not a finite value &gt; 0.</exception>
    /// <exception cref="OutOfMemoryException">The native allocation failed.</exception>
    public BloomFilter(long capacity, double bitsPerKey, long initialCount = 0)
    {
        if (capacity <= 0) throw new ArgumentOutOfRangeException(nameof(capacity));
        if (bitsPerKey <= 0.0 || double.IsNaN(bitsPerKey) || double.IsInfinity(bitsPerKey))
            throw new ArgumentOutOfRangeException(nameof(bitsPerKey), "BitsPerKey must be a finite value > 0.");

        Capacity = capacity;
        BitsPerKey = bitsPerKey;
        _count = initialCount;

        K = ChooseNumProbesRocks(bitsPerKey);

        long totalBytes = AlignUp((long)Math.Ceiling(capacity * bitsPerKey / 8.0), CacheLineBytes);
        DataBytes = totalBytes;
        NumBlocks = totalBytes / CacheLineBytes;

        _dataSize = checked((nuint)totalBytes);

        // On Linux, align allocations of >= 2MB to 2MB boundaries so the kernel can back them
        // with Transparent Huge Pages (THP), significantly reducing TLB misses for large filters.
        bool useHugePages = OperatingSystem.IsLinux() && _dataSize >= HugePageSize;
        nuint alignment = useHugePages ? HugePageSize : CacheLineBytes;

        _data = (byte*)NativeMemory.AlignedAlloc(_dataSize, alignment);
        if (_data == null) throw new OutOfMemoryException();

        // Hint the kernel BEFORE first touch, so the page faults triggered by the zeroing
        // below are satisfied with 2MB physical pages immediately.
        if (useHugePages)
        {
            Madvise(_data, _dataSize, MADV_HUGEPAGE);
        }

        // Zero-init; for huge allocations this also triggers the actual physical allocation.
        // Fix: the original cleared up to int.MaxValue bytes and then, for >2GB filters,
        // re-cleared the entire region from offset 0 — the first pass was redundant work.
        ZeroData();
    }

    /// <summary>
    /// Adds a key: sets <see cref="K"/> bits inside one 64B cache line chosen by the key's hash.
    /// Thread-safe (atomic bit ORs). Returns the byte offset of the touched cache line.
    /// </summary>
    /// <exception cref="ObjectDisposedException">The filter has been disposed.</exception>
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    public long Add(ulong key)
    {
        if (Volatile.Read(ref _disposed) != 0)
            throw new ObjectDisposedException(nameof(BloomFilter));

        GetLineAndHashState(key, NumBlocks, out long lineIndex, out uint h);

        byte* linePtr = _data + lineIndex * CacheLineBytes;

        // Scalar atomic add (SIMD add isn't worth it with atomics)
        const int shift = 32 - 9; // top 9 bits select a bit within the 512-bit line; log2(512)=9
        int k = K;
        for (int i = 0; i < k; i++)
        {
            int bit = (int)(h >> shift);
            ref long lane = ref *(long*)(linePtr + ((bit >> 6) * 8));
            Interlocked.Or(ref lane, (long)(1UL << (bit & 63)));
            h *= Mul32; // next probe bit from the same hash state
        }

        Interlocked.Increment(ref _count);
        return lineIndex * CacheLineBytes;
    }

    /// <summary>
    /// Queries the filter. Returns false only if the key was definitely never added;
    /// true may be a false positive.
    /// </summary>
    /// <exception cref="ObjectDisposedException">The filter has been disposed.</exception>
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    public bool MightContain(ulong key)
    {
        if (Volatile.Read(ref _disposed) != 0)
            throw new ObjectDisposedException(nameof(BloomFilter));

        GetLineAndHashState(key, NumBlocks, out long lineIndex, out uint h);

        byte* linePtr = _data + lineIndex * CacheLineBytes;

        if (Avx2.IsSupported)
        {
            return HashMayMatchPreparedAvx2(h, K, linePtr);
        }

        // Scalar fallback: probe the same bit sequence Add() set
        ulong* lanes = (ulong*)linePtr;
        const int shift = 32 - 9;

        int k = K;
        for (int i = 0; i < k; i++)
        {
            int bit = (int)(h >> shift);
            int laneIndex = bit >> 6;
            ulong mask = 1UL << (bit & 63);
            if ((lanes[laneIndex] & mask) == 0) return false;
            h *= Mul32;
        }

        return true;
    }

    /// <summary>Zero the bloom bits and reset count to 0.</summary>
    /// <exception cref="ObjectDisposedException">The filter has been disposed.</exception>
    public void Clear()
    {
        if (Volatile.Read(ref _disposed) != 0)
            throw new ObjectDisposedException(nameof(BloomFilter));

        ZeroData();
        Volatile.Write(ref _count, 0);
    }

    internal byte* DangerousGetDataPointer() => _data;

    public void Dispose()
    {
        // First disposer wins; concurrent/double Dispose is a no-op.
        if (Interlocked.Exchange(ref _disposed, 1) != 0) return;

        if (_data != null)
        {
            NativeMemory.AlignedFree(_data);
            _data = null;
            _dataSize = 0;
        }
    }

    // ----------------- internal helpers -----------------

    /// <summary>Zeroes the whole data region in bounded chunks (handles sizes beyond int.MaxValue).</summary>
    private void ZeroData()
    {
        const int Chunk = 8 * 1024 * 1024;
        long totalBytes = DataBytes;
        long off = 0;
        while (off < totalBytes)
        {
            int len = (int)Math.Min(Chunk, totalBytes - off);
            new Span<byte>(_data + off, len).Clear();
            off += len;
        }
    }

    // Probe-count table matching RocksDB's choice for a given bits-per-key (in millibits).
    private static int ChooseNumProbesRocks(double bitsPerKey)
    {
        int mbpk = (int)Math.Round(bitsPerKey * 1000.0);

        return mbpk switch
        {
            <= 2080 => 1,
            <= 3580 => 2,
            <= 5100 => 3,
            <= 6640 => 4,
            <= 8300 => 5,
            <= 10070 => 6,
            <= 11720 => 7,
            <= 14001 => 8,
            <= 16050 => 9,
            <= 18300 => 10,
            <= 22001 => 11,
            <= 25501 => 12,
            > 50000 => 24,
            _ => (mbpk - 1) / 2000 - 1
        };
    }

    // Maps a key to its cache line (multiply-high range reduction, no modulo)
    // and yields the 32-bit hash state used for probe-bit derivation.
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static void GetLineAndHashState(ulong key, long numBlocks, out long lineIndex, out uint h)
    {
        // One 64-bit mix, split into two 32-bit values like RocksDB
        ulong x = Mix64(key);
        uint h1 = (uint)x;
        uint h2 = (uint)(x >> 32);

        lineIndex = (long)((ulong)numBlocks <= uint.MaxValue
            ? ((ulong)h1 * (ulong)(uint)numBlocks) >> 32 // FastRange32-style: floor(h1 * numBlocks / 2^32)
            : (ulong)(((UInt128)x * (ulong)numBlocks) >> 64)); // 64-bit multiply-high: floor(x * numBlocks / 2^64)

        h = h2;
    }

    // Checks up to 8 probe bits per iteration against the 64B line using AVX2 permutes.
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static bool HashMayMatchPreparedAvx2(uint h2, int numProbes, byte* dataAtCacheLine)
    {
        // Powers of 32-bit golden ratio, mod 2^32 (same as RocksDB)
        Vector256<uint> multipliers = Vector256.Create(
            0x00000001u, 0x9E3779B9u, 0xE35E67B1u, 0x734297E9u,
            0x35FBE861u, 0xDEB7C719u, 0x0448B211u, 0x3459B749u
        );

        int rem = numProbes;
        uint h = h2;

        // Two 256-bit loads = 64 bytes = 512 bits (aligned allocation, so aligned loads ok)
        Vector256<uint> lower = Avx.LoadVector256((uint*)dataAtCacheLine);
        Vector256<uint> upper = Avx.LoadVector256((uint*)(dataAtCacheLine + 32));

        Vector256<int> zeroToSeven = Vector256.Create(0, 1, 2, 3, 4, 5, 6, 7);
        Vector256<uint> ones = Vector256.Create(1u);

        for (; ; )
        {
            Vector256<uint> hashVec = Vector256.Create(h);
            hashVec = Avx2.MultiplyLow(hashVec, multipliers);

            // top 4 bits -> word address 0..15
            Vector256<uint> wordAddr = Avx2.ShiftRightLogical(hashVec, 28);

            Vector256<uint> pLower = Avx2.PermuteVar8x32(lower, wordAddr);
            Vector256<uint> pUpper = Avx2.PermuteVar8x32(upper, wordAddr);

            // Select upper vs lower based on sign bit (equivalent to top bit of word address)
            Vector256<int> upperLowerSelector = Avx2.ShiftRightArithmetic(hashVec.AsInt32(), 31);
            Vector256<byte> selectedBytes =
                Avx2.BlendVariable(pLower.AsByte(), pUpper.AsByte(), upperLowerSelector.AsByte());
            Vector256<uint> valueVec = selectedBytes.AsUInt32();

            // lanes 0..(rem-1) active
            Vector256<int> remV = Vector256.Create(rem);
            Vector256<int> kSel = Avx2.CompareGreaterThan(remV, zeroToSeven);

            // bit-within-word: (hashVec << 4) >> 27 => 0..31
            Vector256<uint> bitAddr = Avx2.ShiftLeftLogical(hashVec, 4);
            bitAddr = Avx2.ShiftRightLogical(bitAddr, 27);

            Vector256<uint> bitMask = Avx2.ShiftLeftLogicalVariable(ones, bitAddr);
            bitMask = Avx2.And(bitMask, kSel.AsUInt32());

            bool match = Avx2.TestC(valueVec.AsByte(), bitMask.AsByte());

            if (rem <= 8) return match;
            if (!match) return false;

            h *= Mul8;
            rem -= 8;
        }
    }

    // Murmur3-style 64-bit finalizer: full avalanche over the input bits.
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    private static ulong Mix64(ulong x)
    {
        x ^= x >> 33;
        x *= 0xff51afd7ed558ccdUL;
        x ^= x >> 33;
        x *= 0xc4ceb9fe1a85ec53UL;
        x ^= x >> 33;
        return x;
    }

    // Rounds value up to the next multiple of alignment (alignment must be a power of two).
    private static long AlignUp(long value, int alignment) => (value + alignment - 1) & ~(alignment - 1);
}
_cachedReader; + private int _isDisposed; + + public CachedReaderPersistence(IPersistence inner, + IProcessExitSource processExitSource, + ILogManager logManager) + { + _inner = inner; + _logger = logManager.GetClassLogger(); + _cancelTokenSource = CancellationTokenSource.CreateLinkedTokenSource(processExitSource.Token); + + // Start the background cache clearing task + _clearTimerTask = Task.Run(async () => + { + using PeriodicTimer timer = new(TimeSpan.FromSeconds(5)); + + try + { + while (true) + { + await timer.WaitForNextTickAsync(_cancelTokenSource.Token); + ClearReaderCache(); + } + } + catch (OperationCanceledException) + { + } + }); + + // Prime the reader cache + using IPersistence.IPersistenceReader reader = CreateReader(); + } + + public IPersistence.IPersistenceReader CreateReader() + { + RefCountingPersistenceReader? cachedReader = _cachedReader; + if (cachedReader is not null && cachedReader.TryAcquire()) + { + return cachedReader; + } + + using Lock.Scope _ = _readerCacheLock.EnterScope(); + return CreateReaderNoLock(); + } + + private IPersistence.IPersistenceReader CreateReaderNoLock() + { + while (true) + { + RefCountingPersistenceReader? cachedReader = _cachedReader; + if (cachedReader is null) + { + _cachedReader = cachedReader = new RefCountingPersistenceReader( + _inner.CreateReader(), + _logger + ); + } + + if (cachedReader.TryAcquire()) + { + return cachedReader; + } + + // Was disposed but not cleared. Not yet at least. + Interlocked.CompareExchange(ref _cachedReader, null, cachedReader); + } + } + + public IPersistence.IWriteBatch CreateWriteBatch(in StateId from, in StateId to, WriteFlags flags = WriteFlags.None) + { + return new ClearCacheOnWriteBatchComplete(_inner.CreateWriteBatch(from, to, flags), this); + } + + public void Flush() => _inner.Flush(); + + private void ClearReaderCache() + { + using Lock.Scope _ = _readerCacheLock.EnterScope(); + RefCountingPersistenceReader? 
cachedReader = _cachedReader; + _cachedReader = null; + cachedReader?.Dispose(); + } + + public async ValueTask DisposeAsync() + { + if (Interlocked.CompareExchange(ref _isDisposed, 1, 0) == 1) return; + + await _cancelTokenSource.CancelAsync(); + await _clearTimerTask.ConfigureAwait(false); + _cachedReader?.Dispose(); + _cancelTokenSource.Dispose(); + } + + private class ClearCacheOnWriteBatchComplete(IPersistence.IWriteBatch inner, CachedReaderPersistence parent) + : IPersistence.IWriteBatch + { + public void SelfDestruct(Address addr) => inner.SelfDestruct(addr); + public void SetAccount(Address addr, Account? account) => inner.SetAccount(addr, account); + public void SetStorage(Address addr, in UInt256 slot, in SlotValue? value) => inner.SetStorage(addr, slot, value); + public void SetStateTrieNode(in TreePath path, TrieNode tnValue) => inner.SetStateTrieNode(path, tnValue); + public void SetStorageTrieNode(Hash256 address, in TreePath path, TrieNode tnValue) => inner.SetStorageTrieNode(address, path, tnValue); + public void SetStorageRaw(Hash256 addrHash, Hash256 slotHash, in SlotValue? 
value) => inner.SetStorageRaw(addrHash, slotHash, value); + public void SetAccountRaw(Hash256 addrHash, Account account) => inner.SetAccountRaw(addrHash, account); + + public void Dispose() + { + inner.Dispose(); + + // not in lock as it has its own lock + parent.ClearReaderCache(); + } + } +} diff --git a/src/Nethermind/Nethermind.State.Flat/Persistence/FlatInTriePersistence.cs b/src/Nethermind/Nethermind.State.Flat/Persistence/FlatInTriePersistence.cs new file mode 100644 index 00000000000..be6b84b56ec --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/Persistence/FlatInTriePersistence.cs @@ -0,0 +1,98 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using Nethermind.Core; +using Nethermind.Db; + +namespace Nethermind.State.Flat.Persistence; + +/// +/// Persistence implementation that stores flat state data in the trie node columns (StateNodes/StorageNodes) +/// instead of separate Account/Storage columns. +/// +public class FlatInTriePersistence(IColumnsDb db) : IPersistence +{ + public void Flush() => db.Flush(); + + public IPersistence.IPersistenceReader CreateReader() + { + IColumnDbSnapshot snapshot = db.CreateSnapshot(); + try + { + BaseTriePersistence.Reader trieReader = new( + snapshot.GetColumn(FlatDbColumns.StateTopNodes), + snapshot.GetColumn(FlatDbColumns.StateNodes), + snapshot.GetColumn(FlatDbColumns.StorageNodes), + snapshot.GetColumn(FlatDbColumns.FallbackNodes) + ); + + StateId currentState = RocksDbPersistence.ReadCurrentState(snapshot.GetColumn(FlatDbColumns.Metadata)); + + return new BasePersistence.Reader, BaseTriePersistence.Reader>( + new BasePersistence.ToHashedFlatReader( + new BaseFlatPersistence.Reader( + (ISortedKeyValueStore)snapshot.GetColumn(FlatDbColumns.StateNodes), + (ISortedKeyValueStore)snapshot.GetColumn(FlatDbColumns.StorageNodes), + isPreimageMode: false + ) + ), + trieReader, + currentState, + new Reactive.AnonymousDisposable(() => + { + snapshot.Dispose(); + 
}) + ); + } + catch + { + snapshot.Dispose(); + throw; + } + } + + public IPersistence.IWriteBatch CreateWriteBatch(in StateId from, in StateId to, WriteFlags flags) + { + IColumnDbSnapshot dbSnap = db.CreateSnapshot(); + StateId currentState = RocksDbPersistence.ReadCurrentState(dbSnap.GetColumn(FlatDbColumns.Metadata)); + if (currentState != from) + { + dbSnap.Dispose(); + throw new InvalidOperationException($"Attempted to apply snapshot on top of wrong state. Snapshot from: {from}, Db state: {currentState}"); + } + + IColumnsWriteBatch batch = db.StartWriteBatch(); + + BaseTriePersistence.WriteBatch trieWriteBatch = new( + (ISortedKeyValueStore)dbSnap.GetColumn(FlatDbColumns.StorageNodes), + (ISortedKeyValueStore)dbSnap.GetColumn(FlatDbColumns.FallbackNodes), + batch.GetColumnBatch(FlatDbColumns.StateTopNodes), + batch.GetColumnBatch(FlatDbColumns.StateNodes), + batch.GetColumnBatch(FlatDbColumns.StorageNodes), + batch.GetColumnBatch(FlatDbColumns.FallbackNodes), + flags); + + StateId toCopy = to; + return new BasePersistence.WriteBatch, BaseTriePersistence.WriteBatch>( + new BasePersistence.ToHashedWriteBatch( + new BaseFlatPersistence.WriteBatch( + (ISortedKeyValueStore)dbSnap.GetColumn(FlatDbColumns.StorageNodes), + batch.GetColumnBatch(FlatDbColumns.StateNodes), + batch.GetColumnBatch(FlatDbColumns.StorageNodes), + flags + ) + ), + trieWriteBatch, + new Reactive.AnonymousDisposable(() => + { + RocksDbPersistence.SetCurrentState(batch.GetColumnBatch(FlatDbColumns.Metadata), toCopy); + batch.Dispose(); + dbSnap.Dispose(); + if (!flags.HasFlag(WriteFlags.DisableWAL)) + { + db.Flush(onlyWal: true); + } + }) + ); + } +} diff --git a/src/Nethermind/Nethermind.State.Flat/Persistence/IPersistence.cs b/src/Nethermind/Nethermind.State.Flat/Persistence/IPersistence.cs new file mode 100644 index 00000000000..d39988ac796 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/Persistence/IPersistence.cs @@ -0,0 +1,63 @@ +// SPDX-FileCopyrightText: 2025 Demerzel 
Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System; +using Nethermind.Core; +using Nethermind.Core.Crypto; +using Nethermind.Int256; +using Nethermind.Trie; + +namespace Nethermind.State.Flat.Persistence; + +public interface IPersistence +{ + IPersistenceReader CreateReader(); + IWriteBatch CreateWriteBatch(in StateId from, in StateId to, WriteFlags flags = WriteFlags.None); + + // Note: RocksdbPersistence already flush WAL on writing batch dispose. You don't need this unless you are skipping WAL. + void Flush(); + + public interface IPersistenceReader : IDisposable + { + Account? GetAccount(Address address); + + // Note: It can return true while setting outValue to zero. This is because there is a distinction between + // zero and missing to conform to a potential verkle need. + bool TryGetSlot(Address address, in UInt256 slot, ref SlotValue outValue); + StateId CurrentState { get; } + byte[]? TryLoadStateRlp(in TreePath path, ReadFlags flags); + byte[]? TryLoadStorageRlp(Hash256 address, in TreePath path, ReadFlags flags); + + // Raw operations are used in importer + byte[]? GetAccountRaw(Hash256 addrHash); + bool TryGetStorageRaw(Hash256 addrHash, Hash256 slotHash, ref SlotValue value); + + IFlatIterator CreateAccountIterator(in ValueHash256 startKey, in ValueHash256 endKey); + IFlatIterator CreateAccountIterator() => CreateAccountIterator(ValueKeccak.Zero, ValueKeccak.MaxValue); + IFlatIterator CreateStorageIterator(in ValueHash256 accountKey, in ValueHash256 startSlotKey, in ValueHash256 endSlotKey); + IFlatIterator CreateStorageIterator(in ValueHash256 accountKey) => CreateStorageIterator(accountKey, ValueKeccak.Zero, ValueKeccak.MaxValue); + bool IsPreimageMode { get; } + } + + public interface IWriteBatch : IDisposable + { + void SelfDestruct(Address addr); + void SetAccount(Address addr, Account? account); + void SetStorage(Address addr, in UInt256 slot, in SlotValue? 
value);
        void SetStateTrieNode(in TreePath path, TrieNode tnValue);
        void SetStorageTrieNode(Hash256 address, in TreePath path, TrieNode tnValue);

        void SetStorageRaw(Hash256 addrHash, Hash256 slotHash, in SlotValue? value);
        void SetAccountRaw(Hash256 addrHash, Account account);
    }

    ///
    /// Iterator for iterating over flat storage key-value pairs. This is mainly used in verifytrie.
    ///
    public interface IFlatIterator : IDisposable
    {
        bool MoveNext();
        ValueHash256 CurrentKey { get; }
        ReadOnlySpan CurrentValue { get; }
    }
}
diff --git a/src/Nethermind/Nethermind.State.Flat/Persistence/NoopPersistenceReader.cs b/src/Nethermind/Nethermind.State.Flat/Persistence/NoopPersistenceReader.cs new file mode 100644 index 00000000000..6019a736ace --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/Persistence/NoopPersistenceReader.cs @@ -0,0 +1,42 @@
// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited
// SPDX-License-Identifier: LGPL-3.0-only

using Nethermind.Core;
using Nethermind.Core.Crypto;
using Nethermind.Int256;
using Nethermind.Trie;

namespace Nethermind.State.Flat.Persistence;

/// <summary>
/// Do-nothing reader: every account/slot/RLP lookup misses (null or false) and both
/// iterators are empty. Useful as a stand-in where no persisted state is available.
/// </summary>
public class NoopPersistenceReader : IPersistence.IPersistenceReader
{
    public void Dispose() { }

    /// Always a miss.
    public Account? GetAccount(Address address) => null;

    /// Always a miss; outValue is left untouched.
    public bool TryGetSlot(Address address, in UInt256 slot, ref SlotValue outValue) => false;

    // NOTE(review): reports block number 0, while the persistence implementations in this
    // file use -1 (pre-genesis) for a fresh database — confirm callers do not rely on the distinction.
    public StateId CurrentState => new StateId(0, Keccak.EmptyTreeHash);

    public byte[]? TryLoadStateRlp(in TreePath path, ReadFlags flags) => null;

    public byte[]? TryLoadStorageRlp(Hash256 address, in TreePath path, ReadFlags flags) => null;

    // Parameter nullability aligned with IPersistence.IPersistenceReader.GetAccountRaw(Hash256 addrHash)
    // and the RefCountingPersistenceReader implementation; previously declared Hash256?.
    public byte[]? GetAccountRaw(Hash256 addrHash) => null;

    /// Always a miss; value is left untouched.
    public bool TryGetStorageRaw(Hash256 addrHash, Hash256 slotHash, ref SlotValue value) => false;

    public IPersistence.IFlatIterator CreateAccountIterator(in ValueHash256 startKey, in ValueHash256 endKey) => new EmptyIterator();

    public IPersistence.IFlatIterator CreateStorageIterator(in ValueHash256 accountKey, in ValueHash256 startSlotKey, in ValueHash256 endSlotKey) => new EmptyIterator();

    public bool IsPreimageMode => false;

    /// Zero-element iterator backing both Create*Iterator methods.
    private struct EmptyIterator : IPersistence.IFlatIterator
    {
        public bool MoveNext() => false;
        public ValueHash256 CurrentKey => default;
        public ReadOnlySpan CurrentValue => default;
        public void Dispose() { }
    }
}
diff --git a/src/Nethermind/Nethermind.State.Flat/Persistence/PreimageRecordingPersistence.cs b/src/Nethermind/Nethermind.State.Flat/Persistence/PreimageRecordingPersistence.cs new file mode 100644 index 00000000000..8f9d4169e4f --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/Persistence/PreimageRecordingPersistence.cs @@ -0,0 +1,116 @@
// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited
// SPDX-License-Identifier: LGPL-3.0-only

using Nethermind.Core;
using Nethermind.Core.Crypto;
using Nethermind.Db;
using Nethermind.Int256;
using Nethermind.Trie;

namespace Nethermind.State.Flat.Persistence;

///
/// A decorator that records preimages (address/slot hash to original bytes) to a separate database.
/// This is useful for external tooling that needs to look up the original address/slot from a hash.
/// When a preimage database is available, raw operations are translated to non-raw operations.
+/// +public class PreimageRecordingPersistence : IPersistence +{ + private const int PreimageLookupSize = 12; + + private readonly IPersistence _inner; + private readonly IDb _preimageDb; + + public PreimageRecordingPersistence(IPersistence inner, IDb preimageDb) + { + _inner = inner; + _preimageDb = preimageDb; + } + + public IPersistence.IPersistenceReader CreateReader() => _inner.CreateReader(); + + public IPersistence.IWriteBatch CreateWriteBatch(in StateId from, in StateId to, WriteFlags flags) + { + IPersistence.IWriteBatch innerBatch = _inner.CreateWriteBatch(from, to, flags); + IWriteBatch preimageWriteBatch = _preimageDb.StartWriteBatch(); + + return new RecordingWriteBatch(innerBatch, preimageWriteBatch, _preimageDb); + } + + public void Flush() => _inner.Flush(); + + private class RecordingWriteBatch(IPersistence.IWriteBatch inner, IWriteBatch preimageWriteBatch, IDb preimageDb) : IPersistence.IWriteBatch + { + public void Dispose() + { + preimageWriteBatch.Dispose(); + inner.Dispose(); + } + + public void SelfDestruct(Address addr) + { + RecordAddressPreimage(addr); + inner.SelfDestruct(addr); + } + + public void SetAccount(Address addr, Account? account) + { + RecordAddressPreimage(addr); + inner.SetAccount(addr, account); + } + + public void SetStorage(Address addr, in UInt256 slot, in SlotValue? value) + { + RecordAddressPreimage(addr); + RecordSlotPreimage(slot); + inner.SetStorage(addr, slot, value); + } + + public void SetStateTrieNode(in TreePath path, TrieNode tnValue) => inner.SetStateTrieNode(path, tnValue); + + public void SetStorageTrieNode(Hash256 address, in TreePath path, TrieNode tnValue) => inner.SetStorageTrieNode(address, path, tnValue); + + public void SetStorageRaw(Hash256 addrHash, Hash256 slotHash, in SlotValue? value) + { + byte[]? addrPreimage = preimageDb.Get(addrHash.Bytes[..PreimageLookupSize]); + byte[]? 
slotPreimage = preimageDb.Get(slotHash.Bytes[..PreimageLookupSize]); + if (addrPreimage is not null && slotPreimage is not null) + { + Address addr = new(addrPreimage); + UInt256 slot = new(slotPreimage, isBigEndian: true); + inner.SetStorage(addr, slot, value); + } + else + { + inner.SetStorageRaw(addrHash, slotHash, value); + } + } + + public void SetAccountRaw(Hash256 addrHash, Account account) + { + byte[]? addrPreimage = preimageDb.Get(addrHash.Bytes[..PreimageLookupSize]); + if (addrPreimage is not null) + { + Address addr = new(addrPreimage); + inner.SetAccount(addr, account); + } + else + { + inner.SetAccountRaw(addrHash, account); + } + } + + private void RecordAddressPreimage(Address addr) + { + ValueHash256 addressPath = addr.ToAccountPath; + preimageWriteBatch.PutSpan(addressPath.BytesAsSpan[..PreimageLookupSize], addr.Bytes); + } + + private void RecordSlotPreimage(in UInt256 slot) + { + ValueHash256 slotHash = ValueKeccak.Zero; + StorageTree.ComputeKeyWithLookup(slot, ref slotHash); + preimageWriteBatch.PutSpan(slotHash.BytesAsSpan[..PreimageLookupSize], slot.ToBigEndian()); + } + } +} diff --git a/src/Nethermind/Nethermind.State.Flat/Persistence/PreimageRocksdbPersistence.cs b/src/Nethermind/Nethermind.State.Flat/Persistence/PreimageRocksdbPersistence.cs new file mode 100644 index 00000000000..e135f1dc56d --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/Persistence/PreimageRocksdbPersistence.cs @@ -0,0 +1,229 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System.Buffers.Binary; +using Nethermind.Core; +using Nethermind.Core.Crypto; +using Nethermind.Db; +using Nethermind.Int256; +using Nethermind.Serialization.Rlp; + +namespace Nethermind.State.Flat.Persistence; + +/// +/// Preimage means that instead of hashing the address and slot and using the address as a key, it uses the address and +/// slot directly as a key. 
This implementation simply fakes the hash by copying the bytes directly.
/// This has some benefits:
/// - Skipping hash calculation, address and slot (around 0.3 micros).
/// - Improved compression ratio, lower storage db size by about 15%, and therefore better os cache utilization.
/// - Related slot values tend to be closer together, resulting in a better block cache.
/// However, it has some major downsides.
/// - Cannot snap sync.
/// - Cannot import without a complete preimage db.
///
public class PreimageRocksdbPersistence(IColumnsDb db) : IPersistence
{
    private static readonly byte[] CurrentStateKey = Keccak.Compute("CurrentState").BytesToArray();

    public void Flush() => db.Flush();

    /// <summary>
    /// Reads the persisted (block number, state root) marker from the metadata column;
    /// returns (-1, empty tree hash) when the database is fresh.
    /// </summary>
    internal static StateId ReadCurrentState(IReadOnlyKeyValueStore kv)
    {
        byte[]? bytes = kv.Get(CurrentStateKey);
        if (bytes is null || bytes.Length == 0)
        {
            return new StateId(-1, Keccak.EmptyTreeHash);
        }

        long blockNumber = BinaryPrimitives.ReadInt64BigEndian(bytes);
        ValueHash256 stateHash = new(bytes[8..]);
        return new StateId(blockNumber, stateHash);
    }

    /// <summary>
    /// Writes the state marker as 8 big-endian block-number bytes followed by the 32-byte state root.
    /// </summary>
    internal static void SetCurrentState(IWriteOnlyKeyValueStore kv, StateId stateId)
    {
        Span bytes = stackalloc byte[8 + 32];
        BinaryPrimitives.WriteInt64BigEndian(bytes[..8], stateId.BlockNumber);
        stateId.StateRoot.BytesAsSpan.CopyTo(bytes[8..]);

        kv.PutSpan(CurrentStateKey, bytes);
    }

    /// <summary>
    /// Creates a snapshot-backed reader over the Account/Storage columns (fake-hash keyed) and
    /// the trie node columns. The snapshot is released when the returned reader is disposed.
    /// </summary>
    public IPersistence.IPersistenceReader CreateReader()
    {
        IColumnDbSnapshot snapshot = db.CreateSnapshot();
        // Guard the snapshot so it is not leaked if reader construction throws —
        // consistent with RocksDbPersistence.CreateReader and FlatInTriePersistence.CreateReader.
        try
        {
            BaseTriePersistence.Reader trieReader = new(
                snapshot.GetColumn(FlatDbColumns.StateTopNodes),
                snapshot.GetColumn(FlatDbColumns.StateNodes),
                snapshot.GetColumn(FlatDbColumns.StorageNodes),
                snapshot.GetColumn(FlatDbColumns.FallbackNodes)
            );

            StateId currentState = ReadCurrentState(snapshot.GetColumn(FlatDbColumns.Metadata));

            ISortedKeyValueStore state = (ISortedKeyValueStore)snapshot.GetColumn(FlatDbColumns.Account);
            ISortedKeyValueStore storage = (ISortedKeyValueStore)snapshot.GetColumn(FlatDbColumns.Storage);

            FakeHashFlatReader flatReader = new(
                new BaseFlatPersistence.Reader(
                    state,
                    storage,
                    isPreimageMode: true
                )
            );

            return new BasePersistence.Reader, BaseTriePersistence.Reader>(
                flatReader,
                trieReader,
                currentState,
                new Reactive.AnonymousDisposable(() =>
                {
                    snapshot.Dispose();
                })
            );
        }
        catch
        {
            snapshot.Dispose();
            throw;
        }
    }

    /// <summary>
    /// Creates a write batch applying the transition from <paramref name="from"/> to <paramref name="to"/>.
    /// Throws InvalidOperationException when the database is not currently at <paramref name="from"/>.
    /// Disposing the returned batch commits it, advances the state marker, and flushes the WAL
    /// unless WriteFlags.DisableWAL is set.
    /// </summary>
    public IPersistence.IWriteBatch CreateWriteBatch(in StateId from, in StateId to, WriteFlags flags)
    {
        IColumnDbSnapshot dbSnap = db.CreateSnapshot();
        StateId currentState = ReadCurrentState(dbSnap.GetColumn(FlatDbColumns.Metadata));
        if (currentState != from)
        {
            dbSnap.Dispose();
            throw new InvalidOperationException(
                $"Attempted to apply snapshot on top of wrong state. Snapshot from: {from}, Db state: {currentState}");
        }

        // Start the batch only after the state check passes; it was previously created before
        // the check and leaked on the mismatch throw. RocksDbPersistence.CreateWriteBatch
        // already uses this ordering.
        IColumnsWriteBatch batch = db.StartWriteBatch();

        FakeHashWriter flatWriter = new(
            new BaseFlatPersistence.WriteBatch(
                ((ISortedKeyValueStore)dbSnap.GetColumn(FlatDbColumns.Storage)),
                batch.GetColumnBatch(FlatDbColumns.Account),
                batch.GetColumnBatch(FlatDbColumns.Storage),
                flags
            )
        );

        BaseTriePersistence.WriteBatch trieWriteBatch = new(
            (ISortedKeyValueStore)dbSnap.GetColumn(FlatDbColumns.StorageNodes),
            (ISortedKeyValueStore)dbSnap.GetColumn(FlatDbColumns.FallbackNodes),
            batch.GetColumnBatch(FlatDbColumns.StateTopNodes),
            batch.GetColumnBatch(FlatDbColumns.StateNodes),
            batch.GetColumnBatch(FlatDbColumns.StorageNodes),
            batch.GetColumnBatch(FlatDbColumns.FallbackNodes),
            flags);

        // Copy the 'in' parameter so the commit lambda can capture it.
        StateId toCopy = to;
        return new BasePersistence.WriteBatch, BaseTriePersistence.WriteBatch>(
            flatWriter,
            trieWriteBatch,
            new Reactive.AnonymousDisposable(() =>
            {
                SetCurrentState(batch.GetColumnBatch(FlatDbColumns.Metadata), toCopy);
                batch.Dispose();
                dbSnap.Dispose();
                if (!flags.HasFlag(WriteFlags.DisableWAL))
                {
                    db.Flush(onlyWal: true);
                }
            })
        );
    }

    private struct FakeHashWriter(
        TWriteBatch flatWriteBatch
    ) :
BasePersistence.IFlatWriteBatch + where TWriteBatch : struct, BasePersistence.IHashedFlatWriteBatch + { + private TWriteBatch _flatWriteBatch = flatWriteBatch; + + public void SelfDestruct(Address addr) + { + ValueHash256 fakeAddrHash = ValueKeccak.Zero; + addr.Bytes.CopyTo(fakeAddrHash.BytesAsSpan); + _flatWriteBatch.SelfDestruct(fakeAddrHash); + } + + public void SetAccount(Address addr, Account? account) + { + ValueHash256 fakeAddrHash = ValueKeccak.Zero; + addr.Bytes.CopyTo(fakeAddrHash.BytesAsSpan); + + if (account is null) + { + _flatWriteBatch.RemoveAccount(fakeAddrHash); + return; + } + + using NettyRlpStream stream = AccountDecoder.Slim.EncodeToNewNettyStream(account); + _flatWriteBatch.SetAccount(fakeAddrHash, stream.AsSpan()); + } + + public void SetStorage(Address addr, in UInt256 slot, in SlotValue? value) + { + ValueHash256 fakeAddrHash = ValueKeccak.Zero; + addr.Bytes.CopyTo(fakeAddrHash.BytesAsSpan); + + ValueHash256 fakeSlotHash = ValueKeccak.Zero; + slot.ToBigEndian(fakeSlotHash.BytesAsSpan); + + _flatWriteBatch.SetStorage(fakeAddrHash, fakeSlotHash, value); + } + + public void SetStorageRaw(Hash256 addrHash, Hash256 slotHash, in SlotValue? value) => + throw new InvalidOperationException("Raw operations not available in preimage mode"); + + public void SetAccountRaw(Hash256 addrHash, Account account) => + throw new InvalidOperationException("Raw operations not available in preimage mode"); + } + + public struct FakeHashFlatReader( + TFlatReader flatReader + ) : BasePersistence.IFlatReader + where TFlatReader : struct, BasePersistence.IHashedFlatReader + { + private const int AccountSpanBufferSize = 256; + private TFlatReader _flatReader = flatReader; + + public Account? 
GetAccount(Address address) + { + ValueHash256 fakeHash = ValueKeccak.Zero; + address.Bytes.CopyTo(fakeHash.BytesAsSpan); + + Span valueBuffer = stackalloc byte[AccountSpanBufferSize]; + int responseSize = _flatReader.GetAccount(fakeHash, valueBuffer); + if (responseSize == 0) + { + return null; + } + + Rlp.ValueDecoderContext ctx = new(valueBuffer[..responseSize]); + return AccountDecoder.Slim.Decode(ref ctx); + } + + public bool TryGetSlot(Address address, in UInt256 slot, ref SlotValue outValue) + { + ValueHash256 fakeHash = ValueKeccak.Zero; + address.Bytes.CopyTo(fakeHash.BytesAsSpan); + + ValueHash256 fakeSlotHash = ValueKeccak.Zero; + slot.ToBigEndian(fakeSlotHash.BytesAsSpan); + + return TryGetSlotRaw(fakeHash, fakeSlotHash, ref outValue); + } + + public byte[] GetAccountRaw(Hash256 addrHash) => + throw new InvalidOperationException("Raw operation not available in preimage mode"); + + public bool TryGetSlotRaw(in ValueHash256 address, in ValueHash256 slotHash, ref SlotValue outValue) => + _flatReader.TryGetStorage(address, slotHash, ref outValue); + + public IPersistence.IFlatIterator CreateAccountIterator(in ValueHash256 startKey, in ValueHash256 endKey) => + _flatReader.CreateAccountIterator(startKey, endKey); + + public IPersistence.IFlatIterator CreateStorageIterator(in ValueHash256 accountKey, in ValueHash256 startSlotKey, in ValueHash256 endSlotKey) => + _flatReader.CreateStorageIterator(accountKey, startSlotKey, endSlotKey); + + public bool IsPreimageMode => _flatReader.IsPreimageMode; + } +} diff --git a/src/Nethermind/Nethermind.State.Flat/Persistence/RefCountingPersistenceReader.cs b/src/Nethermind/Nethermind.State.Flat/Persistence/RefCountingPersistenceReader.cs new file mode 100644 index 00000000000..29ed24e8eee --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/Persistence/RefCountingPersistenceReader.cs @@ -0,0 +1,66 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using 
System.Threading.Tasks; +using Nethermind.Core; +using Nethermind.Core.Crypto; +using Nethermind.Core.Utils; +using Nethermind.Int256; +using Nethermind.Logging; +using Nethermind.Trie; + +namespace Nethermind.State.Flat.Persistence; + +public class RefCountingPersistenceReader : RefCountingDisposable, IPersistence.IPersistenceReader +{ + private const int Disposing = -1; // Same as parent's constant + private readonly IPersistence.IPersistenceReader _innerReader; + + public RefCountingPersistenceReader(IPersistence.IPersistenceReader innerReader, ILogger logger) + { + _innerReader = innerReader; + + _ = Task.Run(async () => + { + // Reader should be re-created every block unless something holds it for very long. + // It prevent database compaction, so this need to be closed eventually. + await Task.Delay(60_000); + if (logger.IsWarn && Volatile.Read(ref _leases.Value) != Disposing) + { + logger.Warn($"Unexpected old snapshot created. Lease count {_leases.Value}"); + } + }); + } + + public Account? GetAccount(Address address) => + _innerReader.GetAccount(address); + + public bool TryGetSlot(Address address, in UInt256 slot, ref SlotValue outValue) => + _innerReader.TryGetSlot(address, in slot, ref outValue); + + public StateId CurrentState => _innerReader.CurrentState; + + public byte[]? TryLoadStateRlp(in TreePath path, ReadFlags flags) => + _innerReader.TryLoadStateRlp(in path, flags); + + public byte[]? TryLoadStorageRlp(Hash256 address, in TreePath path, ReadFlags flags) => + _innerReader.TryLoadStorageRlp(address, in path, flags); + + public byte[]? 
GetAccountRaw(Hash256 addrHash) => + _innerReader.GetAccountRaw(addrHash); + + public bool TryGetStorageRaw(Hash256 addrHash, Hash256 slotHash, ref SlotValue value) => + _innerReader.TryGetStorageRaw(addrHash, slotHash, ref value); + + public IPersistence.IFlatIterator CreateAccountIterator(in ValueHash256 startKey, in ValueHash256 endKey) => + _innerReader.CreateAccountIterator(startKey, endKey); + + public IPersistence.IFlatIterator CreateStorageIterator(in ValueHash256 accountKey, in ValueHash256 startSlotKey, in ValueHash256 endSlotKey) => + _innerReader.CreateStorageIterator(accountKey, startSlotKey, endSlotKey); + + public bool IsPreimageMode => _innerReader.IsPreimageMode; + + protected override void CleanUp() => _innerReader.Dispose(); + + public bool TryAcquire() => TryAcquireLease(); +} diff --git a/src/Nethermind/Nethermind.State.Flat/Persistence/RocksDbPersistence.cs b/src/Nethermind/Nethermind.State.Flat/Persistence/RocksDbPersistence.cs new file mode 100644 index 00000000000..6bdc33857b8 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/Persistence/RocksDbPersistence.cs @@ -0,0 +1,117 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System.Buffers.Binary; +using Nethermind.Core; +using Nethermind.Core.Crypto; +using Nethermind.Db; +using Nethermind.Trie; + +namespace Nethermind.State.Flat.Persistence; + +public class RocksDbPersistence(IColumnsDb db) : IPersistence +{ + private static readonly byte[] CurrentStateKey = Keccak.Compute("CurrentState").BytesToArray(); + + internal static StateId ReadCurrentState(IReadOnlyKeyValueStore kv) + { + byte[]? bytes = kv.Get(CurrentStateKey); + return bytes is null || bytes.Length == 0 + ? 
new StateId(-1, ValueKeccak.EmptyTreeHash) + : new StateId(BinaryPrimitives.ReadInt64BigEndian(bytes), new ValueHash256(bytes[8..])); + } + + internal static void SetCurrentState(IWriteOnlyKeyValueStore kv, in StateId stateId) + { + Span bytes = stackalloc byte[8 + 32]; + BinaryPrimitives.WriteInt64BigEndian(bytes[..8], stateId.BlockNumber); + stateId.StateRoot.BytesAsSpan.CopyTo(bytes[8..]); + + kv.PutSpan(CurrentStateKey, bytes); + } + + public void Flush() => db.Flush(); + + public IPersistence.IPersistenceReader CreateReader() + { + IColumnDbSnapshot snapshot = db.CreateSnapshot(); + try + { + BaseTriePersistence.Reader trieReader = new( + snapshot.GetColumn(FlatDbColumns.StateTopNodes), + snapshot.GetColumn(FlatDbColumns.StateNodes), + snapshot.GetColumn(FlatDbColumns.StorageNodes), + snapshot.GetColumn(FlatDbColumns.FallbackNodes) + ); + + StateId currentState = ReadCurrentState(snapshot.GetColumn(FlatDbColumns.Metadata)); + + return new BasePersistence.Reader, BaseTriePersistence.Reader>( + new BasePersistence.ToHashedFlatReader( + new BaseFlatPersistence.Reader( + (ISortedKeyValueStore)snapshot.GetColumn(FlatDbColumns.Account), + (ISortedKeyValueStore)snapshot.GetColumn(FlatDbColumns.Storage), + isPreimageMode: false + ) + ), + trieReader, + currentState, + new Reactive.AnonymousDisposable(() => + { + snapshot.Dispose(); + }) + ); + } + catch + { + snapshot.Dispose(); + throw; + } + } + + public IPersistence.IWriteBatch CreateWriteBatch(in StateId from, in StateId to, WriteFlags flags) + { + IColumnDbSnapshot dbSnap = db.CreateSnapshot(); + StateId currentState = ReadCurrentState(dbSnap.GetColumn(FlatDbColumns.Metadata)); + if (currentState != from) + { + dbSnap.Dispose(); + throw new InvalidOperationException($"Attempted to apply snapshot on top of wrong state. 
Snapshot from: {from}, Db state: {currentState}"); + } + + IColumnsWriteBatch batch = db.StartWriteBatch(); + + BaseTriePersistence.WriteBatch trieWriteBatch = new( + (ISortedKeyValueStore)dbSnap.GetColumn(FlatDbColumns.StorageNodes), + (ISortedKeyValueStore)dbSnap.GetColumn(FlatDbColumns.FallbackNodes), + batch.GetColumnBatch(FlatDbColumns.StateTopNodes), + batch.GetColumnBatch(FlatDbColumns.StateNodes), + batch.GetColumnBatch(FlatDbColumns.StorageNodes), + batch.GetColumnBatch(FlatDbColumns.FallbackNodes), + flags); + + StateId toCopy = to; + + return new BasePersistence.WriteBatch, BaseTriePersistence.WriteBatch>( + new BasePersistence.ToHashedWriteBatch( + new BaseFlatPersistence.WriteBatch( + (ISortedKeyValueStore)dbSnap.GetColumn(FlatDbColumns.Storage), + batch.GetColumnBatch(FlatDbColumns.Account), + batch.GetColumnBatch(FlatDbColumns.Storage), + flags + ) + ), + trieWriteBatch, + new Reactive.AnonymousDisposable(() => + { + SetCurrentState(batch.GetColumnBatch(FlatDbColumns.Metadata), toCopy); + batch.Dispose(); + dbSnap.Dispose(); + if (!flags.HasFlag(WriteFlags.DisableWAL)) + { + db.Flush(onlyWal: true); + } + }) + ); + } +} diff --git a/src/Nethermind/Nethermind.State.Flat/PersistenceManager.cs b/src/Nethermind/Nethermind.State.Flat/PersistenceManager.cs new file mode 100644 index 00000000000..76ab641d874 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/PersistenceManager.cs @@ -0,0 +1,318 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System.Diagnostics; +using System.Runtime.CompilerServices; +using Nethermind.Core; +using Nethermind.Core.Attributes; +using Nethermind.Core.Collections; +using Nethermind.Core.Crypto; +using Nethermind.Db; +using Nethermind.Int256; +using Nethermind.Logging; +using Nethermind.State.Flat.Persistence; +using Nethermind.Trie; +using Nethermind.Trie.Pruning; + +[assembly: InternalsVisibleTo("Nethermind.State.Flat.Test")] + +namespace 
Nethermind.State.Flat; + +public class PersistenceManager( + IFlatDbConfig configuration, + IFinalizedStateProvider finalizedStateProvider, + IPersistence persistence, + ISnapshotRepository snapshotRepository, + ILogManager logManager) : IPersistenceManager +{ + private readonly ILogger _logger = logManager.GetClassLogger(); + private readonly int _minReorgDepth = configuration.MinReorgDepth; + private readonly int _maxReorgDepth = configuration.MaxReorgDepth; + private readonly int _compactSize = configuration.CompactSize; + private readonly List<(Hash256AsKey, TreePath)> _trieNodesSortBuffer = new(); // Presort make it faster + private readonly Lock _persistenceLock = new(); + + private StateId _currentPersistedStateId = StateId.PreGenesis; + + public IPersistence.IPersistenceReader LeaseReader() => persistence.CreateReader(); + + public StateId GetCurrentPersistedStateId() + { + if (_currentPersistedStateId == StateId.PreGenesis) + { + using IPersistence.IPersistenceReader reader = persistence.CreateReader(); + _currentPersistedStateId = reader.CurrentState; + } + return _currentPersistedStateId; + } + + private Snapshot? GetFinalizedSnapshotAtBlockNumber(long blockNumber, StateId currentPersistedState, bool compactedSnapshot) + { + Hash256? finalizedStateRoot = finalizedStateProvider.GetFinalizedStateRootAt(blockNumber); + using ArrayPoolList states = snapshotRepository.GetStatesAtBlockNumber(blockNumber); + foreach (StateId stateId in states) + { + if (stateId.StateRoot != finalizedStateRoot) continue; + + Snapshot? snapshot; + if (compactedSnapshot) + { + if (!snapshotRepository.TryLeaseCompactedState(stateId, out snapshot)) continue; + } + else + { + if (!snapshotRepository.TryLeaseState(stateId, out snapshot)) continue; + } + + if (snapshot.From == currentPersistedState) + { + if (_logger.IsDebug) _logger.Debug($"Persisting compacted state {stateId}"); + + return snapshot; + } + + snapshot.Dispose(); + } + + return null; + } + + private Snapshot? 
GetFirstSnapshotAtBlockNumber(long blockNumber, StateId currentPersistedState, bool compactedSnapshot) + { + using ArrayPoolList states = snapshotRepository.GetStatesAtBlockNumber(blockNumber); + foreach (StateId stateId in states) + { + Snapshot? snapshot; + if (compactedSnapshot) + { + if (!snapshotRepository.TryLeaseCompactedState(stateId, out snapshot)) continue; + } + else + { + if (!snapshotRepository.TryLeaseState(stateId, out snapshot)) continue; + } + + if (snapshot.From == currentPersistedState) + { + if (_logger.IsWarn) _logger.Warn($"Force persisting state {stateId}"); + + return snapshot; + } + + snapshot.Dispose(); + } + + return null; + } + + internal Snapshot? DetermineSnapshotToPersist(StateId latestSnapshot) + { + // Actually, the latest compacted snapshot, not the latest snapshot. + long lastSnapshotNumber = latestSnapshot.BlockNumber; + + StateId currentPersistedState = GetCurrentPersistedStateId(); + long finalizedBlockNumber = finalizedStateProvider.FinalizedBlockNumber; + long inMemoryStateDepth = lastSnapshotNumber - currentPersistedState.BlockNumber; + if (inMemoryStateDepth - _compactSize < _minReorgDepth) + { + // Keep some state in memory + return null; + } + + Snapshot? snapshotToPersist; + + long afterPersistPersistedBlockNumber = currentPersistedState.BlockNumber + _compactSize; + if (afterPersistPersistedBlockNumber > finalizedBlockNumber) + { + if (inMemoryStateDepth <= _maxReorgDepth) + { + // Unfinalized, and still under max reorg depth + return null; + } + + if (_logger.IsWarn) _logger.Warn($"Very long unfinalized state. Force persisting to conserve memory. finalized block number is {finalizedBlockNumber}."); + snapshotToPersist = GetFirstSnapshotAtBlockNumber(currentPersistedState.BlockNumber + _compactSize, currentPersistedState, true) ?? 
+ GetFirstSnapshotAtBlockNumber(currentPersistedState.BlockNumber + 1, currentPersistedState, false); + } + else + { + snapshotToPersist = GetFinalizedSnapshotAtBlockNumber(currentPersistedState.BlockNumber + _compactSize, currentPersistedState, true) ?? + GetFinalizedSnapshotAtBlockNumber(currentPersistedState.BlockNumber + 1, currentPersistedState, false); + } + + if (snapshotToPersist is null) + { + if (_logger.IsWarn) _logger.Warn($"Unable to find snapshot to persist. Current persisted state {currentPersistedState}. Compact size {_compactSize}."); + } + + return snapshotToPersist; + } + + public void AddToPersistence(StateId latestSnapshot) + { + using Lock.Scope scope = _persistenceLock.EnterScope(); + // Attempt to add snapshots into bigcache + while (true) + { + Snapshot? snapshotToSave = DetermineSnapshotToPersist(latestSnapshot); + + if (snapshotToSave is null) return; + using Snapshot _ = snapshotToSave; // dispose + + // Add the canon snapshot + PersistSnapshot(snapshotToSave); + _currentPersistedStateId = snapshotToSave.To; + } + } + + /// + /// Force persist all snapshots regardless of finalization status. + /// Used by FlushCache to ensure all state is persisted before clearing caches. + /// + public StateId FlushToPersistence() + { + using Lock.Scope scope = _persistenceLock.EnterScope(); + + StateId currentPersistedState = GetCurrentPersistedStateId(); + StateId? latestStateId = snapshotRepository.GetLastSnapshotId(); + + if (latestStateId is null) + { + return currentPersistedState; + } + + // Persist all snapshots from current persisted state to latest + while (currentPersistedState.BlockNumber < latestStateId.Value.BlockNumber) + { + // Try finalized snapshots first (compacted, then non-compacted) + Snapshot? 
snapshotToPersist = GetFinalizedSnapshotAtBlockNumber( + currentPersistedState.BlockNumber + _compactSize, + currentPersistedState, + compactedSnapshot: true); + + snapshotToPersist ??= GetFinalizedSnapshotAtBlockNumber( + currentPersistedState.BlockNumber + 1, + currentPersistedState, + compactedSnapshot: false); + + // Fall back to the first available snapshot if finalized not available + snapshotToPersist ??= GetFirstSnapshotAtBlockNumber( + currentPersistedState.BlockNumber + _compactSize, + currentPersistedState, + compactedSnapshot: true); + + snapshotToPersist ??= GetFirstSnapshotAtBlockNumber( + currentPersistedState.BlockNumber + 1, + currentPersistedState, + compactedSnapshot: false); + + if (snapshotToPersist is null) + { + break; + } + + using Snapshot _ = snapshotToPersist; + PersistSnapshot(snapshotToPersist); + _currentPersistedStateId = snapshotToPersist.To; + currentPersistedState = _currentPersistedStateId; + } + + return currentPersistedState; + } + + internal void PersistSnapshot(Snapshot snapshot) + { + long compactLength = snapshot.To.BlockNumber! - snapshot.From.BlockNumber!; + + // Usually at the start of the application + if (compactLength != _compactSize && _logger.IsTrace) _logger.Trace($"Persisting non compacted state of length {compactLength}"); + + long sw = Stopwatch.GetTimestamp(); + using (IPersistence.IWriteBatch batch = persistence.CreateWriteBatch(snapshot.From, snapshot.To)) + { + foreach (KeyValuePair toSelfDestructStorage in snapshot.SelfDestructedStorageAddresses) + { + if (toSelfDestructStorage.Value) + { + continue; + } + + batch.SelfDestruct(toSelfDestructStorage.Key.Value); + } + + foreach (KeyValuePair kv in snapshot.Accounts) + { + (AddressAsKey addr, Account? account) = kv; + batch.SetAccount(addr, account); + } + + foreach (KeyValuePair<(AddressAsKey, UInt256), SlotValue?> kv in snapshot.Storages) + { + ((Address addr, UInt256 slot), SlotValue? 
value) = kv; + + batch.SetStorage(addr, slot, value); + } + + _trieNodesSortBuffer.Clear(); + _trieNodesSortBuffer.AddRange(snapshot.StateNodeKeys.Select((path) => (new Hash256AsKey(Hash256.Zero), path))); + _trieNodesSortBuffer.Sort(); + + long stateNodesSize = 0; + // foreach (var tn in snapshot.TrieNodes) + foreach ((Hash256AsKey, TreePath) k in _trieNodesSortBuffer) + { + (_, TreePath path) = k; + + snapshot.TryGetStateNode(path, out TrieNode? node); + + if (node!.FullRlp.Length == 0) + { + // TODO: Need to double check this case. Does it need a rewrite or not? + if (node.NodeType == NodeType.Unknown) + { + continue; + } + } + + stateNodesSize += node.FullRlp.Length; + // Note: Even if the node already marked as persisted, we still re-persist it + batch.SetStateTrieNode(path, node); + + node.IsPersisted = true; + } + + _trieNodesSortBuffer.Clear(); + _trieNodesSortBuffer.AddRange(snapshot.StorageTrieNodeKeys); + _trieNodesSortBuffer.Sort(); + + long storageNodesSize = 0; + // foreach (var tn in snapshot.TrieNodes) + foreach ((Hash256AsKey, TreePath) k in _trieNodesSortBuffer) + { + (Hash256AsKey address, TreePath path) = k; + + snapshot.TryGetStorageNode(address, path, out TrieNode? node); + + if (node!.FullRlp.Length == 0) + { + // TODO: Need to double check this case. Does it need a rewrite or not? 
+ if (node.NodeType == NodeType.Unknown) + { + continue; + } + } + + storageNodesSize += node.FullRlp.Length; + // Note: Even if the node already marked as persisted, we still re-persist it + batch.SetStorageTrieNode(address, path, node); + + node.IsPersisted = true; + } + + Metrics.FlatPersistenceSnapshotSize.Observe(stateNodesSize, labels: new StringLabel("state_nodes")); + Metrics.FlatPersistenceSnapshotSize.Observe(storageNodesSize, labels: new StringLabel("storage_nodes")); + } + + Metrics.FlatPersistenceTime.Observe(Stopwatch.GetTimestamp() - sw); + } +} diff --git a/src/Nethermind/Nethermind.State.Flat/ReadOnlySnapshotBundle.cs b/src/Nethermind/Nethermind.State.Flat/ReadOnlySnapshotBundle.cs new file mode 100644 index 00000000000..5e1fb176b8e --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/ReadOnlySnapshotBundle.cs @@ -0,0 +1,205 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; +using Nethermind.Core; +using Nethermind.Core.Attributes; +using Nethermind.Core.Collections; +using Nethermind.Core.Crypto; +using Nethermind.Core.Extensions; +using Nethermind.Core.Utils; +using Nethermind.Int256; +using Nethermind.State.Flat.Persistence; +using Nethermind.Trie; + +namespace Nethermind.State.Flat; + +/// +/// A bundle of and a layer of write buffer backed by a . 
+/// +public sealed class ReadOnlySnapshotBundle( + SnapshotPooledList snapshots, + IPersistence.IPersistenceReader persistenceReader, + bool recordDetailedMetrics) + : RefCountingDisposable +{ + public int SnapshotCount => snapshots.Count; + private bool _isDisposed; + + private static readonly StringLabel _readAccountSnapshotLabel = new("account_snapshot"); + private static readonly StringLabel _readAccountPersistenceLabel = new("account_persistence"); + private static readonly StringLabel _readAccountPersistenceNullLabel = new("account_persistence_null"); + private static readonly StringLabel _readStorageSnapshotLabel = new("storage_snapshot"); + private static readonly StringLabel _readStoragePersistenceLabel = new("storage_persistence"); + private static readonly StringLabel _readStoragePersistenceNullLabel = new("storage_persistence_null"); + private static readonly StringLabel _readStateNodeSnapshotLabel = new("state_node_snapshot"); + private static readonly StringLabel _readStorageNodeSnapshotLabel = new("storage_node_snapshot"); + private static readonly StringLabel _readStateRlpLabel = new("state_rlp"); + private static readonly StringLabel _readStorageRlpLabel = new("storage_rlp"); + + public Account? GetAccount(Address address) + { + GuardDispose(); + + AddressAsKey key = address; + + long sw = recordDetailedMetrics ? Stopwatch.GetTimestamp() : 0; + for (int i = snapshots.Count - 1; i >= 0; i--) + { + if (snapshots[i].TryGetAccount(key, out Account? acc)) + { + if (recordDetailedMetrics) Metrics.ReadOnlySnapshotBundleTimes.Observe(Stopwatch.GetTimestamp() - sw, _readAccountSnapshotLabel); + return acc; + } + } + + sw = recordDetailedMetrics ? Stopwatch.GetTimestamp() : 0; + Account? 
account = persistenceReader.GetAccount(address); + if (account == null) + { + if (recordDetailedMetrics) Metrics.ReadOnlySnapshotBundleTimes.Observe(Stopwatch.GetTimestamp() - sw, _readAccountPersistenceNullLabel); + } + else + { + if (recordDetailedMetrics) Metrics.ReadOnlySnapshotBundleTimes.Observe(Stopwatch.GetTimestamp() - sw, _readAccountPersistenceLabel); + } + + return account; + } + + public int DetermineSelfDestructSnapshotIdx(Address address) + { + for (int i = snapshots.Count - 1; i >= 0; i--) + { + if (snapshots[i].HasSelfDestruct(address)) + { + return i; + } + } + + return -1; + } + + public byte[]? GetSlot(Address address, in UInt256 index, int selfDestructStateIdx) + { + GuardDispose(); + + long sw = recordDetailedMetrics ? Stopwatch.GetTimestamp() : 0; + for (int i = snapshots.Count - 1; i >= 0; i--) + { + if (snapshots[i].TryGetStorage(address, index, out SlotValue? slotValue)) + { + byte[]? res = slotValue?.ToEvmBytes(); + if (recordDetailedMetrics) Metrics.ReadOnlySnapshotBundleTimes.Observe(Stopwatch.GetTimestamp() - sw, _readStorageSnapshotLabel); + return res; + } + + if (i <= selfDestructStateIdx) + { + return null; + } + } + + SlotValue outSlotValue = new(); + + sw = recordDetailedMetrics ? Stopwatch.GetTimestamp() : 0; + persistenceReader.TryGetSlot(address, index, ref outSlotValue); + byte[]? value = outSlotValue.ToEvmBytes(); + + if (recordDetailedMetrics) + { + if (value is null || value.IsZero()) + { + Metrics.ReadOnlySnapshotBundleTimes.Observe(Stopwatch.GetTimestamp() - sw, _readStoragePersistenceNullLabel); + } + else + { + Metrics.ReadOnlySnapshotBundleTimes.Observe(Stopwatch.GetTimestamp() - sw, _readStoragePersistenceLabel); + } + } + + return value; + } + + public bool TryFindStateNodes(in TreePath path, Hash256 hash, [NotNullWhen(true)] out TrieNode? node) + { + GuardDispose(); + + long sw = recordDetailedMetrics ? 
Stopwatch.GetTimestamp() : 0; + for (int i = snapshots.Count - 1; i >= 0; i--) + { + if (snapshots[i].TryGetStateNode(path, out node)) + { + Nethermind.Trie.Pruning.Metrics.LoadedFromCacheNodesCount++; + if (recordDetailedMetrics) Metrics.ReadOnlySnapshotBundleTimes.Observe(Stopwatch.GetTimestamp() - sw, _readStateNodeSnapshotLabel); + return true; + } + } + + node = null; + return false; + } + + // Note: No self-destruct boundary check needed for trie nodes. Trie iteration starts from the storage root hash, + // so if storage was self-destructed, the new root is different and orphaned nodes won't be traversed. + public bool TryFindStorageNodes(Hash256AsKey address, in TreePath path, Hash256 hash, [NotNullWhen(true)] out TrieNode? node) + { + GuardDispose(); + + long sw = recordDetailedMetrics ? Stopwatch.GetTimestamp() : 0; + for (int i = snapshots.Count - 1; i >= 0; i--) + { + if (snapshots[i].TryGetStorageNode(address, path, out node)) + { + Nethermind.Trie.Pruning.Metrics.LoadedFromCacheNodesCount++; + if (recordDetailedMetrics) Metrics.ReadOnlySnapshotBundleTimes.Observe(Stopwatch.GetTimestamp() - sw, _readStorageNodeSnapshotLabel); + return true; + } + } + + node = null; + return false; + } + + public byte[]? TryLoadStateRlp(in TreePath path, Hash256 hash, ReadFlags flags) + { + GuardDispose(); + + Nethermind.Trie.Pruning.Metrics.LoadedFromDbNodesCount++; + long sw = recordDetailedMetrics ? Stopwatch.GetTimestamp() : 0; + byte[]? value = persistenceReader.TryLoadStateRlp(path, flags); + if (recordDetailedMetrics) Metrics.ReadOnlySnapshotBundleTimes.Observe(Stopwatch.GetTimestamp() - sw, _readStateRlpLabel); + + return value; + } + + public byte[]? TryLoadStorageRlp(Hash256 address, in TreePath path, Hash256 hash, ReadFlags flags) + { + GuardDispose(); + + Nethermind.Trie.Pruning.Metrics.LoadedFromDbNodesCount++; + long sw = recordDetailedMetrics ? Stopwatch.GetTimestamp() : 0; + byte[]? 
value = persistenceReader.TryLoadStorageRlp(address, path, flags); + if (recordDetailedMetrics) Metrics.ReadOnlySnapshotBundleTimes.Observe(Stopwatch.GetTimestamp() - sw, _readStorageRlpLabel); + + return value; + } + + private void GuardDispose() + { + if (_isDisposed) throw new ObjectDisposedException($"{nameof(ReadOnlySnapshotBundle)} is disposed"); + } + + public bool TryLease() => TryAcquireLease(); + + protected override void CleanUp() + { + if (Interlocked.CompareExchange(ref _isDisposed, true, false)) return; + + snapshots.Dispose(); + + // Null them in case unexpected mutation from trie warmer + persistenceReader.Dispose(); + } +} diff --git a/src/Nethermind/Nethermind.State.Flat/ResourcePool.cs b/src/Nethermind/Nethermind.State.Flat/ResourcePool.cs new file mode 100644 index 00000000000..81de78d9620 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/ResourcePool.cs @@ -0,0 +1,144 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System.Collections.Concurrent; +using System.Diagnostics.CodeAnalysis; +using Nethermind.Core.Collections; +using Nethermind.Core.Metric; +using Nethermind.Db; +using IResettable = Nethermind.Core.Resettables.IResettable; + +namespace Nethermind.State.Flat; + +/// +/// A pool of objects used to manage different sized pool objects for different category. +/// +/// +public class ResourcePool(IFlatDbConfig flatConfig) : IResourcePool +{ + private readonly Dictionary _categories = new() + { + // For main BlockProcessing once a compacted snapshot is persisted, all `flatConfig.CompactSize` snapshot content will be returned. + { Usage.MainBlockProcessing, new ResourcePoolCategory(Usage.MainBlockProcessing, flatConfig.CompactSize + 8, 2) }, + + // PostMainBlockProcessing is a special usage right after the commit of `MainBlockProcessing` which only commit once and never modified. 
+ { Usage.PostMainBlockProcessing, new ResourcePoolCategory(Usage.PostMainBlockProcessing, 1, 1) }, + + // Note: prewarmer use readonly processing env + // Note: readonly here means it's never committed to the flat repo, but within the worldscope itself it may be committed. + { Usage.ReadOnlyProcessingEnv, new ResourcePoolCategory(Usage.ReadOnlyProcessingEnv, Environment.ProcessorCount * 4, Environment.ProcessorCount * 4) }, + + // Compacter is the large compacted snapshot. The pool usage is hard to predict during forward sync as the persistence + // may lag behind block processing and vice versa. + { Usage.Compactor, new ResourcePoolCategory(Usage.Compactor, 4, 1) }, + { Usage.MidCompactor, new ResourcePoolCategory(Usage.MidCompactor, 2, 1) }, + }; + + public SnapshotContent GetSnapshotContent(Usage usage) => _categories[usage].GetSnapshotContent(); + + public void ReturnSnapshotContent(Usage usage, SnapshotContent snapshotContent) => _categories[usage].ReturnSnapshotContent(snapshotContent); + + public TransientResource GetCachedResource(Usage usage) => _categories[usage].GetCachedResource(); + + public void ReturnCachedResource(Usage usage, TransientResource transientResource) => _categories[usage].ReturnCachedResource(transientResource); + + public Snapshot CreateSnapshot(in StateId from, in StateId to, Usage usage) => + new( + from, + to, + content: GetSnapshotContent(usage), + resourcePool: this, + usage: usage); + + public enum Usage + { + MainBlockProcessing, + PostMainBlockProcessing, + ReadOnlyProcessingEnv, + MidCompactor, + Compactor, + } + + // Using stack for better cpu cache effectiveness + private class ConcurrentStackPool<T>(int maxCapacity = 16) where T : notnull, IDisposable, IResettable + { + private readonly ConcurrentStack<T> _queue = new(); + public double PooledItemCount => _queue.Count; + + public bool TryGet([NotNullWhen(true)] out T? 
item) => _queue.TryPop(out item); + + public bool Return(T item) + { + item.Reset(); + if (_queue.Count >= maxCapacity) + { + item.Dispose(); + return false; + } + _queue.Push(item); + return true; + } + + } + + private class ResourcePoolCategory(Usage usage, int snapshotContentPoolSize, int cachedResourcePoolSize) + { + private readonly ConcurrentStackPool<SnapshotContent> _snapshotPool = new(snapshotContentPoolSize); + private readonly ConcurrentStackPool<TransientResource> _cachedResourcePool = new(cachedResourcePoolSize); + private TransientResource.Size _lastCachedResourceSize = new(1024, 1024); + private readonly PooledResourceLabel _snapshotLabel = new(usage.ToString(), "SnapshotContent"); + private readonly PooledResourceLabel _cachedResourceLabel = new(usage.ToString(), "CachedResource"); + + public SnapshotContent GetSnapshotContent() + { + Metrics.ActivePooledResource.AddBy(_snapshotLabel, 1); + if (_snapshotPool.TryGet(out SnapshotContent? snapshotContent)) + { + Metrics.CachedPooledResource[_snapshotLabel] = (long)_snapshotPool.PooledItemCount; + return snapshotContent; + } + + Metrics.CreatedPooledResource.AddBy(_snapshotLabel, 1); + return new SnapshotContent(); + } + + public void ReturnSnapshotContent(SnapshotContent snapshotContent) + { + Metrics.ActivePooledResource.AddBy(_snapshotLabel, -1); + if (!_snapshotPool.Return(snapshotContent)) + { + } + + Metrics.CachedPooledResource[_snapshotLabel] = (long)_snapshotPool.PooledItemCount; + } + + public TransientResource GetCachedResource() + { + Metrics.ActivePooledResource.AddBy(_cachedResourceLabel, 1); + if (_cachedResourcePool.TryGet(out TransientResource? 
cachedResource)) + { + Metrics.CachedPooledResource[_cachedResourceLabel] = (long)_cachedResourcePool.PooledItemCount; + return cachedResource; + } + + Metrics.CreatedPooledResource.AddBy(_cachedResourceLabel, 1); + return new TransientResource(_lastCachedResourceSize); + } + + public void ReturnCachedResource(TransientResource transientResource) + { + Metrics.ActivePooledResource.AddBy(_cachedResourceLabel, -1); + if (!_cachedResourcePool.Return(transientResource)) + { + _lastCachedResourceSize = transientResource.GetSize(); + } + + Metrics.CachedPooledResource[_cachedResourceLabel] = (long)_cachedResourcePool.PooledItemCount; + } + } + + public record PooledResourceLabel(string Category, string ResourceType) : IMetricLabels + { + public string[] Labels => [Category, ResourceType]; + } +} diff --git a/src/Nethermind/Nethermind.State.Flat/ScopeProvider/AbstractMinimalTrieStore.cs b/src/Nethermind/Nethermind.State.Flat/ScopeProvider/AbstractMinimalTrieStore.cs new file mode 100644 index 00000000000..4530ac8ef23 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/ScopeProvider/AbstractMinimalTrieStore.cs @@ -0,0 +1,46 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System; +using Nethermind.Core; +using Nethermind.Core.Crypto; +using Nethermind.Core.Threading; +using Nethermind.Trie; +using Nethermind.Trie.Pruning; + +namespace Nethermind.State.Flat.ScopeProvider; + +public abstract class AbstractMinimalTrieStore : IScopedTrieStore +{ + public abstract TrieNode FindCachedOrUnknown(in TreePath path, Hash256 hash); + + public abstract byte[]? TryLoadRlp(in TreePath path, Hash256 hash, ReadFlags flags = ReadFlags.None); + + public virtual ICommitter BeginCommit(TrieNode? root, WriteFlags writeFlags = WriteFlags.None) => + throw new NotSupportedException("Commit not supported"); + + + public byte[] LoadRlp(in TreePath path, Hash256 hash, ReadFlags flags = ReadFlags.None) + { + byte[]? 
value = TryLoadRlp(path, hash, flags); + return value ?? throw new TrieNodeException($"Missing trie node. {path}:{hash}", path, hash); + } + + public virtual ITrieNodeResolver GetStorageTrieNodeResolver(Hash256? address) => throw new UnsupportedOperationException("Get trie node resolver not supported"); + + public INodeStorage.KeyScheme Scheme => INodeStorage.KeyScheme.HalfPath; + + public bool IsPersisted(in TreePath path, in ValueHash256 keccak) => throw new UnsupportedOperationException("Persisted check not supported"); + + public abstract class AbstractMinimalCommitter(ConcurrencyController quota) : ICommitter + { + public void Dispose() { } + + public abstract TrieNode CommitNode(ref TreePath path, TrieNode node); + + bool ICommitter.TryRequestConcurrentQuota() => quota.TryRequestConcurrencyQuota(); + void ICommitter.ReturnConcurrencyQuota() => quota.ReturnConcurrencyQuota(); + } + + public class UnsupportedOperationException(string message) : Exception(message); +} diff --git a/src/Nethermind/Nethermind.State.Flat/ScopeProvider/FlatScopeProvider.cs b/src/Nethermind/Nethermind.State.Flat/ScopeProvider/FlatScopeProvider.cs new file mode 100644 index 00000000000..3d1f2644859 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/ScopeProvider/FlatScopeProvider.cs @@ -0,0 +1,41 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using Autofac.Features.AttributeFilters; +using Nethermind.Core; +using Nethermind.Db; +using Nethermind.Evm.State; +using Nethermind.Logging; + +namespace Nethermind.State.Flat.ScopeProvider; + +public class FlatScopeProvider( + [KeyFilter(DbNames.Code)] IDb codeDb, + IFlatDbManager flatDbManager, + IFlatDbConfig configuration, + ITrieWarmer trieWarmer, + ResourcePool.Usage usage, + ILogManager logManager, + bool isReadOnly) + : IWorldStateScopeProvider +{ + private readonly TrieStoreScopeProvider.KeyValueWithBatchingBackedCodeDb _codeDb = new(codeDb); + + public bool 
HasRoot(BlockHeader? baseBlock) => flatDbManager.HasStateForBlock(new StateId(baseBlock)); + + public IWorldStateScopeProvider.IScope BeginScope(BlockHeader? baseBlock) + { + StateId currentState = new(baseBlock); + SnapshotBundle snapshotBundle = flatDbManager.GatherSnapshotBundle(currentState, usage: usage); + + return new FlatWorldStateScope( + currentState, + snapshotBundle, + _codeDb, + flatDbManager, + configuration, + trieWarmer, + logManager, + isReadOnly: isReadOnly); + } +} diff --git a/src/Nethermind/Nethermind.State.Flat/ScopeProvider/FlatStorageTree.cs b/src/Nethermind/Nethermind.State.Flat/ScopeProvider/FlatStorageTree.cs new file mode 100644 index 00000000000..cea3db5029a --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/ScopeProvider/FlatStorageTree.cs @@ -0,0 +1,164 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using Nethermind.Core; +using Nethermind.Core.Crypto; +using Nethermind.Core.Extensions; +using Nethermind.Core.Threading; +using Nethermind.Db; +using Nethermind.Evm.State; +using Nethermind.Int256; +using Nethermind.Logging; +using Nethermind.Trie; + +namespace Nethermind.State.Flat.ScopeProvider; + +public sealed class FlatStorageTree : IWorldStateScopeProvider.IStorageTree, ITrieWarmer.IStorageWarmer +{ + private readonly StorageTree _tree; + private readonly StorageTree _warmupStorageTree; + private readonly Address _address; + private readonly IFlatDbConfig _config; + private readonly ITrieWarmer _trieCacheWarmer; + private readonly FlatWorldStateScope _scope; + private readonly SnapshotBundle _bundle; + private readonly Hash256 _addressHash; + + // This number is the idx of the snapshot in the SnapshotBundle where a clear for this account was found. + // This is passed to TryGetSlot which prevent it from reading before self destruct. 
+ private int _selfDestructKnownStateIdx; + + public FlatStorageTree( + FlatWorldStateScope scope, + ITrieWarmer trieCacheWarmer, + SnapshotBundle bundle, + IFlatDbConfig config, + ConcurrencyController concurrencyQuota, + Hash256 storageRoot, + Address address, + ILogManager logManager) + { + _scope = scope; + _trieCacheWarmer = trieCacheWarmer; + _bundle = bundle; + _address = address; + _addressHash = address.ToAccountPath.ToHash256(); + _selfDestructKnownStateIdx = bundle.DetermineSelfDestructSnapshotIdx(address); + + StorageTrieStoreAdapter storageTrieAdapter = new(bundle, concurrencyQuota, _addressHash); + StorageTrieStoreWarmerAdapter warmerStorageTrieAdapter = new(bundle, _addressHash); + + _tree = new StorageTree(storageTrieAdapter, storageRoot, logManager) + { + RootHash = storageRoot + }; + + // Set the rootref manually. Cut the call to find nodes by about 1/4th. + _warmupStorageTree = new StorageTree(warmerStorageTrieAdapter, logManager); + _warmupStorageTree.SetRootHash(storageRoot, false); + _warmupStorageTree.RootRef = _tree.RootRef; + + _config = config; + } + + public Hash256 RootHash => _tree.RootHash; + public byte[] Get(in UInt256 index) + { + byte[]? value = _bundle.GetSlot(_address, index, _selfDestructKnownStateIdx); + if (value is null || value.Length == 0) + { + value = StorageTree.ZeroBytes; + } + + if (_config.VerifyWithTrie) + { + byte[] treeValue = _tree.Get(index); + if (!Bytes.AreEqual(treeValue, value)) + { + throw new TrieException($"Get slot got wrong value. Address {_address}, {_tree.RootHash}, {index}. Tree: {treeValue?.ToHexString()} vs Flat: {value?.ToHexString()}. Self destruct it {_selfDestructKnownStateIdx}"); + } + } + + HintGet(index, value); + + return value!; + } + + // Note: VERY hot code. + // 90% of the read goes through prewarmer, not actually go through this class, meaning this method is called + // a lot. Setting the set slot have a measurable net negative impact on performance. 
+ // Trying to set this value async through trie warmer proved to be hard to pull of and result in random invalid + // block. + public void HintGet(in UInt256 index, byte[]? value) => WarmUpSlot(index); + + private void WarmUpSlot(UInt256 index) + { + if (_bundle.ShouldQueuePrewarm(_address, index)) + { + _trieCacheWarmer.PushSlotJob(this, index, _scope.HintSequenceId); + } + } + + // Called by trie warmer. + public bool WarmUpStorageTrie(UInt256 index, int sequenceId) + { + if (_scope.HintSequenceId != sequenceId || _scope._pausePrewarmer) + { + return false; + } + + // Note: storage tree root not changed after write batch. Also not cleared. So the result is not correct. + // this is just to warm up the nodes. + ValueHash256 key = ValueKeccak.Zero; + StorageTree.ComputeKeyWithLookup(index, ref key); + + _warmupStorageTree.WarmUpPath(key.BytesAsSpan); + return true; + } + + public byte[] Get(in ValueHash256 hash) => throw new NotSupportedException("Not supported"); + + private void Set(UInt256 slot, byte[] value) => _bundle.SetChangedSlot(_address, slot, value); + + public void SelfDestruct() + { + _bundle.Clear(_address, _addressHash); + _selfDestructKnownStateIdx = _bundle.DetermineSelfDestructSnapshotIdx(_address); + } + + public void CommitTree() => _tree.Commit(); + + public IWorldStateScopeProvider.IStorageWriteBatch CreateWriteBatch(int estimatedEntries, Action onRootUpdated) + { + TrieStoreScopeProvider.StorageTreeBulkWriteBatch storageTreeBulkWriteBatch = new( + estimatedEntries, + _tree, + onRootUpdated, + _address, + commit: true); + + return new StorageTreeBulkWriteBatch( + storageTreeBulkWriteBatch, + this + ); + } + + private class StorageTreeBulkWriteBatch( + TrieStoreScopeProvider.StorageTreeBulkWriteBatch storageTreeBulkWriteBatch, + FlatStorageTree storageTree) : IWorldStateScopeProvider.IStorageWriteBatch + { + public void Set(in UInt256 index, byte[] value) + { + storageTreeBulkWriteBatch.Set(in index, value); + storageTree.Set(index, value); + 
} + + public void Clear() + { + storageTreeBulkWriteBatch.Clear(); + storageTree.SelfDestruct(); + } + + public void Dispose() => storageTreeBulkWriteBatch.Dispose(); + } +} diff --git a/src/Nethermind/Nethermind.State.Flat/ScopeProvider/FlatWorldStateManager.cs b/src/Nethermind/Nethermind.State.Flat/ScopeProvider/FlatWorldStateManager.cs new file mode 100644 index 00000000000..af26aacfb89 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/ScopeProvider/FlatWorldStateManager.cs @@ -0,0 +1,65 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using Autofac.Features.AttributeFilters; +using Nethermind.Core; +using Nethermind.Db; +using Nethermind.Evm.State; +using Nethermind.Logging; +using Nethermind.State.Flat.Persistence; +using Nethermind.State.SnapServer; +using Nethermind.Trie.Pruning; + +namespace Nethermind.State.Flat.ScopeProvider; + +public class FlatWorldStateManager( + IFlatDbManager flatDbManager, + IPersistence persistence, + IFlatDbConfig configuration, + FlatStateReader flatStateReader, + ITrieWarmer trieWarmer, + Func<IOverridableWorldScope> overridableWorldScopeFactory, + [KeyFilter(DbNames.Code)] IDb codeDb, + ILogManager logManager) + : IWorldStateManager +{ + private readonly FlatScopeProvider _mainWorldState = new( + codeDb, + flatDbManager, + configuration, + trieWarmer, + ResourcePool.Usage.MainBlockProcessing, + logManager, + isReadOnly: false); + + private readonly FlatTrieVerifier _trieVerifier = new(flatDbManager, persistence, logManager); + + public IWorldStateScopeProvider GlobalWorldState => _mainWorldState; + public IStateReader GlobalStateReader => flatStateReader; + public ISnapServer? SnapServer => null; + public IReadOnlyKeyValueStore? 
HashServer => null; + + public IWorldStateScopeProvider CreateResettableWorldState() => + new FlatScopeProvider( + codeDb, + flatDbManager, + configuration, + new NoopTrieWarmer(), + ResourcePool.Usage.ReadOnlyProcessingEnv, + logManager, + isReadOnly: true); + + event EventHandler? IWorldStateManager.ReorgBoundaryReached + { + add => flatDbManager.ReorgBoundaryReached += value; + remove => flatDbManager.ReorgBoundaryReached -= value; + } + + public IOverridableWorldScope CreateOverridableWorldScope() => + overridableWorldScopeFactory(); + + public bool VerifyTrie(BlockHeader stateAtBlock, CancellationToken cancellationToken) => + _trieVerifier.Verify(stateAtBlock, cancellationToken); + + public void FlushCache(CancellationToken cancellationToken) => flatDbManager.FlushCache(cancellationToken); +} diff --git a/src/Nethermind/Nethermind.State.Flat/ScopeProvider/FlatWorldStateScope.cs b/src/Nethermind/Nethermind.State.Flat/ScopeProvider/FlatWorldStateScope.cs new file mode 100644 index 00000000000..77838ac6555 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/ScopeProvider/FlatWorldStateScope.cs @@ -0,0 +1,290 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System.Collections.Concurrent; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; +using Nethermind.Core; +using Nethermind.Core.Collections; +using Nethermind.Core.Crypto; +using Nethermind.Core.Threading; +using Nethermind.Db; +using Nethermind.Evm.State; +using Nethermind.Logging; +using Nethermind.Trie; + +namespace Nethermind.State.Flat.ScopeProvider; + +public sealed class FlatWorldStateScope : IWorldStateScopeProvider.IScope, ITrieWarmer.IAddressWarmer +{ + private readonly SnapshotBundle _snapshotBundle; + private readonly IFlatCommitTarget _commitTarget; + private readonly IFlatDbConfig _configuration; + private readonly ITrieWarmer 
_warmer; + private readonly ILogManager _logManager; + private readonly bool _isReadOnly; + + private readonly ConcurrencyController _concurrencyQuota; + private readonly PatriciaTree _warmupStateTree; + private readonly StateTree _stateTree; + private readonly Dictionary _storages = new(); + private bool _isDisposed = false; + + // The sequence id is for stopping trie warmer for doing work while committing. Incrementing this value invalidates + // tasks within the trie warmer's ring buffer. + private int _hintSequenceId = 0; + private StateId _currentStateId; + internal bool _pausePrewarmer = false; + + public FlatWorldStateScope( + StateId currentStateId, + SnapshotBundle snapshotBundle, + IWorldStateScopeProvider.ICodeDb codeDb, + IFlatCommitTarget commitTarget, + IFlatDbConfig configuration, + ITrieWarmer trieCacheWarmer, + ILogManager logManager, + bool isReadOnly = false) + { + _currentStateId = currentStateId; + _snapshotBundle = snapshotBundle; + CodeDb = codeDb; + _commitTarget = commitTarget; + + _concurrencyQuota = new ConcurrencyController(Environment.ProcessorCount); // Used during tree commit. + _stateTree = new( + new StateTrieStoreAdapter(snapshotBundle, _concurrencyQuota), + logManager + ) + { + RootHash = currentStateId.StateRoot.ToCommitment() + }; + + _warmupStateTree = new( + new StateTrieStoreWarmerAdapter(snapshotBundle), + logManager + ) + { + RootHash = currentStateId.StateRoot.ToCommitment() + }; + + _configuration = configuration; + _logManager = logManager; + _warmer = trieCacheWarmer; + + _warmer.OnEnterScope(); + _isReadOnly = isReadOnly; + } + + public void Dispose() + { + if (Interlocked.CompareExchange(ref _isDisposed, true, false)) return; + _snapshotBundle.Dispose(); + _warmer.OnExitScope(); + } + + public Hash256 RootHash => _stateTree.RootHash; + public void UpdateRootHash() => _stateTree.UpdateRootHash(); + + public Account? Get(Address address) + { + Account? 
account = _snapshotBundle.GetAccount(address); + + HintGet(address, account); + + if (_configuration.VerifyWithTrie) + { + Account? accTrie = _stateTree.Get(address); + if (accTrie != account) + { + throw new TrieException($"Incorrect account {address}, account hash {address.ToAccountPath}, trie: {accTrie} vs flat: {account}"); + } + } + + return account; + } + + public void HintGet(Address address, Account? account) + { + _snapshotBundle.SetAccount(address, account); + if (_snapshotBundle.ShouldQueuePrewarm(address)) + { + _warmer.PushAddressJob(this, address, _hintSequenceId); + } + } + + public IWorldStateScopeProvider.ICodeDb CodeDb { get; } + + public int HintSequenceId => _hintSequenceId; // Called by FlatStorageTree + + public bool WarmUpStateTrie(Address address, int sequenceId) + { + if (_hintSequenceId != sequenceId || _pausePrewarmer) return false; + + // Note: tree root not changed after writing batch. Also, not cleared. So the result is not correct. + // this is just for warming up + _warmupStateTree.WarmUpPath(address.ToAccountPath.Bytes); + + return true; + } + + public IWorldStateScopeProvider.IStorageTree CreateStorageTree(Address address) => CreateStorageTreeImpl(address); + + private FlatStorageTree CreateStorageTreeImpl(Address address) + { + ref FlatStorageTree? storage = ref CollectionsMarshal.GetValueRefOrAddDefault(_storages, address, out bool exists); + if (exists) return storage!; + + Hash256 storageRoot = Get(address)?.StorageRoot ?? 
Keccak.EmptyTreeHash; + storage = new FlatStorageTree( + this, + _warmer, + _snapshotBundle, + _configuration, + _concurrencyQuota, + storageRoot, + address, + _logManager); + + return storage; + } + + public IWorldStateScopeProvider.IWorldStateWriteBatch StartWriteBatch(int estimatedAccountNum) => + new WriteBatch(this, estimatedAccountNum, _logManager.GetClassLogger()); + + public void Commit(long blockNumber) + { + _pausePrewarmer = true; + + using ArrayPoolListRef commitTask = new(_storages.Count); + + commitTask.Add(Task.Factory.StartNew(() => + { + // Commit will copy the trie nodes from the tree to the bundle. + // Its fine to commit the state tree together with the storage tree at this point as the storage tree + // root has been resolved and updated to the state tree within the writebatch. + _stateTree.Commit(); + })); + + foreach (KeyValuePair storage in _storages) + { + if (_concurrencyQuota.TryRequestConcurrencyQuota()) + { + commitTask.Add(Task.Factory.StartNew((ctx) => + { + FlatStorageTree st = (FlatStorageTree)ctx!; + st.CommitTree(); + _concurrencyQuota.ReturnConcurrencyQuota(); + }, storage.Value)); + } + else + { + storage.Value.CommitTree(); + } + } + + Task.WaitAll(commitTask.AsSpan()); + + _storages.Clear(); + + StateId newStateId = new(blockNumber, RootHash); + bool shouldAddSnapshot = !_isReadOnly && _currentStateId != newStateId; + (Snapshot? newSnapshot, TransientResource? cachedResource) = _snapshotBundle.CollectAndApplySnapshot(_currentStateId, newStateId, shouldAddSnapshot); + + if (shouldAddSnapshot) + { + if (_currentStateId != newStateId) + { + _commitTarget.AddSnapshot(newSnapshot!, cachedResource!); + } + else + { + newSnapshot?.Dispose(); + cachedResource?.Dispose(); + } + } + + _currentStateId = newStateId; + _pausePrewarmer = false; + } + + // Largely same logic as the the one for TrieStoreScopeProvider, but more confusing when deduplicated. + // So I just leave it here. 
+ private class WriteBatch( + FlatWorldStateScope scope, + int estimatedAccountCount, + ILogger logger + ) : IWorldStateScopeProvider.IWorldStateWriteBatch + { + private readonly Dictionary _dirtyAccounts = new(estimatedAccountCount); + private readonly ConcurrentQueue<(AddressAsKey, Hash256)> _dirtyStorageTree = new(); + + public event EventHandler? OnAccountUpdated; + + public void Set(Address key, Account? account) + { + _dirtyAccounts[key] = account; + scope._snapshotBundle.SetAccount(key, account); + + if (account is null) + { + // This may not get called by the storage write batch as the worldstate does not try to update storage + // at all if the end account is null. This is not a problem for trie, but is a problem for flat. + scope.CreateStorageTreeImpl(key).SelfDestruct(); + } + } + + public IWorldStateScopeProvider.IStorageWriteBatch CreateStorageWriteBatch(Address address, int estimatedEntries) => + scope + .CreateStorageTreeImpl(address) + .CreateWriteBatch( + estimatedEntries: estimatedEntries, + onRootUpdated: (address, newRoot) => MarkDirty(address, newRoot)); + + private void MarkDirty(AddressAsKey address, Hash256 storageTreeRootHash) => + _dirtyStorageTree.Enqueue((address, storageTreeRootHash)); + + public void Dispose() + { + try + { + while (_dirtyStorageTree.TryDequeue(out (AddressAsKey, Hash256) entry)) + { + (AddressAsKey key, Hash256 storageRoot) = entry; + if (!_dirtyAccounts.TryGetValue(key, out Account? 
account)) account = scope.Get(key); + if (account == null && storageRoot == Keccak.EmptyTreeHash) continue; + account ??= ThrowNullAccount(key); + account = account!.WithChangedStorageRoot(storageRoot); + _dirtyAccounts[key] = account; + + scope._snapshotBundle.SetAccount(key, account); + + OnAccountUpdated?.Invoke(key, new IWorldStateScopeProvider.AccountUpdated(key, account)); + if (logger.IsTrace) Trace(key, storageRoot, account); + } + + using StateTree.StateTreeBulkSetter stateSetter = scope._stateTree.BeginSet(_dirtyAccounts.Count); + foreach (KeyValuePair kv in _dirtyAccounts) + { + stateSetter.Set(kv.Key, kv.Value); + } + } + finally + { + _dirtyAccounts.Clear(); + + Interlocked.Increment(ref scope._hintSequenceId); + } + + [MethodImpl(MethodImplOptions.NoInlining)] + void Trace(Address address, Hash256 storageRoot, Account? account) => + logger.Trace($"Update {address} S {account?.StorageRoot} -> {storageRoot}"); + + [DoesNotReturn, StackTraceHidden] + static Account ThrowNullAccount(Address address) => + throw new InvalidOperationException($"Account {address} is null when updating storage hash"); + } + } +} diff --git a/src/Nethermind/Nethermind.State.Flat/ScopeProvider/ITrieWarmer.cs b/src/Nethermind/Nethermind.State.Flat/ScopeProvider/ITrieWarmer.cs new file mode 100644 index 00000000000..780cc7aeaa4 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/ScopeProvider/ITrieWarmer.cs @@ -0,0 +1,33 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using Nethermind.Core; +using Nethermind.Int256; + +namespace Nethermind.State.Flat.ScopeProvider; + +public interface ITrieWarmer +{ + public void PushSlotJob( + IStorageWarmer storageTree, + in UInt256? index, + int sequenceId); + + public void PushAddressJob( + IAddressWarmer scope, + Address? 
/// <summary>
/// An <see cref="ITrieWarmer"/> implementation that performs no warm-up work at all.
/// Used where trie warming is unnecessary, e.g. read-only / overridable processing scopes.
/// </summary>
public class NoopTrieWarmer : ITrieWarmer
{
    public void PushSlotJob(ITrieWarmer.IStorageWarmer storageTree, in UInt256? index, int sequenceId)
    {
        // Intentionally empty: slot hints are simply dropped.
    }

    public void PushAddressJob(ITrieWarmer.IAddressWarmer scope, Address? path, int sequenceId)
    {
        // Intentionally empty: address hints are simply dropped.
    }

    public void OnEnterScope()
    {
        // No per-scope state to set up.
    }

    public void OnExitScope()
    {
        // No per-scope state to tear down.
    }
}
+/// +public class FlatOverridableWorldScope : IOverridableWorldScope, IFlatCommitTarget +{ + private readonly IReadOnlyDb _codeDbOverlay; + private readonly ConcurrentDictionary _snapshots = new(); + private readonly IResourcePool _resourcePool; + private readonly IFlatDbManager _flatDbManager; + private readonly ITrieNodeCache _trieNodeCache; + private bool _isDisposed = false; + + public FlatOverridableWorldScope( + [KeyFilter(DbNames.Code)] IDb codeDb, + IFlatDbManager flatDbManager, + IFlatDbConfig configuration, + ITrieNodeCache trieNodeCache, + IResourcePool resourcePool, + ILogManager logManager) + { + GlobalStateReader = new OverridableStateReader(this); + _codeDbOverlay = new ReadOnlyDb(codeDb, true); + _resourcePool = resourcePool; + _flatDbManager = flatDbManager; + _trieNodeCache = trieNodeCache; + WorldState = new OverridableFlatScopeProvider( + this, + configuration, + new NoopTrieWarmer(), + new TrieStoreScopeProvider.KeyValueWithBatchingBackedCodeDb(_codeDbOverlay), + logManager); + } + + public IWorldStateScopeProvider WorldState { get; } + public IStateReader GlobalStateReader { get; } + + public void ResetOverrides() + { + _codeDbOverlay.ClearTempChanges(); + foreach (Snapshot snapshot in _snapshots.Values) + { + snapshot.Dispose(); + } + + _snapshots.Clear(); + } + + private bool HasStateForBlock(BlockHeader? baseBlock) + { + StateId stateId = new(baseBlock); + return _snapshots.ContainsKey(stateId) || _flatDbManager.HasStateForBlock(stateId); + } + + public void AddSnapshot(Snapshot snapshot, TransientResource transientResource) + { + if (!_snapshots.TryAdd(snapshot.To, snapshot)) + { + snapshot.Dispose(); + } + + _resourcePool.ReturnCachedResource(ResourcePool.Usage.ReadOnlyProcessingEnv, transientResource); + } + + private SnapshotBundle GatherSnapshotBundle(BlockHeader? baseBlock) + { + StateId currentState = new(baseBlock); + + SnapshotPooledList snapshots = new(0); + while (_snapshots.TryGetValue(currentState, out Snapshot? 
snapshot) && snapshot.TryAcquire()) + { + snapshots.Add(snapshot); + if (snapshot.From == currentState) break; + currentState = snapshot.From; + } + snapshots.Reverse(); + + ReadOnlySnapshotBundle readOnlySnapshotBundle; + try + { + readOnlySnapshotBundle = _flatDbManager.GatherReadOnlySnapshotBundle(currentState); + } + catch (Exception) + { + snapshots.Dispose(); + throw; + } + + return new SnapshotBundle( + readOnlySnapshotBundle, + _trieNodeCache, + _resourcePool, + ResourcePool.Usage.ReadOnlyProcessingEnv, + snapshots + ); + } + + public void Dispose() + { + if (Interlocked.CompareExchange(ref _isDisposed, true, false)) return; + foreach (Snapshot snapshot in _snapshots.Values) + { + snapshot.Dispose(); + } + _snapshots.Clear(); + } + + private class OverridableFlatScopeProvider( + FlatOverridableWorldScope flatOverrideScope, + IFlatDbConfig configuration, + ITrieWarmer trieWarmer, + IWorldStateScopeProvider.ICodeDb codeDb, + ILogManager logManager) + : IWorldStateScopeProvider + { + public bool HasRoot(BlockHeader? baseBlock) => flatOverrideScope.HasStateForBlock(baseBlock); + + public IWorldStateScopeProvider.IScope BeginScope(BlockHeader? baseBlock) + { + StateId currentState = new(baseBlock); + SnapshotBundle snapshotBundle = flatOverrideScope.GatherSnapshotBundle(baseBlock); + + return new FlatWorldStateScope( + currentState, + snapshotBundle, + codeDb, + flatOverrideScope, + configuration, + trieWarmer, + logManager); + } + } + + private class OverridableStateReader(FlatOverridableWorldScope overridableWorldScope) : IStateReader + { + public bool TryGetAccount(BlockHeader? baseBlock, Address address, out AccountStruct account) + { + using SnapshotBundle snapshotBundle = overridableWorldScope.GatherSnapshotBundle(baseBlock); + if (snapshotBundle.GetAccount(address) is { } acc) + { + account = acc.ToStruct(); + return true; + } + account = default; + return false; + } + + public ReadOnlySpan GetStorage(BlockHeader? 
baseBlock, Address address, in UInt256 index) + { + using SnapshotBundle snapshotBundle = overridableWorldScope.GatherSnapshotBundle(baseBlock); + int selfDestructIdx = snapshotBundle.DetermineSelfDestructSnapshotIdx(address); + return snapshotBundle.GetSlot(address, index, selfDestructIdx) ?? []; + } + + public byte[]? GetCode(Hash256 codeHash) + => codeHash == Keccak.OfAnEmptyString ? [] : overridableWorldScope._codeDbOverlay[codeHash.Bytes]; + + public byte[]? GetCode(in ValueHash256 codeHash) + => codeHash == ValueKeccak.OfAnEmptyString ? [] : overridableWorldScope._codeDbOverlay[codeHash.Bytes]; + + public void RunTreeVisitor(ITreeVisitor treeVisitor, BlockHeader? baseBlock, VisitingOptions? visitingOptions = null) where TCtx : struct, INodeContext + { + StateId stateId = new(baseBlock); + using SnapshotBundle snapshotBundle = overridableWorldScope.GatherSnapshotBundle(baseBlock); + + ConcurrencyController concurrency = new(1); + StateTrieStoreAdapter trieStoreAdapter = new(snapshotBundle, concurrency); + + PatriciaTree patriciaTree = new(trieStoreAdapter, LimboLogs.Instance); + patriciaTree.Accept(treeVisitor, stateId.StateRoot.ToCommitment(), visitingOptions); + } + + public bool HasStateForBlock(BlockHeader? 
/// <summary>
/// Read-only trie store adapter that resolves state-trie nodes from a <see cref="ReadOnlySnapshotBundle"/>.
/// Unresolved hashes are surfaced as <see cref="NodeType.Unknown"/> nodes for lazy resolution.
/// </summary>
internal class ReadOnlyStateTrieStoreAdapter(ReadOnlySnapshotBundle bundle) : AbstractMinimalTrieStore
{
    public override TrieNode FindCachedOrUnknown(in TreePath path, Hash256 hash)
    {
        if (bundle.TryFindStateNodes(path, hash, out TrieNode? node))
        {
            return node;
        }

        return new TrieNode(NodeType.Unknown, hash);
    }

    public override byte[]? TryLoadRlp(in TreePath path, Hash256 hash, ReadFlags flags = ReadFlags.None)
    {
        return bundle.TryLoadStateRlp(path, hash, flags);
    }

    public override ITrieNodeResolver GetStorageTrieNodeResolver(Hash256? address)
    {
        // Used in trie visitor and a weird very edge case that cuts the whole thing to pieces.
        if (address is null)
        {
            return this;
        }

        return new ReadOnlyStorageTrieStoreAdapter(bundle, address);
    }
}

/// <summary>
/// Read-only trie store adapter that resolves storage-trie nodes of a single account
/// (identified by <paramref name="addressHash"/>) from a <see cref="ReadOnlySnapshotBundle"/>.
/// </summary>
internal class ReadOnlyStorageTrieStoreAdapter(
    ReadOnlySnapshotBundle bundle,
    Hash256AsKey addressHash
) : AbstractMinimalTrieStore
{
    public override TrieNode FindCachedOrUnknown(in TreePath path, Hash256 hash)
    {
        if (bundle.TryFindStorageNodes(addressHash, path, hash, out TrieNode? node))
        {
            return node;
        }

        return new TrieNode(NodeType.Unknown, hash);
    }

    public override byte[]? TryLoadRlp(in TreePath path, Hash256 hash, ReadFlags flags = ReadFlags.None)
    {
        return bundle.TryLoadStorageRlp(addressHash, in path, hash, flags);
    }
}
/// <summary>
/// Trie store adapter used by the trie warmer to pre-resolve state-trie nodes from the
/// <see cref="SnapshotBundle"/> without participating in commits.
/// </summary>
internal sealed class StateTrieStoreWarmerAdapter(
    SnapshotBundle bundle
) : AbstractMinimalTrieStore
{
    public override TrieNode FindCachedOrUnknown(in TreePath path, Hash256 hash)
    {
        TrieNode node = bundle.FindStateNodeOrUnknownForTrieWarmer(path, hash);
        if (node.Keccak != hash)
        {
            throw new NodeHashMismatchException($"Node hash mismatch. Path: {path}. Hash: {node.Keccak} vs Requested: {hash}");
        }

        return node;
    }

    public override byte[]? TryLoadRlp(in TreePath path, Hash256 hash, ReadFlags flags = ReadFlags.None)
    {
        return bundle.TryLoadStateRlp(path, hash, flags);
    }

    public override ITrieNodeResolver GetStorageTrieNodeResolver(Hash256? address)
    {
        return address is null ? this : new StorageTrieStoreWarmerAdapter(bundle, address);
    }
}

/// <summary>
/// Trie store adapter for a single account's storage trie, resolving nodes from the
/// <see cref="SnapshotBundle"/> and committing them back under the account's hash.
/// </summary>
internal sealed class StorageTrieStoreAdapter(
    SnapshotBundle bundle,
    ConcurrencyController concurrencyQuota,
    Hash256AsKey addressHash
) : AbstractMinimalTrieStore
{
    public override TrieNode FindCachedOrUnknown(in TreePath path, Hash256 hash)
    {
        TrieNode node = bundle.FindStorageNodeOrUnknown(addressHash, path, hash);
        if (node.Keccak != hash)
        {
            throw new NodeHashMismatchException($"Node hash mismatch. Address {addressHash.Value}. Path: {path}. Hash: {node.Keccak} vs Requested: {hash}");
        }

        return node;
    }

    public override byte[]? TryLoadRlp(in TreePath path, Hash256 hash, ReadFlags flags = ReadFlags.None)
    {
        return bundle.TryLoadStorageRlp(addressHash, in path, hash, flags);
    }

    public override ICommitter BeginCommit(TrieNode? root, WriteFlags writeFlags = WriteFlags.None)
    {
        return new Committer(bundle, addressHash, concurrencyQuota);
    }

    // Writes committed storage nodes into the bundle, keyed by the owning account hash.
    private class Committer(SnapshotBundle bundle, Hash256AsKey addressHash, ConcurrencyController concurrencyQuota)
        : AbstractMinimalCommitter(concurrencyQuota)
    {
        public override TrieNode CommitNode(ref TreePath path, TrieNode node)
        {
            bundle.SetStorageNode(addressHash, path, node);
            return node;
        }
    }
}

/// <summary>
/// Trie warmer counterpart of <see cref="StorageTrieStoreAdapter"/>: read-only node resolution
/// for a single account's storage trie.
/// </summary>
internal sealed class StorageTrieStoreWarmerAdapter(
    SnapshotBundle bundle,
    Hash256AsKey addressHash
) : AbstractMinimalTrieStore
{
    public override TrieNode FindCachedOrUnknown(in TreePath path, Hash256 hash)
    {
        TrieNode node = bundle.FindStorageNodeOrUnknownTrieWarmer(addressHash, path, hash);
        if (node.Keccak != hash)
        {
            throw new NodeHashMismatchException($"Node hash mismatch. Address {addressHash.Value}. Path: {path}. Hash: {node.Keccak} vs Requested: {hash}");
        }

        return node;
    }

    public override byte[]? TryLoadRlp(in TreePath path, Hash256 hash, ReadFlags flags = ReadFlags.None)
    {
        return bundle.TryLoadStorageRlp(addressHash, in path, hash, flags);
    }
}
+/// Additionally, it must not take up a lot of CPU as prewarmer is also run concurrently. Taking up CPU cycle will +/// slow down other part of the processing also. +/// +public sealed class TrieWarmer : ITrieWarmer, IAsyncDisposable +{ + private const int BufferSize = 1024 * 16; + private const int SlotBufferSize = 1024; + + private readonly ILogger _logger; + + private bool _isDisposed = false; + + private readonly SpmcRingBuffer _slotJobBuffer = new(SlotBufferSize); + + // This was also used to store the job from prewarmer. It will be added back in another PR. + private readonly MpmcRingBuffer _jobBufferMultiThreaded = new(BufferSize); + + // A job needs to be small, within one cache line (64B) ideally. + private record struct Job( + // If its warming up address, its a scope, otherwise, its a storage tree. + object scopeOrStorageTree, + Address? path, + UInt256 index, + int sequenceId); + + // A slot hint from the main processing thread is called a lot, so it has its own dedicated queue with a smaller job struct. + private record struct SlotJob( + ITrieWarmer.IStorageWarmer storageTree, + UInt256 index, + int sequenceId); + + private readonly Task? _warmerJob = null; + private readonly int _secondaryWorkerCount; + + private int _pendingWakeUpSlots = 0; + private int _activeSecondaryWorker = 0; + private int _shouldWakeUpPrimaryWorker = 0; + private readonly ManualResetEventSlim _primaryWorkerLatch = new ManualResetEventSlim(); + + // Use a full semaphore instead of the slim variant to reduce the spin used and prefer to not wake up thread until + // needed. Only the main worker spin. 
+ private readonly Semaphore _executionSlots; + + private readonly CancellationTokenSource _cancelTokenSource; + + public TrieWarmer(IProcessExitSource processExitSource, ILogManager logManager, IFlatDbConfig flatDbConfig) + { + _logger = logManager.GetClassLogger(); + + int configuredWorkerCount = flatDbConfig.TrieWarmerWorkerCount; + int workerCount = configuredWorkerCount == -1 + ? Math.Max(Environment.ProcessorCount - 1, 1) + : configuredWorkerCount; + workerCount = Math.Max(workerCount, 2); // Min worker count is 2 + _secondaryWorkerCount = workerCount - 1; + + _executionSlots = new Semaphore(0, _secondaryWorkerCount); + + _cancelTokenSource = CancellationTokenSource.CreateLinkedTokenSource(processExitSource.Token); + + if (_secondaryWorkerCount > 0) + { + _warmerJob = Task.Run(() => + { + using ArrayPoolList tasks = new(_secondaryWorkerCount); + Thread primaryWorkerThread = new(() => + { + RunPrimaryWorker(_cancelTokenSource.Token); + }) + { + Name = "TrieWarmer-Primary", + IsBackground = true + }; + primaryWorkerThread.Start(); + tasks.Add(primaryWorkerThread); + + for (int i = 0; i < _secondaryWorkerCount; i++) + { + Thread t = new(() => + { + RunSecondaryWorker(_cancelTokenSource.Token); + }) + { + Name = $"TrieWarmer-Secondary-{i}", + Priority = ThreadPriority.Lowest, + IsBackground = true + }; + t.Start(); + tasks.Add(t); + } + + foreach (Thread thread in tasks) + { + thread.Join(); + } + }); + } + } + + private void RunPrimaryWorker(CancellationToken cancellationToken) + { + SpinWait spinWait = new(); + try + { + while (true) + { + if (cancellationToken.IsCancellationRequested) break; + + if (TryDequeue(out Job job)) + { + spinWait.Reset(); + MaybeWakeOpOtherWorker(); + + HandleJob(job); + } + else + { + if (spinWait.NextSpinWillYield) + { + _primaryWorkerLatch.Reset(); + _shouldWakeUpPrimaryWorker = 1; + _primaryWorkerLatch.Wait(1, cancellationToken); + _shouldWakeUpPrimaryWorker = 0; + } + else + { + spinWait.SpinOnce(); + } + } + } + } + catch 
(OperationCanceledException) { } + catch (Exception ex) + { + if (_logger.IsError) _logger.Error("Error in primary warmup job ", ex); + } + } + + private void RunSecondaryWorker(CancellationToken cancellationToken) + { + try + { + Interlocked.Increment(ref _activeSecondaryWorker); + while (true) + { + if (cancellationToken.IsCancellationRequested) break; + + if (TryDequeue(out Job job)) + { + HandleJob(job); + } + else + { + Interlocked.Decrement(ref _activeSecondaryWorker); + if (WaitForExecutionSlot()) + { + Interlocked.Decrement(ref _pendingWakeUpSlots); + } + Interlocked.Increment(ref _activeSecondaryWorker); + } + } + } + catch (OperationCanceledException) { } + catch (Exception ex) + { + if (_logger.IsError) _logger.Error("Error in warmup job ", ex); + } + } + + // Some wait but not forever so that it exit properly + private bool WaitForExecutionSlot() => _executionSlots.WaitOne(500); + + private bool ShouldWakeUpMoreWorker() + { + // Assume that for each pending job, it go to the respective worker. 
    // NOTE(review): name likely meant "MaybeWakeUpOtherWorker" — not renamed here because the
    // call site lives outside this block.
    /// <summary>
    /// Releases execution slots one at a time while <c>ShouldWakeUpMoreWorker</c> says more
    /// secondary workers are needed; returns whether at least one worker was woken up.
    /// </summary>
    private bool MaybeWakeOpOtherWorker()
    {
        bool wokeUpWorker = false;

        // Release one by one until all jobs were dequeued.
        while (ShouldWakeUpMoreWorker())
        {
            try
            {
                // Reserve the wake-up before releasing so ShouldWakeUpMoreWorker sees it immediately.
                Interlocked.Increment(ref _pendingWakeUpSlots);
                _executionSlots.Release();
                wokeUpWorker = true;
            }
            catch (SemaphoreFullException)
            {
                // All slots already released: undo the reservation and stop.
                Interlocked.Decrement(ref _pendingWakeUpSlots);
                break;
            }
        }

        return wokeUpWorker;
    }

    /// <summary>
    /// Cheap, best-effort wake-up of the primary worker from the enqueue path.
    /// </summary>
    private bool MaybeWakeupFast()
    {
        // Skipping a wakeup due to this non-atomic read is fine — the primary worker also polls.
        // Doing an atomic operation on every enqueue slows things down measurably.
        if (_shouldWakeUpPrimaryWorker == 1)
        {
            _primaryWorkerLatch.Set();
            _shouldWakeUpPrimaryWorker = 0;
            return true;
        }

        return false;
    }

    /// <summary>
    /// Dequeues the next job, preferring the dedicated slot queue over the shared
    /// multi-threaded queue; slot jobs are widened into the common <c>Job</c> shape.
    /// </summary>
    private bool TryDequeue(out Job job)
    {
        if (_slotJobBuffer.TryDequeue(out SlotJob slotJob))
        {
            job = new Job(
                slotJob.storageTree,
                null,
                slotJob.index,
                slotJob.sequenceId);
            return true;
        }

        return _jobBufferMultiThreaded.TryDequeue(out job);
    }

    /// <summary>
    /// Executes a single warm-up job. The payload type of <c>scopeOrStorageTree</c> decides
    /// whether this is an address (state-trie) or slot (storage-trie) warm-up.
    /// Exceptions listed below are expected races with concurrent commit/dispose and are
    /// deliberately swallowed — warming is strictly best-effort.
    /// </summary>
    private static void HandleJob(Job job)
    {
        (object scopeOrStorageTree,
            Address? address,
            UInt256 index,
            int sequenceId) = job;

        try
        {
            if (scopeOrStorageTree is ITrieWarmer.IAddressWarmer scope)
            {
                // Address jobs always carry a non-null path (see PushAddressJob).
                scope.WarmUpStateTrie(address!, sequenceId);
            }
            else
            {
                ITrieWarmer.IStorageWarmer storageTree = (ITrieWarmer.IStorageWarmer)scopeOrStorageTree;
                storageTree.WarmUpStorageTrie(index, sequenceId);
            }
        }
        // It can be missing when the warmer lags so much behind that the node is now gone.
        catch (TrieNodeException) { }
        // Because it runs in parallel, it could happen that the bundle changed, which causes this.
        catch (NodeHashMismatchException) { }
        // Because it runs in parallel, it could be that the scope is disposed of early.
        catch (ObjectDisposedException) { }
        // When the scope is disposed, it sets some of the dictionaries to null to prevent corrupting later state.
        catch (NullReferenceException) { }
    }

    /// <summary>
    /// Enqueues a state-trie warm-up hint for <paramref name="path"/>; best-effort, drops on a full buffer.
    /// </summary>
    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    public void PushAddressJob(ITrieWarmer.IAddressWarmer scope, Address? path, int sequenceId)
    {
        // Address hints are not single-threaded, so they share the multi-threaded buffer.
        if (_jobBufferMultiThreaded.TryEnqueue(new Job(scope, path, default, sequenceId))) MaybeWakeupFast();
    }
index, int sequenceId) + { + if (_slotJobBuffer.TryEnqueue(new SlotJob(storageTree, index.GetValueOrDefault(), sequenceId))) MaybeWakeupFast(); + } + + public void OnEnterScope() + { + // Drain any existing job + for (int i = 0; i < SlotBufferSize; i++) + { + if (!_slotJobBuffer.TryDequeue(out SlotJob _)) break; + } + for (int i = 0; i < BufferSize; i++) + { + if (!_jobBufferMultiThreaded.TryDequeue(out Job _)) break; + } + + _primaryWorkerLatch.Set(); + } + + public void OnExitScope() { } + + public async ValueTask DisposeAsync() + { + if (Interlocked.CompareExchange(ref _isDisposed, true, false)) return; + + _cancelTokenSource.Cancel(); + + // Release semaphore so that worker detects the cancellation quickly + while (true) + { + try + { + _executionSlots.Release(); + } + catch (SemaphoreFullException) + { + break; + } + } + + if (_warmerJob is not null) await _warmerJob; + _executionSlots.Dispose(); + _cancelTokenSource.Dispose(); + } +} diff --git a/src/Nethermind/Nethermind.State.Flat/SlotValue.cs b/src/Nethermind/Nethermind.State.Flat/SlotValue.cs new file mode 100644 index 00000000000..39823239261 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/SlotValue.cs @@ -0,0 +1,66 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System; +using System.Runtime.CompilerServices; +using System.Runtime.InteropServices; +using System.Runtime.Intrinsics; +using Nethermind.Core.Extensions; + +namespace Nethermind.State.Flat; + +/// +/// Make storing slot value smaller than a byte[]. 
/// <summary>
/// Compact fixed-size (32-byte) representation of a storage slot value; avoids the object
/// and length overhead of a <c>byte[]</c>.
/// </summary>
[StructLayout(LayoutKind.Sequential, Pack = 32, Size = 32)]
public readonly struct SlotValue
{
    // Vector256<byte> keeps the struct a single 32-byte field with vector-friendly alignment.
    public readonly Vector256<byte> _bytes;

    /// <summary>Mutable byte view over the 32-byte payload (use with care — the struct is notionally readonly).</summary>
    public Span<byte> AsSpan => MemoryMarshal.AsBytes(MemoryMarshal.CreateSpan(ref Unsafe.AsRef(in _bytes), 1));

    /// <summary>Read-only byte view over the 32-byte payload.</summary>
    public ReadOnlySpan<byte> AsReadOnlySpan => MemoryMarshal.AsBytes(MemoryMarshal.CreateReadOnlySpan(ref Unsafe.AsRef(in _bytes), 1));

    public const int ByteCount = 32;

    /// <summary>
    /// Creates a slot value from up to 32 bytes. Shorter input is copied to the FRONT of the payload
    /// (trailing bytes zeroed).
    /// NOTE(review): this left-alignment differs from <see cref="FromSpanWithoutLeadingZero"/>, which
    /// right-aligns (leading zeros) — confirm callers never pass short spans here expecting numeric padding.
    /// </summary>
    /// <exception cref="ArgumentException">Thrown when <paramref name="data"/> exceeds 32 bytes.</exception>
    public SlotValue(ReadOnlySpan<byte> data)
    {
        if (data.Length > 32)
        {
            ThrowInvalidLength();
        }

        if (data.Length == 32)
        {
            _bytes = Unsafe.ReadUnaligned<Vector256<byte>>(ref MemoryMarshal.GetReference(data));
        }
        else
        {
            _bytes = Vector256<byte>.Zero;
            data.CopyTo(MemoryMarshal.AsBytes(MemoryMarshal.CreateSpan(ref _bytes, 1)));
        }
    }

    // [DoesNotReturn] lets flow analysis know callers never continue past this helper.
    [System.Diagnostics.CodeAnalysis.DoesNotReturn]
    private static void ThrowInvalidLength() => throw new ArgumentException("Slot value cannot exceed 32 bytes", "data");

    /// <summary>Null-propagating conversion from an optional byte array.</summary>
    public static SlotValue? FromBytes(byte[]? data) => data == null ? null : new SlotValue(data);

    /// <summary>
    /// Creates a slot value from a big-endian span that may have had its leading zeros stripped;
    /// the input is right-aligned and left-padded with zeros to 32 bytes.
    /// </summary>
    /// <exception cref="ArgumentException">Thrown when <paramref name="data"/> exceeds 32 bytes.</exception>
    public static SlotValue FromSpanWithoutLeadingZero(ReadOnlySpan<byte> data)
    {
        switch (data.Length)
        {
            case > 32:
                ThrowInvalidLength();
                return default; // Unreachable; kept for definite-return safety.
            case 32:
                return Unsafe.ReadUnaligned<SlotValue>(ref MemoryMarshal.GetReference(data));
            default:
                Span<byte> buffer = stackalloc byte[32];
                buffer[..(32 - data.Length)].Clear(); // Explicit clear — robust even under SkipLocalsInit.
                data.CopyTo(buffer[(32 - data.Length)..]);
                return Unsafe.ReadUnaligned<SlotValue>(ref MemoryMarshal.GetReference(buffer));
        }
    }

    /// <summary>
    /// Currently, the worldstate that the EVM uses expects the bytes to be without leading zeros.
    /// </summary>
    public byte[] ToEvmBytes() => AsReadOnlySpan.WithoutLeadingZeros().ToArray();
}
LGPL-3.0-only + +using System.Collections.Concurrent; +using System.Collections.Frozen; +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; +using System.Linq; +using Microsoft.Extensions.ObjectPool; +using Nethermind.Core; +using Nethermind.Core.Collections; +using Nethermind.Core.Crypto; +using Nethermind.Core.Metric; +using Nethermind.Core.Utils; +using Nethermind.Int256; +using Nethermind.Trie; +using IResettable = Nethermind.Core.Resettables.IResettable; + +namespace Nethermind.State.Flat; + +/// +/// Snapshot are written keys between state From to state To +/// +/// +/// +/// +/// +public class Snapshot( + StateId from, + StateId to, + SnapshotContent content, + IResourcePool resourcePool, + ResourcePool.Usage usage +) : RefCountingDisposable +{ + public long EstimateMemory() => content.EstimateMemory(); + public ResourcePool.Usage Usage => usage; + + public StateId From => from; + public StateId To => to; + public IEnumerable> Accounts => content.Accounts; + public IEnumerable> SelfDestructedStorageAddresses => content.SelfDestructedStorageAddresses; + public IEnumerable> Storages => content.Storages; + public IEnumerable> StorageNodes => content.StorageNodes; + public IEnumerable<(Hash256AsKey, TreePath)> StorageTrieNodeKeys => content.StorageNodes.Keys; + public IEnumerable> StateNodes => content.StateNodes; + public IEnumerable StateNodeKeys => content.StateNodes.Keys; + public int AccountsCount => content.Accounts.Count; + public int StoragesCount => content.Storages.Count; + public int StateNodesCount => content.StateNodes.Count; + public int StorageNodesCount => content.StorageNodes.Count; + public SnapshotContent Content => content; + + public bool TryGetAccount(AddressAsKey key, out Account? 
acc) => content.Accounts.TryGetValue(key, out acc); + + public bool HasSelfDestruct(Address address) => content.SelfDestructedStorageAddresses.TryGetValue(address, out bool _); + + public bool TryGetStorage(Address address, in UInt256 index, out SlotValue? value) => content.Storages.TryGetValue((address, index), out value); + + public bool TryGetStateNode(in TreePath path, [NotNullWhen(true)] out TrieNode? node) => content.StateNodes.TryGetValue(path, out node); + + public bool TryGetStorageNode(Hash256 address, in TreePath path, [NotNullWhen(true)] out TrieNode? node) => content.StorageNodes.TryGetValue((address, path), out node); + + protected override void CleanUp() => resourcePool.ReturnSnapshotContent(usage, content); + + public bool TryAcquire() => TryAcquireLease(); +} + +public sealed class SnapshotContent : IDisposable, IResettable +{ + private const int NodeSizeEstimate = 650; // Counting the node size one by one has a notable overhead. So we use estimate. + + // They dont actually need to be concurrent, but it makes commit fast by just passing the whole content. 
+ public readonly ConcurrentDictionary Accounts = new(); + public readonly ConcurrentDictionary<(AddressAsKey, UInt256), SlotValue?> Storages = new(); + + // Bool is true if this is a new account also + public readonly ConcurrentDictionary SelfDestructedStorageAddresses = new(); + + // Use of a separate dictionary just for state has a small but measurable impact + public readonly ConcurrentDictionary StateNodes = new(); + + public readonly ConcurrentDictionary<(Hash256AsKey, TreePath), TrieNode> StorageNodes = new(); + + public void Reset() + { + foreach (KeyValuePair kv in StateNodes) kv.Value.PrunePersistedRecursively(1); + foreach (KeyValuePair<(Hash256AsKey, TreePath), TrieNode> kv in StorageNodes) kv.Value.PrunePersistedRecursively(1); + + Accounts.NoResizeClear(); + Storages.NoResizeClear(); + SelfDestructedStorageAddresses.NoResizeClear(); + StateNodes.NoResizeClear(); + StorageNodes.NoResizeClear(); + } + + public long EstimateMemory() + { + // ConcurrentDictionary entry overhead ~48 bytes, includes Account object (~104 bytes) + return + Accounts.Count * 168 + // Key (8B) + Value ref (8B) + concurrent dictionary overhead (48) + Account object (~104B) + Storages.Count * 128 + // Key (40B) + Value (40B SlotValue?) + concurrent dictionary overhead (48) + SelfDestructedStorageAddresses.Count * 60 + // Key (8B) + Value (4B) + concurrent dictionary overhead (48) + StateNodes.Count * (NodeSizeEstimate + 92) + // Key (36B) + Value ref (8B) + concurrent dictionary overhead (48) + TrieNode + StorageNodes.Count * (NodeSizeEstimate + 100); // Key (44B) + Value ref (8B) + concurrent dictionary overhead (48) + TrieNode + } + + /// + /// Estimates memory for compacted snapshots, counting only dictionary overhead + keys + value-type values. + /// Does not count reference type values (Account and TrieNode) as they are already accounted for + /// by non-compacted snapshots (compacted snapshots share these references with the original snapshots). 
+ /// + public long EstimateCompactedMemory() + { + // ConcurrentDictionary entry overhead ~48 bytes + // Reference type values (Account, TrieNode) not counted - already accounted by non-compacted snapshot + return + Accounts.Count * 64 + // Key (8B) + Value ref (8B) + concurrent dictionary overhead (48) + Storages.Count * 128 + // Key (40B) + Value (40B SlotValue?) + concurrent dictionary overhead (48) + SelfDestructedStorageAddresses.Count * 60 + // Key (8B) + Value (4B) + concurrent dictionary overhead (48) + StateNodes.Count * 92 + // Key (36B TreePath) + Value ref (8B) + concurrent dictionary overhead (48) + StorageNodes.Count * 100; // Key (44B) + Value ref (8B) + concurrent dictionary overhead (48) + } + + public void Dispose() + { + } +} + diff --git a/src/Nethermind/Nethermind.State.Flat/SnapshotBundle.cs b/src/Nethermind/Nethermind.State.Flat/SnapshotBundle.cs new file mode 100644 index 00000000000..85597134a73 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/SnapshotBundle.cs @@ -0,0 +1,463 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System.Collections.Concurrent; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; +using Nethermind.Core; +using Nethermind.Core.Collections; +using Nethermind.Core.Crypto; +using Nethermind.Core.Extensions; +using Nethermind.Int256; +using Nethermind.Trie; + +namespace Nethermind.State.Flat; + +/// +/// A bundle of and a layer of write buffer backed by a . +/// +public sealed class SnapshotBundle : IDisposable +{ + private readonly ReadOnlySnapshotBundle _readOnlySnapshotBundle; + + + private SnapshotContent _currentPooledContent = null!; + // These maps are direct reference from members in _currentPooledContent. 
+ private ConcurrentDictionary _changedAccounts = null!; + private ConcurrentDictionary _changedStateNodes = null!; // Bulkset can get nodes concurrently + private ConcurrentDictionary<(Hash256AsKey, TreePath), TrieNode> _changedStorageNodes = null!; // Bulkset can get nodes concurrently + private ConcurrentDictionary<(AddressAsKey, UInt256), SlotValue?> _changedSlots = null!; // Bulkset can get nodes concurrently + private ConcurrentDictionary _selfDestructedAccountAddresses = null!; + + // The cached resource holds some items that are pooled. + // Notably, it holds loaded caches from trie warmer. + private TransientResource _transientResource = null!; + + internal SnapshotPooledList _snapshots; + private readonly ITrieNodeCache _trieNodeCache; + private bool _isDisposed; + private readonly IResourcePool _resourcePool; + + internal ResourcePool.Usage _usage; + + public SnapshotBundle( + ReadOnlySnapshotBundle readOnlySnapshotBundle, + ITrieNodeCache trieNodeCache, + IResourcePool resourcePool, + ResourcePool.Usage usage, + SnapshotPooledList? snapshots = null) + { + _readOnlySnapshotBundle = readOnlySnapshotBundle; + _snapshots = snapshots ?? new SnapshotPooledList(1); + _trieNodeCache = trieNodeCache; + _resourcePool = resourcePool; + _usage = usage; + + _currentPooledContent = resourcePool.GetSnapshotContent(usage); + _transientResource = resourcePool.GetCachedResource(usage); + + ExpandCurrentPooledContent(); + + Metrics.ActiveSnapshotBundle++; + } + + private void ExpandCurrentPooledContent() + { + _changedAccounts = _currentPooledContent.Accounts; + _changedSlots = _currentPooledContent.Storages; + _changedStorageNodes = _currentPooledContent.StorageNodes; + _changedStateNodes = _currentPooledContent.StateNodes; + _selfDestructedAccountAddresses = _currentPooledContent.SelfDestructedStorageAddresses; + } + + public Account? GetAccount(Address address) => DoGetAccount(address, false); + + private Account? 
DoGetAccount(Address address, bool excludeChanged) + { + GuardDispose(); + + if (!excludeChanged && _changedAccounts.TryGetValue(address, out Account? acc)) return acc; + + AddressAsKey key = address; + for (int i = _snapshots.Count - 1; i >= 0; i--) + { + if (_snapshots[i].TryGetAccount(key, out acc)) + { + return acc; + } + } + + return _readOnlySnapshotBundle.GetAccount(address); + } + + public int DetermineSelfDestructSnapshotIdx(Address address) + { + if (_selfDestructedAccountAddresses.ContainsKey(address)) return _snapshots.Count + _readOnlySnapshotBundle.SnapshotCount; + + for (int i = _snapshots.Count - 1; i >= 0; i--) + { + if (_snapshots[i].HasSelfDestruct(address)) return i + _readOnlySnapshotBundle.SnapshotCount; + } + + return _readOnlySnapshotBundle.DetermineSelfDestructSnapshotIdx(address); + } + + public byte[]? GetSlot(Address address, in UInt256 index, int selfDestructStateIdx) + { + GuardDispose(); + + if (_changedSlots.TryGetValue((address, index), out SlotValue? slotValue)) + { + return slotValue?.ToEvmBytes(); + } + + // Self-destructed at the point of the latest change + if (selfDestructStateIdx == _snapshots.Count + _readOnlySnapshotBundle.SnapshotCount) + { + return null; + } + + int currentBundleSelfDestructIdx = selfDestructStateIdx - _readOnlySnapshotBundle.SnapshotCount; + if (selfDestructStateIdx == -1 || currentBundleSelfDestructIdx >= 0) + { + for (int i = _snapshots.Count - 1; i >= 0; i--) + { + if (_snapshots[i].TryGetStorage(address, index, out slotValue)) + { + return slotValue?.ToEvmBytes(); + } + + if (i <= currentBundleSelfDestructIdx) + { + // This is the snapshot with selfdestruct + return null; + } + } + } + + return _readOnlySnapshotBundle.GetSlot(address, index, selfDestructStateIdx); + } + + public TrieNode FindStateNodeOrUnknown(in TreePath path, Hash256 hash) + { + GuardDispose(); + + if (_changedStateNodes.TryGetValue(path, out TrieNode? 
node)) + { + Nethermind.Trie.Pruning.Metrics.LoadedFromCacheNodesCount++; + } + else if (_transientResource.TryGetStateNode(path, hash, out node)) + { + Nethermind.Trie.Pruning.Metrics.LoadedFromCacheNodesCount++; + node = _changedStateNodes.GetOrAdd(path, node); + } + else + { + node = _changedStateNodes.GetOrAdd(path, + DoFindStateNodeExternal(path, hash, out node) + ? node + : new TrieNode(NodeType.Unknown, hash)); + } + + return node; + } + + public TrieNode FindStateNodeOrUnknownForTrieWarmer(in TreePath path, Hash256 hash) + { + // TrieWarmer only touch `_transientResource` + GuardDispose(); + + if (_transientResource.TryGetStateNode(path, hash, out TrieNode? node)) + { + Nethermind.Trie.Pruning.Metrics.LoadedFromCacheNodesCount++; + } + else + { + node = _transientResource.GetOrAddStateNode(path, + DoFindStateNodeExternal(path, hash, out node) + ? node + : new TrieNode(NodeType.Unknown, hash)); + } + + return node; + } + + private bool DoFindStateNodeExternal(in TreePath path, Hash256 hash, [NotNullWhen(true)] out TrieNode? node) + { + if (_trieNodeCache.TryGet(null, path, hash, out node)) + { + Nethermind.Trie.Pruning.Metrics.LoadedFromCacheNodesCount++; + return true; + } + + for (int i = _snapshots.Count - 1; i >= 0; i--) + { + if (_snapshots[i].TryGetStateNode(path, out node)) + { + Nethermind.Trie.Pruning.Metrics.LoadedFromCacheNodesCount++; + return true; + } + } + + return _readOnlySnapshotBundle.TryFindStateNodes(path, hash, out node); + } + + public TrieNode FindStorageNodeOrUnknown(Hash256 address, in TreePath path, Hash256 hash) + { + GuardDispose(); + + if (_changedStorageNodes.TryGetValue(((Hash256AsKey)address, path), out TrieNode? 
node)) + { + Nethermind.Trie.Pruning.Metrics.LoadedFromCacheNodesCount++; + _transientResource.UpdateStorageNode((Hash256AsKey)address, path, node); + } + else if (_transientResource.TryGetStorageNode((Hash256AsKey)address, path, hash, out node)) + { + Nethermind.Trie.Pruning.Metrics.LoadedFromCacheNodesCount++; + node = _changedStorageNodes.GetOrAdd(((Hash256AsKey)address, path), node); + } + else + { + node = _changedStorageNodes.GetOrAdd(((Hash256AsKey)address, path), + DoTryFindStorageNodeExternal((Hash256AsKey)address, path, hash, out node) && node is not null + ? node + : new TrieNode(NodeType.Unknown, hash)); + } + + return node; + } + + + public TrieNode FindStorageNodeOrUnknownTrieWarmer(Hash256 address, in TreePath path, Hash256 hash) + { + GuardDispose(); + + if (_transientResource.TryGetStorageNode((Hash256AsKey)address, path, hash, out TrieNode? node)) + { + Nethermind.Trie.Pruning.Metrics.LoadedFromCacheNodesCount++; + } + else + { + node = _transientResource.GetOrAddStorageNode((Hash256AsKey)address, path, + DoTryFindStorageNodeExternal((Hash256AsKey)address, path, hash, out node) && node is not null + ? node + : new TrieNode(NodeType.Unknown, hash)); + } + + return node; + } + + // Note: No self-destruct boundary check needed for trie nodes. Trie iteration starts from the storage root hash, + // so if storage was self-destructed, the new root is different and orphaned nodes won't be traversed. So we skip the + // check for slightly improved latency. + private bool DoTryFindStorageNodeExternal(Hash256AsKey address, in TreePath path, Hash256 hash, out TrieNode? 
node) + { + if (_trieNodeCache.TryGet(address, path, hash, out node)) + { + Nethermind.Trie.Pruning.Metrics.LoadedFromCacheNodesCount++; + return true; + } + + for (int i = _snapshots.Count - 1; i >= 0; i--) + { + if (_snapshots[i].TryGetStorageNode(address, path, out node)) + { + Nethermind.Trie.Pruning.Metrics.LoadedFromCacheNodesCount++; + return true; + } + } + + return _readOnlySnapshotBundle.TryFindStorageNodes(address, path, hash, out node); + } + + public byte[]? TryLoadStateRlp(in TreePath path, Hash256 hash, ReadFlags flags) + { + GuardDispose(); + + return _readOnlySnapshotBundle.TryLoadStateRlp(path, hash, flags); + } + + public byte[]? TryLoadStorageRlp(Hash256 address, in TreePath path, Hash256 hash, ReadFlags flags) + { + GuardDispose(); + + return _readOnlySnapshotBundle.TryLoadStorageRlp(address, path, hash, flags); + } + + // This is called only during trie commit + public void SetStateNode(in TreePath path, TrieNode newNode) + { + GuardDispose(); + if (!newNode.IsSealed) throw new Exception("Node must be sealed for setting"); + + // Note: Hot path + _changedStateNodes[path] = newNode; + + // Note to self: + // Skipping the cached resource update and doing it in background in TrieNodeCache barely make a dent + // to block processing time but increase the trie node add time by 3x. + _transientResource.UpdateStateNode(path, newNode); + } + + // This is called only during trie commit + public void SetStorageNode(Hash256 addr, in TreePath path, TrieNode newNode) + { + GuardDispose(); + if (!newNode.IsSealed) throw new Exception("Node must be sealed for setting"); + + // Note: Hot path + _changedStorageNodes[(addr, path)] = newNode; + _transientResource.UpdateStorageNode(addr, path, newNode); + } + + public void SetAccount(AddressAsKey addr, Account? account) => _changedAccounts[addr] = account; + + public void SetChangedSlot(AddressAsKey address, in UInt256 index, byte[] value) + { + // So right now, if the value is zero, then it is a deletion. 
This is not the case with verkle where you + // can set a value to be zero. Because of this distinction, the zerobytes logic is handled here instead of + // lower down. + if (value is null || Bytes.AreEqual(value, StorageTree.ZeroBytes)) + { + _changedSlots[(address, index)] = null; + } + else + { + _changedSlots[(address, index)] = SlotValue.FromSpanWithoutLeadingZero(value); + } + } + + // Also called SelfDestruct + public void Clear(Address address, Hash256AsKey addressHash) + { + GuardDispose(); + + Account? account = DoGetAccount(address, excludeChanged: true); + // So... a clear is always sent even on a new account. This makes is a minor optimization as + // it skips persistence, but probably need to make sure it does not send it at all in the first place. + bool isNewAccount = account == null || account.StorageRoot == Keccak.EmptyTreeHash; + + _selfDestructedAccountAddresses.TryAdd(address, isNewAccount); + + if (!isNewAccount) + { + // Collect keys first to avoid modifying during iteration + using ArrayPoolListRef<(Hash256AsKey, TreePath)> storageKeysToRemove = new(16); + foreach (KeyValuePair<(Hash256AsKey, TreePath), TrieNode> kv in _changedStorageNodes) + { + if (kv.Key.Item1.Value == addressHash) + { + storageKeysToRemove.Add(kv.Key); + } + } + + foreach ((Hash256AsKey, TreePath) key in storageKeysToRemove) + { + _changedStorageNodes.TryRemove(key, out _); + } + + using ArrayPoolListRef<(AddressAsKey, UInt256)> slotKeysToRemove = new(16); + foreach (KeyValuePair<(AddressAsKey, UInt256), SlotValue?> kv in _changedSlots) + { + if (kv.Key.Item1.Value == address) + { + slotKeysToRemove.Add(kv.Key); + } + } + + foreach ((AddressAsKey, UInt256) key in slotKeysToRemove) + { + _changedSlots.TryRemove(key, out _); + } + } + } + + // The trie warmer's PushSlotJob is slightly slow due to the wake up logic. 
+ // It is a net improvement to check and modify the bloom filter before calling the trie warmer push + // as most of the slot should already be queued by prewarmer. + public bool ShouldQueuePrewarm(Address address, UInt256? slot = null) => _transientResource.ShouldPrewarm(address, slot); + + public (Snapshot?, TransientResource?) CollectAndApplySnapshot(StateId from, StateId to, bool returnSnapshot = true) + { + // When assembling the snapshot, we straight up pass the _currentPooledContent into the new snapshot + // This is because copying the values have a measurable impact on overall performance. + Snapshot snapshot = new( + from: from, + to: to, + content: _currentPooledContent, + resourcePool: _resourcePool, + usage: _usage); + + snapshot.AcquireLease(); // For this SnapshotBundle. + _snapshots.Add(snapshot); // Now later reads are correct + + // Invalidate cached resources + if (returnSnapshot) + { + TransientResource transientResource = _transientResource; + + // Main block processing only commits once. For optimization, we switch the usage so that the used resource + // is from a different pool that will essentially be empty all the time. + if (_usage == ResourcePool.Usage.MainBlockProcessing) + { + _usage = ResourcePool.Usage.PostMainBlockProcessing; + } + + _transientResource = _resourcePool.GetCachedResource(_usage); + + // Make and apply new snapshot content. + _currentPooledContent = _resourcePool.GetSnapshotContent(_usage); + ExpandCurrentPooledContent(); + + return (snapshot, transientResource); + } + else + { + snapshot.Dispose(); // Revert the lease before + + _transientResource.Reset(); + _currentPooledContent = _resourcePool.GetSnapshotContent(_usage); + + return (null, null); + } + } + + public void Reset() + { + if (_isDisposed) return; + + // Dispose all snapshots in the list + _snapshots.Dispose(); + _snapshots = new SnapshotPooledList(1); + + // Reset the current pooled content (clears _changedAccounts, _changedSlots, etc.) 
+ _currentPooledContent.Reset(); + + // Reset transient resource (clears trie node cache and bloom filter) + _transientResource.Reset(); + + ExpandCurrentPooledContent(); + } + + private void GuardDispose() => ObjectDisposedException.ThrowIf(_isDisposed, this); + + public void Dispose() + { + if (Interlocked.Exchange(ref _isDisposed, true)) return; + + _snapshots.Dispose(); + + // Null them in case unexpected mutation from trie warmer + _snapshots = null!; + _changedSlots = null!; + _changedAccounts = null!; + _changedStorageNodes = null!; + _selfDestructedAccountAddresses = null!; + + _resourcePool.ReturnSnapshotContent(_usage, _currentPooledContent); + _resourcePool.ReturnCachedResource(_usage, _transientResource); + _readOnlySnapshotBundle.Dispose(); + + Metrics.ActiveSnapshotBundle--; + } +} diff --git a/src/Nethermind/Nethermind.State.Flat/SnapshotCompactor.cs b/src/Nethermind/Nethermind.State.Flat/SnapshotCompactor.cs new file mode 100644 index 00000000000..5483c0e7ea7 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/SnapshotCompactor.cs @@ -0,0 +1,200 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System.Collections.Concurrent; +using System.Diagnostics; +using Nethermind.Core; +using Nethermind.Core.Collections; +using Nethermind.Core.Crypto; +using Nethermind.Db; +using Nethermind.Int256; +using Nethermind.Logging; +using Nethermind.Trie; + +namespace Nethermind.State.Flat; + +public class SnapshotCompactor : ISnapshotCompactor +{ + private readonly int _compactSize; + private readonly int _midCompactSize; + private readonly ILogger _logger; + private readonly IResourcePool _resourcePool; + private readonly ISnapshotRepository _snapshotRepository; + + public SnapshotCompactor(IFlatDbConfig config, + IResourcePool resourcePool, + ISnapshotRepository snapshotRepository, + ILogManager logManager) + { + if (config.CompactSize % config.MidCompactSize != 0) throw new 
ArgumentException("Compact size must be divisible by mid compact size"); + + _resourcePool = resourcePool; + _snapshotRepository = snapshotRepository; + _compactSize = config.CompactSize; + _midCompactSize = config.MidCompactSize; + _logger = logManager.GetClassLogger(); + } + + public bool DoCompactSnapshot(in StateId stateId) + { + if (_snapshotRepository.TryLeaseState(stateId, out Snapshot? snapshot)) + { + using Snapshot _ = snapshot; // dispose + + // Actually do the compaction + long sw = Stopwatch.GetTimestamp(); + using SnapshotPooledList snapshots = GetSnapshotsToCompact(snapshot); + + if (snapshots.Count != 0) + { + Snapshot compactedSnapshot = CompactSnapshotBundle(snapshots); + if (_snapshotRepository.TryAddCompactedSnapshot(compactedSnapshot)) + { + StateId stateId1 = snapshot.To; + if (stateId1.BlockNumber % _compactSize == 0) + { + Metrics.CompactTime.Observe(Stopwatch.GetTimestamp() - sw); + } + else if (stateId1.BlockNumber % _midCompactSize == 0) + { + Metrics.MidCompactTime.Observe(Stopwatch.GetTimestamp() - sw); + } + + return true; + } + else + { + compactedSnapshot.Dispose(); + return false; + } + } + } + + return false; + } + + public SnapshotPooledList GetSnapshotsToCompact(Snapshot snapshot) + { + if (_compactSize <= 1) return SnapshotPooledList.Empty(); // Disabled + long blockNumber = snapshot.To.BlockNumber; + if (blockNumber == 0) return SnapshotPooledList.Empty(); + + bool isFullCompaction = blockNumber % _compactSize == 0; + bool isMidCompaction = !isFullCompaction && blockNumber % _midCompactSize == 0; + if (!isFullCompaction && !isMidCompaction) return SnapshotPooledList.Empty(); + + if (isMidCompaction) + { + // Save memory by removing the compacted state from previous mid compaction + foreach (StateId id in _snapshotRepository.GetStatesAtBlockNumber(blockNumber - _compactSize)) + { + if (_snapshotRepository.RemoveAndReleaseCompactedKnownState(id)) + { + } + } + } + + // So the compact size change if its midCompact or fullCompact. 
The reason being mid-compaction is much smaller + // and therefore faster and use less memory however, it increases the average snapshot count per bundle. + // Hard to know if it's better or not now. + int compactSize = isMidCompaction ? _midCompactSize : _compactSize; + long startingBlockNumber = ((blockNumber - 1) / compactSize) * compactSize; + SnapshotPooledList snapshots = _snapshotRepository.AssembleSnapshotsUntil(snapshot.To, startingBlockNumber, compactSize); + + bool snapshotsOk = false; + try + { + if (snapshots.Count == 0) return SnapshotPooledList.Empty(); + + if (snapshots[0].From.BlockNumber != startingBlockNumber) + { + // Could happen especially at start where the block may not be aligned, but not a big problem. + if (_logger.IsDebug) _logger.Debug($"Unable to compile snapshots to compact. {snapshots[0].From.BlockNumber} -> {snapshots[^1].To.BlockNumber}. Starting block number should be {startingBlockNumber}"); + + return SnapshotPooledList.Empty(); + } + + // Nothing to combine if it's just one + if (snapshots.Count == 1) return SnapshotPooledList.Empty(); + + snapshotsOk = true; + return snapshots; + } + finally + { + if (!snapshotsOk) snapshots.Dispose(); + } + } + + public Snapshot CompactSnapshotBundle(SnapshotPooledList snapshots) + { + StateId to = snapshots[^1].To; + StateId from = snapshots[0].From; + + ResourcePool.Usage usage = (to.BlockNumber % _compactSize == 0) + ? ResourcePool.Usage.Compactor + : ResourcePool.Usage.MidCompactor; + + Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, usage); + ConcurrentDictionary accounts = snapshot.Content.Accounts; + ConcurrentDictionary<(AddressAsKey, UInt256), SlotValue?> storages = snapshot.Content.Storages; + ConcurrentDictionary selfDestructedStorageAddresses = snapshot.Content.SelfDestructedStorageAddresses; + ConcurrentDictionary<(Hash256AsKey, TreePath), TrieNode> storageNodes = snapshot.Content.StorageNodes; + ConcurrentDictionary stateNodes = snapshot.Content.StateNodes; + + HashSet
addressToClear = new(); + HashSet addressHashToClear = new(); + + for (int i = 0; i < snapshots.Count; i++) + { + Snapshot knownState = snapshots[i]; + accounts.AddOrUpdateRange(knownState.Accounts); + + addressToClear.Clear(); + addressHashToClear.Clear(); + + foreach ((AddressAsKey address, var isNewAccount) in knownState.SelfDestructedStorageAddresses) + { + if (isNewAccount) + { + // Note, if it's already false, we should not set it to true, hence the TryAdd + selfDestructedStorageAddresses.TryAdd(address, true); + } + else + { + selfDestructedStorageAddresses[address] = false; + addressToClear.Add(address); + addressHashToClear.Add(address.Value.ToAccountPath.ToCommitment()); + } + } + + if (addressToClear.Count > 0) + { + // Clear + foreach (((AddressAsKey Address, UInt256) key, SlotValue? _) in storages) + { + if (addressToClear.Contains(key.Address)) + { + storages.Remove(key, out _); + } + } + + foreach (((Hash256AsKey Hash, TreePath) key, TrieNode _) in storageNodes) + { + if (addressHashToClear.Contains(key.Hash)) + { + storageNodes.Remove(key, out _); + } + } + } + + storages.AddOrUpdateRange(knownState.Storages); + stateNodes.AddOrUpdateRange(knownState.StateNodes); + storageNodes.AddOrUpdateRange(knownState.StorageNodes); + } + + return snapshot; + } + + +} diff --git a/src/Nethermind/Nethermind.State.Flat/SnapshotPooledList.cs b/src/Nethermind/Nethermind.State.Flat/SnapshotPooledList.cs new file mode 100644 index 00000000000..4963b908a55 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/SnapshotPooledList.cs @@ -0,0 +1,23 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System.Collections; +using Nethermind.Core.Collections; + +namespace Nethermind.State.Flat; + +public sealed class SnapshotPooledList : IDisposable, IEnumerable +{ + private readonly ArrayPoolList _list; + public SnapshotPooledList(int initial) => _list = new ArrayPoolList(initial); + private 
SnapshotPooledList(ArrayPoolList list) => _list = list; + public int Count => _list.Count; + public Snapshot this[int index] => _list[index]; + public Snapshot this[Index index] => _list[index]; + public void Add(Snapshot snapshot) => _list.Add(snapshot); + public void Reverse() => _list.Reverse(); + public static SnapshotPooledList Empty() => new SnapshotPooledList(ArrayPoolList.Empty()); + public IEnumerator GetEnumerator() => _list.GetEnumerator(); + IEnumerator IEnumerable.GetEnumerator() => GetEnumerator(); + public void Dispose() => _list.DisposeRecursive(); +} diff --git a/src/Nethermind/Nethermind.State.Flat/SnapshotRepository.cs b/src/Nethermind/Nethermind.State.Flat/SnapshotRepository.cs new file mode 100644 index 00000000000..c7309c6ff79 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/SnapshotRepository.cs @@ -0,0 +1,221 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System.Collections.Concurrent; +using System.Diagnostics.CodeAnalysis; +using Nethermind.Core.Collections; +using Nethermind.Core.Crypto; +using Nethermind.Core.Extensions; +using Nethermind.Core.Threading; +using Nethermind.Logging; + +namespace Nethermind.State.Flat; + +public class SnapshotRepository(ILogManager logManager) : ISnapshotRepository +{ + private readonly ILogger _logger = logManager.GetClassLogger(); + + private readonly ConcurrentDictionary _compactedSnapshots = new(); + private readonly ConcurrentDictionary _snapshots = new(); + private readonly ReadWriteLockBox> _sortedSnapshotStateIds = new(new SortedSet()); + + public int SnapshotCount => _snapshots.Count; + public int CompactedSnapshotCount => _compactedSnapshots.Count; + + public void AddStateId(in StateId stateId) + { + using ReadWriteLockBox>.Lock _ = _sortedSnapshotStateIds.EnterWriteLock(out SortedSet sortedSnapshots); + sortedSnapshots.Add(stateId); + } + + public SnapshotPooledList AssembleSnapshots(in StateId baseBlock, in StateId 
targetState, int estimatedSize) + { + SnapshotPooledList list = AssembleSnapshotsUntil(baseBlock, targetState.BlockNumber, estimatedSize); + if (list.Count > 0 && list[0].From.BlockNumber == targetState.BlockNumber && list[0].From != targetState) + { + list.Dispose(); + + // Likely persisted a non-finalized block. + throw new InvalidOperationException($"Attempted to compile snapshots from {baseBlock} to {targetState} but target is not reachable from baseBlock"); + } + + return list; + } + + public SnapshotPooledList AssembleSnapshotsUntil(in StateId baseBlock, long minBlockNumber, int estimatedSize) + { + SnapshotPooledList snapshots = new(estimatedSize); + + StateId current = baseBlock; + while (TryLeaseCompactedState(current, out Snapshot? snapshot) || TryLeaseState(current, out snapshot)) + { + if (_logger.IsTrace) _logger.Trace($"Got {snapshot.From} -> {snapshot.To}"); + + if (snapshot.From.BlockNumber < minBlockNumber) + { + // `snapshot` is now a compacted snapshot, we dont want to use it. + snapshot.Dispose(); + + // Try got get a non compacted one + if (!TryLeaseState(current, out snapshot)) + { + // Failure, exit loop. + break; + } + } + + if (snapshot.From.BlockNumber < minBlockNumber) + { + // Should not happen... unless someone try to add out of order snapshots + snapshot.Dispose(); + break; + } + + snapshots.Add(snapshot); + if (snapshot.From == current) + { + break; // Some test commit two block with the same id, so we dont know the parent anymore. + } + + if (snapshot.From.BlockNumber == minBlockNumber) + { + break; + } + + current = snapshot.From; + } + + snapshots.Reverse(); + return snapshots; + } + + public bool TryLeaseCompactedState(in StateId stateId, [NotNullWhen(true)] out Snapshot? 
entry) + { + SpinWait sw = new(); + while (_compactedSnapshots.TryGetValue(stateId, out entry)) + { + if (entry.TryAcquire()) return true; + + sw.SpinOnce(); + } + return false; + } + + public bool TryLeaseState(in StateId stateId, [NotNullWhen(true)] out Snapshot? entry) + { + SpinWait sw = new(); + while (_snapshots.TryGetValue(stateId, out entry)) + { + if (entry.TryAcquire()) return true; + + sw.SpinOnce(); + } + return false; + } + + public bool TryAddCompactedSnapshot(Snapshot snapshot) + { + if (_compactedSnapshots.TryAdd(snapshot.To, snapshot)) + { + Metrics.CompactedSnapshotCount++; + + long compactedBytes = snapshot.Content.EstimateCompactedMemory(); + Metrics.CompactedSnapshotMemory += compactedBytes; + Metrics.TotalSnapshotMemory += compactedBytes; + + return true; + } + + return false; + } + + public bool TryAddSnapshot(Snapshot snapshot) + { + if (_snapshots.TryAdd(snapshot.To, snapshot)) + { + Metrics.SnapshotCount++; + + long totalBytes = snapshot.EstimateMemory(); + Metrics.SnapshotMemory += totalBytes; + Metrics.TotalSnapshotMemory += totalBytes; + + return true; + } + + return false; + } + + public ArrayPoolList GetStatesAtBlockNumber(long blockNumber) + { + using ReadWriteLockBox>.Lock _ = _sortedSnapshotStateIds.EnterReadLock(out SortedSet sortedSnapshots); + + StateId min = new(blockNumber, ValueKeccak.Zero); + StateId max = new(blockNumber, ValueKeccak.MaxValue); + + return sortedSnapshots.GetViewBetween(min, max).ToPooledList(0); + } + + public StateId? GetLastSnapshotId() + { + using ReadWriteLockBox>.Lock _ = _sortedSnapshotStateIds.EnterReadLock(out SortedSet sortedSnapshots); + return sortedSnapshots.Count == 0 ? null : sortedSnapshots.Max; + } + + public bool RemoveAndReleaseCompactedKnownState(in StateId stateId) + { + if (_compactedSnapshots.TryRemove(stateId, out Snapshot? 
existingState)) + { + Metrics.CompactedSnapshotCount--; + + long compactedBytes = existingState.Content.EstimateCompactedMemory(); + Metrics.CompactedSnapshotMemory -= compactedBytes; + Metrics.TotalSnapshotMemory -= compactedBytes; + + existingState.Dispose(); + + return true; + } + + return false; + } + + public void RemoveAndReleaseKnownState(StateId stateId) + { + if (_snapshots.TryRemove(stateId, out Snapshot? existingState)) + { + Metrics.SnapshotCount--; + + using (_sortedSnapshotStateIds.EnterWriteLock(out SortedSet sortedSnapshots)) + { + sortedSnapshots.Remove(stateId); + } + + long totalBytes = existingState.EstimateMemory(); + Metrics.SnapshotMemory -= totalBytes; + Metrics.TotalSnapshotMemory -= totalBytes; + + existingState.Dispose(); // After memory + } + } + + public bool HasState(in StateId stateId) => _snapshots.ContainsKey(stateId); + + public ArrayPoolList GetSnapshotBeforeStateId(StateId stateId) + { + using ReadWriteLockBox>.Lock _ = _sortedSnapshotStateIds.EnterReadLock(out SortedSet sortedSnapshots); + + return sortedSnapshots + .GetViewBetween(new StateId(0, Hash256.Zero), new StateId(stateId.BlockNumber, Keccak.MaxValue)) + .ToPooledList(0); + } + + public void RemoveStatesUntil(in StateId currentPersistedStateId) + { + using ArrayPoolList statesBeforeStateId = GetSnapshotBeforeStateId(currentPersistedStateId); + foreach (StateId stateToRemove in statesBeforeStateId) + { + RemoveAndReleaseCompactedKnownState(stateToRemove); + RemoveAndReleaseKnownState(stateToRemove); + } + } +} diff --git a/src/Nethermind/Nethermind.State.Flat/SpmcRingBuffer.cs b/src/Nethermind/Nethermind.State.Flat/SpmcRingBuffer.cs new file mode 100644 index 00000000000..9c0dc0d87f4 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/SpmcRingBuffer.cs @@ -0,0 +1,120 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System.Runtime.CompilerServices; + +namespace Nethermind.State.Flat; + +/// +/// AI 
generated single producer multiple consumer ring buffer. If called by multiple producers, it will hang. +/// See for multiple producer variant. +/// The selection of is important. It should be ideally a struct of size no more than 64 bytes +/// or 32 bytes if possible. +/// +/// +public sealed class SpmcRingBuffer +{ + private readonly T[] _entries; + private readonly long[] _sequences; + private readonly int _mask; + private readonly int _capacity; + + public long EstimatedJobCount + { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + get + { + long tail = Volatile.Read(ref _tail); + long head = Volatile.Read(ref _head); + + long count = tail - head; + return count < 0 ? 0 : count; // clamp just in case of a race + } + } + + // --- head (consumers) + padding to avoid false sharing with _tail --- + private long _head; +#pragma warning disable CS0169 // Field is never used + private long _headPad1, _headPad2, _headPad3, _headPad4, _headPad5, _headPad6, _headPad7; + + // --- tail (producer) + padding --- + private long _tail; + private long _tailPad1, _tailPad2, _tailPad3, _tailPad4, _tailPad5, _tailPad6, _tailPad7; +#pragma warning restore CS0169 // Field is never used + + public SpmcRingBuffer(int capacityPowerOfTwo) + { + if (capacityPowerOfTwo <= 0 || (capacityPowerOfTwo & (capacityPowerOfTwo - 1)) != 0) + throw new ArgumentException("Capacity must be power of two."); + + _capacity = capacityPowerOfTwo; + _mask = capacityPowerOfTwo - 1; + _entries = new T[capacityPowerOfTwo]; + _sequences = new long[capacityPowerOfTwo]; + + for (int i = 0; i < capacityPowerOfTwo; i++) + _sequences[i] = i; + } + + /// + /// Single producer: enqueue one item if there is space. + /// Returns false if the ring is full. + /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public bool TryEnqueue(in T item) + { + // Single producer: no CAS needed on tail. 
+ long tail = _tail; + int index = (int)(tail & _mask); + + // Slot is free only if its sequence equals the current tail. + long seq = Volatile.Read(ref _sequences[index]); + if (seq != tail) + return false; // not yet consumed -> buffer full + + _entries[index] = item; + + // Publish: + // seq = tail + 1 means "item for head == tail is now visible". + // Volatile.Write gives us the release fence so consumer + // sees the payload after seeing seq. + Volatile.Write(ref _sequences[index], tail + 1); + + _tail = tail + 1; + + return true; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + public bool TryDequeue(out T item) + { + while (true) + { + long head = Volatile.Read(ref _head); + int index = (int)(head & _mask); + long seq = Volatile.Read(ref _sequences[index]); + long expectedSeq = head + 1; + + // If seq == expectedSeq, the producer has finished writing + if (seq == expectedSeq) + { + if (Interlocked.CompareExchange(ref _head, head + 1, head) == head) + { + item = _entries[index]; + // Mark as ready for the producer's next lap (head + capacity) + Volatile.Write(ref _sequences[index], head + _capacity); + return true; + } + } + else if (seq < expectedSeq) + { + // Producer hasn't filled this slot yet + item = default!; + return false; + } + + // If seq > expectedSeq, another consumer won the race; loop and retry + Thread.SpinWait(1); + } + } +} diff --git a/src/Nethermind/Nethermind.State.Flat/StateId.cs b/src/Nethermind/Nethermind.State.Flat/StateId.cs new file mode 100644 index 00000000000..3caf59bbed7 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/StateId.cs @@ -0,0 +1,25 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System; +using Nethermind.Core; +using Nethermind.Core.Crypto; +using Nethermind.Crypto; + +namespace Nethermind.State.Flat; + +public readonly record struct StateId(long BlockNumber, in ValueHash256 StateRoot) : IComparable +{ + public StateId(BlockHeader? 
header) : this(header?.Number ?? -1, header?.StateRoot ?? Keccak.EmptyTreeHash) + { + } + + public static StateId PreGenesis = new(-1, Keccak.EmptyTreeHash); + + public int CompareTo(StateId other) + { + int blockNumberComparison = BlockNumber.CompareTo(other.BlockNumber); + if (blockNumberComparison != 0) return blockNumberComparison; + return StateRoot.CompareTo(other.StateRoot); + } +} diff --git a/src/Nethermind/Nethermind.State.Flat/TransientResource.cs b/src/Nethermind/Nethermind.State.Flat/TransientResource.cs new file mode 100644 index 00000000000..66959dea55f --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/TransientResource.cs @@ -0,0 +1,85 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System.Diagnostics.CodeAnalysis; +using System.IO.Hashing; +using System.Numerics; +using Nethermind.Core; +using Nethermind.Core.Crypto; +using Nethermind.Db; +using Nethermind.Int256; +using Nethermind.State.Flat.Persistence.BloomFilter; +using Nethermind.Trie; +using IResettable = Nethermind.Core.Resettables.IResettable; + +namespace Nethermind.State.Flat; + +/// +/// Contains some large variable used by but not committed into +/// as part of a . Pooling this is largely for performance reason. +/// +/// +public record TransientResource(TransientResource.Size size) : IDisposable, IResettable +{ + public record Size(long PrewarmedAddressSize, int NodesCacheSize); + + public BloomFilter PrewarmedAddresses = new(size.PrewarmedAddressSize, 14); // 14 is exactly 8 probes, which the SIMD instruction does. 
+ public TrieNodeCache.ChildCache Nodes = new(size.NodesCacheSize); + + public Size GetSize() => new(PrewarmedAddresses.Capacity, Nodes.Capacity); + + public int CachedNodes => Nodes.Count; + + public void Reset() + { + Nodes.Reset(); + + if (PrewarmedAddresses.Count > PrewarmedAddresses.Capacity) + { + long newCapacity = (long)BitOperations.RoundUpToPowerOf2((ulong)PrewarmedAddresses.Count); + double bitsPerKey = PrewarmedAddresses.BitsPerKey; + // Create new filter before disposing old one to avoid null ref race condition + BloomFilter newFilter = new BloomFilter(newCapacity, bitsPerKey); + BloomFilter oldFilter = Interlocked.Exchange(ref PrewarmedAddresses, newFilter); + oldFilter.Dispose(); + } + else + { + PrewarmedAddresses.Clear(); + } + } + + public bool ShouldPrewarm(Address address, UInt256? slot) + { + ulong hash; + if (slot is null) + { + hash = XxHash64.HashToUInt64(address.Bytes); + } + else + { + Span buffer = stackalloc byte[20 + 32]; + address.Bytes.CopyTo(buffer); + slot.Value.ToBigEndian(buffer[20..]); + hash = XxHash64.HashToUInt64(buffer); + } + + if (PrewarmedAddresses.MightContain(hash)) return false; + PrewarmedAddresses.Add(hash); + return true; + } + + public void Dispose() => PrewarmedAddresses.Dispose(); + + public bool TryGetStateNode(in TreePath path, Hash256 hash, [NotNullWhen(true)] out TrieNode? node) => Nodes.TryGet(null, path, hash, out node); + + public TrieNode GetOrAddStateNode(in TreePath path, TrieNode trieNode) => Nodes.GetOrAdd(null, path, trieNode); + + public void UpdateStateNode(in TreePath path, TrieNode node) => Nodes.Set(null, path, node); + + public bool TryGetStorageNode(Hash256AsKey address, in TreePath path, Hash256 hash, [NotNullWhen(true)] out TrieNode? 
node) => Nodes.TryGet(address, path, hash, out node); + + public TrieNode GetOrAddStorageNode(Hash256AsKey address, in TreePath path, TrieNode trieNode) => Nodes.GetOrAdd(address, path, trieNode); + + public void UpdateStorageNode(Hash256AsKey address, in TreePath path, TrieNode node) => Nodes.Set(address, path, node); +} diff --git a/src/Nethermind/Nethermind.State.Flat/TrieNodeCache.cs b/src/Nethermind/Nethermind.State.Flat/TrieNodeCache.cs new file mode 100644 index 00000000000..196de994b8f --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/TrieNodeCache.cs @@ -0,0 +1,293 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System.Diagnostics.CodeAnalysis; +using System.Numerics; +using System.Runtime.CompilerServices; +using Nethermind.Core.Crypto; +using Nethermind.Db; +using Nethermind.Logging; +using Nethermind.Trie; + +namespace Nethermind.State.Flat; + +/// +/// A specialized cache. It uses a sharded array of as the cache with the +/// hashcode of the path mapping to the array position directly. If a collision happen, it just replace the old entry. +/// When trying to get the node, the node hash must be checked to ensure the right node is the one fetched. +/// The use of sharding is so that when memory target is exceeded, whole shard which is grouped by tree path is cleared. +/// This improve block cache hit rate as trie nodes of similar subtree tend to be clustered together. 
+/// +public sealed class TrieNodeCache : ITrieNodeCache +{ + private const int EstimatedSizePerNode = 700; + private const double UtilRatio = 0.25; + private const int ShardCount = 256; + + private readonly ILogger _logger; + private readonly TrieNode?[][] _cacheShards; + private readonly long[] _shardMemoryUsages; + private readonly long _maxCacheMemoryThreshold; + private readonly int _bucketSize; + private readonly int _bucketMask; + + private int _nextShardToClear = 0; + + public TrieNodeCache(IFlatDbConfig flatDbConfig, ILogManager logManager) + { + _logger = logManager.GetClassLogger(); + + long maxCacheMemoryThreshold = flatDbConfig.TrieCacheMemoryBudget; + long totalNodeCount = (maxCacheMemoryThreshold / EstimatedSizePerNode); + + int targetBucketSize = (int)((totalNodeCount / UtilRatio) / ShardCount); + _bucketSize = (int)BitOperations.RoundUpToPowerOf2((uint)Math.Max(16, targetBucketSize)); + _bucketMask = _bucketSize - 1; + + _cacheShards = new TrieNode[ShardCount][]; + for (int i = 0; i < ShardCount; i++) + { + _cacheShards[i] = new TrieNode[_bucketSize]; + } + + _shardMemoryUsages = new long[ShardCount]; + _maxCacheMemoryThreshold = maxCacheMemoryThreshold; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static (int, int) GetShardAndHashCode(Hash256? address, in TreePath path) + { + int h1; + + int shardIdx = path.Path.Bytes[0]; + if (address is not null) + { + // Add address byte so that the root nodes of storage does not all sit in a single shard + shardIdx += address.Bytes[0]; + shardIdx %= 256; + h1 = address.GetHashCode(); + } + else + { + h1 = 0; + } + + int h2 = path.GetHashCode(); + + // Simple XOR is often enough and faster than HashCode.Combine for this use case + int hashCode = (h1 ^ h2) & int.MaxValue; + + return (shardIdx, hashCode); + } + + public bool TryGet(Hash256? address, in TreePath path, Hash256 hash, [NotNullWhen(true)] out TrieNode? 
node) + { + (int shardIdx, int hashCode) = GetShardAndHashCode(address, in path); + int bucketIdx = hashCode & _bucketMask; + + TrieNode? maybeNode = _cacheShards[shardIdx][bucketIdx]; + if (maybeNode is not null && maybeNode.Keccak == hash) + { + node = maybeNode; + return true; + } + + node = null; + return false; + } + + public void Add(TransientResource transientResource) + { + if (_maxCacheMemoryThreshold == 0) + { + for (int i = 0; i < ShardCount; i++) + { + (int hashCode, TrieNode? node)[] shard = transientResource.Nodes.Shards[i]; + for (int j = 0; j < shard.Length; j++) + { + if (shard[j].node is { } newNode) newNode.PrunePersistedRecursively(1); + + } + } + return; + } + + void AddToCacheWithHashCode(int shardIdx, int hashCode, TrieNode newNode) + { + int bucketIdx = hashCode & _bucketMask; + newNode.PrunePersistedRecursively(1); + Interlocked.Add(ref _shardMemoryUsages[shardIdx], newNode.GetMemorySize(false)); + + TrieNode? oldNode = Interlocked.Exchange(ref _cacheShards[shardIdx][bucketIdx], newNode); + if (oldNode is not null) + { + long oldMemory = oldNode.GetMemorySize(false); + oldNode.PrunePersistedRecursively(1); + + Interlocked.Add(ref _shardMemoryUsages[shardIdx], -oldMemory); + } + } + + Parallel.For(0, ShardCount, (i) => + { + (int hashCode, TrieNode? 
node)[] shard = transientResource.Nodes.Shards[i]; + for (int j = 0; j < shard.Length; j++) + { + if (shard[j].node is { } newNode) AddToCacheWithHashCode(i, shard[j].hashCode, newNode); + } + }); + + long currentTotalMemory = 0; + for (int i = 0; i < ShardCount; i++) currentTotalMemory += _shardMemoryUsages[i]; + + long prevMemory = currentTotalMemory; + bool wasPruned = false; + + while (currentTotalMemory > _maxCacheMemoryThreshold) + { + wasPruned = true; + int shardToClear = _nextShardToClear; + + // Prune any remaining reference + for (int i = 0; i < _bucketSize; i++) + { + _cacheShards[shardToClear][i]?.PrunePersistedRecursively(1); + } + + // Clear the shard + Array.Clear(_cacheShards[shardToClear]); + + // Reset shard memory + long freedMemory = Interlocked.Exchange(ref _shardMemoryUsages[shardToClear], 0); + currentTotalMemory -= freedMemory; + + _nextShardToClear = (_nextShardToClear + 1) & 255; // Fast modulo 256 + } + + if (wasPruned && _logger.IsTrace) _logger.Trace($"Pruning trie cache from {prevMemory} to {currentTotalMemory}"); + + Nethermind.Trie.Pruning.Metrics.MemoryUsedByCache = currentTotalMemory; + } + + /// + /// Clears all cached trie nodes. + /// + public void Clear() + { + for (int i = 0; i < ShardCount; i++) + { + for (int j = 0; j < _bucketSize; j++) + { + _cacheShards[i][j]?.PrunePersistedRecursively(1); + } + Array.Clear(_cacheShards[i]); + Interlocked.Exchange(ref _shardMemoryUsages[i], 0); + } + _nextShardToClear = 0; + Nethermind.Trie.Pruning.Metrics.MemoryUsedByCache = 0; + } + + /// + /// Small cached for use in . Its also sharded with the same shard mechanics so that + /// when adding to trie node cache can be done in parallel. + /// + public class ChildCache + { + private readonly (int hashCode, TrieNode? node)[][] _shards; + private int _count = 0; + private int _mask; + private int _shardSize; + + public int Count => _count; + public int Capacity => _shards.Length * _shardSize; + public (int hashCode, TrieNode? 
node)[][] Shards => _shards; + + public ChildCache(int size) + { + int powerOfTwoSize = (int)BitOperations.RoundUpToPowerOf2((uint)(size + ShardCount - 1) / ShardCount); + _shards = new (int, TrieNode?)[ShardCount][]; + _mask = powerOfTwoSize - 1; + _shardSize = powerOfTwoSize; + CreateCacheArray(_shardSize); + } + + private void CreateCacheArray(int size) + { + for (int i = 0; i < ShardCount; i++) _shards[i] = new (int, TrieNode?)[size]; + } + + public void Reset() + { + if (_count / UtilRatio > ShardCount * _shardSize) + { + int newTarget = (int)(_count / UtilRatio); + int powerOfTwoSize = (int)BitOperations.RoundUpToPowerOf2((uint)(newTarget + ShardCount - 1) / ShardCount); + _shardSize = powerOfTwoSize; + CreateCacheArray(_shardSize); + _mask = powerOfTwoSize - 1; + } + else + { + for (int i = 0; i < ShardCount; i++) + { + Array.Clear(_shards[i], 0, _shards[i].Length); + } + } + + _count = 0; + } + + public bool TryGet(Hash256? address, in TreePath path, Hash256 hash, [NotNullWhen(true)] out TrieNode? node) + { + (int shardIdx, int hashCode) = GetShardAndHashCode(address, path); + int idx = hashCode & _mask; + (int hashCode, TrieNode? node) entry = _shards[shardIdx][idx]; // Copy struct once + + if (entry.hashCode != hashCode) + { + node = null; + return false; + } + + TrieNode? maybeNode = entry.node; // Store it to prevent concurrency issue + if (maybeNode is null || maybeNode.Keccak != hash) + { + node = null; + return false; + } + + node = maybeNode; + return true; + } + + public void Set(Hash256? address, in TreePath path, TrieNode node) + { + (int shard, int hashCode) = GetShardAndHashCode(address, path); + int idx = hashCode & _mask; + + _count++; // Track count + + _shards[shard][idx] = (hashCode, node); + } + + public TrieNode GetOrAdd(Hash256? address, in TreePath path, TrieNode trieNode) + { + (int shard, int hashCode) = GetShardAndHashCode(address, path); + int idx = hashCode & _mask; + + ref (int hashCode, TrieNode? 
node) entry = ref _shards[shard][idx]; + TrieNode? maybeNode = entry.node; // Store it to prevent concurrency issue + if (maybeNode is not null) + { + if (maybeNode.Keccak == trieNode.Keccak) return maybeNode; + } + else + { + _count++; // Track count + } + + entry = (hashCode, trieNode); + return trieNode; + } + } +} diff --git a/src/Nethermind/Nethermind.State.Test/Nethermind.State.Test.csproj b/src/Nethermind/Nethermind.State.Test/Nethermind.State.Test.csproj index 4d0fd2ba57d..cd5c0964188 100644 --- a/src/Nethermind/Nethermind.State.Test/Nethermind.State.Test.csproj +++ b/src/Nethermind/Nethermind.State.Test/Nethermind.State.Test.csproj @@ -1,7 +1,6 @@ - Nethermind.Store.Test diff --git a/src/Nethermind/Nethermind.State/IWorldStateManager.cs b/src/Nethermind/Nethermind.State/IWorldStateManager.cs index 6c56f8ca6dd..6573a8d0022 100644 --- a/src/Nethermind/Nethermind.State/IWorldStateManager.cs +++ b/src/Nethermind/Nethermind.State/IWorldStateManager.cs @@ -42,7 +42,7 @@ public interface IWorldStateManager void FlushCache(CancellationToken cancellationToken); } -public interface IOverridableWorldScope +public interface IOverridableWorldScope : IDisposable { IWorldStateScopeProvider WorldState { get; } IStateReader GlobalStateReader { get; } diff --git a/src/Nethermind/Nethermind.State/OverridableWorldStateManager.cs b/src/Nethermind/Nethermind.State/OverridableWorldStateManager.cs index 46412024a93..2ee6d19ff6a 100644 --- a/src/Nethermind/Nethermind.State/OverridableWorldStateManager.cs +++ b/src/Nethermind/Nethermind.State/OverridableWorldStateManager.cs @@ -25,8 +25,6 @@ public OverridableWorldStateManager(IDbProvider dbProvider, IReadOnlyTrieStore t public IWorldStateScopeProvider WorldState { get; } public IStateReader GlobalStateReader => _reader; - public void ResetOverrides() - { - _dbProvider.ClearTempChanges(); - } + public void ResetOverrides() => _dbProvider.ClearTempChanges(); + public void Dispose() => _dbProvider.Dispose(); } diff --git 
a/src/Nethermind/Nethermind.State/StateTree.cs b/src/Nethermind/Nethermind.State/StateTree.cs index cc3a384659b..d71447eaa2e 100644 --- a/src/Nethermind/Nethermind.State/StateTree.cs +++ b/src/Nethermind/Nethermind.State/StateTree.cs @@ -83,7 +83,7 @@ public class StateTreeBulkSetter(int estimatedEntries, StateTree tree) : IDispos { ArrayPoolList _bulkWrite = new(estimatedEntries); - public void Set(Address key, Account account) + public void Set(Address key, Account? account) { KeccakCache.ComputeTo(key.Bytes, out ValueHash256 keccak); diff --git a/src/Nethermind/Nethermind.State/TrieStoreScopeProvider.cs b/src/Nethermind/Nethermind.State/TrieStoreScopeProvider.cs index e4f01d4008e..9ed0c594865 100644 --- a/src/Nethermind/Nethermind.State/TrieStoreScopeProvider.cs +++ b/src/Nethermind/Nethermind.State/TrieStoreScopeProvider.cs @@ -181,7 +181,8 @@ public void Set(Address key, Account? account) public IWorldStateScopeProvider.IStorageWriteBatch CreateStorageWriteBatch(Address address, int estimatedEntries) { - return new StorageTreeBulkWriteBatch(estimatedEntries, scope.LookupStorageTree(address), this, address); + return new StorageTreeBulkWriteBatch(estimatedEntries, scope.LookupStorageTree(address), + (address, rootHash) => MarkDirty(address, rootHash), address); } public void MarkDirty(AddressAsKey address, Hash256 storageTreeRootHash) @@ -224,10 +225,15 @@ static Account ThrowNullAccount(Address address) } } - private class StorageTreeBulkWriteBatch(int estimatedEntries, StorageTree storageTree, WorldStateWriteBatch worldStateWriteBatch, AddressAsKey address) : IWorldStateScopeProvider.IStorageWriteBatch + public class StorageTreeBulkWriteBatch( + int estimatedEntries, + StorageTree storageTree, + Action onRootUpdated, + AddressAsKey address, + bool commit = false) : IWorldStateScopeProvider.IStorageWriteBatch { // Slight optimization on small contract as the index hash can be precalculated in some case. 
- private const int MIN_ENTRIES_TO_BATCH = 16; + public const int MIN_ENTRIES_TO_BATCH = 16; private bool _hasSelfDestruct; private bool _wasSetCalled = false; @@ -259,11 +265,9 @@ public void Clear() { storageTree.RootHash = Keccak.EmptyTreeHash; } - else - { - if (_wasSetCalled) throw new InvalidOperationException("Must call clear first in a storage write batch"); - _hasSelfDestruct = true; - } + + if (_wasSetCalled) throw new InvalidOperationException("Must call clear first in a storage write batch"); + _hasSelfDestruct = true; } public void Dispose() @@ -285,13 +289,20 @@ public void Dispose() if (hasSet) { - storageTree.UpdateRootHash(_bulkWrite?.Count > 64); - worldStateWriteBatch.MarkDirty(address, storageTree.RootHash); + if (commit) + { + storageTree.Commit(); + } + else + { + storageTree.UpdateRootHash(_bulkWrite?.Count > 64); + } + onRootUpdated(address, storageTree.RootHash); } } } - private class KeyValueWithBatchingBackedCodeDb(IKeyValueStoreWithBatching codeDb) : IWorldStateScopeProvider.ICodeDb + public class KeyValueWithBatchingBackedCodeDb(IKeyValueStoreWithBatching codeDb) : IWorldStateScopeProvider.ICodeDb { public byte[]? GetCode(in ValueHash256 codeHash) { diff --git a/src/Nethermind/Nethermind.Synchronization.Test/BlockDownloaderTests.cs b/src/Nethermind/Nethermind.Synchronization.Test/BlockDownloaderTests.cs index b750fb3d64e..96cb3776394 100644 --- a/src/Nethermind/Nethermind.Synchronization.Test/BlockDownloaderTests.cs +++ b/src/Nethermind/Nethermind.Synchronization.Test/BlockDownloaderTests.cs @@ -843,6 +843,11 @@ private IContainer CreateNode(Action? 
configurer = null, IConf }) .AddSingleton(); + if (PseudoNethermindModule.TestUseFlat && configProvider.GetConfig().FastSync) + { + Assert.Ignore("Flat does not work when fast sync is on"); + } + configurer?.Invoke(b); return b .Build(); diff --git a/src/Nethermind/Nethermind.Synchronization.Test/E2ESyncTests.cs b/src/Nethermind/Nethermind.Synchronization.Test/E2ESyncTests.cs index 099d77cadb3..497c3378c13 100644 --- a/src/Nethermind/Nethermind.Synchronization.Test/E2ESyncTests.cs +++ b/src/Nethermind/Nethermind.Synchronization.Test/E2ESyncTests.cs @@ -66,7 +66,8 @@ public enum DbMode { Default, Hash, - NoPruning + NoPruning, + Flat } public static IEnumerable CreateTestCases() @@ -74,9 +75,11 @@ public static IEnumerable CreateTestCases() yield return new TestFixtureParameters(DbMode.Default, false); yield return new TestFixtureParameters(DbMode.Hash, false); yield return new TestFixtureParameters(DbMode.NoPruning, false); + yield return new TestFixtureParameters(DbMode.Flat, false); yield return new TestFixtureParameters(DbMode.Default, true); yield return new TestFixtureParameters(DbMode.Hash, true); yield return new TestFixtureParameters(DbMode.NoPruning, true); + yield return new TestFixtureParameters(DbMode.Flat, true); } private static TimeSpan SetupTimeout = TimeSpan.FromSeconds(60); @@ -157,6 +160,12 @@ private async Task CreateNode(PrivateKey nodeKey, Func(); + flatDbConfig.Enabled = true; + break; + } } var builder = new ContainerBuilder() @@ -261,6 +270,8 @@ public async Task FullSync() [Retry(5)] public async Task FastSync() { + if (dbMode == DbMode.Flat) Assert.Ignore(); + using CancellationTokenSource cancellationTokenSource = new CancellationTokenSource().ThatCancelAfter(TestTimeout); PrivateKey clientKey = TestItem.PrivateKeyC; @@ -294,6 +305,7 @@ private async Task SetPivot(SyncConfig syncConfig, CancellationToken cancellatio [Retry(5)] public async Task SnapSync() { + if (dbMode == DbMode.Flat) Assert.Ignore(); if (dbMode == DbMode.Hash) 
Assert.Ignore("Hash db does not support snap sync"); using CancellationTokenSource cancellationTokenSource = new CancellationTokenSource().ThatCancelAfter(TestTimeout); diff --git a/src/Nethermind/Nethermind.Trie.Test/TreePathTests.cs b/src/Nethermind/Nethermind.Trie.Test/TreePathTests.cs index da83b8a3da8..aa727d9bac9 100644 --- a/src/Nethermind/Nethermind.Trie.Test/TreePathTests.cs +++ b/src/Nethermind/Nethermind.Trie.Test/TreePathTests.cs @@ -200,6 +200,22 @@ public void TestScopedAppend() path.Length.Should().Be(0); } + [TestCase("", "0000000000000000")] + [TestCase("01", "1000000000000001")] + [TestCase("000102030405060708", "0123456780000009")] + [TestCase("000102030405060708090a0b0c0d0e", "0123456789abcdef")] // verifies upper nibble of byte 7 preserved + [TestCase("000102030405", "0123450000000006")] + public void TestEncodeWith8Byte(string nibbleHex, string expectedEncodedHex) + { + byte[] nibbles = string.IsNullOrEmpty(nibbleHex) ? [] : Bytes.FromHexString(nibbleHex); + TreePath path = TreePath.FromNibble(nibbles); + + Span buffer = stackalloc byte[8]; + path.EncodeWith8Byte(buffer); + + buffer.ToArray().ToHexString().Should().Be(expectedEncodedHex); + } + private static TreePath CreateFullTreePath() { TreePath path = new TreePath(); diff --git a/src/Nethermind/Nethermind.Trie.Test/TrieLeafIteratorTests.cs b/src/Nethermind/Nethermind.Trie.Test/TrieLeafIteratorTests.cs new file mode 100644 index 00000000000..7f7429d5b35 --- /dev/null +++ b/src/Nethermind/Nethermind.Trie.Test/TrieLeafIteratorTests.cs @@ -0,0 +1,162 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System; +using System.Collections.Generic; +using Nethermind.Core; +using Nethermind.Core.Crypto; +using Nethermind.Core.Test.Builders; +using Nethermind.Db; +using Nethermind.Logging; +using Nethermind.State; +using Nethermind.Trie.Pruning; +using NUnit.Framework; + +namespace Nethermind.Trie.Test; + +/// +/// Tests for 
TrieLeafIterator which provides in-order leaf iteration. +/// +[TestFixture] +public class TrieLeafIteratorTests +{ + private MemDb _db = null!; + private RawScopedTrieStore _trieStore = null!; + private StateTree _stateTree = null!; + + [SetUp] + public void SetUp() + { + _db = new MemDb(); + _trieStore = new RawScopedTrieStore(_db); + _stateTree = new StateTree(_trieStore, LimboLogs.Instance); + } + + [TearDown] + public void TearDown() + { + _db.Dispose(); + } + + [Test] + public void EmptyTrie_ReturnsNoLeaves() + { + TrieLeafIterator iterator = new TrieLeafIterator(_trieStore, Keccak.EmptyTreeHash); + + int count = 0; + while (iterator.MoveNext()) + { + count++; + } + + Assert.That(count, Is.EqualTo(0)); + } + + [Test] + public void SingleAccount_ReturnsOneLeaf() + { + _stateTree.Set(TestItem.AddressA, TestItem.GenerateIndexedAccount(0)); + _stateTree.Commit(); + + TrieLeafIterator iterator = new TrieLeafIterator(_trieStore, _stateTree.RootHash); + + int count = 0; + while (iterator.MoveNext()) + { + count++; + Assert.That(iterator.CurrentLeaf, Is.Not.Null); + Assert.That(iterator.CurrentLeaf!.IsLeaf, Is.True); + } + + Assert.That(count, Is.EqualTo(1)); + } + + [Test] + public void MultipleAccounts_ReturnsAllLeaves() + { + _stateTree.Set(TestItem.AddressA, TestItem.GenerateIndexedAccount(0)); + _stateTree.Set(TestItem.AddressB, TestItem.GenerateIndexedAccount(1)); + _stateTree.Set(TestItem.AddressC, TestItem.GenerateIndexedAccount(2)); + _stateTree.Commit(); + + TrieLeafIterator iterator = new TrieLeafIterator(_trieStore, _stateTree.RootHash); + + int count = 0; + while (iterator.MoveNext()) + { + count++; + Assert.That(iterator.CurrentLeaf, Is.Not.Null); + Assert.That(iterator.CurrentLeaf!.IsLeaf, Is.True); + } + + Assert.That(count, Is.EqualTo(3)); + } + + [Test] + public void Iterator_ReturnsLeavesInSortedOrder() + { + // Add accounts and get their hashed paths + _stateTree.Set(TestItem.AddressA, TestItem.GenerateIndexedAccount(0)); + 
_stateTree.Set(TestItem.AddressB, TestItem.GenerateIndexedAccount(1)); + _stateTree.Set(TestItem.AddressC, TestItem.GenerateIndexedAccount(2)); + _stateTree.Set(TestItem.AddressD, TestItem.GenerateIndexedAccount(3)); + _stateTree.Set(TestItem.AddressE, TestItem.GenerateIndexedAccount(4)); + _stateTree.Commit(); + + TrieLeafIterator iterator = new TrieLeafIterator(_trieStore, _stateTree.RootHash); + + // Store copies of paths to avoid any ref struct sharing issues + List paths = []; + while (iterator.MoveNext()) + { + // Copy the path bytes to a new array + paths.Add(iterator.CurrentPath.Path.Bytes.ToArray()); + } + + Assert.That(paths.Count, Is.EqualTo(5)); + + // Verify paths are in ascending order using byte comparison + for (int i = 1; i < paths.Count; i++) + { + int cmp = paths[i - 1].AsSpan().SequenceCompareTo(paths[i]); + Assert.That(cmp, Is.LessThan(0), + $"Paths should be in ascending order. Path[{i - 1}] should be < Path[{i}]"); + } + } + + [Test] + public void NullRoot_ReturnsNoLeaves() + { + TrieLeafIterator iterator = new TrieLeafIterator(_trieStore, null); + + int count = 0; + while (iterator.MoveNext()) + { + count++; + } + + Assert.That(count, Is.EqualTo(0)); + } + + [Test] + public void CurrentPath_MatchesKeccakOfAddress() + { + Address address = TestItem.AddressA; + _stateTree.Set(address, TestItem.GenerateIndexedAccount(0)); + _stateTree.Commit(); + + // Compute expected path + Hash256 expectedPath = Keccak.Compute(address.Bytes); + + TrieLeafIterator iterator = new TrieLeafIterator(_trieStore, _stateTree.RootHash); + + Assert.That(iterator.MoveNext(), Is.True); + + // Get actual path from iterator + byte[] actualPathBytes = iterator.CurrentPath.Path.Bytes.ToArray(); + byte[] expectedPathBytes = expectedPath.Bytes.ToArray(); + + Assert.That(actualPathBytes, Is.EqualTo(expectedPathBytes), + $"Expected: {expectedPath}, Actual: {iterator.CurrentPath.Path}"); + } +} diff --git a/src/Nethermind/Nethermind.Trie.Test/TrieTests.cs 
b/src/Nethermind/Nethermind.Trie.Test/TrieTests.cs index 39acb58ec63..b4e0f004f9d 100644 --- a/src/Nethermind/Nethermind.Trie.Test/TrieTests.cs +++ b/src/Nethermind/Nethermind.Trie.Test/TrieTests.cs @@ -1284,5 +1284,26 @@ public void Can_parallel_read_trees() } }); } + + [Test] + public void WarmUpPath_DoesNotThrow() + { + // Build a tree with extension, branch, and leaf nodes: _keyA, _keyB, _keyC, _keyD + using IPruningTrieStore trieStore = CreateTrieStore(); + PatriciaTree patriciaTree = new(trieStore, _logManager); + patriciaTree.Set(_keyA, _longLeaf1); + patriciaTree.Set(_keyB, _longLeaf2); + patriciaTree.Set(_keyC, _longLeaf1); + patriciaTree.Set(_keyD, _longLeaf2); + trieStore.CommitPatriciaTrie(0, patriciaTree); + + // Test warmup on various keys + patriciaTree.Invoking(t => t.WarmUpPath(_keyA)).Should().NotThrow(); // Existing key + patriciaTree.Invoking(t => t.WarmUpPath(_keyB)).Should().NotThrow(); // Existing key + patriciaTree.Invoking(t => t.WarmUpPath(_keyC)).Should().NotThrow(); // Existing key in different branch + patriciaTree.Invoking(t => t.WarmUpPath(_keyD)).Should().NotThrow(); // Existing key in different branch + patriciaTree.Invoking(t => t.WarmUpPath(Bytes.FromHexString("00000000000cc"))).Should().NotThrow(); // Non-existent key + patriciaTree.Invoking(t => t.WarmUpPath(Bytes.FromHexString("fffffffffffff"))).Should().NotThrow(); // Completely different path + } } } diff --git a/src/Nethermind/Nethermind.Trie/PatriciaTree.BulkSet.cs b/src/Nethermind/Nethermind.Trie/PatriciaTree.BulkSet.cs index 07938e21839..dda72d3bc44 100644 --- a/src/Nethermind/Nethermind.Trie/PatriciaTree.BulkSet.cs +++ b/src/Nethermind/Nethermind.Trie/PatriciaTree.BulkSet.cs @@ -166,7 +166,7 @@ private struct Context int nonNullChildCount = 0; if (entries.Length >= MinEntriesToParallelizeThreshold && nibMask == FullBranch && !flags.HasFlag(Flags.DoNotParallelize)) { - var jobs = new (int startIdx, int count, int nibble, TreePath appendedPath, TrieNode? 
currentChild, TrieNode? newChild)[TrieNode.BranchesCount]; + using ArrayPoolList<(int startIdx, int count, int nibble, TreePath appendedPath, TrieNode? currentChild, TrieNode? newChild)> jobs = new(TrieNode.BranchesCount, TrieNode.BranchesCount); Context closureCtx = ctx; BulkSetEntry[] originalEntriesArray = (flipCount % 2 == 0) ? ctx.originalEntriesArray : ctx.originalSortBufferArray; diff --git a/src/Nethermind/Nethermind.Trie/PatriciaTree.cs b/src/Nethermind/Nethermind.Trie/PatriciaTree.cs index 08b96d207fc..2bc7c2188a4 100644 --- a/src/Nethermind/Nethermind.Trie/PatriciaTree.cs +++ b/src/Nethermind/Nethermind.Trie/PatriciaTree.cs @@ -321,7 +321,7 @@ public void UpdateRootHash(bool canBeParallel = true) SetRootHash(RootRef?.Keccak ?? EmptyTreeHash, false); } - private void SetRootHash(Hash256? value, bool resetObjects) + public void SetRootHash(Hash256? value, bool resetObjects) { _rootHash = value ?? Keccak.EmptyTreeHash; // nulls were allowed before so for now we leave it this way if (_rootHash == Keccak.EmptyTreeHash) @@ -372,6 +372,37 @@ public virtual ReadOnlySpan Get(ReadOnlySpan rawKey, Hash256? rootHa } } + [SkipLocalsInit] + [DebuggerStepThrough] + public void WarmUpPath(ReadOnlySpan rawKey) + { + byte[]? array = null; + try + { + int nibblesCount = 2 * rawKey.Length; + Span nibbles = (rawKey.Length <= MaxKeyStackAlloc + ? stackalloc byte[MaxKeyStackAlloc] + : array = ArrayPool.Shared.Rent(nibblesCount)) + [..nibblesCount]; // Slice to exact size; + + Nibbles.BytesToNibbleBytes(rawKey, nibbles); + + TreePath emptyPath = TreePath.Empty; + TrieNode root = RootRef; + + DoWarmUpPath(nibbles, ref emptyPath, root); + } + catch (TrieException e) + { + EnhanceException(rawKey, RootHash, e); + throw; + } + finally + { + if (array is not null) ArrayPool.Shared.Return(array); + } + } + [DebuggerStepThrough] public byte[]? GetNodeByPath(byte[] nibbles, Hash256? 
rootHash = null) { @@ -897,6 +928,64 @@ private SpanSource GetNew(Span remainingKey, ref TreePath path, TrieNode? } } + private void DoWarmUpPath(Span remainingKey, ref TreePath path, TrieNode? node) + { + int originalPathLength = path.Length; + + try + { + while (true) + { + if (node is null) + { + // If node read, then missing node. If value read.... what is it suppose to be then? + return; + } + + // Call FindCachedOrUnknown on some path. + if (node.IsSealed && node.Keccak is not null && path.Length % 2 == 1) node = TrieStore.FindCachedOrUnknown(path, node!.Keccak); + node.ResolveNode(TrieStore, path); + + if (node.IsLeaf || node.IsExtension) + { + int commonPrefixLength = remainingKey.CommonPrefixLength(node.Key); + if (commonPrefixLength == node.Key!.Length) + { + if (node.IsLeaf) + { + // Done + return; + } + + // Continue traversal to the child of the extension + path.AppendMut(node.Key); + TrieNode? extensionChild = node.GetChildWithChildPath(TrieStore, ref path, 0, keepChildRef: true); + remainingKey = remainingKey[node!.Key.Length..]; + node = extensionChild; + + continue; + } + + // No node match + return; + } + + int nextNib = remainingKey[0]; + + path.AppendMut(nextNib); + TrieNode? 
child = node.GetChildWithChildPath(TrieStore, ref path, nextNib, keepChildRef: true); + + // Continue loop with child as current node + node = child; + remainingKey = remainingKey[1..]; + } + } + finally + { + path.TruncateMut(originalPathLength); + } + } + /// /// Run tree visitor /// diff --git a/src/Nethermind/Nethermind.Trie/Pruning/TreePath.cs b/src/Nethermind/Nethermind.Trie/Pruning/TreePath.cs index 5ab32a9a613..e04874e16eb 100644 --- a/src/Nethermind/Nethermind.Trie/Pruning/TreePath.cs +++ b/src/Nethermind/Nethermind.Trie/Pruning/TreePath.cs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: LGPL-3.0-only using System; +using System.Buffers.Binary; using System.Diagnostics; using System.Diagnostics.CodeAnalysis; using System.Runtime.CompilerServices; @@ -22,7 +23,7 @@ namespace Nethermind.Trie; /// [Todo("check if its worth it to change the length to byte, or if it actually make things slower.")] [Todo("check if its worth it to not clear byte during TruncateMut, but will need proper comparator, span copy, etc.")] -public struct TreePath : IEquatable +public struct TreePath : IEquatable, IComparable { public const int MemorySize = 36; public ValueHash256 Path; @@ -109,7 +110,7 @@ public readonly TreePath Append(int nib, int repeat) return copy; } - internal void AppendMut(ReadOnlySpan nibbles) + public void AppendMut(ReadOnlySpan nibbles) { if (nibbles.Length == 0) return; if (nibbles.Length == 1) @@ -316,6 +317,8 @@ public readonly int CompareTo(in TreePath otherTree) return Length.CompareTo(otherTree.Length); } + int IComparable.CompareTo(TreePath otherTree) => CompareTo(in otherTree); + /// /// Compare with otherTree, as if this TreePath was truncated to `length`. /// @@ -339,6 +342,30 @@ public readonly int CompareToTruncated(in TreePath otherTree, int length) return length.CompareTo(otherTree.Length); } + /// + /// Returns the Path as lower bound (remaining nibbles are 0x0, which TreePath already guarantees). 
+ /// + public readonly ValueHash256 ToLowerBoundPath() => Path; + + /// + /// Returns the Path extended to 64 nibbles with 0xF (upper bound of subtree). + /// + public readonly ValueHash256 ToUpperBoundPath() + { + ValueHash256 result = Path; + Span bytes = result.BytesAsSpan; + + int startByte = Length / 2; + if (Length % 2 == 1) + { + bytes[startByte] |= 0x0F; + startByte++; + } + bytes[startByte..].Fill(0xFF); + + return result; + } + private static ReadOnlySpan ZeroMasksData => new byte[] { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -411,6 +438,15 @@ public bool StartsWith(TreePath otherPath) { return Truncate(otherPath.Length) == otherPath; } + + public readonly void EncodeWith8Byte(Span buffer) + { + Path.Bytes[..8].CopyTo(buffer); + byte lengthAsByte = (byte)Length; + + // Pack length into lower 4 bits of last byte (upper 4 bits contain path data) + buffer[8 - 1] = (byte)((buffer[8 - 1] & 0xf0) | (lengthAsByte & 0x0f)); + } } public static class TreePathExtensions diff --git a/src/Nethermind/Nethermind.Trie/TrieLeafIterator.cs b/src/Nethermind/Nethermind.Trie/TrieLeafIterator.cs new file mode 100644 index 00000000000..ad4c2ec2011 --- /dev/null +++ b/src/Nethermind/Nethermind.Trie/TrieLeafIterator.cs @@ -0,0 +1,221 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System; +using System.Runtime.CompilerServices; +using Nethermind.Core.Crypto; +using Nethermind.Trie.Pruning; + +namespace Nethermind.Trie; + +/// +/// Provides in-order (sorted by path) iteration over all leaf nodes in a Patricia trie. +/// Uses stack-based traversal to avoid recursion and yields leaves in lexicographical order. 
+/// +public ref struct TrieLeafIterator +{ + private const int MaxStackDepth = 128; // Generous depth for any practical trie + private const int FullPathLength = 64; // 32 bytes = 64 nibbles + + private struct StackFrame + { + public TrieNode Node; + public TreePath Path; + public int ChildIndex; // For branch nodes: next child to visit (0-15) + public bool Processed; // For leaf/extension: whether we've processed this node + } + + private readonly ITrieNodeResolver _resolver; + private readonly Action? _onException; + private readonly StackFrame[] _stack; + private readonly ValueHash256 _startPath; + private readonly ValueHash256 _endPath; + private readonly bool _hasRange; + private int _stackDepth; + private TreePath _currentPath; + private TrieNode? _currentLeaf; + + public TrieLeafIterator( + ITrieNodeResolver resolver, + Hash256? rootHash, + Action? onException = null, + in ValueHash256 startPath = default, + in ValueHash256 endPath = default) + { + _resolver = resolver; + _onException = onException; + _stack = new StackFrame[MaxStackDepth]; + _startPath = startPath; + _endPath = endPath; + _hasRange = startPath != default || endPath != default; + _stackDepth = 0; + _currentPath = default; + _currentLeaf = null; + + if (rootHash is not null && rootHash != Keccak.EmptyTreeHash) + { + TreePath emptyPath = TreePath.Empty; + TrieNode root = resolver.FindCachedOrUnknown(emptyPath, rootHash); + Push(root, emptyPath); + } + } + + public readonly TreePath CurrentPath => _currentPath; + public readonly TrieNode? 
CurrentLeaf => _currentLeaf; + + public bool MoveNext() + { + while (_stackDepth > 0) + { + ref StackFrame frame = ref _stack[_stackDepth - 1]; + + // Resolve the node if needed + try + { + frame.Node.ResolveNode(_resolver, frame.Path); + } + catch (TrieNodeException ex) + { + _onException?.Invoke(ex); + Pop(); + continue; + } + + switch (frame.Node.NodeType) + { + case NodeType.Leaf: + // Found a leaf - compute full path and check range + _currentPath = frame.Path.Append(frame.Node.Key); + _currentLeaf = frame.Node; + Pop(); + + // Check range bounds if applicable + if (_hasRange) + { + if (_currentPath.Path.CompareTo(_startPath) < 0) continue; // Before start, skip + + if (_currentPath.Path.CompareTo(_endPath) >= 0) + { + // At or past end, stop iteration + _stackDepth = 0; + _currentLeaf = null; + _currentPath = default; + return false; + } + } + return true; + + case NodeType.Extension: + if (!frame.Processed) + { + frame.Processed = true; + // Follow the extension to its child + TreePath childPath = frame.Path.Append(frame.Node.Key); + + // Range check for extension: skip if max path of subtree < start + if (_hasRange && childPath.ToUpperBoundPath() < _startPath) break; + // Range check: stop if min path of subtree >= end + if (_hasRange && childPath.ToLowerBoundPath() >= _endPath) + { + _stackDepth = 0; + continue; + } + + TrieNode? 
child = frame.Node.GetChildWithChildPath(_resolver, ref childPath, 0); + if (child is not null) + { + Push(child, childPath); + } + } + else + { + Pop(); + } + break; + + case NodeType.Branch: + // Find next non-null child within range + bool foundChild = false; + + // Compute start child index based on range (skip children before startPath) + if (_hasRange && frame.ChildIndex == 0) + { + frame.ChildIndex = GetStartChildIndex(frame.Path, _startPath); + } + + while (frame.ChildIndex < 16) + { + int childIdx = frame.ChildIndex; + frame.ChildIndex++; + + TreePath childPath = frame.Path.Append(childIdx); + + // Range check: stop if child's min path >= end + if (_hasRange && childPath.ToLowerBoundPath() >= _endPath) + { + _stackDepth = 0; + foundChild = true; // Exit the outer loop cleanly + break; + } + + TrieNode? child = frame.Node.GetChildWithChildPath(_resolver, ref childPath, childIdx); + if (child is not null) + { + Push(child, childPath); + foundChild = true; + break; + } + } + + if (!foundChild) + { + Pop(); + } + break; + + default: + // Unknown or other node type - skip + Pop(); + break; + } + } + + _currentLeaf = null; + _currentPath = default; + return false; + } + + /// + /// Get the starting child index for a branch node based on the start path. 
+ /// + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static int GetStartChildIndex(in TreePath branchPath, in ValueHash256 startPath) + { + if (branchPath.Path.CompareTo(startPath) >= 0) return 0; + return new TreePath(startPath, FullPathLength)[branchPath.Length]; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private void Push(TrieNode node, in TreePath path) + { + if (_stackDepth >= MaxStackDepth) + { + ThrowStackOverflow(); + } + + ref StackFrame frame = ref _stack[_stackDepth]; + frame.Node = node; + frame.Path = path; + frame.ChildIndex = 0; + frame.Processed = false; + _stackDepth++; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private void Pop() + { + _stackDepth--; + } + + private static void ThrowStackOverflow() => throw new InvalidOperationException("TrieLeafIterator stack overflow - trie depth exceeds maximum"); +} diff --git a/src/Nethermind/Nethermind.Trie/TrieNode.cs b/src/Nethermind/Nethermind.Trie/TrieNode.cs index 52dc5e0b1c9..ea7885709dc 100644 --- a/src/Nethermind/Nethermind.Trie/TrieNode.cs +++ b/src/Nethermind/Nethermind.Trie/TrieNode.cs @@ -704,7 +704,7 @@ public int AppendChildPath(ref TreePath currentPath, int childIndex) return childNode; } - public TrieNode? GetChildWithChildPath(ITrieNodeResolver tree, ref TreePath childPath, int childIndex) + public TrieNode? GetChildWithChildPath(ITrieNodeResolver tree, ref TreePath childPath, int childIndex, bool keepChildRef = false) { /* extensions store value before the child while branches store children before the value * so just to treat them in the same way we update index on extensions @@ -736,7 +736,7 @@ public int AppendChildPath(ref TreePath currentPath, int childIndex) // Don't unresolve nodes with path length <= 4; there should be relatively few and they should fit // in RAM, but they are hit quite a lot, and don't have very good data locality. 
// That said, in practice, it does nothing notable, except for significantly improving benchmark score. - if (child?.IsPersisted == true && childPath.Length > 4 && childPath.Length % 2 == 0) + if (child?.IsPersisted == true && !keepChildRef && childPath.Length > 4 && childPath.Length % 2 == 0) { UnresolveChild(childIndex); } diff --git a/src/Nethermind/Nethermind.Trie/TrieNodeException.cs b/src/Nethermind/Nethermind.Trie/TrieNodeException.cs index 1899eb2e8c7..99429e2785a 100644 --- a/src/Nethermind/Nethermind.Trie/TrieNodeException.cs +++ b/src/Nethermind/Nethermind.Trie/TrieNodeException.cs @@ -6,16 +6,11 @@ namespace Nethermind.Trie; -public class TrieNodeException : TrieException +public class TrieNodeException(string message, TreePath path, Hash256 keccak, Exception? inner = null) + : TrieException(message, inner) { - public ValueHash256 NodeHash { get; private set; } - public TreePath Path { get; private set; } + public ValueHash256 NodeHash { get; private set; } = keccak; + public TreePath Path { get; private set; } = path; public string? EnhancedMessage { get; set; } public override string Message => EnhancedMessage is null ? base.Message : EnhancedMessage + Environment.NewLine + base.Message; - - public TrieNodeException(string message, TreePath path, Hash256 keccak, Exception? 
inner = null) : base(message, inner) - { - NodeHash = keccak; - Path = path; - } } diff --git a/src/Nethermind/Nethermind.Trie/TrieStatsCollector.cs b/src/Nethermind/Nethermind.Trie/TrieStatsCollector.cs index 747f8df258f..eac2aac2a8d 100644 --- a/src/Nethermind/Nethermind.Trie/TrieStatsCollector.cs +++ b/src/Nethermind/Nethermind.Trie/TrieStatsCollector.cs @@ -61,12 +61,17 @@ public readonly Context AddStorage(in ValueHash256 storage) public bool ExpectAccounts { get; } public TrieStatsCollector(IKeyValueStore codeKeyValueStore, ILogManager logManager, CancellationToken cancellationToken = default, bool expectAccounts = true) + : this(codeKeyValueStore, logManager, "Trie Verification", cancellationToken, expectAccounts) + { + } + + protected TrieStatsCollector(IKeyValueStore codeKeyValueStore, ILogManager logManager, string progressTrackerName, CancellationToken cancellationToken, bool expectAccounts) { _codeKeyValueStore = codeKeyValueStore ?? throw new ArgumentNullException(nameof(codeKeyValueStore)); _logger = logManager.GetClassLogger(); ExpectAccounts = expectAccounts; _cancellationToken = cancellationToken; - _progressTracker = new VisitorProgressTracker("Trie Verification", logManager); + _progressTracker = new VisitorProgressTracker(progressTrackerName, logManager); } public TrieStats Stats { get; } = new(); @@ -131,7 +136,7 @@ public void VisitExtension(in Context nodeContext, TrieNode node) IncrementLevel(nodeContext, isLeaf: false); } - public void VisitLeaf(in Context nodeContext, TrieNode node) + public virtual void VisitLeaf(in Context nodeContext, TrieNode node) { if (nodeContext.IsStorage) { diff --git a/src/Nethermind/Nethermind.Trie/VisitorProgressTracker.cs b/src/Nethermind/Nethermind.Trie/VisitorProgressTracker.cs index 33817dfece0..a662718c8cc 100644 --- a/src/Nethermind/Nethermind.Trie/VisitorProgressTracker.cs +++ b/src/Nethermind/Nethermind.Trie/VisitorProgressTracker.cs @@ -16,7 +16,7 @@ namespace Nethermind.Trie; /// public class 
VisitorProgressTracker { - private const int Level3Depth = 4; // 4 nibbles + public const int Level3Depth = 4; // 4 nibbles private const int MaxNodes = 65536; // 16^4 possible 4-nibble prefixes private int _seenCount; // Count of level-3 nodes seen (or estimated from shallow leaves) @@ -27,15 +27,18 @@ public class VisitorProgressTracker private readonly ProgressLogger _logger; private readonly string _operationName; private readonly int _reportingInterval; + private readonly bool _printNodes; public VisitorProgressTracker( string operationName, ILogManager logManager, - int reportingInterval = 100_000) + int reportingInterval = 100_000, + bool printNodes = true) { ArgumentNullException.ThrowIfNull(logManager); _operationName = operationName; + _printNodes = printNodes; _logger = new ProgressLogger(operationName, logManager); _logger.Reset(0, 10000); // Use 10000 for 0.01% precision _logger.SetFormat(FormatProgress); @@ -48,9 +51,9 @@ private string FormatProgress(ProgressLogger logger) float percentage = Math.Clamp(logger.CurrentValue / 10000f, 0, 1); long work = Interlocked.Read(ref _totalWorkDone); string workStr = work >= 1_000_000 ? $"{work / 1_000_000.0:F1}M" : $"{work:N0}"; - return $"{_operationName,-25} {percentage.ToString("P2", CultureInfo.InvariantCulture),8} " + - Progress.GetMeter(percentage, 1) + - $" nodes: {workStr,8}"; + return _printNodes + ? 
$"{_operationName,-25} {percentage.ToString("P2", CultureInfo.InvariantCulture),8} {Progress.GetMeter(percentage, 1)} nodes: {workStr,8}" + : $"{_operationName,-25} {percentage.ToString("P2", CultureInfo.InvariantCulture),8} {Progress.GetMeter(percentage, 1)}"; } /// @@ -68,10 +71,12 @@ public void OnNodeVisited(in TreePath path, bool isStorage = false, bool isLeaf // Only track state nodes for progress estimation at level 3 if (!isStorage) { + bool shouldLog = false; if (path.Length == Level3Depth) { // Node at exactly level 3 (4 nibbles): count as 1 node Interlocked.Increment(ref _seenCount); + shouldLog = true; } else if (isLeaf && path.Length > 0 && path.Length < Level3Depth) { @@ -86,11 +91,17 @@ public void OnNodeVisited(in TreePath path, bool isStorage = false, bool isLeaf // Add estimated coverage Interlocked.Add(ref _seenCount, estimatedNodes); + shouldLog = true; } // Nodes at depth > Level3Depth are ignored for progress calculation // Log progress at intervals (based on state nodes only) if (Interlocked.Increment(ref _nodeCount) % _reportingInterval == 0) + { + shouldLog = true; + } + + if (shouldLog) { LogProgress(); } diff --git a/src/Nethermind/Nethermind.slnx b/src/Nethermind/Nethermind.slnx index c64f1a920b7..36fd814fca6 100644 --- a/src/Nethermind/Nethermind.slnx +++ b/src/Nethermind/Nethermind.slnx @@ -70,6 +70,7 @@ + @@ -121,6 +122,7 @@ +