diff --git a/.github/workflows/nethermind-tests-flat.yml b/.github/workflows/nethermind-tests-flat.yml
new file mode 100644
index 00000000000..ee79846610c
--- /dev/null
+++ b/.github/workflows/nethermind-tests-flat.yml
@@ -0,0 +1,96 @@
+name: Nethermind tests (Flat DB)
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+on:
+ pull_request:
+ push:
+ branches: [master]
+ workflow_dispatch:
+
+env:
+ TEST_USE_FLAT: "1"
+ DOTNET_SYSTEM_CONSOLE_ALLOW_ANSI_COLOR_REDIRECTION: 1
+ TERM: xterm
+
+jobs:
+ tests:
+ name: Run ${{ matrix.project }}${{ matrix.chunk && format(' ({0})', matrix.chunk) || '' }}
+ runs-on: ubuntu-latest
+ continue-on-error: true
+ strategy:
+ matrix:
+ project:
+ - Ethereum.Abi.Test
+ - Ethereum.Basic.Test
+ - Ethereum.Blockchain.Block.Test
+ - Ethereum.Blockchain.Pyspec.Test
+ - Ethereum.Difficulty.Test
+ - Ethereum.HexPrefix.Test
+ - Ethereum.KeyAddress.Test
+ - Ethereum.KeyStore.Test
+ - Ethereum.Legacy.Blockchain.Block.Test
+ - Ethereum.Legacy.Transition.Test
+ - Ethereum.Legacy.VM.Test
+ - Ethereum.PoW.Test
+ - Ethereum.Rlp.Test
+ - Ethereum.Transaction.Test
+ - Ethereum.Trie.Test
+ - Nethermind.Consensus.Test
+ - Nethermind.Core.Test
+ - Nethermind.Db.Test
+ - Nethermind.Runner.Test
+ chunk: ['']
+ include:
+ - project: Ethereum.Legacy.Blockchain.Test
+ chunk: 1of4
+ - project: Ethereum.Legacy.Blockchain.Test
+ chunk: 2of4
+ - project: Ethereum.Legacy.Blockchain.Test
+ chunk: 3of4
+ - project: Ethereum.Legacy.Blockchain.Test
+ chunk: 4of4
+ steps:
+ - name: Check out repository
+ uses: actions/checkout@v6
+ with:
+ submodules: ${{ startsWith(matrix.project, 'Ethereum.') && 'recursive' || 'false' }}
+
+ - name: Set up .NET
+ uses: actions/setup-dotnet@v5
+
+ - name: ${{ matrix.project }}
+ id: test
+ working-directory: src/Nethermind/${{ matrix.project }}
+ env:
+ TEST_CHUNK: ${{ matrix.chunk }}
+ run: |
+ dotnet test --project ${{ matrix.project }}.csproj -c release
+
+ - name: Save test outcome
+ if: success() || failure()
+ run: echo "${{ steps.test.outcome == 'success' }}," >> test.outcome
+
+ - name: Upload test outcome
+ if: success() || failure()
+ uses: actions/upload-artifact@v4
+ with:
+ name: ${{ matrix.project }}${{ matrix.chunk && format('-{0}', matrix.chunk) || '' }}-flat-outcome
+ path: test.outcome
+ retention-days: 1
+
+ tests-summary:
+ name: Tests summary
+ needs: tests
+ runs-on: ubuntu-latest
+ steps:
+ - name: Download test outcomes
+ uses: actions/download-artifact@v4
+
+ - name: Ensure all tests passed
+ run: |
+ data=$(cat **/test.outcome) && data=${data%?}
+ passed=$(echo "[$data]" | jq -r 'all')
+ [[ "$passed" == "true" ]] && exit 0 || exit 1
diff --git a/.gitignore b/.gitignore
index 6f14b7b2d89..7ca90b44ee1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -445,3 +445,6 @@ FodyWeavers.xsd
## Nethermind
keystore/
/.githooks
+
+# Worktrees
+.worktrees/
diff --git a/Directory.Packages.props b/Directory.Packages.props
index 2112ae3eadd..964bd02f599 100644
--- a/Directory.Packages.props
+++ b/Directory.Packages.props
@@ -82,6 +82,7 @@
+
diff --git a/cspell.json b/cspell.json
index 313bcb6496c..ec2128db069 100644
--- a/cspell.json
+++ b/cspell.json
@@ -101,12 +101,14 @@
"bootnodes",
"bottlenecked",
"browsable",
+ "bsearch",
"btcs",
"buildtransitive",
"bulkset",
"bursty",
"buterin",
"bylica",
+ "cacheline",
"bytecodes",
"callcode",
"calldatacopy",
@@ -170,6 +172,7 @@
"deserialised",
"dests",
"devirtualize",
+ "devirtualized",
"devnet",
"devnets",
"devp2p",
@@ -237,7 +240,9 @@
"extopcodes",
"extradata",
"extstaticcall",
+ "Exitor",
"fastbin",
+ "Fastpath",
"fastlz",
"fastmod",
"fastsync",
@@ -292,6 +297,7 @@
"hostnames",
"hotstuff",
"hyperthreading",
+ "HUGEPAGE",
"idxs",
"iface",
"ikvp",
@@ -345,6 +351,7 @@
"ldarg",
"ldfld",
"lemire's",
+ "levelname",
"libc",
"libdl",
"libp",
@@ -361,6 +368,8 @@
"machdep",
"machinename",
"madv",
+ "Madvise",
+ "madvise",
"maiboroda",
"mainchain",
"mallopt",
@@ -379,10 +388,12 @@
"maxlevel",
"maxpriorityfee",
"mclbn",
+ "mbpk",
"mcmc",
"mcopy",
"mellor",
"memberwise",
+ "memcolumndb",
"memin",
"meminstart",
"meminsz",
@@ -411,6 +422,7 @@
"morden",
"movbe",
"movzx",
+ "Mpmc",
"mres",
"mscorlib",
"msgrecv",
@@ -457,6 +469,7 @@
"nodestore",
"nodetype",
"nofile",
+ "noninteractive",
"nonposdao",
"nonstring",
"nops",
@@ -483,6 +496,7 @@
"owlf",
"pacaya",
"parallelizable",
+ "patriciatree",
"paweł",
"pctg",
"pearce",
@@ -490,6 +504,7 @@
"pendingtxs",
"perfnet",
"perfolizer",
+ "Persistences",
"permissioned",
"pgrep",
"physicalcpu",
@@ -499,6 +514,7 @@
"pkcs",
"pmsg",
"poacore",
+ "Populator",
"poaps",
"podc",
"popcnt",
@@ -509,6 +525,7 @@
"powm",
"preconf",
"preconfirmation",
+ "pregenesis",
"predeploy",
"prefixedf",
"prefund",
@@ -560,6 +577,7 @@
"resettables",
"retesteth",
"retf",
+ "ribbonfilter",
"returncode",
"returndata",
"returndatacopy",
@@ -578,6 +596,7 @@
"rocksdb",
"ronin",
"roothash",
+ "rootref",
"rormask",
"rpcurl",
"runtimeconfig",
@@ -611,11 +630,13 @@
"sload",
"smod",
"somelabel",
+ "Spmc",
"spaceneth",
"spammy",
"sparkline",
"spinlocks",
"squarify",
+ "srcset",
"ssse",
"sstfiles",
"sstore",
@@ -680,6 +701,7 @@
"trienode",
"triestore",
"trietest",
+ "Triewarmer",
"trietestnextprev",
"triggerable",
"tstore",
@@ -723,6 +745,8 @@
"upto",
"upvoting",
"vbmi",
+ "verifytrie",
+ "verkle",
"vitalik",
"vmovups",
"vmtrace",
@@ -734,6 +758,8 @@
"vpor",
"vptest",
"vzeroupper",
+ "Wakeup",
+ "wakeup",
"wamp",
"warmcoinbase",
"wblob",
@@ -744,6 +770,7 @@
"worklet",
"worklist",
"worldchain",
+ "worldscope",
"worldstate",
"writebatch",
"writeoptions",
@@ -758,6 +785,7 @@
"yparity",
"zcompressor",
"zdecompressor",
+ "zerobytes",
"zhizhu",
"zstandard",
"zstd",
diff --git a/src/Nethermind/Nethermind.Blockchain.Test/FullPruning/FullPruningDiskTest.cs b/src/Nethermind/Nethermind.Blockchain.Test/FullPruning/FullPruningDiskTest.cs
index 98483a553cc..ec91713897a 100644
--- a/src/Nethermind/Nethermind.Blockchain.Test/FullPruning/FullPruningDiskTest.cs
+++ b/src/Nethermind/Nethermind.Blockchain.Test/FullPruning/FullPruningDiskTest.cs
@@ -17,6 +17,7 @@
using Nethermind.Core.Extensions;
using Nethermind.Core.Test.Blockchain;
using Nethermind.Core.Test.IO;
+using Nethermind.Core.Test.Modules;
using Nethermind.Db;
using Nethermind.Db.FullPruning;
using Nethermind.Db.Rocks;
@@ -136,6 +137,12 @@ protected override async Task RunFullPruning(CancellationToken cancellationToken
}
}
+ [SetUp]
+ public void Setup()
+ {
+ if (PseudoNethermindModule.TestUseFlat) Assert.Ignore("Disabled in flat");
+ }
+
[Test, MaxTime(Timeout.LongTestTime)]
public async Task prune_on_disk_multiple_times()
{
diff --git a/src/Nethermind/Nethermind.Consensus.Test/ShareableTxProcessingSourceTests.cs b/src/Nethermind/Nethermind.Consensus.Test/ShareableTxProcessingSourceTests.cs
index e8d1c1f95a7..0d6f6d8a949 100644
--- a/src/Nethermind/Nethermind.Consensus.Test/ShareableTxProcessingSourceTests.cs
+++ b/src/Nethermind/Nethermind.Consensus.Test/ShareableTxProcessingSourceTests.cs
@@ -7,6 +7,8 @@
using Nethermind.Core;
using Nethermind.Core.Test.Builders;
using Nethermind.Core.Test.Modules;
+using Nethermind.Evm.State;
+using Nethermind.Evm.TransactionProcessing;
using NUnit.Framework;
namespace Nethermind.Consensus.Test;
@@ -19,8 +21,8 @@ public void OnSubsequentBuild_GiveDifferentWorldState()
using IContainer container = new ContainerBuilder().AddModule(new TestNethermindModule()).Build();
IShareableTxProcessorSource shareableSource = container.Resolve();
- var scope1 = shareableSource.Build(Build.A.BlockHeader.TestObject);
- var scope2 = shareableSource.Build(Build.A.BlockHeader.TestObject);
+ var scope1 = shareableSource.Build(IWorldState.PreGenesis);
+ var scope2 = shareableSource.Build(IWorldState.PreGenesis);
scope1.WorldState.Should().NotBeSameAs(scope2.WorldState);
}
@@ -31,9 +33,9 @@ public void OnSubsequentBuild_AfterFirstScopeDispose_GiveSameWorldState()
using IContainer container = new ContainerBuilder().AddModule(new TestNethermindModule()).Build();
IShareableTxProcessorSource shareableSource = container.Resolve();
- var scope1 = shareableSource.Build(Build.A.BlockHeader.TestObject);
+ var scope1 = shareableSource.Build(IWorldState.PreGenesis);
scope1.Dispose();
- var scope2 = shareableSource.Build(Build.A.BlockHeader.TestObject);
+ var scope2 = shareableSource.Build(IWorldState.PreGenesis);
scope1.WorldState.Should().BeSameAs(scope2.WorldState);
}
diff --git a/src/Nethermind/Nethermind.Consensus/Processing/BranchProcessor.cs b/src/Nethermind/Nethermind.Consensus/Processing/BranchProcessor.cs
index 719adc62367..ce070783a71 100644
--- a/src/Nethermind/Nethermind.Consensus/Processing/BranchProcessor.cs
+++ b/src/Nethermind/Nethermind.Consensus/Processing/BranchProcessor.cs
@@ -70,8 +70,7 @@ public Block[] Process(BlockHeader? baseBlock, IReadOnlyList suggestedBlo
}
else
{
- BlockHeader? scopeBaseBlock = baseBlock ?? (suggestedBlock.IsGenesis ? suggestedBlock.Header : null);
- worldStateCloser = stateProvider.BeginScope(scopeBaseBlock);
+ worldStateCloser = stateProvider.BeginScope(baseBlock);
}
CancellationTokenSource? backgroundCancellation = new();
diff --git a/src/Nethermind/Nethermind.Core.Test/Modules/FlatDbManagerTestCompat.cs b/src/Nethermind/Nethermind.Core.Test/Modules/FlatDbManagerTestCompat.cs
new file mode 100644
index 00000000000..3e3ef35a28f
--- /dev/null
+++ b/src/Nethermind/Nethermind.Core.Test/Modules/FlatDbManagerTestCompat.cs
@@ -0,0 +1,56 @@
+// SPDX-FileCopyrightText: 2026 Demerzel Solutions Limited
+// SPDX-License-Identifier: LGPL-3.0-only
+
+using System;
+using System.Threading;
+using Nethermind.Core.Crypto;
+using Nethermind.State.Flat;
+using Nethermind.Trie.Pruning;
+using NUnit.Framework;
+
+namespace Nethermind.Core.Test.Modules;
+
+///
+/// A LOT of tests rely on the fact that the trie store will assume state is available as long as the state root is
+/// the empty tree, even if the block number is not -1. This does not work with flat. We will ignore it for now.
+///
+///
+internal class FlatDbManagerTestCompat(IFlatDbManager flatDbManager) : IFlatDbManager
+{
+ public SnapshotBundle GatherSnapshotBundle(in StateId baseBlock, ResourcePool.Usage usage)
+ {
+ IgnoreOnInvalidState(baseBlock);
+ return flatDbManager.GatherSnapshotBundle(baseBlock, usage);
+ }
+
+ public ReadOnlySnapshotBundle GatherReadOnlySnapshotBundle(in StateId baseBlock)
+ {
+ IgnoreOnInvalidState(baseBlock);
+ return flatDbManager.GatherReadOnlySnapshotBundle(baseBlock);
+ }
+
+ public bool HasStateForBlock(in StateId stateId)
+ {
+ IgnoreOnInvalidState(stateId);
+ return flatDbManager.HasStateForBlock(stateId);
+ }
+
+ private void IgnoreOnInvalidState(StateId stateId)
+ {
+ if (stateId.StateRoot == Keccak.EmptyTreeHash && stateId.BlockNumber != -1 &&
+ !flatDbManager.HasStateForBlock(stateId))
+ {
+ Assert.Ignore("Incompatible test");
+ }
+ }
+
+ public void FlushCache(CancellationToken cancellationToken) => flatDbManager.FlushCache(cancellationToken);
+
+ public void AddSnapshot(Snapshot snapshot, TransientResource transientResource) => flatDbManager.AddSnapshot(snapshot, transientResource);
+
+ public event EventHandler? ReorgBoundaryReached
+ {
+ add => flatDbManager.ReorgBoundaryReached += value;
+ remove => flatDbManager.ReorgBoundaryReached -= value;
+ }
+}
diff --git a/src/Nethermind/Nethermind.Core.Test/Modules/PseudoNethermindModule.cs b/src/Nethermind/Nethermind.Core.Test/Modules/PseudoNethermindModule.cs
index bc2b6a043fd..3381c6616c4 100644
--- a/src/Nethermind/Nethermind.Core.Test/Modules/PseudoNethermindModule.cs
+++ b/src/Nethermind/Nethermind.Core.Test/Modules/PseudoNethermindModule.cs
@@ -1,13 +1,16 @@
// SPDX-FileCopyrightText: 2024 Demerzel Solutions Limited
// SPDX-License-Identifier: LGPL-3.0-only
+using System;
using System.Reflection;
using Autofac;
using Nethermind.Api;
+using Nethermind.Blockchain.Synchronization;
using Nethermind.Config;
using Nethermind.Consensus;
using Nethermind.Consensus.Processing;
using Nethermind.Consensus.Scheduler;
+using Nethermind.Db;
using Nethermind.Init.Modules;
using Nethermind.JsonRpc;
using Nethermind.KeyStore;
@@ -16,9 +19,12 @@
using Nethermind.Serialization.Json;
using Nethermind.Serialization.Rlp;
using Nethermind.Specs.ChainSpecStyle;
+using Nethermind.State.Flat;
+using Nethermind.State.Flat.ScopeProvider;
using Nethermind.TxPool;
using Nethermind.Wallet;
using NSubstitute;
+using NUnit.Framework;
using Module = Autofac.Module;
namespace Nethermind.Core.Test.Modules;
@@ -32,9 +38,20 @@ namespace Nethermind.Core.Test.Modules;
///
public class PseudoNethermindModule(ChainSpec spec, IConfigProvider configProvider, ILogManager logManager) : Module
{
+ public static bool TestUseFlat = Environment.GetEnvironmentVariable("TEST_USE_FLAT") == "1";
+
protected override void Load(ContainerBuilder builder)
{
IInitConfig initConfig = configProvider.GetConfig();
+ if (TestUseFlat)
+ {
+ ISyncConfig syncConfig = configProvider.GetConfig();
+ if (syncConfig.FastSync || syncConfig.SnapSync)
+ {
+ Assert.Ignore("Flat does not work with fast sync or snap sync");
+ }
+ configProvider.GetConfig().Enabled = true;
+ }
base.Load(builder);
builder
@@ -58,6 +75,15 @@ protected override void Load(ContainerBuilder builder)
.AddSingleton()
.AddSingleton(Substitute.For())
+ // Flatdb (if used) needs a more complete memcolumndb implementation with snapshots and sorted views.
+ .AddSingleton>((_) => new TestMemColumnsDb())
+ .AddDecorator()
+ .Intercept((flatDbConfig) =>
+ {
+ // Dont want to make it very slow
+ flatDbConfig.TrieWarmerWorkerCount = 2;
+ })
+
// Rpc
.AddSingleton()
;
diff --git a/src/Nethermind/Nethermind.Core.Test/Modules/TestNethermindModule.cs b/src/Nethermind/Nethermind.Core.Test/Modules/TestNethermindModule.cs
index 2bd914a09d3..e031e6de2a9 100644
--- a/src/Nethermind/Nethermind.Core.Test/Modules/TestNethermindModule.cs
+++ b/src/Nethermind/Nethermind.Core.Test/Modules/TestNethermindModule.cs
@@ -7,6 +7,7 @@
using Nethermind.Config;
using Nethermind.Core.Specs;
using Nethermind.Core.Test.Builders;
+using Nethermind.Db;
using Nethermind.Logging;
using Nethermind.Serialization.Json;
using Nethermind.Specs;
diff --git a/src/Nethermind/Nethermind.Core.Test/TestMemColumnDb.cs b/src/Nethermind/Nethermind.Core.Test/TestMemColumnDb.cs
index 630f69bd37f..e1df1c913bb 100644
--- a/src/Nethermind/Nethermind.Core.Test/TestMemColumnDb.cs
+++ b/src/Nethermind/Nethermind.Core.Test/TestMemColumnDb.cs
@@ -8,9 +8,9 @@
namespace Nethermind.Core.Test;
public class TestMemColumnsDb : IColumnsDb
- where TKey : notnull
+ where TKey : struct, Enum
{
- private readonly IDictionary _columnDbs = new Dictionary();
+ private readonly IDictionary _columnDbs = new Dictionary();
public TestMemColumnsDb()
{
@@ -18,7 +18,7 @@ public TestMemColumnsDb()
public TestMemColumnsDb(params TKey[] keys)
{
- foreach (var key in keys)
+ foreach (TKey key in keys)
{
GetColumnDb(key);
}
@@ -29,14 +29,31 @@ public TestMemColumnsDb(params TKey[] keys)
public IColumnsWriteBatch StartWriteBatch()
{
+ EnsureAllKey();
return new InMemoryColumnWriteBatch(this);
}
public IColumnDbSnapshot CreateSnapshot()
{
- throw new NotSupportedException("Snapshot not implemented");
+ EnsureAllKey();
+ return new Snapshot(_columnDbs);
}
public void Dispose() { }
public void Flush(bool onlyWal = false) { }
+
+ private void EnsureAllKey()
+ {
+ foreach (TKey key in Enum.GetValues())
+ {
+ GetColumnDb(key);
+ }
+ }
+
+ private class Snapshot(IDictionary columns) : IColumnDbSnapshot
+ {
+ public IReadOnlyKeyValueStore GetColumn(TKey key) => columns[key];
+
+ public void Dispose() { }
+ }
}
diff --git a/src/Nethermind/Nethermind.Core.Test/TestMemDb.cs b/src/Nethermind/Nethermind.Core.Test/TestMemDb.cs
index b174709b966..c57b824934b 100644
--- a/src/Nethermind/Nethermind.Core.Test/TestMemDb.cs
+++ b/src/Nethermind/Nethermind.Core.Test/TestMemDb.cs
@@ -6,6 +6,8 @@
using System.Linq;
using System.Runtime.CompilerServices;
using FluentAssertions;
+using Nethermind.Core.Collections;
+using Nethermind.Core.Extensions;
using Nethermind.Db;
using Bytes = Nethermind.Core.Extensions.Bytes;
@@ -14,7 +16,7 @@ namespace Nethermind.Core.Test;
///
/// MemDB with additional tools for testing purposes since you can't use NSubstitute with refstruct
///
-public class TestMemDb : MemDb, ITunableDb
+public class TestMemDb : MemDb, ITunableDb, ISortedKeyValueStore
{
private readonly List<(byte[], ReadFlags)> _readKeys = new();
private readonly List<((byte[], byte[]?), WriteFlags)> _writes = new();
@@ -71,4 +73,68 @@ public void KeyWasWrittenWithFlags(byte[] key, WriteFlags flags, int times = 1)
public void KeyWasRemoved(Func cond, int times = 1) => _removedKeys.Count(cond).Should().Be(times);
public override IWriteBatch StartWriteBatch() => new InMemoryWriteBatch(this);
public override void Flush(bool onlyWal) => FlushCount++;
+
+ public byte[]? FirstKey => Keys.Min();
+ public byte[]? LastKey => Keys.Max();
+ public ISortedView GetViewBetween(ReadOnlySpan firstKeyInclusive, ReadOnlySpan lastKeyExclusive)
+ {
+ ArrayPoolList<(byte[], byte[]?)> sortedValue = new(1);
+
+ foreach (KeyValuePair keyValuePair in GetAll())
+ {
+ if (Bytes.BytesComparer.Compare(keyValuePair.Key, firstKeyInclusive) < 0)
+ {
+ continue;
+ }
+
+ if (Bytes.BytesComparer.Compare(keyValuePair.Key, lastKeyExclusive) >= 0)
+ {
+ continue;
+ }
+ sortedValue.Add((keyValuePair.Key, keyValuePair.Value));
+ }
+
+ sortedValue.AsSpan().Sort((it1, it2) => Bytes.BytesComparer.Compare(it1.Item1, it2.Item1));
+ return new FakeSortedView(sortedValue);
+ }
+
+ private class FakeSortedView(ArrayPoolList<(byte[], byte[]?)> list) : ISortedView
+ {
+ private int idx = -1;
+
+ public void Dispose()
+ {
+ list.Dispose();
+ }
+
+ public bool StartBefore(ReadOnlySpan value)
+ {
+ if (list.Count == 0) return false;
+
+ idx = 0;
+ while (idx < list.Count)
+ {
+ if (Bytes.BytesComparer.Compare(list[idx].Item1, value) >= 0)
+ {
+ idx--;
+ return true;
+ }
+ idx++;
+ }
+
+ // All keys are less than value - position at last element (largest key <= value)
+ idx = list.Count - 1;
+ return true;
+ }
+
+ public bool MoveNext()
+ {
+ idx++;
+ if (idx >= list.Count) return false;
+ return true;
+ }
+
+ public ReadOnlySpan CurrentKey => list[idx].Item1;
+ public ReadOnlySpan CurrentValue => list[idx].Item2;
+ }
}
diff --git a/src/Nethermind/Nethermind.Core.Test/Threading/ConcurrencyControllerTests.cs b/src/Nethermind/Nethermind.Core.Test/Threading/ConcurrencyControllerTests.cs
index 0ebc2b3bcb4..34072a55435 100644
--- a/src/Nethermind/Nethermind.Core.Test/Threading/ConcurrencyControllerTests.cs
+++ b/src/Nethermind/Nethermind.Core.Test/Threading/ConcurrencyControllerTests.cs
@@ -24,4 +24,19 @@ public void ThreadLimiterWillLimit()
limiter.TryTakeSlot(out _).Should().Be(true);
limiter.TryTakeSlot(out _).Should().Be(false);
}
+
+ [Test]
+ public void ThreadLimiterWillLimitWithManualRequest()
+ {
+ ConcurrencyController limiter = new ConcurrencyController(3);
+
+ limiter.TryRequestConcurrencyQuota().Should().Be(true);
+ limiter.TryRequestConcurrencyQuota().Should().Be(true);
+ limiter.TryRequestConcurrencyQuota().Should().Be(false);
+
+ limiter.ReturnConcurrencyQuota();
+
+ limiter.TryRequestConcurrencyQuota().Should().Be(true);
+ limiter.TryRequestConcurrencyQuota().Should().Be(false);
+ }
}
diff --git a/src/Nethermind/Nethermind.Core.Test/Utils/RefCountingTests.cs b/src/Nethermind/Nethermind.Core.Test/Utils/RefCountingTests.cs
new file mode 100644
index 00000000000..949a9179cbd
--- /dev/null
+++ b/src/Nethermind/Nethermind.Core.Test/Utils/RefCountingTests.cs
@@ -0,0 +1,72 @@
+using System.Threading;
+using FluentAssertions;
+using Nethermind.Core.Utils;
+using NUnit.Framework;
+
+namespace Nethermind.Core.Test.Utils;
+
+public class RefCountingTests
+{
+ private class TestRefCounting : RefCountingDisposable
+ {
+ private const int Used = 0;
+ private const int Cleaned = 1;
+
+ private int _cleaned = Used;
+ private int _tryCount;
+
+ public long TryCount => _tryCount;
+
+ public bool Try()
+ {
+ Interlocked.Increment(ref _tryCount);
+ return TryAcquireLease();
+ }
+
+ protected override void CleanUp()
+ {
+ var existing = Interlocked.Exchange(ref _cleaned, Cleaned);
+
+ // should be called only once and set it to used
+ existing.Should().Be(Used);
+ }
+ }
+
+ [Test]
+ public void Two_threads()
+ {
+ const int sleepInMs = 100;
+
+ var counter = new TestRefCounting();
+
+ var thread1 = new Thread(LeaseRelease);
+ var thread2 = new Thread(LeaseRelease);
+
+ thread1.Start();
+ thread2.Start();
+
+ Thread.Sleep(sleepInMs);
+
+ // dispose once
+ counter.Dispose();
+
+ thread1.Join();
+ thread2.Join();
+
+ const int minLeasesPerSecond = 1_000_000;
+ const int msInSec = 1000;
+ const int minLeaseCount = minLeasesPerSecond * sleepInMs / msInSec;
+
+ counter.TryCount.Should().BeGreaterThan(minLeaseCount,
+ $"On modern CPUs the speed of lease should be bigger than {minLeasesPerSecond} / s");
+
+ void LeaseRelease()
+ {
+ while (counter.Try())
+ {
+ // after lease, dispose
+ counter.Dispose();
+ }
+ }
+ }
+}
diff --git a/src/Nethermind/Nethermind.Core/Collections/CollectionExtensions.cs b/src/Nethermind/Nethermind.Core/Collections/CollectionExtensions.cs
index bf689b2519f..596af6faa9c 100644
--- a/src/Nethermind/Nethermind.Core/Collections/CollectionExtensions.cs
+++ b/src/Nethermind/Nethermind.Core/Collections/CollectionExtensions.cs
@@ -49,6 +49,14 @@ public static void AddRange(this ICollection list, IList items)
}
}
+ public static void AddOrUpdateRange(this IDictionary dict, IEnumerable> items)
+ {
+ foreach (KeyValuePair kv in items)
+ {
+ dict[kv.Key] = kv.Value;
+ }
+ }
+
[OverloadResolutionPriority(1)]
public static void AddRange(this ICollection list, IReadOnlyList items)
{
diff --git a/src/Nethermind/Nethermind.Core/Threading/ConcurrencyController.cs b/src/Nethermind/Nethermind.Core/Threading/ConcurrencyController.cs
index e841b44cd23..8242ca854d6 100644
--- a/src/Nethermind/Nethermind.Core/Threading/ConcurrencyController.cs
+++ b/src/Nethermind/Nethermind.Core/Threading/ConcurrencyController.cs
@@ -49,4 +49,17 @@ public void Dispose()
limiter.ReturnSlot();
}
}
+
+ public bool TryRequestConcurrencyQuota()
+ {
+ if (Interlocked.Decrement(ref _slots) > 0)
+ {
+ return true;
+ }
+
+ ReturnConcurrencyQuota();
+ return false;
+ }
+
+ public void ReturnConcurrencyQuota() => Interlocked.Increment(ref _slots);
}
diff --git a/src/Nethermind/Nethermind.Core/Threading/ReadWriteLockBox.cs b/src/Nethermind/Nethermind.Core/Threading/ReadWriteLockBox.cs
new file mode 100644
index 00000000000..e9c715585c7
--- /dev/null
+++ b/src/Nethermind/Nethermind.Core/Threading/ReadWriteLockBox.cs
@@ -0,0 +1,60 @@
+// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited
+// SPDX-License-Identifier: LGPL-3.0-only
+
+using System;
+using System.Threading;
+
+namespace Nethermind.Core.Threading;
+
+///
+/// Rust style wrapper of locked item. Make it a bit easier to know which object this lock is protecting.
+///
+///
+public readonly struct ReadWriteLockBox(T item)
+{
+ private readonly ReaderWriterLockSlim _lock = new();
+
+ public Lock EnterReadLock(out T item1)
+ {
+ item1 = item;
+ return new Lock(_lock, true);
+ }
+
+ public Lock EnterWriteLock(out T item1)
+ {
+ item1 = item;
+ return new Lock(_lock, false);
+ }
+
+ public readonly ref struct Lock : IDisposable
+ {
+ private readonly ReaderWriterLockSlim _rwLock;
+ private readonly bool _read;
+
+ public Lock(ReaderWriterLockSlim rwLock, bool read)
+ {
+ _rwLock = rwLock;
+ _read = read;
+ if (_read)
+ {
+ _rwLock.EnterReadLock();
+ }
+ else
+ {
+ _rwLock.EnterWriteLock();
+ }
+ }
+
+ public void Dispose()
+ {
+ if (_read)
+ {
+ _rwLock.ExitReadLock();
+ }
+ else
+ {
+ _rwLock.ExitWriteLock();
+ }
+ }
+ }
+}
diff --git a/src/Nethermind/Nethermind.Core/Utils/RefCountingDisposable.cs b/src/Nethermind/Nethermind.Core/Utils/RefCountingDisposable.cs
new file mode 100644
index 00000000000..042f9f572ad
--- /dev/null
+++ b/src/Nethermind/Nethermind.Core/Utils/RefCountingDisposable.cs
@@ -0,0 +1,139 @@
+using System;
+using System.Diagnostics;
+using System.Diagnostics.CodeAnalysis;
+using System.Runtime.InteropServices;
+using System.Threading;
+
+namespace Nethermind.Core.Utils;
+
+///
+/// Provides a component that can be disposed multiple times and runs its cleanup only on the last dispose.
+///
+public abstract class RefCountingDisposable : IDisposable
+{
+ private const int Single = 1;
+ private const int NoAccessors = 0;
+ private const int Disposing = -1;
+
+ protected PaddedValue _leases;
+
+ protected RefCountingDisposable(int initialCount = Single)
+ {
+ _leases.Value = initialCount;
+ }
+
+ public void AcquireLease()
+ {
+ if (!TryAcquireLease())
+ {
+ ThrowCouldNotAcquire();
+ }
+
+ [DoesNotReturn]
+ [StackTraceHidden]
+ static void ThrowCouldNotAcquire()
+ {
+ throw new InvalidOperationException("The lease cannot be acquired");
+ }
+ }
+
+ protected bool TryAcquireLease()
+ {
+ // Volatile read for starting value
+ long current = Volatile.Read(ref _leases.Value);
+ if (current == Disposing)
+ {
+ // Already disposed
+ return false;
+ }
+
+ while (true)
+ {
+ long prev = Interlocked.CompareExchange(ref _leases.Value, current + Single, current);
+ if (prev == current)
+ {
+ // Successfully acquired
+ return true;
+ }
+ if (prev == Disposing)
+ {
+ // Already disposed
+ return false;
+ }
+
+ // Try again with new starting value
+ current = prev;
+ // Add PAUSE instruction to reduce shared core contention
+ Thread.SpinWait(1);
+ }
+ }
+
+ ///
+ /// Disposes it once, decreasing the lease count by 1.
+ ///
+ public void Dispose() => ReleaseLeaseOnce();
+
+ private void ReleaseLeaseOnce()
+ {
+ // Volatile read for starting value
+ long current = Volatile.Read(ref _leases.Value);
+ if (current <= NoAccessors)
+ {
+ // Mismatched Acquire/Release
+ ThrowOverDisposed();
+ }
+
+ while (true)
+ {
+ long prev = Interlocked.CompareExchange(ref _leases.Value, current - Single, current);
+ if (prev != current)
+ {
+ current = prev;
+ // Add PAUSE instruction to reduce shared core contention
+ Thread.SpinWait(1);
+ continue;
+ }
+ if (prev == Single)
+ {
+ // Last use, try to dispose underlying
+ break;
+ }
+ if (prev <= NoAccessors)
+ {
+ // Mismatched Acquire/Release
+ ThrowOverDisposed();
+ }
+
+ // Successfully released
+ return;
+ }
+
+ if (Interlocked.CompareExchange(ref _leases.Value, Disposing, NoAccessors) == NoAccessors)
+ {
+ // set to disposed by this Release
+ CleanUp();
+ }
+
+ [DoesNotReturn]
+ [StackTraceHidden]
+ static void ThrowOverDisposed()
+ {
+ throw new ObjectDisposedException("The lease has already been disposed");
+ }
+ }
+
+ protected abstract void CleanUp();
+
+ public override string ToString()
+ {
+ var leases = Volatile.Read(ref _leases.Value);
+ return leases == Disposing ? "Disposed" : $"Leases: {leases}";
+ }
+
+ [StructLayout(LayoutKind.Explicit, Size = 128)]
+ protected struct PaddedValue
+ {
+ [FieldOffset(64)]
+ public long Value;
+ }
+}
diff --git a/src/Nethermind/Nethermind.Db.Rocks/Config/DbConfig.cs b/src/Nethermind/Nethermind.Db.Rocks/Config/DbConfig.cs
index c6004bcfc71..6b370d5f73b 100644
--- a/src/Nethermind/Nethermind.Db.Rocks/Config/DbConfig.cs
+++ b/src/Nethermind/Nethermind.Db.Rocks/Config/DbConfig.cs
@@ -1,6 +1,7 @@
// SPDX-FileCopyrightText: 2022 Demerzel Solutions Limited
// SPDX-License-Identifier: LGPL-3.0-only
+using System;
using Nethermind.Core.Extensions;
namespace Nethermind.Db.Rocks.Config;
@@ -182,7 +183,7 @@ public class DbConfig : IDbConfig
public ulong StateDbWriteBufferSize { get; set; } = (ulong)64.MB();
public ulong StateDbWriteBufferNumber { get; set; } = 4;
- public bool? StateDbVerifyChecksum { get; set; }
+ public bool? StateDbVerifyChecksum { get; set; } = true;
public ulong? StateDbRowCacheSize { get; set; }
public bool StateDbEnableFileWarmer { get; set; } = false;
public double StateDbCompressibilityHint { get; set; } = 0.45;
@@ -234,6 +235,9 @@ public class DbConfig : IDbConfig
// Default is 1 MB.
"max_write_batch_group_size_bytes=4000000;" +
+ // Don't do periodic compaction
+ "ttl=0;" +
+ "periodic_compaction_seconds=0;" +
"";
public string StateDbLargeMemoryRocksDbOptions { get; set; } =
@@ -271,4 +275,129 @@ public class DbConfig : IDbConfig
public string L1OriginDbRocksDbOptions { get; set; } = "";
public string? L1OriginDbAdditionalRocksDbOptions { get; set; }
+
+ public bool? FlatDbVerifyChecksum { get; set; } = true;
+ public string FlatDbRocksDbOptions { get; set; } =
+
+ // Common across flat columns.
+ "min_write_buffer_number_to_merge=2;" +
+ "block_based_table_factory.block_restart_interval=4;" +
+ "block_based_table_factory.data_block_index_type=kDataBlockBinaryAndHash;" +
+ "block_based_table_factory.data_block_hash_table_util_ratio=0.7;" +
+ "block_based_table_factory.block_size=16000;" +
+ "block_based_table_factory.filter_policy=ribbonfilter:10:3;" +
+ "max_write_batch_group_size_bytes=4000000;" +
+ "block_based_table_factory.pin_l0_filter_and_index_blocks_in_cache=true;" +
+ "block_based_table_factory.prepopulate_block_cache=kFlushOnly;" +
+ "block_based_table_factory.whole_key_filtering=true;" + // should be default. Just in case.
+ "level_compaction_dynamic_level_bytes=false;" +
+
+ // We bsearch instead of a partitioned tree. This takes up memory for improved latency.
+ "block_based_table_factory.partition_filters=false;" +
+ "block_based_table_factory.index_type=kBinarySearch;" +
+
+ "ttl=0;" +
+ "periodic_compaction_seconds=0;" +
+ "compression=kLZ4Compression;" +
+
+ // Reduce num of files. Tend to be a good thing.
+ "target_file_size_multiplier=2;" +
+
+ // Wal flushed manually in persistence.
+ "manual_wal_flush=true;" +
+
+ // When an SST is removed, also remove the cached blocks instead of waiting for it to disappear
+ "uncache_aggressiveness=1000;" +
+
+ // Small by default, column will override
+ "write_buffer_size=1000000;" +
+ "";
+ public string? FlatDbAdditionalRocksDbOptions { get; set; }
+
+ public string? FlatMetadataDbRocksDbOptions { get; set; } = "max_bytes_for_level_base=1000000;";
+ public string? FlatMetadataDbAdditionalRocksDbOptions { get; set; }
+
+ // The account db is small, so we use smaller files and buffers so that it does not compact too much
+ // at once
+ public string? FlatAccountDbRocksDbOptions { get; set; } =
+ // The account db is small, already using slim encoding. Disabling compression does not lose much.
+ "compression=kNoCompression;" +
+
+ // Keep last level bloom filter. Take up most index memory
+ "optimize_filters_for_hits=false;" +
+
+ // account db receives relatively few writes, so we set a low buffer size to prevent too many different
+ // versions of an account in the same memtable.
+ "target_file_size_multiplier=3;" +
+ "target_file_size_base=32000000;" +
+ "max_bytes_for_level_multiplier=15;" + // Reduce level count
+ "max_bytes_for_level_base=128000000;" +
+
+ // account db have no benefit in locality whatsoever, and have compression disabled.
+ "block_based_table_factory.block_size=4096;" +
+
+ // Smaller
+ "write_buffer_size=16000000;" +
+ "max_write_buffer_number=4;" +
+ "";
+ public string? FlatAccountDbAdditionalRocksDbOptions { get; set; }
+
+ public string? FlatStorageDbRocksDbOptions { get; set; } =
+ // Keep last level bloom filter. Take up most index memory
+ "optimize_filters_for_hits=false;" +
+
+ // Much like account kinda small.
+ "target_file_size_base=64000000;" +
+
+ // Using a 4kb block size is faster, IO-wise, but uses an additional 500 MB of memory that is better spent on the block cache.
+ "block_based_table_factory.block_size=8000;" +
+
+ // Smaller
+ "write_buffer_size=32000000;" +
+ "max_write_buffer_number=4;" +
+ "";
+
+ public string? FlatStorageDbAdditionalRocksDbOptions { get; set; }
+
+ const string? FlatDbCommonTrieOptions =
+ "level_compaction_dynamic_level_bytes=true;" +
+ "block_based_table_factory.block_restart_interval=8;" +
+ "block_based_table_factory.block_size=16000;" +
+ "";
+
+ // Only 1 gig in total, but almost 1/3rd of the writes.
+ public string? FlatStateTopNodesDbRocksDbOptions { get; set; } =
+ FlatDbCommonTrieOptions +
+ "write_buffer_size=64000000;" +
+ "max_write_buffer_number=4;" +
+ "";
+ public string? FlatStateNodesDbAdditionalRocksDbOptions { get; set; }
+
+ // Not written to as much, so use a lower buffer size
+ public string? FlatStateNodesDbRocksDbOptions { get; set; } =
+ FlatDbCommonTrieOptions +
+ "write_buffer_size=32000000;" +
+ "max_write_buffer_number=4;" +
+ "";
+ public string? FlatStateTopNodesDbAdditionalRocksDbOptions { get; set; }
+
+ // Most writes
+ public string? FlatStorageNodesDbRocksDbOptions { get; set; } =
+ FlatDbCommonTrieOptions +
+ // Slight increase to account for high writes
+ "max_bytes_for_level_base=350000000;" +
+ "write_buffer_size=64000000;" +
+ "max_write_buffer_number=8;" +
+ "";
+ public string? FlatStorageNodesDbAdditionalRocksDbOptions { get; set; }
+
+ public string? FlatFallbackNodesNodesDbRocksDbOptions { get; set; } =
+ FlatDbCommonTrieOptions +
+ // Fallback nodes are tiny — KB-level small. This is generous.
+ "max_bytes_for_level_base=4000000;" +
+ "";
+ public string? FlatFallbackNodesNodesDbAdditionalRocksDbOptions { get; set; }
+
+ public string? PreimageDbRocksDbOptions { get; set; } = "";
+ public string? PreimageDbAdditionalRocksDbOptions { get; set; }
}
diff --git a/src/Nethermind/Nethermind.Db.Rocks/Config/IDbConfig.cs b/src/Nethermind/Nethermind.Db.Rocks/Config/IDbConfig.cs
index fa7c8e2023b..a68b3b348a0 100644
--- a/src/Nethermind/Nethermind.Db.Rocks/Config/IDbConfig.cs
+++ b/src/Nethermind/Nethermind.Db.Rocks/Config/IDbConfig.cs
@@ -102,4 +102,32 @@ public interface IDbConfig : IConfig
string L1OriginDbRocksDbOptions { get; set; }
string? L1OriginDbAdditionalRocksDbOptions { get; set; }
+
+ bool? FlatDbVerifyChecksum { get; set; }
+ string FlatDbRocksDbOptions { get; set; }
+ string? FlatDbAdditionalRocksDbOptions { get; set; }
+
+ string? FlatMetadataDbRocksDbOptions { get; set; }
+ string? FlatMetadataDbAdditionalRocksDbOptions { get; set; }
+
+ string? FlatAccountDbRocksDbOptions { get; set; }
+ string? FlatAccountDbAdditionalRocksDbOptions { get; set; }
+
+ string? FlatStorageDbRocksDbOptions { get; set; }
+ string? FlatStorageDbAdditionalRocksDbOptions { get; set; }
+
+ string? FlatStateNodesDbRocksDbOptions { get; set; }
+ string? FlatStateNodesDbAdditionalRocksDbOptions { get; set; }
+
+ string? FlatStateTopNodesDbRocksDbOptions { get; set; }
+ string? FlatStateTopNodesDbAdditionalRocksDbOptions { get; set; }
+
+ string? FlatStorageNodesDbRocksDbOptions { get; set; }
+ string? FlatStorageNodesDbAdditionalRocksDbOptions { get; set; }
+
+ string? FlatFallbackNodesNodesDbRocksDbOptions { get; set; }
+ string? FlatFallbackNodesNodesDbAdditionalRocksDbOptions { get; set; }
+
+ string? PreimageDbRocksDbOptions { get; set; }
+ public string? PreimageDbAdditionalRocksDbOptions { get; set; }
}
diff --git a/src/Nethermind/Nethermind.Db.Rocks/DbOnTheRocks.cs b/src/Nethermind/Nethermind.Db.Rocks/DbOnTheRocks.cs
index cca010c66f8..8590a662a8c 100644
--- a/src/Nethermind/Nethermind.Db.Rocks/DbOnTheRocks.cs
+++ b/src/Nethermind/Nethermind.Db.Rocks/DbOnTheRocks.cs
@@ -221,7 +221,6 @@ private RocksDb Init(string basePath, string dbPath, IDbConfig dbConfig, ILogMan
CreateMarkerIfCorrupt(x);
throw;
}
-
}
private void WarmupFile(string basePath, RocksDb db)
@@ -438,7 +437,7 @@ private long GetMemtableSize()
return 0;
}
- [GeneratedRegex("(?[^; ]+)\\=(?[^; ]+);", RegexOptions.Singleline | RegexOptions.NonBacktracking | RegexOptions.ExplicitCapture)]
+ [GeneratedRegex("(?[A-Za-z0-9_\\.]+)\\=(?[^; ]+);", RegexOptions.Singleline | RegexOptions.NonBacktracking | RegexOptions.ExplicitCapture)]
private static partial Regex ExtractDbOptionsRegex();
public static IDictionary ExtractOptions(string dbOptions)
@@ -532,7 +531,7 @@ protected virtual void BuildOptions(IRocksDbConfig dbConfig, Options optio
Marshal.FreeHGlobal(optsPtr);
}
- if (dbConfig.WriteBufferSize is not null)
+ if (dbConfig.WriteBufferSize > 0)
{
_writeBufferSize = dbConfig.WriteBufferSize.Value;
options.SetWriteBufferSize(dbConfig.WriteBufferSize.Value);
@@ -1484,8 +1483,7 @@ private void ReleaseUnmanagedResources()
public void Dispose()
{
- if (_isDisposing) return;
- _isDisposing = true;
+ if (Interlocked.CompareExchange(ref _isDisposing, true, false)) return;
if (_logger.IsInfo) _logger.Info($"Disposing DB {Name}");
diff --git a/src/Nethermind/Nethermind.Db.Rocks/HyperClockCacheWrapper.cs b/src/Nethermind/Nethermind.Db.Rocks/HyperClockCacheWrapper.cs
index a47654fbd37..03d722ab29a 100644
--- a/src/Nethermind/Nethermind.Db.Rocks/HyperClockCacheWrapper.cs
+++ b/src/Nethermind/Nethermind.Db.Rocks/HyperClockCacheWrapper.cs
@@ -18,6 +18,7 @@ public HyperClockCacheWrapper(ulong capacity = 32_000_000) : base(ownsHandle: tr
protected override bool ReleaseHandle()
{
+ // Temporarily disabled to see if it fixes the crash
RocksDbSharp.Native.Instance.rocksdb_cache_destroy(handle);
return true;
}
diff --git a/src/Nethermind/Nethermind.Db/DbNames.cs b/src/Nethermind/Nethermind.Db/DbNames.cs
index 2030be8e2bd..821a46eee2d 100644
--- a/src/Nethermind/Nethermind.Db/DbNames.cs
+++ b/src/Nethermind/Nethermind.Db/DbNames.cs
@@ -7,6 +7,7 @@ public static class DbNames
{
public const string Storage = "storage";
public const string State = "state";
+ public const string Flat = "flat";
public const string Code = "code";
public const string Blocks = "blocks";
public const string Headers = "headers";
@@ -20,5 +21,6 @@ public static class DbNames
public const string DiscoveryNodes = "discoveryNodes";
public const string DiscoveryV5Nodes = "discoveryV5Nodes";
public const string PeersDb = "peers";
+ public const string Preimage = "preimage";
}
}
diff --git a/src/Nethermind/Nethermind.Db/FlatDbConfig.cs b/src/Nethermind/Nethermind.Db/FlatDbConfig.cs
new file mode 100644
index 00000000000..12c903a3da9
--- /dev/null
+++ b/src/Nethermind/Nethermind.Db/FlatDbConfig.cs
@@ -0,0 +1,24 @@
+// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited
+// SPDX-License-Identifier: LGPL-3.0-only
+
+using Nethermind.Core.Extensions;
+
+namespace Nethermind.Db;
+
+public class FlatDbConfig : IFlatDbConfig
+{
+ public bool Enabled { get; set; } = false;
+ public bool EnablePreimageRecording { get; set; } = false;
+ public bool ImportFromPruningTrieState { get; set; } = false;
+ public bool InlineCompaction { get; set; } = false;
+ public bool VerifyWithTrie { get; set; } = false;
+ public FlatLayout Layout { get; set; } = FlatLayout.Flat;
+ public int CompactSize { get; set; } = 32;
+ public int MaxInFlightCompactJob { get; set; } = 32;
+ public int MaxReorgDepth { get; set; } = 256;
+ public int MidCompactSize { get; set; } = 4;
+ public int MinReorgDepth { get; set; } = 128;
+ public int TrieWarmerWorkerCount { get; set; } = -1;
+ public long BlockCacheSizeBudget { get; set; } = 1.GiB();
+ public long TrieCacheMemoryBudget { get; set; } = 512.MiB();
+}
diff --git a/src/Nethermind/Nethermind.Db/FlatLayout.cs b/src/Nethermind/Nethermind.Db/FlatLayout.cs
new file mode 100644
index 00000000000..fcf818f6e54
--- /dev/null
+++ b/src/Nethermind/Nethermind.Db/FlatLayout.cs
@@ -0,0 +1,11 @@
+// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited
+// SPDX-License-Identifier: LGPL-3.0-only
+
+namespace Nethermind.Db;
+
+public enum FlatLayout
+{
+ Flat,
+ FlatInTrie,
+ PreimageFlat,
+}
diff --git a/src/Nethermind/Nethermind.Db/IFlatDbConfig.cs b/src/Nethermind/Nethermind.Db/IFlatDbConfig.cs
new file mode 100644
index 00000000000..1f10c45aacd
--- /dev/null
+++ b/src/Nethermind/Nethermind.Db/IFlatDbConfig.cs
@@ -0,0 +1,51 @@
+// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited
+// SPDX-License-Identifier: LGPL-3.0-only
+
+using Nethermind.Config;
+
+namespace Nethermind.Db;
+
+public interface IFlatDbConfig : IConfig
+{
+ [ConfigItem(Description = "Block cache size budget", DefaultValue = "1073741824")]
+ long BlockCacheSizeBudget { get; set; }
+
+ [ConfigItem(Description = "Compact size", DefaultValue = "32")]
+ int CompactSize { get; set; }
+
+ [ConfigItem(Description = "Enabled", DefaultValue = "false")]
+ bool Enabled { get; set; }
+
+ [ConfigItem(Description = "Enable recording of preimages (address/slot hash to original bytes)", DefaultValue = "false")]
+ bool EnablePreimageRecording { get; set; }
+
+ [ConfigItem(Description = "Import from pruning trie state db", DefaultValue = "false")]
+ bool ImportFromPruningTrieState { get; set; }
+
+ [ConfigItem(Description = "Inline compaction", DefaultValue = "false")]
+ bool InlineCompaction { get; set; }
+
+ [ConfigItem(Description = "Flat db layout", DefaultValue = "Flat")]
+ FlatLayout Layout { get; set; }
+
+ [ConfigItem(Description = "Max in flight compact job", DefaultValue = "32")]
+ int MaxInFlightCompactJob { get; set; }
+
+ [ConfigItem(Description = "Max reorg depth", DefaultValue = "256")]
+ int MaxReorgDepth { get; set; }
+
+ [ConfigItem(Description = "Compact interval", DefaultValue = "4")]
+ int MidCompactSize { get; set; }
+
+ [ConfigItem(Description = "Minimum reorg depth", DefaultValue = "128")]
+ int MinReorgDepth { get; set; }
+
+ [ConfigItem(Description = "Trie cache memory target", DefaultValue = "536870912")]
+ long TrieCacheMemoryBudget { get; set; }
+
+ [ConfigItem(Description = "Trie warmer worker count (-1 for processor count - 1, 0 to disable)", DefaultValue = "-1")]
+ int TrieWarmerWorkerCount { get; set; }
+
+ [ConfigItem(Description = "Verify with trie", DefaultValue = "false")]
+ bool VerifyWithTrie { get; set; }
+}
diff --git a/src/Nethermind/Nethermind.Init/Modules/FlatRocksDbConfigAdjuster.cs b/src/Nethermind/Nethermind.Init/Modules/FlatRocksDbConfigAdjuster.cs
new file mode 100644
index 00000000000..30122863ebb
--- /dev/null
+++ b/src/Nethermind/Nethermind.Init/Modules/FlatRocksDbConfigAdjuster.cs
@@ -0,0 +1,67 @@
+// SPDX-FileCopyrightText: 2026 Demerzel Solutions Limited
+// SPDX-License-Identifier: LGPL-3.0-only
+
+using System;
+using Nethermind.Core;
+using Nethermind.Core.Extensions;
+using Nethermind.Db;
+using Nethermind.Db.Rocks;
+using Nethermind.Db.Rocks.Config;
+using Nethermind.Logging;
+using Nethermind.State.Flat;
+
+namespace Nethermind.Init.Modules;
+
+///
+/// Adjust rocksdb config depending on the flatdb config
+///
+internal class FlatRocksDbConfigAdjuster(
+ IRocksDbConfigFactory rocksDbConfigFactory,
+ IFlatDbConfig flatDbConfig,
+ IDisposableStack disposeStack,
+ ILogManager logManager)
+ : IRocksDbConfigFactory
+{
+ private readonly ILogger _logger = logManager.GetClassLogger();
+
+ public IRocksDbConfig GetForDatabase(string databaseName, string? columnName)
+ {
+ IRocksDbConfig config = rocksDbConfigFactory.GetForDatabase(databaseName, columnName);
+ if (databaseName == nameof(DbNames.Flat))
+ {
+ string additionalConfig = "";
+ if (flatDbConfig.Layout == FlatLayout.FlatInTrie)
+ {
+ // For flat-in-trie, add optimize_filters_for_hits and turn on the partitioned index; this reduces
+ // memory at the expense of latency.
+ additionalConfig = config.RocksDbOptions +
+ "optimize_filters_for_hits=true;" +
+ "block_based_table_factory.partition_filters=true;" +
+ "block_based_table_factory.index_type=kTwoLevelIndexSearch;";
+ }
+
+ IntPtr? cacheHandle = null;
+ if (columnName == nameof(FlatDbColumns.Account))
+ {
+ ulong cacheCapacity = (ulong)(flatDbConfig.BlockCacheSizeBudget * 0.3);
+ if (_logger.IsInfo) _logger.Info($"Setting {(cacheCapacity / (ulong)1.MiB()):N0} MB of block cache to account");
+ HyperClockCacheWrapper cacheWrapper = new(cacheCapacity);
+ cacheHandle = cacheWrapper.Handle;
+ disposeStack.Push(cacheWrapper);
+ }
+
+ if (columnName == nameof(FlatDbColumns.Storage))
+ {
+ ulong cacheCapacity = (ulong)(flatDbConfig.BlockCacheSizeBudget * 0.7);
+ if (_logger.IsInfo) _logger.Info($"Setting {(cacheCapacity / (ulong)1.MiB()):N0} MB of block cache to storage");
+ HyperClockCacheWrapper cacheWrapper = new(cacheCapacity);
+ cacheHandle = cacheWrapper.Handle;
+ disposeStack.Push(cacheWrapper);
+ }
+
+ config = new AdjustedRocksdbConfig(config, additionalConfig, config.WriteBufferSize.GetValueOrDefault(), cacheHandle);
+ }
+
+ return config;
+ }
+}
diff --git a/src/Nethermind/Nethermind.Init/Modules/FlatWorldStateModule.cs b/src/Nethermind/Nethermind.Init/Modules/FlatWorldStateModule.cs
new file mode 100644
index 00000000000..c603ae8befe
--- /dev/null
+++ b/src/Nethermind/Nethermind.Init/Modules/FlatWorldStateModule.cs
@@ -0,0 +1,130 @@
+// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited
+// SPDX-License-Identifier: LGPL-3.0-only
+
+using System;
+using Autofac;
+using Microsoft.AspNetCore.Http;
+using Nethermind.Api.Steps;
+using Nethermind.Blockchain;
+using Nethermind.Blockchain.Find;
+using Nethermind.Blockchain.FullPruning;
+using Nethermind.Blockchain.Synchronization;
+using Nethermind.Config;
+using Nethermind.Core;
+using Nethermind.Db;
+using Nethermind.Db.Rocks.Config;
+using Nethermind.Init.Steps;
+using Nethermind.JsonRpc;
+using Nethermind.JsonRpc.Modules.Admin;
+using Nethermind.Logging;
+using Nethermind.Monitoring.Config;
+using Nethermind.State;
+using Nethermind.State.Flat;
+using Nethermind.State.Flat.Persistence;
+using Nethermind.State.Flat.ScopeProvider;
+
+namespace Nethermind.Init.Modules;
+
+public class FlatWorldStateModule(IFlatDbConfig flatDbConfig) : Module
+{
+ protected override void Load(ContainerBuilder builder)
+ {
+ builder
+
+ // Implementation of nethermind interfaces
+ .AddSingleton()
+ .OnActivate((worldStateManager, ctx) =>
+ {
+ new TrieStoreBoundaryWatcher(worldStateManager, ctx.Resolve(), ctx.Resolve());
+ })
+ .AddSingleton()
+
+ // Disable some pruning trie store specific components
+ .AddSingleton()
+ .AddSingleton(_ => throw new NotSupportedException($"{nameof(MainPruningTrieStoreFactory)} disabled."))
+ .AddSingleton(_ => throw new NotSupportedException($"{nameof(PruningTrieStateFactory)} disabled."))
+
+ // The actual flatDb components
+ .AddSingleton((ctx) => new FlatDbManager(
+ ctx.Resolve(),
+ ctx.Resolve(),
+ ctx.Resolve(),
+ ctx.Resolve(),
+ ctx.Resolve(),
+ ctx.Resolve(),
+ ctx.Resolve(),
+ ctx.Resolve(),
+ ctx.Resolve().EnableDetailedMetric))
+ .AddSingleton()
+ .AddSingleton()
+ .AddSingleton()
+ .AddSingleton()
+ .AddSingleton()
+ .AddSingleton(flatDbConfig.TrieWarmerWorkerCount == 0
+ ? _ => new NoopTrieWarmer()
+ : ctx => ctx.Resolve())
+ .AddSingleton()
+ .Add()
+
+ // Persistences
+ .AddColumnDatabase(DbNames.Flat)
+ .AddSingleton()
+ .AddSingleton()
+ .AddDecorator()
+
+ .AddSingleton()
+ .AddDatabase(DbNames.Preimage)
+
+ .AddSingleton((flatDbConfig, exitSource, logManager, ctx) =>
+ {
+ IPersistence persistence = flatDbConfig.Layout switch
+ {
+ FlatLayout.Flat => ctx.Resolve(),
+ FlatLayout.FlatInTrie => ctx.Resolve(),
+ FlatLayout.PreimageFlat => ctx.Resolve(),
+ _ => throw new NotSupportedException($"Unsupported layout {flatDbConfig.Layout}")
+ };
+
+ if (flatDbConfig.EnablePreimageRecording)
+ {
+ IDb preimageDb = ctx.ResolveKeyed(DbNames.Preimage);
+ persistence = new PreimageRecordingPersistence(persistence, preimageDb);
+ }
+
+ return new CachedReaderPersistence(persistence, exitSource, logManager);
+ })
+ ;
+
+ if (flatDbConfig.ImportFromPruningTrieState)
+ {
+ builder
+ .AddSingleton()
+ .AddStep(typeof(ImportFlatDb));
+ }
+ else
+ {
+ builder
+ .AddDecorator((ctx, syncConfig) =>
+ {
+ ILogger logger = ctx.Resolve().GetClassLogger();
+ if (syncConfig.FastSync || syncConfig.SnapSync)
+ {
+ if (logger.IsWarn) logger.Warn("Fast sync and snap sync turned off with FlatDB");
+ syncConfig.FastSync = false;
+ syncConfig.SnapSync = false;
+ }
+ return syncConfig;
+ });
+ }
+ }
+
+ ///
+ /// Need to stub out, or it will register trie store specific module
+ ///
+ private class PruningTrieStateAdminRpcModuleStub : IPruningTrieStateAdminRpcModule
+ {
+ public ResultWrapper admin_prune() => ResultWrapper.Success(PruningStatus.Disabled);
+
+ public ResultWrapper admin_verifyTrie(BlockParameter block) => ResultWrapper.Success("disable");
+ }
+}
diff --git a/src/Nethermind/Nethermind.Init/Modules/NethermindModule.cs b/src/Nethermind/Nethermind.Init/Modules/NethermindModule.cs
index 41df70db238..00828ee5e45 100644
--- a/src/Nethermind/Nethermind.Init/Modules/NethermindModule.cs
+++ b/src/Nethermind/Nethermind.Init/Modules/NethermindModule.cs
@@ -1,6 +1,7 @@
// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited
// SPDX-License-Identifier: LGPL-3.0-only
+using System;
using System.IO.Abstractions;
using Autofac;
using Nethermind.Abi;
@@ -15,6 +16,7 @@
using Nethermind.Core.Specs;
using Nethermind.Core.Timers;
using Nethermind.Crypto;
+using Nethermind.Db;
using Nethermind.Era1;
using Nethermind.JsonRpc;
using Nethermind.Logging;
@@ -81,6 +83,9 @@ protected override void Load(ContainerBuilder builder)
{
builder.AddSingleton(NullBlobTxStorage.Instance);
}
+
+ if (configProvider.GetConfig().Enabled)
+ builder.AddModule(new FlatWorldStateModule(configProvider.GetConfig()));
}
// Just a wrapper to make it clear, these three are expected to be available at the time of configurations.
diff --git a/src/Nethermind/Nethermind.Init/Modules/PrewarmerModule.cs b/src/Nethermind/Nethermind.Init/Modules/PrewarmerModule.cs
index a5131853f84..48f3a30e016 100644
--- a/src/Nethermind/Nethermind.Init/Modules/PrewarmerModule.cs
+++ b/src/Nethermind/Nethermind.Init/Modules/PrewarmerModule.cs
@@ -1,6 +1,7 @@
// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited
// SPDX-License-Identifier: LGPL-3.0-only
+using System;
using Autofac;
using Nethermind.Blockchain;
using Nethermind.Config;
@@ -41,6 +42,8 @@ protected override void Load(ContainerBuilder builder)
// module, so singleton here is like scoped but exclude inner prewarmer lifetime.
.AddSingleton()
.AddScoped()
+
+ // This class creates the block processing env with a world state that populates the cache
.Add()
// These are the actual decorated component that provide cached result
diff --git a/src/Nethermind/Nethermind.Init/Nethermind.Init.csproj b/src/Nethermind/Nethermind.Init/Nethermind.Init.csproj
index f1473282530..3f3637408f2 100644
--- a/src/Nethermind/Nethermind.Init/Nethermind.Init.csproj
+++ b/src/Nethermind/Nethermind.Init/Nethermind.Init.csproj
@@ -13,6 +13,7 @@
+
<_Parameter1>Nethermind.Runner.Test
diff --git a/src/Nethermind/Nethermind.Init/Steps/ImportFlatDb.cs b/src/Nethermind/Nethermind.Init/Steps/ImportFlatDb.cs
new file mode 100644
index 00000000000..776e8af38f1
--- /dev/null
+++ b/src/Nethermind/Nethermind.Init/Steps/ImportFlatDb.cs
@@ -0,0 +1,73 @@
+// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited
+// SPDX-License-Identifier: LGPL-3.0-only
+
+using System;
+using System.Threading;
+using System.Threading.Tasks;
+using Nethermind.Api.Steps;
+using Nethermind.Blockchain;
+using Nethermind.Config;
+using Nethermind.Core;
+using Nethermind.Db;
+using Nethermind.Logging;
+using Nethermind.Monitoring;
+using Nethermind.State.Flat;
+using Nethermind.State.Flat.Persistence;
+
+namespace Nethermind.Init.Steps;
+
+[RunnerStepDependencies(
+ dependencies: [typeof(InitializeBlockTree)],
+ dependents: [typeof(InitializeBlockchain)]
+)]
+public class ImportFlatDb(
+ IBlockTree blockTree,
+ IPersistence persistence,
+ Importer importer,
+ IProcessExitSource exitSource,
+ IFlatDbConfig flatDbConfig,
+ ILogManager logManager
+) : IStep
+{
+ ILogger _logger = logManager.GetClassLogger();
+
+ public async Task Execute(CancellationToken cancellationToken)
+ {
+ // Validate that we're not using PreimageFlat layout
+ if (flatDbConfig.Layout == FlatLayout.PreimageFlat)
+ {
+ if (_logger.IsError) _logger.Error("Cannot import with FlatLayout.PreimageFlat. Use FlatLayout.Flat or FlatLayout.FlatInTrie instead.");
+ if (_logger.IsError) _logger.Error("PreimageFlat mode does not support importing from trie state because the importer uses hash-based raw operations.");
+ exitSource.Exit(1);
+ return;
+ }
+
+ BlockHeader? head = blockTree.Head?.Header;
+ if (head is null) return;
+
+ using (var reader = persistence.CreateReader())
+ {
+ if (_logger.IsWarn) _logger.Warn($"Current state is {reader.CurrentState}");
+ if (reader.CurrentState.BlockNumber > 0)
+ {
+ if (_logger.IsInfo) _logger.Info("Flat db already exist");
+ return;
+ }
+ }
+
+ if (_logger.IsInfo) _logger.Info($"Copying state {head.ToString(BlockHeader.Format.Short)} with state root {head.StateRoot}");
+
+ try
+ {
+ await importer.Copy(new StateId(head), cancellationToken);
+ }
+ catch (OperationCanceledException)
+ {
+ if (_logger.IsInfo) _logger.Info("Import cancelled by user");
+ exitSource.Exit(1);
+ return;
+ }
+
+ exitSource.Exit(0);
+ }
+}
diff --git a/src/Nethermind/Nethermind.Runner.Test/Ethereum/Steps/EthereumStepsLoaderTests.cs b/src/Nethermind/Nethermind.Runner.Test/Ethereum/Steps/EthereumStepsLoaderTests.cs
index 5d849fd3eb3..c4abe9ab5b8 100644
--- a/src/Nethermind/Nethermind.Runner.Test/Ethereum/Steps/EthereumStepsLoaderTests.cs
+++ b/src/Nethermind/Nethermind.Runner.Test/Ethereum/Steps/EthereumStepsLoaderTests.cs
@@ -40,7 +40,7 @@ public void BuildInSteps_IsCorrect()
steps.AddRange(LoadStepInfoFromAssembly(typeof(InitializeBlockTree).Assembly));
steps.AddRange(LoadStepInfoFromAssembly(typeof(EthereumRunner).Assembly));
- HashSet optionalSteps = [typeof(RunVerifyTrie), typeof(ExitOnInvalidBlock)];
+ HashSet optionalSteps = [typeof(RunVerifyTrie), typeof(ExitOnInvalidBlock), typeof(ImportFlatDb)];
steps = steps.Where((s) => !optionalSteps.Contains(s.StepBaseType)).ToHashSet();
using IContainer container = new ContainerBuilder()
diff --git a/src/Nethermind/Nethermind.Runner.Test/Module/FlatRocksDbConfigAdjusterTests.cs b/src/Nethermind/Nethermind.Runner.Test/Module/FlatRocksDbConfigAdjusterTests.cs
new file mode 100644
index 00000000000..3e2e87f6dc5
--- /dev/null
+++ b/src/Nethermind/Nethermind.Runner.Test/Module/FlatRocksDbConfigAdjusterTests.cs
@@ -0,0 +1,95 @@
+// SPDX-FileCopyrightText: 2026 Demerzel Solutions Limited
+// SPDX-License-Identifier: LGPL-3.0-only
+
+using System;
+using FluentAssertions;
+using Nethermind.Core;
+using Nethermind.Db;
+using Nethermind.Db.Rocks.Config;
+using Nethermind.Init.Modules;
+using Nethermind.Logging;
+using Nethermind.State.Flat;
+using NSubstitute;
+using NUnit.Framework;
+
+namespace Nethermind.Runner.Test.Module;
+
+[TestFixture]
+[Parallelizable(ParallelScope.Self)]
+public class FlatRocksDbConfigAdjusterTests
+{
+ private IRocksDbConfigFactory _baseFactory = null!;
+ private IFlatDbConfig _flatDbConfig = null!;
+ private IDisposableStack _disposeStack = null!;
+ private IRocksDbConfig _baseConfig = null!;
+
+ [SetUp]
+ public void SetUp()
+ {
+ _baseFactory = Substitute.For();
+ _flatDbConfig = Substitute.For();
+ _disposeStack = Substitute.For();
+ _baseConfig = Substitute.For();
+
+ _baseConfig.RocksDbOptions.Returns("base_options=true;");
+ _baseConfig.WriteBufferSize.Returns((ulong)64_000_000);
+
+ _baseFactory.GetForDatabase(Arg.Any(), Arg.Any()).Returns(_baseConfig);
+ }
+
+ [Test]
+ public void NonFlatDatabase_ReturnsBaseConfig()
+ {
+ _flatDbConfig.Layout.Returns(FlatLayout.Flat);
+ _flatDbConfig.BlockCacheSizeBudget.Returns(1_000_000_000L);
+
+ var adjuster = new FlatRocksDbConfigAdjuster(_baseFactory, _flatDbConfig, _disposeStack, LimboLogs.Instance);
+
+ IRocksDbConfig result = adjuster.GetForDatabase("State0", null);
+
+ result.Should().BeSameAs(_baseConfig);
+ }
+
+ [Test]
+ public void FlatDatabase_WithFlatLayout_DoesNotAddPartitionedIndexOptions()
+ {
+ _flatDbConfig.Layout.Returns(FlatLayout.Flat);
+ _flatDbConfig.BlockCacheSizeBudget.Returns(1_000_000_000L);
+
+ var adjuster = new FlatRocksDbConfigAdjuster(_baseFactory, _flatDbConfig, _disposeStack, LimboLogs.Instance);
+
+ IRocksDbConfig result = adjuster.GetForDatabase(nameof(DbNames.Flat), nameof(FlatDbColumns.Metadata));
+
+ result.RocksDbOptions.Should().NotContain("optimize_filters_for_hits");
+ result.RocksDbOptions.Should().NotContain("partition_filters");
+ result.RocksDbOptions.Should().NotContain("kTwoLevelIndexSearch");
+ }
+
+ [Test]
+ public void FlatDatabase_WithFlatInTrieLayout_AddsPartitionedIndexOptions()
+ {
+ _flatDbConfig.Layout.Returns(FlatLayout.FlatInTrie);
+ _flatDbConfig.BlockCacheSizeBudget.Returns(1_000_000_000L);
+
+ var adjuster = new FlatRocksDbConfigAdjuster(_baseFactory, _flatDbConfig, _disposeStack, LimboLogs.Instance);
+
+ IRocksDbConfig result = adjuster.GetForDatabase(nameof(DbNames.Flat), nameof(FlatDbColumns.Metadata));
+
+ result.RocksDbOptions.Should().Contain("optimize_filters_for_hits=true;");
+ result.RocksDbOptions.Should().Contain("block_based_table_factory.partition_filters=true;");
+ result.RocksDbOptions.Should().Contain("block_based_table_factory.index_type=kTwoLevelIndexSearch;");
+ }
+
+ [Test]
+ public void FlatDatabase_DelegatesToBaseFactoryWithCorrectParameters()
+ {
+ _flatDbConfig.Layout.Returns(FlatLayout.Flat);
+ _flatDbConfig.BlockCacheSizeBudget.Returns(1_000_000_000L);
+
+ var adjuster = new FlatRocksDbConfigAdjuster(_baseFactory, _flatDbConfig, _disposeStack, LimboLogs.Instance);
+
+ adjuster.GetForDatabase(nameof(DbNames.Flat), nameof(FlatDbColumns.Account));
+
+ _baseFactory.Received(1).GetForDatabase(nameof(DbNames.Flat), nameof(FlatDbColumns.Account));
+ }
+}
diff --git a/src/Nethermind/Nethermind.Runner/packages.lock.json b/src/Nethermind/Nethermind.Runner/packages.lock.json
index 858ddab75fd..b406492fc3a 100644
--- a/src/Nethermind/Nethermind.Runner/packages.lock.json
+++ b/src/Nethermind/Nethermind.Runner/packages.lock.json
@@ -885,7 +885,8 @@
"Nethermind.Network.Discovery": "[1.37.0-unstable, )",
"Nethermind.Network.Dns": "[1.37.0-unstable, )",
"Nethermind.Network.Enr": "[1.37.0-unstable, )",
- "Nethermind.Specs": "[1.37.0-unstable, )"
+ "Nethermind.Specs": "[1.37.0-unstable, )",
+ "Nethermind.State.Flat": "[1.37.0-unstable, )"
}
},
"nethermind.init.snapshot": {
@@ -1117,6 +1118,18 @@
"Nethermind.Trie": "[1.37.0-unstable, )"
}
},
+ "nethermind.state.flat": {
+ "type": "Project",
+ "dependencies": {
+ "Nethermind.Core": "[1.37.0-unstable, )",
+ "Nethermind.Db": "[1.37.0-unstable, )",
+ "Nethermind.Evm": "[1.37.0-unstable, )",
+ "Nethermind.Serialization.Rlp": "[1.37.0-unstable, )",
+ "Nethermind.State": "[1.37.0-unstable, )",
+ "Nethermind.Trie": "[1.37.0-unstable, )",
+ "System.IO.Hashing": "[10.0.2, )"
+ }
+ },
"nethermind.synchronization": {
"type": "Project",
"dependencies": {
@@ -1569,6 +1582,12 @@
"System.Security.Cryptography.ProtectedData": "10.0.1"
}
},
+ "System.IO.Hashing": {
+ "type": "CentralTransitive",
+ "requested": "[10.0.2, )",
+ "resolved": "10.0.2",
+ "contentHash": "AKJknIFi9O3+rGExxTry188JPvUoZAPcCtS2qdqyFhIzsxQ1Ap94BeGDG0VzVEHakhmRxmJtVih6TsHoghIt/g=="
+ },
"System.Security.Cryptography.ProtectedData": {
"type": "CentralTransitive",
"requested": "[10.0.1, )",
diff --git a/src/Nethermind/Nethermind.State.Flat.Test/FlatDbManagerTests.cs b/src/Nethermind/Nethermind.State.Flat.Test/FlatDbManagerTests.cs
new file mode 100644
index 00000000000..1be8ff58074
--- /dev/null
+++ b/src/Nethermind/Nethermind.State.Flat.Test/FlatDbManagerTests.cs
@@ -0,0 +1,161 @@
+// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited
+// SPDX-License-Identifier: LGPL-3.0-only
+
+using System.Threading;
+using System.Threading.Tasks;
+using Nethermind.Config;
+using Nethermind.Core.Crypto;
+using Nethermind.Db;
+using Nethermind.Logging;
+using Nethermind.State.Flat.Persistence;
+using NSubstitute;
+using NUnit.Framework;
+
+namespace Nethermind.State.Flat.Test;
+
+[TestFixture]
+public class FlatDbManagerTests
+{
+ private IResourcePool _resourcePool = null!;
+ private IProcessExitSource _processExitSource = null!;
+ private ITrieNodeCache _trieNodeCache = null!;
+ private ISnapshotCompactor _snapshotCompactor = null!;
+ private ISnapshotRepository _snapshotRepository = null!;
+ private IPersistenceManager _persistenceManager = null!;
+ private IFlatDbConfig _config = null!;
+ private CancellationTokenSource _cts = null!;
+
+ [SetUp]
+ public void SetUp()
+ {
+ _resourcePool = Substitute.For();
+ _cts = new CancellationTokenSource();
+ _processExitSource = Substitute.For();
+ _processExitSource.Token.Returns(_cts.Token);
+ _trieNodeCache = Substitute.For();
+ _snapshotCompactor = Substitute.For();
+ _snapshotRepository = Substitute.For();
+ _persistenceManager = Substitute.For();
+ _config = new FlatDbConfig { CompactSize = 16, MaxInFlightCompactJob = 4, InlineCompaction = true };
+ }
+
+ [TearDown]
+ public void TearDown()
+ {
+ _cts.Cancel();
+ _cts.Dispose();
+ }
+
+ private FlatDbManager CreateManager() => new(
+ _resourcePool,
+ _processExitSource,
+ _trieNodeCache,
+ _snapshotCompactor,
+ _snapshotRepository,
+ _persistenceManager,
+ _config,
+ LimboLogs.Instance,
+ enableDetailedMetrics: false);
+
+ private static StateId CreateStateId(long blockNumber, byte rootByte = 0)
+ {
+ byte[] bytes = new byte[32];
+ bytes[0] = rootByte;
+ return new StateId(blockNumber, new ValueHash256(bytes));
+ }
+
+ [Test]
+ public async Task HasStateForBlock_FoundInRepository_ReturnsTrue()
+ {
+ StateId stateId = CreateStateId(10);
+ _snapshotRepository.HasState(stateId).Returns(true);
+ _persistenceManager.GetCurrentPersistedStateId().Returns(CreateStateId(5));
+
+ await using FlatDbManager manager = CreateManager();
+ bool result = manager.HasStateForBlock(stateId);
+
+ Assert.That(result, Is.True);
+ }
+
+ [Test]
+ public async Task HasStateForBlock_FoundInPersistence_ReturnsTrue()
+ {
+ StateId stateId = CreateStateId(10);
+ _snapshotRepository.HasState(stateId).Returns(false);
+ _persistenceManager.GetCurrentPersistedStateId().Returns(stateId);
+
+ await using FlatDbManager manager = CreateManager();
+ bool result = manager.HasStateForBlock(stateId);
+
+ Assert.That(result, Is.True);
+ }
+
+ [Test]
+ public async Task HasStateForBlock_NotFound_ReturnsFalse()
+ {
+ StateId stateId = CreateStateId(10);
+ _snapshotRepository.HasState(stateId).Returns(false);
+ _persistenceManager.GetCurrentPersistedStateId().Returns(CreateStateId(5));
+
+ await using FlatDbManager manager = CreateManager();
+ bool result = manager.HasStateForBlock(stateId);
+
+ Assert.That(result, Is.False);
+ }
+
+ [Test]
+ public async Task AddSnapshot_BlockBelowPersistedState_ReturnsEarlyAndLogsWarning()
+ {
+ StateId persistedStateId = CreateStateId(100);
+ _persistenceManager.GetCurrentPersistedStateId().Returns(persistedStateId);
+
+ ResourcePool realResourcePool = new(_config);
+ StateId snapshotFrom = CreateStateId(50);
+ StateId snapshotTo = CreateStateId(51);
+ Snapshot snapshot = realResourcePool.CreateSnapshot(snapshotFrom, snapshotTo, ResourcePool.Usage.MainBlockProcessing);
+ TransientResource transientResource = realResourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing);
+
+ await using FlatDbManager manager = CreateManager();
+ manager.AddSnapshot(snapshot, transientResource);
+
+ _snapshotRepository.DidNotReceive().TryAddSnapshot(Arg.Any());
+ }
+
+ [Test]
+ public async Task AddSnapshot_ValidSnapshot_AddsToRepository()
+ {
+ StateId persistedStateId = CreateStateId(5);
+ _persistenceManager.GetCurrentPersistedStateId().Returns(persistedStateId);
+ _snapshotRepository.TryAddSnapshot(Arg.Any()).Returns(true);
+
+ ResourcePool realResourcePool = new(_config);
+ StateId snapshotFrom = CreateStateId(10);
+ StateId snapshotTo = CreateStateId(11);
+ Snapshot snapshot = realResourcePool.CreateSnapshot(snapshotFrom, snapshotTo, ResourcePool.Usage.MainBlockProcessing);
+ TransientResource transientResource = realResourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing);
+
+ await using FlatDbManager manager = CreateManager();
+ manager.AddSnapshot(snapshot, transientResource);
+
+ _snapshotRepository.Received(1).TryAddSnapshot(snapshot);
+ }
+
+ [Test]
+ public async Task AddSnapshot_DuplicateSnapshot_DisposesSnapshotAndReturnsResource()
+ {
+ StateId persistedStateId = CreateStateId(5);
+ _persistenceManager.GetCurrentPersistedStateId().Returns(persistedStateId);
+ _snapshotRepository.TryAddSnapshot(Arg.Any()).Returns(false);
+
+ ResourcePool realResourcePool = new(_config);
+ StateId snapshotFrom = CreateStateId(10);
+ StateId snapshotTo = CreateStateId(11);
+ Snapshot snapshot = realResourcePool.CreateSnapshot(snapshotFrom, snapshotTo, ResourcePool.Usage.MainBlockProcessing);
+ TransientResource transientResource = realResourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing);
+
+ await using FlatDbManager manager = CreateManager();
+ manager.AddSnapshot(snapshot, transientResource);
+
+ _resourcePool.Received(1).ReturnCachedResource(ResourcePool.Usage.MainBlockProcessing, transientResource);
+ }
+}
diff --git a/src/Nethermind/Nethermind.State.Flat.Test/FlatOverridableWorldScopeTests.cs b/src/Nethermind/Nethermind.State.Flat.Test/FlatOverridableWorldScopeTests.cs
new file mode 100644
index 00000000000..ca5a4c9ecae
--- /dev/null
+++ b/src/Nethermind/Nethermind.State.Flat.Test/FlatOverridableWorldScopeTests.cs
@@ -0,0 +1,287 @@
+// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited
+// SPDX-License-Identifier: LGPL-3.0-only
+
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Threading;
+using Autofac;
+using Nethermind.Config;
+using Nethermind.Core;
+using Nethermind.Core.Crypto;
+using Nethermind.Core.Test;
+using Nethermind.Core.Test.Builders;
+using Nethermind.Db;
+using Nethermind.Evm.State;
+using Nethermind.Init.Modules;
+using Nethermind.Int256;
+using Nethermind.Logging;
+using Nethermind.State.Flat.Persistence;
+using Nethermind.State.Flat.ScopeProvider;
+using NSubstitute;
+using NUnit.Framework;
+
+namespace Nethermind.State.Flat.Test;
+
+// Tests for FlatOverridableWorldScope: commits made through the overridable
+// scope must stay in its private snapshot store (never reaching the main
+// IFlatDbManager), remain readable in chained order, and be dropped entirely
+// by ResetOverrides().
+//
+// NOTE(review): several generic type arguments in this class appear to have
+// been stripped during extraction (e.g. Substitute.For(), Container.Resolve(),
+// Arg.Any(), .Bind(), .Keyed(), ReadOnlySpan) — restore the missing <T>
+// arguments before merging; the file does not compile as-is.
+public class FlatOverridableWorldScopeTests
+{
+ // Autofac-backed fixture wiring FlatWorldStateModule against a substituted
+ // main IFlatDbManager so that any leakage to the main DB can be asserted on.
+ private class TestContext : IDisposable
+ {
+ private readonly ContainerBuilder _containerBuilder;
+ private readonly CancellationTokenSource _cancellationTokenSource = new();
+
+ // Container is built lazily on first resolve, after the constructor has
+ // finished adding registrations.
+ private IContainer? _container;
+ private IContainer Container => _container ??= _containerBuilder.Build();
+
+ public ResourcePool ResourcePool => field ??= Container.Resolve();
+ public IFlatDbManager FlatDbManager => field ??= Container.Resolve();
+ public FlatOverridableWorldScope OverridableScope => field ??= Container.Resolve();
+ // Every AddSnapshot call that reaches the (mocked) main FlatDbManager is
+ // recorded here; tests assert this stays empty for overridable commits.
+ public List<(Snapshot Snapshot, TransientResource Resource)> FlatDbManagerAddSnapshotCalls { get; } = [];
+
+ public TestContext(FlatDbConfig? config = null)
+ {
+ config ??= new FlatDbConfig();
+ IPersistence.IPersistenceReader persistenceReader = Substitute.For();
+
+ _containerBuilder = new ContainerBuilder()
+ .AddModule(new FlatWorldStateModule(config))
+ .AddSingleton(_ => persistenceReader)
+ .AddSingleton(ctx =>
+ {
+ // Substituted main FlatDbManager: records snapshots instead of
+ // storing them, and answers all reads as "nothing found".
+ IFlatDbManager flatDbManager = Substitute.For();
+ flatDbManager.When(it => it.AddSnapshot(Arg.Any(), Arg.Any()))
+ .Do(c =>
+ {
+ Snapshot snapshot = (Snapshot)c[0];
+ TransientResource transientResource = (TransientResource)c[1];
+ FlatDbManagerAddSnapshotCalls.Add((snapshot, transientResource));
+ });
+
+ // Reads fall through to an empty snapshot bundle, so anything
+ // readable must come from the overridable scope's local store.
+ flatDbManager.GatherReadOnlySnapshotBundle(Arg.Any())
+ .Returns(_ =>
+ {
+ SnapshotPooledList snapshotList = new(0);
+ return new ReadOnlySnapshotBundle(snapshotList, Substitute.For(), false);
+ });
+
+ flatDbManager.HasStateForBlock(Arg.Any())
+ .Returns(false);
+
+ return flatDbManager;
+ })
+ .Bind()
+ .AddSingleton(_ => new CancellationTokenSourceProcessExitSource(_cancellationTokenSource))
+ .AddSingleton(LimboLogs.Instance)
+ .AddSingleton(config)
+ .AddSingleton(_ => Substitute.For())
+ .AddSingleton(_ => new TrieStoreScopeProvider.KeyValueWithBatchingBackedCodeDb(new TestMemDb()));
+
+ // Register keyed IDb for code database
+ _containerBuilder.RegisterInstance(new TestMemDb()).Keyed(DbNames.Code);
+ }
+
+ public void Dispose()
+ {
+ _cancellationTokenSource.Cancel();
+
+ // Release anything the mocked manager captured; the real manager would
+ // own these, so the fixture must dispose/return them itself.
+ foreach ((Snapshot snapshot, TransientResource resource) in FlatDbManagerAddSnapshotCalls)
+ {
+ snapshot.Dispose();
+ ResourcePool.ReturnCachedResource(ResourcePool.Usage.MainBlockProcessing, resource);
+ }
+
+ _container?.Dispose();
+ _cancellationTokenSource.Dispose();
+ }
+
+ // Minimal IProcessExitSource driven by the fixture's CTS so background
+ // components shut down when the test context is disposed.
+ private class CancellationTokenSourceProcessExitSource(CancellationTokenSource cancellationTokenSource) : IProcessExitSource
+ {
+ public CancellationToken Token => cancellationTokenSource.Token;
+ public void Exit(int exitCode) => throw new NotImplementedException();
+ }
+ }
+
+ // Happy path: a commit via the overridable scope is readable back through
+ // both a fresh scope and the GlobalStateReader, including storage slots.
+ [Test]
+ public void CommitThroughOverridableScope_StoresSnapshotLocally_ReadableWithinOverridableScope()
+ {
+ using TestContext ctx = new();
+ FlatOverridableWorldScope overridableScope = ctx.OverridableScope;
+
+ Address testAddress = TestItem.AddressA;
+ Account testAccount = TestItem.GenerateRandomAccount();
+ UInt256 storageIndex1 = 42;
+ UInt256 storageIndex2 = 100;
+ byte[] storageValue1 = [1, 2, 3, 4];
+ byte[] storageValue2 = [5, 6, 7, 8, 9, 10];
+
+ // Write account and storage, then commit
+ BlockHeader? baseBlock = null;
+ using (IWorldStateScopeProvider.IScope scope = overridableScope.WorldState.BeginScope(null))
+ {
+ using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1))
+ {
+ writeBatch.Set(testAddress, testAccount);
+
+ using (IWorldStateScopeProvider.IStorageWriteBatch storageBatch = writeBatch.CreateStorageWriteBatch(testAddress, 2))
+ {
+ storageBatch.Set(storageIndex1, storageValue1);
+ storageBatch.Set(storageIndex2, storageValue2);
+ }
+ }
+ scope.Commit(1);
+ // Header carrying the committed state root; used as the read anchor below.
+ baseBlock = Build.A.BlockHeader.WithNumber(1).WithStateRoot(scope.RootHash).TestObject;
+ }
+
+ // Verify account readable within new scope
+ using (IWorldStateScopeProvider.IScope scope = overridableScope.WorldState.BeginScope(baseBlock))
+ {
+ Account? readAccount = scope.Get(testAddress);
+ Assert.That(readAccount, Is.Not.Null);
+ Assert.That(readAccount!.Balance, Is.EqualTo(testAccount.Balance));
+ }
+
+ // Verify account readable through GlobalStateReader
+ Assert.That(overridableScope.GlobalStateReader.TryGetAccount(baseBlock, testAddress, out AccountStruct acc), Is.True);
+ Assert.That(acc.Balance, Is.EqualTo(testAccount.Balance));
+
+ // Verify storage readable through GlobalStateReader
+ ReadOnlySpan readValue1 = overridableScope.GlobalStateReader.GetStorage(baseBlock, testAddress, storageIndex1);
+ ReadOnlySpan readValue2 = overridableScope.GlobalStateReader.GetStorage(baseBlock, testAddress, storageIndex2);
+ Assert.That(readValue1.ToArray(), Is.EqualTo(storageValue1), "Storage slot 1 should be readable");
+ Assert.That(readValue2.ToArray(), Is.EqualTo(storageValue2), "Storage slot 2 should be readable");
+
+ // Verify non-existent slot returns zeros
+ ReadOnlySpan nonExistent = overridableScope.GlobalStateReader.GetStorage(baseBlock, testAddress, 999);
+ Assert.That(nonExistent.ToArray().All(b => b == 0), Is.True, "Non-existent storage slot should return zeros");
+ }
+
+ // Isolation: overridable commits must never be forwarded to the main manager.
+ [Test]
+ public void CommitThroughOverridableScope_DoesNotCallMainFlatDbManager()
+ {
+ using TestContext ctx = new();
+ FlatOverridableWorldScope overridableScope = ctx.OverridableScope;
+
+ Address testAddress = TestItem.AddressA;
+ Account testAccount = TestItem.GenerateRandomAccount();
+
+ BlockHeader? baseBlock = null;
+ using (IWorldStateScopeProvider.IScope scope = overridableScope.WorldState.BeginScope(baseBlock))
+ {
+ using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1))
+ {
+ writeBatch.Set(testAddress, testAccount);
+ }
+ scope.Commit(1);
+ }
+
+ // The main FlatDbManager should NOT receive any AddSnapshot calls
+ // because commits go to FlatOverridableWorldScope's local _snapshots dictionary
+ Assert.That(ctx.FlatDbManagerAddSnapshotCalls, Is.Empty);
+ }
+
+ // Chained commits: each block builds on the previous one; reads anchored at
+ // each block must see exactly the accounts committed up to that block.
+ [Test]
+ public void MultipleCommits_CreateChainedSnapshots_AllReadable()
+ {
+ using TestContext ctx = new();
+ FlatOverridableWorldScope overridableScope = ctx.OverridableScope;
+
+ Address addressA = TestItem.AddressA;
+ Address addressB = TestItem.AddressB;
+ Address addressC = TestItem.AddressC;
+ Account accountA = TestItem.GenerateRandomAccount();
+ Account accountB = TestItem.GenerateRandomAccount();
+ Account accountC = TestItem.GenerateRandomAccount();
+
+ // Commit block 1 with account A
+ BlockHeader? block1 = null;
+ using (IWorldStateScopeProvider.IScope scope = overridableScope.WorldState.BeginScope(null))
+ {
+ using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1))
+ {
+ writeBatch.Set(addressA, accountA);
+ }
+ scope.Commit(1);
+ block1 = Build.A.BlockHeader.WithNumber(1).WithStateRoot(scope.RootHash).TestObject;
+ }
+
+ // Commit block 2 with account B (building on block 1)
+ BlockHeader? block2 = null;
+ using (IWorldStateScopeProvider.IScope scope = overridableScope.WorldState.BeginScope(block1))
+ {
+ using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1))
+ {
+ writeBatch.Set(addressB, accountB);
+ }
+ scope.Commit(2);
+ block2 = Build.A.BlockHeader.WithNumber(2).WithStateRoot(scope.RootHash).TestObject;
+ }
+
+ // Commit block 3 with account C (building on block 2)
+ BlockHeader? block3 = null;
+ using (IWorldStateScopeProvider.IScope scope = overridableScope.WorldState.BeginScope(block2))
+ {
+ using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1))
+ {
+ writeBatch.Set(addressC, accountC);
+ }
+ scope.Commit(3);
+ block3 = Build.A.BlockHeader.WithNumber(3).WithStateRoot(scope.RootHash).TestObject;
+ }
+
+ // Verify final state (block 3) sees all three accounts
+ Assert.That(overridableScope.GlobalStateReader.TryGetAccount(block3, addressA, out AccountStruct accA3), Is.True, "Block 3 should see account A");
+ Assert.That(accA3.Balance, Is.EqualTo(accountA.Balance));
+ Assert.That(overridableScope.GlobalStateReader.TryGetAccount(block3, addressB, out AccountStruct accB3), Is.True, "Block 3 should see account B");
+ Assert.That(accB3.Balance, Is.EqualTo(accountB.Balance));
+ Assert.That(overridableScope.GlobalStateReader.TryGetAccount(block3, addressC, out AccountStruct accC3), Is.True, "Block 3 should see account C");
+ Assert.That(accC3.Balance, Is.EqualTo(accountC.Balance));
+
+ // Verify intermediate state (block 2) sees A+B but not C
+ Assert.That(overridableScope.GlobalStateReader.TryGetAccount(block2, addressA, out AccountStruct accA2), Is.True, "Block 2 should see account A");
+ Assert.That(accA2.Balance, Is.EqualTo(accountA.Balance));
+ Assert.That(overridableScope.GlobalStateReader.TryGetAccount(block2, addressB, out AccountStruct accB2), Is.True, "Block 2 should see account B");
+ Assert.That(accB2.Balance, Is.EqualTo(accountB.Balance));
+ Assert.That(overridableScope.GlobalStateReader.TryGetAccount(block2, addressC, out _), Is.False, "Block 2 should NOT see account C");
+
+ // Verify initial state (block 1) sees only A
+ Assert.That(overridableScope.GlobalStateReader.TryGetAccount(block1, addressA, out AccountStruct accA1), Is.True, "Block 1 should see account A");
+ Assert.That(accA1.Balance, Is.EqualTo(accountA.Balance));
+ Assert.That(overridableScope.GlobalStateReader.TryGetAccount(block1, addressB, out _), Is.False, "Block 1 should NOT see account B");
+ Assert.That(overridableScope.GlobalStateReader.TryGetAccount(block1, addressC, out _), Is.False, "Block 1 should NOT see account C");
+
+ // Verify no calls to main FlatDbManager
+ Assert.That(ctx.FlatDbManagerAddSnapshotCalls, Is.Empty);
+ }
+
+ // Reset semantics: after ResetOverrides() all locally held snapshots are
+ // gone and reads fall through to the (empty) main FlatDbManager.
+ [Test]
+ public void ResetOverrides_DisposesAllLocalSnapshots()
+ {
+ using TestContext ctx = new();
+ FlatOverridableWorldScope overridableScope = ctx.OverridableScope;
+
+ Address testAddress = TestItem.AddressA;
+ Account testAccount = TestItem.GenerateRandomAccount();
+
+ // Commit multiple states
+ BlockHeader? block1 = null;
+ using (IWorldStateScopeProvider.IScope scope = overridableScope.WorldState.BeginScope(null))
+ {
+ using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1))
+ {
+ writeBatch.Set(testAddress, testAccount);
+ }
+ scope.Commit(1);
+ block1 = Build.A.BlockHeader.WithNumber(1).WithStateRoot(scope.RootHash).TestObject;
+ }
+
+ // Verify state exists before reset
+ Assert.That(overridableScope.GlobalStateReader.TryGetAccount(block1, testAddress, out _), Is.True, "Should see account before reset");
+
+ // Reset overrides
+ overridableScope.ResetOverrides();
+
+ // After reset, the local snapshots are cleared, so state falls through to main FlatDbManager
+ // which is mocked to return empty/not found
+ Assert.That(overridableScope.GlobalStateReader.TryGetAccount(block1, testAddress, out _), Is.False, "Should NOT see account after reset");
+ }
+}
diff --git a/src/Nethermind/Nethermind.State.Flat.Test/FlatTrieVerifierTests.cs b/src/Nethermind/Nethermind.State.Flat.Test/FlatTrieVerifierTests.cs
new file mode 100644
index 00000000000..381201f1fa4
--- /dev/null
+++ b/src/Nethermind/Nethermind.State.Flat.Test/FlatTrieVerifierTests.cs
@@ -0,0 +1,392 @@
+// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited
+// SPDX-License-Identifier: LGPL-3.0-only
+
+using System;
+using System.Collections.Generic;
+using System.Threading;
+using Nethermind.Core;
+using Nethermind.Core.Crypto;
+using Nethermind.Core.Extensions;
+using Nethermind.Core.Test;
+using Nethermind.Core.Test.Builders;
+using Nethermind.Db;
+using Nethermind.Int256;
+using Nethermind.Logging;
+using Nethermind.Serialization.Rlp;
+using Nethermind.State.Flat.Persistence;
+using Nethermind.Trie;
+using Nethermind.Trie.Pruning;
+using NUnit.Framework;
+
+namespace Nethermind.State.Flat.Test;
+
+/// <summary>
+/// Tests for FlatTrieVerifier which handles both hashed mode (single-pass co-iteration)
+/// and preimage mode (two-pass verification).
+/// </summary>
+[TestFixture(FlatLayout.Flat)]
+[TestFixture(FlatLayout.PreimageFlat)]
+public class FlatTrieVerifierTests(FlatLayout layout)
+{
+ private MemDb _trieDb = null!;
+ private RawScopedTrieStore _trieStore = null!;
+ private StateTree _stateTree = null!;
+ private ILogManager _logManager = null!;
+ private TestMemColumnsDb _columnsDb = null!;
+ private IPersistence _persistence = null!;
+
+ [SetUp]
+ public void SetUp()
+ {
+ _trieDb = new MemDb();
+ _trieStore = new RawScopedTrieStore(_trieDb);
+ _stateTree = new StateTree(_trieStore, LimboLogs.Instance);
+ _logManager = LimboLogs.Instance;
+
+ // Choose the persistence flavor matching the fixture's layout parameter.
+ _columnsDb = new TestMemColumnsDb();
+ _persistence = layout == FlatLayout.PreimageFlat
+ ? new PreimageRocksdbPersistence(_columnsDb)
+ : new RocksDbPersistence(_columnsDb);
+ }
+
+ [TearDown]
+ public void TearDown()
+ {
+ _trieDb.Dispose();
+ _columnsDb.Dispose();
+ }
+
+ // Returns the persistence layer's current StateId (used as write-batch base).
+ private StateId GetCurrentState()
+ {
+ using IPersistence.IPersistenceReader reader = _persistence.CreateReader();
+ return reader.CurrentState;
+ }
+
+ // Writes a single account into the flat layer via a persistence write batch.
+ private void WriteAccountToFlat(Address address, Account account, StateId toState)
+ {
+ StateId fromState = GetCurrentState();
+ using IPersistence.IWriteBatch batch = _persistence.CreateWriteBatch(fromState, toState, WriteFlags.DisableWAL);
+ batch.SetAccount(address, account);
+ }
+
+ // Writes several accounts into the flat layer in one write batch.
+ private void WriteAccountsToFlat((Address address, Account account)[] accounts, StateId toState)
+ {
+ StateId fromState = GetCurrentState();
+ using IPersistence.IWriteBatch batch = _persistence.CreateWriteBatch(fromState, toState, WriteFlags.DisableWAL);
+ foreach ((Address address, Account account) in accounts)
+ {
+ batch.SetAccount(address, account);
+ }
+ }
+
+ // Writes a storage slot straight into the storage column, bypassing the
+ // persistence API, so tests can plant matching or corrupted values.
+ private void WriteStorageDirectToDb(Address address, UInt256 slot, byte[] value)
+ {
+ TestMemDb storageDb = (TestMemDb)_columnsDb.GetColumnDb(FlatDbColumns.Storage);
+
+ ValueHash256 addrHash;
+ ValueHash256 slotHash;
+
+ if (layout == FlatLayout.PreimageFlat)
+ {
+ // Preimage layout keys by the raw address/slot, not their keccaks.
+ addrHash = CreatePreimageAddressKey(address);
+ slotHash = ValueKeccak.Zero;
+ slot.ToBigEndian(slotHash.BytesAsSpan);
+ }
+ else
+ {
+ addrHash = ValueKeccak.Compute(address.Bytes);
+ Span<byte> slotBytes = stackalloc byte[32];
+ slot.ToBigEndian(slotBytes);
+ slotHash = ValueKeccak.Compute(slotBytes);
+ }
+
+ // 52-byte storage key layout: addrHash[0..4] | slotHash[0..32] | addrHash[4..20].
+ byte[] storageKey = new byte[52];
+ addrHash.Bytes[..4].CopyTo(storageKey.AsSpan()[..4]);
+ slotHash.Bytes.CopyTo(storageKey.AsSpan()[4..36]);
+ addrHash.Bytes[4..20].CopyTo(storageKey.AsSpan()[36..52]);
+
+ // Values are stored compacted (leading zeros removed).
+ storageDb.Set(storageKey, ((ReadOnlySpan<byte>)value).WithoutLeadingZeros().ToArray());
+ }
+
+ // Overwrites an account's flat-layer entry with a different (corrupt) value,
+ // leaving the trie untouched, to provoke a mismatch.
+ private void CorruptAccountInFlat(Address address, Account corruptedAccount)
+ {
+ TestMemDb accountDb = (TestMemDb)_columnsDb.GetColumnDb(FlatDbColumns.Account);
+ ValueHash256 addrKey = layout == FlatLayout.PreimageFlat
+ ? CreatePreimageAddressKey(address)
+ : ValueKeccak.Compute(address.Bytes);
+
+ using var stream = AccountDecoder.Slim.EncodeToNewNettyStream(corruptedAccount);
+ accountDb.Set(addrKey.BytesAsSpan[..20], stream.AsSpan().ToArray());
+ }
+
+ // Preimage-mode address key: the 20-byte address left-aligned in a 32-byte
+ // hash-shaped buffer (no keccak involved).
+ private static ValueHash256 CreatePreimageAddressKey(Address address)
+ {
+ ValueHash256 fakeHash = ValueKeccak.Zero;
+ address.Bytes.CopyTo(fakeHash.BytesAsSpan);
+ return fakeHash;
+ }
+
+ // Builds and commits a storage trie for the given address so the account's
+ // storage root can reference it.
+ private StorageTree CreateStorageTree(Address address, (UInt256 slot, byte[] value)[] slots)
+ {
+ Hash256 addressHash = Keccak.Compute(address.Bytes);
+ IScopedTrieStore storageTrieStore = (IScopedTrieStore)_trieStore.GetStorageTrieNodeResolver(addressHash);
+ StorageTree storageTree = new StorageTree(storageTrieStore, _logManager);
+
+ foreach ((UInt256 slot, byte[] value) in slots)
+ {
+ storageTree.Set(slot, value);
+ }
+ storageTree.Commit();
+ return storageTree;
+ }
+
+ [Test]
+ public void Verify_EmptyState_Succeeds()
+ {
+ Hash256 stateRoot = Keccak.EmptyTreeHash;
+
+ using IPersistence.IPersistenceReader reader = _persistence.CreateReader();
+ FlatTrieVerifier verifier = new FlatTrieVerifier(_logManager);
+ verifier.Verify(reader, _trieStore, stateRoot, CancellationToken.None);
+
+ // Nothing on either side: all counters stay at zero.
+ Assert.That(verifier.Stats.AccountCount, Is.EqualTo(0));
+ Assert.That(verifier.Stats.MismatchedAccount, Is.EqualTo(0));
+ Assert.That(verifier.Stats.MissingInFlat, Is.EqualTo(0));
+ Assert.That(verifier.Stats.MissingInTrie, Is.EqualTo(0));
+ }
+
+ [Test]
+ public void Verify_SingleAccount_Matches()
+ {
+ Address address = TestItem.AddressA;
+ Account account = new Account(1, 100);
+
+ // Same account in both trie and flat -> clean verification.
+ _stateTree.Set(address, account);
+ _stateTree.Commit();
+ Hash256 stateRoot = _stateTree.RootHash;
+
+ StateId toState = new StateId(1, stateRoot);
+ WriteAccountToFlat(address, account, toState);
+
+ using IPersistence.IPersistenceReader reader = _persistence.CreateReader();
+ FlatTrieVerifier verifier = new FlatTrieVerifier(_logManager);
+ verifier.Verify(reader, _trieStore, stateRoot, CancellationToken.None);
+
+ Assert.That(verifier.Stats.AccountCount, Is.EqualTo(1));
+ Assert.That(verifier.Stats.MismatchedAccount, Is.EqualTo(0));
+ Assert.That(verifier.Stats.MissingInFlat, Is.EqualTo(0));
+ Assert.That(verifier.Stats.MissingInTrie, Is.EqualTo(0));
+ }
+
+ [Test]
+ public void Verify_MultipleAccounts_AllMatch()
+ {
+ Address addressA = TestItem.AddressA;
+ Address addressB = TestItem.AddressB;
+ Address addressC = TestItem.AddressC;
+
+ Account accountA = new Account(1, 100);
+ Account accountB = new Account(2, 200);
+ Account accountC = new Account(3, 300);
+
+ _stateTree.Set(addressA, accountA);
+ _stateTree.Set(addressB, accountB);
+ _stateTree.Set(addressC, accountC);
+ _stateTree.Commit();
+ Hash256 stateRoot = _stateTree.RootHash;
+
+ StateId toState = new StateId(1, stateRoot);
+ WriteAccountsToFlat([(addressA, accountA), (addressB, accountB), (addressC, accountC)], toState);
+
+ using IPersistence.IPersistenceReader reader = _persistence.CreateReader();
+ FlatTrieVerifier verifier = new FlatTrieVerifier(_logManager);
+ verifier.Verify(reader, _trieStore, stateRoot, CancellationToken.None);
+
+ Assert.That(verifier.Stats.AccountCount, Is.EqualTo(3));
+ Assert.That(verifier.Stats.MismatchedAccount, Is.EqualTo(0));
+ Assert.That(verifier.Stats.MissingInFlat, Is.EqualTo(0));
+ Assert.That(verifier.Stats.MissingInTrie, Is.EqualTo(0));
+ }
+
+ [TestCase(1UL, 100UL, 1UL, 200UL, Description = "Mismatched balance")]
+ [TestCase(5UL, 100UL, 10UL, 100UL, Description = "Mismatched nonce")]
+ public void Verify_MismatchedAccount_DetectsMismatch(ulong trieNonce, ulong trieBalance, ulong flatNonce, ulong flatBalance)
+ {
+ Address address = TestItem.AddressA;
+ Account trieAccount = new Account(trieNonce, trieBalance);
+ Account flatAccount = new Account(flatNonce, flatBalance);
+
+ _stateTree.Set(address, trieAccount);
+ _stateTree.Commit();
+ Hash256 stateRoot = _stateTree.RootHash;
+
+ // Write the correct account, then overwrite it with the corrupt one.
+ StateId toState = new StateId(1, stateRoot);
+ WriteAccountToFlat(address, trieAccount, toState);
+ CorruptAccountInFlat(address, flatAccount);
+
+ using IPersistence.IPersistenceReader reader = _persistence.CreateReader();
+ FlatTrieVerifier verifier = new FlatTrieVerifier(_logManager);
+ verifier.Verify(reader, _trieStore, stateRoot, CancellationToken.None);
+
+ Assert.That(verifier.Stats.AccountCount, Is.EqualTo(1));
+ Assert.That(verifier.Stats.MismatchedAccount, Is.EqualTo(1));
+ }
+
+ [Test]
+ public void Verify_AccountInTrieNotInFlat_DetectsMissingInFlat()
+ {
+ Address address = TestItem.AddressA;
+ Account account = new Account(1, 100);
+
+ // Add to trie but not to flat
+ _stateTree.Set(address, account);
+ _stateTree.Commit();
+ Hash256 stateRoot = _stateTree.RootHash;
+
+ using IPersistence.IPersistenceReader reader = _persistence.CreateReader();
+ FlatTrieVerifier verifier = new FlatTrieVerifier(_logManager);
+ verifier.Verify(reader, _trieStore, stateRoot, CancellationToken.None);
+
+ Assert.That(verifier.Stats.AccountCount, Is.EqualTo(1));
+ Assert.That(verifier.Stats.MissingInFlat, Is.EqualTo(1));
+ Assert.That(verifier.Stats.MissingInTrie, Is.EqualTo(0));
+ }
+
+ [Test]
+ public void Verify_AccountInFlatNotInTrie_DetectsMissingInTrie()
+ {
+ Address address = TestItem.AddressA;
+ Account account = new Account(1, 100);
+
+ // Empty trie
+ Hash256 stateRoot = Keccak.EmptyTreeHash;
+
+ // Add to flat only
+ StateId toState = new StateId(1, stateRoot);
+ WriteAccountToFlat(address, account, toState);
+
+ using IPersistence.IPersistenceReader reader = _persistence.CreateReader();
+ FlatTrieVerifier verifier = new FlatTrieVerifier(_logManager);
+ verifier.Verify(reader, _trieStore, stateRoot, CancellationToken.None);
+
+ Assert.That(verifier.Stats.AccountCount, Is.EqualTo(1));
+ Assert.That(verifier.Stats.MissingInFlat, Is.EqualTo(0));
+ Assert.That(verifier.Stats.MissingInTrie, Is.EqualTo(1));
+ }
+
+ [Test]
+ public void Verify_FlatHasExtraAccounts_ReportsMissing()
+ {
+ // Trie has 2 accounts, flat has 3 (1 extra)
+ Address addressA = TestItem.AddressA;
+ Address addressB = TestItem.AddressB;
+ Address addressExtra = TestItem.AddressC;
+
+ Account accountA = new Account(1, 100);
+ Account accountB = new Account(2, 200);
+ Account accountExtra = new Account(3, 300);
+
+ _stateTree.Set(addressA, accountA);
+ _stateTree.Set(addressB, accountB);
+ // Note: addressExtra NOT added to trie
+ _stateTree.Commit();
+ Hash256 stateRoot = _stateTree.RootHash;
+
+ StateId toState = new StateId(1, stateRoot);
+ WriteAccountsToFlat([(addressA, accountA), (addressB, accountB), (addressExtra, accountExtra)], toState);
+
+ using IPersistence.IPersistenceReader reader = _persistence.CreateReader();
+ FlatTrieVerifier verifier = new FlatTrieVerifier(_logManager);
+ verifier.Verify(reader, _trieStore, stateRoot, CancellationToken.None);
+
+ Assert.That(verifier.Stats.AccountCount, Is.EqualTo(3));
+ Assert.That(verifier.Stats.MismatchedAccount, Is.EqualTo(0));
+ Assert.That(verifier.Stats.MissingInTrie, Is.EqualTo(1));
+ }
+
+ [Test]
+ public void Verify_Storage_AllMatch()
+ {
+ Address address = TestItem.AddressA;
+ StorageTree storageTree = CreateStorageTree(address, [((UInt256)1, [0x11]), ((UInt256)2, [0x22])]);
+ Account account = new Account(1, 100, storageTree.RootHash, Keccak.Compute([1]));
+
+ _stateTree.Set(address, account);
+ _stateTree.Commit();
+ Hash256 stateRoot = _stateTree.RootHash;
+
+ // Mirror both slots into the flat storage column so they match the trie.
+ StateId toState = new StateId(1, stateRoot);
+ WriteAccountToFlat(address, account, toState);
+ WriteStorageDirectToDb(address, 1, [0x11]);
+ WriteStorageDirectToDb(address, 2, [0x22]);
+
+ using IPersistence.IPersistenceReader reader = _persistence.CreateReader();
+ FlatTrieVerifier verifier = new FlatTrieVerifier(_logManager);
+ verifier.Verify(reader, _trieStore, stateRoot, CancellationToken.None);
+
+ Assert.That(verifier.Stats.AccountCount, Is.EqualTo(1));
+ Assert.That(verifier.Stats.SlotCount, Is.EqualTo(2));
+ Assert.That(verifier.Stats.MismatchedAccount, Is.EqualTo(0));
+ Assert.That(verifier.Stats.MismatchedSlot, Is.EqualTo(0));
+ }
+
+ [Test]
+ public void Verify_Storage_Mismatch()
+ {
+ Address address = TestItem.AddressA;
+ StorageTree storageTree = CreateStorageTree(address, [((UInt256)1, [0x11])]);
+ Account account = new Account(1, 100, storageTree.RootHash, Keccak.Compute([1]));
+
+ _stateTree.Set(address, account);
+ _stateTree.Commit();
+ Hash256 stateRoot = _stateTree.RootHash;
+
+ StateId toState = new StateId(1, stateRoot);
+ WriteAccountToFlat(address, account, toState);
+ WriteStorageDirectToDb(address, 1, [0xFF]); // Wrong value
+
+ using IPersistence.IPersistenceReader reader = _persistence.CreateReader();
+ FlatTrieVerifier verifier = new FlatTrieVerifier(_logManager);
+ verifier.Verify(reader, _trieStore, stateRoot, CancellationToken.None);
+
+ Assert.That(verifier.Stats.AccountCount, Is.EqualTo(1));
+ Assert.That(verifier.Stats.SlotCount, Is.EqualTo(1));
+ Assert.That(verifier.Stats.MismatchedSlot, Is.EqualTo(1));
+ }
+
+ [Test]
+ public void Verify_MixedScenario_DetectsAllIssues()
+ {
+ // Account A: in both, matches
+ Address addressA = TestItem.AddressA;
+ Account accountA = new Account(1, 100);
+
+ // Account B: in trie only (missing in flat)
+ Address addressB = TestItem.AddressB;
+ Account accountB = new Account(2, 200);
+
+ // Account C: mismatched
+ Address addressC = TestItem.AddressC;
+ Account trieAccountC = new Account(3, 300);
+ Account flatAccountC = new Account(3, 999);
+
+ _stateTree.Set(addressA, accountA);
+ _stateTree.Set(addressB, accountB);
+ _stateTree.Set(addressC, trieAccountC);
+ _stateTree.Commit();
+ Hash256 stateRoot = _stateTree.RootHash;
+
+ StateId toState = new StateId(1, stateRoot);
+ WriteAccountsToFlat([(addressA, accountA), (addressC, trieAccountC)], toState);
+ // Note: addressB not added to flat
+ CorruptAccountInFlat(addressC, flatAccountC);
+
+ using IPersistence.IPersistenceReader reader = _persistence.CreateReader();
+ FlatTrieVerifier verifier = new FlatTrieVerifier(_logManager);
+ verifier.Verify(reader, _trieStore, stateRoot, CancellationToken.None);
+
+ Assert.That(verifier.Stats.AccountCount, Is.EqualTo(3));
+ Assert.That(verifier.Stats.MismatchedAccount, Is.EqualTo(1)); // Account C mismatched
+ Assert.That(verifier.Stats.MissingInFlat, Is.EqualTo(1)); // Account B missing in flat
+ }
+}
diff --git a/src/Nethermind/Nethermind.State.Flat.Test/FlatWorldStateScopeProviderTests.cs b/src/Nethermind/Nethermind.State.Flat.Test/FlatWorldStateScopeProviderTests.cs
new file mode 100644
index 00000000000..e0291d3d6df
--- /dev/null
+++ b/src/Nethermind/Nethermind.State.Flat.Test/FlatWorldStateScopeProviderTests.cs
@@ -0,0 +1,764 @@
+// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited
+// SPDX-License-Identifier: LGPL-3.0-only
+
+using System;
+using System.Threading;
+using Autofac;
+using Nethermind.Blockchain.Synchronization;
+using Nethermind.Config;
+using Nethermind.Core;
+using Nethermind.Core.Collections;
+using Nethermind.Core.Crypto;
+using Nethermind.Core.Test;
+using Nethermind.Core.Test.Builders;
+using Nethermind.Db;
+using Nethermind.Evm.State;
+using Nethermind.Init.Modules;
+using Nethermind.Int256;
+using Nethermind.Logging;
+using Nethermind.State.Flat.Persistence;
+using Nethermind.State.Flat.ScopeProvider;
+using Nethermind.Trie;
+using Nethermind.Trie.Pruning;
+using NSubstitute;
+using NUnit.Framework;
+
+namespace Nethermind.State.Flat.Test;
+
+public class FlatWorldStateScopeProviderTests
+{
+
+ // Autofac-backed fixture for FlatWorldStateScope tests: the main
+ // IFlatDbManager is substituted so committed snapshots are captured (and
+ // later disposed/returned) instead of being persisted.
+ //
+ // NOTE(review): generic type arguments appear stripped throughout this class
+ // (Container.Resolve(), Substitute.For(), Arg.Any(), .Bind(), RegisterType())
+ // — restore the missing <T> arguments before merging.
+ private class TestContext : IDisposable
+ {
+ private readonly ContainerBuilder _containerBuilder;
+ private readonly CancellationTokenSource _cancellationTokenSource = new CancellationTokenSource();
+
+ // Built lazily so all registrations from the constructor are present first.
+ private IContainer? _container;
+ private IContainer Container => _container ??= _containerBuilder.Build();
+
+ public ResourcePool ResourcePool => field ??= Container.Resolve();
+ // Snapshot list handed to the scope as its read-only view; populated via AddSnapshot.
+ public SnapshotPooledList ReadOnlySnapshots = new SnapshotPooledList(0);
+ public IPersistence.IPersistenceReader PersistenceReader => field ??= Container.Resolve();
+ // Last snapshot/resource captured from the mocked manager; older ones are
+ // released immediately so the fixture only ever owns one of each.
+ public Snapshot? LastCommittedSnapshot { get; set; }
+ public TransientResource? LastCreatedCachedResource { get; set; }
+
+ public TestContext(FlatDbConfig? config = null)
+ {
+ config ??= new FlatDbConfig();
+
+ _containerBuilder = new ContainerBuilder()
+ .AddModule(new FlatWorldStateModule(config))
+ .AddSingleton(_ => Substitute.For())
+ .AddSingleton((ctx) =>
+ {
+ ResourcePool resourcePool = ctx.Resolve();
+ // Substituted main FlatDbManager: keeps only the most recent
+ // snapshot and returns superseded resources to the pool.
+ IFlatDbManager flatDiff = Substitute.For();
+ flatDiff.When(it => it.AddSnapshot(Arg.Any(), Arg.Any()))
+ .Do(c =>
+ {
+ Snapshot snapshot = (Snapshot)c[0];
+ TransientResource transientResource = (TransientResource)c[1];
+
+ if (LastCommittedSnapshot is not null)
+ {
+ LastCommittedSnapshot.Dispose();
+ }
+ LastCommittedSnapshot = snapshot;
+
+ if (LastCreatedCachedResource is not null)
+ {
+ resourcePool.ReturnCachedResource(ResourcePool.Usage.MainBlockProcessing, transientResource);
+ }
+ LastCreatedCachedResource = transientResource;
+ });
+
+ return flatDiff;
+ })
+ .Bind()
+ .AddSingleton(_ => new CancellationTokenSourceProcessExitSource(_cancellationTokenSource))
+ .AddSingleton(LimboLogs.Instance)
+ .AddSingleton(config)
+ .AddSingleton(_ => new TrieStoreScopeProvider.KeyValueWithBatchingBackedCodeDb(new TestMemDb()))
+ ;
+
+ // Externally owned because snapshot bundle take ownership
+ _containerBuilder.RegisterType()
+ .WithParameter(TypedParameter.From(false)) // recordDetailedMetrics
+ .WithParameter(TypedParameter.From(ReadOnlySnapshots))
+ .ExternallyOwned();
+
+ ConfigureSnapshotBundle();
+ ConfigureFlatWorldStateScope();
+ }
+
+ // Registers the snapshot bundle as a singleton pinned to MainBlockProcessing usage.
+ private void ConfigureSnapshotBundle()
+ {
+ _containerBuilder.RegisterType()
+ .SingleInstance()
+ .WithParameter(TypedParameter.From(ResourcePool.Usage.MainBlockProcessing))
+ .ExternallyOwned();
+ // NOTE(review): stray empty statement below — remove before merge.
+ ;
+ }
+
+ // Registers the scope under test, anchored at a pre-genesis-like StateId.
+ private void ConfigureFlatWorldStateScope()
+ {
+ _containerBuilder.RegisterType()
+ .SingleInstance()
+ .WithParameter(TypedParameter.From(new StateId(0, Keccak.EmptyTreeHash)))
+ ;
+ }
+
+ public FlatWorldStateScope Scope => Container.Resolve();
+
+ public void Dispose()
+ {
+ _cancellationTokenSource.Cancel();
+
+ // Release the single snapshot/resource pair the fixture may still own.
+ LastCommittedSnapshot?.Dispose();
+ if (LastCreatedCachedResource is not null) ResourcePool.ReturnCachedResource(ResourcePool.Usage.MainBlockProcessing, LastCreatedCachedResource);
+
+ _container?.Dispose();
+ _cancellationTokenSource.Dispose();
+ }
+
+ // Minimal IProcessExitSource bound to the fixture's CTS.
+ public class CancellationTokenSourceProcessExitSource(CancellationTokenSource cancellationTokenSource) : IProcessExitSource
+ {
+ public CancellationToken Token => cancellationTokenSource.Token;
+
+ public void Exit(int exitCode) => throw new NotImplementedException();
+ }
+
+ // Appends a pre-populated snapshot layer to the scope's read-only view.
+ // Later additions shadow earlier ones on reads.
+ public void AddSnapshot(Action populator)
+ {
+ SnapshotContent snapshotContent = ResourcePool.GetSnapshotContent(ResourcePool.Usage.MainBlockProcessing);
+ populator(snapshotContent);
+
+ ReadOnlySnapshots.Add(new Snapshot(
+ StateId.PreGenesis,
+ StateId.PreGenesis,
+ snapshotContent,
+ ResourcePool,
+ ResourcePool.Usage.MainBlockProcessing));
+ }
+ }
+
+
+ #region Account and Slot Layering Tests
+
+ // Newer snapshot layers must shadow older ones independently per key:
+ // the account comes from layer 3 while the slot still comes from layer 2.
+ [Test]
+ public void TestAccountAndSlotShadowingInSnapshots()
+ {
+ using TestContext ctx = new TestContext();
+
+ Address testAddress = TestItem.AddressA;
+ UInt256 slotIndex = 1;
+
+ Account olderAccount = TestItem.GenerateRandomAccount();
+ byte[] olderSlotValue = { 0x01, 0x02 };
+
+ Account newerAccount = TestItem.GenerateRandomAccount();
+ byte[] newerSlotValue = { 0x03, 0x04, 0x05 };
+
+ // Layer 1: Older snapshot
+ ctx.AddSnapshot(content =>
+ {
+ content.Accounts[testAddress] = olderAccount;
+ content.Storages[(testAddress, slotIndex)] = SlotValue.FromSpanWithoutLeadingZero(olderSlotValue);
+ });
+
+ // Layer 2: Newer snapshot (shadowing Layer 1)
+ ctx.AddSnapshot(content =>
+ {
+ content.Accounts[testAddress] = newerAccount;
+ content.Storages[(testAddress, slotIndex)] = SlotValue.FromSpanWithoutLeadingZero(newerSlotValue);
+ });
+
+ // Layer 3: Another newer snapshot, but only for account
+ Account newestAccount = TestItem.GenerateRandomAccount();
+ ctx.AddSnapshot(content => content.Accounts[testAddress] = newestAccount);
+
+ // Verify account shadowed by newest snapshot (newestAccount)
+ Assert.That(ctx.Scope.Get(testAddress), Is.EqualTo(newestAccount));
+
+ // Verify slot shadowed by Layer 2 snapshot (newerSlotValue)
+ IWorldStateScopeProvider.IStorageTree storageTree = ctx.Scope.CreateStorageTree(testAddress);
+ Assert.That(storageTree.Get(slotIndex), Is.EqualTo(newerSlotValue));
+ }
+
+ // With no snapshot layers, account and slot reads must fall through to
+ // the substituted persistence reader.
+ // NOTE(review): "ref Arg.Any()" below has lost its generic argument
+ // (presumably ref Arg.Any<SlotValue>()); restore before merge.
+ [Test]
+ public void TestAccountAndSlotFromPersistence()
+ {
+ using TestContext ctx = new TestContext();
+
+ Address testAddress = TestItem.AddressA;
+ UInt256 slotIndex = 1;
+ Account persistedAccount = TestItem.GenerateRandomAccount();
+ byte[] persistedSlotValue = { 0xDE, 0xAD, 0xBE, 0xEF };
+
+ // Setup Persistence Reader
+ ctx.PersistenceReader.GetAccount(testAddress).Returns(persistedAccount);
+ SlotValue outValue = SlotValue.FromSpanWithoutLeadingZero(persistedSlotValue);
+ ctx.PersistenceReader.TryGetSlot(testAddress, slotIndex, ref Arg.Any())
+ .Returns(x =>
+ {
+ // x[2] is the by-ref slot argument: write the stubbed value into it.
+ x[2] = outValue;
+ return true;
+ });
+
+ // Verify both are retrieved from persistence
+ Assert.That(ctx.Scope.Get(testAddress), Is.EqualTo(persistedAccount));
+
+ IWorldStateScopeProvider.IStorageTree storageTree = ctx.Scope.CreateStorageTree(testAddress);
+ Assert.That(storageTree.Get(slotIndex), Is.EqualTo(persistedSlotValue));
+ }
+
+    /// <summary>
+    /// Data sitting in a written (but uncommitted) write batch must shadow both the
+    /// snapshot layers and the persistence reader.
+    /// </summary>
+    [Test]
+    public void TestAccountAndSlotFromWrittenBatch()
+    {
+        using TestContext ctx = new TestContext();
+        FlatWorldStateScope scope = ctx.Scope;
+
+        Address testAddress = TestItem.AddressA;
+        UInt256 slotIndex = 1;
+        Account testAccount = TestItem.GenerateRandomAccount();
+        byte[] writtenSlotValue = { 0xFF, 0xFF };
+
+        // Persistence holds a different account so the two layers are distinguishable.
+        Account persistenceAccount = TestItem.GenerateRandomAccount();
+        ctx.PersistenceReader.GetAccount(testAddress).Returns(persistenceAccount);
+
+        // Add dummy snapshot
+        ctx.AddSnapshot(content => { });
+
+        // Write directly to write batch
+        using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1))
+        {
+            writeBatch.Set(testAddress, testAccount);
+            IWorldStateScopeProvider.IStorageWriteBatch storageBatch = writeBatch.CreateStorageWriteBatch(testAddress, 1);
+            storageBatch.Set(slotIndex, writtenSlotValue);
+            storageBatch.Dispose();
+        }
+
+        // Verify written items shadow everything else. Balance/Nonce are compared
+        // field-wise rather than via whole-account equality — presumably because the
+        // scope recomputes the storage root. TODO confirm.
+        Account? resultAccount = scope.Get(testAddress);
+        Assert.That(resultAccount!.Balance, Is.EqualTo(testAccount.Balance));
+        Assert.That(resultAccount!.Nonce, Is.EqualTo(testAccount.Nonce));
+
+        IWorldStateScopeProvider.IStorageTree storageTree = scope.CreateStorageTree(testAddress);
+        Assert.That(storageTree.Get(slotIndex), Is.EqualTo(writtenSlotValue));
+    }
+
+    /// <summary>
+    /// Commit must publish written account fields and slot values into the committed
+    /// snapshot captured by the test context.
+    /// </summary>
+    [Test]
+    public void TestAccountAndSlotAfterCommit()
+    {
+        using TestContext ctx = new TestContext();
+        FlatWorldStateScope scope = ctx.Scope;
+
+        Address testAddress = TestItem.AddressA;
+        UInt256 slotIndex = 1;
+        Account testAccount = TestItem.GenerateRandomAccount();
+        byte[] slotValue = { 0xCA, 0xFE };
+
+        // Write both account and slot in a single batch.
+        using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1))
+        {
+            writeBatch.Set(testAddress, testAccount);
+            IWorldStateScopeProvider.IStorageWriteBatch storageBatch = writeBatch.CreateStorageWriteBatch(testAddress, 1);
+            storageBatch.Set(slotIndex, slotValue);
+            storageBatch.Dispose();
+        }
+
+        // Commit both
+        scope.Commit(1);
+
+        // Verify in snapshot. Account fields compared individually — presumably the
+        // committed account's storage root differs from the random one. TODO confirm.
+        Assert.That(ctx.LastCommittedSnapshot, Is.Not.Null);
+        ctx.LastCommittedSnapshot!.TryGetAccount(testAddress, out Account? committedAccount);
+        Assert.That(committedAccount!.Balance, Is.EqualTo(testAccount.Balance));
+        Assert.That(committedAccount!.Nonce, Is.EqualTo(testAccount.Nonce));
+
+        ctx.LastCommittedSnapshot!.TryGetStorage(testAddress, slotIndex, out SlotValue? committedSlot);
+        Assert.That(committedSlot!.Value.ToEvmBytes(), Is.EqualTo(slotValue));
+    }
+
+ #endregion
+
+ #region Selfdestruct Interaction Tests
+
+    /// <summary>
+    /// A self-destruct recorded in a later snapshot layer must block slot lookups
+    /// from reaching data in earlier layers: reads come back as zero.
+    /// NOTE(review): oldAccount is seeded but only the slot is asserted here.
+    /// </summary>
+    [Test]
+    public void TestSelfDestructBlocksEarlierAccountAndSlot()
+    {
+        using TestContext ctx = new TestContext();
+        FlatWorldStateScope scope = ctx.Scope;
+
+        Address testAddress = TestItem.AddressA;
+        UInt256 slotIndex = 1;
+        Account oldAccount = TestItem.GenerateRandomAccount();
+        byte[] oldSlotValue = { 0x01, 0x02, 0x03 };
+
+        // Layer 1: Account and Slot data
+        ctx.AddSnapshot(content =>
+        {
+            content.Accounts[testAddress] = oldAccount;
+            content.Storages[(testAddress, slotIndex)] = SlotValue.FromSpanWithoutLeadingZero(oldSlotValue);
+        });
+
+        // Layer 2: SELFDESTRUCT
+        // isNewAccount = false means there was storage to clear
+        ctx.AddSnapshot(content => content.SelfDestructedStorageAddresses[testAddress] = false);
+
+        // Layer 3: Empty snapshot after selfdestruct
+        ctx.AddSnapshot(content => { });
+
+        // Slot should be blocked by selfdestruct
+        IWorldStateScopeProvider.IStorageTree storageTree = scope.CreateStorageTree(testAddress);
+        Assert.That(storageTree.Get(slotIndex), Is.EqualTo(StorageTree.ZeroBytes));
+    }
+
+    /// <summary>
+    /// The snapshot index of the self-destruct must partition slot lookups: slots
+    /// written before it read as zero, slots written after it remain visible.
+    /// </summary>
+    [Test]
+    public void TestSelfDestructIdxIsPassedCorrectly()
+    {
+        using TestContext ctx = new TestContext();
+        FlatWorldStateScope scope = ctx.Scope;
+
+        Address testAddress = TestItem.AddressA;
+        UInt256 slot1 = 1;
+        UInt256 slot2 = 2;
+        byte[] slot1BeforeValue = { 0x01 };
+        byte[] slot2AfterValue = { 0x02 };
+
+        // Snapshot 0: slot1 exists
+        ctx.AddSnapshot(content => content.Storages[(testAddress, slot1)] = SlotValue.FromSpanWithoutLeadingZero(slot1BeforeValue));
+
+        // Snapshot 1: selfdestruct happens at this index
+        ctx.AddSnapshot(content => content.SelfDestructedStorageAddresses[testAddress] = false);
+
+        // Snapshot 2: slot2 is set after selfdestruct
+        ctx.AddSnapshot(content => content.Storages[(testAddress, slot2)] = SlotValue.FromSpanWithoutLeadingZero(slot2AfterValue));
+
+        IWorldStateScopeProvider.IStorageTree storageTree = scope.CreateStorageTree(testAddress);
+
+        // slot1 should return zero (blocked by selfdestruct)
+        Assert.That(storageTree.Get(slot1), Is.EqualTo(StorageTree.ZeroBytes));
+
+        // slot2 should return the value (written after selfdestruct)
+        Assert.That(storageTree.Get(slot2), Is.EqualTo(slot2AfterValue));
+    }
+
+ #endregion
+
+ #region Storage Root Tests
+
+    /// <summary>
+    /// After committing a single slot write, the account's storage root must equal
+    /// the root of a reference StorageTree built over the same (slot, value) pair.
+    /// </summary>
+    [Test]
+    public void TestStorageRootAfterSingleSlotSet()
+    {
+        using TestContext ctx = new TestContext();
+        FlatWorldStateScope scope = ctx.Scope;
+
+        Address testAddress = TestItem.AddressA;
+        UInt256 slotIndex = 1;
+        byte[] slotValue = { 0xAB, 0xCD };
+
+        // Account must exist in persistence so it can carry the new storage root.
+        Account initialAccount = TestItem.GenerateRandomAccount();
+        ctx.PersistenceReader.GetAccount(testAddress).Returns(initialAccount);
+
+        // Set a single slot
+        using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1))
+        {
+            IWorldStateScopeProvider.IStorageWriteBatch storageBatch = writeBatch.CreateStorageWriteBatch(testAddress, 1);
+            storageBatch.Set(slotIndex, slotValue);
+            storageBatch.Dispose();
+        }
+
+        // Commit to update storage root
+        scope.Commit(1);
+
+        // Compute expected storage root using standalone StorageTree
+        TestMemDb testDb = new TestMemDb();
+        RawScopedTrieStore trieStore = new RawScopedTrieStore(testDb);
+        StorageTree expectedTree = new StorageTree(trieStore, LimboLogs.Instance);
+        expectedTree.Set(slotIndex, slotValue);
+        expectedTree.UpdateRootHash();
+        Hash256 expectedRoot = expectedTree.RootHash;
+
+        // Verify actual storage root matches expected
+        Account? resultAccount = scope.Get(testAddress);
+        Assert.That(resultAccount, Is.Not.Null);
+        Assert.That(resultAccount!.StorageRoot, Is.EqualTo(expectedRoot));
+    }
+
+    /// <summary>
+    /// Several slots (including a non-contiguous index) written in one commit must
+    /// yield the same storage root as a reference StorageTree over the same slots.
+    /// </summary>
+    [Test]
+    public void TestStorageRootAfterMultipleSlotsSingleCommit()
+    {
+        using TestContext ctx = new TestContext();
+        FlatWorldStateScope scope = ctx.Scope;
+
+        Address testAddress = TestItem.AddressA;
+        UInt256 slot1 = 1;
+        UInt256 slot2 = 2;
+        UInt256 slot3 = 100;
+        byte[] value1 = { 0x01, 0x02 };
+        byte[] value2 = { 0xAA, 0xBB, 0xCC };
+        byte[] value3 = { 0xFF };
+
+        Account initialAccount = TestItem.GenerateRandomAccount();
+        ctx.PersistenceReader.GetAccount(testAddress).Returns(initialAccount);
+
+        // Set multiple slots in single commit
+        using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1))
+        {
+            IWorldStateScopeProvider.IStorageWriteBatch storageBatch = writeBatch.CreateStorageWriteBatch(testAddress, 3);
+            storageBatch.Set(slot1, value1);
+            storageBatch.Set(slot2, value2);
+            storageBatch.Set(slot3, value3);
+            storageBatch.Dispose();
+        }
+
+        scope.Commit(1);
+
+        // Compute expected storage root
+        TestMemDb testDb = new TestMemDb();
+        RawScopedTrieStore trieStore = new RawScopedTrieStore(testDb);
+        StorageTree expectedTree = new StorageTree(trieStore, LimboLogs.Instance);
+        expectedTree.Set(slot1, value1);
+        expectedTree.Set(slot2, value2);
+        expectedTree.Set(slot3, value3);
+        expectedTree.UpdateRootHash();
+        Hash256 expectedRoot = expectedTree.RootHash;
+
+        // Verify
+        Account? resultAccount = scope.Get(testAddress);
+        Assert.That(resultAccount!.StorageRoot, Is.EqualTo(expectedRoot));
+    }
+
+    /// <summary>
+    /// Slots written in separate commits must accumulate: the final storage root
+    /// reflects the union of all committed slots.
+    /// </summary>
+    [Test]
+    public void TestStorageRootAfterMultipleCommits()
+    {
+        using TestContext ctx = new TestContext();
+        FlatWorldStateScope scope = ctx.Scope;
+
+        Address testAddress = TestItem.AddressA;
+        UInt256 slot1 = 1;
+        UInt256 slot2 = 2;
+        byte[] value1 = { 0x11 };
+        byte[] value2 = { 0x22 };
+
+        Account initialAccount = TestItem.GenerateRandomAccount();
+        ctx.PersistenceReader.GetAccount(testAddress).Returns(initialAccount);
+
+        // First commit - set slot1
+        using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1))
+        {
+            IWorldStateScopeProvider.IStorageWriteBatch storageBatch = writeBatch.CreateStorageWriteBatch(testAddress, 1);
+            storageBatch.Set(slot1, value1);
+            storageBatch.Dispose();
+        }
+        scope.Commit(1);
+
+        // Second commit - set slot2
+        using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1))
+        {
+            IWorldStateScopeProvider.IStorageWriteBatch storageBatch = writeBatch.CreateStorageWriteBatch(testAddress, 1);
+            storageBatch.Set(slot2, value2);
+            storageBatch.Dispose();
+        }
+        scope.Commit(2);
+
+        // Compute expected storage root with both slots
+        TestMemDb testDb = new TestMemDb();
+        RawScopedTrieStore trieStore = new RawScopedTrieStore(testDb);
+        StorageTree expectedTree = new StorageTree(trieStore, LimboLogs.Instance);
+        expectedTree.Set(slot1, value1);
+        expectedTree.Set(slot2, value2);
+        expectedTree.UpdateRootHash();
+        Hash256 expectedRoot = expectedTree.RootHash;
+
+        // Verify
+        Account? resultAccount = scope.Get(testAddress);
+        Assert.That(resultAccount!.StorageRoot, Is.EqualTo(expectedRoot));
+    }
+
+    /// <summary>
+    /// Clear() on a storage write batch models SELFDESTRUCT: after a clear and a
+    /// subsequent write, the storage root must reflect only the post-clear slot.
+    /// </summary>
+    [Test]
+    public void TestStorageRootAfterSelfDestructAndNewSlots()
+    {
+        using TestContext ctx = new TestContext();
+        FlatWorldStateScope scope = ctx.Scope;
+
+        Address testAddress = TestItem.AddressA;
+        UInt256 slot1 = 1;
+        UInt256 slot2 = 2;
+        byte[] value1 = { 0xAA };
+        byte[] value2 = { 0xBB };
+
+        Account initialAccount = TestItem.GenerateRandomAccount();
+        ctx.PersistenceReader.GetAccount(testAddress).Returns(initialAccount);
+
+        // Set initial slot
+        using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1))
+        {
+            IWorldStateScopeProvider.IStorageWriteBatch storageBatch = writeBatch.CreateStorageWriteBatch(testAddress, 1);
+            storageBatch.Set(slot1, value1);
+            storageBatch.Dispose();
+        }
+        scope.Commit(1);
+
+        // SelfDestruct - should clear storage
+        using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1))
+        {
+            IWorldStateScopeProvider.IStorageWriteBatch storageBatch = writeBatch.CreateStorageWriteBatch(testAddress, 0);
+            storageBatch.Clear();
+            storageBatch.Dispose();
+        }
+        scope.Commit(2);
+
+        // Set new slot after selfdestruct
+        using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1))
+        {
+            IWorldStateScopeProvider.IStorageWriteBatch storageBatch = writeBatch.CreateStorageWriteBatch(testAddress, 1);
+            storageBatch.Set(slot2, value2);
+            storageBatch.Dispose();
+        }
+        scope.Commit(3);
+
+        // Expected: only slot2 should exist (storage was cleared)
+        TestMemDb testDb = new TestMemDb();
+        RawScopedTrieStore trieStore = new RawScopedTrieStore(testDb);
+        StorageTree expectedTree = new StorageTree(trieStore, LimboLogs.Instance);
+        expectedTree.Set(slot2, value2);
+        expectedTree.UpdateRootHash();
+        Hash256 expectedRoot = expectedTree.RootHash;
+
+        // Verify
+        Account? resultAccount = scope.Get(testAddress);
+        Assert.That(resultAccount!.StorageRoot, Is.EqualTo(expectedRoot));
+    }
+
+    /// <summary>
+    /// An account with no storage writes must report Keccak.EmptyTreeHash as its
+    /// storage root when read through the scope.
+    /// </summary>
+    [Test]
+    public void TestEmptyStorageRootWhenNoSlots()
+    {
+        using TestContext ctx = new TestContext();
+        FlatWorldStateScope scope = ctx.Scope;
+
+        Address queriedAddress = TestItem.AddressA;
+
+        // Seed persistence with a bare account; never touch its storage.
+        Account emptyAccount = new Account(0, 0);
+        ctx.PersistenceReader.GetAccount(queriedAddress).Returns(emptyAccount);
+
+        // Read the account straight away — no slots, no commits.
+        Account? fetched = scope.Get(queriedAddress);
+
+        // The canonical empty-trie hash must be surfaced as the storage root.
+        Assert.That(fetched, Is.Not.Null);
+        Assert.That(fetched!.StorageRoot, Is.EqualTo(Keccak.EmptyTreeHash));
+    }
+
+ #endregion
+
+ #region Account Snapshot Commit Tests
+
+    /// <summary>
+    /// One commit carrying two accounts and a slot must publish all three into the
+    /// committed snapshot observed by the test context.
+    /// </summary>
+    [Test]
+    public void TestMultipleAccountsAndSlotsCommittedInSnapshot()
+    {
+        using TestContext ctx = new TestContext();
+        FlatWorldStateScope scope = ctx.Scope;
+
+        Address addr1 = TestItem.AddressA;
+        Address addr2 = TestItem.AddressB;
+        Account acc1 = new Account(100, 1000);
+        Account acc2 = new Account(200, 2000);
+        UInt256 slot1 = 1;
+        byte[] val1 = { 0x01 };
+
+        // Set multiple items
+        using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(2))
+        {
+            writeBatch.Set(addr1, acc1);
+            writeBatch.Set(addr2, acc2);
+            IWorldStateScopeProvider.IStorageWriteBatch storageBatch = writeBatch.CreateStorageWriteBatch(addr1, 1);
+            storageBatch.Set(slot1, val1);
+            storageBatch.Dispose();
+        }
+
+        scope.Commit(1);
+
+        // Verify all committed to snapshot
+        Assert.That(ctx.LastCommittedSnapshot, Is.Not.Null);
+        ctx.LastCommittedSnapshot!.TryGetAccount(addr1, out Account? committedAcc1);
+        Assert.That(committedAcc1!.Balance, Is.EqualTo(acc1.Balance));
+
+        ctx.LastCommittedSnapshot!.TryGetAccount(addr2, out Account? committedAcc2);
+        Assert.That(committedAcc2!.Balance, Is.EqualTo(acc2.Balance));
+
+        ctx.LastCommittedSnapshot!.TryGetStorage(addr1, slot1, out SlotValue? committedSlot);
+        Assert.That(committedSlot!.Value.ToEvmBytes(), Is.EqualTo(val1));
+    }
+
+    /// <summary>
+    /// Writes committed in separate commits accumulate: the scope must see data
+    /// from every earlier commit, not only the latest one.
+    /// </summary>
+    [Test]
+    public void TestMultipleCommitsAccumulateData()
+    {
+        using TestContext ctx = new TestContext();
+        FlatWorldStateScope scope = ctx.Scope;
+
+        Address firstAddress = TestItem.AddressA;
+        Address secondAddress = TestItem.AddressB;
+        Account firstAccount = new Account(100, 1000);
+        Account secondAccount = new Account(200, 2000);
+
+        // First commit writes only the first account.
+        using (IWorldStateScopeProvider.IWorldStateWriteBatch batch = scope.StartWriteBatch(1))
+        {
+            batch.Set(firstAddress, firstAccount);
+        }
+        scope.Commit(1);
+
+        // Second commit writes only the second account.
+        using (IWorldStateScopeProvider.IWorldStateWriteBatch batch = scope.StartWriteBatch(1))
+        {
+            batch.Set(secondAddress, secondAccount);
+        }
+        scope.Commit(2);
+
+        // Both accounts must be visible after both commits.
+        Assert.That(scope.Get(firstAddress), Is.EqualTo(firstAccount));
+        Assert.That(scope.Get(secondAddress), Is.EqualTo(secondAccount));
+    }
+
+ #endregion
+
+ #region Comprehensive Selfdestruct Blocking Tests
+
+    /// <summary>
+    /// A self-destruct in a middle snapshot layer must block slot lookups from
+    /// reaching both earlier snapshot layers and the persistence reader.
+    /// </summary>
+    [Test]
+    public void TestSelfDestructBlocksPersistenceAndAllSnapshotLayers()
+    {
+        using TestContext ctx = new TestContext();
+        FlatWorldStateScope scope = ctx.Scope;
+
+        Address addr = TestItem.AddressA;
+        UInt256 slot = 1;
+        byte[] persistedVal = { 0xDE, 0xAD };
+        byte[] snapshotVal = { 0x01, 0x02 };
+
+        // Persistence setup.
+        // Fix: the NSubstitute ref matcher needs its type argument, which was lost;
+        // SlotValue matches the local `outVal` assigned through the ref parameter.
+        ctx.PersistenceReader.GetAccount(addr).Returns(TestItem.GenerateRandomAccount());
+        SlotValue outVal = SlotValue.FromSpanWithoutLeadingZero(persistedVal);
+        ctx.PersistenceReader.TryGetSlot(addr, slot, ref Arg.Any<SlotValue>())
+            .Returns(x => { x[2] = outVal; return true; });
+
+        // Snapshot Setup: value, then selfdestruct, then an empty layer on top.
+        ctx.AddSnapshot(content => content.Storages[(addr, slot)] = SlotValue.FromSpanWithoutLeadingZero(snapshotVal));
+        ctx.AddSnapshot(content => content.SelfDestructedStorageAddresses[addr] = true);
+        ctx.AddSnapshot(content => { });
+
+        // Verify both are blocked
+        IWorldStateScopeProvider.IStorageTree storageTree = scope.CreateStorageTree(addr);
+        Assert.That(storageTree.Get(slot), Is.EqualTo(StorageTree.ZeroBytes));
+    }
+
+    /// <summary>
+    /// Regression test: storage lookups with no self-destruct present must fall
+    /// through local commit snapshots into the read-only snapshot bundle.
+    /// </summary>
+    [Test]
+    public void TestStorageNodeLookupWithoutSelfDestructFallsThroughToReadOnlyBundle()
+    {
+        // This test verifies the fix for the bug where storage node lookup would exit early
+        // when selfDestructStateIdx == -1 (no self-destruct) and local _snapshots exist but
+        // don't contain the storage node. Before the fix, the condition `i >= currentBundleSelfDestructIdx`
+        // was always true when selfDestructStateIdx == -1, causing early exit.
+
+        using TestContext ctx = new TestContext();
+        FlatWorldStateScope scope = ctx.Scope;
+
+        Address addr1 = TestItem.AddressA;
+        Address addr2 = TestItem.AddressB;
+        Hash256 addr1Hash = Keccak.Compute(addr1.Bytes);
+        UInt256 slot1 = 1;
+        byte[] value1 = { 0x01 };
+
+        Account acc1 = TestItem.GenerateRandomAccount();
+        ctx.PersistenceReader.GetAccount(addr1).Returns(acc1);
+
+        // Add storage slot AND trie node for addr1 to ReadOnlySnapshots
+        ctx.AddSnapshot(content =>
+        {
+            content.Storages[(addr1, slot1)] = SlotValue.FromSpanWithoutLeadingZero(value1);
+
+            // Also add a storage trie node for addr1 at root path
+            TrieNode storageNode = new TrieNode(NodeType.Leaf, Keccak.Zero);
+            content.StorageNodes[(addr1Hash, TreePath.Empty)] = storageNode;
+        });
+
+        // Create local commits for addr2 (NOT addr1) - this creates local _snapshots
+        Account acc2 = TestItem.GenerateRandomAccount();
+        using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1))
+        {
+            writeBatch.Set(addr2, acc2);
+        }
+        scope.Commit(1);
+
+        // Now lookup storage for addr1 - should fall through local _snapshots to ReadOnlySnapshots
+        // Before the fix: would fail because DoTryFindStorageNodeExternal exited early
+        // After the fix: properly falls through and finds storage in ReadOnlySnapshots
+        IWorldStateScopeProvider.IStorageTree storageTree = scope.CreateStorageTree(addr1);
+        Assert.That(storageTree.Get(slot1), Is.EqualTo(value1));
+    }
+
+    /// <summary>
+    /// A self-destruct inside the scope's local commit snapshots must block only
+    /// slots committed strictly before it; same-commit and later writes stay visible.
+    /// </summary>
+    [Test]
+    public void TestSelfDestructInLocalSnapshotsStopsAtExpectedSnapshot()
+    {
+        // This test verifies that when self-destruct is in local _snapshots (SnapshotBundle),
+        // the storage lookup correctly:
+        // 1. Finds storage added AFTER self-destruct (in newer snapshots)
+        // 2. Finds storage added AT the same commit as self-destruct
+        // 3. Returns null for storage that existed BEFORE self-destruct (blocked by self-destruct)
+
+        using TestContext ctx = new TestContext();
+        FlatWorldStateScope scope = ctx.Scope;
+
+        Address addr = TestItem.AddressA;
+        UInt256 slotBefore = 1;
+        UInt256 slotAtSelfDestruct = 2;
+        UInt256 slotAfter = 3;
+        byte[] valueBefore = { 0x01 };
+        byte[] valueAtSelfDestruct = { 0x02 };
+        byte[] valueAfter = { 0x03 };
+
+        Account acc = TestItem.GenerateRandomAccount();
+        ctx.PersistenceReader.GetAccount(addr).Returns(acc);
+
+        // Commit 1: Set slot BEFORE self-destruct
+        using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1))
+        {
+            IWorldStateScopeProvider.IStorageWriteBatch storageBatch = writeBatch.CreateStorageWriteBatch(addr, 1);
+            storageBatch.Set(slotBefore, valueBefore);
+            storageBatch.Dispose();
+        }
+        scope.Commit(1);
+
+        // Commit 2: Self-destruct AND set new slot in same commit
+        using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1))
+        {
+            IWorldStateScopeProvider.IStorageWriteBatch storageBatch = writeBatch.CreateStorageWriteBatch(addr, 1);
+            storageBatch.Clear();
+            storageBatch.Set(slotAtSelfDestruct, valueAtSelfDestruct);
+            storageBatch.Dispose();
+        }
+        scope.Commit(2);
+
+        // Commit 3: Set slot AFTER self-destruct
+        using (IWorldStateScopeProvider.IWorldStateWriteBatch writeBatch = scope.StartWriteBatch(1))
+        {
+            IWorldStateScopeProvider.IStorageWriteBatch storageBatch = writeBatch.CreateStorageWriteBatch(addr, 1);
+            storageBatch.Set(slotAfter, valueAfter);
+            storageBatch.Dispose();
+        }
+        scope.Commit(3);
+
+        // Verify storage behavior:
+        // - slotBefore should be blocked by self-destruct (return zero)
+        // - slotAtSelfDestruct should be found (set in same commit as self-destruct)
+        // - slotAfter should be found (added after self-destruct)
+        IWorldStateScopeProvider.IStorageTree storageTree = scope.CreateStorageTree(addr);
+        Assert.That(storageTree.Get(slotBefore), Is.EqualTo(StorageTree.ZeroBytes), "Slot before self-destruct should be zero");
+        Assert.That(storageTree.Get(slotAtSelfDestruct), Is.EqualTo(valueAtSelfDestruct), "Slot at self-destruct should be found");
+        Assert.That(storageTree.Get(slotAfter), Is.EqualTo(valueAfter), "Slot after self-destruct should be found");
+    }
+
+ #endregion
+
+}
diff --git a/src/Nethermind/Nethermind.State.Flat.Test/MpmcRingBufferTests.cs b/src/Nethermind/Nethermind.State.Flat.Test/MpmcRingBufferTests.cs
new file mode 100644
index 00000000000..90796eb6f41
--- /dev/null
+++ b/src/Nethermind/Nethermind.State.Flat.Test/MpmcRingBufferTests.cs
@@ -0,0 +1,147 @@
+// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited
+// SPDX-License-Identifier: LGPL-3.0-only
+
+using System.Linq;
+using System.Threading;
+using System.Threading.Tasks;
+using FluentAssertions;
+using NUnit.Framework;
+
+namespace Nethermind.State.Flat.Test;
+
+public class MpmcRingBufferTests
+{
+    // NOTE(review): MpmcRingBuffer's element type argument was missing throughout
+    // this fixture (stripped angle brackets); restored as <int> since items are
+    // dequeued into int locals everywhere below — confirm against the declaration.
+
+    /// <summary>Simple FIFO smoke test: items come out in insertion order.</summary>
+    [Test]
+    public void SmokeTest()
+    {
+        MpmcRingBuffer<int> jobQueue = new MpmcRingBuffer<int>(16);
+
+        jobQueue.TryEnqueue(1);
+        jobQueue.TryEnqueue(2);
+        jobQueue.TryEnqueue(3);
+        jobQueue.TryEnqueue(4);
+        jobQueue.TryEnqueue(5);
+
+        jobQueue.TryDequeue(out int j).Should().BeTrue();
+        j.Should().Be(1);
+        jobQueue.TryDequeue(out j).Should().BeTrue();
+        j.Should().Be(2);
+        jobQueue.TryDequeue(out j).Should().BeTrue();
+        j.Should().Be(3);
+        jobQueue.TryDequeue(out j).Should().BeTrue();
+        j.Should().Be(4);
+        jobQueue.TryDequeue(out j).Should().BeTrue();
+        j.Should().Be(5);
+    }
+
+    /// <summary>
+    /// Keeps the buffer partially full while alternating dequeue/enqueue 100 times,
+    /// so the head/tail indices wrap around the 16-slot ring several times.
+    /// </summary>
+    [Test]
+    public void RollingSmokeTest()
+    {
+        MpmcRingBuffer<int> jobQueue = new MpmcRingBuffer<int>(16);
+
+        jobQueue.TryEnqueue(1);
+        jobQueue.TryEnqueue(2);
+        jobQueue.TryEnqueue(3);
+        jobQueue.TryEnqueue(4);
+        jobQueue.TryEnqueue(5);
+
+        int j = 0;
+        for (int i = 0; i < 100; i++)
+        {
+            jobQueue.TryDequeue(out j).Should().BeTrue();
+            j.Should().Be(i + 1);
+            jobQueue.TryEnqueue(i + 5 + 1).Should().BeTrue();
+        }
+    }
+
+    /// <summary>
+    /// Enqueue must fail on a full ring and dequeue on an empty one, across two
+    /// complete fill/drain cycles (checks full/empty detection after wrap-around).
+    /// </summary>
+    [Test]
+    public void SmokeTestFullAndRolling()
+    {
+        MpmcRingBuffer<int> jobQueue = new MpmcRingBuffer<int>(16);
+
+        for (int i = 0; i < 16; i++)
+        {
+            Assert.That(jobQueue.TryEnqueue(1), Is.True);
+        }
+        Assert.That(jobQueue.TryEnqueue(1), Is.False);
+
+        for (int i = 0; i < 16; i++)
+        {
+            Assert.That(jobQueue.TryDequeue(out _), Is.True);
+        }
+        Assert.That(jobQueue.TryDequeue(out _), Is.False);
+
+        for (int i = 0; i < 16; i++)
+        {
+            Assert.That(jobQueue.TryEnqueue(1), Is.True);
+        }
+        Assert.That(jobQueue.TryEnqueue(1), Is.False);
+
+        for (int i = 0; i < 16; i++)
+        {
+            Assert.That(jobQueue.TryDequeue(out _), Is.True);
+        }
+        Assert.That(jobQueue.TryDequeue(out _), Is.False);
+    }
+
+    /// <summary>
+    /// MPMC stress test: 4 producers claim 1M distinct item ids via CAS on a shared
+    /// counter and enqueue them; 4 consumers tally each id. Every item must be
+    /// consumed exactly once and the totals must match.
+    /// </summary>
+    [Test]
+    public async Task HighConcurrency_StressTest_NoDataLoss()
+    {
+        const int capacity = 1024;
+        const int itemsToProduce = 1_000_000;
+        const int producerCount = 4;
+        const int consumerCount = 4;
+
+        MpmcRingBuffer<int> buffer = new MpmcRingBuffer<int>(capacity);
+        int[] consumedCounts = new int[itemsToProduce];
+        long totalConsumed = 0;
+
+        long itemLeftToProduce = itemsToProduce;
+
+        // Producers: claim the next item id with a CAS decrement, then spin until
+        // there is space in the ring.
+        Task[] producers = Enumerable.Range(0, producerCount).Select(_ => Task.Run(() =>
+        {
+            while (true)
+            {
+                long remaining = Interlocked.Read(ref itemLeftToProduce);
+                if (remaining == 0) break;
+                if (Interlocked.CompareExchange(ref itemLeftToProduce, remaining - 1, remaining) != remaining) continue;
+
+                while (!buffer.TryEnqueue((int)remaining - 1))
+                {
+                    Thread.SpinWait(10); // Wait for space
+                }
+            }
+        })).ToArray();
+
+        // Consumers: drain until the global consumed counter reaches the total.
+        Task[] consumers = Enumerable.Range(0, consumerCount).Select(_ => Task.Run(() =>
+        {
+            while (Interlocked.Read(ref totalConsumed) < itemsToProduce)
+            {
+                if (buffer.TryDequeue(out int item))
+                {
+                    // Track that this specific item was hit
+                    Interlocked.Increment(ref consumedCounts[item]);
+                    Interlocked.Increment(ref totalConsumed);
+                }
+                else
+                {
+                    Thread.SpinWait(10);
+                }
+            }
+        })).ToArray();
+
+        await Task.WhenAll(producers);
+        await Task.WhenAll(consumers);
+
+        // Fix: Assert.That takes (actual, constraint) — the original had the
+        // expected constant in the actual position.
+        Assert.That(Interlocked.Read(ref totalConsumed), Is.EqualTo(itemsToProduce));
+
+        for (int i = 0; i < itemsToProduce; i++)
+        {
+            Assert.That(consumedCounts[i] == 1, $"Item {i} was consumed {consumedCounts[i]} times!");
+        }
+    }
+}
diff --git a/src/Nethermind/Nethermind.State.Flat.Test/Nethermind.State.Flat.Test.csproj b/src/Nethermind/Nethermind.State.Flat.Test/Nethermind.State.Flat.Test.csproj
new file mode 100644
index 00000000000..a9ef96f63d5
--- /dev/null
+++ b/src/Nethermind/Nethermind.State.Flat.Test/Nethermind.State.Flat.Test.csproj
@@ -0,0 +1,16 @@
+
+
+
+
+
+ Nethermind.State.Flat.Test
+ enable
+
+
+
+
+
+
+
+
+
diff --git a/src/Nethermind/Nethermind.State.Flat.Test/Persistence/BloomFilter/BloomFilterTests.cs b/src/Nethermind/Nethermind.State.Flat.Test/Persistence/BloomFilter/BloomFilterTests.cs
new file mode 100644
index 00000000000..0acfbafae10
--- /dev/null
+++ b/src/Nethermind/Nethermind.State.Flat.Test/Persistence/BloomFilter/BloomFilterTests.cs
@@ -0,0 +1,173 @@
+// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited
+// SPDX-License-Identifier: LGPL-3.0-only
+
+using System;
+using System.Linq;
+using System.Threading;
+using System.Threading.Tasks;
+using FluentAssertions;
+using Nethermind.State.Flat.Persistence.BloomFilter;
+using NUnit.Framework;
+
+namespace Nethermind.State.Flat.Test.Persistence.BloomFilter;
+
+[TestFixture]
+public class BloomFilterTests
+{
+    // The filter type is spelled out fully qualified because the enclosing
+    // namespace segment "BloomFilter" collides with the class name.
+
+    #region Basic Add/Query Tests
+
+    /// <summary>A single added hash must be reported as (possibly) present.</summary>
+    [Test]
+    public void Add_SingleItem_ShouldBeFound()
+    {
+        // Arrange
+        using Nethermind.State.Flat.Persistence.BloomFilter.BloomFilter bloom = new(capacity: 100, bitsPerKey: 10);
+        ulong hash = 12345;
+
+        // Act
+        bloom.Add(hash);
+
+        // Assert
+        bloom.MightContain(hash).Should().BeTrue();
+    }
+
+    /// <summary>Bloom filters must have no false negatives: every added hash is found.</summary>
+    [Test]
+    public void Add_MultipleItems_ShouldAllBeFound()
+    {
+        // Arrange
+        using Nethermind.State.Flat.Persistence.BloomFilter.BloomFilter bloom = new(capacity: 100, bitsPerKey: 10);
+        ulong[] hashes = { 1, 2, 3, 100, 1000, 99999 };
+
+        // Act
+        foreach (ulong hash in hashes)
+        {
+            bloom.Add(hash);
+        }
+
+        // Assert
+        foreach (ulong hash in hashes)
+        {
+            bloom.MightContain(hash).Should().BeTrue($"hash {hash} should be found");
+        }
+    }
+
+    #endregion
+
+    #region Concurrency Tests
+
+    /// <summary>Concurrent Add from many threads must lose no items.</summary>
+    [Test]
+    public void Add_Concurrent_ShouldBeThreadSafe()
+    {
+        // Arrange
+        using Nethermind.State.Flat.Persistence.BloomFilter.BloomFilter bloom = new(capacity: 1000, bitsPerKey: 10);
+        int threadsCount = 10;
+        int itemsPerThread = 50;
+        using Barrier barrier = new(threadsCount);
+        // Fix: ConcurrentBag requires its element type argument (ulong hashes).
+        System.Collections.Concurrent.ConcurrentBag<ulong> addedHashes = new();
+
+        // Act - Multiple threads adding concurrently
+        Task[] tasks = Enumerable.Range(0, threadsCount).Select(threadId => Task.Run(() =>
+        {
+            barrier.SignalAndWait(); // Sync start
+            for (int i = 0; i < itemsPerThread; i++)
+            {
+                ulong hash = (ulong)(threadId * itemsPerThread + i);
+                bloom.Add(hash);
+                addedHashes.Add(hash);
+            }
+        })).ToArray();
+
+        Task.WaitAll(tasks);
+
+        // Assert - All items should be found
+        foreach (ulong hash in addedHashes)
+        {
+            bloom.MightContain(hash).Should().BeTrue($"hash {hash} should be found");
+        }
+    }
+
+    /// <summary>Writers and readers running together for ~1s must not throw.</summary>
+    [Test]
+    public void Add_ConcurrentWithMightContain_ShouldWork()
+    {
+        // Arrange
+        using Nethermind.State.Flat.Persistence.BloomFilter.BloomFilter bloom = new(capacity: 10000, bitsPerKey: 10);
+        int duration = 1000; // ms
+        // Fix: dispose the CTS so its internal timer is released.
+        using CancellationTokenSource cts = new(duration);
+
+        // Act - Some threads adding, others querying
+        Task[] writerTasks = Enumerable.Range(0, 3).Select(threadId => Task.Run(() =>
+        {
+            // Disjoint hash ranges per writer thread.
+            ulong hash = (ulong)(threadId * 1000000);
+            while (!cts.Token.IsCancellationRequested)
+            {
+                bloom.Add(hash++);
+            }
+        })).ToArray();
+
+        Task[] readerTasks = Enumerable.Range(0, 3).Select(_ => Task.Run(() =>
+        {
+            ulong hash = 0;
+            while (!cts.Token.IsCancellationRequested)
+            {
+                bloom.MightContain(hash++);
+                Thread.Yield();
+            }
+        })).ToArray();
+
+        Task.WaitAll(writerTasks.Concat(readerTasks).ToArray());
+
+        // Assert - No exceptions thrown
+        Assert.Pass("Concurrent operations completed without exceptions");
+    }
+
+    #endregion
+
+    #region Edge Cases
+
+    /// <summary>Dispose must be idempotent.</summary>
+    [Test]
+    public void Dispose_MultipleTimes_ShouldNotThrow()
+    {
+        // Arrange
+        Nethermind.State.Flat.Persistence.BloomFilter.BloomFilter bloom = new(capacity: 100, bitsPerKey: 10);
+
+        // Act & Assert
+        bloom.Dispose();
+        Assert.DoesNotThrow(() => bloom.Dispose());
+    }
+
+    /// <summary>An empty filter should not report membership for an arbitrary hash.</summary>
+    [Test]
+    public void MightContain_BeforeAnyAdds_ShouldReturnFalse()
+    {
+        // Arrange
+        using Nethermind.State.Flat.Persistence.BloomFilter.BloomFilter bloom = new(capacity: 100, bitsPerKey: 10);
+
+        // Act & Assert
+        // Empty bloom filter should generally return false (though false positives are theoretically possible)
+        bool result = bloom.MightContain(99999);
+        result.Should().BeFalse("empty bloom filter should return false for items not added");
+    }
+
+    /// <summary>Filling the filter to capacity must keep Count accurate and all items findable.</summary>
+    [Test]
+    public void Add_LargeNumberOfItems_ShouldWork()
+    {
+        // Arrange
+        int totalItems = 500;
+        using Nethermind.State.Flat.Persistence.BloomFilter.BloomFilter bloom = new(capacity: totalItems, bitsPerKey: 10);
+
+        // Act
+        for (ulong i = 0; i < (ulong)totalItems; i++)
+        {
+            bloom.Add(i);
+        }
+
+        // Assert - Verify count
+        bloom.Count.Should().Be(totalItems);
+
+        // Verify sample of items can be found
+        for (ulong i = 0; i < 50; i++)
+        {
+            bloom.MightContain(i).Should().BeTrue($"hash {i} should be found");
+        }
+    }
+
+    #endregion
+}
diff --git a/src/Nethermind/Nethermind.State.Flat.Test/Persistence/PreimageRecordingPersistenceTests.cs b/src/Nethermind/Nethermind.State.Flat.Test/Persistence/PreimageRecordingPersistenceTests.cs
new file mode 100644
index 00000000000..3b752372472
--- /dev/null
+++ b/src/Nethermind/Nethermind.State.Flat.Test/Persistence/PreimageRecordingPersistenceTests.cs
@@ -0,0 +1,212 @@
+// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited
+// SPDX-License-Identifier: LGPL-3.0-only
+
+using FluentAssertions;
+using Nethermind.Core;
+using Nethermind.Core.Crypto;
+using Nethermind.Core.Test.Builders;
+using Nethermind.Db;
+using Nethermind.Int256;
+using Nethermind.State.Flat.Persistence;
+using Nethermind.Trie;
+using NSubstitute;
+using NUnit.Framework;
+
+namespace Nethermind.State.Flat.Test.Persistence;
+
+[TestFixture]
+public class PreimageRecordingPersistenceTests
+{
+ private const int PreimageLookupSize = 12;
+
+ private IPersistence _innerPersistence = null!;
+ private MemDb _preimageDb = null!;
+ private PreimageRecordingPersistence _sut = null!;
+
+ [SetUp]
+ public void SetUp()
+ {
+ _innerPersistence = Substitute.For<IPersistence>();
+ _preimageDb = new MemDb();
+ _sut = new PreimageRecordingPersistence(_innerPersistence, _preimageDb);
+ }
+
+ [TearDown]
+ public void TearDown() => _preimageDb.Dispose();
+
+ [Test]
+ public void PassThroughOperations_DelegateToInnerPersistence()
+ {
+ // CreateReader
+ IPersistence.IPersistenceReader expectedReader = Substitute.For<IPersistence.IPersistenceReader>();
+ _innerPersistence.CreateReader().Returns(expectedReader);
+ _sut.CreateReader().Should().BeSameAs(expectedReader);
+
+ // CreateWriteBatch
+ StateId from = StateId.PreGenesis;
+ StateId to = new StateId(1, TestItem.KeccakA);
+ IPersistence.IWriteBatch innerBatch = Substitute.For<IPersistence.IWriteBatch>();
+ _innerPersistence.CreateWriteBatch(from, to, WriteFlags.None).Returns(innerBatch);
+ using IPersistence.IWriteBatch batch = _sut.CreateWriteBatch(from, to, WriteFlags.None);
+ _innerPersistence.Received(1).CreateWriteBatch(from, to, WriteFlags.None);
+ }
+
+ [Test]
+ public void SetAccount_SetStorage_SelfDestruct_RecordPreimages()
+ {
+ StateId from = StateId.PreGenesis;
+ StateId to = new StateId(1, TestItem.KeccakA);
+ IPersistence.IWriteBatch innerBatch = Substitute.For<IPersistence.IWriteBatch>();
+ _innerPersistence.CreateWriteBatch(from, to, WriteFlags.None).Returns(innerBatch);
+
+ Address addressA = TestItem.AddressA;
+ Address addressB = TestItem.AddressB;
+ Account account = TestItem.GenerateIndexedAccount(0);
+ UInt256 slot = 42;
+ SlotValue? value = SlotValue.FromSpanWithoutLeadingZero([0x01, 0x02, 0x03]);
+
+ using (IPersistence.IWriteBatch batch = _sut.CreateWriteBatch(from, to, WriteFlags.None))
+ {
+ batch.SetAccount(addressA, account);
+ batch.SetStorage(addressA, slot, value);
+ batch.SelfDestruct(addressB);
+ }
+
+ // Verify inner batch calls
+ innerBatch.Received(1).SetAccount(addressA, account);
+ innerBatch.Received(1).SetStorage(addressA, slot, Arg.Is<SlotValue?>(v => v != null));
+ innerBatch.Received(1).SelfDestruct(addressB);
+
+ // Verify address preimages
+ ValueHash256 addressAPath = addressA.ToAccountPath;
+ _preimageDb.Get(addressAPath.BytesAsSpan[..PreimageLookupSize]).Should().BeEquivalentTo(addressA.Bytes);
+
+ ValueHash256 addressBPath = addressB.ToAccountPath;
+ _preimageDb.Get(addressBPath.BytesAsSpan[..PreimageLookupSize]).Should().BeEquivalentTo(addressB.Bytes);
+
+ // Verify slot preimage
+ ValueHash256 slotHash = ValueKeccak.Zero;
+ StorageTree.ComputeKeyWithLookup(slot, ref slotHash);
+ _preimageDb.Get(slotHash.BytesAsSpan[..PreimageLookupSize]).Should().BeEquivalentTo(slot.ToBigEndian());
+ }
+
+ [Test]
+ public void TrieAndRawOperations_WithoutPreimage_DelegateAsRaw()
+ {
+ StateId from = StateId.PreGenesis;
+ StateId to = new StateId(1, TestItem.KeccakA);
+ IPersistence.IWriteBatch innerBatch = Substitute.For<IPersistence.IWriteBatch>();
+ _innerPersistence.CreateWriteBatch(from, to, WriteFlags.None).Returns(innerBatch);
+
+ TreePath path = TreePath.FromHexString("1234");
+ TrieNode node = new TrieNode(NodeType.Leaf, [0xc1, 0x01]);
+ Hash256 addrHash = TestItem.KeccakA;
+ Hash256 slotHash = TestItem.KeccakB;
+ Account account = TestItem.GenerateIndexedAccount(0);
+ SlotValue? value = SlotValue.FromSpanWithoutLeadingZero([0xff]);
+
+ using (IPersistence.IWriteBatch batch = _sut.CreateWriteBatch(from, to, WriteFlags.None))
+ {
+ batch.SetStateTrieNode(path, node);
+ batch.SetStorageTrieNode(addrHash, path, node);
+ batch.SetStorageRaw(addrHash, slotHash, value);
+ batch.SetAccountRaw(addrHash, account);
+ }
+
+ // Verify trie operations delegated
+ innerBatch.Received(1).SetStateTrieNode(path, node);
+ innerBatch.Received(1).SetStorageTrieNode(addrHash, path, node);
+
+ // Without preimage, raw operations stay raw
+ innerBatch.Received(1).SetStorageRaw(addrHash, slotHash, Arg.Is<SlotValue?>(v => v != null));
+ innerBatch.Received(1).SetAccountRaw(addrHash, account);
+
+ // No preimages should be recorded for trie/raw operations
+ _preimageDb.Keys.Should().BeEmpty();
+ }
+
+ [Test]
+ public void RawOperations_WithPreimage_TranslatedToNonRaw()
+ {
+ Address address = TestItem.AddressA;
+ UInt256 slot = 42;
+ Account account = TestItem.GenerateIndexedAccount(0);
+ SlotValue? value = SlotValue.FromSpanWithoutLeadingZero([0xff]);
+
+ // Pre-populate preimage database with address and slot preimages
+ ValueHash256 addrHash = address.ToAccountPath;
+ _preimageDb.Set(addrHash.BytesAsSpan[..PreimageLookupSize], address.Bytes);
+
+ ValueHash256 slotHash = ValueKeccak.Zero;
+ StorageTree.ComputeKeyWithLookup(slot, ref slotHash);
+ _preimageDb.Set(slotHash.BytesAsSpan[..PreimageLookupSize], slot.ToBigEndian());
+
+ StateId from = StateId.PreGenesis;
+ StateId to = new StateId(1, TestItem.KeccakA);
+ IPersistence.IWriteBatch innerBatch = Substitute.For<IPersistence.IWriteBatch>();
+ _innerPersistence.CreateWriteBatch(from, to, WriteFlags.None).Returns(innerBatch);
+
+ using (IPersistence.IWriteBatch batch = _sut.CreateWriteBatch(from, to, WriteFlags.None))
+ {
+ batch.SetStorageRaw(new Hash256(addrHash), new Hash256(slotHash), value);
+ batch.SetAccountRaw(new Hash256(addrHash), account);
+ }
+
+ // With preimage available, raw operations are translated to non-raw
+ innerBatch.Received(1).SetStorage(address, slot, Arg.Is<SlotValue?>(v => v != null));
+ innerBatch.Received(1).SetAccount(address, account);
+
+ // Raw operations should NOT be called
+ innerBatch.DidNotReceive().SetStorageRaw(Arg.Any<Hash256>(), Arg.Any<Hash256>(), Arg.Any<SlotValue?>());
+ innerBatch.DidNotReceive().SetAccountRaw(Arg.Any<Hash256>(), Arg.Any<Account>());
+ }
+
+ [Test]
+ public void SetStorageRaw_WithOnlyAddressPreimage_FallsBackToRaw()
+ {
+ Address address = TestItem.AddressA;
+ UInt256 slot = 42;
+ SlotValue? value = SlotValue.FromSpanWithoutLeadingZero([0xff]);
+
+ // Pre-populate only address preimage (missing slot preimage)
+ ValueHash256 addrHash = address.ToAccountPath;
+ _preimageDb.Set(addrHash.BytesAsSpan[..PreimageLookupSize], address.Bytes);
+
+ ValueHash256 slotHash = ValueKeccak.Zero;
+ StorageTree.ComputeKeyWithLookup(slot, ref slotHash);
+ // Note: NOT setting slot preimage
+
+ StateId from = StateId.PreGenesis;
+ StateId to = new StateId(1, TestItem.KeccakA);
+ IPersistence.IWriteBatch innerBatch = Substitute.For<IPersistence.IWriteBatch>();
+ _innerPersistence.CreateWriteBatch(from, to, WriteFlags.None).Returns(innerBatch);
+
+ using (IPersistence.IWriteBatch batch = _sut.CreateWriteBatch(from, to, WriteFlags.None))
+ {
+ batch.SetStorageRaw(new Hash256(addrHash), new Hash256(slotHash), value);
+ }
+
+ // Without slot preimage, storage stays raw
+ innerBatch.Received(1).SetStorageRaw(new Hash256(addrHash), new Hash256(slotHash), Arg.Is<SlotValue?>(v => v != null));
+ innerBatch.DidNotReceive().SetStorage(Arg.Any<Address>(), Arg.Any<UInt256>(), Arg.Any<SlotValue?>());
+ }
+
+ [Test]
+ public void Dispose_DisposesPreimageBatchAndInnerBatch()
+ {
+ StateId from = StateId.PreGenesis;
+ StateId to = new StateId(1, TestItem.KeccakA);
+ IPersistence.IWriteBatch innerBatch = Substitute.For<IPersistence.IWriteBatch>();
+ _innerPersistence.CreateWriteBatch(from, to, WriteFlags.None).Returns(innerBatch);
+
+ IPersistence.IWriteBatch batch = _sut.CreateWriteBatch(from, to, WriteFlags.None);
+ batch.SetAccount(TestItem.AddressA, TestItem.GenerateIndexedAccount(0));
+ batch.Dispose();
+
+ innerBatch.Received(1).Dispose();
+
+ // Preimages should be flushed after dispose
+ ValueHash256 addressPath = TestItem.AddressA.ToAccountPath;
+ _preimageDb.Get(addressPath.BytesAsSpan[..PreimageLookupSize]).Should().NotBeNull();
+ }
+}
diff --git a/src/Nethermind/Nethermind.State.Flat.Test/PersistenceManagerTests.cs b/src/Nethermind/Nethermind.State.Flat.Test/PersistenceManagerTests.cs
new file mode 100644
index 00000000000..8d647074d49
--- /dev/null
+++ b/src/Nethermind/Nethermind.State.Flat.Test/PersistenceManagerTests.cs
@@ -0,0 +1,528 @@
+// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited
+// SPDX-License-Identifier: LGPL-3.0-only
+
+using System.Collections.Generic;
+using System.Linq;
+using Nethermind.Core;
+using Nethermind.Core.Collections;
+using Nethermind.Core.Crypto;
+using Nethermind.Core.Test.Builders;
+using Nethermind.Db;
+using Nethermind.Int256;
+using Nethermind.Logging;
+using Nethermind.State.Flat.Persistence;
+using Nethermind.Trie;
+using Nethermind.Trie.Pruning;
+using NSubstitute;
+using NUnit.Framework;
+
+namespace Nethermind.State.Flat.Test;
+
+[TestFixture]
+public class PersistenceManagerTests
+{
+ private PersistenceManager _persistenceManager = null!;
+ private FlatDbConfig _config = null!;
+ private TestFinalizedStateProvider _finalizedStateProvider = null!;
+ private SnapshotRepository _snapshotRepository = null!;
+ private IPersistence _persistence = null!;
+ private ResourcePool _resourcePool = null!;
+ private StateId Block0 = new StateId(0, Keccak.EmptyTreeHash);
+
+ [SetUp]
+ public void SetUp()
+ {
+ _config = new FlatDbConfig
+ {
+ CompactSize = 16,
+ MinReorgDepth = 64,
+ MaxReorgDepth = 256
+ };
+
+ _resourcePool = new ResourcePool(_config);
+ _finalizedStateProvider = new TestFinalizedStateProvider();
+ _snapshotRepository = new SnapshotRepository(LimboLogs.Instance);
+ _persistence = Substitute.For<IPersistence>();
+
+ IPersistence.IPersistenceReader persistenceReader = Substitute.For<IPersistence.IPersistenceReader>();
+ persistenceReader.CurrentState.Returns(Block0);
+ _persistence.CreateReader().Returns(persistenceReader);
+
+ _persistenceManager = new PersistenceManager(
+ _config,
+ _finalizedStateProvider,
+ _persistence,
+ _snapshotRepository,
+ LimboLogs.Instance);
+ }
+
+ [TearDown]
+ public void TearDown()
+ {
+ }
+
+ private StateId CreateStateId(long blockNumber, byte rootByte = 0)
+ {
+ byte[] bytes = new byte[32];
+ bytes[0] = rootByte;
+ return new StateId(blockNumber, new ValueHash256(bytes));
+ }
+
+ private Snapshot CreateSnapshot(StateId from, StateId to, bool compacted = false)
+ {
+ Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv);
+ snapshot.Content.Accounts[TestItem.AddressA] = new Account(1, 100);
+
+ if (compacted)
+ {
+ _snapshotRepository.TryAddCompactedSnapshot(snapshot);
+ }
+ else
+ {
+ _snapshotRepository.TryAddSnapshot(snapshot);
+ }
+
+ // AddStateId is needed for GetStatesAtBlockNumber to work
+ _snapshotRepository.AddStateId(to);
+
+ return snapshot;
+ }
+
+ private Snapshot CreateSnapshotWithSelfDestruct(StateId from, StateId to)
+ {
+ Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv);
+ snapshot.Content.SelfDestructedStorageAddresses[TestItem.AddressA] = false; // false = should be processed
+ return snapshot;
+ }
+
+ #region Basic Behavior Tests
+
+ [Test]
+ public void DetermineSnapshotToPersist_InsufficientInMemoryDepth_ReturnsNull()
+ {
+ // Setup: persisted at Block0 (0), latest at 60, after persist would be < 64 minimum
+ StateId persisted = Block0;
+ StateId latest = CreateStateId(60);
+ _finalizedStateProvider.SetFinalizedBlockNumber(100);
+
+ Snapshot? result = _persistenceManager.DetermineSnapshotToPersist(latest);
+
+ Assert.That(result, Is.Null);
+ }
+
+ [TestCase(true, TestName = "DetermineSnapshotToPersist_SufficientDepthAndFinalized_ReturnsCompactedSnapshot")]
+ [TestCase(false, TestName = "DetermineSnapshotToPersist_SufficientDepthAndFinalized_FallsBackToUncompacted")]
+ public void DetermineSnapshotToPersist_SufficientDepthAndFinalized(bool useCompacted)
+ {
+ // Setup: persisted at Block0, latest at 100, finalized at 100
+ StateId persisted = Block0;
+ StateId latest = CreateStateId(100);
+
+ // Vary target block and compaction based on parameter
+ int targetBlock = useCompacted ? 16 : 1; // compacted uses 16, fallback uses 1
+ StateId target = CreateStateId(targetBlock);
+
+ _finalizedStateProvider.SetFinalizedBlockNumber(100);
+ _finalizedStateProvider.SetFinalizedStateRootAt(targetBlock, new Hash256(target.StateRoot.Bytes));
+
+ // Create snapshot (compacted or not based on parameter)
+ using Snapshot expectedSnapshot = CreateSnapshot(persisted, target, compacted: useCompacted);
+
+ Snapshot? result = _persistenceManager.DetermineSnapshotToPersist(latest);
+
+ Assert.That(result, Is.Not.Null);
+ Assert.That(result!.From, Is.EqualTo(persisted));
+ Assert.That(result.To, Is.EqualTo(target));
+
+ result.Dispose();
+ }
+
+ #endregion
+
+ #region Unfinalized State Tests
+
+ [Test]
+ public void DetermineSnapshotToPersist_UnfinalizedButBelowForceLimit_ReturnsNull()
+ {
+ // Setup: persisted at Block0, latest at 150, finalized at 10 (way behind)
+ // After persist would be at 16, which is > finalized
+ // But in-memory depth is 150 (< 256 forced boundary)
+ StateId persisted = Block0;
+ StateId latest = CreateStateId(150);
+ _finalizedStateProvider.SetFinalizedBlockNumber(10);
+
+ Snapshot? result = _persistenceManager.DetermineSnapshotToPersist(latest);
+
+ Assert.That(result, Is.Null);
+ }
+
+ [TestCase(true, TestName = "DetermineSnapshotToPersist_UnfinalizedAndAboveForceLimit_ForcePersistsCompacted")]
+ [TestCase(false, TestName = "DetermineSnapshotToPersist_UnfinalizedAndAboveForceLimit_FallsBackToUncompacted")]
+ public void DetermineSnapshotToPersist_UnfinalizedAndAboveForceLimit(bool useCompacted)
+ {
+ // Setup: persisted at Block0, latest at 300, finalized at 10
+ // In-memory depth is ~301 (> 256 forced boundary)
+ StateId persisted = Block0;
+ StateId latest = CreateStateId(300);
+
+ // Vary target block and compaction based on parameter
+ int targetBlock = useCompacted ? 16 : 1; // compacted uses 16, fallback uses 1
+ StateId target = CreateStateId(targetBlock);
+
+ _finalizedStateProvider.SetFinalizedBlockNumber(10);
+
+ // Create snapshot (compacted or not based on parameter)
+ using Snapshot expectedSnapshot = CreateSnapshot(persisted, target, compacted: useCompacted);
+
+ Snapshot? result = _persistenceManager.DetermineSnapshotToPersist(latest);
+
+ Assert.That(result, Is.Not.Null);
+ Assert.That(result!.From, Is.EqualTo(persisted));
+ Assert.That(result.To, Is.EqualTo(target));
+
+ result.Dispose();
+ }
+
+ #endregion
+
+ #region Edge Cases
+
+ [Test]
+ public void DetermineSnapshotToPersist_NoSnapshotAvailable_ReturnsNull()
+ {
+ // Setup: sufficient depth but no snapshots in repository
+ StateId persisted = Block0;
+ StateId latest = CreateStateId(100);
+ _finalizedStateProvider.SetFinalizedBlockNumber(100);
+ _finalizedStateProvider.SetFinalizedStateRootAt(16, new Hash256(CreateStateId(16).StateRoot.Bytes));
+
+ // Don't create any snapshots
+
+ Snapshot? result = _persistenceManager.DetermineSnapshotToPersist(latest);
+
+ Assert.That(result, Is.Null);
+ }
+
+ [Test]
+ public void DetermineSnapshotToPersist_SnapshotWithWrongFromState_ReturnsNull()
+ {
+ // Setup: snapshot exists but doesn't start from current persisted state
+ StateId persisted = Block0;
+ StateId latest = CreateStateId(100);
+ StateId wrongFrom = CreateStateId(5);
+ StateId target = CreateStateId(16);
+ _finalizedStateProvider.SetFinalizedBlockNumber(100);
+ _finalizedStateProvider.SetFinalizedStateRootAt(16, new Hash256(target.StateRoot.Bytes));
+
+ // Create snapshot with wrong "from" state
+ using Snapshot wrongSnapshot = CreateSnapshot(wrongFrom, target, compacted: true);
+
+ Snapshot? result = _persistenceManager.DetermineSnapshotToPersist(latest);
+
+ Assert.That(result, Is.Null);
+ }
+
+ [Test]
+ public void DetermineSnapshotToPersist_MultipleStatesAtBlock_SelectsCorrectOne()
+ {
+ // Setup: multiple state roots at same block number (reorg scenario)
+ StateId persisted = Block0;
+ StateId latest = CreateStateId(100);
+ StateId target1 = CreateStateId(16, rootByte: 1);
+ StateId target2 = CreateStateId(16, rootByte: 2); // Different root
+ _finalizedStateProvider.SetFinalizedBlockNumber(100);
+ _finalizedStateProvider.SetFinalizedStateRootAt(16, new Hash256(target2.StateRoot.Bytes)); // target2 is finalized
+
+ // Create both snapshots
+ using Snapshot snapshot1 = CreateSnapshot(persisted, target1, compacted: true);
+ using Snapshot snapshot2 = CreateSnapshot(persisted, target2, compacted: true);
+
+ Snapshot? result = _persistenceManager.DetermineSnapshotToPersist(latest);
+
+ Assert.That(result, Is.Not.Null);
+ Assert.That(result!.To.StateRoot.Bytes.ToArray(), Is.EqualTo(target2.StateRoot.Bytes.ToArray())); // Should select finalized one
+
+ result.Dispose();
+ }
+
+ [Test]
+ public void DetermineSnapshotToPersist_ExactlyAtMinimumBoundary_ReturnsNull()
+ {
+ // Setup: persisted at Block0 (0), latest at 79
+ // After persist would be at 15, leaving depth of 64 (exactly at minimum boundary)
+ StateId persisted = Block0;
+ StateId latest = CreateStateId(79);
+ _finalizedStateProvider.SetFinalizedBlockNumber(100);
+
+ Snapshot? result = _persistenceManager.DetermineSnapshotToPersist(latest);
+
+ Assert.That(result, Is.Null);
+ }
+
+ [Test]
+ public void DetermineSnapshotToPersist_OneAboveMinimumBoundary_ReturnsSnapshot()
+ {
+ // Setup: persisted at Block0 (0), latest at 80
+ // After persist would be at 15, leaving depth of 65 (one above minimum boundary)
+ StateId persisted = Block0;
+ StateId latest = CreateStateId(80);
+ StateId target = CreateStateId(16);
+ _finalizedStateProvider.SetFinalizedBlockNumber(100);
+ _finalizedStateProvider.SetFinalizedStateRootAt(16, new Hash256(target.StateRoot.Bytes));
+
+ using Snapshot expectedSnapshot = CreateSnapshot(persisted, target, compacted: true);
+
+ Snapshot? result = _persistenceManager.DetermineSnapshotToPersist(latest);
+
+ Assert.That(result, Is.Not.Null);
+
+ result!.Dispose();
+ }
+
+ #endregion
+
+ #region PersistSnapshot Tests
+
+ [Test]
+ public void PersistSnapshot_WithAccountsStorageAndTrieNodes_WritesToBatch()
+ {
+ // Arrange
+ StateId from = Block0;
+ StateId to = CreateStateId(16);
+ using Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv);
+
+ // Add accounts
+ snapshot.Content.Accounts[TestItem.AddressA] = new Account(1, 100);
+ snapshot.Content.Accounts[TestItem.AddressB] = new Account(2, 200);
+
+ // Add storage
+ snapshot.Content.Storages[(TestItem.AddressA, (UInt256)1)] = SlotValue.FromSpanWithoutLeadingZero([42]);
+ snapshot.Content.Storages[(TestItem.AddressA, (UInt256)2)] = SlotValue.FromSpanWithoutLeadingZero([99]);
+
+ // Add trie nodes
+ TreePath path = TreePath.Empty;
+ TrieNode node = new TrieNode(NodeType.Leaf, Keccak.Zero);
+ snapshot.Content.StateNodes[path] = node;
+
+ IPersistence.IWriteBatch writeBatch = Substitute.For<IPersistence.IWriteBatch>();
+ _persistence.CreateWriteBatch(from, to).Returns(writeBatch);
+
+ // Act
+ _persistenceManager.PersistSnapshot(snapshot);
+
+ // Assert
+ writeBatch.Received().SetAccount(TestItem.AddressA, Arg.Any<Account>());
+ writeBatch.Received().SetAccount(TestItem.AddressB, Arg.Any<Account>());
+ writeBatch.Received().SetStorage(TestItem.AddressA, (UInt256)1, Arg.Any<SlotValue?>());
+ writeBatch.Received().SetStorage(TestItem.AddressA, (UInt256)2, Arg.Any<SlotValue?>());
+ writeBatch.Received().SetStateTrieNode(Arg.Any<TreePath>(), Arg.Any<TrieNode>());
+ Assert.That(node.IsPersisted, Is.True);
+ }
+
+ [Test]
+ public void PersistSnapshot_WithSelfDestructedAddresses_CallsSelfDestruct()
+ {
+ // Arrange
+ StateId from = Block0;
+ StateId to = CreateStateId(16);
+ using Snapshot snapshot = CreateSnapshotWithSelfDestruct(from, to);
+
+ IPersistence.IWriteBatch writeBatch = Substitute.For<IPersistence.IWriteBatch>();
+ _persistence.CreateWriteBatch(from, to).Returns(writeBatch);
+
+ // Act
+ _persistenceManager.PersistSnapshot(snapshot);
+
+ // Assert
+ writeBatch.Received().SelfDestruct(TestItem.AddressA);
+ }
+
+ [Test]
+ public void PersistSnapshot_EmptySnapshot_CreatesWriteBatch()
+ {
+ // Arrange
+ StateId from = Block0;
+ StateId to = CreateStateId(16);
+ using Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv);
+
+ IPersistence.IWriteBatch writeBatch = Substitute.For<IPersistence.IWriteBatch>();
+ _persistence.CreateWriteBatch(from, to).Returns(writeBatch);
+
+ // Act
+ _persistenceManager.PersistSnapshot(snapshot);
+
+ // Assert
+ _persistence.Received(1).CreateWriteBatch(from, to);
+ }
+
+ #endregion
+
+ #region AddToPersistence Tests
+
+ [Test]
+ public void AddToPersistence_WithAvailableSnapshot_PersistsAndUpdatesState()
+ {
+ // Arrange
+ StateId from = Block0;
+ StateId to = CreateStateId(16);
+ StateId latest = CreateStateId(100);
+
+ // Create a snapshot that should be persisted
+ using Snapshot snapshot = CreateSnapshot(from, to, compacted: true);
+
+ _finalizedStateProvider.SetFinalizedBlockNumber(100);
+ _finalizedStateProvider.SetFinalizedStateRootAt(16, new Hash256(to.StateRoot.Bytes));
+
+ IPersistence.IWriteBatch writeBatch = Substitute.For<IPersistence.IWriteBatch>();
+ _persistence.CreateWriteBatch(Arg.Any<StateId>(), Arg.Any<StateId>()).Returns(writeBatch);
+
+ // Act
+ _persistenceManager.AddToPersistence(latest);
+
+ // Assert
+ // Verify write batch was created (persistence happened)
+ _persistence.Received().CreateWriteBatch(from, to);
+
+ // Verify current persisted state was updated
+ Assert.That(_persistenceManager.GetCurrentPersistedStateId(), Is.EqualTo(to));
+ }
+
+ #endregion
+
+ #region FlushToPersistence Tests
+
+ [Test]
+ public void FlushToPersistence_NoSnapshots_ReturnsCurrentPersistedState()
+ {
+ // Arrange - no snapshots added
+ StateId persisted = Block0;
+
+ // Act
+ StateId result = _persistenceManager.FlushToPersistence();
+
+ // Assert
+ Assert.That(result, Is.EqualTo(persisted));
+ }
+
+ [Test]
+ public void FlushToPersistence_WithFinalizedSnapshots_PersistsFinalizedFirst()
+ {
+ // Arrange
+ StateId state16 = CreateStateId(16);
+ StateId state32 = CreateStateId(32);
+
+ _finalizedStateProvider.SetFinalizedBlockNumber(100);
+ _finalizedStateProvider.SetFinalizedStateRootAt(16, new Hash256(state16.StateRoot.Bytes));
+ _finalizedStateProvider.SetFinalizedStateRootAt(32, new Hash256(state32.StateRoot.Bytes));
+
+ using Snapshot snapshot1 = CreateSnapshot(Block0, state16, compacted: true);
+ using Snapshot snapshot2 = CreateSnapshot(state16, state32, compacted: true);
+
+ IPersistence.IWriteBatch writeBatch = Substitute.For<IPersistence.IWriteBatch>();
+ _persistence.CreateWriteBatch(Arg.Any<StateId>(), Arg.Any<StateId>()).Returns(writeBatch);
+
+ // Act
+ StateId result = _persistenceManager.FlushToPersistence();
+
+ // Assert
+ Assert.That(result, Is.EqualTo(state32));
+ _persistence.Received().CreateWriteBatch(Block0, state16);
+ _persistence.Received().CreateWriteBatch(state16, state32);
+ }
+
+ [Test]
+ public void FlushToPersistence_WithUnfinalizedSnapshots_FallsBackToFirstAvailable()
+ {
+ // Arrange - no finalization info available
+ StateId state16 = CreateStateId(16);
+ _finalizedStateProvider.SetFinalizedBlockNumber(0); // Nothing finalized
+
+ using Snapshot snapshot = CreateSnapshot(Block0, state16, compacted: true);
+
+ IPersistence.IWriteBatch writeBatch = Substitute.For<IPersistence.IWriteBatch>();
+ _persistence.CreateWriteBatch(Arg.Any<StateId>(), Arg.Any<StateId>()).Returns(writeBatch);
+
+ // Act
+ StateId result = _persistenceManager.FlushToPersistence();
+
+ // Assert
+ Assert.That(result, Is.EqualTo(state16));
+ _persistence.Received().CreateWriteBatch(Block0, state16);
+ }
+
+ [Test]
+ public void FlushToPersistence_PrefersFinalizedOverUnfinalized()
+ {
+ // Arrange - two snapshots at same block, one finalized
+ StateId finalizedState = CreateStateId(16, rootByte: 1);
+ StateId unfinalizedState = CreateStateId(16, rootByte: 2);
+
+ _finalizedStateProvider.SetFinalizedBlockNumber(100);
+ _finalizedStateProvider.SetFinalizedStateRootAt(16, new Hash256(finalizedState.StateRoot.Bytes));
+
+ // Create both snapshots
+ using Snapshot finalizedSnapshot = CreateSnapshot(Block0, finalizedState, compacted: true);
+ using Snapshot unfinalizedSnapshot = CreateSnapshot(Block0, unfinalizedState, compacted: true);
+
+ IPersistence.IWriteBatch writeBatch = Substitute.For<IPersistence.IWriteBatch>();
+ _persistence.CreateWriteBatch(Arg.Any<StateId>(), Arg.Any<StateId>()).Returns(writeBatch);
+
+ // Act
+ StateId result = _persistenceManager.FlushToPersistence();
+
+ // Assert - should persist finalized state
+ Assert.That(result.StateRoot.Bytes.ToArray(), Is.EqualTo(finalizedState.StateRoot.Bytes.ToArray()));
+ }
+
+ [Test]
+ public void FlushToPersistence_PersistsMultipleSnapshots_InOrder()
+ {
+ // Arrange
+ StateId state1 = CreateStateId(1);
+ StateId state2 = CreateStateId(2);
+ StateId state3 = CreateStateId(3);
+
+ // No finalization - will use first available
+ _finalizedStateProvider.SetFinalizedBlockNumber(0);
+
+ using Snapshot snapshot1 = CreateSnapshot(Block0, state1, compacted: false);
+ using Snapshot snapshot2 = CreateSnapshot(state1, state2, compacted: false);
+ using Snapshot snapshot3 = CreateSnapshot(state2, state3, compacted: false);
+
+ IPersistence.IWriteBatch writeBatch = Substitute.For<IPersistence.IWriteBatch>();
+ _persistence.CreateWriteBatch(Arg.Any<StateId>(), Arg.Any<StateId>()).Returns(writeBatch);
+
+ // Act
+ StateId result = _persistenceManager.FlushToPersistence();
+
+ // Assert
+ Assert.That(result, Is.EqualTo(state3));
+ Received.InOrder(() =>
+ {
+ _persistence.CreateWriteBatch(Block0, state1);
+ _persistence.CreateWriteBatch(state1, state2);
+ _persistence.CreateWriteBatch(state2, state3);
+ });
+ }
+
+ #endregion
+
+ #region Helper Classes
+
+ private class TestFinalizedStateProvider : IFinalizedStateProvider
+ {
+ private long _finalizedBlockNumber;
+ private readonly Dictionary<long, Hash256> _finalizedStateRoots = new();
+
+ public long FinalizedBlockNumber => _finalizedBlockNumber;
+
+ public void SetFinalizedBlockNumber(long blockNumber) => _finalizedBlockNumber = blockNumber;
+
+ public void SetFinalizedStateRootAt(long blockNumber, Hash256 stateRoot) => _finalizedStateRoots[blockNumber] = stateRoot;
+
+ public Hash256? GetFinalizedStateRootAt(long blockNumber) =>
+ _finalizedStateRoots.TryGetValue(blockNumber, out Hash256? root) ? root : null;
+ }
+
+ #endregion
+}
diff --git a/src/Nethermind/Nethermind.State.Flat.Test/PersistenceScenario.cs b/src/Nethermind/Nethermind.State.Flat.Test/PersistenceScenario.cs
new file mode 100644
index 00000000000..8c772d79d1d
--- /dev/null
+++ b/src/Nethermind/Nethermind.State.Flat.Test/PersistenceScenario.cs
@@ -0,0 +1,878 @@
+// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited
+// SPDX-License-Identifier: LGPL-3.0-only
+
+
+using System;
+using System.Collections.Generic;
+using Autofac;
+using Nethermind.Api;
+using Nethermind.Config;
+using Nethermind.Core;
+using Nethermind.Core.Crypto;
+using Nethermind.Core.Extensions;
+using Nethermind.Core.Test.Builders;
+using Nethermind.Core.Test.IO;
+using Nethermind.Db;
+using Nethermind.Init.Modules;
+using Nethermind.Int256;
+using Nethermind.Logging;
+using Nethermind.Serialization.Rlp;
+using Nethermind.Specs.ChainSpecStyle;
+using Nethermind.State.Flat.Persistence;
+using Nethermind.Trie;
+using NSubstitute;
+using NUnit.Framework;
+
+namespace Nethermind.State.Flat.Test;
+
+[TestFixtureSource(nameof(TestConfigs))]
+public class PersistenceScenario(PersistenceScenario.TestConfiguration configuration)
+{
+ private TempPath _tmpDirectory = null!;
+ private IContainer _container = null!;
+ private IPersistence _persistence = null!;
+
+ // Reads one storage slot through TryGetSlot and returns its EVM byte representation,
+ // or null when the slot is not present.
+ private static byte[]? GetSlot(IPersistence.IPersistenceReader reader, Address address, in UInt256 slot)
+ {
+ SlotValue value = default;
+ return reader.TryGetSlot(address, in slot, ref value) ? value.ToEvmBytes() : null;
+ }
+
+ // Fixture parameter: the flat DB configuration under test plus a short display
+ // name (ToString is what NUnit shows in the test-case name).
+ public record TestConfiguration(FlatDbConfig FlatDbConfig, string Name)
+ {
+ public override string ToString() => Name;
+ }
+
+ // Fixture sources: one TestConfiguration per supported flat DB layout.
+ // NOTE(review): the generic argument appears stripped in this patch - bare IEnumerable
+ // is not in scope with only System.Collections.Generic imported; restored as
+ // IEnumerable<TestConfiguration>.
+ public static IEnumerable<TestConfiguration> TestConfigs()
+ {
+ yield return new TestConfiguration(new FlatDbConfig()
+ {
+ Enabled = true,
+ Layout = FlatLayout.Flat
+ }, "Flat");
+ yield return new TestConfiguration(new FlatDbConfig()
+ {
+ Enabled = true,
+ Layout = FlatLayout.FlatInTrie
+ }, "FlatInTrie");
+ yield return new TestConfiguration(new FlatDbConfig()
+ {
+ Enabled = true,
+ Layout = FlatLayout.PreimageFlat
+ }, "PreimageFlat");
+ }
+
+ [SetUp]
+ public void Setup()
+ {
+ // Builds a real DI container over a temp directory so each test gets a fresh,
+ // isolated persistence instance for the fixture's layout.
+ _tmpDirectory = TempPath.GetTempDirectory();
+ _container = new ContainerBuilder()
+ .AddModule(
+ new NethermindModule(
+ new ChainSpec(),
+ new ConfigProvider(
+ configuration.FlatDbConfig,
+ new InitConfig()
+ {
+ BaseDbPath = _tmpDirectory.Path,
+ }),
+ LimboLogs.Instance))
+ // TODO(review): the generic argument of Substitute.For<...>() was lost in this
+ // patch and cannot be recovered from the visible context - restore the mocked
+ // service type before merging.
+ .AddSingleton(Substitute.For())
+ .Build();
+
+ // NOTE(review): restored the stripped generic argument from the field's declared type.
+ _persistence = _container.Resolve<IPersistence>();
+ }
+
+ [TearDown]
+ public void TearDown()
+ {
+ // Dispose the container (and the persistence it owns) before removing
+ // the temp directory that backs it.
+ _container.Dispose();
+ _tmpDirectory.Dispose();
+ }
+
+ [Test]
+ public void TestCanWriteAccount()
+ {
+ // A fresh store holds no account; after a single write batch the same
+ // account is visible to a new reader.
+ Address address = TestItem.AddressA;
+ Account expected = TestItem.GenerateIndexedAccount(0);
+
+ using (IPersistence.IPersistenceReader reader = _persistence.CreateReader())
+ {
+ Assert.That(reader.GetAccount(address), Is.Null);
+ }
+
+ using (IPersistence.IWriteBatch batch = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ batch.SetAccount(address, expected);
+ }
+
+ using (IPersistence.IPersistenceReader reader = _persistence.CreateReader())
+ {
+ Assert.That(reader.GetAccount(address), Is.EqualTo(expected));
+ }
+ }
+
+ [Test]
+ public void TestCanAccountSnapshot()
+ {
+ // Snapshot isolation: each reader must keep seeing the account value as of
+ // the moment the reader was created, even after later write batches.
+ Address address = TestItem.AddressA;
+
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ writer.SetAccount(address, TestItem.GenerateIndexedAccount(0));
+ }
+
+ using IPersistence.IPersistenceReader reader1 = _persistence.CreateReader();
+
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ writer.SetAccount(address, TestItem.GenerateIndexedAccount(1));
+ }
+
+ using IPersistence.IPersistenceReader reader2 = _persistence.CreateReader();
+
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ writer.SetAccount(address, TestItem.GenerateIndexedAccount(2));
+ }
+
+ using IPersistence.IPersistenceReader reader3 = _persistence.CreateReader();
+
+ // Each reader still reports the value current at its creation time.
+ Assert.That(reader1.GetAccount(address), Is.EqualTo(TestItem.GenerateIndexedAccount(0)));
+ Assert.That(reader2.GetAccount(address), Is.EqualTo(TestItem.GenerateIndexedAccount(1)));
+ Assert.That(reader3.GetAccount(address), Is.EqualTo(TestItem.GenerateIndexedAccount(2)));
+ }
+
+ [Test]
+ public void TestSelfDestructAccount()
+ {
+ // SelfDestruct must remove all storage of the destructed account while
+ // leaving another account's storage (same slot keys) untouched.
+ Account acc = TestItem.GenerateIndexedAccount(0);
+ Account acc2 = TestItem.GenerateIndexedAccount(1);
+ Address address = TestItem.AddressA;
+ Address address2 = TestItem.AddressB;
+
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ writer.SetAccount(address, acc);
+ writer.SetStorage(address, UInt256.MinValue, SlotValue.FromSpanWithoutLeadingZero([1]));
+ writer.SetStorage(address, 123, SlotValue.FromSpanWithoutLeadingZero([2]));
+ writer.SetStorage(address, UInt256.MaxValue, SlotValue.FromSpanWithoutLeadingZero([3]));
+
+ writer.SetAccount(address2, acc2);
+ writer.SetStorage(address2, UInt256.MinValue, SlotValue.FromSpanWithoutLeadingZero([1]));
+ writer.SetStorage(address2, 123, SlotValue.FromSpanWithoutLeadingZero([2]));
+ writer.SetStorage(address2, UInt256.MaxValue, SlotValue.FromSpanWithoutLeadingZero([3]));
+ }
+
+ // NOTE(review): Is.EqualTo([...]) does not compile - a collection expression has no
+ // target type when converted to the object parameter (CS9176). Replaced with explicit
+ // byte arrays, matching the byte spans written above.
+ using (IPersistence.IPersistenceReader reader = _persistence.CreateReader())
+ {
+ Assert.That(GetSlot(reader, address, UInt256.MinValue), Is.EqualTo(new byte[] { 1 }));
+ Assert.That(GetSlot(reader, address, 123), Is.EqualTo(new byte[] { 2 }));
+ Assert.That(GetSlot(reader, address, UInt256.MaxValue), Is.EqualTo(new byte[] { 3 }));
+ }
+
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ writer.SelfDestruct(address);
+ }
+
+ using (IPersistence.IPersistenceReader reader = _persistence.CreateReader())
+ {
+ Assert.That(GetSlot(reader, address, UInt256.MinValue), Is.Null);
+ Assert.That(GetSlot(reader, address, 123), Is.Null);
+ Assert.That(GetSlot(reader, address, UInt256.MaxValue), Is.Null);
+
+ Assert.That(GetSlot(reader, address2, UInt256.MinValue), Is.EqualTo(new byte[] { 1 }));
+ Assert.That(GetSlot(reader, address2, 123), Is.EqualTo(new byte[] { 2 }));
+ Assert.That(GetSlot(reader, address2, UInt256.MaxValue), Is.EqualTo(new byte[] { 3 }));
+ }
+ }
+
+ [Test]
+ public void TestCanWriteAndReadStorage()
+ {
+ // Storage slots across the full key range (min, small, medium, max) must
+ // round-trip through a write batch and a fresh reader.
+ Account acc = TestItem.GenerateIndexedAccount(0);
+ Address address = TestItem.AddressA;
+
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ writer.SetAccount(address, acc);
+ }
+
+ // Initially, slots should be null
+ using (IPersistence.IPersistenceReader reader = _persistence.CreateReader())
+ {
+ Assert.That(GetSlot(reader, address, UInt256.MinValue), Is.Null);
+ Assert.That(GetSlot(reader, address, UInt256.MaxValue), Is.Null);
+ }
+
+ // Write various storage slots
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ writer.SetStorage(address, UInt256.MinValue, SlotValue.FromSpanWithoutLeadingZero([1, 2, 3]));
+ writer.SetStorage(address, 42, SlotValue.FromSpanWithoutLeadingZero([0x42]));
+ writer.SetStorage(address, 12345, SlotValue.FromSpanWithoutLeadingZero([0x10, 0x20, 0x30, 0x40]));
+ writer.SetStorage(address, UInt256.MaxValue, SlotValue.FromSpanWithoutLeadingZero([0xff, 0xfe, 0xfd]));
+ }
+
+ // Verify all slots can be read back.
+ // NOTE(review): Is.EqualTo([...]) does not compile (collection expression has no
+ // target type for an object parameter); replaced with explicit byte arrays.
+ using (IPersistence.IPersistenceReader reader = _persistence.CreateReader())
+ {
+ Assert.That(GetSlot(reader, address, UInt256.MinValue), Is.EqualTo(new byte[] { 1, 2, 3 }));
+ Assert.That(GetSlot(reader, address, 42), Is.EqualTo(new byte[] { 0x42 }));
+ Assert.That(GetSlot(reader, address, 12345), Is.EqualTo(new byte[] { 0x10, 0x20, 0x30, 0x40 }));
+ Assert.That(GetSlot(reader, address, UInt256.MaxValue), Is.EqualTo(new byte[] { 0xff, 0xfe, 0xfd }));
+ }
+ }
+
+ [Test]
+ public void TestCanStorageSnapshot()
+ {
+ // Snapshot isolation for storage: readers created between writes keep seeing
+ // the slot value as of their creation.
+ Account acc = TestItem.GenerateIndexedAccount(0);
+ Address address = TestItem.AddressA;
+ UInt256 slot = 100;
+
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ writer.SetAccount(address, acc);
+ writer.SetStorage(address, slot, SlotValue.FromSpanWithoutLeadingZero([1]));
+ }
+
+ using IPersistence.IPersistenceReader reader1 = _persistence.CreateReader();
+
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ writer.SetStorage(address, slot, SlotValue.FromSpanWithoutLeadingZero([2]));
+ }
+
+ using IPersistence.IPersistenceReader reader2 = _persistence.CreateReader();
+
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ writer.SetStorage(address, slot, SlotValue.FromSpanWithoutLeadingZero([3]));
+ }
+
+ using IPersistence.IPersistenceReader reader3 = _persistence.CreateReader();
+
+ // NOTE(review): Is.EqualTo([...]) does not compile (no target type for the
+ // collection expression); replaced with explicit byte arrays.
+ Assert.That(GetSlot(reader1, address, slot), Is.EqualTo(new byte[] { 1 }));
+ Assert.That(GetSlot(reader2, address, slot), Is.EqualTo(new byte[] { 2 }));
+ Assert.That(GetSlot(reader3, address, slot), Is.EqualTo(new byte[] { 3 }));
+ }
+
+ [Test]
+ public void TestRemoveAccount()
+ {
+ // Writing null for an account removes it; this test does not assert anything
+ // about the account's storage after removal.
+ Account acc = TestItem.GenerateIndexedAccount(0);
+ Address address = TestItem.AddressA;
+
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ writer.SetAccount(address, acc);
+ writer.SetStorage(address, 1, SlotValue.FromSpanWithoutLeadingZero([0x01]));
+ writer.SetStorage(address, 2, SlotValue.FromSpanWithoutLeadingZero([0x02]));
+ }
+
+ // Verify account and storage exist.
+ // NOTE(review): Is.EqualTo([0x01]) does not compile (no target type for the
+ // collection expression); replaced with an explicit byte array.
+ using (IPersistence.IPersistenceReader reader = _persistence.CreateReader())
+ {
+ Assert.That(reader.GetAccount(address), Is.EqualTo(acc));
+ Assert.That(GetSlot(reader, address, 1), Is.EqualTo(new byte[] { 0x01 }));
+ }
+
+ // Remove account
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ writer.SetAccount(address, null);
+ }
+
+ // Verify account is removed (storage should remain unless explicitly removed)
+ using (IPersistence.IPersistenceReader reader = _persistence.CreateReader())
+ {
+ Assert.That(reader.GetAccount(address), Is.Null);
+ }
+ }
+
+ [Test]
+ public void TestRawOperations()
+ {
+ // Raw (pre-hashed) account and storage writes must round-trip through the reader.
+ // Skipped for the preimage layout, which does not support raw operations.
+ if (configuration.FlatDbConfig.Layout == FlatLayout.PreimageFlat) Assert.Ignore("Preimage mode does not support raw operation");
+
+ Account acc = TestItem.GenerateIndexedAccount(0);
+ Hash256 addrHash = new Hash256(TestItem.AddressA.ToAccountPath.Bytes);
+ Hash256 slotHash = Keccak.Compute([1, 2, 3]);
+
+ // Test raw account operations
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ writer.SetAccountRaw(addrHash, acc);
+ }
+
+ using (IPersistence.IPersistenceReader reader = _persistence.CreateReader())
+ {
+ byte[]? rawAccount = reader.GetAccountRaw(addrHash);
+ Assert.That(rawAccount, Is.Not.Null);
+
+ // Decode and verify
+ Rlp.ValueDecoderContext ctx = new Rlp.ValueDecoderContext(rawAccount);
+ Assert.That(AccountDecoder.Instance.Decode(ref ctx), Is.EqualTo(acc));
+ }
+
+ // Test raw storage operations
+ byte[] storageValue = Bytes.FromHexString("0x000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff");
+
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ writer.SetStorageRaw(addrHash, slotHash, SlotValue.FromBytes(storageValue));
+ }
+
+ using (IPersistence.IPersistenceReader reader = _persistence.CreateReader())
+ {
+ SlotValue rawValue = default;
+ // NOTE(review): storageValue is assigned a non-null array above, so both
+ // "storageValue is not null" checks are always true - the guard is dead.
+ Assert.That(reader.TryGetStorageRaw(addrHash, slotHash, ref rawValue), Is.EqualTo(storageValue is not null));
+ if (storageValue is not null)
+ {
+ Assert.That(rawValue.ToEvmBytes(), Is.EqualTo(storageValue.WithoutLeadingZeros().ToArray()));
+ }
+ }
+ }
+
+ [Test]
+ public void TestConcurrentSnapshots()
+ {
+ // Three readers created at different points must each see a consistent view of
+ // both the account and two independent storage slots.
+ Account acc = TestItem.GenerateIndexedAccount(0);
+ Address address = TestItem.AddressA;
+ UInt256 slot1 = 100;
+ UInt256 slot2 = 200;
+
+ // Initial state
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ writer.SetAccount(address, acc);
+ writer.SetStorage(address, slot1, SlotValue.FromSpanWithoutLeadingZero([1]));
+ writer.SetStorage(address, slot2, SlotValue.FromSpanWithoutLeadingZero([10]));
+ }
+
+ using IPersistence.IPersistenceReader reader1 = _persistence.CreateReader();
+
+ // Modify account and slot1
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ writer.SetAccount(address, TestItem.GenerateIndexedAccount(1));
+ writer.SetStorage(address, slot1, SlotValue.FromSpanWithoutLeadingZero([2]));
+ }
+
+ using IPersistence.IPersistenceReader reader2 = _persistence.CreateReader();
+
+ // Modify slot2
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ writer.SetStorage(address, slot2, SlotValue.FromSpanWithoutLeadingZero([20]));
+ }
+
+ using IPersistence.IPersistenceReader reader3 = _persistence.CreateReader();
+
+ // NOTE(review): Is.EqualTo([...]) does not compile (no target type for the
+ // collection expression); replaced with explicit byte arrays throughout.
+ // Verify reader1 sees initial state
+ Assert.That(reader1.GetAccount(address), Is.EqualTo(acc));
+ Assert.That(GetSlot(reader1, address, slot1), Is.EqualTo(new byte[] { 1 }));
+ Assert.That(GetSlot(reader1, address, slot2), Is.EqualTo(new byte[] { 10 }));
+
+ // Verify reader2 sees second state
+ Assert.That(reader2.GetAccount(address), Is.EqualTo(TestItem.GenerateIndexedAccount(1)));
+ Assert.That(GetSlot(reader2, address, slot1), Is.EqualTo(new byte[] { 2 }));
+ Assert.That(GetSlot(reader2, address, slot2), Is.EqualTo(new byte[] { 10 }));
+
+ // Verify reader3 sees final state
+ Assert.That(reader3.GetAccount(address), Is.EqualTo(TestItem.GenerateIndexedAccount(1)));
+ Assert.That(GetSlot(reader3, address, slot1), Is.EqualTo(new byte[] { 2 }));
+ Assert.That(GetSlot(reader3, address, slot2), Is.EqualTo(new byte[] { 20 }));
+ }
+
+ [Test]
+ public void TestStorageAcrossMultipleAccounts()
+ {
+ // The same slot index on different accounts must be stored independently.
+ Address addr1 = TestItem.AddressA;
+ Address addr2 = TestItem.AddressB;
+ Address addr3 = TestItem.AddressC;
+ UInt256 slot = 42;
+
+ // Write same slot number for different accounts
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ writer.SetAccount(addr1, TestItem.GenerateIndexedAccount(0));
+ writer.SetAccount(addr2, TestItem.GenerateIndexedAccount(1));
+ writer.SetAccount(addr3, TestItem.GenerateIndexedAccount(2));
+
+ writer.SetStorage(addr1, slot, SlotValue.FromSpanWithoutLeadingZero([0x11]));
+ writer.SetStorage(addr2, slot, SlotValue.FromSpanWithoutLeadingZero([0x22]));
+ writer.SetStorage(addr3, slot, SlotValue.FromSpanWithoutLeadingZero([0x33]));
+ }
+
+ // Verify each account has its own isolated storage.
+ // NOTE(review): Is.EqualTo([...]) does not compile (no target type for the
+ // collection expression); replaced with explicit byte arrays.
+ using (IPersistence.IPersistenceReader reader = _persistence.CreateReader())
+ {
+ Assert.That(GetSlot(reader, addr1, slot), Is.EqualTo(new byte[] { 0x11 }));
+ Assert.That(GetSlot(reader, addr2, slot), Is.EqualTo(new byte[] { 0x22 }));
+ Assert.That(GetSlot(reader, addr3, slot), Is.EqualTo(new byte[] { 0x33 }));
+ }
+
+ // Modify storage for addr2 only
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ writer.SetStorage(addr2, slot, SlotValue.FromSpanWithoutLeadingZero([0xff]));
+ }
+
+ // Verify only addr2's storage changed
+ using (IPersistence.IPersistenceReader reader = _persistence.CreateReader())
+ {
+ Assert.That(GetSlot(reader, addr1, slot), Is.EqualTo(new byte[] { 0x11 }));
+ Assert.That(GetSlot(reader, addr2, slot), Is.EqualTo(new byte[] { 0xff }));
+ Assert.That(GetSlot(reader, addr3, slot), Is.EqualTo(new byte[] { 0x33 }));
+ }
+ }
+
+ [Test]
+ public void TestCanWriteAndReadTrieNodes()
+ {
+ // Trie-node RLP written at state and storage paths of various lengths must be
+ // readable back, with storage nodes isolated per account and state vs storage
+ // namespaces kept separate.
+ // State trie nodes with various path lengths
+ TreePath stateShortPath = TreePath.FromHexString("12345"); // <=5 nibbles -> stateTopNodes
+ TreePath stateMediumPath = TreePath.FromHexString("123456789abc"); // >5 nibbles -> stateNodes
+ TreePath stateLongPath = TreePath.FromHexString("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef");
+
+ byte[] stateShortRlp = [0xc1, 0x01];
+ byte[] stateMediumRlp = [0xc1, 0x02];
+ byte[] stateLongRlp = [0xc1, 0x03];
+
+ // Storage trie nodes for different accounts
+ Hash256 account1 = TestItem.KeccakA;
+ Hash256 account2 = TestItem.KeccakB;
+ TreePath storageShortPath = TreePath.FromHexString("abcd");
+ TreePath storageLongPath = TreePath.FromHexString("abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789");
+
+ byte[] storage1ShortRlp = [0xc1, 0xaa];
+ byte[] storage1LongRlp = [0xc1, 0xab];
+ byte[] storage2ShortRlp = [0xc1, 0xbb];
+
+ // Write all trie nodes
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ // State trie nodes (address=null)
+ writer.SetStateTrieNode(in stateShortPath, new TrieNode(NodeType.Leaf, stateShortRlp));
+ writer.SetStateTrieNode(in stateMediumPath, new TrieNode(NodeType.Leaf, stateMediumRlp));
+ writer.SetStateTrieNode(in stateLongPath, new TrieNode(NodeType.Leaf, stateLongRlp));
+
+ // Storage trie nodes (with account address)
+ writer.SetStorageTrieNode(account1, in storageShortPath, new TrieNode(NodeType.Leaf, storage1ShortRlp));
+ writer.SetStorageTrieNode(account1, in storageLongPath, new TrieNode(NodeType.Leaf, storage1LongRlp));
+ writer.SetStorageTrieNode(account2, in storageShortPath, new TrieNode(NodeType.Leaf, storage2ShortRlp));
+ }
+
+ // Verify all nodes
+ using (IPersistence.IPersistenceReader reader = _persistence.CreateReader())
+ {
+ // State trie nodes
+ Assert.That(reader.TryLoadStateRlp(in stateShortPath, ReadFlags.None), Is.EqualTo(stateShortRlp));
+ Assert.That(reader.TryLoadStateRlp(in stateMediumPath, ReadFlags.None), Is.EqualTo(stateMediumRlp));
+ Assert.That(reader.TryLoadStateRlp(in stateLongPath, ReadFlags.None), Is.EqualTo(stateLongRlp));
+
+ // Storage trie nodes - verify account isolation
+ Assert.That(reader.TryLoadStorageRlp(account1, in storageShortPath, ReadFlags.None), Is.EqualTo(storage1ShortRlp));
+ Assert.That(reader.TryLoadStorageRlp(account1, in storageLongPath, ReadFlags.None), Is.EqualTo(storage1LongRlp));
+ Assert.That(reader.TryLoadStorageRlp(account2, in storageShortPath, ReadFlags.None), Is.EqualTo(storage2ShortRlp));
+
+ // State and storage at same path are separate
+ Assert.That(reader.TryLoadStateRlp(in storageShortPath, ReadFlags.None), Is.Null);
+ }
+ }
+
+ [Test]
+ public void TestTrieNodeSnapshot()
+ {
+ // Snapshot isolation for trie-node RLP: each reader keeps seeing the node
+ // written before its creation, despite later overwrites at the same path.
+ TreePath path = TreePath.FromHexString("abcdef");
+
+ byte[] rlpData1 = [0xc1, 0x01];
+ byte[] rlpData2 = [0xc1, 0x02];
+ byte[] rlpData3 = [0xc1, 0x03];
+
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ writer.SetStateTrieNode(in path, new TrieNode(NodeType.Leaf, rlpData1));
+ }
+ using IPersistence.IPersistenceReader reader1 = _persistence.CreateReader();
+
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ writer.SetStateTrieNode(in path, new TrieNode(NodeType.Leaf, rlpData2));
+ }
+ using IPersistence.IPersistenceReader reader2 = _persistence.CreateReader();
+
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ writer.SetStateTrieNode(in path, new TrieNode(NodeType.Leaf, rlpData3));
+ }
+ using IPersistence.IPersistenceReader reader3 = _persistence.CreateReader();
+
+ Assert.That(reader1.TryLoadStateRlp(in path, ReadFlags.None), Is.EqualTo(rlpData1));
+ Assert.That(reader2.TryLoadStateRlp(in path, ReadFlags.None), Is.EqualTo(rlpData2));
+ Assert.That(reader3.TryLoadStateRlp(in path, ReadFlags.None), Is.EqualTo(rlpData3));
+ }
+
+ [Test]
+ public void TestTrieNodeBoundaryPathLengths()
+ {
+ // Round-trips trie nodes at the exact path lengths where the implementation
+ // switches storage column, per the thresholds stated below.
+ // Test boundary conditions for path length thresholds:
+ // StateNodesTop: 0-5, StateNodes: 6-15, FallbackNodes: 16+
+ // StorageNodes: 0-15, FallbackNodes: 16+
+
+ // State trie boundary paths
+ TreePath statePath5 = TreePath.FromHexString("12345"); // exactly 5 -> StateNodesTop
+ TreePath statePath6 = TreePath.FromHexString("123456"); // exactly 6 -> StateNodes
+ TreePath statePath15 = TreePath.FromHexString("123456789abcdef"); // exactly 15 -> StateNodes
+ TreePath statePath16 = TreePath.FromHexString("123456789abcdef0"); // exactly 16 -> FallbackNodes
+
+ // Storage trie boundary paths
+ Hash256 account = TestItem.KeccakA;
+ TreePath storagePath15 = TreePath.FromHexString("abcdef123456789"); // exactly 15 -> StorageNodes
+ TreePath storagePath16 = TreePath.FromHexString("abcdef1234567890"); // exactly 16 -> FallbackNodes
+
+ byte[] rlp5 = [0xc1, 0x05];
+ byte[] rlp6 = [0xc1, 0x06];
+ byte[] rlp15 = [0xc1, 0x0f];
+ byte[] rlp16 = [0xc1, 0x10];
+ byte[] storageRlp15 = [0xc1, 0x1f];
+ byte[] storageRlp16 = [0xc1, 0x20];
+
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ writer.SetStateTrieNode(in statePath5, new TrieNode(NodeType.Leaf, rlp5));
+ writer.SetStateTrieNode(in statePath6, new TrieNode(NodeType.Leaf, rlp6));
+ writer.SetStateTrieNode(in statePath15, new TrieNode(NodeType.Leaf, rlp15));
+ writer.SetStateTrieNode(in statePath16, new TrieNode(NodeType.Leaf, rlp16));
+ writer.SetStorageTrieNode(account, in storagePath15, new TrieNode(NodeType.Leaf, storageRlp15));
+ writer.SetStorageTrieNode(account, in storagePath16, new TrieNode(NodeType.Leaf, storageRlp16));
+ }
+
+ using (IPersistence.IPersistenceReader reader = _persistence.CreateReader())
+ {
+ Assert.That(reader.TryLoadStateRlp(in statePath5, ReadFlags.None), Is.EqualTo(rlp5));
+ Assert.That(reader.TryLoadStateRlp(in statePath6, ReadFlags.None), Is.EqualTo(rlp6));
+ Assert.That(reader.TryLoadStateRlp(in statePath15, ReadFlags.None), Is.EqualTo(rlp15));
+ Assert.That(reader.TryLoadStateRlp(in statePath16, ReadFlags.None), Is.EqualTo(rlp16));
+ Assert.That(reader.TryLoadStorageRlp(account, in storagePath15, ReadFlags.None), Is.EqualTo(storageRlp15));
+ Assert.That(reader.TryLoadStorageRlp(account, in storagePath16, ReadFlags.None), Is.EqualTo(storageRlp16));
+ }
+ }
+
+ [Test]
+ public void TestSelfDestructTrieNodes()
+ {
+ // Test that SelfDestruct removes storage trie nodes for an account
+ // This tests both shortened storage nodes (path <=15) and fallback storage nodes (path >15)
+
+ // SelfDestruct takes Address, but SetTrieNodes/TryLoadRlp take Hash256 (keccak of address)
+ Address address1 = TestItem.AddressA;
+ Address address2 = TestItem.AddressB;
+ Hash256 account1Hash = Keccak.Compute(address1.Bytes);
+ Hash256 account2Hash = Keccak.Compute(address2.Bytes);
+
+ // Various path lengths to test both StorageNodes and FallbackNodes columns
+ TreePath shortPath = TreePath.FromHexString("abcd"); // 4 nibbles -> StorageNodes
+ TreePath mediumPath = TreePath.FromHexString("123456789abcdef"); // 15 nibbles -> StorageNodes
+ TreePath longPath = TreePath.FromHexString("0123456789abcdef0123456789abcdef01234567"); // 40 nibbles -> FallbackNodes
+
+ byte[] rlpShort = [0xc1, 0x01];
+ byte[] rlpMedium = [0xc1, 0x02];
+ byte[] rlpLong = [0xc1, 0x03];
+
+ // Write trie nodes for both accounts
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ // Account 1 storage trie nodes
+ writer.SetStorageTrieNode(account1Hash, in shortPath, new TrieNode(NodeType.Leaf, rlpShort));
+ writer.SetStorageTrieNode(account1Hash, in mediumPath, new TrieNode(NodeType.Leaf, rlpMedium));
+ writer.SetStorageTrieNode(account1Hash, in longPath, new TrieNode(NodeType.Leaf, rlpLong));
+
+ // Account 2 storage trie nodes (same paths, different account)
+ writer.SetStorageTrieNode(account2Hash, in shortPath, new TrieNode(NodeType.Leaf, rlpShort));
+ writer.SetStorageTrieNode(account2Hash, in mediumPath, new TrieNode(NodeType.Leaf, rlpMedium));
+ writer.SetStorageTrieNode(account2Hash, in longPath, new TrieNode(NodeType.Leaf, rlpLong));
+ }
+
+ // Verify all nodes exist
+ using (IPersistence.IPersistenceReader reader = _persistence.CreateReader())
+ {
+ Assert.That(reader.TryLoadStorageRlp(account1Hash, in shortPath, ReadFlags.None), Is.EqualTo(rlpShort));
+ Assert.That(reader.TryLoadStorageRlp(account1Hash, in mediumPath, ReadFlags.None), Is.EqualTo(rlpMedium));
+ Assert.That(reader.TryLoadStorageRlp(account1Hash, in longPath, ReadFlags.None), Is.EqualTo(rlpLong));
+ Assert.That(reader.TryLoadStorageRlp(account2Hash, in shortPath, ReadFlags.None), Is.EqualTo(rlpShort));
+ Assert.That(reader.TryLoadStorageRlp(account2Hash, in mediumPath, ReadFlags.None), Is.EqualTo(rlpMedium));
+ Assert.That(reader.TryLoadStorageRlp(account2Hash, in longPath, ReadFlags.None), Is.EqualTo(rlpLong));
+ }
+
+ // SelfDestruct account1 (uses Address, internally converts to hash)
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ writer.SelfDestruct(address1);
+ }
+
+ // Verify account1's trie nodes are deleted, account2's remain
+ using (IPersistence.IPersistenceReader reader = _persistence.CreateReader())
+ {
+ // Account 1 nodes should be gone
+ Assert.That(reader.TryLoadStorageRlp(account1Hash, in shortPath, ReadFlags.None), Is.Null);
+ Assert.That(reader.TryLoadStorageRlp(account1Hash, in mediumPath, ReadFlags.None), Is.Null);
+ Assert.That(reader.TryLoadStorageRlp(account1Hash, in longPath, ReadFlags.None), Is.Null);
+
+ // Account 2 nodes should still exist
+ Assert.That(reader.TryLoadStorageRlp(account2Hash, in shortPath, ReadFlags.None), Is.EqualTo(rlpShort));
+ Assert.That(reader.TryLoadStorageRlp(account2Hash, in mediumPath, ReadFlags.None), Is.EqualTo(rlpMedium));
+ Assert.That(reader.TryLoadStorageRlp(account2Hash, in longPath, ReadFlags.None), Is.EqualTo(rlpLong));
+ }
+ }
+
+ [Test]
+ public void TestSelfDestructTrieNodesWithSimilarAddressHashPrefix()
+ {
+ // Test that SelfDestruct correctly differentiates accounts even when their hashes
+ // might share the first 4 bytes (the prefix used in storage key encoding).
+ // The storage key uses first 4 bytes of hash as prefix, remaining 16 bytes at end.
+ // This tests that the suffix comparison works correctly.
+ // NOTE(review): the "first 4 bytes / 16-byte suffix" key layout described here is not
+ // visible from this file - confirm against the persistence key-encoding implementation.
+
+ // Create two hashes that share the same first 4 bytes but differ in later bytes
+ // We bypass Address->Hash256 conversion to directly test the hash-based logic
+ byte[] hash1Bytes = new byte[32];
+ byte[] hash2Bytes = new byte[32];
+ // Same prefix (first 4 bytes)
+ hash1Bytes[0] = 0xAA; hash1Bytes[1] = 0xBB; hash1Bytes[2] = 0xCC; hash1Bytes[3] = 0xDD;
+ hash2Bytes[0] = 0xAA; hash2Bytes[1] = 0xBB; hash2Bytes[2] = 0xCC; hash2Bytes[3] = 0xDD;
+ // Different suffix (bytes 4-19 are used in the key suffix check)
+ hash1Bytes[4] = 0x11;
+ hash2Bytes[4] = 0x22;
+
+ Hash256 account1Hash = new Hash256(hash1Bytes);
+ Hash256 account2Hash = new Hash256(hash2Bytes);
+
+ TreePath shortPath = TreePath.FromHexString("1234"); // -> StorageNodes
+ TreePath longPath = TreePath.FromHexString("0123456789abcdef0123456789abcdef01234567"); // -> FallbackNodes
+
+ byte[] rlp1 = [0xc1, 0x11];
+ byte[] rlp2 = [0xc1, 0x22];
+
+ // Write trie nodes using the hashes directly
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ writer.SetStorageTrieNode(account1Hash, in shortPath, new TrieNode(NodeType.Leaf, rlp1));
+ writer.SetStorageTrieNode(account1Hash, in longPath, new TrieNode(NodeType.Leaf, rlp1));
+ writer.SetStorageTrieNode(account2Hash, in shortPath, new TrieNode(NodeType.Leaf, rlp2));
+ writer.SetStorageTrieNode(account2Hash, in longPath, new TrieNode(NodeType.Leaf, rlp2));
+ }
+
+ // Verify all nodes exist before SelfDestruct
+ using (IPersistence.IPersistenceReader reader = _persistence.CreateReader())
+ {
+ Assert.That(reader.TryLoadStorageRlp(account1Hash, in shortPath, ReadFlags.None), Is.EqualTo(rlp1));
+ Assert.That(reader.TryLoadStorageRlp(account1Hash, in longPath, ReadFlags.None), Is.EqualTo(rlp1));
+ Assert.That(reader.TryLoadStorageRlp(account2Hash, in shortPath, ReadFlags.None), Is.EqualTo(rlp2));
+ Assert.That(reader.TryLoadStorageRlp(account2Hash, in longPath, ReadFlags.None), Is.EqualTo(rlp2));
+ }
+
+ // SelfDestruct account1 using an address that hashes to account1Hash
+ // Note: We use AddressC since we need a real Address for SelfDestruct
+ // This tests the general SelfDestruct flow; the prefix collision test above
+ // verifies the data is correctly written with similar prefixes
+ Address address1 = TestItem.AddressC;
+ Hash256 address1Hash = Keccak.Compute(address1.Bytes);
+
+ // Write and then delete using the real address flow
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ writer.SetStorageTrieNode(address1Hash, in shortPath, new TrieNode(NodeType.Leaf, rlp1));
+ writer.SetStorageTrieNode(address1Hash, in longPath, new TrieNode(NodeType.Leaf, rlp1));
+ }
+
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ writer.SelfDestruct(address1);
+ }
+
+ // Verify address1's trie nodes are deleted
+ using (IPersistence.IPersistenceReader reader = _persistence.CreateReader())
+ {
+ Assert.That(reader.TryLoadStorageRlp(address1Hash, in shortPath, ReadFlags.None), Is.Null);
+ Assert.That(reader.TryLoadStorageRlp(address1Hash, in longPath, ReadFlags.None), Is.Null);
+
+ // The manually created hashes should still exist (they weren't self-destructed)
+ Assert.That(reader.TryLoadStorageRlp(account1Hash, in shortPath, ReadFlags.None), Is.EqualTo(rlp1));
+ Assert.That(reader.TryLoadStorageRlp(account2Hash, in shortPath, ReadFlags.None), Is.EqualTo(rlp2));
+ }
+ }
+
+ [Test]
+ public void TestAccountIterator_EnumeratesAllAccounts()
+ {
+ // The flat account iterator must visit every persisted account exactly once;
+ // only the count is asserted here, not the iteration order or contents.
+ // Write multiple accounts
+ Address addr1 = TestItem.AddressA;
+ Address addr2 = TestItem.AddressB;
+ Address addr3 = TestItem.AddressC;
+
+ Account acc1 = TestItem.GenerateIndexedAccount(1);
+ Account acc2 = TestItem.GenerateIndexedAccount(2);
+ Account acc3 = TestItem.GenerateIndexedAccount(3);
+
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ writer.SetAccount(addr1, acc1);
+ writer.SetAccount(addr2, acc2);
+ writer.SetAccount(addr3, acc3);
+ }
+
+ // Use iterator to enumerate accounts
+ using IPersistence.IPersistenceReader reader = _persistence.CreateReader();
+ using IPersistence.IFlatIterator iterator = reader.CreateAccountIterator();
+
+ int count = 0;
+ while (iterator.MoveNext())
+ {
+ count++;
+ }
+
+ // All layouts should find 3 accounts
+ Assert.That(count, Is.EqualTo(3));
+ }
+
+ [Test]
+ public void TestAccountIterator_EmptyState_ReturnsNoAccounts()
+ {
+ // An account iterator over a store with no writes must terminate immediately.
+ using IPersistence.IPersistenceReader reader = _persistence.CreateReader();
+ using IPersistence.IFlatIterator iterator = reader.CreateAccountIterator();
+
+ int visited = 0;
+ while (iterator.MoveNext()) visited++;
+
+ Assert.That(visited, Is.EqualTo(0));
+ }
+
+ [Test]
+ public void TestStorageIterator_EnumeratesAccountStorage()
+ {
+ // The storage iterator, keyed by the account's hashed path, must visit every
+ // slot written for that account (count only; order is not asserted).
+ // PreimageFlat uses raw address, others use hashed address paths
+ if (configuration.FlatDbConfig.Layout == FlatLayout.PreimageFlat)
+ Assert.Ignore("Preimage mode uses raw address format which differs from hashed mode");
+
+ // Write account with storage
+ Address addr = TestItem.AddressA;
+ Account acc = TestItem.GenerateIndexedAccount(0);
+
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ writer.SetAccount(addr, acc);
+ writer.SetStorage(addr, 1, SlotValue.FromSpanWithoutLeadingZero([0x11]));
+ writer.SetStorage(addr, 42, SlotValue.FromSpanWithoutLeadingZero([0x42]));
+ writer.SetStorage(addr, 100, SlotValue.FromSpanWithoutLeadingZero([0x64]));
+ }
+
+ // Use iterator to enumerate storage
+ using IPersistence.IPersistenceReader reader = _persistence.CreateReader();
+
+ // Storage keys are written using addr.ToAccountPath (Keccak hash of address)
+ ValueHash256 accountKey = addr.ToAccountPath;
+
+ using IPersistence.IFlatIterator iterator = reader.CreateStorageIterator(accountKey);
+
+ int count = 0;
+ while (iterator.MoveNext())
+ {
+ count++;
+ }
+
+ // Should find 3 storage slots
+ Assert.That(count, Is.EqualTo(3));
+ }
+
+ [Test]
+ public void TestStorageIterator_NoStorage_ReturnsEmpty()
+ {
+ // A storage iterator for an account that exists but has no slots must yield nothing.
+ if (configuration.FlatDbConfig.Layout == FlatLayout.PreimageFlat)
+ Assert.Ignore("Preimage mode uses raw address format which differs from hashed mode");
+
+ // Write account without storage
+ Address addr = TestItem.AddressA;
+ Account acc = TestItem.GenerateIndexedAccount(0);
+
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ writer.SetAccount(addr, acc);
+ }
+
+ using IPersistence.IPersistenceReader reader = _persistence.CreateReader();
+
+ ValueHash256 accountKey = addr.ToAccountPath;
+
+ using IPersistence.IFlatIterator iterator = reader.CreateStorageIterator(accountKey);
+
+ int count = 0;
+ while (iterator.MoveNext())
+ {
+ count++;
+ }
+
+ Assert.That(count, Is.EqualTo(0));
+ }
+
+ [Test]
+ public void TestStorageIterator_IsolatesAccountStorage()
+ {
+ // Storage iterators must be scoped to one account's hashed key: addr1's iterator
+ // sees only its 2 slots, addr2's only its 3.
+ if (configuration.FlatDbConfig.Layout == FlatLayout.PreimageFlat)
+ Assert.Ignore("Preimage mode uses raw address format which differs from hashed mode");
+
+ // Write storage for two accounts
+ Address addr1 = TestItem.AddressA;
+ Address addr2 = TestItem.AddressB;
+
+ using (IPersistence.IWriteBatch writer = _persistence.CreateWriteBatch(StateId.PreGenesis, StateId.PreGenesis, WriteFlags.None))
+ {
+ writer.SetAccount(addr1, TestItem.GenerateIndexedAccount(0));
+ writer.SetStorage(addr1, 1, SlotValue.FromSpanWithoutLeadingZero([0x11]));
+ writer.SetStorage(addr1, 2, SlotValue.FromSpanWithoutLeadingZero([0x22]));
+
+ writer.SetAccount(addr2, TestItem.GenerateIndexedAccount(1));
+ writer.SetStorage(addr2, 10, SlotValue.FromSpanWithoutLeadingZero([0xaa]));
+ writer.SetStorage(addr2, 20, SlotValue.FromSpanWithoutLeadingZero([0xbb]));
+ writer.SetStorage(addr2, 30, SlotValue.FromSpanWithoutLeadingZero([0xcc]));
+ }
+
+ using IPersistence.IPersistenceReader reader = _persistence.CreateReader();
+
+ // Count storage for addr1 using proper address hash
+ ValueHash256 accountKey1 = addr1.ToAccountPath;
+ using IPersistence.IFlatIterator iterator1 = reader.CreateStorageIterator(accountKey1);
+ int count1 = 0;
+ while (iterator1.MoveNext()) count1++;
+
+ // Count storage for addr2 using proper address hash
+ ValueHash256 accountKey2 = addr2.ToAccountPath;
+ using IPersistence.IFlatIterator iterator2 = reader.CreateStorageIterator(accountKey2);
+ int count2 = 0;
+ while (iterator2.MoveNext()) count2++;
+
+ Assert.That(count1, Is.EqualTo(2));
+ Assert.That(count2, Is.EqualTo(3));
+ }
+
+ [Test]
+ public void TestIsPreimageMode_ReturnsCorrectValue()
+ {
+ // IsPreimageMode must be true exactly when the fixture runs the PreimageFlat layout.
+ bool expectPreimage = configuration.FlatDbConfig.Layout == FlatLayout.PreimageFlat;
+
+ using IPersistence.IPersistenceReader reader = _persistence.CreateReader();
+ Assert.That(reader.IsPreimageMode, Is.EqualTo(expectPreimage));
+ }
+}
diff --git a/src/Nethermind/Nethermind.State.Flat.Test/ResourcePoolTests.cs b/src/Nethermind/Nethermind.State.Flat.Test/ResourcePoolTests.cs
new file mode 100644
index 00000000000..a49df0cd846
--- /dev/null
+++ b/src/Nethermind/Nethermind.State.Flat.Test/ResourcePoolTests.cs
@@ -0,0 +1,162 @@
+// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited
+// SPDX-License-Identifier: LGPL-3.0-only
+
+using System.Collections.Generic;
+using Nethermind.Core;
+using Nethermind.Core.Crypto;
+using Nethermind.Db;
+using NUnit.Framework;
+
+namespace Nethermind.State.Flat.Test;
+
+[TestFixture]
+public class ResourcePoolTests
+{
+ private ResourcePool _resourcePool;
+ private FlatDbConfig _config;
+
+ [SetUp]
+ public void SetUp()
+ {
+ _config = new FlatDbConfig { CompactSize = 2 }; // Small compact size for testing limits
+ _resourcePool = new ResourcePool(_config);
+ }
+
+ [Test]
+ public void Test_GetSnapshotContent_ReturnsNewInstance_WhenPoolEmpty()
+ {
+ SnapshotContent content = _resourcePool.GetSnapshotContent(ResourcePool.Usage.MainBlockProcessing);
+ Assert.That(content, Is.Not.Null);
+ Assert.That(content.Accounts, Is.Empty);
+ }
+
+ [Test]
+ public void Test_ReturnSnapshotContent_RecyclesInstance()
+ {
+ ResourcePool.Usage usage = ResourcePool.Usage.MainBlockProcessing;
+ SnapshotContent content1 = _resourcePool.GetSnapshotContent(usage);
+
+ content1.Accounts[new AddressAsKey(new Address("0x1234567890123456789012345678901234567890"))] = new Account(1, 2);
+ Assert.That(content1.Accounts, Is.Not.Empty);
+
+ _resourcePool.ReturnSnapshotContent(usage, content1);
+
+ SnapshotContent content2 = _resourcePool.GetSnapshotContent(usage);
+
+ // Should be the same instance (LIFO)
+ Assert.That(content2, Is.SameAs(content1));
+ // Should have been reset
+ Assert.That(content2.Accounts, Is.Empty);
+ }
+
+ [Test]
+ public void Test_SnapshotContentPool_RespectsCapacity()
+ {
+ // For MainBlockProcessing: capacity = config.CompactSize + 8 = 2 + 8 = 10
+ ResourcePool.Usage usage = ResourcePool.Usage.MainBlockProcessing;
+ int capacity = _config.CompactSize + 8;
+ List<SnapshotContent> items = new List<SnapshotContent>();
+
+ for (int i = 0; i < capacity + 5; i++)
+ {
+ items.Add(_resourcePool.GetSnapshotContent(usage));
+ }
+
+ foreach (SnapshotContent item in items)
+ {
+ _resourcePool.ReturnSnapshotContent(usage, item);
+ }
+
+ // Now if we get 'capacity' items, they should be from the pool
+ for (int i = 0; i < capacity; i++)
+ {
+ SnapshotContent content = _resourcePool.GetSnapshotContent(usage);
+ Assert.That(items.Contains(content), Is.True, $"Item {i} should be from recycled items");
+ }
+
+ // The next one should be a new instance because pool is empty
+ SnapshotContent newContent = _resourcePool.GetSnapshotContent(usage);
+ Assert.That(items.Contains(newContent), Is.False, "Should be a new instance");
+ }
+
+ [Test]
+ public void Test_GetCachedResource_ReturnsNewInstance_WhenPoolEmpty()
+ {
+ TransientResource resource = _resourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing);
+ Assert.That(resource, Is.Not.Null);
+ Assert.That(resource.size.PrewarmedAddressSize, Is.EqualTo(1024));
+ Assert.That(resource.size.NodesCacheSize, Is.EqualTo(1024));
+ }
+
+ [Test]
+ public void Test_ReturnCachedResource_RecyclesInstance()
+ {
+ ResourcePool.Usage usage = ResourcePool.Usage.MainBlockProcessing;
+ TransientResource resource1 = _resourcePool.GetCachedResource(usage);
+
+ _resourcePool.ReturnCachedResource(usage, resource1);
+
+ TransientResource resource2 = _resourcePool.GetCachedResource(usage);
+
+ // Should be the same instance
+ Assert.That(resource2, Is.SameAs(resource1));
+ }
+
+ [Test]
+ public void Test_CachedResourcePool_RespectsCapacity()
+ {
+ // For MainBlockProcessing: capacity = 2
+ ResourcePool.Usage usage = ResourcePool.Usage.MainBlockProcessing;
+
+ TransientResource r1 = _resourcePool.GetCachedResource(usage);
+ TransientResource r2 = _resourcePool.GetCachedResource(usage);
+ TransientResource r3 = _resourcePool.GetCachedResource(usage);
+
+ _resourcePool.ReturnCachedResource(usage, r1);
+ _resourcePool.ReturnCachedResource(usage, r2);
+ _resourcePool.ReturnCachedResource(usage, r3); // This one should be disposed
+
+ TransientResource p1 = _resourcePool.GetCachedResource(usage);
+ TransientResource p2 = _resourcePool.GetCachedResource(usage);
+ TransientResource p3 = _resourcePool.GetCachedResource(usage);
+
+ Assert.That(p1, Is.SameAs(r2)); // LIFO
+ Assert.That(p2, Is.SameAs(r1));
+ Assert.That(p3, Is.Not.SameAs(r3));
+ }
+
+ [Test]
+ public void Test_CreateSnapshot_UsesPool()
+ {
+ StateId from = new StateId(1, Keccak.Zero);
+ StateId to = new StateId(2, Keccak.Zero);
+ ResourcePool.Usage usage = ResourcePool.Usage.MainBlockProcessing;
+
+ SnapshotContent content;
+ using (Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, usage))
+ {
+ Assert.That(snapshot.From, Is.EqualTo(from));
+ Assert.That(snapshot.To, Is.EqualTo(to));
+ Assert.That(snapshot.Content, Is.Not.Null);
+ content = snapshot.Content;
+ }
+
+ SnapshotContent recycledContent = _resourcePool.GetSnapshotContent(usage);
+ Assert.That(recycledContent, Is.SameAs(content));
+ }
+
+ [Test]
+ public void Test_DifferentUsages_HaveIndependentPools()
+ {
+ SnapshotContent contentMain = _resourcePool.GetSnapshotContent(ResourcePool.Usage.MainBlockProcessing);
+ SnapshotContent contentCompactor = _resourcePool.GetSnapshotContent(ResourcePool.Usage.Compactor);
+
+ _resourcePool.ReturnSnapshotContent(ResourcePool.Usage.MainBlockProcessing, contentMain);
+
+ SnapshotContent contentCompactor2 = _resourcePool.GetSnapshotContent(ResourcePool.Usage.Compactor);
+ Assert.That(contentCompactor2, Is.Not.SameAs(contentMain));
+
+ SnapshotContent contentMain2 = _resourcePool.GetSnapshotContent(ResourcePool.Usage.MainBlockProcessing);
+ Assert.That(contentMain2, Is.SameAs(contentMain));
+ }
+}
diff --git a/src/Nethermind/Nethermind.State.Flat.Test/SnapshotCompactorTests.cs b/src/Nethermind/Nethermind.State.Flat.Test/SnapshotCompactorTests.cs
new file mode 100644
index 00000000000..f58f4773b2d
--- /dev/null
+++ b/src/Nethermind/Nethermind.State.Flat.Test/SnapshotCompactorTests.cs
@@ -0,0 +1,489 @@
+// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited
+// SPDX-License-Identifier: LGPL-3.0-only
+
+using System.Collections.Generic;
+using System.Linq;
+using Nethermind.Core;
+using Nethermind.Core.Collections;
+using Nethermind.Core.Crypto;
+using Nethermind.Core.Test.Builders;
+using Nethermind.Db;
+using Nethermind.Int256;
+using Nethermind.Logging;
+using Nethermind.Trie;
+using NUnit.Framework;
+
+namespace Nethermind.State.Flat.Test;
+
+[TestFixture]
+public class SnapshotCompactorTests
+{
+ private SnapshotCompactor _compactor = null!;
+ private ResourcePool _resourcePool = null!;
+ private FlatDbConfig _config = null!;
+ private SnapshotRepository _snapshotRepository;
+
+ [SetUp]
+ public void SetUp()
+ {
+ _config = new FlatDbConfig { CompactSize = 16 };
+ _resourcePool = new ResourcePool(_config);
+ _snapshotRepository = new SnapshotRepository(LimboLogs.Instance);
+ _compactor = new SnapshotCompactor(_config, _resourcePool, _snapshotRepository, LimboLogs.Instance);
+ }
+
+ private static StateId CreateStateId(long blockNumber, byte rootByte = 0)
+ {
+ byte[] bytes = new byte[32];
+ bytes[0] = rootByte;
+ return new StateId(blockNumber, new ValueHash256(bytes));
+ }
+
+ private void BuildSnapshotChain(long startBlock, long endBlock)
+ {
+ for (long i = startBlock; i < endBlock; i++)
+ {
+ StateId from = CreateStateId(i);
+ StateId to = CreateStateId(i + 1);
+ Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv);
+
+ bool added = _snapshotRepository.TryAddSnapshot(snapshot);
+ Assert.That(added, Is.True, $"Failed to add snapshot {i}->{i + 1}");
+ _snapshotRepository.AddStateId(to);
+ }
+ }
+
+ private static void AssertSlotValueEqual(SlotValue? expected, SlotValue? actual)
+ {
+ Assert.That(actual, Is.Not.Null);
+ Assert.That(actual!.Value.AsReadOnlySpan.ToArray(), Is.EqualTo(expected!.Value.AsReadOnlySpan.ToArray()));
+ }
+
+ private static void AssertAccountSame(Account expected, Account? actual)
+ {
+ Assert.That(actual, Is.Not.Null);
+ Assert.That(actual!.Nonce, Is.EqualTo(expected.Nonce));
+ Assert.That(actual!.Balance, Is.EqualTo(expected.Balance));
+ }
+
+ [Test]
+ public void CompactSnapshotBundle_SingleSnapshot_ReturnsCorrectStateIds()
+ {
+ StateId from = new StateId(0, Keccak.Zero);
+ StateId to = new StateId(1, Keccak.Zero);
+
+ using Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv);
+ Address address = new Address("0x1234567890123456789012345678901234567890");
+ snapshot.Content.Accounts[address] = new Account(1, 100);
+
+ SnapshotPooledList snapshots = new SnapshotPooledList(1);
+ snapshots.Add(snapshot);
+
+ using Snapshot compacted = _compactor.CompactSnapshotBundle(snapshots);
+
+ Assert.That(compacted.From.BlockNumber, Is.EqualTo(0));
+ Assert.That(compacted.To.BlockNumber, Is.EqualTo(1));
+ }
+
+ [Test]
+ public void CompactSnapshotBundle_SingleSnapshot_PreservesAllDataTypes()
+ {
+ StateId from = new StateId(0, Keccak.Zero);
+ StateId to = new StateId(1, Keccak.Zero);
+
+ using Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv);
+ Address address1 = new Address("0x1111111111111111111111111111111111111111");
+ Address address2 = new Address("0x2222222222222222222222222222222222222222");
+ UInt256 storageIndex1 = new UInt256(1);
+ UInt256 storageIndex2 = new UInt256(2);
+ TreePath statePath1 = TreePath.FromHexString("abcd");
+ TreePath statePath2 = TreePath.FromHexString("ef01");
+ TreePath storageNodePath1 = TreePath.FromHexString("1234");
+ TreePath storageNodePath2 = TreePath.FromHexString("5678");
+ Hash256 storageNodeHash1 = Keccak.Zero;
+ Hash256 storageNodeHash2 = Keccak.Zero;
+ SlotValue slotValue1 = new SlotValue(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100 });
+ SlotValue slotValue2 = new SlotValue(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 200 });
+
+ // Add accounts
+ snapshot.Content.Accounts[address1] = new Account(1, 100);
+ snapshot.Content.Accounts[address2] = new Account(2, 200);
+
+ // Add storage values
+ snapshot.Content.Storages[(address1, storageIndex1)] = slotValue1;
+ snapshot.Content.Storages[(address2, storageIndex2)] = slotValue2;
+
+ // Add state nodes
+ snapshot.Content.StateNodes[statePath1] = new TrieNode(NodeType.Leaf, storageNodeHash1);
+ snapshot.Content.StateNodes[statePath2] = new TrieNode(NodeType.Branch, storageNodeHash2);
+
+ // Add storage nodes
+ Hash256 address1Hash = address1.ToAccountPath.ToCommitment();
+ Hash256 address2Hash = address2.ToAccountPath.ToCommitment();
+ snapshot.Content.StorageNodes[(address1Hash, storageNodePath1)] = new TrieNode(NodeType.Leaf, storageNodeHash1);
+ snapshot.Content.StorageNodes[(address2Hash, storageNodePath2)] = new TrieNode(NodeType.Branch, storageNodeHash2);
+
+ SnapshotPooledList snapshots = new SnapshotPooledList(1);
+ snapshots.Add(snapshot);
+
+ using Snapshot compacted = _compactor.CompactSnapshotBundle(snapshots);
+
+ // Verify all data types are preserved
+ Assert.That(compacted.AccountsCount, Is.EqualTo(2));
+ AssertAccountSame(new Account(1, 100), compacted.Content.Accounts[address1]);
+ AssertAccountSame(new Account(2, 200), compacted.Content.Accounts[address2]);
+
+ Assert.That(compacted.StoragesCount, Is.EqualTo(2));
+ AssertSlotValueEqual(slotValue1, compacted.Content.Storages[(address1, storageIndex1)]);
+ AssertSlotValueEqual(slotValue2, compacted.Content.Storages[(address2, storageIndex2)]);
+
+ Assert.That(compacted.StateNodesCount, Is.EqualTo(2));
+ Assert.That(compacted.Content.StateNodes[statePath1].Keccak, Is.EqualTo(storageNodeHash1));
+ Assert.That(compacted.Content.StateNodes[statePath2].Keccak, Is.EqualTo(storageNodeHash2));
+
+ Assert.That(compacted.StorageNodesCount, Is.EqualTo(2));
+ }
+
+ [Test]
+ public void CompactSnapshotBundle_MultipleSnapshots_MergesAllDataTypes()
+ {
+ Address address1 = new Address("0x1111111111111111111111111111111111111111");
+ Address address2 = new Address("0x2222222222222222222222222222222222222222");
+ UInt256 storageIndex1 = new UInt256(1);
+ UInt256 storageIndex2 = new UInt256(2);
+ TreePath statePath1 = TreePath.FromHexString("abcd");
+ TreePath statePath2 = TreePath.FromHexString("ef01");
+ TreePath storageNodePath1 = TreePath.FromHexString("1234");
+ TreePath storageNodePath2 = TreePath.FromHexString("5678");
+ SlotValue slotValue1 = new SlotValue(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100 });
+ SlotValue slotValue2 = new SlotValue(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 200 });
+
+ // First snapshot
+ StateId from0 = new StateId(0, Keccak.Zero);
+ StateId to0 = new StateId(1, Keccak.Zero);
+ using Snapshot snapshot0 = _resourcePool.CreateSnapshot(from0, to0, ResourcePool.Usage.ReadOnlyProcessingEnv);
+ snapshot0.Content.Accounts[address1] = new Account(1, 100);
+ snapshot0.Content.Storages[(address1, storageIndex1)] = slotValue1;
+ snapshot0.Content.StateNodes[statePath1] = new TrieNode(NodeType.Leaf, Keccak.Zero);
+ Hash256 address1Hash = address1.ToAccountPath.ToCommitment();
+ snapshot0.Content.StorageNodes[(address1Hash, storageNodePath1)] = new TrieNode(NodeType.Leaf, Keccak.Zero);
+
+ // Second snapshot with different items
+ StateId from1 = new StateId(1, Keccak.Zero);
+ StateId to1 = new StateId(2, Keccak.Zero);
+ using Snapshot snapshot1 = _resourcePool.CreateSnapshot(from1, to1, ResourcePool.Usage.ReadOnlyProcessingEnv);
+ snapshot1.Content.Accounts[address2] = new Account(2, 200);
+ snapshot1.Content.Storages[(address2, storageIndex2)] = slotValue2;
+ snapshot1.Content.StateNodes[statePath2] = new TrieNode(NodeType.Branch, Keccak.Zero);
+ Hash256 address2Hash = address2.ToAccountPath.ToCommitment();
+ snapshot1.Content.StorageNodes[(address2Hash, storageNodePath2)] = new TrieNode(NodeType.Branch, Keccak.Zero);
+
+ SnapshotPooledList snapshots = new SnapshotPooledList(2);
+ snapshots.Add(snapshot0);
+ snapshots.Add(snapshot1);
+
+ using Snapshot compacted = _compactor.CompactSnapshotBundle(snapshots);
+
+ // Verify all items from both snapshots are merged
+ Assert.That(compacted.AccountsCount, Is.EqualTo(2));
+ Assert.That(compacted.StoragesCount, Is.EqualTo(2));
+ Assert.That(compacted.StateNodesCount, Is.EqualTo(2));
+ Assert.That(compacted.StorageNodesCount, Is.EqualTo(2));
+ }
+
+ [Test]
+ public void CompactSnapshotBundle_MultipleSnapshots_LatestValueOverridesForAllDataTypes()
+ {
+ Address address = new Address("0x1111111111111111111111111111111111111111");
+ UInt256 storageIndex = new UInt256(1);
+ TreePath statePath = TreePath.FromHexString("abcd");
+ TreePath storageNodePath = TreePath.FromHexString("1234");
+ SlotValue slotValue1 = new SlotValue(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100 });
+ SlotValue slotValue2 = new SlotValue(new byte[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 200 });
+
+ // First snapshot with initial values
+ StateId from0 = new StateId(0, Keccak.Zero);
+ StateId to0 = new StateId(1, Keccak.Zero);
+ using Snapshot snapshot0 = _resourcePool.CreateSnapshot(from0, to0, ResourcePool.Usage.ReadOnlyProcessingEnv);
+ snapshot0.Content.Accounts[address] = new Account(1, 100);
+ snapshot0.Content.Storages[(address, storageIndex)] = slotValue1;
+ snapshot0.Content.StateNodes[statePath] = new TrieNode(NodeType.Leaf, Keccak.Zero);
+ Hash256 addressHash = address.ToAccountPath.ToCommitment();
+ snapshot0.Content.StorageNodes[(addressHash, storageNodePath)] = new TrieNode(NodeType.Leaf, Keccak.Zero);
+
+ // Second snapshot with updated values for same keys
+ StateId from1 = new StateId(1, Keccak.Zero);
+ StateId to1 = new StateId(2, Keccak.Zero);
+ using Snapshot snapshot1 = _resourcePool.CreateSnapshot(from1, to1, ResourcePool.Usage.ReadOnlyProcessingEnv);
+ snapshot1.Content.Accounts[address] = new Account(2, 200);
+ snapshot1.Content.Storages[(address, storageIndex)] = slotValue2;
+ snapshot1.Content.StateNodes[statePath] = new TrieNode(NodeType.Branch, Keccak.Zero);
+ snapshot1.Content.StorageNodes[(addressHash, storageNodePath)] = new TrieNode(NodeType.Branch, Keccak.Zero);
+
+ SnapshotPooledList snapshots = new SnapshotPooledList(2);
+ snapshots.Add(snapshot0);
+ snapshots.Add(snapshot1);
+
+ using Snapshot compacted = _compactor.CompactSnapshotBundle(snapshots);
+
+ // Verify latest values override earlier ones
+ Assert.That(compacted.AccountsCount, Is.EqualTo(1));
+ AssertAccountSame(new Account(2, 200), compacted.Content.Accounts[address]);
+
+ Assert.That(compacted.StoragesCount, Is.EqualTo(1));
+ AssertSlotValueEqual(slotValue2, compacted.Content.Storages[(address, storageIndex)]);
+
+ Assert.That(compacted.StateNodesCount, Is.EqualTo(1));
+ Assert.That(compacted.Content.StateNodes[statePath].NodeType, Is.EqualTo(NodeType.Branch));
+ Assert.That(compacted.StorageNodesCount, Is.EqualTo(1));
+ }
+
+ [Test]
+ public void CompactSnapshotBundle_SelfDestructedAddress_RemovesStorageAndNodes()
+ {
+ Address address = new Address("0x1111111111111111111111111111111111111111");
+ UInt256 storageIndex = new UInt256(1);
+ TreePath storagePath = TreePath.FromHexString("1234");
+ Hash256 storageHash = Keccak.Zero;
+ SlotValue slotValue = new SlotValue(new byte[32]);
+
+ StateId from0 = new StateId(0, Keccak.Zero);
+ StateId to0 = new StateId(1, Keccak.Zero);
+ using Snapshot snapshot0 = _resourcePool.CreateSnapshot(from0, to0, ResourcePool.Usage.ReadOnlyProcessingEnv);
+ snapshot0.Content.Accounts[address] = new Account(1, 100);
+ snapshot0.Content.Storages[(address, storageIndex)] = slotValue;
+ snapshot0.Content.StorageNodes[(address.ToAccountPath.ToCommitment(), storagePath)] = new TrieNode(NodeType.Leaf, storageHash);
+
+ StateId from1 = new StateId(1, Keccak.Zero);
+ StateId to1 = new StateId(2, Keccak.Zero);
+ using Snapshot snapshot1 = _resourcePool.CreateSnapshot(from1, to1, ResourcePool.Usage.ReadOnlyProcessingEnv);
+ snapshot1.Content.SelfDestructedStorageAddresses[address] = false;
+
+ SnapshotPooledList snapshots = new SnapshotPooledList(2);
+ snapshots.Add(snapshot0);
+ snapshots.Add(snapshot1);
+
+ using Snapshot compacted = _compactor.CompactSnapshotBundle(snapshots);
+
+ // Self-destructed address should be tracked, and its storage cleared
+ Assert.That(compacted.Content.SelfDestructedStorageAddresses.Count, Is.GreaterThan(0));
+ Assert.That(compacted.StoragesCount, Is.EqualTo(0));
+ Assert.That(compacted.StorageNodesCount, Is.EqualTo(0));
+ }
+
+ [Test]
+ public void CompactSnapshotBundle_NewAccountSelfDestruct_MarkedAsTrue()
+ {
+ Address address = new Address("0x1111111111111111111111111111111111111111");
+
+ StateId from0 = new StateId(0, Keccak.Zero);
+ StateId to0 = new StateId(1, Keccak.Zero);
+ using Snapshot snapshot0 = _resourcePool.CreateSnapshot(from0, to0, ResourcePool.Usage.ReadOnlyProcessingEnv);
+
+ StateId from1 = new StateId(1, Keccak.Zero);
+ StateId to1 = new StateId(2, Keccak.Zero);
+ using Snapshot snapshot1 = _resourcePool.CreateSnapshot(from1, to1, ResourcePool.Usage.ReadOnlyProcessingEnv);
+ snapshot1.Content.SelfDestructedStorageAddresses[address] = true;
+
+ SnapshotPooledList snapshots = new SnapshotPooledList(2);
+ snapshots.Add(snapshot0);
+ snapshots.Add(snapshot1);
+
+ using Snapshot compacted = _compactor.CompactSnapshotBundle(snapshots);
+
+ // New account marked as self-destructed should be tracked
+ Assert.That(compacted.Content.SelfDestructedStorageAddresses.Count, Is.GreaterThan(0));
+ // Verify at least one entry has true value
+ Assert.That(compacted.Content.SelfDestructedStorageAddresses.Values.Any(v => v), Is.True);
+ }
+
+ [Test]
+ public void CompactSnapshotBundle_UsesCompactorUsageAtBoundary()
+ {
+ StateId from = new StateId(0, Keccak.Zero);
+ StateId to = new StateId(16, Keccak.Zero);
+
+ using Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv);
+
+ SnapshotPooledList snapshots = new SnapshotPooledList(1);
+ snapshots.Add(snapshot);
+
+ using Snapshot compacted = _compactor.CompactSnapshotBundle(snapshots);
+
+ Assert.That(compacted.Usage, Is.EqualTo(ResourcePool.Usage.Compactor));
+ }
+
+ [Test]
+ public void CompactSnapshotBundle_UsesMidCompactorUsageNonBoundary()
+ {
+ StateId from = new StateId(0, Keccak.Zero);
+ StateId to = new StateId(15, Keccak.Zero);
+
+ using Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv);
+
+ SnapshotPooledList snapshots = new SnapshotPooledList(1);
+ snapshots.Add(snapshot);
+
+ using Snapshot compacted = _compactor.CompactSnapshotBundle(snapshots);
+
+ Assert.That(compacted.Usage, Is.EqualTo(ResourcePool.Usage.MidCompactor));
+ }
+
+ #region GetSnapshotsToCompact Tests
+
+ [Test]
+ public void Debug_AssembleSnapshotsUntil_Works()
+ {
+ BuildSnapshotChain(0, 4);
+
+ StateId target = CreateStateId(4);
+ SnapshotPooledList assembled = _snapshotRepository.AssembleSnapshotsUntil(target, 0, 10);
+
+ Assert.That(assembled.Count, Is.EqualTo(4));
+
+ foreach (Snapshot s in assembled) s.Dispose();
+ assembled.Dispose();
+ }
+
+ [Test]
+ public void GetSnapshotsToCompact_CompactSizeDisabled_ReturnsEmpty()
+ {
+ FlatDbConfig config = new FlatDbConfig { CompactSize = 0 };
+ SnapshotCompactor compactor = new SnapshotCompactor(config, _resourcePool, _snapshotRepository, LimboLogs.Instance);
+
+ StateId from = new StateId(0, Keccak.Zero);
+ StateId to = new StateId(16, Keccak.Zero);
+ using Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv);
+
+ using SnapshotPooledList snapshots = compactor.GetSnapshotsToCompact(snapshot);
+
+ Assert.That(snapshots.Count, Is.EqualTo(0));
+ }
+
+ [Test]
+ public void GetSnapshotsToCompact_BlockZero_ReturnsEmpty()
+ {
+ StateId from = new StateId(0, Keccak.Zero);
+ StateId to = new StateId(0, Keccak.Zero);
+ using Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv);
+
+ using SnapshotPooledList snapshots = _compactor.GetSnapshotsToCompact(snapshot);
+
+ Assert.That(snapshots.Count, Is.EqualTo(0));
+ }
+
+ [Test]
+ public void GetSnapshotsToCompact_NotCompactionBlock_ReturnsEmpty()
+ {
+ StateId from = new StateId(0, Keccak.Zero);
+ StateId to = new StateId(5, Keccak.Zero);
+ using Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv);
+
+ using SnapshotPooledList snapshots = _compactor.GetSnapshotsToCompact(snapshot);
+
+ Assert.That(snapshots.Count, Is.EqualTo(0));
+ }
+
+ [Test]
+ public void GetSnapshotsToCompact_FullCompaction_ReturnsMultipleSnapshots()
+ {
+ // Build chain of 15 snapshots (0->1, 1->2, ..., 14->15)
+ BuildSnapshotChain(0, 15);
+
+ // Add the 16th snapshot (15->16) separately
+ StateId targetFrom = CreateStateId(15);
+ StateId targetTo = CreateStateId(16);
+ Snapshot targetSnapshot = _resourcePool.CreateSnapshot(targetFrom, targetTo, ResourcePool.Usage.ReadOnlyProcessingEnv);
+ _snapshotRepository.TryAddSnapshot(targetSnapshot);
+ _snapshotRepository.AddStateId(targetTo);
+
+ using SnapshotPooledList snapshots = _compactor.GetSnapshotsToCompact(targetSnapshot);
+
+ Assert.That(snapshots.Count, Is.EqualTo(16));
+ }
+
+ [Test]
+ public void GetSnapshotsToCompact_MidCompaction_ReturnsMultipleSnapshots()
+ {
+ FlatDbConfig config = new FlatDbConfig { CompactSize = 16, MidCompactSize = 8 };
+ SnapshotCompactor compactor = new SnapshotCompactor(config, _resourcePool, _snapshotRepository, LimboLogs.Instance);
+
+ // Build chain of 7 snapshots (0->1, 1->2, ..., 6->7)
+ BuildSnapshotChain(0, 7);
+
+ // Add the 8th snapshot (7->8) separately
+ StateId targetFrom = CreateStateId(7);
+ StateId targetTo = CreateStateId(8);
+ Snapshot targetSnapshot = _resourcePool.CreateSnapshot(targetFrom, targetTo, ResourcePool.Usage.ReadOnlyProcessingEnv);
+ _snapshotRepository.TryAddSnapshot(targetSnapshot);
+ _snapshotRepository.AddStateId(targetTo);
+
+ using SnapshotPooledList snapshots = compactor.GetSnapshotsToCompact(targetSnapshot);
+
+ Assert.That(snapshots.Count, Is.EqualTo(8));
+ }
+
+ [Test]
+ public void GetSnapshotsToCompact_SingleSnapshot_ReturnsEmpty()
+ {
+ StateId from = new StateId(0, Keccak.Zero);
+ StateId to = new StateId(16, Keccak.Zero);
+ Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv);
+ _snapshotRepository.TryAddSnapshot(snapshot);
+ _snapshotRepository.AddStateId(to);
+
+ using Snapshot targetSnapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv);
+
+ using SnapshotPooledList snapshots = _compactor.GetSnapshotsToCompact(targetSnapshot);
+
+ Assert.That(snapshots.Count, Is.EqualTo(0));
+ }
+
+ [Test]
+ public void GetSnapshotsToCompact_IncompleteChain_ReturnsEmpty()
+ {
+ // Missing 1
+ for (long i = 2; i < 16; i++)
+ {
+ StateId from = new StateId(i, Keccak.Zero);
+ StateId to = new StateId(i + 1, Keccak.Zero);
+ Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv);
+ _snapshotRepository.TryAddSnapshot(snapshot);
+ _snapshotRepository.AddStateId(to);
+ }
+
+ StateId targetFrom = new StateId(15, Keccak.Zero);
+ StateId targetTo = new StateId(16, Keccak.Zero);
+ using Snapshot targetSnapshot = _resourcePool.CreateSnapshot(targetFrom, targetTo, ResourcePool.Usage.ReadOnlyProcessingEnv);
+
+ using SnapshotPooledList snapshots = _compactor.GetSnapshotsToCompact(targetSnapshot);
+
+ Assert.That(snapshots.Count, Is.EqualTo(0));
+ }
+
+ #endregion
+
+ #region DoCompactSnapshot Tests
+
+ [Test]
+ public void DoCompactSnapshot_ValidChain_CreatesCompactedSnapshot()
+ {
+ // Build chain of 15 snapshots (0->1, 1->2, ..., 14->15)
+ BuildSnapshotChain(0, 15);
+
+ // Add the 16th snapshot (15->16) separately
+ StateId targetFrom = CreateStateId(15);
+ StateId targetTo = CreateStateId(16);
+ Snapshot targetSnapshot = _resourcePool.CreateSnapshot(targetFrom, targetTo, ResourcePool.Usage.ReadOnlyProcessingEnv);
+ targetSnapshot.Content.Accounts[TestItem.AddressB] = new Account((UInt256)20, (UInt256)2000);
+ _snapshotRepository.TryAddSnapshot(targetSnapshot);
+ _snapshotRepository.AddStateId(targetTo);
+
+ _compactor.DoCompactSnapshot(targetSnapshot.To);
+
+ Assert.That(_snapshotRepository.CompactedSnapshotCount, Is.EqualTo(1));
+ }
+
+ #endregion
+
+}
diff --git a/src/Nethermind/Nethermind.State.Flat.Test/SnapshotRepositoryTests.cs b/src/Nethermind/Nethermind.State.Flat.Test/SnapshotRepositoryTests.cs
new file mode 100644
index 00000000000..587df139881
--- /dev/null
+++ b/src/Nethermind/Nethermind.State.Flat.Test/SnapshotRepositoryTests.cs
@@ -0,0 +1,356 @@
+// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited
+// SPDX-License-Identifier: LGPL-3.0-only
+
+using System.Collections.Generic;
+using Nethermind.Core;
+using Nethermind.Core.Collections;
+using Nethermind.Core.Crypto;
+using Nethermind.Core.Test.Builders;
+using Nethermind.Db;
+using Nethermind.Logging;
+using NUnit.Framework;
+
+namespace Nethermind.State.Flat.Test;
+
+[TestFixture]
+public class SnapshotRepositoryTests
+{
+ private SnapshotRepository _repository = null!;
+ private ResourcePool _resourcePool = null!;
+ private FlatDbConfig _config = null!;
+
+ [SetUp]
+ public void SetUp()
+ {
+ _config = new FlatDbConfig { CompactSize = 16 };
+ _resourcePool = new ResourcePool(_config);
+ _repository = new SnapshotRepository(LimboLogs.Instance);
+ }
+
+ private StateId CreateStateId(long blockNumber, byte rootByte = 0)
+ {
+ byte[] bytes = new byte[32];
+ bytes[0] = rootByte;
+ return new StateId(blockNumber, new ValueHash256(bytes));
+ }
+
+ private Snapshot CreateSnapshot(StateId from, StateId to, bool withData = false)
+ {
+ Snapshot snapshot = _resourcePool.CreateSnapshot(from, to, ResourcePool.Usage.ReadOnlyProcessingEnv);
+ if (withData)
+ {
+ snapshot.Content.Accounts[TestItem.AddressA] = new Account(1, 100);
+ }
+ return snapshot;
+ }
+
+ private Snapshot AddSnapshotToRepository(long fromBlock, long toBlock, bool compacted = false, bool withData = false)
+ {
+ StateId from = CreateStateId(fromBlock);
+ StateId to = CreateStateId(toBlock);
+ Snapshot snapshot = CreateSnapshot(from, to, withData);
+
+ bool added = compacted
+ ? _repository.TryAddCompactedSnapshot(snapshot)
+ : _repository.TryAddSnapshot(snapshot);
+
+ Assert.That(added, Is.True, $"Failed to add snapshot {fromBlock}->{toBlock}");
+
+ if (!compacted)
+ {
+ _repository.AddStateId(to);
+ }
+
+ return snapshot;
+ }
+
+ private List<Snapshot> BuildSnapshotChain(long startBlock, long endBlock)
+ {
+ List<Snapshot> snapshots = new List<Snapshot>();
+ for (long i = startBlock; i < endBlock; i++)
+ {
+ snapshots.Add(AddSnapshotToRepository(i, i + 1));
+ }
+ return snapshots;
+ }
+
+ #region Snapshot Addition and Removal
+
+ [Test]
+ public void TryAddSnapshot_NewAndDuplicate_BehavesCorrectly()
+ {
+ StateId from = CreateStateId(0);
+ StateId to = CreateStateId(1);
+ Snapshot snapshot1 = CreateSnapshot(from, to);
+ Snapshot snapshot2 = CreateSnapshot(from, to);
+
+ bool added1 = _repository.TryAddSnapshot(snapshot1);
+ bool added2 = _repository.TryAddSnapshot(snapshot2);
+
+ Assert.That(added1, Is.True);
+ Assert.That(added2, Is.False);
+
+ snapshot2.Dispose();
+ }
+
+ [Test]
+ public void TryAddCompactedSnapshot_NewAndDuplicate_BehavesCorrectly()
+ {
+ StateId from = CreateStateId(0);
+ StateId to = CreateStateId(1);
+ Snapshot snapshot1 = CreateSnapshot(from, to);
+ Snapshot snapshot2 = CreateSnapshot(from, to);
+
+ bool added1 = _repository.TryAddCompactedSnapshot(snapshot1);
+ bool added2 = _repository.TryAddCompactedSnapshot(snapshot2);
+
+ Assert.That(added1, Is.True);
+ Assert.That(added2, Is.False);
+
+ snapshot2.Dispose();
+ }
+
+ [Test]
+ public void AddAndRemoveSnapshot_CannotLeaseAfterRemoval()
+ {
+ StateId from = CreateStateId(0);
+ StateId to = CreateStateId(1);
+ Snapshot snapshot = CreateSnapshot(from, to);
+ _repository.AddStateId(to);
+
+ _repository.TryAddSnapshot(snapshot);
+ bool leasedBefore = _repository.TryLeaseState(to, out Snapshot? leasedSnapshot);
+ leasedSnapshot?.Dispose();
+
+ _repository.RemoveAndReleaseKnownState(to);
+ bool leasedAfter = _repository.TryLeaseState(to, out _);
+
+ Assert.That(leasedBefore, Is.True);
+ Assert.That(leasedAfter, Is.False);
+ }
+
+ [Test]
+ public void RemoveSnapshot_WithActiveLeases_DisposesWhenAllReleased()
+ {
+ AddSnapshotToRepository(0, 1);
+ StateId to = CreateStateId(1);
+
+ bool leased1 = _repository.TryLeaseState(to, out Snapshot? snapshot1);
+ bool leased2 = _repository.TryLeaseState(to, out Snapshot? snapshot2);
+
+ Assert.That(leased1, Is.True);
+ Assert.That(leased2, Is.True);
+
+ _repository.RemoveAndReleaseKnownState(to);
+
+ snapshot1!.Dispose();
+ snapshot2!.Dispose();
+
+ bool leasedAfter = _repository.TryLeaseState(to, out _);
+ Assert.That(leasedAfter, Is.False);
+ }
+
+ #endregion
+
+ #region Lease Operations
+
+ [Test]
+ public void TryLeaseState_ExistingAndNonExistent()
+ {
+ AddSnapshotToRepository(0, 1);
+
+ StateId existing = CreateStateId(1);
+ bool leasedExisting = _repository.TryLeaseState(existing, out Snapshot? snapshot);
+ Assert.That(leasedExisting, Is.True);
+ Assert.That(snapshot, Is.Not.Null);
+ snapshot!.Dispose();
+
+ StateId nonExistent = CreateStateId(999);
+ bool leasedNonExistent = _repository.TryLeaseState(nonExistent, out Snapshot? nonExistentSnapshot);
+ Assert.That(leasedNonExistent, Is.False);
+ Assert.That(nonExistentSnapshot, Is.Null);
+ }
+
+    [Test]
+    public void TryLeaseState_MultipleLeases_AllSucceed()
+    {
+        AddSnapshotToRepository(0, 1);
+        StateId state = CreateStateId(1);
+
+        // Take three overlapping leases on the same state.
+        bool firstLeased = _repository.TryLeaseState(state, out Snapshot? first);
+        bool secondLeased = _repository.TryLeaseState(state, out Snapshot? second);
+        bool thirdLeased = _repository.TryLeaseState(state, out Snapshot? third);
+
+        Assert.That(firstLeased, Is.True);
+        Assert.That(secondLeased, Is.True);
+        Assert.That(thirdLeased, Is.True);
+
+        // Every lease hands back the very same snapshot instance.
+        Assert.That(first, Is.SameAs(second));
+        Assert.That(second, Is.SameAs(third));
+
+        // Each lease must be balanced by its own Dispose.
+        first!.Dispose();
+        second!.Dispose();
+        third!.Dispose();
+    }
+
+    [Test]
+    public void TryLeaseCompactedState_ExistingAndNonExistent()
+    {
+        // Arrange: a compacted snapshot ending at state 1.
+        AddSnapshotToRepository(0, 1, compacted: true);
+
+        // Known compacted state leases successfully.
+        StateId known = CreateStateId(1);
+        Assert.That(_repository.TryLeaseCompactedState(known, out Snapshot? knownSnapshot), Is.True);
+        Assert.That(knownSnapshot, Is.Not.Null);
+        knownSnapshot!.Dispose();
+
+        // Unknown state is reported absent with a null snapshot.
+        StateId unknown = CreateStateId(999);
+        Assert.That(_repository.TryLeaseCompactedState(unknown, out Snapshot? unknownSnapshot), Is.False);
+        Assert.That(unknownSnapshot, Is.Null);
+    }
+
+    [Test]
+    public void TryLeaseCompactedState_MultipleLeases_AllSucceed()
+    {
+        // Arrange: a compacted snapshot ending at state 1.
+        AddSnapshotToRepository(0, 1, compacted: true);
+        StateId state = CreateStateId(1);
+
+        // Two overlapping leases on the compacted state both succeed.
+        bool firstLeased = _repository.TryLeaseCompactedState(state, out Snapshot? first);
+        bool secondLeased = _repository.TryLeaseCompactedState(state, out Snapshot? second);
+
+        Assert.That(firstLeased, Is.True);
+        Assert.That(secondLeased, Is.True);
+
+        // Release each lease independently.
+        first!.Dispose();
+        second!.Dispose();
+    }
+
+ #endregion
+
+ #region Query Operations
+
+    [Test]
+    public void HasState_ExistingAndNonExistent()
+    {
+        AddSnapshotToRepository(0, 1);
+        StateId known = CreateStateId(1);
+        StateId unknown = CreateStateId(999);
+
+        // Only the state that was actually added is reported as present.
+        Assert.That(_repository.HasState(known), Is.True);
+        Assert.That(_repository.HasState(unknown), Is.False);
+    }
+
+    [Test]
+    public void GetSnapshotBeforeStateId_EmptyRepository()
+    {
+        StateId target = CreateStateId(10);
+
+        // NOTE(review): the generic type argument on ArrayPoolList appears to have
+        // been stripped from this patch (likely ArrayPoolList<StateId> — confirm
+        // against ISnapshotRepository); as written this line will not compile.
+        ArrayPoolList states = _repository.GetSnapshotBeforeStateId(target);
+
+        // An empty repository has nothing preceding any target.
+        Assert.That(states.Count, Is.EqualTo(0));
+        states.Dispose();
+    }
+
+    [Test]
+    public void GetSnapshotBeforeStateId_NoStatesBeforeTarget()
+    {
+        // Only a state at 10 exists, so nothing can precede a target of 5.
+        StateId state10 = CreateStateId(10);
+        _repository.AddStateId(state10);
+
+        StateId target = CreateStateId(5);
+        // NOTE(review): ArrayPoolList is missing its generic argument here
+        // (stripped in this patch) — restore it or this will not compile.
+        ArrayPoolList states = _repository.GetSnapshotBeforeStateId(target);
+
+        Assert.That(states.Count, Is.EqualTo(0));
+        states.Dispose();
+    }
+
+    [Test]
+    public void GetSnapshotBeforeStateId_StatesBeforeTarget()
+    {
+        // States at 1, 3, 5, 7, 10; querying before 6 should return 1, 3 and 5.
+        StateId state1 = CreateStateId(1);
+        StateId state3 = CreateStateId(3);
+        StateId state5 = CreateStateId(5);
+        StateId state7 = CreateStateId(7);
+        StateId state10 = CreateStateId(10);
+
+        _repository.AddStateId(state1);
+        _repository.AddStateId(state3);
+        _repository.AddStateId(state5);
+        _repository.AddStateId(state7);
+        _repository.AddStateId(state10);
+
+        StateId target = CreateStateId(6);
+        // NOTE(review): ArrayPoolList is missing its generic argument here
+        // (stripped in this patch) — restore it or this will not compile.
+        ArrayPoolList states = _repository.GetSnapshotBeforeStateId(target);
+
+        // The three states strictly before the target (1, 3, 5).
+        Assert.That(states.Count, Is.EqualTo(3));
+        states.Dispose();
+    }
+
+ #endregion
+
+ #region AssembleSnapshotsUntil
+
+    [Test]
+    public void AssembleSnapshotsUntil_EmptyRepository()
+    {
+        // With nothing stored, assembling towards any target yields nothing.
+        StateId target = CreateStateId(10);
+
+        using SnapshotPooledList collected = _repository.AssembleSnapshotsUntil(target, 0, 10);
+
+        Assert.That(collected.Count, Is.EqualTo(0));
+    }
+
+    [Test]
+    public void AssembleSnapshotsUntil_SingleSnapshot()
+    {
+        // A single snapshot (0 -> 1) is in the repository.
+        AddSnapshotToRepository(0, 1);
+        StateId target = CreateStateId(1);
+
+        using SnapshotPooledList collected = _repository.AssembleSnapshotsUntil(target, 0, 10);
+
+        // Exactly that snapshot comes back, ending at the requested state.
+        Assert.That(collected.Count, Is.EqualTo(1));
+        Assert.That(collected[0].To, Is.EqualTo(target));
+    }
+
+    [Test]
+    public void AssembleSnapshotsUntil_LinearChain()
+    {
+        // Chain of snapshots 0 -> 1 -> 2 -> 3 -> 4.
+        BuildSnapshotChain(0, 4);
+        StateId target = CreateStateId(4);
+
+        using SnapshotPooledList collected = _repository.AssembleSnapshotsUntil(target, 0, 10);
+
+        // All four links of the chain are assembled.
+        Assert.That(collected.Count, Is.EqualTo(4));
+    }
+
+    [Test]
+    public void AssembleSnapshotsUntil_StopsAtStartingBlock()
+    {
+        // Chain 0..5; assembling back from 4 with a floor of 2 walks 4->3->2 only.
+        BuildSnapshotChain(0, 5);
+        StateId target = CreateStateId(4);
+
+        using SnapshotPooledList collected = _repository.AssembleSnapshotsUntil(target, 2, 10);
+
+        Assert.That(collected.Count, Is.EqualTo(2));
+    }
+
+    [Test]
+    public void AssembleSnapshotsUntil_PrefersCompacted()
+    {
+        // Register only a compacted snapshot covering 0 -> 1.
+        StateId from = CreateStateId(0);
+        StateId to = CreateStateId(1);
+        Snapshot compacted = CreateSnapshot(from, to);
+        _repository.TryAddCompactedSnapshot(compacted);
+
+        using SnapshotPooledList collected = _repository.AssembleSnapshotsUntil(to, 0, 10);
+
+        // The compacted snapshot alone satisfies the request.
+        Assert.That(collected.Count, Is.EqualTo(1));
+    }
+
+ #endregion
+}
diff --git a/src/Nethermind/Nethermind.State.Flat.Test/SpmcRingBufferTests.cs b/src/Nethermind/Nethermind.State.Flat.Test/SpmcRingBufferTests.cs
new file mode 100644
index 00000000000..e02435042a9
--- /dev/null
+++ b/src/Nethermind/Nethermind.State.Flat.Test/SpmcRingBufferTests.cs
@@ -0,0 +1,139 @@
+// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited
+// SPDX-License-Identifier: LGPL-3.0-only
+
+using System.Linq;
+using System.Threading;
+using System.Threading.Tasks;
+using FluentAssertions;
+using NUnit.Framework;
+
+namespace Nethermind.State.Flat.Test;
+
+public class SpmcRingBufferTests
+{
+    // NOTE(review): generic type arguments were stripped from this patch (cf. the
+    // bare "Substitute.For()" calls elsewhere in it). The buffer carries int
+    // payloads in every test here, so SpmcRingBuffer<int> is restored throughout —
+    // confirm against the SpmcRingBuffer declaration.
+
+    [Test]
+    public void SmokeTest()
+    {
+        // FIFO order for a handful of items, no wrap-around involved.
+        SpmcRingBuffer<int> jobQueue = new SpmcRingBuffer<int>(16);
+
+        jobQueue.TryEnqueue(1);
+        jobQueue.TryEnqueue(2);
+        jobQueue.TryEnqueue(3);
+        jobQueue.TryEnqueue(4);
+        jobQueue.TryEnqueue(5);
+
+        for (int expected = 1; expected <= 5; expected++)
+        {
+            jobQueue.TryDequeue(out int j).Should().BeTrue();
+            j.Should().Be(expected);
+        }
+    }
+
+    [Test]
+    public void RollingSmokeTest()
+    {
+        // Keep the buffer partially full while head/tail wrap around many times,
+        // checking FIFO order is preserved across the wraps.
+        SpmcRingBuffer<int> jobQueue = new SpmcRingBuffer<int>(16);
+
+        for (int i = 1; i <= 5; i++)
+        {
+            jobQueue.TryEnqueue(i);
+        }
+
+        for (int i = 0; i < 100; i++)
+        {
+            jobQueue.TryDequeue(out int j).Should().BeTrue();
+            j.Should().Be(i + 1);
+            jobQueue.TryEnqueue(i + 5 + 1).Should().BeTrue();
+        }
+    }
+
+    [Test]
+    public void SmokeTestFullAndRolling()
+    {
+        SpmcRingBuffer<int> jobQueue = new SpmcRingBuffer<int>(16);
+
+        // Fill to capacity (one past must fail), then drain fully (one past must
+        // fail). Two cycles so the second exercises the wrapped head/tail state.
+        for (int cycle = 0; cycle < 2; cycle++)
+        {
+            for (int i = 0; i < 16; i++)
+            {
+                Assert.That(jobQueue.TryEnqueue(1), Is.True);
+            }
+            Assert.That(jobQueue.TryEnqueue(1), Is.False);
+
+            for (int i = 0; i < 16; i++)
+            {
+                Assert.That(jobQueue.TryDequeue(out _), Is.True);
+            }
+            Assert.That(jobQueue.TryDequeue(out _), Is.False);
+        }
+    }
+
+    [Test]
+    public async Task HighConcurrency_StressTest_NoDataLoss()
+    {
+        const int Capacity = 1024;
+        const int ItemsToProduce = 1_000_000;
+        const int ConsumerCount = 4;
+
+        SpmcRingBuffer<int> buffer = new SpmcRingBuffer<int>(Capacity);
+        int[] consumedCounts = new int[ItemsToProduce];
+        long totalConsumed = 0;
+
+        // Producer Task (Single Producer): pushes every item exactly once,
+        // spinning while the buffer is full.
+        Task producer = Task.Run(() =>
+        {
+            for (int i = 0; i < ItemsToProduce; i++)
+            {
+                while (!buffer.TryEnqueue(i))
+                {
+                    Thread.SpinWait(10); // Wait for space
+                }
+            }
+        });
+
+        // Consumer Tasks (Multiple Consumers): drain concurrently until every
+        // produced item has been accounted for.
+        Task[] consumers = Enumerable.Range(0, ConsumerCount).Select(_ => Task.Run(() =>
+        {
+            while (Interlocked.Read(ref totalConsumed) < ItemsToProduce)
+            {
+                if (buffer.TryDequeue(out int item))
+                {
+                    // Track that this specific item was hit
+                    Interlocked.Increment(ref consumedCounts[item]);
+                    Interlocked.Increment(ref totalConsumed);
+                }
+                else
+                {
+                    Thread.SpinWait(10);
+                }
+            }
+        })).ToArray();
+
+        // Fixed: await the single producer directly (Task.WhenAll was redundant).
+        await producer;
+        await Task.WhenAll(consumers);
+
+        // Fixed: actual value first, expected inside the constraint — the original
+        // had the arguments reversed, which inverts NUnit's failure message.
+        Assert.That(Interlocked.Read(ref totalConsumed), Is.EqualTo(ItemsToProduce));
+
+        for (int i = 0; i < ItemsToProduce; i++)
+        {
+            // Constraint form so a failure reports the offending count directly.
+            Assert.That(consumedCounts[i], Is.EqualTo(1), $"Item {i} was consumed {consumedCounts[i]} times!");
+        }
+    }
+}
diff --git a/src/Nethermind/Nethermind.State.Flat.Test/TrieNodeCacheTests.cs b/src/Nethermind/Nethermind.State.Flat.Test/TrieNodeCacheTests.cs
new file mode 100644
index 00000000000..77737d8747f
--- /dev/null
+++ b/src/Nethermind/Nethermind.State.Flat.Test/TrieNodeCacheTests.cs
@@ -0,0 +1,500 @@
+// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited
+// SPDX-License-Identifier: LGPL-3.0-only
+
+using Nethermind.Core.Crypto;
+using Nethermind.Db;
+using Nethermind.Logging;
+using Nethermind.Trie;
+using NUnit.Framework;
+
+namespace Nethermind.State.Flat.Test;
+
+// Tests for TrieNodeCache: population from a TransientResource, hash-checked
+// lookup keyed by (storage address, path, keccak), zero-budget behaviour and
+// Clear(). NOTE(review): resources obtained from _resourcePool.GetCachedResource
+// are never returned to the pool in these tests — confirm whether the pool
+// requires that, or whether GC of the TransientResource is acceptable here.
+[TestFixture]
+public class TrieNodeCacheTests
+{
+    private TrieNodeCache _cache = null!;
+    private FlatDbConfig _config = null!;
+    private ResourcePool _resourcePool = null!;
+
+    [SetUp]
+    public void SetUp()
+    {
+        // 1 MiB budget: large enough that nothing is evicted during these tests.
+        _config = new FlatDbConfig { TrieCacheMemoryBudget = 1024 * 1024 };
+        _cache = new TrieNodeCache(_config, LimboLogs.Instance);
+        _resourcePool = new ResourcePool(_config);
+    }
+
+    [Test]
+    public void TryGet_ReturnsNotFound_WhenCacheEmpty()
+    {
+        TreePath path = TreePath.FromHexString("1234");
+        Hash256 hash = Keccak.Compute([1, 2, 3]);
+
+        // null address => state-trie lookup.
+        bool found = _cache.TryGet(null, in path, hash, out TrieNode? node);
+
+        Assert.That(found, Is.False);
+        Assert.That(node, Is.Null);
+    }
+
+    [Test]
+    public void TryGet_ReturnsNotFound_WithStorageAddress_WhenCacheEmpty()
+    {
+        // Non-null address => storage-trie lookup.
+        Hash256 address = Keccak.Compute([0xaa, 0xbb]);
+        TreePath path = TreePath.FromHexString("abcd");
+        Hash256 hash = Keccak.Compute([4, 5, 6]);
+
+        bool found = _cache.TryGet(address, in path, hash, out TrieNode? node);
+
+        Assert.That(found, Is.False);
+        Assert.That(node, Is.Null);
+    }
+
+    [Test]
+    public void Constructor_WithZeroMemoryTarget_DoesNotThrow()
+    {
+        // A disabled cache (budget 0) must still construct cleanly.
+        FlatDbConfig config = new FlatDbConfig { TrieCacheMemoryBudget = 0 };
+        Assert.DoesNotThrow(() => new TrieNodeCache(config, LimboLogs.Instance));
+    }
+
+    [Test]
+    public void Constructor_WithSmallMemoryTarget_UseMinimumBucketSize()
+    {
+        // A 1-byte budget must be clamped internally rather than throw.
+        FlatDbConfig config = new FlatDbConfig { TrieCacheMemoryBudget = 1 };
+        Assert.DoesNotThrow(() => new TrieNodeCache(config, LimboLogs.Instance));
+    }
+
+    [Test]
+    public void Add_ThenTryGet_ReturnsNode()
+    {
+        TreePath path = TreePath.FromHexString("abcd");
+        Hash256 hash = Keccak.Compute([1, 2, 3]);
+        TrieNode trieNode = new TrieNode(NodeType.Leaf, hash);
+
+        // Nodes enter the cache via a TransientResource, not one-by-one.
+        TransientResource transientResource = _resourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing);
+        transientResource.Nodes.Set(null, in path, trieNode);
+
+        _cache.Add(transientResource);
+
+        bool found = _cache.TryGet(null, in path, hash, out TrieNode? retrievedNode);
+
+        Assert.That(found, Is.True);
+        Assert.That(retrievedNode!.Keccak, Is.EqualTo(hash));
+    }
+
+    [Test]
+    public void Add_WithStorageAddress_ThenTryGet_ReturnsNode()
+    {
+        Hash256 address = Keccak.Compute([0xaa, 0xbb]);
+        TreePath path = TreePath.FromHexString("1234");
+        Hash256 hash = Keccak.Compute([3, 4, 5]);
+        TrieNode trieNode = new TrieNode(NodeType.Branch, hash);
+
+        TransientResource transientResource = _resourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing);
+        transientResource.Nodes.Set(address, in path, trieNode);
+
+        _cache.Add(transientResource);
+
+        bool found = _cache.TryGet(address, in path, hash, out TrieNode? retrievedNode);
+
+        Assert.That(found, Is.True);
+        Assert.That(retrievedNode!.Keccak, Is.EqualTo(hash));
+    }
+
+    [Test]
+    public void Add_WithZeroMemoryTarget_DoesNotCacheNodes()
+    {
+        // With a zero budget, Add must be a no-op: nothing becomes retrievable.
+        FlatDbConfig zeroConfig = new FlatDbConfig { TrieCacheMemoryBudget = 0 };
+        TrieNodeCache zeroCache = new TrieNodeCache(zeroConfig, LimboLogs.Instance);
+        ResourcePool zeroResourcePool = new ResourcePool(zeroConfig);
+
+        TreePath path = TreePath.FromHexString("abcd");
+        Hash256 hash = Keccak.Compute([1, 2, 3]);
+        TrieNode trieNode = new TrieNode(NodeType.Leaf, hash);
+
+        TransientResource transientResource = zeroResourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing);
+        transientResource.Nodes.Set(null, in path, trieNode);
+
+        zeroCache.Add(transientResource);
+
+        bool found = zeroCache.TryGet(null, in path, hash, out TrieNode? retrievedNode);
+
+        Assert.That(found, Is.False);
+        Assert.That(retrievedNode, Is.Null);
+    }
+
+    [Test]
+    public void Add_MultipleNodes_AllRetrievable()
+    {
+        // One Add call publishes every node collected in the resource.
+        TransientResource transientResource = _resourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing);
+
+        TreePath path1 = TreePath.FromHexString("1111");
+        TreePath path2 = TreePath.FromHexString("2222");
+        TreePath path3 = TreePath.FromHexString("3333");
+        Hash256 hash1 = Keccak.Compute([1]);
+        Hash256 hash2 = Keccak.Compute([2]);
+        Hash256 hash3 = Keccak.Compute([3]);
+
+        transientResource.Nodes.Set(null, in path1, new TrieNode(NodeType.Leaf, hash1));
+        transientResource.Nodes.Set(null, in path2, new TrieNode(NodeType.Branch, hash2));
+        transientResource.Nodes.Set(null, in path3, new TrieNode(NodeType.Extension, hash3));
+
+        _cache.Add(transientResource);
+
+        Assert.That(_cache.TryGet(null, in path1, hash1, out _), Is.True);
+        Assert.That(_cache.TryGet(null, in path2, hash2, out _), Is.True);
+        Assert.That(_cache.TryGet(null, in path3, hash3, out _), Is.True);
+    }
+
+    [Test]
+    public void Add_MixedStateAndStorageNodes_AllRetrievable()
+    {
+        TransientResource transientResource = _resourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing);
+
+        Hash256 storageAddress = Keccak.Compute([0xaa]);
+        TreePath statePath = TreePath.FromHexString("1111");
+        TreePath storagePath = TreePath.FromHexString("2222");
+        Hash256 stateHash = Keccak.Compute([1]);
+        Hash256 storageHash = Keccak.Compute([2]);
+
+        // State entry (null address) and storage entry (non-null) in one resource.
+        transientResource.Nodes.Set(null, in statePath, new TrieNode(NodeType.Leaf, stateHash));
+        transientResource.Nodes.Set(storageAddress, in storagePath, new TrieNode(NodeType.Leaf, storageHash));
+
+        _cache.Add(transientResource);
+
+        Assert.That(_cache.TryGet(null, in statePath, stateHash, out _), Is.True);
+        Assert.That(_cache.TryGet(storageAddress, in storagePath, storageHash, out _), Is.True);
+    }
+
+    [Test]
+    public void TryGet_ReturnsNotFound_WhenHashDoesNotMatch()
+    {
+        // A lookup must miss when the stored node's keccak differs from the
+        // queried hash, even though the path matches.
+        TreePath path = TreePath.FromHexString("abcd");
+        Hash256 storedHash = Keccak.Compute([1, 2, 3]);
+        Hash256 queryHash = Keccak.Compute([4, 5, 6]);
+        TrieNode trieNode = new TrieNode(NodeType.Leaf, storedHash);
+
+        TransientResource transientResource = _resourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing);
+        transientResource.Nodes.Set(null, in path, trieNode);
+
+        _cache.Add(transientResource);
+
+        bool found = _cache.TryGet(null, in path, queryHash, out TrieNode? retrievedNode);
+
+        Assert.That(found, Is.False);
+        Assert.That(retrievedNode, Is.Null);
+    }
+
+    [Test]
+    public void Add_OverwritesExistingNode_OnCollision()
+    {
+        // The same path added twice: the later Add wins, the older entry is gone.
+        TreePath path = TreePath.FromHexString("abcd");
+        Hash256 hash1 = Keccak.Compute([1, 2, 3]);
+        Hash256 hash2 = Keccak.Compute([4, 5, 6]);
+
+        TransientResource transientResource1 = _resourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing);
+        transientResource1.Nodes.Set(null, in path, new TrieNode(NodeType.Leaf, hash1));
+
+        _cache.Add(transientResource1);
+
+        TransientResource transientResource2 = _resourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing);
+        transientResource2.Nodes.Set(null, in path, new TrieNode(NodeType.Leaf, hash2));
+
+        _cache.Add(transientResource2);
+
+        Assert.That(_cache.TryGet(null, in path, hash1, out _), Is.False);
+        Assert.That(_cache.TryGet(null, in path, hash2, out _), Is.True);
+    }
+
+    [Test]
+    public void Sharding_DifferentFirstBytes_GoToDifferentShards()
+    {
+        // NOTE(review): this only verifies both entries remain retrievable; it
+        // cannot observe shard placement directly — name assumes paths with
+        // different first nibbles land in different shards. Confirm if relevant.
+        TreePath path1 = TreePath.FromHexString("1000");
+        TreePath path2 = TreePath.FromHexString("2000");
+        Hash256 hash1 = Keccak.Compute([1]);
+        Hash256 hash2 = Keccak.Compute([2]);
+
+        TransientResource transientResource = _resourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing);
+        transientResource.Nodes.Set(null, in path1, new TrieNode(NodeType.Leaf, hash1));
+        transientResource.Nodes.Set(null, in path2, new TrieNode(NodeType.Leaf, hash2));
+
+        _cache.Add(transientResource);
+
+        Assert.That(_cache.TryGet(null, in path1, hash1, out _), Is.True);
+        Assert.That(_cache.TryGet(null, in path2, hash2, out _), Is.True);
+    }
+
+    [Test]
+    public void Sharding_StorageNodes_ShardByAddressFirstByte()
+    {
+        // Same path under two storage addresses whose first bytes differ.
+        Hash256 address1 = new Hash256("0x1000000000000000000000000000000000000000000000000000000000000000");
+        Hash256 address2 = new Hash256("0x2000000000000000000000000000000000000000000000000000000000000000");
+        TreePath path = TreePath.FromHexString("abcd");
+        Hash256 hash1 = Keccak.Compute([1]);
+        Hash256 hash2 = Keccak.Compute([2]);
+
+        TransientResource transientResource = _resourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing);
+        transientResource.Nodes.Set(address1, in path, new TrieNode(NodeType.Leaf, hash1));
+        transientResource.Nodes.Set(address2, in path, new TrieNode(NodeType.Leaf, hash2));
+
+        _cache.Add(transientResource);
+
+        Assert.That(_cache.TryGet(address1, in path, hash1, out _), Is.True);
+        Assert.That(_cache.TryGet(address2, in path, hash2, out _), Is.True);
+    }
+
+    [Test]
+    public void Clear_RemovesAllCachedNodes()
+    {
+        // Add multiple nodes across different shards
+        TransientResource transientResource = _resourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing);
+
+        TreePath path1 = TreePath.FromHexString("1000");
+        TreePath path2 = TreePath.FromHexString("2000");
+        TreePath path3 = TreePath.FromHexString("3000");
+        Hash256 hash1 = Keccak.Compute([1]);
+        Hash256 hash2 = Keccak.Compute([2]);
+        Hash256 hash3 = Keccak.Compute([3]);
+
+        transientResource.Nodes.Set(null, in path1, new TrieNode(NodeType.Leaf, hash1));
+        transientResource.Nodes.Set(null, in path2, new TrieNode(NodeType.Branch, hash2));
+        transientResource.Nodes.Set(null, in path3, new TrieNode(NodeType.Extension, hash3));
+
+        _cache.Add(transientResource);
+
+        // Verify nodes are cached
+        Assert.That(_cache.TryGet(null, in path1, hash1, out _), Is.True);
+        Assert.That(_cache.TryGet(null, in path2, hash2, out _), Is.True);
+        Assert.That(_cache.TryGet(null, in path3, hash3, out _), Is.True);
+
+        // Clear the cache
+        _cache.Clear();
+
+        // Verify all nodes are removed
+        Assert.That(_cache.TryGet(null, in path1, hash1, out _), Is.False);
+        Assert.That(_cache.TryGet(null, in path2, hash2, out _), Is.False);
+        Assert.That(_cache.TryGet(null, in path3, hash3, out _), Is.False);
+    }
+
+    [Test]
+    public void Clear_RemovesStateAndStorageNodes()
+    {
+        TransientResource transientResource = _resourcePool.GetCachedResource(ResourcePool.Usage.MainBlockProcessing);
+
+        Hash256 storageAddress = Keccak.Compute([0xaa]);
+        TreePath statePath = TreePath.FromHexString("1111");
+        TreePath storagePath = TreePath.FromHexString("2222");
+        Hash256 stateHash = Keccak.Compute([1]);
+        Hash256 storageHash = Keccak.Compute([2]);
+
+        transientResource.Nodes.Set(null, in statePath, new TrieNode(NodeType.Leaf, stateHash));
+        transientResource.Nodes.Set(storageAddress, in storagePath, new TrieNode(NodeType.Leaf, storageHash));
+
+        _cache.Add(transientResource);
+
+        // Verify nodes are cached
+        Assert.That(_cache.TryGet(null, in statePath, stateHash, out _), Is.True);
+        Assert.That(_cache.TryGet(storageAddress, in storagePath, storageHash, out _), Is.True);
+
+        // Clear the cache
+        _cache.Clear();
+
+        // Verify all nodes are removed
+        Assert.That(_cache.TryGet(null, in statePath, stateHash, out _), Is.False);
+        Assert.That(_cache.TryGet(storageAddress, in storagePath, storageHash, out _), Is.False);
+    }
+}
+
+// Tests for the per-shard TrieNodeCache.ChildCache: Set/TryGet keyed by
+// (storage address, path) with keccak verification, GetOrAdd semantics,
+// Count/Capacity bookkeeping and Reset behaviour.
+[TestFixture]
+public class ChildCacheTests
+{
+    private TrieNodeCache.ChildCache _cache = null!;
+
+    [SetUp]
+    public void SetUp()
+    {
+        // Fresh cache per test; 1024 is the requested capacity hint.
+        _cache = new TrieNodeCache.ChildCache(1024);
+    }
+
+    [Test]
+    public void TryGet_ReturnsNotFound_WhenCacheEmpty()
+    {
+        TreePath path = TreePath.FromHexString("1234");
+        Hash256 hash = Keccak.Compute([1, 2, 3]);
+
+        bool found = _cache.TryGet(null, in path, hash, out TrieNode? node);
+
+        Assert.That(found, Is.False);
+        Assert.That(node, Is.Null);
+    }
+
+    [Test]
+    public void Set_ThenTryGet_ReturnsNode()
+    {
+        TreePath path = TreePath.FromHexString("abcd");
+        Hash256 hash = Keccak.Compute([1, 2, 3]);
+        TrieNode trieNode = new TrieNode(NodeType.Leaf, hash);
+
+        _cache.Set(null, in path, trieNode);
+
+        bool found = _cache.TryGet(null, in path, hash, out TrieNode? retrievedNode);
+
+        // Same instance back, not a copy.
+        Assert.That(found, Is.True);
+        Assert.That(retrievedNode, Is.SameAs(trieNode));
+    }
+
+    [Test]
+    public void Set_WithStorageAddress_ThenTryGet_ReturnsNode()
+    {
+        // Non-null address keys the entry into the storage-node space.
+        Hash256 address = Keccak.Compute([0xaa, 0xbb]);
+        TreePath path = TreePath.FromHexString("1234");
+        Hash256 hash = Keccak.Compute([3, 4, 5]);
+        TrieNode trieNode = new TrieNode(NodeType.Branch, hash);
+
+        _cache.Set(address, in path, trieNode);
+
+        bool found = _cache.TryGet(address, in path, hash, out TrieNode? retrievedNode);
+
+        Assert.That(found, Is.True);
+        Assert.That(retrievedNode, Is.SameAs(trieNode));
+    }
+
+    [Test]
+    public void TryGet_ReturnsNotFound_WhenHashMismatch()
+    {
+        // Path matches but the stored node's keccak differs => miss.
+        TreePath path = TreePath.FromHexString("abcd");
+        Hash256 storedHash = Keccak.Compute([1, 2, 3]);
+        Hash256 queryHash = Keccak.Compute([4, 5, 6]);
+        TrieNode trieNode = new TrieNode(NodeType.Leaf, storedHash);
+
+        _cache.Set(null, in path, trieNode);
+
+        bool found = _cache.TryGet(null, in path, queryHash, out TrieNode? retrievedNode);
+
+        Assert.That(found, Is.False);
+        Assert.That(retrievedNode, Is.Null);
+    }
+
+    [Test]
+    public void GetOrAdd_ReturnsExistingNode_WhenPresent()
+    {
+        // Two distinct instances with the same keccak: the pre-existing one wins.
+        TreePath path = TreePath.FromHexString("abcd");
+        Hash256 hash = Keccak.Compute([1, 2, 3]);
+        TrieNode existingNode = new TrieNode(NodeType.Leaf, hash);
+        TrieNode newNode = new TrieNode(NodeType.Leaf, hash);
+
+        _cache.Set(null, in path, existingNode);
+        TrieNode result = _cache.GetOrAdd(null, in path, newNode);
+
+        Assert.That(result, Is.SameAs(existingNode));
+    }
+
+    [Test]
+    public void GetOrAdd_AddsAndReturnsNewNode_WhenNotPresent()
+    {
+        TreePath path = TreePath.FromHexString("abcd");
+        Hash256 hash = Keccak.Compute([1, 2, 3]);
+        TrieNode newNode = new TrieNode(NodeType.Leaf, hash);
+
+        TrieNode result = _cache.GetOrAdd(null, in path, newNode);
+
+        // Inserted and returned; Count reflects the insertion.
+        Assert.That(result, Is.SameAs(newNode));
+        Assert.That(_cache.Count, Is.EqualTo(1));
+    }
+
+    [Test]
+    public void GetOrAdd_WithStorageAddress_ReturnsExistingNode()
+    {
+        Hash256 address = Keccak.Compute([0xaa, 0xbb]);
+        TreePath path = TreePath.FromHexString("1234");
+        Hash256 hash = Keccak.Compute([1, 2, 3]);
+        TrieNode existingNode = new TrieNode(NodeType.Branch, hash);
+        TrieNode newNode = new TrieNode(NodeType.Branch, hash);
+
+        _cache.Set(address, in path, existingNode);
+        TrieNode result = _cache.GetOrAdd(address, in path, newNode);
+
+        Assert.That(result, Is.SameAs(existingNode));
+    }
+
+    [Test]
+    public void Reset_ClearsCache()
+    {
+        TreePath path = TreePath.FromHexString("abcd");
+        Hash256 hash = Keccak.Compute([1, 2, 3]);
+        TrieNode trieNode = new TrieNode(NodeType.Leaf, hash);
+
+        _cache.Set(null, in path, trieNode);
+        Assert.That(_cache.Count, Is.EqualTo(1));
+
+        _cache.Reset();
+
+        // Reset empties the cache entirely.
+        Assert.That(_cache.Count, Is.EqualTo(0));
+        bool found = _cache.TryGet(null, in path, hash, out _);
+        Assert.That(found, Is.False);
+    }
+
+    [Test]
+    public void Count_IncrementsOnSet()
+    {
+        Assert.That(_cache.Count, Is.EqualTo(0));
+
+        TreePath path1 = TreePath.FromHexString("1111");
+        TreePath path2 = TreePath.FromHexString("2222");
+        Hash256 hash1 = Keccak.Compute([1]);
+        Hash256 hash2 = Keccak.Compute([2]);
+
+        _cache.Set(null, in path1, new TrieNode(NodeType.Leaf, hash1));
+        Assert.That(_cache.Count, Is.EqualTo(1));
+
+        _cache.Set(null, in path2, new TrieNode(NodeType.Leaf, hash2));
+        Assert.That(_cache.Count, Is.EqualTo(2));
+    }
+
+    [Test]
+    public void Capacity_ReturnsExpectedValue()
+    {
+        // Capacity may be rounded up internally; only positivity is asserted.
+        TrieNodeCache.ChildCache smallCache = new TrieNodeCache.ChildCache(16);
+        Assert.That(smallCache.Capacity, Is.GreaterThan(0));
+    }
+
+    [Test]
+    public void Reset_ResizesCache_WhenCountExceedsCapacity()
+    {
+        TrieNodeCache.ChildCache smallCache = new TrieNodeCache.ChildCache(16);
+        int initialCapacity = smallCache.Capacity;
+
+        // Overfill well past capacity so Reset has a reason to grow the table.
+        for (int i = 0; i < initialCapacity * 3; i++)
+        {
+            TreePath path = TreePath.FromHexString(i.ToString("x8"));
+            Hash256 hash = Keccak.Compute([(byte)i]);
+            smallCache.Set(null, in path, new TrieNode(NodeType.Leaf, hash));
+        }
+
+        smallCache.Reset();
+
+        Assert.That(smallCache.Count, Is.EqualTo(0));
+        Assert.That(smallCache.Capacity, Is.GreaterThanOrEqualTo(initialCapacity));
+    }
+
+    [Test]
+    public void StateNodes_AndStorageNodes_AreSeparate()
+    {
+        // The same path with a null vs non-null address must not collide.
+        TreePath path = TreePath.FromHexString("abcd");
+        Hash256 stateHash = Keccak.Compute([1, 2, 3]);
+        Hash256 storageHash = Keccak.Compute([4, 5, 6]);
+        Hash256 storageAddress = Keccak.Compute([0xaa]);
+        TrieNode stateNode = new TrieNode(NodeType.Leaf, stateHash);
+        TrieNode storageNode = new TrieNode(NodeType.Branch, storageHash);
+
+        _cache.Set(null, in path, stateNode);
+        _cache.Set(storageAddress, in path, storageNode);
+
+        bool foundState = _cache.TryGet(null, in path, stateHash, out TrieNode? retrievedState);
+        bool foundStorage = _cache.TryGet(storageAddress, in path, storageHash, out TrieNode? retrievedStorage);
+
+        Assert.That(foundState, Is.True);
+        Assert.That(foundStorage, Is.True);
+        Assert.That(retrievedState, Is.SameAs(stateNode));
+        Assert.That(retrievedStorage, Is.SameAs(storageNode));
+    }
+}
diff --git a/src/Nethermind/Nethermind.State.Flat.Test/TrieWarmerTests.cs b/src/Nethermind/Nethermind.State.Flat.Test/TrieWarmerTests.cs
new file mode 100644
index 00000000000..fc6f52d5db8
--- /dev/null
+++ b/src/Nethermind/Nethermind.State.Flat.Test/TrieWarmerTests.cs
@@ -0,0 +1,91 @@
+// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited
+// SPDX-License-Identifier: LGPL-3.0-only
+
+using System.Threading;
+using System.Threading.Tasks;
+using Nethermind.Config;
+using Nethermind.Core;
+using Nethermind.Db;
+using Nethermind.Int256;
+using Nethermind.Logging;
+using Nethermind.State.Flat.ScopeProvider;
+using NSubstitute;
+using NUnit.Framework;
+
+namespace Nethermind.State.Flat.Test;
+
+// Tests for TrieWarmer: pushed address/slot jobs must reach the mock warmers
+// on the background worker threads with the sequence id passed through intact.
+[TestFixture]
+public class TrieWarmerTests
+{
+    private IProcessExitSource _processExitSource = null!;
+    private CancellationTokenSource _cts = null!;
+    private ILogManager _logManager = null!;
+    private FlatDbConfig _config = null!;
+
+    [SetUp]
+    public void SetUp()
+    {
+        _cts = new CancellationTokenSource();
+        // Fixed: the generic arguments of Substitute.For<T>() were stripped from
+        // this patch; the bare "Substitute.For()" calls do not compile.
+        _processExitSource = Substitute.For<IProcessExitSource>();
+        _processExitSource.Token.Returns(_cts.Token);
+        _logManager = LimboLogs.Instance;
+        _config = new FlatDbConfig { TrieWarmerWorkerCount = 2 };
+    }
+
+    [TearDown]
+    public void TearDown() => _cts?.Dispose();
+
+    [Test]
+    public async Task PushAddressJob_CallsWarmUpStateTrie()
+    {
+        TrieWarmer warmer = new TrieWarmer(_processExitSource, _logManager, _config);
+        try
+        {
+            ITrieWarmer.IAddressWarmer addressWarmer = Substitute.For<ITrieWarmer.IAddressWarmer>();
+            Address address = new Address("0x1234567890123456789012345678901234567890");
+
+            warmer.PushAddressJob(addressWarmer, address, sequenceId: 1);
+
+            // Jobs run on background workers; give them a moment to pick it up.
+            // NOTE(review): a fixed delay is inherently timing-sensitive on slow CI.
+            await Task.Delay(200);
+
+            addressWarmer.Received().WarmUpStateTrie(address, 1);
+        }
+        finally
+        {
+            // Fixed: cancel + dispose even when the assertion throws, so a failing
+            // test cannot leak worker threads into subsequent tests.
+            _cts.Cancel();
+            await warmer.DisposeAsync();
+        }
+    }
+
+    [Test]
+    public async Task PushSlotJob_CallsWarmUpStorageTrie()
+    {
+        TrieWarmer warmer = new TrieWarmer(_processExitSource, _logManager, _config);
+        try
+        {
+            ITrieWarmer.IStorageWarmer storageWarmer = Substitute.For<ITrieWarmer.IStorageWarmer>();
+            UInt256 index = 42;
+
+            warmer.PushSlotJob(storageWarmer, index, sequenceId: 5);
+
+            // Jobs run on background workers; give them a moment to pick it up.
+            await Task.Delay(200);
+
+            storageWarmer.Received().WarmUpStorageTrie(index, 5);
+        }
+        finally
+        {
+            _cts.Cancel();
+            await warmer.DisposeAsync();
+        }
+    }
+
+    [Test]
+    public async Task PushAddressJob_PassesCorrectSequenceId()
+    {
+        TrieWarmer warmer = new TrieWarmer(_processExitSource, _logManager, _config);
+        try
+        {
+            ITrieWarmer.IAddressWarmer addressWarmer = Substitute.For<ITrieWarmer.IAddressWarmer>();
+            Address address = new Address("0x1111111111111111111111111111111111111111");
+
+            warmer.PushAddressJob(addressWarmer, address, sequenceId: 999);
+
+            await Task.Delay(200);
+
+            // The sequence id must be forwarded to the warmer unchanged.
+            addressWarmer.Received().WarmUpStateTrie(address, 999);
+        }
+        finally
+        {
+            _cts.Cancel();
+            await warmer.DisposeAsync();
+        }
+    }
+}
diff --git a/src/Nethermind/Nethermind.State.Flat/FlatDbColumns.cs b/src/Nethermind/Nethermind.State.Flat/FlatDbColumns.cs
new file mode 100644
index 00000000000..12dddcbc57f
--- /dev/null
+++ b/src/Nethermind/Nethermind.State.Flat/FlatDbColumns.cs
@@ -0,0 +1,15 @@
+// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited
+// SPDX-License-Identifier: LGPL-3.0-only
+
+namespace Nethermind.State.Flat;
+
+/// Column families of the flat database layout.
+/// NOTE(review): the per-column descriptions below are inferred from the names
+/// and from this patch's other files — confirm against the persistence layer.
+public enum FlatDbColumns
+{
+    Metadata,       // bookkeeping for the database itself — confirm exact contents
+    Account,        // flat account entries
+    Storage,        // flat storage-slot entries
+    StateNodes,     // state trie nodes
+    StateTopNodes,  // presumably the upper levels of the state trie — confirm
+    StorageNodes,   // storage trie nodes
+    FallbackNodes,  // presumably nodes consulted when the above miss — confirm
+}
diff --git a/src/Nethermind/Nethermind.State.Flat/FlatDbManager.cs b/src/Nethermind/Nethermind.State.Flat/FlatDbManager.cs
new file mode 100644
index 00000000000..6bc14238de0
--- /dev/null
+++ b/src/Nethermind/Nethermind.State.Flat/FlatDbManager.cs
@@ -0,0 +1,411 @@
+// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited
+// SPDX-License-Identifier: LGPL-3.0-only
+
+using System.Collections.Concurrent;
+using System.Diagnostics;
+using System.Threading.Channels;
+using Nethermind.Config;
+using Nethermind.Core.Collections;
+using Nethermind.Core.Crypto;
+using Nethermind.Db;
+using Nethermind.Logging;
+using Nethermind.State.Flat.Persistence;
+using Nethermind.Trie.Pruning;
+
+namespace Nethermind.State.Flat;
+
+/// <summary>
+/// The main top level FlatDb orchestrator.
+/// </summary>
+public class FlatDbManager : IFlatDbManager, IAsyncDisposable
+{
+ private static readonly TimeSpan GatherGiveUpDeadline = TimeSpan.FromSeconds(5);
+
+ private readonly ILogger _logger;
+ private readonly IPersistenceManager _persistenceManager;
+ private readonly ISnapshotCompactor _snapshotCompactor;
+ private readonly ISnapshotRepository _snapshotRepository;
+ private readonly ITrieNodeCache _trieNodeCache;
+ private readonly IResourcePool _resourcePool;
+
+ // Cache for assembling `ReadOnlySnapshotBundle`. Its not actually slow, but its called 1.8k per sec so caching
+ // it save a decent amount of CPU.
+ private readonly ConcurrentDictionary _readonlySnapshotBundleCache = new();
+
+ // First it go to here
+ private readonly Task _compactorTask;
+ private readonly Channel _compactorJobs;
+
+ // And here in parallel.
+ // The node cache is kinda important for performance, so we want it populated as quickly as possible.
+ private readonly Task _populateTrieNodeCacheTask;
+ private readonly Channel