diff --git a/src/Nethermind/Nethermind.Blockchain/Synchronization/ISyncConfig.cs b/src/Nethermind/Nethermind.Blockchain/Synchronization/ISyncConfig.cs index acd70b687a4..1d6d44a13e0 100644 --- a/src/Nethermind/Nethermind.Blockchain/Synchronization/ISyncConfig.cs +++ b/src/Nethermind/Nethermind.Blockchain/Synchronization/ISyncConfig.cs @@ -165,7 +165,7 @@ public interface ISyncConfig : IConfig [ConfigItem(Description = "_Technical._ Memory budget for in memory dependencies of fast headers.", DefaultValue = "0", HiddenFromDocs = true)] ulong FastHeadersMemoryBudget { get; set; } - [ConfigItem(Description = "_Technical._ Enable storage range split.", DefaultValue = "false", HiddenFromDocs = true)] + [ConfigItem(Description = "_Technical._ Enable storage range split.", DefaultValue = "true", HiddenFromDocs = true)] bool EnableSnapSyncStorageRangeSplit { get; set; } [ConfigItem(Description = "_Technical._ Estimated size of memory for storing blocks during download.", DefaultValue = "200000000", HiddenFromDocs = true)] diff --git a/src/Nethermind/Nethermind.Blockchain/Synchronization/SyncConfig.cs b/src/Nethermind/Nethermind.Blockchain/Synchronization/SyncConfig.cs index 3060c83855a..1b7b5026ce3 100644 --- a/src/Nethermind/Nethermind.Blockchain/Synchronization/SyncConfig.cs +++ b/src/Nethermind/Nethermind.Blockchain/Synchronization/SyncConfig.cs @@ -84,7 +84,7 @@ public string? 
PivotHash public int HeaderStateDistance { get; set; } = 0; public ulong FastHeadersMemoryBudget { get; set; } = (ulong)128.MB(); - public bool EnableSnapSyncStorageRangeSplit { get; set; } = false; + public bool EnableSnapSyncStorageRangeSplit { get; set; } = true; public long ForwardSyncDownloadBufferMemoryBudget { get; set; } = 200.MiB(); public long ForwardSyncBlockProcessingQueueMemoryBudget { get; set; } = 200.MiB(); diff --git a/src/Nethermind/Nethermind.Core.Test/Builders/TestItem.Tree.cs b/src/Nethermind/Nethermind.Core.Test/Builders/TestItem.Tree.cs index 15b58d459c3..2033e43a19f 100644 --- a/src/Nethermind/Nethermind.Core.Test/Builders/TestItem.Tree.cs +++ b/src/Nethermind/Nethermind.Core.Test/Builders/TestItem.Tree.cs @@ -39,12 +39,12 @@ public static class Tree public static PathWithStorageSlot[] SlotsWithPaths = new PathWithStorageSlot[] { - new PathWithStorageSlot(new Hash256("0000000000000000000000000000000000000000000000000000000001101234"), Rlp.Encode(Bytes.FromHexString("0xab12000000000000000000000000000000000000000000000000000000000000000000000000000000")).Bytes), - new PathWithStorageSlot(new Hash256("0000000000000000000000000000000000000000000000000000000001112345"), Rlp.Encode(Bytes.FromHexString("0xab34000000000000000000000000000000000000000000000000000000000000000000000000000000")).Bytes), - new PathWithStorageSlot(new Hash256("0000000000000000000000000000000000000000000000000000000001113456"), Rlp.Encode(Bytes.FromHexString("0xab56000000000000000000000000000000000000000000000000000000000000000000000000000000")).Bytes), - new PathWithStorageSlot(new Hash256("0000000000000000000000000000000000000000000000000000000001114567"), Rlp.Encode(Bytes.FromHexString("0xab78000000000000000000000000000000000000000000000000000000000000000000000000000000")).Bytes), - new PathWithStorageSlot(new Hash256("0000000000000000000000000000000000000000000000000000000001123456"), 
Rlp.Encode(Bytes.FromHexString("0xab90000000000000000000000000000000000000000000000000000000000000000000000000000000")).Bytes), - new PathWithStorageSlot(new Hash256("0000000000000000000000000000000000000000000000000000000001123457"), Rlp.Encode(Bytes.FromHexString("0xab9a000000000000000000000000000000000000000000000000000000000000000000000000000000")).Bytes), + new PathWithStorageSlot(new Hash256("0000000000000000000000000000000000000000000000000000000001101234"), Rlp.Encode(Bytes.FromHexString("0xab12000000000000000000000000000000000000000000000000000000000000")).Bytes), + new PathWithStorageSlot(new Hash256("0000000000000000000000000000000000000000000000000000000001112345"), Rlp.Encode(Bytes.FromHexString("0xab34000000000000000000000000000000000000000000000000000000000000")).Bytes), + new PathWithStorageSlot(new Hash256("0000000000000000000000000000000000000000000000000000000001113456"), Rlp.Encode(Bytes.FromHexString("0xab56000000000000000000000000000000000000000000000000000000000000")).Bytes), + new PathWithStorageSlot(new Hash256("0000000000000000000000000000000000000000000000000000000001114567"), Rlp.Encode(Bytes.FromHexString("0xab78000000000000000000000000000000000000000000000000000000000000")).Bytes), + new PathWithStorageSlot(new Hash256("0000000000000000000000000000000000000000000000000000000001123456"), Rlp.Encode(Bytes.FromHexString("0xab90000000000000000000000000000000000000000000000000000000000000")).Bytes), + new PathWithStorageSlot(new Hash256("0000000000000000000000000000000000000000000000000000000001123457"), Rlp.Encode(Bytes.FromHexString("0xab9a000000000000000000000000000000000000000000000000000000000000")).Bytes), }; public static StateTree GetStateTree(ITrieStore? 
store = null) diff --git a/src/Nethermind/Nethermind.Core.Test/KeccakTests.cs b/src/Nethermind/Nethermind.Core.Test/KeccakTests.cs index f1437d98f85..1495f4dd26a 100644 --- a/src/Nethermind/Nethermind.Core.Test/KeccakTests.cs +++ b/src/Nethermind/Nethermind.Core.Test/KeccakTests.cs @@ -136,6 +136,28 @@ public void Sanity_checks(string hexString, string expected) stream.GetHash().Bytes.ToHexString().Should().Be(expected); } + [TestCase("0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000001", Description = "Normal increment")] + [TestCase("0x00000000000000000000000000000000000000000000000000000000000000ff", "0x0000000000000000000000000000000000000000000000000000000000000100", Description = "Byte boundary carry")] + [TestCase("0x000000000000000000000000000000000000000000000000000000000000ffff", "0x0000000000000000000000000000000000000000000000000000000000010000", Description = "Multiple byte carry")] + [TestCase("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", Description = "Overflow returns max")] + public void IncrementPath_ReturnsExpected(string inputHex, string expectedHex) + { + ValueHash256 path = new(inputHex); + ValueHash256 result = path.IncrementPath(); + Assert.That(result, Is.EqualTo(new ValueHash256(expectedHex))); + } + + [TestCase("0x0000000000000000000000000000000000000000000000000000000000000001", "0x0000000000000000000000000000000000000000000000000000000000000000", Description = "Normal decrement")] + [TestCase("0x0000000000000000000000000000000000000000000000000000000000000100", "0x00000000000000000000000000000000000000000000000000000000000000ff", Description = "Byte boundary borrow")] + [TestCase("0x0000000000000000000000000000000000000000000000000000000000010000", "0x000000000000000000000000000000000000000000000000000000000000ffff", Description = "Multiple byte borrow")] + 
[TestCase("0x0000000000000000000000000000000000000000000000000000000000000000", "0x0000000000000000000000000000000000000000000000000000000000000000", Description = "Underflow returns zero")] + public void DecrementPath_ReturnsExpected(string inputHex, string expectedHex) + { + ValueHash256 path = new(inputHex); + ValueHash256 result = path.DecrementPath(); + Assert.That(result, Is.EqualTo(new ValueHash256(expectedHex))); + } + public static string[][] KeccakCases = { ["0x4","f343681465b9efe82c933c3e8748c70cb8aa06539c361de20f72eac04e766393"], diff --git a/src/Nethermind/Nethermind.Core/Crypto/Hash256.cs b/src/Nethermind/Nethermind.Core/Crypto/Hash256.cs index 0ee2476c076..d881ca35524 100644 --- a/src/Nethermind/Nethermind.Core/Crypto/Hash256.cs +++ b/src/Nethermind/Nethermind.Core/Crypto/Hash256.cs @@ -97,6 +97,46 @@ public string ToShortString(bool withZeroX = true) public UInt256 ToUInt256(bool isBigEndian = true) => new UInt256(Bytes, isBigEndian: isBigEndian); public Hash256 ToHash256() => new Hash256(this); private bool IsZero => _bytes == default; + + public ValueHash256 IncrementPath() + { + ValueHash256 result = this; + Span bytes = result.BytesAsSpan; + + for (int i = 31; i >= 0; i--) + { + if (bytes[i] < 0xFF) + { + bytes[i]++; + return result; + } + bytes[i] = 0x00; + } + + // Overflow - return max (shouldn't happen in practice) + result = ValueKeccak.Zero; + result.BytesAsSpan.Fill(0xFF); + return result; + } + + public ValueHash256 DecrementPath() + { + ValueHash256 result = this; + Span bytes = result.BytesAsSpan; + + for (int i = 31; i >= 0; i--) + { + if (bytes[i] > 0) + { + bytes[i]--; + return result; + } + bytes[i] = 0xFF; + } + + // Underflow - return zero (shouldn't happen in practice) + return ValueKeccak.Zero; + } } public readonly struct Hash256AsKey(Hash256 key) : IEquatable, IComparable diff --git a/src/Nethermind/Nethermind.Core/Extensions/Bytes.cs b/src/Nethermind/Nethermind.Core/Extensions/Bytes.cs index 4fe4d044ee5..ed39f7d3a00 100644 
--- a/src/Nethermind/Nethermind.Core/Extensions/Bytes.cs +++ b/src/Nethermind/Nethermind.Core/Extensions/Bytes.cs @@ -85,6 +85,36 @@ public static int Compare(ReadOnlySpan x, ReadOnlySpan y) { return x.SequenceCompareTo(y); } + + public static int CompareWithCorrectLength(ReadOnlySpan x, ReadOnlySpan y) + { + if (Unsafe.AreSame(ref MemoryMarshal.GetReference(x), ref MemoryMarshal.GetReference(y)) && + x.Length == y.Length) + { + return 0; + } + + if (x.Length == 0) + { + return y.Length == 0 ? 0 : -1; // empty < non-empty + } + + for (int i = 0; i < x.Length; i++) + { + if (y.Length <= i) + { + return 1; // x is longer, so x > y + } + + int result = x[i].CompareTo(y[i]); + if (result != 0) + { + return result; + } + } + + return y.Length > x.Length ? 1 : 0; + } } public static readonly byte[] Zero32 = new byte[32]; diff --git a/src/Nethermind/Nethermind.Db/InMemoryColumnBatch.cs b/src/Nethermind/Nethermind.Db/InMemoryColumnBatch.cs index 9cff6e2fa95..6be5176861a 100644 --- a/src/Nethermind/Nethermind.Db/InMemoryColumnBatch.cs +++ b/src/Nethermind/Nethermind.Db/InMemoryColumnBatch.cs @@ -1,14 +1,14 @@ // SPDX-FileCopyrightText: 2022 Demerzel Solutions Limited // SPDX-License-Identifier: LGPL-3.0-only -using System.Collections.Generic; +using System.Collections.Concurrent; using Nethermind.Core; namespace Nethermind.Db { public class InMemoryColumnWriteBatch : IColumnsWriteBatch { - private readonly IList _underlyingBatch = new List(); + private readonly ConcurrentDictionary _writeBatches = new(); private readonly IColumnsDb _columnsDb; public InMemoryColumnWriteBatch(IColumnsDb columnsDb) @@ -18,14 +18,12 @@ public InMemoryColumnWriteBatch(IColumnsDb columnsDb) public IWriteBatch GetColumnBatch(TKey key) { - InMemoryWriteBatch writeBatch = new InMemoryWriteBatch(_columnsDb.GetColumnDb(key)); - _underlyingBatch.Add(writeBatch); - return writeBatch; + return _writeBatches.GetOrAdd(key, key => new InMemoryWriteBatch(_columnsDb.GetColumnDb(key))); } public void 
Dispose() { - foreach (IWriteBatch batch in _underlyingBatch) + foreach (IWriteBatch batch in _writeBatches.Values) { batch.Dispose(); } diff --git a/src/Nethermind/Nethermind.Db/InMemoryWriteBatch.cs b/src/Nethermind/Nethermind.Db/InMemoryWriteBatch.cs index c8054ac8b6d..8eb4eb706e3 100644 --- a/src/Nethermind/Nethermind.Db/InMemoryWriteBatch.cs +++ b/src/Nethermind/Nethermind.Db/InMemoryWriteBatch.cs @@ -2,16 +2,16 @@ // SPDX-License-Identifier: LGPL-3.0-only using System; -using System.Collections.Concurrent; -using System.Collections.Generic; using Nethermind.Core; +using Nethermind.Core.Collections; namespace Nethermind.Db { public class InMemoryWriteBatch : IWriteBatch { private readonly IKeyValueStore _store; - private readonly ConcurrentDictionary _currentItems = new(); + // Note: need to keep order of operation + private readonly ArrayPoolList<(byte[] Key, byte[]? Value)> _writes = new(1); private WriteFlags _writeFlags = WriteFlags.None; public InMemoryWriteBatch(IKeyValueStore storeWithNoBatchSupport) @@ -21,22 +21,23 @@ public InMemoryWriteBatch(IKeyValueStore storeWithNoBatchSupport) public void Dispose() { - foreach (KeyValuePair keyValuePair in _currentItems) + foreach ((byte[] Key, byte[]? Value) item in _writes) { - _store.Set(keyValuePair.Key, keyValuePair.Value, _writeFlags); + _store.Set(item.Key, item.Value, _writeFlags); } + _writes.Dispose(); GC.SuppressFinalize(this); } public void Clear() { - _currentItems.Clear(); + _writes.Clear(); } public void Set(ReadOnlySpan key, byte[]? 
value, WriteFlags flags = WriteFlags.None) { - _currentItems[key.ToArray()] = value; + _writes.Add((key.ToArray(), value)); _writeFlags = flags; } diff --git a/src/Nethermind/Nethermind.Init/Modules/FlatWorldStateModule.cs b/src/Nethermind/Nethermind.Init/Modules/FlatWorldStateModule.cs index c603ae8befe..a5f07d04eba 100644 --- a/src/Nethermind/Nethermind.Init/Modules/FlatWorldStateModule.cs +++ b/src/Nethermind/Nethermind.Init/Modules/FlatWorldStateModule.cs @@ -22,6 +22,11 @@ using Nethermind.State.Flat; using Nethermind.State.Flat.Persistence; using Nethermind.State.Flat.ScopeProvider; +using Nethermind.State.Flat.Sync; +using Nethermind.State.Flat.Sync.Snap; +using Nethermind.Synchronization.FastSync; +using Nethermind.Synchronization.ParallelSync; +using Nethermind.Synchronization.SnapSync; namespace Nethermind.Init.Modules; @@ -66,6 +71,18 @@ protected override void Load(ContainerBuilder builder) .AddSingleton() .Add() + // Sync components + .AddSingleton() + .AddSingleton((ctx) => new FlatStateRootIndex( + ctx.Resolve(), + ctx.Resolve().SnapServingMaxDepth)) + .AddSingleton() + .Intercept((syncConfig) => + { + syncConfig.SnapServingEnabled = true; + }) + .AddSingleton() + // Persistences .AddColumnDatabase(DbNames.Flat) .AddSingleton() @@ -101,21 +118,6 @@ protected override void Load(ContainerBuilder builder) .AddSingleton() .AddStep(typeof(ImportFlatDb)); } - else - { - builder - .AddDecorator((ctx, syncConfig) => - { - ILogger logger = ctx.Resolve().GetClassLogger(); - if (syncConfig.FastSync || syncConfig.SnapSync) - { - if (logger.IsWarn) logger.Warn("Fast sync and snap sync turned off with FlatDB"); - syncConfig.FastSync = false; - syncConfig.SnapSync = false; - } - return syncConfig; - }); - } } /// diff --git a/src/Nethermind/Nethermind.Runner/packages.lock.json b/src/Nethermind/Nethermind.Runner/packages.lock.json index b406492fc3a..925f29bb4f9 100644 --- a/src/Nethermind/Nethermind.Runner/packages.lock.json +++ 
b/src/Nethermind/Nethermind.Runner/packages.lock.json @@ -1121,11 +1121,13 @@ "nethermind.state.flat": { "type": "Project", "dependencies": { + "Nethermind.Blockchain": "[1.37.0-unstable, )", "Nethermind.Core": "[1.37.0-unstable, )", "Nethermind.Db": "[1.37.0-unstable, )", "Nethermind.Evm": "[1.37.0-unstable, )", "Nethermind.Serialization.Rlp": "[1.37.0-unstable, )", "Nethermind.State": "[1.37.0-unstable, )", + "Nethermind.Synchronization": "[1.37.0-unstable, )", "Nethermind.Trie": "[1.37.0-unstable, )", "System.IO.Hashing": "[10.0.2, )" } diff --git a/src/Nethermind/Nethermind.State.Flat.Test/DeletionRangeCalculationTests.cs b/src/Nethermind/Nethermind.State.Flat.Test/DeletionRangeCalculationTests.cs new file mode 100644 index 00000000000..a44ac10ac79 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat.Test/DeletionRangeCalculationTests.cs @@ -0,0 +1,377 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System; +using System.Collections.Generic; +using System.Linq; +using Nethermind.Core.Collections; +using Nethermind.Core.Crypto; +using Nethermind.State.Flat.Sync; +using Nethermind.Trie; +using Nethermind.Trie.Pruning; +using NUnit.Framework; + +namespace Nethermind.State.Flat.Test; + +[TestFixture] +public class DeletionRangeCalculationTests +{ + private static readonly byte[] DummyValue = new byte[33]; + private static readonly byte[] InlineValue = new byte[5]; // Small enough for inline (RLP < 32 bytes) + + private static IEnumerable ComputeDeletionRangesTestCases() + { + // Note to AI: please keep the deletion range to left so that its easy to read. + + // === Helper functions === + static byte[] NibblesFromHexString(string hex) => + hex.Select(c => (byte)(c >= 'a' ? c - 'a' + 10 : c >= 'A' ? 
c - 'A' + 10 : c - '0')).ToArray(); + + TrieNode CreateBranchWithChildren(ushort childBitset) + { + TrieNode branch = TrieNodeFactory.CreateBranch(); + for (int i = 0; i < 16; i++) + if ((childBitset & (1 << i)) != 0) + branch[i] = TrieNodeFactory.CreateLeaf([0], DummyValue); + return branch; + } + + TrieNode CreateBranchWithInlineChildren(ushort childBitset) + { + TrieNode branch = TrieNodeFactory.CreateBranch(); + for (int i = 0; i < 16; i++) + if ((childBitset & (1 << i)) != 0) + branch[i] = TrieNodeFactory.CreateLeaf([0], InlineValue); + return branch; + } + + TrieNode CreateLeaf(string hexKey) => + TrieNodeFactory.CreateLeaf(NibblesFromHexString(hexKey), DummyValue); + + TrieNode CreateExtension(string hexKey) => + TrieNodeFactory.CreateExtension(NibblesFromHexString(hexKey), TrieNodeFactory.CreateBranch()); + + // === Branch tests === + + // With path prefix "ab", children 0, 1, 2 null (consecutive) + yield return new TestCaseData( + "ab", + CreateBranchWithChildren(0b1111_1111_1111_1000), + null, + new (string, string)[] + { + ("0xab00000000000000000000000000000000000000000000000000000000000000", "0xab2fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } + ).SetName("Branch: with path prefix, consecutive null 0-2 yield 1 range"); + + // Children 3, 4, 5 are null with path "abcde" + yield return new TestCaseData( + "abcde", + CreateBranchWithChildren(0b1111_1111_1100_0111), + null, + new (string, string)[] + { + ("0xabcde30000000000000000000000000000000000000000000000000000000000", "0xabcde5ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } + ).SetName("Branch: with 5-nibble path, consecutive null 3-5 yield 1 range"); + + // Children 0, 1 and 14, 15 null with path "abcde" + yield return new TestCaseData( + "abcde", + CreateBranchWithChildren(0b0011_1111_1111_1100), + null, + new (string, string)[] + { + ("0xabcde00000000000000000000000000000000000000000000000000000000000", 
"0xabcde1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), + ("0xabcdee0000000000000000000000000000000000000000000000000000000000", "0xabcdefffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } + ).SetName("Branch: with 5-nibble path, two null groups yield 2 ranges"); + + // Single null child 5 with path "abcde" + yield return new TestCaseData( + "abcde", + CreateBranchWithChildren(0b1111_1111_1101_1111), + null, + new (string, string)[] + { + ("0xabcde50000000000000000000000000000000000000000000000000000000000", "0xabcde5ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } + ).SetName("Branch: with 5-nibble path, single null child yields 1 range"); + + // === Leaf tests === + // Leaf logic changed: now just deletes the whole subtree range (path.ToLowerBound to path.ToUpperBound) + + // Leaf with path prefix "ab" - deletes ab00...00 to abff...ff + yield return new TestCaseData( + "ab", + CreateLeaf("500000000000000000000000000000000000000000000000000000000000"), + null, + new (string, string)[] + { + ("0xab00000000000000000000000000000000000000000000000000000000000000", "0xabffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } + ).SetName("Leaf: with path prefix deletes subtree range"); + + // Leaf with path "abcde" - deletes abcde00...00 to abcdef...ff + yield return new TestCaseData( + "abcde", + CreateLeaf("00000000000000000000000000000000000000000000000000000000000"), + null, + new (string, string)[] + { + ("0xabcde00000000000000000000000000000000000000000000000000000000000", "0xabcdefffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } + ).SetName("Leaf: with 5-nibble path deletes subtree range"); + + // Leaf with path "abcde" (different key, same result) + yield return new TestCaseData( + "abcde", + CreateLeaf("fffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), + null, + new (string, string)[] + { + ("0xabcde00000000000000000000000000000000000000000000000000000000000", 
"0xabcdefffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } + ).SetName("Leaf: with 5-nibble path deletes subtree range regardless of key"); + + // Leaf with path "abcde" with middle key (same result) + yield return new TestCaseData( + "abcde", + CreateLeaf("80000000000000000000000000000000000000000000000000000000000"), + null, + new (string, string)[] + { + ("0xabcde00000000000000000000000000000000000000000000000000000000000", "0xabcdefffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } + ).SetName("Leaf: with 5-nibble path deletes subtree range regardless of key"); + + // === Extension tests === + + // Extension at start with path "abcde" + yield return new TestCaseData( + "abcde", + CreateExtension("0000"), + null, + new (string, string)[] + { + ("0xabcde00010000000000000000000000000000000000000000000000000000000", "0xabcdefffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } + ).SetName("Extension: with 5-nibble path, at start yields only after gap"); + + // Extension at end with path "abcde" - only yields before gap (no overflow into next prefix range) + yield return new TestCaseData( + "abcde", + CreateExtension("ffff"), + null, + new (string, string)[] + { + ("0xabcde00000000000000000000000000000000000000000000000000000000000", "0xabcdefffefffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } + ).SetName("Extension: with 5-nibble path, at end yields only before gap"); + + // Extension in middle with path "abcde" + yield return new TestCaseData( + "abcde", + CreateExtension("8"), + null, + new (string, string)[] + { + ("0xabcde00000000000000000000000000000000000000000000000000000000000", "0xabcde7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), + ("0xabcde90000000000000000000000000000000000000000000000000000000000", "0xabcdefffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } + ).SetName("Extension: with 5-nibble path, in middle yields both gaps"); + + // Extension with longer key 
"12345" with path "abcde" + yield return new TestCaseData( + "abcde", + CreateExtension("12345"), + null, + new (string, string)[] + { + ("0xabcde00000000000000000000000000000000000000000000000000000000000", "0xabcde12344ffffffffffffffffffffffffffffffffffffffffffffffffffffff"), + ("0xabcde12346000000000000000000000000000000000000000000000000000000", "0xabcdefffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } + ).SetName("Extension: with 5-nibble path, longer key creates narrow survival zone"); + + // === Optimized deletion tests (with existing nodes) === + + // Branch to Branch: Only deleted removed children + // Existing: children at 0, 1 (null at 2-15); New: children at 0, 3 (null at 1-2, 4-15) + // Only position 1 needs deletion (went from non-null to null) + yield return new TestCaseData( + "abcde", + CreateBranchWithChildren(0b0000_0000_0000_1001), + CreateBranchWithChildren(0b0000_0000_0000_0011), + new (string, string)[] + { + ("0xabcde10000000000000000000000000000000000000000000000000000000000", "0xabcde1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } + ).SetName("Branch→Branch: Existing(0,1) to New(0,3) deletes only child 1"); + + // Same children structure - no deletions + yield return new TestCaseData( + "abcde", + CreateBranchWithChildren(0b0000_0000_0010_0000), + CreateBranchWithChildren(0b0000_0000_0010_0000), + Array.Empty<(string, string)>() + ).SetName("Branch→Branch: Same structure yields no deletions"); + + // All children to single child - deletes 0-4, 6-15 + yield return new TestCaseData( + "abcde", + CreateBranchWithChildren(0b0000_0000_0010_0000), + CreateBranchWithChildren(0b1111_1111_1111_1111), + new (string, string)[] + { + ("0xabcde00000000000000000000000000000000000000000000000000000000000", "0xabcde4ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), + ("0xabcde60000000000000000000000000000000000000000000000000000000000", "0xabcdefffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } 
+ ).SetName("Branch→Branch: All children to single child deletes 0-4, 6-15"); + + // Branch(inline)→Branch(inline): Both have inline children at shared positions + // newNode: inline at 0, 3 (bitset 0b1001); existingNode: inline at 0, 1 (bitset 0b0011) + // Position 0: newNode has inline (no hash), existing has inline → DELETE + // Position 1: newNode is null, existing has inline → DELETE + // Position 3: newNode has inline, existing is null → no delete + yield return new TestCaseData( + "abcde", + CreateBranchWithInlineChildren(0b0000_0000_0000_1001), + CreateBranchWithInlineChildren(0b0000_0000_0000_0011), + new (string, string)[] + { + ("0xabcde00000000000000000000000000000000000000000000000000000000000", "0xabcde1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } + ).SetName("Branch(inline)→Branch(inline): Inline children at shared positions trigger deletion"); + + // Branch(hash)→Branch(inline): Existing has hash refs, new has inline children + // newNode: inline at 0, 3 (bitset 0b1001); existingNode: hash ref at 0, 1 (bitset 0b0011) + // Position 0: newNode has inline (no hash), existing has hash → DELETE + // Position 1: newNode is null, existing has hash → DELETE + yield return new TestCaseData( + "abcde", + CreateBranchWithInlineChildren(0b0000_0000_0000_1001), + CreateBranchWithChildren(0b0000_0000_0000_0011), + new (string, string)[] + { + ("0xabcde00000000000000000000000000000000000000000000000000000000000", "0xabcde1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } + ).SetName("Branch(hash)→Branch(inline): Inline new replaces hash existing"); + + // === Bottom-up sync: hash ref children handle their own deletion === + + // Branch(inline)→Branch(hash): Existing has inline, new has hash refs + // newNode: hash at 0, 3; existingNode: inline at 0, 1 + // Position 0: newNode has hash, existing has inline → NO DELETE (child handles) + // Position 1: newNode is null, existing has inline → DELETE + // Position 3: newNode has hash, 
existing is null → no delete + yield return new TestCaseData( + "abcde", + CreateBranchWithChildren(0b0000_0000_0000_1001), + CreateBranchWithInlineChildren(0b0000_0000_0000_0011), + new (string, string)[] + { + ("0xabcde10000000000000000000000000000000000000000000000000000000000", "0xabcde1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } + ).SetName("Branch(inline)→Branch(hash): inline→hash at pos 0 NOT deleted (child handles), inline→null at pos 1 deleted"); + + // Branch(hash)→Branch(partial null): Existing has hash refs, new removes some children + // newNode: hash at 3 only; existingNode: hash at 0, 1, 3 + // Position 0: newNode is null, existing has hash → DELETE (no child to handle) + // Position 1: newNode is null, existing has hash → DELETE + // Position 3: newNode has hash, existing has hash → NO DELETE + yield return new TestCaseData( + "abcde", + CreateBranchWithChildren(0b0000_0000_0000_1000), + CreateBranchWithChildren(0b0000_0000_0000_1011), + new (string, string)[] + { + ("0xabcde00000000000000000000000000000000000000000000000000000000000", "0xabcde1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } + ).SetName("Branch(hash)→Branch(hash): hash→null at pos 0,1 deleted (no child), hash→hash at pos 3 NOT deleted"); + + // Branch(inline)→Branch(partial hash): Mixed transitions + // newNode: hash at 3 only; existingNode: inline at 0, 1, 3 + // Position 0: newNode is null, existing has inline → DELETE + // Position 1: newNode is null, existing has inline → DELETE + // Position 3: newNode has hash, existing has inline → NO DELETE (child handles) + yield return new TestCaseData( + "abcde", + CreateBranchWithChildren(0b0000_0000_0000_1000), + CreateBranchWithInlineChildren(0b0000_0000_0000_1011), + new (string, string)[] + { + ("0xabcde00000000000000000000000000000000000000000000000000000000000", "0xabcde1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } + ).SetName("Branch(inline)→Branch(hash): inline→null at pos 
0,1 deleted, inline→hash at pos 3 NOT deleted (child handles)"); + + // Same type, same key: No deletion - Leaf → Leaf with same key + yield return new TestCaseData( + "abcde", + CreateLeaf("50000000000000000000000000000000000000000000000000000000000"), + CreateLeaf("50000000000000000000000000000000000000000000000000000000000"), + Array.Empty<(string, string)>() + ).SetName("Leaf→Leaf: Same key yields no deletions"); + + // Extension → Extension with same key + yield return new TestCaseData( + "abcde", + CreateExtension("56"), + CreateExtension("56"), + Array.Empty<(string, string)>() + ).SetName("Extension→Extension: Same key yields no deletions"); + + // Cross-type transitions: Leaf at 5abc... → Branch with child 5 non-null + yield return new TestCaseData( + "abcde", + CreateBranchWithChildren(0b0000_0000_0010_0000), + CreateLeaf("5abc0000000000000000000000000000000000000000000000000000000"), + Array.Empty<(string, string)>() + ).SetName("Leaf→Branch: Optimized deletion - only childNibble considered, child 5 has hash ref"); + + // Leaf at 5abc... 
→ Branch with child 6 non-null + yield return new TestCaseData( + "abcde", + CreateBranchWithChildren(0b0000_0000_0100_0000), + CreateLeaf("5abc0000000000000000000000000000000000000000000000000000000"), + new (string, string)[] + { + ("0xabcde50000000000000000000000000000000000000000000000000000000000", "0xabcde5ffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } + ).SetName("Leaf→Branch: Optimized deletion - childNibble=5, new null at 5 → delete 5 only"); + + // Extension("56") → Leaf at 5678...: Leaf just deletes its whole range + yield return new TestCaseData( + "abcde", + CreateLeaf("56780000000000000000000000000000000000000000000000000000000"), + CreateExtension("56"), + new (string, string)[] + { + ("0xabcde00000000000000000000000000000000000000000000000000000000000", "0xabcdefffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } + ).SetName("Extension→Leaf: Leaf deletes entire range"); + } + + [TestCaseSource(nameof(ComputeDeletionRangesTestCases))] + public void ComputeDeletionRanges_ReturnsExpectedRanges(string hexPath, TrieNode node, TrieNode? existingNode, (string From, string To)[] expectedRanges) + { + // Note to AI: You are not allowed to change this method. 
+ + TreePath path = TreePath.FromHexString(hexPath); + node.ResolveKey(NullTrieNodeResolver.Instance, ref path); + existingNode?.ResolveKey(NullTrieNodeResolver.Instance, ref path); + + path = TreePath.FromHexString(hexPath); + RefList16 ranges = new(); + FlatTreeSyncStore.ComputeDeletionRanges(path, node, existingNode, ref ranges); + + Assert.That(ranges.Count, Is.EqualTo(expectedRanges.Length)); + for (int i = 0; i < expectedRanges.Length; i++) + { + Assert.That(expectedRanges[i].From.Length, Is.EqualTo(66)); // 64 hex chars + "0x" prefix + Assert.That(expectedRanges[i].To.Length, Is.EqualTo(66)); + Assert.That(ranges[i].From, Is.EqualTo(new ValueHash256(expectedRanges[i].From)), $"Range[{i}].From mismatch"); + Assert.That(ranges[i].To, Is.EqualTo(new ValueHash256(expectedRanges[i].To)), $"Range[{i}].To mismatch"); + } + } + +} diff --git a/src/Nethermind/Nethermind.State.Flat.Test/FlatTrieVerifierTests.cs b/src/Nethermind/Nethermind.State.Flat.Test/FlatTrieVerifierTests.cs index 381201f1fa4..b40c7a390a8 100644 --- a/src/Nethermind/Nethermind.State.Flat.Test/FlatTrieVerifierTests.cs +++ b/src/Nethermind/Nethermind.State.Flat.Test/FlatTrieVerifierTests.cs @@ -3,6 +3,7 @@ using System; using System.Collections.Generic; +using System.Linq; using System.Threading; using Nethermind.Core; using Nethermind.Core.Crypto; @@ -389,4 +390,5 @@ public void Verify_MixedScenario_DetectsAllIssues() Assert.That(verifier.Stats.MismatchedAccount, Is.EqualTo(1)); // Account C mismatched Assert.That(verifier.Stats.MissingInFlat, Is.EqualTo(1)); // Account B missing in flat } + } diff --git a/src/Nethermind/Nethermind.State.Flat.Test/Sync/FlatEntryWriterTests.cs b/src/Nethermind/Nethermind.State.Flat.Test/Sync/FlatEntryWriterTests.cs new file mode 100644 index 00000000000..71f53172462 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat.Test/Sync/FlatEntryWriterTests.cs @@ -0,0 +1,179 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: 
LGPL-3.0-only + +using System.Linq; +using Nethermind.Core; +using Nethermind.Core.Crypto; +using Nethermind.Core.Extensions; +using Nethermind.Serialization.Rlp; +using Nethermind.State.Flat.Persistence; +using Nethermind.State.Flat.Sync; +using Nethermind.Trie; +using Nethermind.Trie.Pruning; +using NSubstitute; +using NUnit.Framework; + +namespace Nethermind.State.Flat.Test.Sync; + +[TestFixture] +public class FlatEntryWriterTests +{ + private static readonly byte[] SmallSlotValue = [0x01]; + + // 56-nibble key creates leaf RLP > 32 bytes (becomes hash reference) + private const string LargeKeyHex = "1234567890abcdef1234567890abcdef1234567890abcdef12345678"; + + private static byte[] Nibbles(string hex) => + hex.Select(c => (byte)(c >= 'a' ? c - 'a' + 10 : c >= 'A' ? c - 'A' + 10 : c - '0')).ToArray(); + + private static byte[] SmallAccountRlp() => new AccountDecoder().Encode(new Account(0, 1)).Bytes; + + #region WriteAccountFlatEntries Tests + + [TestCase( + "1234567890abcdef1234567890abcdef1234567890abcdef1234567890ab", // 60 nibble path + "cdef", // 4 nibble leaf key + "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef")] // 64 nibbles + [TestCase( + "abcdef1234567890abcdef1234567890abcdef1234567890abcdef123456", // 60 nibble path + "7890", // 4 nibble leaf key + "abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890")] // 64 nibbles + public void WriteAccountFlatEntries_LeafNode_WritesAccountAtCorrectPath( + string pathHex, string leafKeyHex, string expectedPathHex) + { + IPersistence.IWriteBatch writeBatch = Substitute.For(); + TreePath path = TreePath.FromHexString(pathHex); + TrieNode leaf = TrieNodeFactory.CreateLeaf(Nibbles(leafKeyHex), SmallAccountRlp()); + TreePath empty = TreePath.Empty; + leaf.ResolveKey(NullTrieNodeResolver.Instance, ref empty); + + FlatEntryWriter.WriteAccountFlatEntries(writeBatch, path, leaf); + + writeBatch.Received(1).SetAccountRaw(new Hash256(Bytes.FromHexString(expectedPathHex)), Arg.Any()); + } + + 
#endregion + + #region WriteStorageFlatEntries Tests + + [TestCase( + "abcdef1234567890abcdef1234567890abcdef1234567890abcdef123456", // 60 nibble path + "7890", // 4 nibble leaf key + "abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890")] // 64 nibbles + [TestCase( + "1234567890abcdef1234567890abcdef1234567890abcdef1234567890ab", // 60 nibble path + "cdef", // 4 nibble leaf key + "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef")] // 64 nibbles + public void WriteStorageFlatEntries_LeafNode_WritesStorageAtCorrectPath( + string pathHex, string leafKeyHex, string expectedPathHex) + { + IPersistence.IWriteBatch writeBatch = Substitute.For(); + Hash256 address = Keccak.Compute("address"); + TreePath path = TreePath.FromHexString(pathHex); + TrieNode leaf = TrieNodeFactory.CreateLeaf(Nibbles(leafKeyHex), SmallSlotValue); + TreePath empty = TreePath.Empty; + leaf.ResolveKey(NullTrieNodeResolver.Instance, ref empty); + + FlatEntryWriter.WriteStorageFlatEntries(writeBatch, address, path, leaf); + + writeBatch.Received(1).SetStorageRaw(address, new Hash256(Bytes.FromHexString(expectedPathHex)), Arg.Any()); + } + + // path (62 nibbles) + branch index (1) + leaf key (1) = 64 nibbles + [TestCase(3, 7, "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcd", "a", "b", + "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcd3a", + "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcd7b")] + [TestCase(0, 15, "abcdef1234567890abcdef1234567890abcdef1234567890abcdef12345678", "c", "d", + "abcdef1234567890abcdef1234567890abcdef1234567890abcdef123456780c", + "abcdef1234567890abcdef1234567890abcdef1234567890abcdef12345678fd")] + public void WriteStorageFlatEntries_BranchWithInlineLeaves_WritesAllInlineLeavesAtCorrectPaths( + int index1, int index2, string pathHex, string leafKey1Hex, string leafKey2Hex, string expectedPath1Hex, string expectedPath2Hex) + { + IPersistence.IWriteBatch writeBatch = Substitute.For(); + Hash256 
address = Keccak.Compute("address"); + TreePath path = TreePath.FromHexString(pathHex); + + TrieNode branch = TrieNodeFactory.CreateBranch(); + branch[index1] = TrieNodeFactory.CreateLeaf(Nibbles(leafKey1Hex), SmallSlotValue); + branch[index2] = TrieNodeFactory.CreateLeaf(Nibbles(leafKey2Hex), SmallSlotValue); + TreePath empty = TreePath.Empty; + branch.ResolveKey(NullTrieNodeResolver.Instance, ref empty); + + FlatEntryWriter.WriteStorageFlatEntries(writeBatch, address, path, branch); + + writeBatch.Received(1).SetStorageRaw(address, new Hash256(Bytes.FromHexString(expectedPath1Hex)), Arg.Any()); + writeBatch.Received(1).SetStorageRaw(address, new Hash256(Bytes.FromHexString(expectedPath2Hex)), Arg.Any()); + } + + [Test] + public void WriteStorageFlatEntries_BranchWithMixedChildren_OnlyWritesInlineLeaves() + { + IPersistence.IWriteBatch writeBatch = Substitute.For(); + Hash256 address = Keccak.Compute("address"); + TreePath path = TreePath.FromHexString("abcd1234"); + + TrieNode branch = TrieNodeFactory.CreateBranch(); + branch[3] = TrieNodeFactory.CreateLeaf([5], SmallSlotValue); // Inline (small) + branch[7] = TrieNodeFactory.CreateLeaf(Nibbles(LargeKeyHex), SmallSlotValue); // Hash ref (large) + TreePath empty = TreePath.Empty; + branch.ResolveKey(NullTrieNodeResolver.Instance, ref empty); + + FlatEntryWriter.WriteStorageFlatEntries(writeBatch, address, path, branch); + + writeBatch.Received(1).SetStorageRaw(address, new Hash256(Bytes.FromHexString("abcd123435000000000000000000000000000000000000000000000000000000")), Arg.Any()); + } + + #endregion + + #region BranchInlineChildLeafEnumerator Tests + + [TestCase("abcd", new[] { 5, 10 }, 2)] + [TestCase("1234", new[] { 0, 8, 15 }, 3)] + [TestCase("5678", new[] { 3 }, 1)] + public void BranchEnumerator_WithInlineLeaves_YieldsCorrectCountAndReturnsFullPath( + string pathHex, int[] indices, int expectedCount) + { + TrieNode branch = TrieNodeFactory.CreateBranch(); + foreach (int idx in indices) + branch[idx] = 
TrieNodeFactory.CreateLeaf([0xa], SmallSlotValue); + TreePath empty = TreePath.Empty; + branch.ResolveKey(NullTrieNodeResolver.Instance, ref empty); + + TreePath path = TreePath.FromHexString(pathHex); + FlatEntryWriter.BranchInlineChildLeafEnumerator enumerator = new(ref path, branch); + + int count = 0; + while (enumerator.MoveNext()) + { + count++; + // CurrentPath is now ValueHash256 (complete 64-nibble path) + Assert.That(enumerator.CurrentPath, Is.Not.EqualTo(default(ValueHash256))); + } + + Assert.That(count, Is.EqualTo(expectedCount)); + } + + [Test] + public void BranchEnumerator_SkipsHashReferences_AndCurrentNodeReturnsValidLeaf() + { + TrieNode branch = TrieNodeFactory.CreateBranch(); + branch[3] = TrieNodeFactory.CreateLeaf([0xb], SmallSlotValue); // Inline + branch[7] = TrieNodeFactory.CreateLeaf(Nibbles(LargeKeyHex), SmallSlotValue); // Hash ref + TreePath empty = TreePath.Empty; + branch.ResolveKey(NullTrieNodeResolver.Instance, ref empty); + + TreePath path = TreePath.FromHexString("5678"); + FlatEntryWriter.BranchInlineChildLeafEnumerator enumerator = new(ref path, branch); + + int count = 0; + while (enumerator.MoveNext()) + { + count++; + Assert.That(enumerator.CurrentNode.IsLeaf, Is.True); + } + + Assert.That(count, Is.EqualTo(1)); + } + + #endregion +} diff --git a/src/Nethermind/Nethermind.State.Flat/IPersistenceManager.cs b/src/Nethermind/Nethermind.State.Flat/IPersistenceManager.cs index 8a55edf811b..eb644609712 100644 --- a/src/Nethermind/Nethermind.State.Flat/IPersistenceManager.cs +++ b/src/Nethermind/Nethermind.State.Flat/IPersistenceManager.cs @@ -11,4 +11,5 @@ public interface IPersistenceManager StateId GetCurrentPersistedStateId(); void AddToPersistence(StateId latestSnapshot); StateId FlushToPersistence(); + void ResetPersistedStateId(); } diff --git a/src/Nethermind/Nethermind.State.Flat/Nethermind.State.Flat.csproj b/src/Nethermind/Nethermind.State.Flat/Nethermind.State.Flat.csproj index f0c27bcc209..d705dd8aac0 100644 --- 
a/src/Nethermind/Nethermind.State.Flat/Nethermind.State.Flat.csproj +++ b/src/Nethermind/Nethermind.State.Flat/Nethermind.State.Flat.csproj @@ -7,10 +7,12 @@ true + + diff --git a/src/Nethermind/Nethermind.State.Flat/Persistence/BaseFlatPersistence.cs b/src/Nethermind/Nethermind.State.Flat/Persistence/BaseFlatPersistence.cs index 084dcb18bb7..a735356a97a 100644 --- a/src/Nethermind/Nethermind.State.Flat/Persistence/BaseFlatPersistence.cs +++ b/src/Nethermind/Nethermind.State.Flat/Persistence/BaseFlatPersistence.cs @@ -198,10 +198,11 @@ public bool MoveNext() public void Dispose() => view.Dispose(); } - public readonly struct WriteBatch( + public struct WriteBatch( + ISortedKeyValueStore stateSnap, ISortedKeyValueStore storageSnap, - IWriteOnlyKeyValueStore state, - IWriteOnlyKeyValueStore storage, + IWriteBatch state, + IWriteBatch storage, WriteFlags flags ) : BasePersistence.IHashedFlatWriteBatch { @@ -253,5 +254,45 @@ public void SetAccount(in ValueHash256 addrHash, ReadOnlySpan account) ReadOnlySpan key = addrHash.Bytes[..AccountKeyLength]; state.PutSpan(key, account, flags); } + + public void DeleteAccountRange(in ValueHash256 fromPath, in ValueHash256 toPath) + { + // Account keys are the first 20 bytes of the address hash + Span firstKey = stackalloc byte[AccountKeyLength]; + Span lastKey = stackalloc byte[AccountKeyLength + 1]; // +1 for exclusive upper bound + fromPath.Bytes[..AccountKeyLength].CopyTo(firstKey); + toPath.Bytes[..AccountKeyLength].CopyTo(lastKey); + lastKey[AccountKeyLength] = 0; // Exclusive upper bound + + using ISortedView view = stateSnap.GetViewBetween(firstKey, lastKey); + while (view.MoveNext()) + { + if (view.CurrentKey.Length != AccountKeyLength) continue; + state.Remove(view.CurrentKey); + } + } + + public void DeleteStorageRange(in ValueHash256 addressHash, in ValueHash256 fromPath, in ValueHash256 toPath) + { + // Storage key layout: <4-byte-addr><32-byte-slot><16-byte-addr> + // We need to iterate all keys in the slot range 
with the same address + Span firstKey = stackalloc byte[StorageKeyLength]; + Span lastKey = stackalloc byte[StorageKeyLength + 1]; + EncodeStorageKeyHashedWithShortPrefix(firstKey, addressHash, fromPath); + EncodeStorageKeyHashedWithShortPrefix(lastKey[..StorageKeyLength], addressHash, toPath); + lastKey[StorageKeyLength] = 0; // Exclusive upper bound + + using ISortedView view = storageSnap.GetViewBetween(firstKey, lastKey); + while (view.MoveNext()) + { + if (view.CurrentKey.Length != StorageKeyLength) continue; + + // Verify the 16-byte address suffix matches + if (Bytes.AreEqual(view.CurrentKey[(StoragePrefixPortion + StorageSlotKeySize)..], addressHash.Bytes[StoragePrefixPortion..(StoragePrefixPortion + StoragePostfixPortion)])) + { + storage.Remove(view.CurrentKey); + } + } + } } } diff --git a/src/Nethermind/Nethermind.State.Flat/Persistence/BasePersistence.cs b/src/Nethermind/Nethermind.State.Flat/Persistence/BasePersistence.cs index 01f9fd71e5d..d53572cad6e 100644 --- a/src/Nethermind/Nethermind.State.Flat/Persistence/BasePersistence.cs +++ b/src/Nethermind/Nethermind.State.Flat/Persistence/BasePersistence.cs @@ -55,6 +55,10 @@ public interface IHashedFlatWriteBatch public void SetAccount(in ValueHash256 address, ReadOnlySpan value); public void SetStorage(in ValueHash256 address, in ValueHash256 slotHash, in SlotValue? value); + + public void DeleteAccountRange(in ValueHash256 fromPath, in ValueHash256 toPath); + + public void DeleteStorageRange(in ValueHash256 addressHash, in ValueHash256 fromPath, in ValueHash256 toPath); } public interface IFlatReader @@ -79,6 +83,10 @@ public interface IFlatWriteBatch public void SetStorageRaw(Hash256 addrHash, Hash256 slotHash, in SlotValue? 
value); public void SetAccountRaw(Hash256 addrHash, Account account); + + public void DeleteAccountRange(in ValueHash256 fromPath, in ValueHash256 toPath); + + public void DeleteStorageRange(in ValueHash256 addressHash, in ValueHash256 fromPath, in ValueHash256 toPath); } public interface ITrieReader @@ -92,6 +100,8 @@ public interface ITrieWriteBatch public void SelfDestruct(in ValueHash256 address); public void SetStateTrieNode(in TreePath path, TrieNode tnValue); public void SetStorageTrieNode(Hash256 address, in TreePath path, TrieNode tnValue); + public void DeleteStateTrieNodeRange(in TreePath fromPath, in TreePath toPath); + public void DeleteStorageTrieNodeRange(in ValueHash256 addressHash, in TreePath fromPath, in TreePath toPath); } public struct ToHashedWriteBatch( @@ -132,6 +142,12 @@ public void SetAccountRaw(Hash256 addrHash, Account account) using NettyRlpStream stream = _accountDecoder.EncodeToNewNettyStream(account); _flatWriteBatch.SetAccount(addrHash, stream.AsSpan()); } + + public void DeleteAccountRange(in ValueHash256 fromPath, in ValueHash256 toPath) => + _flatWriteBatch.DeleteAccountRange(fromPath, toPath); + + public void DeleteStorageRange(in ValueHash256 addressHash, in ValueHash256 fromPath, in ValueHash256 toPath) => + _flatWriteBatch.DeleteStorageRange(addressHash, fromPath, toPath); } public struct ToHashedFlatReader( @@ -263,5 +279,17 @@ public void SetStorageRaw(Hash256 addrHash, Hash256 slotHash, in SlotValue? 
valu public void SetAccountRaw(Hash256 addrHash, Account account) => _flatWriter.SetAccountRaw(addrHash, account); + + public void DeleteAccountRange(in ValueHash256 fromPath, in ValueHash256 toPath) => + _flatWriter.DeleteAccountRange(fromPath, toPath); + + public void DeleteStorageRange(in ValueHash256 addressHash, in ValueHash256 fromPath, in ValueHash256 toPath) => + _flatWriter.DeleteStorageRange(addressHash, fromPath, toPath); + + public void DeleteStateTrieNodeRange(in TreePath fromPath, in TreePath toPath) => + _trieWriteBatch.DeleteStateTrieNodeRange(fromPath, toPath); + + public void DeleteStorageTrieNodeRange(in ValueHash256 addressHash, in TreePath fromPath, in TreePath toPath) => + _trieWriteBatch.DeleteStorageTrieNodeRange(addressHash, fromPath, toPath); } } diff --git a/src/Nethermind/Nethermind.State.Flat/Persistence/BaseTriePersistence.cs b/src/Nethermind/Nethermind.State.Flat/Persistence/BaseTriePersistence.cs index 83877c6a301..50d158552cc 100644 --- a/src/Nethermind/Nethermind.State.Flat/Persistence/BaseTriePersistence.cs +++ b/src/Nethermind/Nethermind.State.Flat/Persistence/BaseTriePersistence.cs @@ -131,16 +131,17 @@ private static ReadOnlySpan EncodeFullStorageNodeKey(Span buffer, Ha } public readonly struct WriteBatch( + ISortedKeyValueStore stateTopNodesSnap, + ISortedKeyValueStore stateNodesSnap, ISortedKeyValueStore storageNodesSnap, ISortedKeyValueStore fallbackNodesSnap, - IWriteOnlyKeyValueStore stateTopNodes, - IWriteOnlyKeyValueStore stateNodes, - IWriteOnlyKeyValueStore storageNodes, - IWriteOnlyKeyValueStore fallbackNodes, + Nethermind.Core.IWriteBatch stateTopNodes, + Nethermind.Core.IWriteBatch stateNodes, + Nethermind.Core.IWriteBatch storageNodes, + Nethermind.Core.IWriteBatch fallbackNodes, WriteFlags flags ) : BasePersistence.ITrieWriteBatch { - [SkipLocalsInit] public void SelfDestruct(in ValueHash256 accountPath) { @@ -211,6 +212,113 @@ public void SetStorageTrieNode(Hash256 address, in TreePath path, TrieNode tn) break; } 
} + + public void DeleteStateTrieNodeRange(in TreePath fromPath, in TreePath toPath) + { + // State trie nodes are stored across 3 columns based on path length: + // - StateNodesTop: path length 0-5 (3 byte keys) + // - StateNodes: path length 6-15 (8 byte keys) + // - FallbackNodes: path length 16+ (34 byte keys with 0x00 prefix) + + // Delete from StateNodesTop (path length 0-5) + { + Span firstKey = stackalloc byte[StateNodesTopPathLength]; + Span lastKey = stackalloc byte[StateNodesTopPathLength + 1]; + EncodeStateTopNodeKey(firstKey, fromPath); + EncodeStateTopNodeKey(lastKey[..StateNodesTopPathLength], toPath); + lastKey[StateNodesTopPathLength] = 0; // Exclusive upper bound + + using ISortedView view = stateTopNodesSnap.GetViewBetween(firstKey, lastKey); + while (view.MoveNext()) + { + if (view.CurrentKey.Length != StateNodesTopPathLength) continue; + stateTopNodes.Remove(view.CurrentKey); + } + } + + // Delete from StateNodes (path length 6-15) + { + Span firstKey = stackalloc byte[ShortenedPathLength]; + Span lastKey = stackalloc byte[ShortenedPathLength + 1]; + EncodeShortenedStateNodeKey(firstKey, fromPath); + EncodeShortenedStateNodeKey(lastKey[..ShortenedPathLength], toPath); + lastKey[ShortenedPathLength] = 0; // Exclusive upper bound + + using ISortedView view = stateNodesSnap.GetViewBetween(firstKey, lastKey); + while (view.MoveNext()) + { + if (view.CurrentKey.Length != ShortenedPathLength) continue; + stateNodes.Remove(view.CurrentKey); + } + } + + // Delete from FallbackNodes (path length 16+, prefix 0x00) + { + Span firstKey = stackalloc byte[FullStateNodesKeyLength]; + Span lastKey = stackalloc byte[FullStateNodesKeyLength + 1]; + EncodeFullStateNodeKey(firstKey, fromPath); + EncodeFullStateNodeKey(lastKey[..FullStateNodesKeyLength], toPath); + lastKey[FullStateNodesKeyLength] = 0; // Exclusive upper bound + + using ISortedView view = fallbackNodesSnap.GetViewBetween(firstKey, lastKey); + while (view.MoveNext()) + { + if (view.CurrentKey.Length 
!= FullStateNodesKeyLength) continue; + if (view.CurrentKey[0] != 0) continue; // State nodes have 0x00 prefix + fallbackNodes.Remove(view.CurrentKey); + } + } + } + + public void DeleteStorageTrieNodeRange(in ValueHash256 addressHash, in TreePath fromPath, in TreePath toPath) + { + // Storage trie nodes are stored across 2 columns based on path length: + // - StorageNodes: path length 0-15 (28 byte keys) + // - FallbackNodes: path length 16+ (54 byte keys with 0x01 prefix) + + Hash256 address = new Hash256(addressHash); + + // Delete from StorageNodes (path length 0-15) + { + Span firstKey = stackalloc byte[ShortenedStorageNodesKeyLength]; + Span lastKey = stackalloc byte[ShortenedStorageNodesKeyLength + 1]; + EncodeShortenedStorageNodeKey(firstKey, address, fromPath); + EncodeShortenedStorageNodeKey(lastKey[..ShortenedStorageNodesKeyLength], address, toPath); + lastKey[ShortenedStorageNodesKeyLength] = 0; // Exclusive upper bound + + using ISortedView view = storageNodesSnap.GetViewBetween(firstKey, lastKey); + while (view.MoveNext()) + { + if (view.CurrentKey.Length != ShortenedStorageNodesKeyLength) continue; + // Verify the 16-byte address suffix matches + if (Bytes.AreEqual(view.CurrentKey[(StoragePrefixPortion + ShortenedPathLength)..], addressHash.Bytes[StoragePrefixPortion..StorageHashPrefixLength])) + { + storageNodes.Remove(view.CurrentKey); + } + } + } + + // Delete from FallbackNodes (path length 16+, prefix 0x01) + { + Span firstKey = stackalloc byte[FullStorageNodesKeyLength]; + Span lastKey = stackalloc byte[FullStorageNodesKeyLength + 1]; + EncodeFullStorageNodeKey(firstKey, address, fromPath); + EncodeFullStorageNodeKey(lastKey[..FullStorageNodesKeyLength], address, toPath); + lastKey[FullStorageNodesKeyLength] = 0; // Exclusive upper bound + + using ISortedView view = fallbackNodesSnap.GetViewBetween(firstKey, lastKey); + while (view.MoveNext()) + { + if (view.CurrentKey.Length != FullStorageNodesKeyLength) continue; + if (view.CurrentKey[0] != 
1) continue; // Storage nodes have 0x01 prefix + // Verify the 16-byte address suffix matches + if (Bytes.AreEqual(view.CurrentKey[(1 + StoragePrefixPortion + FullPathLength + PathLengthLength)..], addressHash.Bytes[StoragePrefixPortion..StorageHashPrefixLength])) + { + fallbackNodes.Remove(view.CurrentKey); + } + } + } + } } diff --git a/src/Nethermind/Nethermind.State.Flat/Persistence/CachedReaderPersistence.cs b/src/Nethermind/Nethermind.State.Flat/Persistence/CachedReaderPersistence.cs index e1df994364c..d9a3885bc93 100644 --- a/src/Nethermind/Nethermind.State.Flat/Persistence/CachedReaderPersistence.cs +++ b/src/Nethermind/Nethermind.State.Flat/Persistence/CachedReaderPersistence.cs @@ -100,6 +100,12 @@ public IPersistence.IWriteBatch CreateWriteBatch(in StateId from, in StateId to, public void Flush() => _inner.Flush(); + public void Clear() + { + ClearReaderCache(); + _inner.Clear(); + } + private void ClearReaderCache() { using Lock.Scope _ = _readerCacheLock.EnterScope(); @@ -128,6 +134,10 @@ private class ClearCacheOnWriteBatchComplete(IPersistence.IWriteBatch inner, Cac public void SetStorageTrieNode(Hash256 address, in TreePath path, TrieNode tnValue) => inner.SetStorageTrieNode(address, path, tnValue); public void SetStorageRaw(Hash256 addrHash, Hash256 slotHash, in SlotValue? 
value) => inner.SetStorageRaw(addrHash, slotHash, value); public void SetAccountRaw(Hash256 addrHash, Account account) => inner.SetAccountRaw(addrHash, account); + public void DeleteAccountRange(in ValueHash256 fromPath, in ValueHash256 toPath) => inner.DeleteAccountRange(fromPath, toPath); + public void DeleteStorageRange(in ValueHash256 addressHash, in ValueHash256 fromPath, in ValueHash256 toPath) => inner.DeleteStorageRange(addressHash, fromPath, toPath); + public void DeleteStateTrieNodeRange(in TreePath fromPath, in TreePath toPath) => inner.DeleteStateTrieNodeRange(fromPath, toPath); + public void DeleteStorageTrieNodeRange(in ValueHash256 addressHash, in TreePath fromPath, in TreePath toPath) => inner.DeleteStorageTrieNodeRange(addressHash, fromPath, toPath); public void Dispose() { diff --git a/src/Nethermind/Nethermind.State.Flat/Persistence/FlatInTriePersistence.cs b/src/Nethermind/Nethermind.State.Flat/Persistence/FlatInTriePersistence.cs index be6b84b56ec..0ee4a0bf96c 100644 --- a/src/Nethermind/Nethermind.State.Flat/Persistence/FlatInTriePersistence.cs +++ b/src/Nethermind/Nethermind.State.Flat/Persistence/FlatInTriePersistence.cs @@ -14,6 +14,19 @@ public class FlatInTriePersistence(IColumnsDb db) : IPersistence { public void Flush() => db.Flush(); + public void Clear() + { + foreach (FlatDbColumns column in db.ColumnKeys) + { + IDb columnDb = db.GetColumnDb(column); + using IWriteBatch batch = columnDb.StartWriteBatch(); + foreach (byte[] key in columnDb.GetAllKeys()) + { + batch.Remove(key); + } + } + } + public IPersistence.IPersistenceReader CreateReader() { IColumnDbSnapshot snapshot = db.CreateSnapshot(); @@ -55,7 +68,7 @@ public IPersistence.IWriteBatch CreateWriteBatch(in StateId from, in StateId to, { IColumnDbSnapshot dbSnap = db.CreateSnapshot(); StateId currentState = RocksDbPersistence.ReadCurrentState(dbSnap.GetColumn(FlatDbColumns.Metadata)); - if (currentState != from) + if (from != StateId.Sync && currentState != from) { 
dbSnap.Dispose(); throw new InvalidOperationException($"Attempted to apply snapshot on top of wrong state. Snapshot from: {from}, Db state: {currentState}"); @@ -63,7 +76,9 @@ public IPersistence.IWriteBatch CreateWriteBatch(in StateId from, in StateId to, IColumnsWriteBatch batch = db.StartWriteBatch(); - BaseTriePersistence.WriteBatch trieWriteBatch = new( + BaseTriePersistence.WriteBatch trieWriteBatch = new BaseTriePersistence.WriteBatch( + (ISortedKeyValueStore)dbSnap.GetColumn(FlatDbColumns.StateTopNodes), + (ISortedKeyValueStore)dbSnap.GetColumn(FlatDbColumns.StateNodes), (ISortedKeyValueStore)dbSnap.GetColumn(FlatDbColumns.StorageNodes), (ISortedKeyValueStore)dbSnap.GetColumn(FlatDbColumns.FallbackNodes), batch.GetColumnBatch(FlatDbColumns.StateTopNodes), @@ -76,6 +91,7 @@ public IPersistence.IWriteBatch CreateWriteBatch(in StateId from, in StateId to, return new BasePersistence.WriteBatch, BaseTriePersistence.WriteBatch>( new BasePersistence.ToHashedWriteBatch( new BaseFlatPersistence.WriteBatch( + (ISortedKeyValueStore)dbSnap.GetColumn(FlatDbColumns.StateNodes), (ISortedKeyValueStore)dbSnap.GetColumn(FlatDbColumns.StorageNodes), batch.GetColumnBatch(FlatDbColumns.StateNodes), batch.GetColumnBatch(FlatDbColumns.StorageNodes), @@ -85,7 +101,8 @@ public IPersistence.IWriteBatch CreateWriteBatch(in StateId from, in StateId to, trieWriteBatch, new Reactive.AnonymousDisposable(() => { - RocksDbPersistence.SetCurrentState(batch.GetColumnBatch(FlatDbColumns.Metadata), toCopy); + if (toCopy != StateId.Sync) + RocksDbPersistence.SetCurrentState(batch.GetColumnBatch(FlatDbColumns.Metadata), toCopy); batch.Dispose(); dbSnap.Dispose(); if (!flags.HasFlag(WriteFlags.DisableWAL)) diff --git a/src/Nethermind/Nethermind.State.Flat/Persistence/IPersistence.cs b/src/Nethermind/Nethermind.State.Flat/Persistence/IPersistence.cs index d39988ac796..8d4528ec848 100644 --- a/src/Nethermind/Nethermind.State.Flat/Persistence/IPersistence.cs +++ 
b/src/Nethermind/Nethermind.State.Flat/Persistence/IPersistence.cs @@ -16,6 +16,7 @@ public interface IPersistence // Note: RocksdbPersistence already flush WAL on writing batch dispose. You don't need this unless you are skipping WAL. void Flush(); + void Clear(); public interface IPersistenceReader : IDisposable { @@ -49,6 +50,11 @@ public interface IWriteBatch : IDisposable void SetStorageRaw(Hash256 addrHash, Hash256 slotHash, in SlotValue? value); void SetAccountRaw(Hash256 addrHash, Account account); + + void DeleteAccountRange(in ValueHash256 fromPath, in ValueHash256 toPath); + void DeleteStorageRange(in ValueHash256 addressHash, in ValueHash256 fromPath, in ValueHash256 toPath); + void DeleteStateTrieNodeRange(in TreePath fromPath, in TreePath toPath); + void DeleteStorageTrieNodeRange(in ValueHash256 addressHash, in TreePath fromPath, in TreePath toPath); } /// diff --git a/src/Nethermind/Nethermind.State.Flat/Persistence/PreimageRecordingPersistence.cs b/src/Nethermind/Nethermind.State.Flat/Persistence/PreimageRecordingPersistence.cs index 8f9d4169e4f..173bbea92f8 100644 --- a/src/Nethermind/Nethermind.State.Flat/Persistence/PreimageRecordingPersistence.cs +++ b/src/Nethermind/Nethermind.State.Flat/Persistence/PreimageRecordingPersistence.cs @@ -38,6 +38,7 @@ public IPersistence.IWriteBatch CreateWriteBatch(in StateId from, in StateId to, } public void Flush() => _inner.Flush(); + public void Clear() => _inner.Clear(); private class RecordingWriteBatch(IPersistence.IWriteBatch inner, IWriteBatch preimageWriteBatch, IDb preimageDb) : IPersistence.IWriteBatch { @@ -112,5 +113,17 @@ private void RecordSlotPreimage(in UInt256 slot) StorageTree.ComputeKeyWithLookup(slot, ref slotHash); preimageWriteBatch.PutSpan(slotHash.BytesAsSpan[..PreimageLookupSize], slot.ToBigEndian()); } + + public void DeleteAccountRange(in ValueHash256 fromPath, in ValueHash256 toPath) => + inner.DeleteAccountRange(fromPath, toPath); + + public void DeleteStorageRange(in ValueHash256 
addressHash, in ValueHash256 fromPath, in ValueHash256 toPath) => + inner.DeleteStorageRange(addressHash, fromPath, toPath); + + public void DeleteStateTrieNodeRange(in TreePath fromPath, in TreePath toPath) => + inner.DeleteStateTrieNodeRange(fromPath, toPath); + + public void DeleteStorageTrieNodeRange(in ValueHash256 addressHash, in TreePath fromPath, in TreePath toPath) => + inner.DeleteStorageTrieNodeRange(addressHash, fromPath, toPath); } } diff --git a/src/Nethermind/Nethermind.State.Flat/Persistence/PreimageRocksdbPersistence.cs b/src/Nethermind/Nethermind.State.Flat/Persistence/PreimageRocksdbPersistence.cs index e135f1dc56d..04ab2921051 100644 --- a/src/Nethermind/Nethermind.State.Flat/Persistence/PreimageRocksdbPersistence.cs +++ b/src/Nethermind/Nethermind.State.Flat/Persistence/PreimageRocksdbPersistence.cs @@ -27,6 +27,19 @@ public class PreimageRocksdbPersistence(IColumnsDb db) : IPersist public void Flush() => db.Flush(); + public void Clear() + { + foreach (FlatDbColumns column in db.ColumnKeys) + { + IDb columnDb = db.GetColumnDb(column); + using IWriteBatch batch = columnDb.StartWriteBatch(); + foreach (byte[] key in columnDb.GetAllKeys()) + { + batch.Remove(key); + } + } + } + internal static StateId ReadCurrentState(IReadOnlyKeyValueStore kv) { byte[]? 
bytes = kv.Get(CurrentStateKey); @@ -88,7 +101,7 @@ public IPersistence.IWriteBatch CreateWriteBatch(in StateId from, in StateId to, IColumnsWriteBatch batch = db.StartWriteBatch(); IColumnDbSnapshot dbSnap = db.CreateSnapshot(); StateId currentState = ReadCurrentState(dbSnap.GetColumn(FlatDbColumns.Metadata)); - if (currentState != from) + if (from != StateId.Sync && currentState != from) { dbSnap.Dispose(); throw new InvalidOperationException( @@ -97,14 +110,17 @@ public IPersistence.IWriteBatch CreateWriteBatch(in StateId from, in StateId to, FakeHashWriter flatWriter = new( new BaseFlatPersistence.WriteBatch( - ((ISortedKeyValueStore)dbSnap.GetColumn(FlatDbColumns.Storage)), + (ISortedKeyValueStore)dbSnap.GetColumn(FlatDbColumns.Account), + (ISortedKeyValueStore)dbSnap.GetColumn(FlatDbColumns.Storage), batch.GetColumnBatch(FlatDbColumns.Account), batch.GetColumnBatch(FlatDbColumns.Storage), flags ) ); - BaseTriePersistence.WriteBatch trieWriteBatch = new( + BaseTriePersistence.WriteBatch trieWriteBatch = new BaseTriePersistence.WriteBatch( + (ISortedKeyValueStore)dbSnap.GetColumn(FlatDbColumns.StateTopNodes), + (ISortedKeyValueStore)dbSnap.GetColumn(FlatDbColumns.StateNodes), (ISortedKeyValueStore)dbSnap.GetColumn(FlatDbColumns.StorageNodes), (ISortedKeyValueStore)dbSnap.GetColumn(FlatDbColumns.FallbackNodes), batch.GetColumnBatch(FlatDbColumns.StateTopNodes), @@ -119,7 +135,8 @@ public IPersistence.IWriteBatch CreateWriteBatch(in StateId from, in StateId to, trieWriteBatch, new Reactive.AnonymousDisposable(() => { - SetCurrentState(batch.GetColumnBatch(FlatDbColumns.Metadata), toCopy); + if (toCopy != StateId.Sync) + SetCurrentState(batch.GetColumnBatch(FlatDbColumns.Metadata), toCopy); batch.Dispose(); dbSnap.Dispose(); if (!flags.HasFlag(WriteFlags.DisableWAL)) @@ -175,6 +192,12 @@ public void SetStorageRaw(Hash256 addrHash, Hash256 slotHash, in SlotValue? 
valu public void SetAccountRaw(Hash256 addrHash, Account account) => throw new InvalidOperationException("Raw operations not available in preimage mode"); + + public void DeleteAccountRange(in ValueHash256 fromPath, in ValueHash256 toPath) => + throw new InvalidOperationException("Range deletion not available in preimage mode"); + + public void DeleteStorageRange(in ValueHash256 addressHash, in ValueHash256 fromPath, in ValueHash256 toPath) => + throw new InvalidOperationException("Range deletion not available in preimage mode"); } public struct FakeHashFlatReader( diff --git a/src/Nethermind/Nethermind.State.Flat/Persistence/RocksDbPersistence.cs b/src/Nethermind/Nethermind.State.Flat/Persistence/RocksDbPersistence.cs index 6bdc33857b8..2cdc44d5159 100644 --- a/src/Nethermind/Nethermind.State.Flat/Persistence/RocksDbPersistence.cs +++ b/src/Nethermind/Nethermind.State.Flat/Persistence/RocksDbPersistence.cs @@ -32,6 +32,19 @@ internal static void SetCurrentState(IWriteOnlyKeyValueStore kv, in StateId stat public void Flush() => db.Flush(); + public void Clear() + { + foreach (FlatDbColumns column in db.ColumnKeys) + { + IDb columnDb = db.GetColumnDb(column); + using IWriteBatch batch = columnDb.StartWriteBatch(); + foreach (byte[] key in columnDb.GetAllKeys()) + { + batch.Remove(key); + } + } + } + public IPersistence.IPersistenceReader CreateReader() { IColumnDbSnapshot snapshot = db.CreateSnapshot(); @@ -73,7 +86,7 @@ public IPersistence.IWriteBatch CreateWriteBatch(in StateId from, in StateId to, { IColumnDbSnapshot dbSnap = db.CreateSnapshot(); StateId currentState = ReadCurrentState(dbSnap.GetColumn(FlatDbColumns.Metadata)); - if (currentState != from) + if (from != StateId.Sync && currentState != from) { dbSnap.Dispose(); throw new InvalidOperationException($"Attempted to apply snapshot on top of wrong state. 
Snapshot from: {from}, Db state: {currentState}"); @@ -81,7 +94,9 @@ public IPersistence.IWriteBatch CreateWriteBatch(in StateId from, in StateId to, IColumnsWriteBatch batch = db.StartWriteBatch(); - BaseTriePersistence.WriteBatch trieWriteBatch = new( + BaseTriePersistence.WriteBatch trieWriteBatch = new BaseTriePersistence.WriteBatch( + (ISortedKeyValueStore)dbSnap.GetColumn(FlatDbColumns.StateTopNodes), + (ISortedKeyValueStore)dbSnap.GetColumn(FlatDbColumns.StateNodes), (ISortedKeyValueStore)dbSnap.GetColumn(FlatDbColumns.StorageNodes), (ISortedKeyValueStore)dbSnap.GetColumn(FlatDbColumns.FallbackNodes), batch.GetColumnBatch(FlatDbColumns.StateTopNodes), @@ -95,6 +110,7 @@ public IPersistence.IWriteBatch CreateWriteBatch(in StateId from, in StateId to, return new BasePersistence.WriteBatch, BaseTriePersistence.WriteBatch>( new BasePersistence.ToHashedWriteBatch( new BaseFlatPersistence.WriteBatch( + (ISortedKeyValueStore)dbSnap.GetColumn(FlatDbColumns.Account), (ISortedKeyValueStore)dbSnap.GetColumn(FlatDbColumns.Storage), batch.GetColumnBatch(FlatDbColumns.Account), batch.GetColumnBatch(FlatDbColumns.Storage), @@ -104,7 +120,8 @@ public IPersistence.IWriteBatch CreateWriteBatch(in StateId from, in StateId to, trieWriteBatch, new Reactive.AnonymousDisposable(() => { - SetCurrentState(batch.GetColumnBatch(FlatDbColumns.Metadata), toCopy); + if (toCopy != StateId.Sync) + SetCurrentState(batch.GetColumnBatch(FlatDbColumns.Metadata), toCopy); batch.Dispose(); dbSnap.Dispose(); if (!flags.HasFlag(WriteFlags.DisableWAL)) diff --git a/src/Nethermind/Nethermind.State.Flat/PersistenceManager.cs b/src/Nethermind/Nethermind.State.Flat/PersistenceManager.cs index 76ab641d874..dad2ee0f29c 100644 --- a/src/Nethermind/Nethermind.State.Flat/PersistenceManager.cs +++ b/src/Nethermind/Nethermind.State.Flat/PersistenceManager.cs @@ -15,6 +15,7 @@ using Nethermind.Trie.Pruning; [assembly: InternalsVisibleTo("Nethermind.State.Flat.Test")] +[assembly: 
InternalsVisibleTo("Nethermind.Synchronization.Test")] namespace Nethermind.State.Flat; @@ -220,6 +221,12 @@ public StateId FlushToPersistence() return currentPersistedState; } + public void ResetPersistedStateId() + { + using IPersistence.IPersistenceReader reader = persistence.CreateReader(); + _currentPersistedStateId = reader.CurrentState; + } + internal void PersistSnapshot(Snapshot snapshot) { long compactLength = snapshot.To.BlockNumber! - snapshot.From.BlockNumber!; diff --git a/src/Nethermind/Nethermind.State.Flat/ScopeProvider/AbstractMinimalTrieStore.cs b/src/Nethermind/Nethermind.State.Flat/ScopeProvider/AbstractMinimalTrieStore.cs index 4530ac8ef23..46c26045cb3 100644 --- a/src/Nethermind/Nethermind.State.Flat/ScopeProvider/AbstractMinimalTrieStore.cs +++ b/src/Nethermind/Nethermind.State.Flat/ScopeProvider/AbstractMinimalTrieStore.cs @@ -30,7 +30,7 @@ public byte[] LoadRlp(in TreePath path, Hash256 hash, ReadFlags flags = ReadFlag public INodeStorage.KeyScheme Scheme => INodeStorage.KeyScheme.HalfPath; - public bool IsPersisted(in TreePath path, in ValueHash256 keccak) => throw new UnsupportedOperationException("Persisted check not supported"); + public virtual bool IsPersisted(in TreePath path, in ValueHash256 keccak) => throw new UnsupportedOperationException("Persisted check not supported"); public abstract class AbstractMinimalCommitter(ConcurrencyController quota) : ICommitter { diff --git a/src/Nethermind/Nethermind.State.Flat/ScopeProvider/FlatWorldStateManager.cs b/src/Nethermind/Nethermind.State.Flat/ScopeProvider/FlatWorldStateManager.cs index af26aacfb89..a5098673cdd 100644 --- a/src/Nethermind/Nethermind.State.Flat/ScopeProvider/FlatWorldStateManager.cs +++ b/src/Nethermind/Nethermind.State.Flat/ScopeProvider/FlatWorldStateManager.cs @@ -7,6 +7,8 @@ using Nethermind.Evm.State; using Nethermind.Logging; using Nethermind.State.Flat.Persistence; +using Nethermind.State.Flat.Sync; +using Nethermind.State.Flat.Sync.Snap; using 
Nethermind.State.SnapServer; using Nethermind.Trie.Pruning; @@ -20,6 +22,7 @@ public class FlatWorldStateManager( ITrieWarmer trieWarmer, Func overridableWorldScopeFactory, [KeyFilter(DbNames.Code)] IDb codeDb, + IFlatStateRootIndex flatStateRootIndex, ILogManager logManager) : IWorldStateManager { @@ -34,9 +37,15 @@ public class FlatWorldStateManager( private readonly FlatTrieVerifier _trieVerifier = new(flatDbManager, persistence, logManager); + private FlatSnapServer? _snapServer; + public IWorldStateScopeProvider GlobalWorldState => _mainWorldState; public IStateReader GlobalStateReader => flatStateReader; - public ISnapServer? SnapServer => null; + public ISnapServer? SnapServer => _snapServer ??= new FlatSnapServer( + flatDbManager, + codeDb, + flatStateRootIndex, + logManager); public IReadOnlyKeyValueStore? HashServer => null; public IWorldStateScopeProvider CreateResettableWorldState() => diff --git a/src/Nethermind/Nethermind.State.Flat/ScopeProvider/ReadOnlyStateTrieStoreAdapter.cs b/src/Nethermind/Nethermind.State.Flat/ScopeProvider/ReadOnlyStateTrieStoreAdapter.cs index 3cc7539ebf6..f778627e4cf 100644 --- a/src/Nethermind/Nethermind.State.Flat/ScopeProvider/ReadOnlyStateTrieStoreAdapter.cs +++ b/src/Nethermind/Nethermind.State.Flat/ScopeProvider/ReadOnlyStateTrieStoreAdapter.cs @@ -19,6 +19,8 @@ public override ITrieNodeResolver GetStorageTrieNodeResolver(Hash256? address) = address is null ? 
this : new ReadOnlyStorageTrieStoreAdapter(bundle, address); // Used in trie visitor and weird very edge case that cuts the whole thing to pieces + + public IScopedTrieStore GetStorageTrieStore(Hash256 address) => new ReadOnlyStorageTrieStoreAdapter(bundle, address); } internal class ReadOnlyStorageTrieStoreAdapter( diff --git a/src/Nethermind/Nethermind.State.Flat/StateId.cs b/src/Nethermind/Nethermind.State.Flat/StateId.cs index 3caf59bbed7..81640375147 100644 --- a/src/Nethermind/Nethermind.State.Flat/StateId.cs +++ b/src/Nethermind/Nethermind.State.Flat/StateId.cs @@ -15,6 +15,7 @@ public StateId(BlockHeader? header) : this(header?.Number ?? -1, header?.StateRo } public static StateId PreGenesis = new(-1, Keccak.EmptyTreeHash); + public static StateId Sync = new(long.MinValue, Keccak.EmptyTreeHash); public int CompareTo(StateId other) { diff --git a/src/Nethermind/Nethermind.State.Flat/Sync/FlatEntryWriter.cs b/src/Nethermind/Nethermind.State.Flat/Sync/FlatEntryWriter.cs new file mode 100644 index 00000000000..8a535cb6762 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/Sync/FlatEntryWriter.cs @@ -0,0 +1,227 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System; +using System.Diagnostics; +using Nethermind.Core; +using Nethermind.Core.Crypto; +using Nethermind.Serialization.Rlp; +using Nethermind.State.Flat.Persistence; +using Nethermind.Trie; +using Nethermind.Trie.Pruning; + +namespace Nethermind.State.Flat.Sync; + +/// +/// Utility for writing flat entries during sync. The purpose is to correctly identify the required flat entry to +save given a trie node. Handles both direct leaf nodes and inline leaf children (nodes with RLP < 32 bytes embedded in parent). +/// +internal static class FlatEntryWriter +{ + /// + /// Write flat account entries for a node and any inline leaf children. 
+ /// + public static void WriteAccountFlatEntries( + IPersistence.IWriteBatch writeBatch, + TreePath path, + TrieNode node) + { + if (node.IsLeaf) + { + int originalPathLength = path.Length; + try + { + path.AppendMut(node.Key); + Account account = AccountDecoder.Instance.Decode(node.Value.Span)!; + writeBatch.SetAccountRaw(path.Path.ToCommitment(), account); + } + finally + { + path.TruncateMut(originalPathLength); + } + return; + } + + if (node.IsBranch) + { + BranchInlineChildLeafEnumerator enumerator = new(ref path, node); + while (enumerator.MoveNext()) + { + Account account = AccountDecoder.Instance.Decode(enumerator.CurrentValue)!; + writeBatch.SetAccountRaw(enumerator.CurrentPath.ToCommitment(), account); + } + } + else if (node.IsExtension) + { + // Extension children are never inline branches in practice. An inline branch + // (RLP < 32 bytes) requires ≥2 keys sharing 224+ bits of prefix. Even a large + // contract with 2^20 storage slots gives collision probability of ~(2^20)² / 2^224 + // = 2^(-184). Effectively zero - safe to assume hash references. + } + } + + /// + /// Write flat storage entries for a node and any inline leaf children. + /// + public static void WriteStorageFlatEntries( + IPersistence.IWriteBatch writeBatch, + Hash256 address, + TreePath path, + TrieNode node) + { + if (node.IsLeaf) + { + int originalPathLength = path.Length; + try + { + path.AppendMut(node.Key); + byte[] toWrite = ((ReadOnlySpan)node.Value.Span).IsEmpty + ? State.StorageTree.ZeroBytes + : ((ReadOnlySpan)node.Value.Span).AsRlpValueContext().DecodeByteArray(); + writeBatch.SetStorageRaw(address, path.Path.ToCommitment(), SlotValue.FromSpanWithoutLeadingZero(toWrite)); + } + finally + { + path.TruncateMut(originalPathLength); + } + return; + } + + if (node.IsBranch) + { + BranchInlineChildLeafEnumerator enumerator = new(ref path, node); + while (enumerator.MoveNext()) + { + byte[] toWrite = enumerator.CurrentValue.IsEmpty + ? 
State.StorageTree.ZeroBytes + : enumerator.CurrentValue.AsRlpValueContext().DecodeByteArray(); + writeBatch.SetStorageRaw(address, enumerator.CurrentPath.ToCommitment(), SlotValue.FromSpanWithoutLeadingZero(toWrite)); + } + } + else if (node.IsExtension) + { + // Extension children are never inline branches in practice. An inline branch + // (RLP < 32 bytes) requires ≥2 keys sharing 224+ bits of prefix. Even a large + // contract with 2^20 storage slots gives collision probability of ~(2^20)² / 2^224 + // = 2^(-184). Effectively zero - safe to assume hash references. + } + } + + /// + /// High-performance enumerator for inline leaf children of a branch node. + /// Operates directly on RLP data to avoid TrieNode wrapper allocations. + /// Iterates 16 children, yielding only inline leaves. + /// + public ref struct BranchInlineChildLeafEnumerator + { + private readonly ReadOnlySpan _rlp; + private readonly int _originalPathLength; + private ref TreePath _path; + private int _index; + private int _rlpPosition; + + private ValueHash256 _currentFullPath; + private ReadOnlySpan _currentValue; + private ReadOnlySpan _currentRlp; + + public BranchInlineChildLeafEnumerator(ref TreePath path, TrieNode node) + { + _path = ref path; + _rlp = node.FullRlp.Span; + _originalPathLength = path.Length; + _index = -1; + _currentFullPath = default; + _currentValue = default; + _currentRlp = default; + + // Skip list prefix to position at first child + Rlp.ValueDecoderContext ctx = new(_rlp); + ctx.SkipLength(); + _rlpPosition = ctx.Position; + } + + public ValueHash256 CurrentPath => _currentFullPath; + public ReadOnlySpan CurrentValue => _currentValue; + + /// + /// Creates a TrieNode from the current inline leaf RLP. + /// Use this when you need the full TrieNode object (e.g., for deletion range computation). 
+ /// + public TrieNode CurrentNode + { + get + { + TrieNode node = new(NodeType.Unknown, _currentRlp.ToArray()); + node.ResolveNode(NullTrieNodeResolver.Instance, _path); + return node; + } + } + + public bool MoveNext() + { + _path.TruncateMut(_originalPathLength); + Rlp.ValueDecoderContext ctx = new(_rlp) { Position = _rlpPosition }; + + while (++_index < 16) + { + int prefix = ctx.ReadByte(); + + switch (prefix) + { + case 0: + case 128: // Empty/null child (0x80) + continue; + + case 160: // Hash reference (0xa0 = 32-byte Keccak) + ctx.Position--; + ctx.SkipItem(); + continue; + + default: // Inline node + ctx.Position--; + int length = ctx.PeekNextRlpLength(); + ReadOnlySpan inlineRlp = ctx.PeekNextItem(); + + if (!TryExtractLeafData(inlineRlp, out ReadOnlySpan currentKey, out _currentValue)) + throw new UnreachableException("There should not be any non-leaf inline child node"); + + _currentRlp = inlineRlp; + _currentFullPath = _path.Append(_index).Append(currentKey).Path; + + // Save position for next loop + _rlpPosition = ctx.Position + length; + return true; + + } + } + + return false; + } + + } + + private static bool TryExtractLeafData( + ReadOnlySpan nodeRlp, + out ReadOnlySpan key, + out ReadOnlySpan value) + { + Rlp.ValueDecoderContext ctx = new(nodeRlp); + ctx.ReadSequenceLength(); + + ReadOnlySpan keySpan = ctx.DecodeByteArraySpan(); + (byte[] keyBytes, bool isLeaf) = HexPrefix.FromBytes(keySpan); + + // Check if leaf (0x20 bit set in first nibble) + if (isLeaf) + { + value = ctx.DecodeByteArraySpan(); + key = keyBytes; + return true; + } + + key = default; + value = default; + return false; + } + +} diff --git a/src/Nethermind/Nethermind.State.Flat/Sync/FlatFullStateFinder.cs b/src/Nethermind/Nethermind.State.Flat/Sync/FlatFullStateFinder.cs new file mode 100644 index 00000000000..4a9fcef885f --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/Sync/FlatFullStateFinder.cs @@ -0,0 +1,15 @@ +// SPDX-FileCopyrightText: 2026 Demerzel Solutions 
Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using Nethermind.Synchronization.ParallelSync; + +namespace Nethermind.State.Flat.Sync; + +public class FlatFullStateFinder(PersistenceManager persistenceManager) : IFullStateFinder +{ + public long FindBestFullState() + { + long blockNumber = persistenceManager.GetCurrentPersistedStateId().BlockNumber; + return blockNumber < 0 ? 0 : blockNumber; + } +} diff --git a/src/Nethermind/Nethermind.State.Flat/Sync/FlatTreeSyncStore.cs b/src/Nethermind/Nethermind.State.Flat/Sync/FlatTreeSyncStore.cs new file mode 100644 index 00000000000..b268ed7afa2 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/Sync/FlatTreeSyncStore.cs @@ -0,0 +1,295 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System; +using Nethermind.Core; +using Nethermind.Core.Collections; +using Nethermind.Core.Crypto; +using Nethermind.Logging; +using Nethermind.Serialization.Rlp; +using Nethermind.State; +using Nethermind.State.Flat.Persistence; +using Nethermind.State.Flat.ScopeProvider; +using Nethermind.Synchronization.FastSync; +using Nethermind.Trie; +using Nethermind.Trie.Pruning; + +namespace Nethermind.State.Flat.Sync; + +public class FlatTreeSyncStore(IPersistence persistence, IPersistenceManager persistenceManager, ILogManager logManager) : ITreeSyncStore +{ + // For flat, one cannot continue syncing after finalization as it will corrupt existing state. + private bool _wasFinalized = false; + + internal readonly record struct DeletionRange(ValueHash256 From, ValueHash256 To); + + public bool NodeExists(Hash256? address, in TreePath path, in ValueHash256 hash) + { + using IPersistence.IPersistenceReader reader = persistence.CreateReader(); + byte[]? data = address is null + ? 
reader.TryLoadStateRlp(path, ReadFlags.None) + : reader.TryLoadStorageRlp(address, path, ReadFlags.None); + + if (data is null) return false; + + // Rehash and verify + ValueHash256 computedHash = ValueKeccak.Compute(data); + return computedHash == hash; + } + + public void SaveNode(Hash256? address, in TreePath path, in ValueHash256 hash, ReadOnlySpan data) + { + if (_wasFinalized) throw new InvalidOperationException("Db was finalized"); + + using IPersistence.IWriteBatch writeBatch = persistence.CreateWriteBatch(StateId.Sync, StateId.Sync, WriteFlags.DisableWAL); + + TrieNode node = new(NodeType.Unknown, data.ToArray()); + node.ResolveNode(NullTrieNodeResolver.Instance, path); + + TrieNode? existingNode = ReadExistingNode(address, path); + + if (address is null) + { + RequestStateDeletion(writeBatch, path, node, existingNode); + + writeBatch.SetStateTrieNode(path, node); + FlatEntryWriter.WriteAccountFlatEntries(writeBatch, path, node); + } + else + { + RequestStorageDeletion(writeBatch, address, path, node, existingNode); + + writeBatch.SetStorageTrieNode(address, path, node); + FlatEntryWriter.WriteStorageFlatEntries(writeBatch, address, path, node); + } + } + + private TrieNode? ReadExistingNode(Hash256? address, TreePath path) + { + using IPersistence.IPersistenceReader reader = persistence.CreateReader(); + byte[]? existingData = address is null + ? reader.TryLoadStateRlp(path, ReadFlags.None) + : reader.TryLoadStorageRlp(address, path, ReadFlags.None); + if (existingData is null) return null; + + TrieNode existingNode = new TrieNode(NodeType.Unknown, existingData); + existingNode.ResolveNode(NullTrieNodeResolver.Instance, path); + return existingNode; + } + + private void RequestStateDeletion(IPersistence.IWriteBatch writeBatch, in TreePath path, TrieNode newNode, TrieNode? 
existingNode) + { + RefList16 ranges = new(); + ComputeDeletionRanges(path, newNode, existingNode, ref ranges); + foreach (DeletionRange range in ranges.AsSpan()) + { + writeBatch.DeleteAccountRange(range.From, range.To); + writeBatch.DeleteStateTrieNodeRange(ComputeTreePathForHash(range.From, 64), ComputeTreePathForHash(range.To, 64)); + } + } + + private void RequestStorageDeletion(IPersistence.IWriteBatch writeBatch, Hash256 address, in TreePath path, TrieNode newNode, TrieNode? existingNode) + { + ValueHash256 addressHash = address.ValueHash256; + RefList16 ranges = new(); + ComputeDeletionRanges(path, newNode, existingNode, ref ranges); + foreach (DeletionRange range in ranges.AsSpan()) + { + writeBatch.DeleteStorageRange(addressHash, range.From, range.To); + writeBatch.DeleteStorageTrieNodeRange(addressHash, ComputeTreePathForHash(range.From, 64), ComputeTreePathForHash(range.To, 64)); + } + } + + /// + /// Computes the deletion ranges when replacing an existing node with a new node. + /// Only generates ranges for areas where existing had data but new doesn't. + /// When existingNode is null, assumes full coverage and deletes everything outside new node's coverage. + /// + internal static void ComputeDeletionRanges(in TreePath path, TrieNode newNode, TrieNode? existingNode, ref RefList16 ranges) + { + switch (newNode.NodeType) + { + case NodeType.Branch: + ComputeToBranchDeletionRanges(path, newNode, existingNode, ref ranges); + break; + case NodeType.Leaf: + ComputeToLeafDeletionRanges(path, newNode, existingNode, ref ranges); + break; + case NodeType.Extension: + ComputeToExtensionDeletionRanges(path, newNode, existingNode, ref ranges); + break; + } + } + + /// + /// To Branch: If existing is also Branch, only delete where existing had hash ref but new doesn't. + /// Otherwise delete all ranges where new has no hash reference. + /// + private static void ComputeToBranchDeletionRanges(TreePath path, TrieNode newNode, TrieNode? 
existingNode, ref RefList16 ranges) + { + int? nibbleRangeStart = null; + bool existingIsBranch = existingNode is { NodeType: NodeType.Branch }; + + int childNibble = -1; + if (!existingIsBranch && existingNode is not null) + { + childNibble = existingNode.Key![0]; + } + + for (int i = 0; i < 16; i++) + { + bool needsDelete = false; + bool newNodeHasNonInlineChild = newNode.GetChildHashAsValueKeccak(i, out _); + bool newNodeIsNullOrInline = !newNodeHasNonInlineChild; + + // Note: for inline node, the child hash is null, hence range will be deleted. But the existingNode child may + also be inline node, in which case, it still needs to be deleted instead of just assuming it's empty. + if (existingIsBranch) + { + // Branch→Branch: only delete where existing had hash ref but new doesn't + bool existingNodeHasChild = !existingNode!.IsChildNull(i); + if (existingNodeHasChild) + { + needsDelete = newNodeIsNullOrInline; + } + else + { + // Nothing to delete + } + } + else + { + if (childNibble == -1 || i == childNibble) + { + // Other→Branch: delete all where new has no hash reference + needsDelete = newNodeIsNullOrInline; + } + } + + if (needsDelete) + { + nibbleRangeStart ??= i; + } + else if (nibbleRangeStart.HasValue) + { + ranges.Add(ComputeSubtreeRangeForNibble(path, nibbleRangeStart.Value, i - 1)); + nibbleRangeStart = null; + } + } + + if (nibbleRangeStart.HasValue) + { + ranges.Add(ComputeSubtreeRangeForNibble(path, nibbleRangeStart.Value, 15)); + } + } + + /// + /// To Leaf: If existing is also Leaf with same key, no deletion needed. + /// Otherwise delete the whole subtree. + /// + private static void ComputeToLeafDeletionRanges(TreePath path, TrieNode newNode, TrieNode? existingNode, ref RefList16 ranges) + { + if (existingNode is { NodeType: NodeType.Leaf } && newNode.Key.SequenceEqual(existingNode.Key)) + return; + + ranges.Add(ComputeSubtreeRange(path)); + } + + /// + /// To Extension: If existing is also Extension with same key, no deletion needed. 
+ /// Otherwise delete gaps before and after the extension's subtree. + /// + private static void ComputeToExtensionDeletionRanges(TreePath path, TrieNode newNode, TrieNode? existingNode, ref RefList16 ranges) + { + if (existingNode is { NodeType: NodeType.Extension } && newNode.Key.SequenceEqual(existingNode.Key)) + return; + + TreePath extendedPath = path.Append(newNode.Key); + + // Gap before the extension + ValueHash256 subtreeStart = path.ToLowerBoundPath(); + ValueHash256 extensionStart = extendedPath.ToLowerBoundPath(); + if (extensionStart.CompareTo(subtreeStart) > 0) + ranges.Add(new DeletionRange(subtreeStart, extensionStart.DecrementPath())); + + // Gap after the extension + ValueHash256 extensionEnd = extendedPath.ToUpperBoundPath(); + ValueHash256 afterExtension = extensionEnd.IncrementPath(); + ValueHash256 subtreeEnd = path.ToUpperBoundPath(); + if (afterExtension.CompareTo(subtreeEnd) <= 0) + ranges.Add(new DeletionRange(afterExtension, subtreeEnd)); + } + + /// + /// Compute the range of full paths covered by a subtree rooted at childPath. + /// + private static DeletionRange ComputeSubtreeRange(in TreePath childPath) => + new(childPath.ToLowerBoundPath(), childPath.ToUpperBoundPath()); + + /// + /// Compute the merged range covering path.from.0000... to path.to.ffff... for a nibble range. + /// + private static DeletionRange ComputeSubtreeRangeForNibble(TreePath path, int from, int to) => + new(path.Append(from).ToLowerBoundPath(), path.Append(to).ToUpperBoundPath()); + + /// + /// Create a TreePath from a ValueHash256 with specified length. 
+ /// + private static TreePath ComputeTreePathForHash(in ValueHash256 hash, int length) => + new(hash, length); + + public void FinalizeSync(BlockHeader pivotHeader) + { + if (Interlocked.CompareExchange(ref _wasFinalized, true, false)) throw new InvalidOperationException("Db was finalized"); + + using IPersistence.IPersistenceReader reader = persistence.CreateReader(); + StateId from = reader.CurrentState; + StateId to = new StateId(pivotHeader); + + // Create and immediately dispose to increment state ID + // This pattern is used by Importer - the from->to transition updates the current state pointer + using (persistence.CreateWriteBatch(from, to)) + { + // Empty batch - just incrementing state + } + persistenceManager.ResetPersistedStateId(); + persistence.Flush(); + } + + public ITreeSyncVerificationContext CreateVerificationContext(byte[] rootNodeData) => + new FlatVerificationContext(persistence, rootNodeData, logManager); + + private class FlatVerificationContext : ITreeSyncVerificationContext, IDisposable + { + private readonly StateTree _stateTree; + private readonly IPersistence.IPersistenceReader _reader; + private readonly AccountDecoder _accountDecoder = AccountDecoder.Instance; + + public FlatVerificationContext(IPersistence persistence, byte[] rootNodeData, ILogManager logManager) + { + _reader = persistence.CreateReader(); + _stateTree = new StateTree(new FlatSyncTrieStore(_reader), logManager); + _stateTree.RootRef = new TrieNode(NodeType.Unknown, rootNodeData); + } + + public Account? GetAccount(Hash256 addressHash) + { + ReadOnlySpan bytes = _stateTree.Get(addressHash.Bytes); + return bytes.IsEmpty ? null : _accountDecoder.Decode(bytes); + } + + public void Dispose() => _reader.Dispose(); + } + + /// + /// Minimal trie store for verification context using IPersistenceReader directly. 
+ /// + private class FlatSyncTrieStore(IPersistence.IPersistenceReader reader) : AbstractMinimalTrieStore + { + public override TrieNode FindCachedOrUnknown(in TreePath path, Hash256 hash) => + new(NodeType.Unknown, hash); + + public override byte[]? TryLoadRlp(in TreePath path, Hash256 hash, ReadFlags flags = ReadFlags.None) => + reader.TryLoadStateRlp(path, flags); + } +} diff --git a/src/Nethermind/Nethermind.State.Flat/Sync/Snap/FlatSnapServer.cs b/src/Nethermind/Nethermind.State.Flat/Sync/Snap/FlatSnapServer.cs new file mode 100644 index 00000000000..7f83238f4f9 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/Sync/Snap/FlatSnapServer.cs @@ -0,0 +1,296 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using Nethermind.Core; +using Nethermind.Core.Collections; +using Nethermind.Core.Crypto; +using Nethermind.Core.Extensions; +using Nethermind.Logging; +using Nethermind.Serialization.Rlp; +using Nethermind.State.Flat.ScopeProvider; +using Nethermind.State.Snap; +using Nethermind.State.SnapServer; +using Nethermind.Trie; + +namespace Nethermind.State.Flat.Sync.Snap; + +public class FlatSnapServer( + IFlatDbManager flatDbManager, + IReadOnlyKeyValueStore codeDb, + IFlatStateRootIndex stateRootIndex, + ILogManager logManager) : ISnapServer +{ + private readonly ILogger _logger = logManager.GetClassLogger(); + private readonly AccountDecoder _decoder = new(); + + private const long HardResponseByteLimit = 2000000; + private const int HardResponseNodeLimit = 100000; + + // Flat state uses HintCacheMiss since it has different I/O patterns than Patricia + private readonly ReadFlags _optimizedReadFlags = ReadFlags.HintCacheMiss; + + private bool TryGetBundle(Hash256 rootHash, out ReadOnlySnapshotBundle bundle, out StateId stateId) + { + if (!stateRootIndex.TryGetStateId(rootHash, out stateId)) + { + bundle = null!; + return false; + } + + bundle = flatDbManager.GatherReadOnlySnapshotBundle(stateId); + 
return true; + } + + public IOwnedReadOnlyList? GetTrieNodes(IReadOnlyList pathSet, Hash256 rootHash, CancellationToken cancellationToken) + { + if (!TryGetBundle(rootHash, out ReadOnlySnapshotBundle bundle, out StateId stateId)) + return ArrayPoolList.Empty(); + + using (bundle) + { + if (_logger.IsDebug) _logger.Debug($"Get trie nodes {pathSet.Count}"); + + int pathLength = pathSet.Count; + ArrayPoolList response = new(pathLength); + ReadOnlyStateTrieStoreAdapter trieStore = new(bundle); + StateTree tree = new(trieStore, logManager); + bool abort = false; + + for (int i = 0; i < pathLength && !abort && !cancellationToken.IsCancellationRequested; i++) + { + byte[][]? requestedPath = pathSet[i].Group; + switch (requestedPath.Length) + { + case 0: + return null; + case 1: + try + { + byte[]? rlp = tree.GetNodeByPath(Nibbles.CompactToHexEncode(requestedPath[0]), stateId.StateRoot.ToCommitment()); + if (rlp is not null) + response.Add(rlp); + } + catch (MissingTrieNodeException) + { + abort = true; + } + break; + default: + try + { + Hash256 storagePath = new( + requestedPath[0].Length == Hash256.Size + ? requestedPath[0] + : requestedPath[0].PadRight(Hash256.Size)); + Account? account = GetAccountByPath(tree, stateId.StateRoot.ToCommitment(), requestedPath[0]); + if (account is not null) + { + Hash256? storageRoot = account.StorageRoot; + StorageTree sTree = new(trieStore.GetStorageTrieStore(storagePath), storageRoot, logManager); + + for (int reqStorage = 1; reqStorage < requestedPath.Length; reqStorage++) + { + byte[]? 
sRlp = sTree.GetNodeByPath(Nibbles.CompactToHexEncode(requestedPath[reqStorage])); + response.Add(sRlp!); + } + } + } + catch (MissingTrieNodeException) + { + abort = true; + } + break; + } + } + + if (response.Count == 0) return ArrayPoolList.Empty(); + return response; + } + } + + public IOwnedReadOnlyList GetByteCodes(IReadOnlyList requestedHashes, long byteLimit, CancellationToken cancellationToken) + { + long currentByteCount = 0; + ArrayPoolList response = new(requestedHashes.Count); + + if (byteLimit > HardResponseByteLimit) + { + byteLimit = HardResponseByteLimit; + } + + foreach (ValueHash256 codeHash in requestedHashes) + { + if (currentByteCount > byteLimit || cancellationToken.IsCancellationRequested) + { + break; + } + + if (codeHash.Bytes.SequenceEqual(Keccak.OfAnEmptyString.Bytes)) + { + response.Add([]); + currentByteCount += 1; + continue; + } + + byte[]? code = codeDb[codeHash.Bytes]; + if (code is not null) + { + response.Add(code); + currentByteCount += code.Length; + } + } + + return response; + } + + public (IOwnedReadOnlyList, IOwnedReadOnlyList) GetAccountRanges( + Hash256 rootHash, + in ValueHash256 startingHash, + in ValueHash256? limitHash, + long byteLimit, + CancellationToken cancellationToken) + { + if (!TryGetBundle(rootHash, out ReadOnlySnapshotBundle bundle, out StateId stateId)) + return (ArrayPoolList.Empty(), ArrayPoolList.Empty()); + + using (bundle) + { + byteLimit = Math.Max(Math.Min(byteLimit, HardResponseByteLimit), 1); + + AccountCollector accounts = new(); + (long _, IOwnedReadOnlyList proofs, _) = GetNodesFromTrieVisitor( + bundle, + stateId.StateRoot, + startingHash, + limitHash?.ToCommitment() ?? Keccak.MaxValue, + byteLimit, + null, + null, + accounts, + cancellationToken); + + ArrayPoolList nodes = accounts.Accounts; + return (nodes, proofs); + } + } + + public (IOwnedReadOnlyList>, IOwnedReadOnlyList?) GetStorageRanges( + Hash256 rootHash, + IReadOnlyList accounts, + in ValueHash256? startingHash, + in ValueHash256? 
limitHash, + long byteLimit, + CancellationToken cancellationToken) + { + if (!TryGetBundle(rootHash, out ReadOnlySnapshotBundle bundle, out StateId stateId)) + return (ArrayPoolList>.Empty(), ArrayPoolList.Empty()); + + using (bundle) + { + byteLimit = Math.Max(Math.Min(byteLimit, HardResponseByteLimit), 1); + + ValueHash256 startingHash1 = startingHash ?? ValueKeccak.Zero; + ValueHash256 limitHash1 = limitHash ?? ValueKeccak.MaxValue; + if (limitHash1 == ValueKeccak.Zero) + { + limitHash1 = ValueKeccak.MaxValue; + } + + long responseSize = 0; + ReadOnlyStateTrieStoreAdapter trieStore = new(bundle); + StateTree tree = startingHash1 == ValueKeccak.Zero + ? new StateTree(new CachedTrieStore(trieStore), logManager) + : new StateTree(trieStore, logManager); + + ArrayPoolList> responseNodes = new(accounts.Count); + for (int i = 0; i < accounts.Count; i++) + { + if (responseSize > byteLimit || cancellationToken.IsCancellationRequested) + { + break; + } + + if (responseSize > 1 && (byteLimit - responseSize) < 10000) + { + break; + } + + Account? accountNeth = GetAccountByPath(tree, stateId.StateRoot.ToCommitment(), accounts[i].Path.Bytes.ToArray()); + if (accountNeth is null) + { + break; + } + + Hash256? 
storagePath = accounts[i].Path.ToCommitment(); + + PathWithStorageCollector pathWithStorageCollector = new(); + (long innerResponseSize, IOwnedReadOnlyList proofs, bool stoppedEarly) = GetNodesFromTrieVisitor( + bundle, + stateId.StateRoot, + startingHash1, + limitHash1, + byteLimit - responseSize, + storagePath, + accountNeth.StorageRoot, + pathWithStorageCollector, + cancellationToken); + + if (pathWithStorageCollector.Slots.Count == 0) + { + // return proof of absence + return (responseNodes, proofs); + } + + responseNodes.Add(pathWithStorageCollector.Slots); + if (stoppedEarly || startingHash1 != Keccak.Zero) + { + return (responseNodes, proofs); + } + + proofs.Dispose(); + responseSize += innerResponseSize; + } + + return (responseNodes, ArrayPoolList.Empty()); + } + } + + private (long bytesSize, IOwnedReadOnlyList proofs, bool stoppedEarly) GetNodesFromTrieVisitor( + ReadOnlySnapshotBundle bundle, + in ValueHash256 rootHash, + in ValueHash256 startingHash, + in ValueHash256 limitHash, + long byteLimit, + in ValueHash256? storage, + in ValueHash256? storageRoot, + RangeQueryVisitor.ILeafValueCollector valueCollector, + CancellationToken cancellationToken) + { + ReadOnlyStateTrieStoreAdapter trieStore = new(bundle); + PatriciaTree tree = new(trieStore, logManager); + using RangeQueryVisitor visitor = new(startingHash, limitHash, valueCollector, byteLimit, HardResponseNodeLimit, readFlags: _optimizedReadFlags, cancellationToken); + VisitingOptions opt = new(); + tree.Accept(visitor, rootHash.ToCommitment(), opt, storageAddr: storage?.ToCommitment(), storageRoot: storageRoot?.ToCommitment()); + + ArrayPoolList proofs = startingHash != Keccak.Zero || visitor.StoppedEarly ? visitor.GetProofs() : ArrayPoolList.Empty(); + return (visitor.GetBytesSize(), proofs, visitor.StoppedEarly); + } + + private Account? 
GetAccountByPath(StateTree tree, in ValueHash256 rootHash, byte[] accountPath) + { + try + { + ReadOnlySpan bytes = tree.Get(accountPath, rootHash.ToCommitment()); + Rlp.ValueDecoderContext rlpContext = new(bytes); + return bytes.IsNullOrEmpty() ? null : _decoder.Decode(ref rlpContext); + } + catch (TrieNodeException) + { + return null; + } + catch (MissingTrieNodeException) + { + return null; + } + } +} diff --git a/src/Nethermind/Nethermind.State.Flat/Sync/Snap/FlatSnapStateTree.cs b/src/Nethermind/Nethermind.State.Flat/Sync/Snap/FlatSnapStateTree.cs new file mode 100644 index 00000000000..81ed1064173 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/Sync/Snap/FlatSnapStateTree.cs @@ -0,0 +1,93 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using Nethermind.Core; +using Nethermind.Core.Collections; +using Nethermind.Core.Crypto; +using Nethermind.Logging; +using Nethermind.State.Flat.Persistence; +using Nethermind.State.Flat.ScopeProvider; +using Nethermind.Synchronization.SnapSync; +using Nethermind.Trie; +using Nethermind.Trie.Pruning; + +namespace Nethermind.State.Flat.Sync.Snap; + +/// +/// ISnapTree adapter for flat snap sync (state). +/// Owns reader (for IsPersisted) and writeBatch (for commits), disposing them on Dispose. 
+/// +public class FlatSnapStateTree : ISnapTree +{ + private readonly IPersistence.IPersistenceReader _reader; + private readonly IPersistence.IWriteBatch _writeBatch; + private SnapUpperBoundAdapter _adapter; + private readonly StateTree _tree; + + public FlatSnapStateTree(IPersistence.IPersistenceReader reader, IPersistence.IWriteBatch writeBatch, ILogManager logManager) + { + _reader = reader; + _writeBatch = writeBatch; + _adapter = new SnapUpperBoundAdapter(new PersistenceTrieStoreAdapter(reader, writeBatch)); + _tree = new StateTree(_adapter, logManager); + } + + public Hash256 RootHash => _tree.RootHash; + + public void SetRootFromProof(TrieNode root) => _tree.RootRef = root; + + public bool IsPersisted(in TreePath path, in ValueHash256 keccak) + { + byte[]? rlp = _reader.TryLoadStateRlp(path, ReadFlags.None); + return rlp is not null && ValueKeccak.Compute(rlp) == keccak; + } + + public void BulkSetAndUpdateRootHash(in ArrayPoolListRef entries) + { + _tree.BulkSet(entries, PatriciaTree.Flags.WasSorted); + _tree.UpdateRootHash(); + } + + public void Commit(ValueHash256 upperBound) + { + _adapter.UpperBound = upperBound; + _tree.Commit(true, WriteFlags.DisableWAL); + } + + public void Dispose() + { + _writeBatch.Dispose(); + _reader.Dispose(); + } + + /// + /// Trie store adapter that writes trie nodes AND flat entries to IPersistence.IWriteBatch. + /// Uses IPersistenceReader for IsPersisted queries during snap sync. + /// + private class PersistenceTrieStoreAdapter( + IPersistence.IPersistenceReader reader, + IPersistence.IWriteBatch writeBatch) : AbstractMinimalTrieStore + { + public override TrieNode FindCachedOrUnknown(in TreePath path, Hash256 hash) => + new(NodeType.Unknown, hash); + + public override byte[]? TryLoadRlp(in TreePath path, Hash256 hash, ReadFlags flags = ReadFlags.None) => + reader.TryLoadStateRlp(path, flags); + + public override ICommitter BeginCommit(TrieNode? 
root, WriteFlags writeFlags = WriteFlags.None) => + new StateCommitter(writeBatch); + + private sealed class StateCommitter(IPersistence.IWriteBatch writeBatch) : ICommitter + { + public TrieNode CommitNode(ref TreePath path, TrieNode node) + { + writeBatch.SetStateTrieNode(path, node); + FlatEntryWriter.WriteAccountFlatEntries(writeBatch, path, node); + return node; + } + + public void Dispose() { } + } + } + +} diff --git a/src/Nethermind/Nethermind.State.Flat/Sync/Snap/FlatSnapStorageTree.cs b/src/Nethermind/Nethermind.State.Flat/Sync/Snap/FlatSnapStorageTree.cs new file mode 100644 index 00000000000..dc4fbc43b21 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/Sync/Snap/FlatSnapStorageTree.cs @@ -0,0 +1,95 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using Nethermind.Core; +using Nethermind.Core.Collections; +using Nethermind.Core.Crypto; +using Nethermind.Logging; +using Nethermind.State.Flat.Persistence; +using Nethermind.State.Flat.ScopeProvider; +using Nethermind.Synchronization.SnapSync; +using Nethermind.Trie; +using Nethermind.Trie.Pruning; + +namespace Nethermind.State.Flat.Sync.Snap; + +/// +/// ISnapTree adapter for flat snap sync (storage). +/// Owns reader (for IsPersisted) and writeBatch (for commits), disposing them on Dispose. 
+/// +public class FlatSnapStorageTree : ISnapTree +{ + private readonly IPersistence.IPersistenceReader _reader; + private readonly IPersistence.IWriteBatch _writeBatch; + private readonly StorageTree _tree; + private readonly Hash256 _addressHash; + private readonly SnapUpperBoundAdapter _adapter; + + public FlatSnapStorageTree(IPersistence.IPersistenceReader reader, IPersistence.IWriteBatch writeBatch, Hash256 addressHash, ILogManager logManager) + { + _reader = reader; + _writeBatch = writeBatch; + _addressHash = addressHash; + _adapter = new SnapUpperBoundAdapter(new PersistenceStorageTrieStoreAdapter(reader, writeBatch, addressHash)); + _tree = new StorageTree(_adapter, logManager); + } + + public Hash256 RootHash => _tree.RootHash; + + public void SetRootFromProof(TrieNode root) => _tree.RootRef = root; + + public bool IsPersisted(in TreePath path, in ValueHash256 keccak) + { + byte[]? rlp = _reader.TryLoadStorageRlp(_addressHash, path, ReadFlags.None); + return rlp is not null && ValueKeccak.Compute(rlp) == keccak; + } + + public void BulkSetAndUpdateRootHash(in ArrayPoolListRef entries) + { + _tree.BulkSet(entries, PatriciaTree.Flags.WasSorted); + _tree.UpdateRootHash(); + } + + public void Commit(ValueHash256 upperBound) + { + _adapter.UpperBound = upperBound; + _tree.Commit(writeFlags: WriteFlags.DisableWAL); + } + + public void Dispose() + { + _writeBatch.Dispose(); + _reader.Dispose(); + } + + /// + /// Storage trie store adapter that writes trie nodes AND flat storage entries to IPersistence.IWriteBatch. + /// Uses IPersistenceReader for IsPersisted queries during snap sync. + /// + private class PersistenceStorageTrieStoreAdapter( + IPersistence.IPersistenceReader reader, + IPersistence.IWriteBatch writeBatch, + Hash256 addressHash) : AbstractMinimalTrieStore + { + public override TrieNode FindCachedOrUnknown(in TreePath path, Hash256 hash) => new(NodeType.Unknown, hash); + + public override byte[]? 
TryLoadRlp(in TreePath path, Hash256 hash, ReadFlags flags = ReadFlags.None) => + reader.TryLoadStorageRlp(addressHash, path, flags); + + public override ICommitter BeginCommit(TrieNode? root, WriteFlags writeFlags = WriteFlags.None) => + new StorageCommitter(writeBatch, addressHash); + + private sealed class StorageCommitter(IPersistence.IWriteBatch writeBatch, Hash256 address) : ICommitter + { + public TrieNode CommitNode(ref TreePath path, TrieNode node) + { + writeBatch.SetStorageTrieNode(address, path, node); + FlatEntryWriter.WriteStorageFlatEntries(writeBatch, address, path, node); + return node; + } + + public void Dispose() { } + } + } + +} diff --git a/src/Nethermind/Nethermind.State.Flat/Sync/Snap/FlatSnapTrieFactory.cs b/src/Nethermind/Nethermind.State.Flat/Sync/Snap/FlatSnapTrieFactory.cs new file mode 100644 index 00000000000..36a05304b52 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/Sync/Snap/FlatSnapTrieFactory.cs @@ -0,0 +1,54 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using Nethermind.Core; +using Nethermind.Core.Crypto; +using Nethermind.Logging; +using Nethermind.State.Flat.Persistence; +using Nethermind.Synchronization.SnapSync; + +namespace Nethermind.State.Flat.Sync.Snap; + +/// +/// ISnapTrieFactory implementation for flat state storage. +/// Uses IPersistence to create reader/writeBatch per tree for proper resource management. 
+/// +public class FlatSnapTrieFactory(IPersistence persistence, ILogManager logManager) : ISnapTrieFactory +{ + private readonly ILogger _logger = logManager.GetClassLogger(); + private readonly Lock _lock = new Lock(); + + private bool _initialized = false; + + public ISnapTree CreateStateTree() + { + EnsureDatabaseCleared(); + + IPersistence.IPersistenceReader reader = persistence.CreateReader(); + IPersistence.IWriteBatch writeBatch = persistence.CreateWriteBatch(reader.CurrentState, reader.CurrentState, WriteFlags.DisableWAL); + return new FlatSnapStateTree(reader, writeBatch, logManager); + } + + public ISnapTree CreateStorageTree(in ValueHash256 accountPath) + { + EnsureDatabaseCleared(); + + IPersistence.IPersistenceReader reader = persistence.CreateReader(); + IPersistence.IWriteBatch writeBatch = persistence.CreateWriteBatch(reader.CurrentState, reader.CurrentState, WriteFlags.DisableWAL); + return new FlatSnapStorageTree(reader, writeBatch, accountPath.ToCommitment(), logManager); + } + + private void EnsureDatabaseCleared() + { + if (_initialized) return; + + using (_lock.EnterScope()) + { + if (_initialized) return; + _initialized = true; + + _logger.Info("Clearing database"); + persistence.Clear(); + } + } +} diff --git a/src/Nethermind/Nethermind.State.Flat/Sync/Snap/FlatStateRootIndex.cs b/src/Nethermind/Nethermind.State.Flat/Sync/Snap/FlatStateRootIndex.cs new file mode 100644 index 00000000000..6a98fb3fd93 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/Sync/Snap/FlatStateRootIndex.cs @@ -0,0 +1,84 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using Nethermind.Blockchain; +using Nethermind.Blockchain.Find; +using Nethermind.Core; +using Nethermind.Core.Collections; +using Nethermind.Core.Crypto; + +namespace Nethermind.State.Flat.Sync.Snap; + +/// +/// Tracks mapping from state root hash to StateId for serving snap sync requests. +/// Similar to but stores StateId for lookup. 
+/// +public class FlatStateRootIndex : IFlatStateRootIndex, IDisposable +{ + private readonly IBlockTree _blockTree; + private readonly int _lastN; + private Hash256? _lastQueuedStateRoot; + private Queue _stateRootQueue = new(); + private NonBlocking.ConcurrentDictionary _availableStateRoots = new(); + + public FlatStateRootIndex(IBlockTree blockTree, int lastN) + { + _blockTree = blockTree; + _lastN = lastN; + blockTree.BlockAddedToMain += BlockTreeOnNewHeadBlock; + if (blockTree.Head is not null) ResetAvailableStateRoots(blockTree.Head.Header, true); + } + + private void BlockTreeOnNewHeadBlock(object? sender, BlockEventArgs e) => + ResetAvailableStateRoots(e.Block.Header, false); + + private void ResetAvailableStateRoots(BlockHeader? newHead, bool resetQueue) + { + if (newHead?.StateRoot is null) return; + if (_availableStateRoots.ContainsKey(newHead.StateRoot)) return; + + BlockHeader? parent = _blockTree.FindParentHeader(newHead, BlockTreeLookupOptions.All); + if (parent?.StateRoot is null) return; + + if (!resetQueue && _lastQueuedStateRoot == parent.StateRoot) + { + // Queue is intact - just add the new state root + _availableStateRoots[newHead.StateRoot] = new StateId(newHead); + while (_stateRootQueue.Count >= _lastN && _stateRootQueue.TryDequeue(out Hash256? oldStateRoot)) + { + if (oldStateRoot is not null) + _availableStateRoots.TryRemove(oldStateRoot, out _); + } + _stateRootQueue.Enqueue(newHead.StateRoot); + _lastQueuedStateRoot = newHead.StateRoot; + return; + } + + // Reset the queue and rebuild from scratch + using ArrayPoolList<(Hash256 stateRoot, StateId stateId)> stateRoots = new(128); + NonBlocking.ConcurrentDictionary newStateRootSet = new(); + newStateRootSet[newHead.StateRoot] = new StateId(newHead); + stateRoots.Add((newHead.StateRoot, new StateId(newHead))); + + BlockHeader? 
current = parent; + while (current?.StateRoot is not null && stateRoots.Count < _lastN) + { + StateId stateId = new(current); + newStateRootSet[current.StateRoot] = stateId; + stateRoots.Add((current.StateRoot, stateId)); + current = _blockTree.FindParentHeader(current, BlockTreeLookupOptions.All); + } + + _availableStateRoots = newStateRootSet; + stateRoots.Reverse(); + _stateRootQueue = new Queue(stateRoots.Select(x => x.stateRoot)); + _lastQueuedStateRoot = newHead.StateRoot; + } + + public bool HasStateRoot(Hash256 stateRoot) => _availableStateRoots.ContainsKey(stateRoot); + + public bool TryGetStateId(Hash256 stateRoot, out StateId stateId) => + _availableStateRoots.TryGetValue(stateRoot, out stateId); + + public void Dispose() => _blockTree.BlockAddedToMain -= BlockTreeOnNewHeadBlock; +} diff --git a/src/Nethermind/Nethermind.State.Flat/Sync/Snap/IFlatStateRootIndex.cs b/src/Nethermind/Nethermind.State.Flat/Sync/Snap/IFlatStateRootIndex.cs new file mode 100644 index 00000000000..84b70982122 --- /dev/null +++ b/src/Nethermind/Nethermind.State.Flat/Sync/Snap/IFlatStateRootIndex.cs @@ -0,0 +1,18 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using Nethermind.Core.Crypto; + +namespace Nethermind.State.Flat.Sync.Snap; + +/// +/// Tracks mapping from state root hash to StateId for serving snap sync requests. +/// Similar to but provides StateId for lookup. +/// +public interface IFlatStateRootIndex : ILastNStateRootTracker +{ + /// + /// Try to get the StateId for a given state root hash. 
+ /// + bool TryGetStateId(Hash256 stateRoot, out StateId stateId); +} diff --git a/src/Nethermind/Nethermind.State/PrewarmerScopeProvider.cs b/src/Nethermind/Nethermind.State/PrewarmerScopeProvider.cs index d4260a7f527..186cf6f6a21 100644 --- a/src/Nethermind/Nethermind.State/PrewarmerScopeProvider.cs +++ b/src/Nethermind/Nethermind.State/PrewarmerScopeProvider.cs @@ -19,6 +19,7 @@ internal class PrewarmerGetTimeLabels(bool isPrewarmer) public static PrewarmerGetTimeLabels NonPrewarmer { get; } = new(false); public PrewarmerGetTimeLabel Commit { get; } = new("commit", isPrewarmer); + public PrewarmerGetTimeLabel WriteBatchToScopeDisposeTime { get; } = new("write_batch_to_dispose", isPrewarmer); public PrewarmerGetTimeLabel UpdateRootHash { get; } = new("update_root_hash", isPrewarmer); public PrewarmerGetTimeLabel AddressHit { get; } = new("address_hit", isPrewarmer); public PrewarmerGetTimeLabel AddressMiss { get; } = new("address_miss", isPrewarmer); @@ -51,7 +52,16 @@ private sealed class ScopeWrapper( private readonly bool _measureMetric = Metrics.DetailedMetricsEnabled; private readonly PrewarmerGetTimeLabels _labels = populatePreBlockCache ? 
PrewarmerGetTimeLabels.Prewarmer : PrewarmerGetTimeLabels.NonPrewarmer; - public void Dispose() => baseScope.Dispose(); + private long _writeBatchTime = 0; + + public void Dispose() + { + if (_measureMetric) + { + _metricObserver.Observe(Stopwatch.GetTimestamp() - _writeBatchTime, _labels.WriteBatchToScopeDisposeTime); + } + baseScope.Dispose(); + } public IWorldStateScopeProvider.ICodeDb CodeDb => baseScope.CodeDb; @@ -71,6 +81,7 @@ public IWorldStateScopeProvider.IWorldStateWriteBatch StartWriteBatch(int estima return baseScope.StartWriteBatch(estimatedAccountNum); } + _writeBatchTime = Stopwatch.GetTimestamp(); long sw = Stopwatch.GetTimestamp(); return new WriteBatchLifetimeMeasurer( baseScope.StartWriteBatch(estimatedAccountNum), diff --git a/src/Nethermind/Nethermind.Synchronization.Test/E2ESyncTests.cs b/src/Nethermind/Nethermind.Synchronization.Test/E2ESyncTests.cs index 497c3378c13..618ca58c70f 100644 --- a/src/Nethermind/Nethermind.Synchronization.Test/E2ESyncTests.cs +++ b/src/Nethermind/Nethermind.Synchronization.Test/E2ESyncTests.cs @@ -29,6 +29,7 @@ using Nethermind.Crypto; using Nethermind.Db; using Nethermind.Evm; +using Nethermind.Evm.Tracing; using Nethermind.Int256; using Nethermind.Logging; using Nethermind.Merge.Plugin; @@ -73,12 +74,12 @@ public enum DbMode public static IEnumerable CreateTestCases() { yield return new TestFixtureParameters(DbMode.Default, false); - yield return new TestFixtureParameters(DbMode.Hash, false); - yield return new TestFixtureParameters(DbMode.NoPruning, false); - yield return new TestFixtureParameters(DbMode.Flat, false); yield return new TestFixtureParameters(DbMode.Default, true); + yield return new TestFixtureParameters(DbMode.Hash, false); yield return new TestFixtureParameters(DbMode.Hash, true); + yield return new TestFixtureParameters(DbMode.NoPruning, false); yield return new TestFixtureParameters(DbMode.NoPruning, true); + yield return new TestFixtureParameters(DbMode.Flat, false); yield return new 
TestFixtureParameters(DbMode.Flat, true); } @@ -102,6 +103,7 @@ private int AllocatePort() private async Task CreateNode(PrivateKey nodeKey, Func configurer) { IConfigProvider configProvider = new ConfigProvider(); + configProvider.GetConfig().PreWarmStateOnBlockProcessing = false; var loader = new ChainSpecFileLoader(new EthereumJsonSerializer(), LimboLogs.Instance); ChainSpec spec = loader.LoadEmbeddedOrFromFile("chainspec/foundation.json"); @@ -164,17 +166,20 @@ private async Task CreateNode(PrivateKey nodeKey, Func(); flatDbConfig.Enabled = true; + flatDbConfig.VerifyWithTrie = true; break; } } var builder = new ContainerBuilder() - .AddModule(new PseudoNethermindModule(spec, configProvider, new TestLogManager())) + .AddModule(new PseudoNethermindModule(spec, configProvider, LimboLogs.Instance)) .AddModule(new TestEnvironmentModule(nodeKey, $"{nameof(E2ESyncTests)} {dbMode} {isPostMerge}")) .AddSingleton() .AddSingleton() .AddSingleton() - ; + .AddSingleton() + .AddSingleton(new TestLogManager(LogLevel.Info)) // Put last or it wont work. 
+ .AddDecorator(); if (isPostMerge) { @@ -218,26 +223,9 @@ public async Task SetupServer() SyncTestContext serverCtx = _server.Resolve(); await serverCtx.StartBlockProcessing(cancellationToken); - byte[] spam = Prepare.EvmCode - .ForCreate2Of( - Prepare.EvmCode - .PushData(100) - .PushData(100) - .Op(Instruction.SSTORE) - .PushData(100) - .PushData(101) - .Op(Instruction.SSTORE) - .PushData(100) - .Op(Instruction.SLOAD) - .PushData(101) - .Op(Instruction.SLOAD) - .PushData(102) - .Done) - .Done; - for (int i = 0; i < ChainLength; i++) { - await serverCtx.BuildBlockWithCode([spam, spam, spam], cancellationToken); + await serverCtx.BuildBlockWithStorage(i, cancellationToken); } await serverCtx.StartNetwork(cancellationToken); @@ -270,8 +258,6 @@ public async Task FullSync() [Retry(5)] public async Task FastSync() { - if (dbMode == DbMode.Flat) Assert.Ignore(); - using CancellationTokenSource cancellationTokenSource = new CancellationTokenSource().ThatCancelAfter(TestTimeout); PrivateKey clientKey = TestItem.PrivateKeyC; @@ -305,7 +291,6 @@ private async Task SetPivot(SyncConfig syncConfig, CancellationToken cancellatio [Retry(5)] public async Task SnapSync() { - if (dbMode == DbMode.Flat) Assert.Ignore(); if (dbMode == DbMode.Hash) Assert.Ignore("Hash db does not support snap sync"); using CancellationTokenSource cancellationTokenSource = new CancellationTokenSource().ThatCancelAfter(TestTimeout); @@ -460,16 +445,43 @@ private class SyncTestContext( IBlockProcessingQueue blockProcessingQueue, ITestEnv testEnv, IRlpxHost rlpxHost, + IWorldStateManager worldStateManager, PseudoNethermindRunner runner, - ImmediateDisconnectFailure immediateDisconnectFailure) + ImmediateDisconnectFailure immediateDisconnectFailure, + BlockProcessorExceptionDetector blockProcessorExceptionDetector) { // These check is really slow (it doubles the test time) so its disabled by default. 
private const bool CheckBlocksAndReceiptsContent = false; private const bool VerifyTrieOnFinished = false; + private const int DeployEveryNBlocks = 10; private readonly BlockDecoder _blockDecoder = new BlockDecoder(); private readonly ReceiptsMessageSerializer _receiptsMessageSerializer = new(specProvider); + // Track deployed contracts for storage testing + private readonly List
_deployedContracts = []; + private readonly Random _random = new(42); // Fixed seed for reproducibility + + // Runtime code: SLOAD slot 0, ADD 1, SSTORE to slot 0 + private readonly byte[] _runtimeCode = Prepare.EvmCode + .PushData(0) // slot 0 + .Op(Instruction.SLOAD) // load current value + .PushData(1) // value to add + .Op(Instruction.ADD) // add 1 + .PushData(0) // slot 0 + .Op(Instruction.SSTORE) // store incremented value + .Op(Instruction.STOP) + .Done; + + // Initcode: set initial value in slot 0, then return runtime code + private byte[]? _initCode; + private byte[] InitCode => _initCode ??= Prepare.EvmCode + .PushData(1) // initial value + .PushData(0) // slot 0 + .Op(Instruction.SSTORE) // set initial storage + .ForInitOf(_runtimeCode) // return runtime code + .Done; + public async Task StartBlockProcessing(CancellationToken cancellationToken) { await runner.StartBlockProcessing(cancellationToken); @@ -495,9 +507,8 @@ private async Task ConnectTo(IContainer server, CancellationToken cancellationTo public async Task BuildBlockWithCode(byte[][] codes, CancellationToken cancellation) { // 1 000 000 000 - long gasLimit = 100000; + long gasLimit = 1_000_000; - Hash256 stateRoot = blockTree.Head?.StateRoot!; nonces.TryGetValue(nodeKey.Address, out UInt256 currentNonce); IReleaseSpec spec = specProvider.GetSpec((blockTree.Head?.Number) + 1 ?? 0, null); Transaction[] txs = codes.Select((byteCode) => Build.A.Transaction @@ -511,6 +522,46 @@ public async Task BuildBlockWithCode(byte[][] codes, CancellationToken cancellat await testEnv.BuildBlockWithTxs(txs, cancellation); } + public async Task BuildBlockWithStorage(int blockNumber, CancellationToken cancellation) + { + long gasLimit = 200_000; + + nonces.TryGetValue(nodeKey.Address, out UInt256 currentNonce); + IReleaseSpec spec = specProvider.GetSpec((blockTree.Head?.Number ?? 
0) + 1, null); + + Transaction tx; + + if (blockNumber % DeployEveryNBlocks == 0 || _deployedContracts.Count == 0) + { + // Deploy new contract + tx = Build.A.Transaction + .WithCode(InitCode) + .WithNonce(currentNonce++) + .WithGasLimit(gasLimit) + .WithGasPrice(10.GWei()) + .SignedAndResolved(ecdsa, nodeKey, spec.IsEip155Enabled).TestObject; + + // Calculate deployed address and track it + Address deployedAddress = ContractAddress.From(nodeKey.Address, currentNonce - 1); + _deployedContracts.Add(deployedAddress); + } + else + { + // Call random existing contract + Address target = _deployedContracts[_random.Next(_deployedContracts.Count)]; + tx = Build.A.Transaction + .WithTo(target) + .WithData([]) + .WithNonce(currentNonce++) + .WithGasLimit(gasLimit) + .WithGasPrice(10.GWei()) + .SignedAndResolved(ecdsa, nodeKey, spec.IsEip155Enabled).TestObject; + } + + nonces[nodeKey.Address] = currentNonce; + await testEnv.BuildBlockWithTxs([tx], cancellation); + } + private async Task VerifyHeadWith(IContainer server, CancellationToken cancellationToken) { await blockProcessingQueue.WaitForBlockProcessing(cancellationToken); @@ -590,12 +641,25 @@ public async Task SyncFromServer(IContainer server, CancellationToken cancellati { await immediateDisconnectFailure.WatchForDisconnection(async (token) => { - await runner.StartNetwork(token); - await ConnectTo(server, token); - await testEnv.SyncUntilFinished(server, token); - await VerifyHeadWith(server, token); - await VerifyAllBlocksAndReceipts(server, token); + await blockProcessorExceptionDetector.WatchForFailure(async (token) => + { + await runner.StartNetwork(token); + await ConnectTo(server, token); + await testEnv.SyncUntilFinished(server, token); + await VerifyHeadWith(server, token); + await VerifyAllBlocksAndReceipts(server, token); + }, token); }, cancellationToken); + + cancellationToken.ThrowIfCancellationRequested(); + + // On flat, verify trie only work with persistence + 
worldStateManager.FlushCache(cancellationToken); + + BlockHeader? head = blockTree.Head?.Header; + Console.Error.WriteLine($"On {head?.ToString(BlockHeader.Format.Short)}"); + bool stateVerified = worldStateManager.VerifyTrie(head!, cancellationToken); + Assert.That(stateVerified, Is.True); } } @@ -626,4 +690,57 @@ public async Task WatchForDisconnection(Func act, Cance } } } + + internal class BlockProcessorExceptionDetector + { + internal static void Configure(ContainerBuilder builder) + { + builder.AddSingleton() + .AddDecorator(); + } + + private Exception? BlockProcessingFailure; + private CancellationTokenSource _cts = new CancellationTokenSource(); + + private void ReportException(Exception exception) + { + BlockProcessingFailure = exception; + _cts.Cancel(); + } + + public async Task WatchForFailure(Func act, CancellationToken cancellationToken) + { + using var cts = CancellationTokenSource.CreateLinkedTokenSource(_cts.Token, cancellationToken); + try + { + await act(cts.Token); + if (BlockProcessingFailure != null) Assert.Fail($"Block processing failure detected. {BlockProcessingFailure}"); + } + catch (OperationCanceledException) + { + if (BlockProcessingFailure == null) throw; // Timeout without disconnect + Assert.Fail($"Block processing failure detected. 
{BlockProcessingFailure}"); + } + } + + internal class BlockProcessorInterceptor( + IBlockProcessor blockProcessor, + BlockProcessorExceptionDetector blockProcessorExceptionDetector) : IBlockProcessor + { + + public (Block Block, TxReceipt[] Receipts) ProcessOne(Block suggestedBlock, ProcessingOptions options, + IBlockTracer blockTracer, IReleaseSpec spec, CancellationToken token = default) + { + try + { + return blockProcessor.ProcessOne(suggestedBlock, options, blockTracer, spec, token); + } + catch (Exception ex) + { + blockProcessorExceptionDetector.ReportException(ex); + throw; + } + } + } + } } diff --git a/src/Nethermind/Nethermind.Synchronization.Test/FastSync/FlatLocalDbContext.cs b/src/Nethermind/Nethermind.Synchronization.Test/FastSync/FlatLocalDbContext.cs new file mode 100644 index 00000000000..9fcc09fcc1c --- /dev/null +++ b/src/Nethermind/Nethermind.Synchronization.Test/FastSync/FlatLocalDbContext.cs @@ -0,0 +1,180 @@ +// SPDX-FileCopyrightText: 2026 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using FluentAssertions; +using Nethermind.Core; +using Nethermind.Core.Crypto; +using Nethermind.Logging; +using Nethermind.State; +using Nethermind.State.Flat.Persistence; +using Nethermind.State.Flat.ScopeProvider; +using Nethermind.State.Flat.Sync; +using Nethermind.Trie; +using Nethermind.Trie.Pruning; +using NUnit.Framework; + +namespace Nethermind.Synchronization.Test.FastSync; + +public class FlatLocalDbContext(IPersistence persistence, ILogManager logManager) : IStateSyncTestOperation +{ + public Hash256 RootHash + { + get + { + using IPersistence.IPersistenceReader reader = persistence.CreateReader(); + return reader.CurrentState.StateRoot.ToHash256(); + } + } + + public void UpdateRootHash() + { + using IPersistence.IPersistenceReader reader = persistence.CreateReader(); + using IPersistence.IWriteBatch writeBatch = persistence.CreateWriteBatch(reader.CurrentState, reader.CurrentState); + WritableTrieStore adapter = 
new(reader, writeBatch); + StateTree tree = new(adapter, logManager); + tree.UpdateRootHash(); + tree.Commit(); + } + + public void SetAccountsAndCommit(params (Hash256 Address, Account? Account)[] accounts) + { + using IPersistence.IPersistenceReader reader = persistence.CreateReader(); + using IPersistence.IWriteBatch writeBatch = persistence.CreateWriteBatch(reader.CurrentState, reader.CurrentState); + WritableTrieStore adapter = new(reader, writeBatch); + StateTree tree = new(adapter, logManager); + + foreach (var (address, account) in accounts) + tree.Set(address, account); + tree.Commit(); + } + + public void AssertFlushed() + { + // For flat, sync finalization writes to persistence. Verify root node exists. + using IPersistence.IPersistenceReader reader = persistence.CreateReader(); + reader.TryLoadStateRlp(TreePath.Empty, ReadFlags.None).Should().NotBeNull("root node should exist after flush"); + } + + public void CompareTrees(RemoteDbContext remote, ILogger logger, string stage, bool skipLogs = false) + { + if (!skipLogs) logger.Info($"==================== {stage} ===================="); + + using IPersistence.IPersistenceReader reader = persistence.CreateReader(); + StateTree localTree = new(new ReadOnlyTrieStore(reader), logManager); + localTree.RootHash = remote.StateTree.RootHash; + + if (!skipLogs) logger.Info("-------------------- REMOTE --------------------"); + TreeDumper dumper = new(); + remote.StateTree.Accept(dumper, remote.StateTree.RootHash); + string remoteStr = dumper.ToString(); + if (!skipLogs) logger.Info(remoteStr); + if (!skipLogs) logger.Info("-------------------- LOCAL --------------------"); + dumper.Reset(); + localTree.Accept(dumper, localTree.RootHash); + string localStr = dumper.ToString(); + if (!skipLogs) logger.Info(localStr); + + if (stage == "END") + { + Assert.That(localStr, Is.EqualTo(remoteStr), $"{stage}\n{remoteStr}\n{localStr}"); + } + } + + public void DeleteStateRoot() + { + using IPersistence.IPersistenceReader 
reader = persistence.CreateReader(); + using IPersistence.IWriteBatch writeBatch = persistence.CreateWriteBatch(reader.CurrentState, reader.CurrentState); + writeBatch.DeleteStateTrieNodeRange(TreePath.Empty, TreePath.Empty); + } + + /// + /// Read-only trie store for reading state trie nodes from flat persistence. + /// + private class ReadOnlyTrieStore(IPersistence.IPersistenceReader reader) : AbstractMinimalTrieStore + { + public override TrieNode FindCachedOrUnknown(in TreePath path, Hash256 hash) => + new(NodeType.Unknown, hash); + + public override byte[]? TryLoadRlp(in TreePath path, Hash256 hash, ReadFlags flags = ReadFlags.None) => + reader.TryLoadStateRlp(path, flags); + + public override ITrieNodeResolver GetStorageTrieNodeResolver(Hash256? address) => + address is null ? this : new ReadOnlyStorageTrieStore(reader, address); + } + + /// + /// Read-only trie store for reading storage trie nodes from flat persistence. + /// + private class ReadOnlyStorageTrieStore(IPersistence.IPersistenceReader reader, Hash256 address) : AbstractMinimalTrieStore + { + public override TrieNode FindCachedOrUnknown(in TreePath path, Hash256 hash) => + new(NodeType.Unknown, hash); + + public override byte[]? TryLoadRlp(in TreePath path, Hash256 hash, ReadFlags flags = ReadFlags.None) => + reader.TryLoadStorageRlp(address, path, flags); + } + + /// + /// Writable trie store that writes trie nodes and flat entries to persistence. + /// + private class WritableTrieStore( + IPersistence.IPersistenceReader reader, + IPersistence.IWriteBatch writeBatch) : AbstractMinimalTrieStore + { + public override TrieNode FindCachedOrUnknown(in TreePath path, Hash256 hash) => + new(NodeType.Unknown, hash); + + public override byte[]? TryLoadRlp(in TreePath path, Hash256 hash, ReadFlags flags = ReadFlags.None) => + reader.TryLoadStateRlp(path, flags); + + public override ICommitter BeginCommit(TrieNode? 
root, WriteFlags writeFlags = WriteFlags.None) => + new StateCommitter(writeBatch); + + public override ITrieNodeResolver GetStorageTrieNodeResolver(Hash256? address) => + address is null ? this : new WritableStorageTrieStore(reader, writeBatch, address); + + private sealed class StateCommitter(IPersistence.IWriteBatch writeBatch) : ICommitter + { + public TrieNode CommitNode(ref TreePath path, TrieNode node) + { + writeBatch.SetStateTrieNode(path, node); + FlatEntryWriter.WriteAccountFlatEntries(writeBatch, path, node); + return node; + } + + public void Dispose() { } + public bool TryRequestConcurrentQuota() => false; + public void ReturnConcurrencyQuota() { } + } + } + + /// + /// Writable storage trie store that writes trie nodes and flat entries to persistence. + /// + private class WritableStorageTrieStore( + IPersistence.IPersistenceReader reader, + IPersistence.IWriteBatch writeBatch, + Hash256 address) : AbstractMinimalTrieStore + { + public override TrieNode FindCachedOrUnknown(in TreePath path, Hash256 hash) => + new(NodeType.Unknown, hash); + + public override byte[]? TryLoadRlp(in TreePath path, Hash256 hash, ReadFlags flags = ReadFlags.None) => + reader.TryLoadStorageRlp(address, path, flags); + + public override ICommitter BeginCommit(TrieNode? 
root, WriteFlags writeFlags = WriteFlags.None) => + new StorageCommitter(writeBatch, address); + + private sealed class StorageCommitter(IPersistence.IWriteBatch writeBatch, Hash256 address) : ICommitter + { + public TrieNode CommitNode(ref TreePath path, TrieNode node) + { + writeBatch.SetStorageTrieNode(address, path, node); + FlatEntryWriter.WriteStorageFlatEntries(writeBatch, address, path, node); + return node; + } + + public void Dispose() { } + } + } +} diff --git a/src/Nethermind/Nethermind.Synchronization.Test/FastSync/IStateSyncTestOperation.cs b/src/Nethermind/Nethermind.Synchronization.Test/FastSync/IStateSyncTestOperation.cs new file mode 100644 index 00000000000..4ccbb17e0f4 --- /dev/null +++ b/src/Nethermind/Nethermind.Synchronization.Test/FastSync/IStateSyncTestOperation.cs @@ -0,0 +1,18 @@ +// SPDX-FileCopyrightText: 2026 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using Nethermind.Core; +using Nethermind.Core.Crypto; +using Nethermind.Logging; + +namespace Nethermind.Synchronization.Test.FastSync; + +public interface IStateSyncTestOperation +{ + Hash256 RootHash { get; } + void UpdateRootHash(); + void SetAccountsAndCommit(params (Hash256 Address, Account? 
Account)[] accounts); + void AssertFlushed(); + void CompareTrees(RemoteDbContext remote, ILogger logger, string stage, bool skipLogs = false); + void DeleteStateRoot(); +} diff --git a/src/Nethermind/Nethermind.Synchronization.Test/FastSync/LocalDbContext.cs b/src/Nethermind/Nethermind.Synchronization.Test/FastSync/LocalDbContext.cs new file mode 100644 index 00000000000..c293240981d --- /dev/null +++ b/src/Nethermind/Nethermind.Synchronization.Test/FastSync/LocalDbContext.cs @@ -0,0 +1,81 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System; +using Autofac.Features.AttributeFilters; +using FluentAssertions; +using Nethermind.Core; +using Nethermind.Core.Crypto; +using Nethermind.Core.Test; +using Nethermind.Db; +using Nethermind.Logging; +using Nethermind.State; +using Nethermind.Synchronization.FastSync; +using Nethermind.Trie; +using NUnit.Framework; + +namespace Nethermind.Synchronization.Test.FastSync; + +public class LocalDbContext( + [KeyFilter(DbNames.Code)] IDb codeDb, + [KeyFilter(DbNames.State)] IDb stateDb, + INodeStorage nodeStorage, + ILogManager logManager) + : IStateSyncTestOperation +{ + private TestMemDb CodeDb { get; } = (TestMemDb)codeDb; + private TestMemDb Db { get; } = (TestMemDb)stateDb; + private INodeStorage NodeStorage { get; } = nodeStorage; + private StateTree StateTree { get; } = new(TestTrieStoreFactory.Build(nodeStorage, logManager), logManager); + + public Hash256 RootHash + { + get => StateTree.RootHash; + } + + public void UpdateRootHash() => StateTree.UpdateRootHash(); + + public void SetAccountsAndCommit(params (Hash256 Address, Account? 
Account)[] accounts) + { + foreach (var (address, account) in accounts) + StateTree.Set(address, account); + StateTree.Commit(); + } + + public void AssertFlushed() + { + Db.WasFlushed.Should().BeTrue(); + CodeDb.WasFlushed.Should().BeTrue(); + } + + public void CompareTrees(RemoteDbContext remote, ILogger logger, string stage, bool skipLogs = false) + { + if (!skipLogs) logger.Info($"==================== {stage} ===================="); + StateTree.RootHash = remote.StateTree.RootHash; + + if (!skipLogs) logger.Info("-------------------- REMOTE --------------------"); + TreeDumper dumper = new TreeDumper(); + remote.StateTree.Accept(dumper, remote.StateTree.RootHash); + string remoteStr = dumper.ToString(); + if (!skipLogs) logger.Info(remoteStr); + if (!skipLogs) logger.Info("-------------------- LOCAL --------------------"); + dumper.Reset(); + StateTree.Accept(dumper, StateTree.RootHash); + string localStr = dumper.ToString(); + if (!skipLogs) logger.Info(localStr); + + if (stage == "END") + { + Assert.That(localStr, Is.EqualTo(remoteStr), $"{stage}{Environment.NewLine}{remoteStr}{Environment.NewLine}{localStr}"); + TrieStatsCollector collector = new(CodeDb, LimboLogs.Instance); + StateTree.Accept(collector, StateTree.RootHash); + Assert.That(collector.Stats.MissingNodes, Is.EqualTo(0)); + Assert.That(collector.Stats.MissingCode, Is.EqualTo(0)); + } + } + + public void DeleteStateRoot() + { + NodeStorage.Set(null, TreePath.Empty, RootHash, null); + } +} diff --git a/src/Nethermind/Nethermind.Synchronization.Test/FastSync/StateSyncFeedHealingTests.cs b/src/Nethermind/Nethermind.Synchronization.Test/FastSync/StateSyncFeedHealingTests.cs index 92ffa310957..8215cfc60c2 100644 --- a/src/Nethermind/Nethermind.Synchronization.Test/FastSync/StateSyncFeedHealingTests.cs +++ b/src/Nethermind/Nethermind.Synchronization.Test/FastSync/StateSyncFeedHealingTests.cs @@ -12,34 +12,40 @@ using Nethermind.State; using Nethermind.State.Proofs; using Nethermind.State.Snap; +using 
Nethermind.Logging; using Nethermind.Synchronization.FastSync; using Nethermind.Synchronization.SnapSync; using NUnit.Framework; namespace Nethermind.Synchronization.Test.FastSync; -[Parallelizable(ParallelScope.All)] [FixtureLifeCycle(LifeCycle.InstancePerTestCase)] -public class StateSyncFeedHealingTests : StateSyncFeedTestsBase +[TestFixture(false)] +[TestFixture(true)] +[Parallelizable(ParallelScope.All)] +public class StateSyncFeedHealingTests(bool useFlat) : StateSyncFeedTestsBase(useFlat) { [Test] public async Task HealTreeWithoutBoundaryProofs() { - DbContext dbContext = new DbContext(_logger, _logManager); - TestItem.Tree.FillStateTreeWithTestAccounts(dbContext.RemoteStateTree); + RemoteDbContext remote = new(_logManager); + TestItem.Tree.FillStateTreeWithTestAccounts(remote.StateTree); + + Hash256 rootHash = remote.StateTree.RootHash; - Hash256 rootHash = dbContext.RemoteStateTree.RootHash; + await using IContainer container = PrepareDownloader(remote); + var local = container.Resolve(); + ISnapTrieFactory snapTrieFactory = container.Resolve(); - ProcessAccountRange(dbContext.RemoteStateTree, dbContext.LocalStateTree, 1, rootHash, TestItem.Tree.AccountsWithPaths); + ProcessAccountRange(remote.StateTree, snapTrieFactory, 1, rootHash, TestItem.Tree.AccountsWithPaths); - await using IContainer container = PrepareDownloader(dbContext); SafeContext ctx = container.Resolve(); await ActivateAndWait(ctx); DetailedProgress data = ctx.TreeFeed.GetDetailedProgress(); - dbContext.CompareTrees("END"); - Assert.That(dbContext.LocalStateTree.RootHash, Is.EqualTo(dbContext.RemoteStateTree.RootHash)); + local.CompareTrees(remote, _logger, "END"); + Assert.That(local.RootHash, Is.EqualTo(remote.StateTree.RootHash)); // I guess state root will be requested regardless Assert.That(data.RequestedNodesCount, Is.EqualTo(1)); // 4 boundary proof nodes stitched together => 0 @@ -48,11 +54,10 @@ public async Task HealTreeWithoutBoundaryProofs() [Test] public async Task 
HealBigSqueezedRandomTree() { - DbContext dbContext = new DbContext(_logger, _logManager); + RemoteDbContext remote = new(_logManager); int pathPoolCount = 100_000; Hash256[] pathPool = new Hash256[pathPoolCount]; - SortedDictionary accounts = new(); for (int i = 0; i < pathPoolCount; i++) { @@ -63,33 +68,35 @@ public async Task HealBigSqueezedRandomTree() pathPool[i] = keccak; } - // generate Remote Tree + int blockJumps = 5; + + // Store accounts snapshot at each block number + SortedDictionary[] accountsAtBlock = new SortedDictionary[blockJumps + 1]; + Hash256[] rootHashAtBlock = new Hash256[blockJumps + 1]; + + // Initialize accounts + SortedDictionary accounts = new(); + + // Generate initial Remote Tree (block 0) for (int accountIndex = 0; accountIndex < 10000; accountIndex++) { Account account = TestItem.GenerateRandomAccount(); Hash256 path = pathPool[TestItem.Random.Next(pathPool.Length - 1)]; - dbContext.RemoteStateTree.Set(path, account); + remote.StateTree.Set(path, account); accounts[path] = account; } - dbContext.RemoteStateTree.Commit(); + remote.StateTree.Commit(); - int startingHashIndex = 0; - int endHashIndex; - int blockJumps = 5; + // Pre-build all blocks and store state at each block for (int blockNumber = 1; blockNumber <= blockJumps; blockNumber++) { - for (int i = 0; i < 19; i++) - { - endHashIndex = startingHashIndex + 1000; - - ProcessAccountRange(dbContext.RemoteStateTree, dbContext.LocalStateTree, blockNumber, dbContext.RemoteStateTree.RootHash, - accounts.Where(a => a.Key >= pathPool[startingHashIndex] && a.Key <= pathPool[endHashIndex]).Select(a => new PathWithAccount(a.Key, a.Value)).ToArray()); - - startingHashIndex = endHashIndex + 1; - } + // Store snapshot of accounts and root hash at this block + accountsAtBlock[blockNumber] = new SortedDictionary(accounts); + rootHashAtBlock[blockNumber] = remote.StateTree.RootHash; + // Modify tree for next block for (int accountIndex = 0; accountIndex < 1000; accountIndex++) { Account account 
= TestItem.GenerateRandomAccount(); @@ -99,27 +106,56 @@ public async Task HealBigSqueezedRandomTree() { if (TestItem.Random.NextSingle() > 0.5) { - dbContext.RemoteStateTree.Set(path, account); + remote.StateTree.Set(path, account); accounts[path] = account; } else { - dbContext.RemoteStateTree.Set(path, null); + remote.StateTree.Set(path, null); accounts.Remove(path); } - - } else { - dbContext.RemoteStateTree.Set(path, account); + remote.StateTree.Set(path, account); accounts[path] = account; } } - dbContext.RemoteStateTree.Commit(); + remote.StateTree.Commit(); } + // Final state root + Hash256 finalRootHash = remote.StateTree.RootHash; + + await using IContainer container = PrepareDownloader(remote, syncDispatcherAllocateTimeoutMs: 1000); + var local = container.Resolve(); + ISnapTrieFactory snapTrieFactory = container.Resolve(); + + int startingHashIndex = 0; + int endHashIndex; + + // Now process account ranges using stored snapshots + for (int blockNumber = 1; blockNumber <= blockJumps; blockNumber++) + { + // Set remote tree to the state at this block number + remote.StateTree.RootHash = rootHashAtBlock[blockNumber]; + SortedDictionary blockAccounts = accountsAtBlock[blockNumber]; + + for (int i = 0; i < 19; i++) + { + endHashIndex = startingHashIndex + 1000; + + ProcessAccountRange(remote.StateTree, snapTrieFactory, blockNumber, rootHashAtBlock[blockNumber], + blockAccounts.Where(a => a.Key >= pathPool[startingHashIndex] && a.Key <= pathPool[endHashIndex]).Select(a => new PathWithAccount(a.Key, a.Value)).ToArray()); + + startingHashIndex = endHashIndex + 1; + } + } + + // Set remote tree back to final state for remaining processing + remote.StateTree.RootHash = finalRootHash; + endHashIndex = startingHashIndex + 1000; while (endHashIndex < pathPool.Length - 1) { @@ -129,28 +165,24 @@ public async Task HealBigSqueezedRandomTree() endHashIndex = pathPool.Length - 1; } - ProcessAccountRange(dbContext.RemoteStateTree, dbContext.LocalStateTree, blockJumps, 
dbContext.RemoteStateTree.RootHash, + ProcessAccountRange(remote.StateTree, snapTrieFactory, blockJumps, finalRootHash, accounts.Where(a => a.Key >= pathPool[startingHashIndex] && a.Key <= pathPool[endHashIndex]).Select(a => new PathWithAccount(a.Key, a.Value)).ToArray()); - startingHashIndex += 1000; } - dbContext.LocalStateTree.RootHash = dbContext.RemoteStateTree.RootHash; - - await using IContainer container = PrepareDownloader(dbContext, syncDispatcherAllocateTimeoutMs: 1000); SafeContext ctx = container.Resolve(); await ActivateAndWait(ctx, timeout: 20000); DetailedProgress data = ctx.TreeFeed.GetDetailedProgress(); - dbContext.LocalStateTree.UpdateRootHash(); - dbContext.CompareTrees("END"); + local.UpdateRootHash(); + local.CompareTrees(remote, _logger, "END"); _logger.Info($"REQUESTED NODES TO HEAL: {data.RequestedNodesCount}"); Assert.That(data.RequestedNodesCount, Is.LessThan(accounts.Count / 2)); } - private static void ProcessAccountRange(StateTree remoteStateTree, StateTree localStateTree, int blockNumber, Hash256 rootHash, PathWithAccount[] accounts) + private static void ProcessAccountRange(StateTree remoteStateTree, ISnapTrieFactory snapTrieFactory, int blockNumber, Hash256 rootHash, PathWithAccount[] accounts) { ValueHash256 startingHash = accounts.First().Path; ValueHash256 endHash = accounts.Last().Path; @@ -163,6 +195,6 @@ private static void ProcessAccountRange(StateTree remoteStateTree, StateTree loc remoteStateTree.Accept(accountProofCollector, remoteStateTree.RootHash); byte[][] lastProof = accountProofCollector.BuildResult().Proof!; - _ = SnapProviderHelper.AddAccountRange(localStateTree, blockNumber, rootHash, startingHash, limitHash, accounts, firstProof.Concat(lastProof).ToArray()); + _ = SnapProviderHelper.AddAccountRange(snapTrieFactory, blockNumber, rootHash, startingHash, limitHash, accounts, firstProof.Concat(lastProof).ToArray()); } } diff --git a/src/Nethermind/Nethermind.Synchronization.Test/FastSync/StateSyncFeedTests.cs 
b/src/Nethermind/Nethermind.Synchronization.Test/FastSync/StateSyncFeedTests.cs index c7b0ca1aed6..ca33ab24d8f 100644 --- a/src/Nethermind/Nethermind.Synchronization.Test/FastSync/StateSyncFeedTests.cs +++ b/src/Nethermind/Nethermind.Synchronization.Test/FastSync/StateSyncFeedTests.cs @@ -26,13 +26,17 @@ namespace Nethermind.Synchronization.Test.FastSync { - [TestFixture(1, 0)] - [TestFixture(1, 100)] - [TestFixture(4, 0)] - [TestFixture(4, 100)] + [TestFixture(false, 1, 0)] + [TestFixture(false, 1, 100)] + [TestFixture(false, 4, 0)] + [TestFixture(false, 4, 100)] + [TestFixture(true, 1, 0)] [Parallelizable(ParallelScope.Fixtures)] - public class StateSyncFeedTests(int peerCount, int maxNodeLatency) - : StateSyncFeedTestsBase(peerCount, maxNodeLatency) + public class StateSyncFeedTests( + bool useFlat, + int peerCount, + int maxNodeLatency) + : StateSyncFeedTestsBase(useFlat, peerCount, maxNodeLatency) { // Useful for set and forget run. But this test is taking a long time to have it set to other than 1. 
private const int TestRepeatCount = 1; @@ -43,60 +47,54 @@ public class StateSyncFeedTests(int peerCount, int maxNodeLatency) [Explicit("This test is not stable, especially on slow Github Actions machines")] public async Task Big_test((string Name, Action SetupTree) testCase) { - DbContext dbContext = new(_logger, _logManager) - { - RemoteCodeDb = - { - [Keccak.Compute(TrieScenarios.Code0).Bytes] = TrieScenarios.Code0, - [Keccak.Compute(TrieScenarios.Code1).Bytes] = TrieScenarios.Code1, - [Keccak.Compute(TrieScenarios.Code2).Bytes] = TrieScenarios.Code2, - [Keccak.Compute(TrieScenarios.Code3).Bytes] = TrieScenarios.Code3, - }, - }; - testCase.SetupTree(dbContext.RemoteStateTree, dbContext.RemoteTrieStore, dbContext.RemoteCodeDb); + RemoteDbContext remote = new(_logManager); + remote.CodeDb[Keccak.Compute(TrieScenarios.Code0).Bytes] = TrieScenarios.Code0; + remote.CodeDb[Keccak.Compute(TrieScenarios.Code1).Bytes] = TrieScenarios.Code1; + remote.CodeDb[Keccak.Compute(TrieScenarios.Code2).Bytes] = TrieScenarios.Code2; + remote.CodeDb[Keccak.Compute(TrieScenarios.Code3).Bytes] = TrieScenarios.Code3; + testCase.SetupTree(remote.StateTree, remote.TrieStore, remote.CodeDb); - dbContext.CompareTrees("BEFORE FIRST SYNC", true); + await using IContainer container = PrepareDownloader(remote, mock => + mock.SetFilter(((MemDb)remote.StateDb).Keys.Take(((MemDb)remote.StateDb).Keys.Count - 4).Select(HashKey).ToArray())); + var local = container.Resolve(); - await using IContainer container = PrepareDownloader(dbContext, mock => - mock.SetFilter(((MemDb)dbContext.RemoteStateDb).Keys.Take(((MemDb)dbContext.RemoteStateDb).Keys.Count - 4).Select(HashKey).ToArray())); + local.CompareTrees(remote, _logger, "BEFORE FIRST SYNC", true); SafeContext ctx = container.Resolve(); await ActivateAndWait(ctx); - dbContext.CompareTrees("AFTER FIRST SYNC", true); + local.CompareTrees(remote, _logger, "AFTER FIRST SYNC", true); - dbContext.LocalStateTree.RootHash = 
dbContext.RemoteStateTree.RootHash; for (byte i = 0; i < 8; i++) - dbContext.RemoteStateTree + remote.StateTree .Set(TestItem.Addresses[i], TrieScenarios.AccountJustState0.WithChangedBalance(i) .WithChangedNonce(1) .WithChangedCodeHash(Keccak.Compute(TrieScenarios.Code3)) - .WithChangedStorageRoot(SetStorage(dbContext.RemoteTrieStore, i, TestItem.Addresses[i]).RootHash)); + .WithChangedStorageRoot(SetStorage(remote.TrieStore, i, TestItem.Addresses[i]).RootHash)); - dbContext.RemoteStateTree.UpdateRootHash(); - dbContext.RemoteStateTree.Commit(); + remote.StateTree.UpdateRootHash(); + remote.StateTree.Commit(); - await ctx.SuggestBlocksWithUpdatedRootHash(dbContext.RemoteStateTree.RootHash); + await ctx.SuggestBlocksWithUpdatedRootHash(remote.StateTree.RootHash); ctx.Feed.FallAsleep(); ctx.Pool.WakeUpAll(); await ActivateAndWait(ctx); - dbContext.CompareTrees("AFTER SECOND SYNC", true); + local.CompareTrees(remote, _logger, "AFTER SECOND SYNC", true); - dbContext.LocalStateTree.RootHash = dbContext.RemoteStateTree.RootHash; for (byte i = 0; i < 16; i++) - dbContext.RemoteStateTree + remote.StateTree .Set(TestItem.Addresses[i], TrieScenarios.AccountJustState0.WithChangedBalance(i) .WithChangedNonce(2) .WithChangedCodeHash(Keccak.Compute(TrieScenarios.Code3)) - .WithChangedStorageRoot(SetStorage(dbContext.RemoteTrieStore, (byte)(i % 7), TestItem.Addresses[i]).RootHash)); + .WithChangedStorageRoot(SetStorage(remote.TrieStore, (byte)(i % 7), TestItem.Addresses[i]).RootHash)); - dbContext.RemoteStateTree.UpdateRootHash(); - dbContext.RemoteStateTree.Commit(); + remote.StateTree.UpdateRootHash(); + remote.StateTree.Commit(); - await ctx.SuggestBlocksWithUpdatedRootHash(dbContext.RemoteStateTree.RootHash); + await ctx.SuggestBlocksWithUpdatedRootHash(remote.StateTree.RootHash); ctx.Feed.FallAsleep(); ctx.Pool.WakeUpAll(); @@ -107,8 +105,8 @@ public async Task Big_test((string Name, Action Setu await ActivateAndWait(ctx); - dbContext.CompareTrees("END"); - 
dbContext.AssertFlushed(); + local.CompareTrees(remote, _logger, "END"); + local.AssertFlushed(); } private static Hash256 HashKey(byte[] k) @@ -121,28 +119,30 @@ private static Hash256 HashKey(byte[] k) [Repeat(TestRepeatCount)] public async Task Can_download_a_full_state((string Name, Action SetupTree) testCase) { - DbContext dbContext = new(_logger, _logManager); - testCase.SetupTree(dbContext.RemoteStateTree, dbContext.RemoteTrieStore, dbContext.RemoteCodeDb); + RemoteDbContext remote = new(_logManager); + testCase.SetupTree(remote.StateTree, remote.TrieStore, remote.CodeDb); + await using IContainer container = PrepareDownloader(remote); + var local = container.Resolve(); - dbContext.CompareTrees("BEGIN"); + local.CompareTrees(remote, _logger, "BEGIN"); - await using IContainer container = PrepareDownloader(dbContext); SafeContext ctx = container.Resolve(); await ActivateAndWait(ctx); - dbContext.CompareTrees("END"); + local.CompareTrees(remote, _logger, "END"); } [Test] [Repeat(TestRepeatCount)] public async Task Can_download_an_empty_tree() { - DbContext dbContext = new(_logger, _logManager); - await using IContainer container = PrepareDownloader(dbContext); + RemoteDbContext remote = new(_logManager); + await using IContainer container = PrepareDownloader(remote); + var local = container.Resolve(); SafeContext ctx = container.Resolve(); await ActivateAndWait(ctx); - dbContext.CompareTrees("END"); + local.CompareTrees(remote, _logger, "END"); } [Test] @@ -150,16 +150,15 @@ public async Task Can_download_an_empty_tree() [Repeat(TestRepeatCount)] public async Task Can_download_in_multiple_connections((string Name, Action SetupTree) testCase) { - DbContext dbContext = new(_logger, _logManager); - testCase.SetupTree(dbContext.RemoteStateTree, dbContext.RemoteTrieStore, dbContext.RemoteCodeDb); - + RemoteDbContext remote = new(_logManager); + testCase.SetupTree(remote.StateTree, remote.TrieStore, remote.CodeDb); - await using IContainer container = 
PrepareDownloader(dbContext, mock => - mock.SetFilter(new[] { dbContext.RemoteStateTree.RootHash })); + await using IContainer container = PrepareDownloader(remote, mock => + mock.SetFilter(new[] { remote.StateTree.RootHash })); + var local = container.Resolve(); SafeContext ctx = container.Resolve(); await ActivateAndWait(ctx, 1000); - ctx.Pool.WakeUpAll(); foreach (SyncPeerMock mock in ctx.SyncPeerMocks) { @@ -169,8 +168,7 @@ public async Task Can_download_in_multiple_connections((string Name, Action SetupTree) testCase) { - DbContext dbContext = new(_logger, _logManager); - testCase.SetupTree(dbContext.RemoteStateTree, dbContext.RemoteTrieStore, dbContext.RemoteCodeDb); + RemoteDbContext remote = new(_logManager); + testCase.SetupTree(remote.StateTree, remote.TrieStore, remote.CodeDb); + await using IContainer container = PrepareDownloader(remote, static mock => mock.MaxResponseLength = 1); + var local = container.Resolve(); - dbContext.CompareTrees("BEGIN"); + local.CompareTrees(remote, _logger, "BEGIN"); - await using IContainer container = PrepareDownloader(dbContext, static mock => mock.MaxResponseLength = 1); SafeContext ctx = container.Resolve(); await ActivateAndWait(ctx); - - dbContext.CompareTrees("END"); + local.CompareTrees(remote, _logger, "END"); } [Test] public async Task When_saving_root_goes_asleep_and_then_restart_to_new_tree_when_reactivated() { - DbContext dbContext = new(_logger, _logManager); - dbContext.RemoteStateTree.Set(TestItem.KeccakA, Build.An.Account.TestObject); - dbContext.RemoteStateTree.Commit(); + RemoteDbContext remote = new(_logManager); + remote.StateTree.Set(TestItem.KeccakA, Build.An.Account.TestObject); + remote.StateTree.Commit(); + + await using IContainer container = PrepareDownloader(remote); + var local = container.Resolve(); - dbContext.CompareTrees("BEGIN"); + local.CompareTrees(remote, _logger, "BEGIN"); - await using IContainer container = PrepareDownloader(dbContext); SafeContext ctx = container.Resolve(); await 
ActivateAndWait(ctx); - dbContext.CompareTrees("END"); + local.CompareTrees(remote, _logger, "END"); ctx.Feed.CurrentState.Should().Be(SyncFeedState.Dormant); } @@ -216,36 +216,36 @@ public async Task When_saving_root_goes_asleep_and_then_restart_to_new_tree_when [Retry(3)] public async Task Can_download_with_moving_target((string Name, Action SetupTree) testCase) { - DbContext dbContext = new(_logger, _logManager); - testCase.SetupTree(dbContext.RemoteStateTree, dbContext.RemoteTrieStore, dbContext.RemoteCodeDb); + RemoteDbContext remote = new(_logManager); + testCase.SetupTree(remote.StateTree, remote.TrieStore, remote.CodeDb); + + await using IContainer container = PrepareDownloader(remote, mock => + mock.SetFilter(((MemDb)remote.StateDb).Keys.Take(((MemDb)remote.StateDb).Keys.Count - 1).Select(k => HashKey(k)).ToArray())); + var local = container.Resolve(); - dbContext.CompareTrees("BEFORE FIRST SYNC"); + local.CompareTrees(remote, _logger, "BEFORE FIRST SYNC"); - await using IContainer container = PrepareDownloader(dbContext, mock => - mock.SetFilter(((MemDb)dbContext.RemoteStateDb).Keys.Take(((MemDb)dbContext.RemoteStateDb).Keys.Count - 1).Select(k => HashKey(k)).ToArray())); SafeContext ctx = container.Resolve(); await ActivateAndWait(ctx, TimeoutLength); + local.CompareTrees(remote, _logger, "AFTER FIRST SYNC"); - dbContext.CompareTrees("AFTER FIRST SYNC"); + remote.StateTree.Set(TestItem.AddressA, TrieScenarios.AccountJustState0.WithChangedBalance(123.Ether())); + remote.StateTree.Set(TestItem.AddressB, TrieScenarios.AccountJustState1.WithChangedBalance(123.Ether())); + remote.StateTree.Set(TestItem.AddressC, TrieScenarios.AccountJustState2.WithChangedBalance(123.Ether())); - dbContext.LocalStateTree.RootHash = dbContext.RemoteStateTree.RootHash; - dbContext.RemoteStateTree.Set(TestItem.AddressA, TrieScenarios.AccountJustState0.WithChangedBalance(123.Ether())); - dbContext.RemoteStateTree.Set(TestItem.AddressB, 
TrieScenarios.AccountJustState1.WithChangedBalance(123.Ether())); - dbContext.RemoteStateTree.Set(TestItem.AddressC, TrieScenarios.AccountJustState2.WithChangedBalance(123.Ether())); + local.CompareTrees(remote, _logger, "BEFORE ROOT HASH UPDATE"); - dbContext.CompareTrees("BEFORE ROOT HASH UPDATE"); + remote.StateTree.UpdateRootHash(); - dbContext.RemoteStateTree.UpdateRootHash(); + local.CompareTrees(remote, _logger, "BEFORE COMMIT"); - dbContext.CompareTrees("BEFORE COMMIT"); - - dbContext.RemoteStateTree.Commit(); + remote.StateTree.Commit(); ctx.Pool.WakeUpAll(); ctx.Feed.FallAsleep(); - await ctx.SuggestBlocksWithUpdatedRootHash(dbContext.RemoteStateTree.RootHash); + await ctx.SuggestBlocksWithUpdatedRootHash(remote.StateTree.RootHash); foreach (SyncPeerMock mock in ctx.SyncPeerMocks) { @@ -254,7 +254,7 @@ public async Task Can_download_with_moving_target((string Name, Action SetupTree) testCase) { - DbContext dbContext = new(_logger, _logManager); - testCase.SetupTree(dbContext.RemoteStateTree, dbContext.RemoteTrieStore, dbContext.RemoteCodeDb); - + RemoteDbContext remote = new(_logManager); + testCase.SetupTree(remote.StateTree, remote.TrieStore, remote.CodeDb); - StorageTree remoteStorageTree = new(dbContext.RemoteTrieStore.GetTrieStore(TestItem.AddressD), Keccak.EmptyTreeHash, LimboLogs.Instance); + StorageTree remoteStorageTree = new(remote.TrieStore.GetTrieStore(TestItem.AddressD), Keccak.EmptyTreeHash, LimboLogs.Instance); remoteStorageTree.Set( Bytes.FromHexString("eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeb000"), new byte[] { 1 }); remoteStorageTree.Set( @@ -277,17 +276,18 @@ public async Task Dependent_branch_counter_is_zero_and_leaf_is_short((string Nam Bytes.FromHexString("eeeeeeeeeeeeeeeeeeeeeb111111111111111111111111111111111111111111"), new byte[] { 1 }); remoteStorageTree.Commit(); - dbContext.RemoteStateTree.Set(TestItem.AddressD, TrieScenarios.AccountJustState0.WithChangedStorageRoot(remoteStorageTree.RootHash)); - 
dbContext.RemoteStateTree.Commit(); + remote.StateTree.Set(TestItem.AddressD, TrieScenarios.AccountJustState0.WithChangedStorageRoot(remoteStorageTree.RootHash)); + remote.StateTree.Commit(); + + await using IContainer container = PrepareDownloader(remote); + var local = container.Resolve(); - dbContext.CompareTrees("BEGIN"); + local.CompareTrees(remote, _logger, "BEGIN"); - await using IContainer container = PrepareDownloader(dbContext); SafeContext ctx = container.Resolve(); await ActivateAndWait(ctx); - - dbContext.CompareTrees("END"); + local.CompareTrees(remote, _logger, "END"); } [Test] @@ -295,24 +295,24 @@ public async Task Dependent_branch_counter_is_zero_and_leaf_is_short((string Nam [Repeat(TestRepeatCount)] public async Task Scenario_plus_one_code((string Name, Action SetupTree) testCase) { - DbContext dbContext = new(_logger, _logManager); - testCase.SetupTree(dbContext.RemoteStateTree, dbContext.RemoteTrieStore, dbContext.RemoteCodeDb); - + RemoteDbContext remote = new(_logManager); + testCase.SetupTree(remote.StateTree, remote.TrieStore, remote.CodeDb); - dbContext.RemoteCodeDb.Set(Keccak.Compute(TrieScenarios.Code0), TrieScenarios.Code0); + remote.CodeDb.Set(Keccak.Compute(TrieScenarios.Code0), TrieScenarios.Code0); Account changedAccount = TrieScenarios.AccountJustState0.WithChangedCodeHash(Keccak.Compute(TrieScenarios.Code0)); - dbContext.RemoteStateTree.Set(TestItem.AddressD, changedAccount); - dbContext.RemoteStateTree.Commit(); + remote.StateTree.Set(TestItem.AddressD, changedAccount); + remote.StateTree.Commit(); + + await using IContainer container = PrepareDownloader(remote); + var local = container.Resolve(); - dbContext.CompareTrees("BEGIN"); + local.CompareTrees(remote, _logger, "BEGIN"); - await using IContainer container = PrepareDownloader(dbContext); SafeContext ctx = container.Resolve(); await ActivateAndWait(ctx); - - dbContext.CompareTrees("END"); + local.CompareTrees(remote, _logger, "END"); } [Test] @@ -320,27 +320,27 @@ public 
async Task Scenario_plus_one_code((string Name, Action SetupTree) testCase) { - DbContext dbContext = new(_logger, _logManager); - testCase.SetupTree(dbContext.RemoteStateTree, dbContext.RemoteTrieStore, dbContext.RemoteCodeDb); - + RemoteDbContext remote = new(_logManager); + testCase.SetupTree(remote.StateTree, remote.TrieStore, remote.CodeDb); - dbContext.RemoteCodeDb.Set(Keccak.Compute(TrieScenarios.Code0), TrieScenarios.Code0); + remote.CodeDb.Set(Keccak.Compute(TrieScenarios.Code0), TrieScenarios.Code0); - StorageTree remoteStorageTree = new(dbContext.RemoteTrieStore.GetTrieStore(TestItem.AddressD), Keccak.EmptyTreeHash, _logManager); + StorageTree remoteStorageTree = new(remote.TrieStore.GetTrieStore(TestItem.AddressD), Keccak.EmptyTreeHash, _logManager); remoteStorageTree.Set((UInt256)1, new byte[] { 1 }); remoteStorageTree.Commit(); - dbContext.RemoteStateTree.Set(TestItem.AddressD, TrieScenarios.AccountJustState0.WithChangedCodeHash(Keccak.Compute(TrieScenarios.Code0)).WithChangedStorageRoot(remoteStorageTree.RootHash)); - dbContext.RemoteStateTree.Commit(); + remote.StateTree.Set(TestItem.AddressD, TrieScenarios.AccountJustState0.WithChangedCodeHash(Keccak.Compute(TrieScenarios.Code0)).WithChangedStorageRoot(remoteStorageTree.RootHash)); + remote.StateTree.Commit(); - dbContext.CompareTrees("BEGIN"); + await using IContainer container = PrepareDownloader(remote); + var local = container.Resolve(); + + local.CompareTrees(remote, _logger, "BEGIN"); - await using IContainer container = PrepareDownloader(dbContext); SafeContext ctx = container.Resolve(); await ActivateAndWait(ctx); - - dbContext.CompareTrees("END"); + local.CompareTrees(remote, _logger, "END"); } [Test] @@ -348,35 +348,36 @@ public async Task Scenario_plus_one_code_one_storage((string Name, Action SetupTree) testCase) { - DbContext dbContext = new(_logger, _logManager); - testCase.SetupTree(dbContext.RemoteStateTree, dbContext.RemoteTrieStore, dbContext.RemoteCodeDb); + RemoteDbContext 
remote = new(_logManager); + testCase.SetupTree(remote.StateTree, remote.TrieStore, remote.CodeDb); - StorageTree remoteStorageTree = new(dbContext.RemoteTrieStore.GetTrieStore(TestItem.AddressD), Keccak.EmptyTreeHash, _logManager); + StorageTree remoteStorageTree = new(remote.TrieStore.GetTrieStore(TestItem.AddressD), Keccak.EmptyTreeHash, _logManager); remoteStorageTree.Set((UInt256)1, new byte[] { 1 }); remoteStorageTree.Commit(); - dbContext.RemoteStateTree.Set(TestItem.AddressD, TrieScenarios.AccountJustState0.WithChangedStorageRoot(remoteStorageTree.RootHash)); - dbContext.RemoteStateTree.Set(TestItem.AddressD, TrieScenarios.AccountJustState0.WithChangedStorageRoot(remoteStorageTree.RootHash)); - dbContext.RemoteStateTree.Commit(); + remote.StateTree.Set(TestItem.AddressD, TrieScenarios.AccountJustState0.WithChangedStorageRoot(remoteStorageTree.RootHash)); + remote.StateTree.Set(TestItem.AddressD, TrieScenarios.AccountJustState0.WithChangedStorageRoot(remoteStorageTree.RootHash)); + remote.StateTree.Commit(); - dbContext.CompareTrees("BEGIN"); + await using IContainer container = PrepareDownloader(remote); + var local = container.Resolve(); + + local.CompareTrees(remote, _logger, "BEGIN"); - await using IContainer container = PrepareDownloader(dbContext); SafeContext ctx = container.Resolve(); await ActivateAndWait(ctx); - - dbContext.CompareTrees("END"); + local.CompareTrees(remote, _logger, "END"); } [Test] public async Task When_empty_response_received_return_lesser_quality() { - DbContext dbContext = new(_logger, _logManager); - dbContext.RemoteStateTree.Set(TestItem.KeccakA, Build.An.Account.TestObject); - dbContext.RemoteStateTree.Commit(); + RemoteDbContext remote = new(_logManager); + remote.StateTree.Set(TestItem.KeccakA, Build.An.Account.TestObject); + remote.StateTree.Commit(); - await using IContainer container = BuildTestContainerBuilder(dbContext) + await using IContainer container = BuildTestContainerBuilder(remote) .Build(); SafeContext ctx = 
container.Resolve(); @@ -391,11 +392,11 @@ public async Task When_empty_response_received_return_lesser_quality() [Test] public async Task When_empty_response_received_with_no_peer_return_not_allocated() { - DbContext dbContext = new(_logger, _logManager); - dbContext.RemoteStateTree.Set(TestItem.KeccakA, Build.An.Account.TestObject); - dbContext.RemoteStateTree.Commit(); + RemoteDbContext remote = new(_logManager); + remote.StateTree.Set(TestItem.KeccakA, Build.An.Account.TestObject); + remote.StateTree.Commit(); - await using IContainer container = BuildTestContainerBuilder(dbContext) + await using IContainer container = BuildTestContainerBuilder(remote) .Build(); SafeContext ctx = container.Resolve(); @@ -411,48 +412,44 @@ public async Task When_empty_response_received_with_no_peer_return_not_allocated [Repeat(TestRepeatCount)] public async Task RepairPossiblyMissingStorage() { - DbContext dbContext = new(_logger, _logManager) - { - RemoteCodeDb = - { - [Keccak.Compute(TrieScenarios.Code0).Bytes] = TrieScenarios.Code0, - [Keccak.Compute(TrieScenarios.Code1).Bytes] = TrieScenarios.Code1, - [Keccak.Compute(TrieScenarios.Code2).Bytes] = TrieScenarios.Code2, - [Keccak.Compute(TrieScenarios.Code3).Bytes] = TrieScenarios.Code3, - }, - }; + RemoteDbContext remote = new(_logManager); + remote.CodeDb[Keccak.Compute(TrieScenarios.Code0).Bytes] = TrieScenarios.Code0; + remote.CodeDb[Keccak.Compute(TrieScenarios.Code1).Bytes] = TrieScenarios.Code1; + remote.CodeDb[Keccak.Compute(TrieScenarios.Code2).Bytes] = TrieScenarios.Code2; + remote.CodeDb[Keccak.Compute(TrieScenarios.Code3).Bytes] = TrieScenarios.Code3; Hash256 theAccount = TestItem.KeccakA; - StorageTree storageTree = new StorageTree(dbContext.RemoteTrieStore.GetTrieStore(theAccount), LimboLogs.Instance); + StorageTree storageTree = new StorageTree(remote.TrieStore.GetTrieStore(theAccount), LimboLogs.Instance); for (int i = 0; i < 10; i++) { storageTree.Set((UInt256)i, TestItem.Keccaks[i].BytesToArray()); } 
storageTree.Commit(); - StateTree state = dbContext.RemoteStateTree; + StateTree state = remote.StateTree; state.Set(TestItem.KeccakA, Build.An.Account.WithNonce(1).WithStorageRoot(storageTree.RootHash).TestObject); state.Set(TestItem.KeccakB, Build.An.Account.WithNonce(1).TestObject); state.Set(TestItem.KeccakC, Build.An.Account.WithNonce(1).TestObject); state.Commit(); + await using IContainer container = PrepareDownloader(remote); + var local = container.Resolve(); + // Local state only have the state - state = dbContext.LocalStateTree; - state.Set(TestItem.KeccakA, Build.An.Account.WithNonce(1).WithStorageRoot(storageTree.RootHash).TestObject); - state.Set(TestItem.KeccakB, Build.An.Account.WithNonce(1).TestObject); - state.Set(TestItem.KeccakC, Build.An.Account.WithNonce(1).TestObject); - state.Commit(); + local.SetAccountsAndCommit( + (TestItem.KeccakA, Build.An.Account.WithNonce(1).WithStorageRoot(storageTree.RootHash).TestObject), + (TestItem.KeccakB, Build.An.Account.WithNonce(1).TestObject), + (TestItem.KeccakC, Build.An.Account.WithNonce(1).TestObject)); // Local state missing root so that it would start - dbContext.LocalNodeStorage.Set(null, TreePath.Empty, state.RootHash, null); + local.DeleteStateRoot(); - await using IContainer container = PrepareDownloader(dbContext); container.Resolve().UpdatedStorages.Add(theAccount); SafeContext ctx = container.Resolve(); await ActivateAndWait(ctx); - dbContext.CompareTrees("END"); + local.CompareTrees(remote, _logger, "END"); } [Test] @@ -460,20 +457,15 @@ public async Task RepairPossiblyMissingStorage() [CancelAfter(10000)] public async Task Pending_items_cache_mechanism_works_across_root_changes(CancellationToken cancellation) { - DbContext dbContext = new(_logger, _logManager) - { - RemoteCodeDb = - { - [Keccak.Compute(TrieScenarios.Code0).Bytes] = TrieScenarios.Code0, - [Keccak.Compute(TrieScenarios.Code1).Bytes] = TrieScenarios.Code1, - }, - }; + RemoteDbContext remote = new(_logManager); + 
remote.CodeDb[Keccak.Compute(TrieScenarios.Code0).Bytes] = TrieScenarios.Code0; + remote.CodeDb[Keccak.Compute(TrieScenarios.Code1).Bytes] = TrieScenarios.Code1; // Set some data for (byte i = 0; i < 12; i++) { - StorageTree storage = SetStorage(dbContext.RemoteTrieStore, (byte)(i + 1), TestItem.Addresses[i]); - dbContext.RemoteStateTree.Set( + StorageTree storage = SetStorage(remote.TrieStore, (byte)(i + 1), TestItem.Addresses[i]); + remote.StateTree.Set( TestItem.Addresses[i], TrieScenarios.AccountJustState0 .WithChangedBalance((UInt256)(i + 10)) @@ -481,10 +473,10 @@ public async Task Pending_items_cache_mechanism_works_across_root_changes(Cancel .WithChangedCodeHash(Keccak.Compute(TrieScenarios.Code0)) .WithChangedStorageRoot(storage.RootHash)); } - dbContext.RemoteStateTree.UpdateRootHash(); - dbContext.RemoteStateTree.Commit(); + remote.StateTree.UpdateRootHash(); + remote.StateTree.Commit(); - await using IContainer container = PrepareDownloader(dbContext); + await using IContainer container = PrepareDownloader(remote); SafeContext ctx = container.Resolve(); ctx.Feed.SyncModeSelectorOnChanged(SyncMode.StateNodes); @@ -510,8 +502,8 @@ async Task RunOneRequest() for (byte i = 0; i < 4; i++) { - StorageTree storage = SetStorage(dbContext.RemoteTrieStore, (byte)(i + 2), TestItem.Addresses[i]); - dbContext.RemoteStateTree.Set( + StorageTree storage = SetStorage(remote.TrieStore, (byte)(i + 2), TestItem.Addresses[i]); + remote.StateTree.Set( TestItem.Addresses[i], TrieScenarios.AccountJustState0 .WithChangedBalance((UInt256)(i + 100)) @@ -519,10 +511,10 @@ async Task RunOneRequest() .WithChangedCodeHash(Keccak.Compute(TrieScenarios.Code1)) .WithChangedStorageRoot(storage.RootHash)); } - dbContext.RemoteStateTree.UpdateRootHash(); - dbContext.RemoteStateTree.Commit(); + remote.StateTree.UpdateRootHash(); + remote.StateTree.Commit(); - await ctx.SuggestBlocksWithUpdatedRootHash(dbContext.RemoteStateTree.RootHash); + await 
ctx.SuggestBlocksWithUpdatedRootHash(remote.StateTree.RootHash); ctx.Feed.FallAsleep(); ctx.Feed.SyncModeSelectorOnChanged(SyncMode.StateNodes); diff --git a/src/Nethermind/Nethermind.Synchronization.Test/FastSync/StateSyncFeedTestsBase.cs b/src/Nethermind/Nethermind.Synchronization.Test/FastSync/StateSyncFeedTestsBase.cs index ca3d449ac97..b342b0ab178 100644 --- a/src/Nethermind/Nethermind.Synchronization.Test/FastSync/StateSyncFeedTestsBase.cs +++ b/src/Nethermind/Nethermind.Synchronization.Test/FastSync/StateSyncFeedTestsBase.cs @@ -6,6 +6,7 @@ using System.Threading; using System.Threading.Tasks; using Autofac; +using Autofac.Features.AttributeFilters; using FluentAssertions; using Nethermind.Blockchain; using Nethermind.Blockchain.Synchronization; @@ -19,17 +20,22 @@ using Nethermind.Core.Test.Modules; using Nethermind.Core.Utils; using Nethermind.Db; +using Nethermind.Init.Modules; using Nethermind.Int256; using Nethermind.Logging; using Nethermind.Network.Contract.P2P; using Nethermind.Network.P2P.Subprotocols.Snap; using Nethermind.State; +using Nethermind.State.Flat; +using Nethermind.State.Flat.Persistence; +using Nethermind.State.Flat.Sync; using Nethermind.State.Snap; using Nethermind.State.SnapServer; using Nethermind.Stats.Model; using Nethermind.Synchronization.FastSync; using Nethermind.Synchronization.ParallelSync; using Nethermind.Synchronization.Peers; +using Nethermind.Synchronization.SnapSync; using Nethermind.Synchronization.Test.ParallelSync; using Nethermind.Trie; using Nethermind.Trie.Pruning; @@ -37,13 +43,19 @@ namespace Nethermind.Synchronization.Test.FastSync; -public abstract class StateSyncFeedTestsBase(int defaultPeerCount = 1, int defaultPeerMaxRandomLatency = 0) +public abstract class StateSyncFeedTestsBase( + bool useFlat = false, + int defaultPeerCount = 1, + int defaultPeerMaxRandomLatency = 0) { public const int TimeoutLength = 20000; // Chain length used for test block trees, use a constant to avoid shared state private const 
int TestChainLength = 100; + // Stored for future flat store support + protected bool UseFlat { get; } = useFlat; + protected ILogger _logger; protected ILogManager _logManager = null!; @@ -72,7 +84,7 @@ protected static StorageTree SetStorage(ITrieStore trieStore, byte i, Address ad return remoteStorageTree; } - protected IContainer PrepareDownloader(DbContext dbContext, Action? mockMutator = null, int syncDispatcherAllocateTimeoutMs = 10) + protected IContainer PrepareDownloader(RemoteDbContext remote, Action? mockMutator = null, int syncDispatcherAllocateTimeoutMs = 10) { SyncPeerMock[] syncPeers = new SyncPeerMock[defaultPeerCount]; for (int i = 0; i < defaultPeerCount; i++) @@ -81,12 +93,12 @@ protected IContainer PrepareDownloader(DbContext dbContext, Action { EthDetails = "eth68", }; - SyncPeerMock mock = new SyncPeerMock(dbContext.RemoteStateDb, dbContext.RemoteCodeDb, node: node, maxRandomizedLatencyMs: defaultPeerMaxRandomLatency); + SyncPeerMock mock = new SyncPeerMock(remote.StateDb, remote.CodeDb, node: node, maxRandomizedLatencyMs: defaultPeerMaxRandomLatency); mockMutator?.Invoke(mock); syncPeers[i] = mock; } - ContainerBuilder builder = BuildTestContainerBuilder(dbContext, syncDispatcherAllocateTimeoutMs) + ContainerBuilder builder = BuildTestContainerBuilder(remote, syncDispatcherAllocateTimeoutMs) .AddSingleton(syncPeers); builder.RegisterBuildCallback((ctx) => @@ -104,7 +116,7 @@ protected IContainer PrepareDownloader(DbContext dbContext, Action return builder.Build(); } - protected ContainerBuilder BuildTestContainerBuilder(DbContext dbContext, int syncDispatcherAllocateTimeoutMs = 10) + protected ContainerBuilder BuildTestContainerBuilder(RemoteDbContext remote, int syncDispatcherAllocateTimeoutMs = 10) { ContainerBuilder containerBuilder = new ContainerBuilder() .AddModule(new TestNethermindModule(new ConfigProvider(new SyncConfig() @@ -117,18 +129,37 @@ protected ContainerBuilder BuildTestContainerBuilder(DbContext dbContext, int sy return 
syncConfig; }) .AddSingleton(_logManager) - .AddKeyedSingleton(DbNames.Code, dbContext.LocalCodeDb) - .AddKeyedSingleton(DbNames.State, dbContext.LocalStateDb) - .AddSingleton(dbContext.LocalNodeStorage) + .AddKeyedSingleton(DbNames.Code, (_) => new TestMemDb()) // Use factory function to make it lazy in case test need to replace IBlockTree // Cache key includes type name so different inherited test classes don't share the same blocktree .AddSingleton((ctx) => CachedBlockTreeBuilder.BuildCached( - $"{GetType().Name}{dbContext.RemoteStateTree.RootHash}{TestChainLength}", - () => Build.A.BlockTree().WithStateRoot(dbContext.RemoteStateTree.RootHash).OfChainLength(TestChainLength))) + $"{GetType().Name}{remote.StateTree.RootHash}{TestChainLength}", + () => Build.A.BlockTree().WithStateRoot(remote.StateTree.RootHash).OfChainLength(TestChainLength))) .Add(); + // State DB and INodeStorage are needed by SynchronizerModule components (e.g. PathNodeRecovery) + containerBuilder + .AddKeyedSingleton(DbNames.State, (_) => new TestMemDb()) + .AddSingleton((ctx) => new NodeStorage(ctx.ResolveNamed(DbNames.State))); + + if (UseFlat) + { + containerBuilder + .AddModule(new FlatWorldStateModule(new FlatDbConfig())) + .AddSingleton>(_ => new TestMemColumnsDb()) + .AddSingleton() + .AddDecorator((ctx, inner) => new ResettableFlatTreeSyncStore(inner, ctx.Resolve(), ctx.Resolve(), ctx.Resolve())) + ; + } + else + { + containerBuilder + .AddSingleton() + .AddSingleton(); + } + containerBuilder.RegisterBuildCallback((ctx) => { ctx.Resolve().Start(); @@ -206,70 +237,6 @@ public void Dispose() } } - protected class DbContext - { - private readonly ILogger _logger; - - public DbContext(ILogger logger, ILogManager logManager) - { - _logger = logger; - RemoteDb = new MemDb(); - LocalDb = new TestMemDb(); - RemoteStateDb = RemoteDb; - LocalStateDb = LocalDb; - LocalNodeStorage = new NodeStorage(LocalDb); - LocalCodeDb = new TestMemDb(); - RemoteCodeDb = new MemDb(); - RemoteTrieStore = 
TestTrieStoreFactory.Build(RemoteStateDb, logManager); - - RemoteStateTree = new StateTree(RemoteTrieStore, logManager); - LocalStateTree = new StateTree(TestTrieStoreFactory.Build(LocalStateDb, logManager), logManager); - } - - public MemDb RemoteCodeDb { get; } - public TestMemDb LocalCodeDb { get; } - public MemDb RemoteDb { get; } - public TestMemDb LocalDb { get; } - public ITrieStore RemoteTrieStore { get; } - public IDb RemoteStateDb { get; } - public IDb LocalStateDb { get; } - public NodeStorage LocalNodeStorage { get; } - public StateTree RemoteStateTree { get; } - public StateTree LocalStateTree { get; } - - public void CompareTrees(string stage, bool skipLogs = false) - { - if (!skipLogs) _logger.Info($"==================== {stage} ===================="); - LocalStateTree.RootHash = RemoteStateTree.RootHash; - - if (!skipLogs) _logger.Info("-------------------- REMOTE --------------------"); - TreeDumper dumper = new TreeDumper(); - RemoteStateTree.Accept(dumper, RemoteStateTree.RootHash); - string remote = dumper.ToString(); - if (!skipLogs) _logger.Info(remote); - if (!skipLogs) _logger.Info("-------------------- LOCAL --------------------"); - dumper.Reset(); - LocalStateTree.Accept(dumper, LocalStateTree.RootHash); - string local = dumper.ToString(); - if (!skipLogs) _logger.Info(local); - - if (stage == "END") - { - Assert.That(local, Is.EqualTo(remote), $"{stage}{Environment.NewLine}{remote}{Environment.NewLine}{local}"); - TrieStatsCollector collector = new(LocalCodeDb, LimboLogs.Instance); - LocalStateTree.Accept(collector, LocalStateTree.RootHash); - Assert.That(collector.Stats.MissingNodes, Is.EqualTo(0)); - Assert.That(collector.Stats.MissingCode, Is.EqualTo(0)); - } - } - - public void AssertFlushed() - { - LocalDb.WasFlushed.Should().BeTrue(); - LocalCodeDb.WasFlushed.Should().BeTrue(); - } - } - protected class SyncPeerMock : BaseSyncPeerMock { public override string Name => "Mock"; @@ -390,3 +357,59 @@ public override Task> 
GetTrieNodes(GetTrieNodesReques } } } + +public class RemoteDbContext +{ + public RemoteDbContext(ILogManager logManager) + { + CodeDb = new MemDb(); + Db = new MemDb(); + TrieStore = TestTrieStoreFactory.Build(Db, logManager); + StateTree = new StateTree(TrieStore, logManager); + } + + public MemDb CodeDb { get; } + public MemDb Db { get; } + public IDb StateDb => Db; + public ITrieStore TrieStore { get; } + public StateTree StateTree { get; } +} + +/// +/// Test wrapper around FlatTreeSyncStore that auto-resets after finalization. +/// Production FlatTreeSyncStore throws on SaveNode after FinalizeSync (one-way flag). +/// Tests with moving targets need multiple sync rounds, so this wrapper creates a fresh +/// inner store when the previous one was finalized. +/// +file class ResettableFlatTreeSyncStore( + ITreeSyncStore inner, + IPersistence persistence, + IPersistenceManager persistenceManager, + ILogManager logManager) : ITreeSyncStore +{ + private ITreeSyncStore _inner = inner; + + public bool NodeExists(Hash256? address, in TreePath path, in ValueHash256 hash) => + _inner.NodeExists(address, path, hash); + + public void SaveNode(Hash256? 
address, in TreePath path, in ValueHash256 hash, ReadOnlySpan data) + { + try + { + _inner.SaveNode(address, path, hash, data); + } + catch (InvalidOperationException) + { + // Previous round finalized — create fresh store for next sync round + _inner = new FlatTreeSyncStore(persistence, persistenceManager, logManager); + _inner.SaveNode(address, path, hash, data); + } + } + + public void FinalizeSync(BlockHeader pivotHeader) => + _inner.FinalizeSync(pivotHeader); + + public ITreeSyncVerificationContext CreateVerificationContext(byte[] rootNodeData) => + _inner.CreateVerificationContext(rootNodeData); +} + diff --git a/src/Nethermind/Nethermind.Synchronization.Test/Nethermind.Synchronization.Test.csproj b/src/Nethermind/Nethermind.Synchronization.Test/Nethermind.Synchronization.Test.csproj index dd5ddd9b757..5720da4ada3 100644 --- a/src/Nethermind/Nethermind.Synchronization.Test/Nethermind.Synchronization.Test.csproj +++ b/src/Nethermind/Nethermind.Synchronization.Test/Nethermind.Synchronization.Test.csproj @@ -11,6 +11,7 @@ + diff --git a/src/Nethermind/Nethermind.Synchronization.Test/SnapSync/ISnapTestHelper.cs b/src/Nethermind/Nethermind.Synchronization.Test/SnapSync/ISnapTestHelper.cs new file mode 100644 index 00000000000..dcde72fff1d --- /dev/null +++ b/src/Nethermind/Nethermind.Synchronization.Test/SnapSync/ISnapTestHelper.cs @@ -0,0 +1,52 @@ +// SPDX-FileCopyrightText: 2022 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System.Linq; +using Autofac.Features.AttributeFilters; +using Nethermind.Core.Crypto; +using Nethermind.Db; +using Nethermind.State.Flat; + +namespace Nethermind.Synchronization.Test.SnapSync; + +public interface ISnapTestHelper +{ + int CountTrieNodes(); + bool TrieNodeKeyExists(Hash256 hash); + long TrieNodeWritesCount { get; } +} + +public class PatriciaSnapTestHelper([KeyFilter(DbNames.State)] IDb stateDb) : ISnapTestHelper +{ + public int CountTrieNodes() => stateDb.GetAllKeys().Count(); + public bool 
TrieNodeKeyExists(Hash256 hash) => stateDb.KeyExists(hash.Bytes); + public long TrieNodeWritesCount => ((MemDb)stateDb).WritesCount; +} + +public class FlatSnapTestHelper(IColumnsDb columnsDb) : ISnapTestHelper +{ + private static readonly FlatDbColumns[] TrieNodeColumns = + [FlatDbColumns.StateTopNodes, FlatDbColumns.StateNodes, FlatDbColumns.StorageNodes, FlatDbColumns.FallbackNodes]; + + public int CountTrieNodes() + { + int total = 0; + foreach (var col in TrieNodeColumns) + total += columnsDb.GetColumnDb(col).GetAllKeys().Count(); + return total; + } + + public bool TrieNodeKeyExists(Hash256 hash) => + columnsDb.GetColumnDb(FlatDbColumns.StateTopNodes).KeyExists(new byte[3]); + + public long TrieNodeWritesCount + { + get + { + long total = 0; + foreach (var col in TrieNodeColumns) + total += ((MemDb)columnsDb.GetColumnDb(col)).WritesCount; + return total; + } + } +} diff --git a/src/Nethermind/Nethermind.Synchronization.Test/SnapSync/ProgressTrackerTests.cs b/src/Nethermind/Nethermind.Synchronization.Test/SnapSync/ProgressTrackerTests.cs index 637d854833f..a05110b0af4 100644 --- a/src/Nethermind/Nethermind.Synchronization.Test/SnapSync/ProgressTrackerTests.cs +++ b/src/Nethermind/Nethermind.Synchronization.Test/SnapSync/ProgressTrackerTests.cs @@ -58,37 +58,34 @@ public void Will_create_multiple_get_address_range_request() { using ProgressTracker progressTracker = CreateProgressTracker(accountRangePartition: 4); - bool finished = progressTracker.IsFinished(out SnapSyncBatch? 
request); - request!.AccountRangeRequest.Should().NotBeNull(); - request.AccountRangeRequest!.StartingHash.Bytes[0].Should().Be(0); - request.AccountRangeRequest.LimitHash!.Value.Bytes[0].Should().Be(64); - finished.Should().BeFalse(); - request.Dispose(); - - finished = progressTracker.IsFinished(out request); - request!.AccountRangeRequest.Should().NotBeNull(); - request.AccountRangeRequest!.StartingHash.Bytes[0].Should().Be(64); - request.AccountRangeRequest.LimitHash!.Value.Bytes[0].Should().Be(128); - finished.Should().BeFalse(); - request.Dispose(); - - finished = progressTracker.IsFinished(out request); - request!.AccountRangeRequest.Should().NotBeNull(); - request.AccountRangeRequest!.StartingHash.Bytes[0].Should().Be(128); - request.AccountRangeRequest.LimitHash!.Value.Bytes[0].Should().Be(192); - finished.Should().BeFalse(); - request.Dispose(); - - finished = progressTracker.IsFinished(out request); - request!.AccountRangeRequest.Should().NotBeNull(); - request.AccountRangeRequest!.StartingHash.Bytes[0].Should().Be(192); - request.AccountRangeRequest.LimitHash!.Value.Bytes[0].Should().Be(255); - finished.Should().BeFalse(); - request.Dispose(); + Hash256[] expectedStarts = + [ + new("0x0000000000000000000000000000000000000000000000000000000000000000"), + new("0x4000000000000000000000000000000000000000000000000000000000000000"), + new("0x8000000000000000000000000000000000000000000000000000000000000000"), + new("0xc000000000000000000000000000000000000000000000000000000000000000"), + ]; + Hash256[] expectedLimits = + [ + new("0x3fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), + new("0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), + new("0xbfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), + new("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), + ]; + + for (int i = 0; i < 4; i++) + { + bool finished = progressTracker.IsFinished(out SnapSyncBatch? 
request); + request!.AccountRangeRequest.Should().NotBeNull(); + request.AccountRangeRequest!.StartingHash.Should().Be(expectedStarts[i]); + request.AccountRangeRequest.LimitHash!.Value.Should().Be(expectedLimits[i]); + finished.Should().BeFalse(); + request.Dispose(); + } - finished = progressTracker.IsFinished(out request); - request.Should().BeNull(); - finished.Should().BeFalse(); + bool finalFinished = progressTracker.IsFinished(out SnapSyncBatch? finalRequest); + finalRequest.Should().BeNull(); + finalFinished.Should().BeFalse(); } [Test] @@ -177,7 +174,7 @@ public void Should_partition_storage_request_if_last_processed_less_than_thresho StartingHash = new ValueHash256(start), LimitHash = limitHash }; - progressTracker.EnqueueNextSlot(storageRange, 0, lastProcessedHash); + progressTracker.EnqueueNextSlot(storageRange, 0, lastProcessedHash, 1_000_000_000); //ignore account range bool isFinished = progressTracker.IsFinished(out _); @@ -191,8 +188,8 @@ public void Should_partition_storage_request_if_last_processed_less_than_thresho isFinished.Should().BeFalse(); batch2.Should().NotBeNull(); - batch2?.StorageRangeRequest?.StartingHash.Should().Be(batch1?.StorageRangeRequest?.LimitHash); - batch1?.StorageRangeRequest?.StartingHash.Should().Be(lastProcessedHash); + batch2?.StorageRangeRequest?.StartingHash.Should().Be(batch1?.StorageRangeRequest?.LimitHash?.IncrementPath()); + batch1?.StorageRangeRequest?.StartingHash.Should().Be(lastProcessedHash.IncrementPath()); batch2?.StorageRangeRequest?.LimitHash.Should().Be(limitHash ?? 
Keccak.MaxValue); batch1?.StorageRangeRequest?.LimitHash.Should().Be(new ValueHash256(expectedSplit)); @@ -214,7 +211,7 @@ public void Should_not_partition_storage_request_if_last_processed_more_than_thr StartingHash = new ValueHash256(start), LimitHash = limitHash }; - progressTracker.EnqueueNextSlot(storageRange, 0, lastProcessedHash); + progressTracker.EnqueueNextSlot(storageRange, 0, lastProcessedHash, 100000000); //ignore account range bool isFinished = progressTracker.IsFinished(out _); @@ -224,7 +221,7 @@ public void Should_not_partition_storage_request_if_last_processed_more_than_thr isFinished.Should().BeFalse(); batch1.Should().NotBeNull(); - batch1?.StorageRangeRequest?.StartingHash.Should().Be(lastProcessedHash); + batch1?.StorageRangeRequest?.StartingHash.Should().Be(lastProcessedHash.IncrementPath()); batch1?.StorageRangeRequest?.LimitHash.Should().Be(limitHash ?? Keccak.MaxValue); } diff --git a/src/Nethermind/Nethermind.Synchronization.Test/SnapSync/RecreateStateFromAccountRangesTests.cs b/src/Nethermind/Nethermind.Synchronization.Test/SnapSync/RecreateStateFromAccountRangesTests.cs index b8fd9101af4..968ffd36659 100644 --- a/src/Nethermind/Nethermind.Synchronization.Test/SnapSync/RecreateStateFromAccountRangesTests.cs +++ b/src/Nethermind/Nethermind.Synchronization.Test/SnapSync/RecreateStateFromAccountRangesTests.cs @@ -8,24 +8,30 @@ using System.Linq; using Autofac; using FluentAssertions; +using Nethermind.Config; using Nethermind.Core; using Nethermind.Core.Crypto; using Nethermind.Core.Test; using Nethermind.Core.Test.Builders; using Nethermind.Db; +using Nethermind.Init.Modules; using Nethermind.Int256; using Nethermind.Logging; using Nethermind.State; +using Nethermind.State.Flat; using Nethermind.State.Proofs; using Nethermind.State.Snap; using Nethermind.Synchronization.SnapSync; using Nethermind.Trie; using Nethermind.Trie.Pruning; +using NSubstitute; using NUnit.Framework; namespace Nethermind.Synchronization.Test.SnapSync; -public class 
RecreateStateFromAccountRangesTests +[TestFixture(true)] +[TestFixture(false)] +public class RecreateStateFromAccountRangesTests(bool useFlat) { private StateTree _inputTree; @@ -35,6 +41,31 @@ public void Setup() _inputTree = TestItem.Tree.GetStateTree(); } + private ContainerBuilder CreateContainerBuilder() + { + ContainerBuilder builder = new ContainerBuilder() + .AddModule(new TestSynchronizerModule(new TestSyncConfig())) + .AddSingleton() + ; + + if (useFlat) + { + FlatDbConfig flatDbConfig = new FlatDbConfig(); + builder + .AddSingleton(flatDbConfig) + .AddSingleton(Substitute.For>()) + .AddModule(new FlatWorldStateModule(flatDbConfig)) + .AddSingleton>((_) => new TestMemColumnsDb()) + .AddSingleton() + ; + } + + return builder; + } + + private IContainer CreateContainer() => + CreateContainerBuilder().Build(); + private byte[][] CreateProofForPath(ReadOnlySpan path, StateTree tree = null) { AccountProofCollector accountProofCollector = new(path); @@ -112,15 +143,15 @@ public void RecreateAccountStateFromOneRangeWithNonExistenceProof() byte[][] firstProof = CreateProofForPath(Keccak.Zero.Bytes); byte[][] lastProof = CreateProofForPath(TestItem.Tree.AccountsWithPaths[5].Path.Bytes); - using IContainer container = new ContainerBuilder().AddModule(new TestSynchronizerModule(new TestSyncConfig())).Build(); + using IContainer container = CreateContainer(); SnapProvider snapProvider = container.Resolve(); - IDb db = container.ResolveKeyed(DbNames.State); + ISnapTestHelper helper = container.Resolve(); AddRangeResult result = snapProvider.AddAccountRange(1, rootHash, Keccak.Zero, TestItem.Tree.AccountsWithPaths, firstProof!.Concat(lastProof!).ToArray()); Assert.That(result, Is.EqualTo(AddRangeResult.OK)); - Assert.That(db.GetAllKeys().Count, Is.EqualTo(10)); // we persist proof nodes (boundary nodes) via stitching - Assert.That(db.KeyExists(rootHash), Is.False); + Assert.That(helper.CountTrieNodes(), Is.EqualTo(10)); // we persist proof nodes (boundary nodes) via 
stitching + Assert.That(helper.TrieNodeKeyExists(rootHash), Is.False); } [Test] @@ -131,15 +162,15 @@ public void RecreateAccountStateFromOneRangeWithExistenceProof() byte[][] firstProof = CreateProofForPath(TestItem.Tree.AccountsWithPaths[0].Path.Bytes); byte[][] lastProof = CreateProofForPath(TestItem.Tree.AccountsWithPaths[5].Path.Bytes); - using IContainer container = new ContainerBuilder().AddModule(new TestSynchronizerModule(new TestSyncConfig())).Build(); + using IContainer container = CreateContainer(); SnapProvider snapProvider = container.Resolve(); - IDb db = container.ResolveKeyed(DbNames.State); + ISnapTestHelper helper = container.Resolve(); var result = snapProvider.AddAccountRange(1, rootHash, TestItem.Tree.AccountsWithPaths[0].Path, TestItem.Tree.AccountsWithPaths, firstProof!.Concat(lastProof!).ToArray()); Assert.That(result, Is.EqualTo(AddRangeResult.OK)); - Assert.That(db.GetAllKeys().Count, Is.EqualTo(10)); // we persist proof nodes (boundary nodes) via stitching - Assert.That(db.KeyExists(rootHash), Is.False); + Assert.That(helper.CountTrieNodes(), Is.EqualTo(10)); // we persist proof nodes (boundary nodes) via stitching + Assert.That(helper.TrieNodeKeyExists(rootHash), Is.False); } [Test] @@ -147,15 +178,15 @@ public void RecreateAccountStateFromOneRangeWithoutProof() { Hash256 rootHash = _inputTree.RootHash; // "0x8c81279168edc449089449bc0f2136fc72c9645642845755633cf259cd97988b" - using IContainer container = new ContainerBuilder().AddModule(new TestSynchronizerModule(new TestSyncConfig())).Build(); + using IContainer container = CreateContainer(); SnapProvider snapProvider = container.Resolve(); - IDb db = container.ResolveKeyed(DbNames.State); + ISnapTestHelper helper = container.Resolve(); var result = snapProvider.AddAccountRange(1, rootHash, TestItem.Tree.AccountsWithPaths[0].Path, TestItem.Tree.AccountsWithPaths); Assert.That(result, Is.EqualTo(AddRangeResult.OK)); - Assert.That(db.GetAllKeys().Count, Is.EqualTo(10)); // we don't have 
the proofs so we persist all nodes - Assert.That(db.KeyExists(rootHash), Is.False); // the root node is NOT a part of the proof nodes + Assert.That(helper.CountTrieNodes(), Is.EqualTo(10)); // we don't have the proofs so we persist all nodes + Assert.That(helper.TrieNodeKeyExists(rootHash), Is.False); // the root node is NOT a part of the proof nodes } [Test] @@ -164,23 +195,23 @@ public void RecreateAccountStateFromMultipleRange() Hash256 rootHash = _inputTree.RootHash; // "0x8c81279168edc449089449bc0f2136fc72c9645642845755633cf259cd97988b" // output state - using IContainer container = new ContainerBuilder().AddModule(new TestSynchronizerModule(new TestSyncConfig())).Build(); + using IContainer container = CreateContainer(); SnapProvider snapProvider = container.Resolve(); - IDb db = container.ResolveKeyed(DbNames.State); + ISnapTestHelper helper = container.Resolve(); byte[][] firstProof = CreateProofForPath(Keccak.Zero.Bytes); byte[][] lastProof = CreateProofForPath(TestItem.Tree.AccountsWithPaths[1].Path.Bytes); var result1 = snapProvider.AddAccountRange(1, rootHash, Keccak.Zero, TestItem.Tree.AccountsWithPaths[0..2], firstProof!.Concat(lastProof!).ToArray()); - Assert.That(db.GetAllKeys().Count, Is.EqualTo(2)); + Assert.That(helper.CountTrieNodes(), Is.EqualTo(2)); firstProof = CreateProofForPath(TestItem.Tree.AccountsWithPaths[2].Path.Bytes); lastProof = CreateProofForPath(TestItem.Tree.AccountsWithPaths[3].Path.Bytes); var result2 = snapProvider.AddAccountRange(1, rootHash, TestItem.Tree.AccountsWithPaths[2].Path, TestItem.Tree.AccountsWithPaths[2..4], firstProof!.Concat(lastProof!).ToArray()); - Assert.That(db.GetAllKeys().Count, Is.EqualTo(5)); // we don't persist proof nodes (boundary nodes) + Assert.That(helper.CountTrieNodes(), Is.EqualTo(4)); // we don't persist proof nodes (boundary nodes) firstProof = CreateProofForPath(TestItem.Tree.AccountsWithPaths[4].Path.Bytes); lastProof = CreateProofForPath(TestItem.Tree.AccountsWithPaths[5].Path.Bytes); @@ 
-190,8 +221,8 @@ public void RecreateAccountStateFromMultipleRange() Assert.That(result1, Is.EqualTo(AddRangeResult.OK)); Assert.That(result2, Is.EqualTo(AddRangeResult.OK)); Assert.That(result3, Is.EqualTo(AddRangeResult.OK)); - Assert.That(db.GetAllKeys().Count, Is.EqualTo(10)); // we persist proof nodes (boundary nodes) via stitching - Assert.That(db.KeyExists(rootHash), Is.False); + Assert.That(helper.CountTrieNodes(), Is.EqualTo(8)); // we persist proof nodes (boundary nodes) via stitching + Assert.That(helper.TrieNodeKeyExists(rootHash), Is.False); } [Test] @@ -200,21 +231,21 @@ public void RecreateAccountStateFromMultipleRange_InReverseOrder() Hash256 rootHash = _inputTree.RootHash; // "0x8c81279168edc449089449bc0f2136fc72c9645642845755633cf259cd97988b" // output state - using IContainer container = new ContainerBuilder().AddModule(new TestSynchronizerModule(new TestSyncConfig())).Build(); + using IContainer container = CreateContainer(); SnapProvider snapProvider = container.Resolve(); - IDb db = container.ResolveKeyed(DbNames.State); + ISnapTestHelper helper = container.Resolve(); byte[][] firstProof = CreateProofForPath(TestItem.Tree.AccountsWithPaths[4].Path.Bytes); byte[][] lastProof = CreateProofForPath(TestItem.Tree.AccountsWithPaths[5].Path.Bytes); var result3 = snapProvider.AddAccountRange(1, rootHash, TestItem.Tree.AccountsWithPaths[4].Path, TestItem.Tree.AccountsWithPaths[4..6], firstProof!.Concat(lastProof!).ToArray()); - Assert.That(db.GetAllKeys().Count, Is.EqualTo(4)); + Assert.That(helper.CountTrieNodes(), Is.EqualTo(4)); firstProof = CreateProofForPath(TestItem.Tree.AccountsWithPaths[2].Path.Bytes); lastProof = CreateProofForPath(TestItem.Tree.AccountsWithPaths[3].Path.Bytes); var result2 = snapProvider.AddAccountRange(1, rootHash, TestItem.Tree.AccountsWithPaths[2].Path, TestItem.Tree.AccountsWithPaths[2..4], firstProof!.Concat(lastProof!).ToArray()); - Assert.That(db.GetAllKeys().Count, Is.EqualTo(6)); // we don't persist proof nodes 
(boundary nodes) + Assert.That(helper.CountTrieNodes(), Is.EqualTo(6)); // we don't persist proof nodes (boundary nodes) firstProof = CreateProofForPath(Keccak.Zero.Bytes); lastProof = CreateProofForPath(TestItem.Tree.AccountsWithPaths[1].Path.Bytes); @@ -223,8 +254,8 @@ public void RecreateAccountStateFromMultipleRange_InReverseOrder() Assert.That(result1, Is.EqualTo(AddRangeResult.OK)); Assert.That(result2, Is.EqualTo(AddRangeResult.OK)); Assert.That(result3, Is.EqualTo(AddRangeResult.OK)); - Assert.That(db.GetAllKeys().Count, Is.EqualTo(10)); // we persist proof nodes (boundary nodes) via stitching - Assert.That(db.KeyExists(rootHash), Is.False); + Assert.That(helper.CountTrieNodes(), Is.EqualTo(8)); // we persist proof nodes (boundary nodes) via stitching + Assert.That(helper.TrieNodeKeyExists(rootHash), Is.False); } [Test] @@ -233,21 +264,21 @@ public void RecreateAccountStateFromMultipleRange_OutOfOrder() Hash256 rootHash = _inputTree.RootHash; // "0x8c81279168edc449089449bc0f2136fc72c9645642845755633cf259cd97988b" // output state - using IContainer container = new ContainerBuilder().AddModule(new TestSynchronizerModule(new TestSyncConfig())).Build(); + using IContainer container = CreateContainer(); SnapProvider snapProvider = container.Resolve(); - IDb db = container.ResolveKeyed(DbNames.State); + ISnapTestHelper helper = container.Resolve(); byte[][] firstProof = CreateProofForPath(TestItem.Tree.AccountsWithPaths[4].Path.Bytes); byte[][] lastProof = CreateProofForPath(TestItem.Tree.AccountsWithPaths[5].Path.Bytes); var result3 = snapProvider.AddAccountRange(1, rootHash, TestItem.Tree.AccountsWithPaths[4].Path, TestItem.Tree.AccountsWithPaths[4..6], firstProof!.Concat(lastProof!).ToArray()); - Assert.That(db.GetAllKeys().Count, Is.EqualTo(4)); + Assert.That(helper.CountTrieNodes(), Is.EqualTo(4)); firstProof = CreateProofForPath(Keccak.Zero.Bytes); lastProof = CreateProofForPath(TestItem.Tree.AccountsWithPaths[1].Path.Bytes); var result1 = 
snapProvider.AddAccountRange(1, rootHash, Keccak.Zero, TestItem.Tree.AccountsWithPaths[0..2], firstProof!.Concat(lastProof!).ToArray()); - Assert.That(db.GetAllKeys().Count, Is.EqualTo(6)); // we don't persist proof nodes (boundary nodes) + Assert.That(helper.CountTrieNodes(), Is.EqualTo(6)); // we don't persist proof nodes (boundary nodes) firstProof = CreateProofForPath(TestItem.Tree.AccountsWithPaths[2].Path.Bytes); lastProof = CreateProofForPath(TestItem.Tree.AccountsWithPaths[3].Path.Bytes); @@ -256,8 +287,8 @@ public void RecreateAccountStateFromMultipleRange_OutOfOrder() Assert.That(result1, Is.EqualTo(AddRangeResult.OK)); Assert.That(result2, Is.EqualTo(AddRangeResult.OK)); Assert.That(result3, Is.EqualTo(AddRangeResult.OK)); - Assert.That(db.GetAllKeys().Count, Is.EqualTo(10)); // we persist proof nodes (boundary nodes) via stitching - Assert.That(db.KeyExists(rootHash), Is.False); + Assert.That(helper.CountTrieNodes(), Is.EqualTo(8)); // we persist proof nodes (boundary nodes) via stitching + Assert.That(helper.TrieNodeKeyExists(rootHash), Is.False); } [Test] @@ -266,16 +297,16 @@ public void RecreateAccountStateFromMultipleOverlappingRange() Hash256 rootHash = _inputTree.RootHash; // "0x8c81279168edc449089449bc0f2136fc72c9645642845755633cf259cd97988b" // output state - using IContainer container = new ContainerBuilder().AddModule(new TestSynchronizerModule(new TestSyncConfig())).Build(); + using IContainer container = CreateContainer(); SnapProvider snapProvider = container.Resolve(); - IDb db = container.ResolveKeyed(DbNames.State); + ISnapTestHelper helper = container.Resolve(); byte[][] firstProof = CreateProofForPath(Keccak.Zero.Bytes); byte[][] lastProof = CreateProofForPath(TestItem.Tree.AccountsWithPaths[2].Path.Bytes); var result1 = snapProvider.AddAccountRange(1, rootHash, Keccak.Zero, TestItem.Tree.AccountsWithPaths[0..3], firstProof!.Concat(lastProof!).ToArray()); - Assert.That(db.GetAllKeys().Count, Is.EqualTo(3)); + 
Assert.That(helper.CountTrieNodes(), Is.EqualTo(3)); firstProof = CreateProofForPath(TestItem.Tree.AccountsWithPaths[2].Path.Bytes); lastProof = CreateProofForPath(TestItem.Tree.AccountsWithPaths[3].Path.Bytes); @@ -287,7 +318,7 @@ public void RecreateAccountStateFromMultipleOverlappingRange() var result3 = snapProvider.AddAccountRange(1, rootHash, TestItem.Tree.AccountsWithPaths[3].Path, TestItem.Tree.AccountsWithPaths[3..5], firstProof!.Concat(lastProof!).ToArray()); - Assert.That(db.GetAllKeys().Count, Is.EqualTo(6)); // we don't persist proof nodes (boundary nodes) + Assert.That(helper.CountTrieNodes(), Is.EqualTo(6)); // we don't persist proof nodes (boundary nodes) firstProof = CreateProofForPath(TestItem.Tree.AccountsWithPaths[4].Path.Bytes); lastProof = CreateProofForPath(TestItem.Tree.AccountsWithPaths[5].Path.Bytes); @@ -298,8 +329,8 @@ public void RecreateAccountStateFromMultipleOverlappingRange() Assert.That(result2, Is.EqualTo(AddRangeResult.OK)); Assert.That(result3, Is.EqualTo(AddRangeResult.OK)); Assert.That(result4, Is.EqualTo(AddRangeResult.OK)); - Assert.That(db.GetAllKeys().Count, Is.EqualTo(10)); // we persist proof nodes (boundary nodes) via stitching - Assert.That(db.KeyExists(rootHash), Is.False); + Assert.That(helper.CountTrieNodes(), Is.EqualTo(10)); // we persist proof nodes (boundary nodes) via stitching + Assert.That(helper.TrieNodeKeyExists(rootHash), Is.False); } [Test] @@ -312,14 +343,15 @@ public void CorrectlyDetermineHasMoreChildren() byte[][] lastProof = CreateProofForPath(TestItem.Tree.AccountsWithPaths[1].Path.Bytes); byte[][] proofs = firstProof.Concat(lastProof).ToArray(); - StateTree newTree = new(new TestRawTrieStore(new MemDb()), LimboLogs.Instance); + using IContainer container = CreateContainer(); + ISnapTrieFactory factory = container.Resolve(); PathWithAccount[] receiptAccounts = TestItem.Tree.AccountsWithPaths[0..2]; bool HasMoreChildren(ValueHash256 limitHash) { - (AddRangeResult _, bool moreChildrenToRight, IList _, 
IList _) = - SnapProviderHelper.AddAccountRange(newTree, 0, rootHash, Keccak.Zero, limitHash.ToCommitment(), receiptAccounts, proofs); + (AddRangeResult _, bool moreChildrenToRight, IList _, IList _, Hash256 _) = + SnapProviderHelper.AddAccountRange(factory, 0, rootHash, Keccak.Zero, limitHash.ToCommitment(), receiptAccounts, proofs); return moreChildrenToRight; } @@ -360,14 +392,15 @@ public void CorrectlyDetermineMaxKeccakExist() byte[][] lastProof = CreateProofForPath(ac2.Path.Bytes, tree); byte[][] proofs = firstProof.Concat(lastProof).ToArray(); - StateTree newTree = new(new TestRawTrieStore(new MemDb()), LimboLogs.Instance); + using IContainer container = CreateContainer(); + ISnapTrieFactory factory = container.Resolve(); PathWithAccount[] receiptAccounts = { ac1, ac2 }; bool HasMoreChildren(ValueHash256 limitHash) { - (AddRangeResult _, bool moreChildrenToRight, IList _, IList _) = - SnapProviderHelper.AddAccountRange(newTree, 0, rootHash, Keccak.Zero, limitHash.ToCommitment(), receiptAccounts, proofs); + (AddRangeResult _, bool moreChildrenToRight, IList _, IList _, Hash256 _) = + SnapProviderHelper.AddAccountRange(factory, 0, rootHash, Keccak.Zero, limitHash.ToCommitment(), receiptAccounts, proofs); return moreChildrenToRight; } @@ -389,16 +422,16 @@ public void MissingAccountFromRange() Hash256 rootHash = _inputTree.RootHash; // "0x8c81279168edc449089449bc0f2136fc72c9645642845755633cf259cd97988b" // output state - using IContainer container = new ContainerBuilder().AddModule(new TestSynchronizerModule(new TestSyncConfig())).Build(); + using IContainer container = CreateContainer(); SnapProvider snapProvider = container.Resolve(); - IDb db = container.ResolveKeyed(DbNames.State); + ISnapTestHelper helper = container.Resolve(); byte[][] firstProof = CreateProofForPath(Keccak.Zero.Bytes); byte[][] lastProof = CreateProofForPath(TestItem.Tree.AccountsWithPaths[1].Path.Bytes); var result1 = snapProvider.AddAccountRange(1, rootHash, Keccak.Zero, 
TestItem.Tree.AccountsWithPaths[0..2], firstProof!.Concat(lastProof!).ToArray()); - Assert.That(db.GetAllKeys().Count, Is.EqualTo(2)); + Assert.That(helper.CountTrieNodes(), Is.EqualTo(2)); firstProof = CreateProofForPath(TestItem.Tree.AccountsWithPaths[2].Path.Bytes); lastProof = CreateProofForPath(TestItem.Tree.AccountsWithPaths[3].Path.Bytes); @@ -406,7 +439,7 @@ public void MissingAccountFromRange() // missing TestItem.Tree.AccountsWithHashes[2] var result2 = snapProvider.AddAccountRange(1, rootHash, TestItem.Tree.AccountsWithPaths[2].Path, TestItem.Tree.AccountsWithPaths[3..4], firstProof!.Concat(lastProof!).ToArray()); - Assert.That(db.GetAllKeys().Count, Is.EqualTo(2)); + Assert.That(helper.CountTrieNodes(), Is.EqualTo(2)); firstProof = CreateProofForPath(TestItem.Tree.AccountsWithPaths[4].Path.Bytes); lastProof = CreateProofForPath(TestItem.Tree.AccountsWithPaths[5].Path.Bytes); @@ -416,7 +449,7 @@ public void MissingAccountFromRange() Assert.That(result1, Is.EqualTo(AddRangeResult.OK)); Assert.That(result2, Is.EqualTo(AddRangeResult.DifferentRootHash)); Assert.That(result3, Is.EqualTo(AddRangeResult.OK)); - Assert.That(db.GetAllKeys().Count, Is.EqualTo(6)); - Assert.That(db.KeyExists(rootHash), Is.False); + Assert.That(helper.CountTrieNodes(), Is.EqualTo(6)); + Assert.That(helper.TrieNodeKeyExists(rootHash), Is.False); } } diff --git a/src/Nethermind/Nethermind.Synchronization.Test/SnapSync/RecreateStateFromStorageRangesTests.cs b/src/Nethermind/Nethermind.Synchronization.Test/SnapSync/RecreateStateFromStorageRangesTests.cs index c35e93d66f4..09b35663b07 100644 --- a/src/Nethermind/Nethermind.Synchronization.Test/SnapSync/RecreateStateFromStorageRangesTests.cs +++ b/src/Nethermind/Nethermind.Synchronization.Test/SnapSync/RecreateStateFromStorageRangesTests.cs @@ -7,6 +7,7 @@ using System.Linq; using Autofac; using FluentAssertions; +using Nethermind.Config; using Nethermind.Core; using Nethermind.Core.Collections; using Nethermind.Core.Crypto; @@ -14,22 
+15,23 @@ using Nethermind.Core.Test; using Nethermind.Core.Test.Builders; using Nethermind.Db; +using Nethermind.Init.Modules; using Nethermind.Int256; -using Nethermind.Logging; using Nethermind.State; +using Nethermind.State.Flat; using Nethermind.State.Proofs; using Nethermind.State.Snap; using Nethermind.Synchronization.SnapSync; using Nethermind.Trie; -using Nethermind.Trie.Pruning; +using NSubstitute; using NUnit.Framework; namespace Nethermind.Synchronization.Test.SnapSync { - [TestFixture] - public class RecreateStateFromStorageRangesTests + [TestFixture(true)] + [TestFixture(false)] + public class RecreateStateFromStorageRangesTests(bool useFlat) { - private TestRawTrieStore _store; private StateTree _inputStateTree; private StorageTree _inputStorageTree; @@ -45,6 +47,32 @@ public void Setup() [OneTimeTearDown] public void TearDown() => ((IDisposable)_store)?.Dispose(); + private ContainerBuilder CreateContainerBuilder() + { + ContainerBuilder builder = new ContainerBuilder() + .AddModule(new TestSynchronizerModule(new TestSyncConfig())) + .AddKeyedSingleton(DbNames.State, (_) => (IDb)new TestMemDb()) + .AddSingleton() + ; + + if (useFlat) + { + FlatDbConfig flatDbConfig = new FlatDbConfig(); + builder + .AddSingleton(flatDbConfig) + .AddSingleton(Substitute.For>()) + .AddModule(new FlatWorldStateModule(flatDbConfig)) + .AddSingleton>((_) => new TestMemColumnsDb()) + .AddSingleton() + ; + } + + return builder; + } + + private IContainer CreateContainer() => + CreateContainerBuilder().Build(); + [Test] public void RecreateStorageStateFromOneRangeWithNonExistenceProof() { @@ -54,7 +82,7 @@ public void RecreateStorageStateFromOneRangeWithNonExistenceProof() _inputStateTree!.Accept(accountProofCollector, _inputStateTree.RootHash); var proof = accountProofCollector.BuildResult(); - using IContainer container = new ContainerBuilder().AddModule(new TestSynchronizerModule(new TestSyncConfig())).Build(); + using IContainer container = CreateContainer(); 
SnapProvider snapProvider = container.Resolve(); var storageRange = PrepareStorageRequest(TestItem.Tree.AccountAddress0, rootHash, Keccak.Zero); @@ -72,7 +100,7 @@ public void RecreateAccountStateFromOneRangeWithExistenceProof() _inputStateTree!.Accept(accountProofCollector, _inputStateTree.RootHash); var proof = accountProofCollector.BuildResult(); - using IContainer container = new ContainerBuilder().AddModule(new TestSynchronizerModule(new TestSyncConfig())).Build(); + using IContainer container = CreateContainer(); SnapProvider snapProvider = container.Resolve(); var storageRange = PrepareStorageRequest(TestItem.Tree.AccountAddress0, rootHash, Keccak.Zero); @@ -86,7 +114,7 @@ public void RecreateStorageStateFromOneRangeWithoutProof() { Hash256 rootHash = _inputStorageTree!.RootHash; // "..." - using IContainer container = new ContainerBuilder().AddModule(new TestSynchronizerModule(new TestSyncConfig())).Build(); + using IContainer container = CreateContainer(); SnapProvider snapProvider = container.Resolve(); var storageRange = PrepareStorageRequest(TestItem.Tree.AccountAddress0, rootHash, TestItem.Tree.SlotsWithPaths[0].Path); @@ -101,7 +129,7 @@ public void RecreateAccountStateFromMultipleRange() Hash256 rootHash = _inputStorageTree!.RootHash; // "..." // output state - using IContainer container = new ContainerBuilder().AddModule(new TestSynchronizerModule(new TestSyncConfig())).Build(); + using IContainer container = CreateContainer(); SnapProvider snapProvider = container.Resolve(); AccountProofCollector accountProofCollector = new(TestItem.Tree.AccountAddress0.Bytes, new ValueHash256[] { Keccak.Zero, TestItem.Tree.SlotsWithPaths[1].Path }); @@ -136,7 +164,7 @@ public void MissingAccountFromRange() Hash256 rootHash = _inputStorageTree!.RootHash; // "..." 
// output state - using IContainer container = new ContainerBuilder().AddModule(new TestSynchronizerModule(new TestSyncConfig())).Build(); + using IContainer container = CreateContainer(); SnapProvider snapProvider = container.Resolve(); AccountProofCollector accountProofCollector = new(TestItem.Tree.AccountAddress0.Bytes, new ValueHash256[] { Keccak.Zero, TestItem.Tree.SlotsWithPaths[1].Path }); @@ -169,13 +197,15 @@ public void MissingAccountFromRange() public void AddStorageRange_WhereProofIsTheSameAsAllKey_ShouldStillStore() { Hash256 account = TestItem.KeccakA; - TestMemDb testMemDb = new(); - var rawTrieStore = new RawScopedTrieStore(new NodeStorage(testMemDb), account); - StorageTree tree = new(rawTrieStore, LimboLogs.Instance); - - (AddRangeResult result, bool moreChildrenToRight) = SnapProviderHelper.AddStorageRange( - tree, - new PathWithAccount(account, new Account(1, 1, new Hash256("0xeb8594ba5b3314111518b584bbd3801fb3aed5970bd8b47fd9ff744505fe101c"), TestItem.KeccakA)), + using IContainer container = CreateContainerBuilder() + .Build(); + ISnapTestHelper helper = container.Resolve(); + ISnapTrieFactory factory = container.Resolve(); + + var pathWithAccount = new PathWithAccount(account, new Account(1, 1, new Hash256("0xeb8594ba5b3314111518b584bbd3801fb3aed5970bd8b47fd9ff744505fe101c"), TestItem.KeccakA)); + (AddRangeResult result, bool moreChildrenToRight, Hash256 _, bool rootFinished) = SnapProviderHelper.AddStorageRange( + factory, + pathWithAccount, [ new PathWithStorageSlot(new ValueHash256("0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563"), Bytes.FromHexString("94654f75e491acf8c380d2a6906e67e2e56813665e")), ], @@ -188,28 +218,30 @@ public void AddStorageRange_WhereProofIsTheSameAsAllKey_ShouldStillStore() result.Should().Be(AddRangeResult.OK); moreChildrenToRight.Should().BeFalse(); - testMemDb.WritesCount.Should().Be(1); + helper.TrieNodeWritesCount.Should().Be(1); } [Test] public void 
AddStorageRange_EmptySlots_ReturnsEmptySlots() { Hash256 account = TestItem.KeccakA; - TestMemDb testMemDb = new(); - var rawTrieStore = new RawScopedTrieStore(new NodeStorage(testMemDb), account); - StorageTree tree = new(rawTrieStore, LimboLogs.Instance); + using IContainer container = CreateContainerBuilder() + .Build(); - (AddRangeResult result, bool moreChildrenToRight) = SnapProviderHelper.AddStorageRange( - tree, - new PathWithAccount(account, new Account(1, 1, new Hash256("0xeb8594ba5b3314111518b584bbd3801fb3aed5970bd8b47fd9ff744505fe101c"), TestItem.KeccakA)), + ISnapTestHelper helper = container.Resolve(); + ISnapTrieFactory factory = container.Resolve(); + + var pathWithAccount = new PathWithAccount(account, new Account(1, 1, new Hash256("0xeb8594ba5b3314111518b584bbd3801fb3aed5970bd8b47fd9ff744505fe101c"), TestItem.KeccakA)); + (AddRangeResult result, bool moreChildrenToRight, Hash256 _, bool rootFinished) = SnapProviderHelper.AddStorageRange( + factory, + pathWithAccount, Array.Empty(), // Empty slots list Keccak.Zero, null, proofs: null); - result.Should().Be(AddRangeResult.EmptySlots); - moreChildrenToRight.Should().BeFalse(); - testMemDb.WritesCount.Should().Be(0); // No writes should happen + result.Should().Be(AddRangeResult.EmptyRange); + helper.TrieNodeWritesCount.Should().Be(0); // No writes should happen } private static StorageRange PrepareStorageRequest(ValueHash256 accountPath, Hash256 storageRoot, ValueHash256 startingHash) diff --git a/src/Nethermind/Nethermind.Synchronization.Test/SnapSync/SnapProviderTests.cs b/src/Nethermind/Nethermind.Synchronization.Test/SnapSync/SnapProviderTests.cs index c3fa5576784..3bc3f1eaca5 100644 --- a/src/Nethermind/Nethermind.Synchronization.Test/SnapSync/SnapProviderTests.cs +++ b/src/Nethermind/Nethermind.Synchronization.Test/SnapSync/SnapProviderTests.cs @@ -21,20 +21,49 @@ using Nethermind.Db; using Nethermind.Serialization.Rlp; using Nethermind.State; +using Nethermind.State.Flat; +using 
Nethermind.State.Proofs; +using Nethermind.State.Flat.Persistence; +using Nethermind.State.Flat.Sync; +using Nethermind.State.Flat.Sync.Snap; using Nethermind.State.SnapServer; +using Nethermind.Trie.Pruning; using AccountRange = Nethermind.State.Snap.AccountRange; namespace Nethermind.Synchronization.Test.SnapSync; -[TestFixture] -public class SnapProviderTests +[TestFixture(true)] +[TestFixture(false)] +public class SnapProviderTests(bool useFlat) { + + private ContainerBuilder CreateContainerBuilder(TestSyncConfig? testSyncConfig = null) + { + TestSyncConfig testConfig = testSyncConfig ?? new TestSyncConfig(); + + ContainerBuilder builder = new ContainerBuilder() + .AddModule(new TestSynchronizerModule(testConfig)); + + if (useFlat) + { + builder = builder + .AddSingleton>((_) => new TestMemColumnsDb()) + .AddSingleton() + .AddSingleton(); + } + + return builder; + } + + private IContainer CreateContainer(TestSyncConfig? testSyncConfig = null) + { + return CreateContainerBuilder(testSyncConfig).Build(); + } + [Test] public void AddAccountRange_AccountListIsEmpty_ThrowArgumentException() { - using IContainer container = new ContainerBuilder() - .AddModule(new TestSynchronizerModule(new TestSyncConfig())) - .Build(); + using IContainer container = CreateContainer(); SnapProvider snapProvider = container.Resolve(); @@ -50,9 +79,7 @@ public void AddAccountRange_AccountListIsEmpty_ThrowArgumentException() [Test] public void AddAccountRange_ResponseHasEmptyListOfAccountsAndOneProof_ReturnsExpiredRootHash() { - using IContainer container = new ContainerBuilder() - .AddModule(new TestSynchronizerModule(new TestSyncConfig())) - .Build(); + using IContainer container = CreateContainer(); SnapProvider snapProvider = container.Resolve(); @@ -67,9 +94,7 @@ public void AddAccountRange_ResponseHasEmptyListOfAccountsAndOneProof_ReturnsExp [Test] public void AddStorageRange_ResponseReversedOrderedListOfAccounts_ReturnsInvalidOrder() { - using IContainer container = new 
ContainerBuilder() - .AddModule(new TestSynchronizerModule(new TestSyncConfig())) - .Build(); + using IContainer container = CreateContainer(); SnapProvider snapProvider = container.Resolve(); ProgressTracker progressTracker = container.Resolve(); @@ -100,9 +125,7 @@ public void AddStorageRange_ResponseReversedOrderedListOfAccounts_ReturnsInvalid [Test] public void AddStorageRange_EmptySlotsList_ReturnsEmptySlots() { - using IContainer container = new ContainerBuilder() - .AddModule(new TestSynchronizerModule(new TestSyncConfig())) - .Build(); + using IContainer container = CreateContainer(); SnapProvider snapProvider = container.Resolve(); ProgressTracker progressTracker = container.Resolve(); @@ -119,11 +142,73 @@ public void AddStorageRange_EmptySlotsList_ReturnsEmptySlots() storage, 0, emptySlots, - null).Should().Be(AddRangeResult.EmptySlots); + null).Should().Be(AddRangeResult.EmptyRange); progressTracker.IsSnapGetRangesFinished().Should().BeFalse(); } + [Test] + public void AddStorageRange_ShouldPersistEntries() + { + const int slotCount = 6; + TestMemDb stateDb = new TestMemDb(); + TestRawTrieStore store = new TestRawTrieStore(stateDb); + + // Build storage tree with RLP-encoded 32-byte values + Hash256 accountHash = TestItem.Tree.AccountAddress0; + StorageTree storageTree = new StorageTree(store.GetTrieStore(accountHash), LimboLogs.Instance); + PathWithStorageSlot[] slots = new PathWithStorageSlot[slotCount]; + for (int i = 0; i < slotCount; i++) + { + ValueHash256 slotKey = Keccak.Compute(i.ToBigEndianByteArray()); + byte[] value = (i + 1).ToBigEndianByteArray(); + byte[] rlpValue = Rlp.Encode(value).Bytes; + storageTree.Set(slotKey, rlpValue, false); + slots[i] = new PathWithStorageSlot(slotKey, rlpValue); + } + storageTree.Commit(); + Array.Sort(slots, (a, b) => a.Path.CompareTo(b.Path)); + + StateTree stateTree = new StateTree(store.GetTrieStore(null), LimboLogs.Instance); + stateTree.Set(accountHash, 
Build.An.Account.WithBalance(1).WithStorageRoot(storageTree.RootHash).TestObject); + stateTree.Commit(); + + // Collect proofs + AccountProofCollector proofCollector = new(accountHash.Bytes, + new ValueHash256[] { Keccak.Zero, slots[^1].Path }); + stateTree.Accept(proofCollector, stateTree.RootHash); + var proof = proofCollector.BuildResult(); + + using IContainer container = CreateContainer(); + SnapProvider snapProvider = container.Resolve(); + + StorageRange storageRange = new() + { + StartingHash = Keccak.Zero, + Accounts = new ArrayPoolList(1) + { + new(accountHash, new Account(0, 1).WithChangedStorageRoot(storageTree.RootHash)) + }, + }; + + snapProvider.AddStorageRangeForAccount( + storageRange, 0, slots, + proof!.StorageProofs![0].Proof!.Concat(proof!.StorageProofs![1].Proof!).ToArray()) + .Should().Be(AddRangeResult.OK); + + if (useFlat) + { + IPersistence persistence = container.Resolve(); + using var reader = persistence.CreateReader(); + int count = 0; + using (var iter = reader.CreateStorageIterator(accountHash)) + { + while (iter.MoveNext()) count++; + } + count.Should().Be(slotCount); + } + } + [Test] public void AddAccountRange_SetStartRange_ToAfterLastPath() { @@ -140,11 +225,10 @@ public void AddAccountRange_SetStartRange_ToAfterLastPath() (SnapServer ss, Hash256 root) = BuildSnapServerFromEntries(entries); - using IContainer container = new ContainerBuilder() - .AddModule(new TestSynchronizerModule(new TestSyncConfig() - { - SnapSyncAccountRangePartitionCount = 1 - })) + using IContainer container = CreateContainerBuilder(new TestSyncConfig() + { + SnapSyncAccountRangePartitionCount = 1 + }) .WithSuggestedHeaderOfStateRoot(root) .Build(); @@ -171,22 +255,23 @@ public void AddAccountRange_ShouldNotStoreStorageAfterLimit() { (Hash256, Account)[] entries = [ - (TestItem.KeccakA, TestItem.GenerateRandomAccount().WithChangedStorageRoot(TestItem.GetRandomKeccak())), - (TestItem.KeccakB, 
TestItem.GenerateRandomAccount().WithChangedStorageRoot(TestItem.GetRandomKeccak())), - (TestItem.KeccakC, TestItem.GenerateRandomAccount().WithChangedStorageRoot(TestItem.GetRandomKeccak())), - (TestItem.KeccakD, TestItem.GenerateRandomAccount().WithChangedStorageRoot(TestItem.GetRandomKeccak())), - (TestItem.KeccakE, TestItem.GenerateRandomAccount().WithChangedStorageRoot(TestItem.GetRandomKeccak())), - (TestItem.KeccakF, TestItem.GenerateRandomAccount().WithChangedStorageRoot(TestItem.GetRandomKeccak())), + (new Hash256("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), TestItem.GenerateRandomAccount().WithChangedStorageRoot(TestItem.GetRandomKeccak())), + (new Hash256("2fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), TestItem.GenerateRandomAccount().WithChangedStorageRoot(TestItem.GetRandomKeccak())), + (new Hash256("7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), TestItem.GenerateRandomAccount().WithChangedStorageRoot(TestItem.GetRandomKeccak())), + // Should split it right here + + (new Hash256("9fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), TestItem.GenerateRandomAccount().WithChangedStorageRoot(TestItem.GetRandomKeccak())), + (new Hash256("afffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), TestItem.GenerateRandomAccount().WithChangedStorageRoot(TestItem.GetRandomKeccak())), + (new Hash256("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"), TestItem.GenerateRandomAccount().WithChangedStorageRoot(TestItem.GetRandomKeccak())), ]; Array.Sort(entries, static (e1, e2) => e1.Item1.CompareTo(e2.Item1)); (SnapServer ss, Hash256 root) = BuildSnapServerFromEntries(entries); - using IContainer container = new ContainerBuilder() - .AddModule(new TestSynchronizerModule(new TestSyncConfig() - { - SnapSyncAccountRangePartitionCount = 2 - })) + using IContainer container = CreateContainerBuilder(new TestSyncConfig() + { + SnapSyncAccountRangePartitionCount = 2 + 
}) .WithSuggestedHeaderOfStateRoot(root) .Build(); @@ -196,6 +281,7 @@ public void AddAccountRange_ShouldNotStoreStorageAfterLimit() (IOwnedReadOnlyList accounts, IOwnedReadOnlyList proofs) = ss.GetAccountRanges( root, Keccak.Zero, Keccak.MaxValue, 1.MB(), default); + // The range given out here should be half. progressTracker.IsFinished(out SnapSyncBatch? batch).Should().Be(false); using AccountsAndProofs accountsAndProofs = new(); @@ -204,7 +290,25 @@ public void AddAccountRange_ShouldNotStoreStorageAfterLimit() snapProvider.AddAccountRange(batch?.AccountRangeRequest!, accountsAndProofs).Should().Be(AddRangeResult.OK); - container.ResolveNamed(DbNames.State).GetAllKeys().Count().Should().Be(6); + if (useFlat) + { + IPersistence persistence = container.Resolve(); + using var reader = persistence.CreateReader(); + int accountCount = 0; + using (var accountIterator = reader.CreateAccountIterator(ValueKeccak.Zero, ValueKeccak.MaxValue)) + { + while (accountIterator.MoveNext()) + { + accountCount++; + } + } + + Assert.That(accountCount, Is.EqualTo(3)); + } + else + { + container.ResolveNamed(DbNames.State).GetAllKeys().Count().Should().Be(3); // 3 child. 
Root branch node not saved due to state sync compatibility + } } [TestCase("badreq-roothash.zip")] @@ -225,9 +329,11 @@ public void Test_EdgeCases(string testFileName) List pathWithAccounts = accounts.Select((acc, idx) => new PathWithAccount(paths[idx], acc)).ToList(); List proofs = asReq.Proofs.Select((str) => Bytes.FromHexString(str)).ToList(); - StateTree stree = new StateTree(new TestRawTrieStore(new TestMemDb()), LimboLogs.Instance); + var adapter = new SnapUpperBoundAdapter(new RawScopedTrieStore(new TestMemDb())); + StateTree stree = new StateTree(adapter, LimboLogs.Instance); + var factory = new TestSnapTrieFactory(() => new PatriciaSnapStateTree(stree, adapter)); SnapProviderHelper.AddAccountRange( - stree, + factory, 0, new ValueHash256(asReq.Root), new ValueHash256(asReq.StartingHash), diff --git a/src/Nethermind/Nethermind.Synchronization.Test/SnapSync/SnapServerTest.cs b/src/Nethermind/Nethermind.Synchronization.Test/SnapSync/SnapServerTest.cs index c945f4323cb..37a00257b43 100644 --- a/src/Nethermind/Nethermind.Synchronization.Test/SnapSync/SnapServerTest.cs +++ b/src/Nethermind/Nethermind.Synchronization.Test/SnapSync/SnapServerTest.cs @@ -6,12 +6,19 @@ using Nethermind.Core; using Nethermind.Core.Collections; using Nethermind.Core.Crypto; +using Nethermind.Core.Extensions; using Nethermind.Core.Test; using Nethermind.Core.Test.Builders; +using Nethermind.Core.Threading; using Nethermind.Db; using Nethermind.Int256; using Nethermind.Logging; using Nethermind.State; +using Nethermind.State.Flat; +using Nethermind.State.Flat.Persistence; +using Nethermind.State.Flat.ScopeProvider; +using Nethermind.State.Flat.Sync; +using Nethermind.State.Flat.Sync.Snap; using Nethermind.State.Snap; using Nethermind.State.SnapServer; using Nethermind.Synchronization.FastSync; @@ -23,54 +30,306 @@ namespace Nethermind.Synchronization.Test.SnapSync; -public class SnapServerTest +[TestFixture(true)] +[TestFixture(false)] +public class SnapServerTest(bool useFlat) { - 
private class Context + internal interface IWriteBatch : IDisposable { - internal SnapServer Server { get; init; } = null!; - internal SnapProvider SnapProvider { get; init; } = null!; - internal StateTree Tree { get; init; } = null!; - internal MemDb ClientStateDb { get; init; } = null!; + void SetAccount(Address address, Account account); + void SetAccount(Hash256 accountPath, Account account); + void SetSlot(Hash256 storagePath, ValueHash256 slotKey, byte[] value, bool rlpEncode = true); } - private Context CreateContext(ILastNStateRootTracker? lastNStateRootTracker = null) + internal interface ISnapServerContext : IDisposable { - MemDb stateDbServer = new(); - MemDb codeDbServer = new(); - TestRawTrieStore store = new TestRawTrieStore(stateDbServer); - StateTree tree = new(store, LimboLogs.Instance); - SnapServer server = new(store.AsReadOnly(), codeDbServer, LimboLogs.Instance, lastNStateRootTracker); + ISnapServer Server { get; } + SnapProvider SnapProvider { get; } + Hash256 RootHash { get; } + int PersistedNodeCount { get; } - MemDb clientStateDb = new(); - using ProgressTracker progressTracker = new(clientStateDb, new TestSyncConfig(), new StateSyncPivot(null!, new TestSyncConfig(), LimboLogs.Instance), LimboLogs.Instance); + IWriteBatch BeginWriteBatch(); + Hash256 GetStorageRoot(Hash256 storagePath); + } - INodeStorage nodeStorage = new NodeStorage(clientStateDb); + private class TrieSnapServerContext : ISnapServerContext + { + private readonly TestRawTrieStore _store; + private readonly StateTree _tree; + private readonly MemDb _clientStateDb; - SnapProvider snapProvider = new(progressTracker, new MemDb(), nodeStorage, LimboLogs.Instance); + public ISnapServer Server { get; } + public SnapProvider SnapProvider { get; } + public Hash256 RootHash => _tree.RootHash; + public int PersistedNodeCount => _clientStateDb.Keys.Count; - return new Context + internal TrieSnapServerContext(ILastNStateRootTracker? 
lastNStateRootTracker = null) { - Server = server, - SnapProvider = snapProvider, - Tree = tree, - ClientStateDb = clientStateDb - }; + MemDb stateDbServer = new(); + MemDb codeDbServer = new(); + _store = new TestRawTrieStore(stateDbServer); + _tree = new StateTree(_store, LimboLogs.Instance); + Server = new SnapServer(_store.AsReadOnly(), codeDbServer, LimboLogs.Instance, lastNStateRootTracker); + + _clientStateDb = new MemDb(); + using ProgressTracker progressTracker = new(_clientStateDb, new TestSyncConfig(), new StateSyncPivot(null!, new TestSyncConfig(), LimboLogs.Instance), LimboLogs.Instance); + INodeStorage nodeStorage = new NodeStorage(_clientStateDb); + SnapProvider = new SnapProvider(progressTracker, new MemDb(), new PatriciaSnapTrieFactory(nodeStorage, LimboLogs.Instance), LimboLogs.Instance); + } + + public IWriteBatch BeginWriteBatch() => new WriteBatch(this); + public Hash256 GetStorageRoot(Hash256 accountPath) => _tree.Get(accountPath)!.StorageRoot; + public void Dispose() => ((IDisposable)_store).Dispose(); + + private class WriteBatch(TrieSnapServerContext ctx) : IWriteBatch + { + private readonly List<(Hash256 Path, Account Account)> _pendingAccounts = new(); + private readonly Dictionary _storageTrees = new(); + + public void SetAccount(Address address, Account account) => + _pendingAccounts.Add((address.ToAccountPath.ToCommitment(), account)); + + public void SetAccount(Hash256 accountPath, Account account) => + _pendingAccounts.Add((accountPath, account)); + + public void SetSlot(Hash256 storagePath, ValueHash256 slotKey, byte[] value, bool rlpEncode = true) + { + if (!_storageTrees.TryGetValue(storagePath, out StorageTree? 
st)) + { + st = new StorageTree(ctx._store.GetTrieStore(storagePath), LimboLogs.Instance); + _storageTrees[storagePath] = st; + } + st.Set(slotKey, value, rlpEncode); + } + + public void Dispose() + { + Dictionary storageRoots = new(); + foreach (var (path, st) in _storageTrees) + { + st.Commit(); + storageRoots[path] = st.RootHash; + } + + foreach (var (path, account) in _pendingAccounts) + { + Account finalAccount = storageRoots.TryGetValue(path, out Hash256? root) + ? account.WithChangedStorageRoot(root) + : account; + ctx._tree.Set(path, finalAccount); + } + + ctx._tree.Commit(); + } + } + } + + private class TestSnapshotContentTrieStore(SnapshotContent content) : AbstractMinimalTrieStore + { + public override TrieNode FindCachedOrUnknown(in TreePath path, Hash256 hash) => + content.StateNodes.TryGetValue(path, out TrieNode? node) ? node : new TrieNode(NodeType.Unknown, hash); + + public override byte[]? TryLoadRlp(in TreePath path, Hash256 hash, ReadFlags flags) => null; + + public override ICommitter BeginCommit(TrieNode? root, WriteFlags writeFlags) => + new StateCommitter(content); + + public override ITrieNodeResolver GetStorageTrieNodeResolver(Hash256? address) => + address is null ? this : new TestSnapshotContentStorageTrieStore(content, address); + + private class StateCommitter(SnapshotContent content) : AbstractMinimalCommitter(new ConcurrencyController(1)) + { + public override TrieNode CommitNode(ref TreePath path, TrieNode node) + { + content.StateNodes[path] = node; + return node; + } + } + } + + private class TestSnapshotContentStorageTrieStore(SnapshotContent content, Hash256AsKey addressHash) : AbstractMinimalTrieStore + { + public override TrieNode FindCachedOrUnknown(in TreePath path, Hash256 hash) => + content.StorageNodes.TryGetValue((addressHash, path), out TrieNode? node) ? node : new TrieNode(NodeType.Unknown, hash); + + public override byte[]? 
TryLoadRlp(in TreePath path, Hash256 hash, ReadFlags flags) => null; + + public override ICommitter BeginCommit(TrieNode? root, WriteFlags writeFlags) => + new StorageCommitter(content, addressHash); + + private class StorageCommitter(SnapshotContent content, Hash256AsKey addressHash) : AbstractMinimalCommitter(new ConcurrencyController(1)) + { + public override TrieNode CommitNode(ref TreePath path, TrieNode node) + { + content.StorageNodes[(addressHash, path)] = node; + return node; + } + } + } + + private class FlatSnapServerContext : ISnapServerContext + { + private readonly SnapshotContent _content; + private readonly StateTree _tree; + private readonly MemDb _clientStateDb; + private readonly IResourcePool _resourcePool; + + public ISnapServer Server { get; } + public SnapProvider SnapProvider { get; } + public Hash256 RootHash => _tree.RootHash; + public int PersistedNodeCount => _clientStateDb.Keys.Count; + + internal FlatSnapServerContext(ILastNStateRootTracker? lastNStateRootTracker = null) + { + _content = new SnapshotContent(); + var trieStore = new TestSnapshotContentTrieStore(_content); + _tree = new StateTree(trieStore, LimboLogs.Instance); + + _resourcePool = Substitute.For(); + + IFlatStateRootIndex stateRootIndex = Substitute.For(); + stateRootIndex.TryGetStateId(Arg.Any(), out Arg.Any()) + .Returns(call => + { + Hash256 hash = call.ArgAt(0); + if (lastNStateRootTracker?.HasStateRoot(hash) == false || _content.StateNodes.IsEmpty) + { + call[1] = default(StateId); + return false; + } + call[1] = new StateId(1, hash); + return true; + }); + + IFlatDbManager flatDbManager = Substitute.For(); + flatDbManager.GatherReadOnlySnapshotBundle(Arg.Any()) + .Returns(_ => CreateReadOnlyBundle()); + + MemDb codeDb = new(); + Server = new FlatSnapServer(flatDbManager, codeDb, stateRootIndex, LimboLogs.Instance); + + _clientStateDb = new MemDb(); + using ProgressTracker progressTracker = new(_clientStateDb, new TestSyncConfig(), new StateSyncPivot(null!, new 
TestSyncConfig(), LimboLogs.Instance), LimboLogs.Instance); + INodeStorage nodeStorage = new NodeStorage(_clientStateDb); + SnapProvider = new SnapProvider(progressTracker, new MemDb(), new PatriciaSnapTrieFactory(nodeStorage, LimboLogs.Instance), LimboLogs.Instance); + } + + private ReadOnlySnapshotBundle CreateReadOnlyBundle() + { + Snapshot snapshot = new(StateId.PreGenesis, new StateId(1, _tree.RootHash), _content, _resourcePool, ResourcePool.Usage.ReadOnlyProcessingEnv); + SnapshotPooledList list = new(1); + list.Add(snapshot); + var persistenceReader = Substitute.For(); + return new ReadOnlySnapshotBundle(list, persistenceReader, false); + } + + public IWriteBatch BeginWriteBatch() => new WriteBatch(this); + public Hash256 GetStorageRoot(Hash256 accountPath) => _tree.Get(accountPath)!.StorageRoot; + public void Dispose() { } + + private class WriteBatch(FlatSnapServerContext ctx) : IWriteBatch + { + private readonly List<(Hash256 Path, Account Account)> _pendingAccounts = new(); + private readonly Dictionary _storageTrees = new(); + + public void SetAccount(Address address, Account account) => + _pendingAccounts.Add((address.ToAccountPath.ToCommitment(), account)); + + public void SetAccount(Hash256 accountPath, Account account) => + _pendingAccounts.Add((accountPath, account)); + + public void SetSlot(Hash256 storagePath, ValueHash256 slotKey, byte[] value, bool rlpEncode = true) + { + if (!_storageTrees.TryGetValue(storagePath, out StorageTree? st)) + { + st = new StorageTree( + new TestSnapshotContentStorageTrieStore(ctx._content, storagePath), + LimboLogs.Instance); + _storageTrees[storagePath] = st; + } + st.Set(slotKey, value, rlpEncode); + } + + public void Dispose() + { + Dictionary storageRoots = new(); + foreach (var (path, st) in _storageTrees) + { + st.Commit(); + storageRoots[path] = st.RootHash; + } + + foreach (var (path, account) in _pendingAccounts) + { + Account finalAccount = storageRoots.TryGetValue(path, out Hash256? root) + ? 
account.WithChangedStorageRoot(root) + : account; + ctx._tree.Set(path, finalAccount); + } + + ctx._tree.Commit(); + } + } + } + + private ISnapServerContext CreateContext(ILastNStateRootTracker? lastNStateRootTracker = null) => + useFlat + ? new FlatSnapServerContext(lastNStateRootTracker) + : new TrieSnapServerContext(lastNStateRootTracker); + + private static void FillWithTestAccounts(ISnapServerContext context) + { + using var batch = context.BeginWriteBatch(); + foreach (var pwa in TestItem.Tree.AccountsWithPaths) + batch.SetAccount(pwa.Path.ToCommitment(), pwa.Account); + } + + private static void FillMultipleAccounts(ISnapServerContext context, int count) + { + using var batch = context.BeginWriteBatch(); + for (int i = 0; i < count; i++) + batch.SetAccount(Keccak.Compute(i.ToBigEndianByteArray()), Build.An.Account.WithBalance((UInt256)i).TestObject); + } + + private static Hash256 FillAccountWithDefaultStorage(ISnapServerContext context) + { + using (var batch = context.BeginWriteBatch()) + { + for (int i = 0; i < 6; i++) + batch.SetSlot(TestItem.Tree.AccountAddress0, TestItem.Tree.SlotsWithPaths[i].Path, TestItem.Tree.SlotsWithPaths[i].SlotRlpValue, rlpEncode: false); + batch.SetAccount(TestItem.Tree.AccountAddress0, Build.An.Account.WithBalance(1).TestObject); + } + return context.GetStorageRoot(TestItem.Tree.AccountAddress0); + } + + private static Hash256 FillAccountWithStorage(ISnapServerContext context, int slotCount) + { + using (var batch = context.BeginWriteBatch()) + { + for (int i = 0; i < slotCount; i++) + { + var key = Keccak.Compute(i.ToBigEndianByteArray()); + batch.SetSlot(TestItem.Tree.AccountAddress0, key, key.BytesToArray(), rlpEncode: false); + } + batch.SetAccount(TestItem.Tree.AccountAddress0, Build.An.Account.WithBalance(1).TestObject); + } + return context.GetStorageRoot(TestItem.Tree.AccountAddress0); } [Test] public void TestGetAccountRange() { - Context context = CreateContext(); - 
TestItem.Tree.FillStateTreeWithTestAccounts(context.Tree); + using var context = CreateContext(); + FillWithTestAccounts(context); (IOwnedReadOnlyList accounts, IOwnedReadOnlyList proofs) = - context.Server.GetAccountRanges(context.Tree.RootHash, Keccak.Zero, Keccak.MaxValue, 4000, CancellationToken.None); + context.Server.GetAccountRanges(context.RootHash, Keccak.Zero, Keccak.MaxValue, 4000, CancellationToken.None); - AddRangeResult result = context.SnapProvider.AddAccountRange(1, context.Tree.RootHash, Keccak.Zero, + AddRangeResult result = context.SnapProvider.AddAccountRange(1, context.RootHash, Keccak.Zero, accounts.ToArray(), proofs.ToArray()); result.Should().Be(AddRangeResult.OK); - context.ClientStateDb.Keys.Count.Should().Be(10); + context.PersistedNodeCount.Should().Be(10); accounts.Dispose(); proofs.Dispose(); } @@ -78,11 +337,11 @@ public void TestGetAccountRange() [Test] public void TestGetAccountRange_InvalidRange() { - Context context = CreateContext(); - TestItem.Tree.FillStateTreeWithTestAccounts(context.Tree); + using var context = CreateContext(); + FillWithTestAccounts(context); (IOwnedReadOnlyList accounts, IOwnedReadOnlyList proofs) = - context.Server.GetAccountRanges(context.Tree.RootHash, Keccak.MaxValue, Keccak.Zero, 4000, CancellationToken.None); + context.Server.GetAccountRanges(context.RootHash, Keccak.MaxValue, Keccak.Zero, 4000, CancellationToken.None); accounts.Count.Should().Be(0); accounts.Dispose(); @@ -92,15 +351,15 @@ public void TestGetAccountRange_InvalidRange() [Test] public void TestGetTrieNode_Root() { - Context context = CreateContext(); - TestItem.Tree.FillStateTreeWithTestAccounts(context.Tree); + using var context = CreateContext(); + FillWithTestAccounts(context); using IOwnedReadOnlyList result = context.Server.GetTrieNodes([ new PathGroup() { Group = [[]] } - ], context.Tree.RootHash, default)!; + ], context.RootHash, default)!; result.Count.Should().Be(1); } @@ -108,15 +367,15 @@ public void TestGetTrieNode_Root() 
[Test] public void TestGetTrieNode_Storage_Root() { - Context context = CreateContext(); - TestItem.Tree.FillStateTreeWithTestAccounts(context.Tree); + using var context = CreateContext(); + FillWithTestAccounts(context); using IOwnedReadOnlyList result = context.Server.GetTrieNodes([ new PathGroup() { Group = [TestItem.Tree.AccountsWithPaths[0].Path.Bytes.ToArray(), []] } - ], context.Tree.RootHash, default)!; + ], context.RootHash, default)!; result.Count.Should().Be(1); } @@ -132,15 +391,15 @@ public void TestNoState(bool withLastNStateTracker) lastNStateTracker.HasStateRoot(Arg.Any()).Returns(false); } - Context context = CreateContext(lastNStateRootTracker: lastNStateTracker); + using var context = CreateContext(lastNStateRootTracker: lastNStateTracker); (IOwnedReadOnlyList accounts, IOwnedReadOnlyList accountProofs) = - context.Server.GetAccountRanges(context.Tree.RootHash, Keccak.Zero, Keccak.MaxValue, 4000, CancellationToken.None); + context.Server.GetAccountRanges(context.RootHash, Keccak.Zero, Keccak.MaxValue, 4000, CancellationToken.None); accounts.Count.Should().Be(0); (IOwnedReadOnlyList> storageSlots, IOwnedReadOnlyList? 
proofs) = - context.Server.GetStorageRanges(context.Tree.RootHash, [TestItem.Tree.AccountsWithPaths[0]], + context.Server.GetStorageRanges(context.RootHash, [TestItem.Tree.AccountsWithPaths[0]], ValueKeccak.Zero, ValueKeccak.MaxValue, 10, CancellationToken.None); storageSlots.Count.Should().Be(0); @@ -154,18 +413,18 @@ public void TestNoState(bool withLastNStateTracker) [Test] public void TestGetAccountRangeMultiple() { - Context context = CreateContext(); - TestItem.Tree.FillStateTreeWithTestAccounts(context.Tree); + using var context = CreateContext(); + FillWithTestAccounts(context); Hash256 startRange = Keccak.Zero; while (true) { (IOwnedReadOnlyList accounts, IOwnedReadOnlyList proofs) = - context.Server.GetAccountRanges(context.Tree.RootHash, startRange, Keccak.MaxValue, 100, CancellationToken.None); + context.Server.GetAccountRanges(context.RootHash, startRange, Keccak.MaxValue, 100, CancellationToken.None); try { - AddRangeResult result = context.SnapProvider.AddAccountRange(1, context.Tree.RootHash, startRange, + AddRangeResult result = context.SnapProvider.AddAccountRange(1, context.RootHash, startRange, accounts, proofs); result.Should().Be(AddRangeResult.OK); @@ -181,7 +440,7 @@ public void TestGetAccountRangeMultiple() proofs.Dispose(); } } - context.ClientStateDb.Keys.Count.Should().Be(10); + context.PersistedNodeCount.Should().Be(10); } [TestCase(10, 10)] @@ -190,18 +449,18 @@ public void TestGetAccountRangeMultiple() [TestCase(10000, 10000)] public void TestGetAccountRangeMultipleLarger(int stateSize, int byteLimit) { - Context context = CreateContext(); - TestItem.Tree.FillStateTreeMultipleAccount(context.Tree, stateSize); + using var context = CreateContext(); + FillMultipleAccounts(context, stateSize); Hash256 startRange = Keccak.Zero; while (true) { (IOwnedReadOnlyList accounts, IOwnedReadOnlyList proofs) = - context.Server.GetAccountRanges(context.Tree.RootHash, startRange, Keccak.MaxValue, byteLimit, CancellationToken.None); + 
context.Server.GetAccountRanges(context.RootHash, startRange, Keccak.MaxValue, byteLimit, CancellationToken.None); try { - AddRangeResult result = context.SnapProvider.AddAccountRange(1, context.Tree.RootHash, startRange, + AddRangeResult result = context.SnapProvider.AddAccountRange(1, context.RootHash, startRange, accounts, proofs); result.Should().Be(AddRangeResult.OK); @@ -226,19 +485,19 @@ public void TestGetAccountRangeMultipleLarger(int stateSize, int byteLimit) [TestCase(10000, 10000000)] public void TestGetAccountRangeArtificialLimit(int stateSize, int byteLimit) { - Context context = CreateContext(); - TestItem.Tree.FillStateTreeMultipleAccount(context.Tree, stateSize); + using var context = CreateContext(); + FillMultipleAccounts(context, stateSize); Hash256 startRange = Keccak.Zero; ValueHash256 limit = new ValueHash256("0x8000000000000000000000000000000000000000000000000000000000000000"); while (true) { (IOwnedReadOnlyList accounts, IOwnedReadOnlyList proofs) = context.Server - .GetAccountRanges(context.Tree.RootHash, startRange, limit, byteLimit, CancellationToken.None); + .GetAccountRanges(context.RootHash, startRange, limit, byteLimit, CancellationToken.None); try { - AddRangeResult result = context.SnapProvider.AddAccountRange(1, context.Tree.RootHash, startRange, + AddRangeResult result = context.SnapProvider.AddAccountRange(1, context.RootHash, startRange, accounts, proofs); result.Should().Be(AddRangeResult.OK); @@ -260,22 +519,11 @@ public void TestGetAccountRangeArtificialLimit(int stateSize, int byteLimit) [Test] public void TestGetStorageRange() { - MemDb stateDb = new MemDb(); - MemDb codeDb = new MemDb(); - TestRawTrieStore store = new TestRawTrieStore(stateDb); - - (StateTree inputStateTree, StorageTree inputStorageTree, Hash256 _) = TestItem.Tree.GetTrees(store); - - SnapServer server = new(store.AsReadOnly(), codeDb, LimboLogs.Instance); - - IDb codeDb2 = new MemDb(); - IDb stateDb2 = new MemDb(); - - using ProgressTracker 
progressTracker = new(stateDb2, new TestSyncConfig(), new StateSyncPivot(null!, new TestSyncConfig(), LimboLogs.Instance), LimboLogs.Instance); - SnapProvider snapProvider = new(progressTracker, codeDb2, new NodeStorage(stateDb2), LimboLogs.Instance); + using var context = CreateContext(); + Hash256 storageRoot = FillAccountWithDefaultStorage(context); (IOwnedReadOnlyList> storageSlots, IOwnedReadOnlyList? proofs) = - server.GetStorageRanges(inputStateTree.RootHash, [TestItem.Tree.AccountsWithPaths[0]], + context.Server.GetStorageRanges(context.RootHash, [TestItem.Tree.AccountsWithPaths[0]], ValueKeccak.Zero, ValueKeccak.MaxValue, 10, CancellationToken.None); try @@ -283,9 +531,9 @@ public void TestGetStorageRange() var storageRangeRequest = new StorageRange() { StartingHash = Keccak.Zero, - Accounts = new ArrayPoolList(1) { new(TestItem.Tree.AccountsWithPaths[0].Path, new Account(UInt256.Zero).WithChangedStorageRoot(inputStorageTree.RootHash)) } + Accounts = new ArrayPoolList(1) { new(TestItem.Tree.AccountsWithPaths[0].Path, new Account(UInt256.Zero).WithChangedStorageRoot(storageRoot)) } }; - AddRangeResult result = snapProvider.AddStorageRangeForAccount(storageRangeRequest, 0, storageSlots[0], proofs); + AddRangeResult result = context.SnapProvider.AddStorageRangeForAccount(storageRangeRequest, 0, storageSlots[0], proofs); result.Should().Be(AddRangeResult.OK); } @@ -299,20 +547,15 @@ public void TestGetStorageRange() [Test] public void TestGetStorageRange_NoSlotsForAccount() { - MemDb stateDb = new MemDb(); - MemDb codeDb = new MemDb(); - TestRawTrieStore store = new TestRawTrieStore(stateDb); - - (StateTree inputStateTree, StorageTree inputStorageTree, Hash256 _) = TestItem.Tree.GetTrees(store); - - SnapServer server = new(store.AsReadOnly(), codeDb, LimboLogs.Instance); + using var context = CreateContext(); + FillAccountWithDefaultStorage(context); ValueHash256 lastStorageHash = TestItem.Tree.SlotsWithPaths[^1].Path; var asInt = lastStorageHash.ToUInt256(); 
ValueHash256 beyondLast = new ValueHash256((++asInt).ToBigEndian()); (IOwnedReadOnlyList> storageSlots, IOwnedReadOnlyList? proofs) = - server.GetStorageRanges(inputStateTree.RootHash, [TestItem.Tree.AccountsWithPaths[0]], + context.Server.GetStorageRanges(context.RootHash, [TestItem.Tree.AccountsWithPaths[0]], beyondLast, ValueKeccak.MaxValue, 10, CancellationToken.None); storageSlots.Count.Should().Be(0); @@ -325,25 +568,14 @@ public void TestGetStorageRange_NoSlotsForAccount() [Test] public void TestGetStorageRangeMulti() { - MemDb stateDb = new MemDb(); - MemDb codeDb = new MemDb(); - TestRawTrieStore store = new TestRawTrieStore(stateDb); - - (StateTree inputStateTree, StorageTree inputStorageTree, Hash256 _) = TestItem.Tree.GetTrees(store, 10000); - - SnapServer server = new(store.AsReadOnly(), codeDb, LimboLogs.Instance); - - IDb stateDb2 = new MemDb(); - IDb codeDb2 = new MemDb(); - - using ProgressTracker progressTracker = new(stateDb2, new TestSyncConfig(), new StateSyncPivot(null!, new TestSyncConfig(), LimboLogs.Instance), LimboLogs.Instance); - SnapProvider snapProvider = new(progressTracker, codeDb2, new NodeStorage(stateDb2), LimboLogs.Instance); + using var context = CreateContext(); + Hash256 storageRoot = FillAccountWithStorage(context, 10000); Hash256 startRange = Keccak.Zero; while (true) { (IOwnedReadOnlyList> storageSlots, IOwnedReadOnlyList? 
proofs) = - server.GetStorageRanges(inputStateTree.RootHash, [TestItem.Tree.AccountsWithPaths[0]], + context.Server.GetStorageRanges(context.RootHash, [TestItem.Tree.AccountsWithPaths[0]], startRange, ValueKeccak.MaxValue, 10000, CancellationToken.None); try @@ -351,9 +583,9 @@ public void TestGetStorageRangeMulti() var storageRangeRequest = new StorageRange() { StartingHash = startRange, - Accounts = new ArrayPoolList(1) { new(TestItem.Tree.AccountsWithPaths[0].Path, new Account(UInt256.Zero).WithChangedStorageRoot(inputStorageTree.RootHash)) } + Accounts = new ArrayPoolList(1) { new(TestItem.Tree.AccountsWithPaths[0].Path, new Account(UInt256.Zero).WithChangedStorageRoot(storageRoot)) } }; - AddRangeResult result = snapProvider.AddStorageRangeForAccount(storageRangeRequest, 0, storageSlots[0], proofs); + AddRangeResult result = context.SnapProvider.AddStorageRangeForAccount(storageRangeRequest, 0, storageSlots[0], proofs); result.Should().Be(AddRangeResult.OK); if (startRange == storageSlots[0][^1].Path.ToCommitment()) @@ -374,73 +606,65 @@ public void TestGetStorageRangeMulti() [Test] public void TestWithHugeTree() { - MemDb stateDb = new MemDb(); - MemDb codeDb = new MemDb(); - TestRawTrieStore store = new TestRawTrieStore(stateDb); - - StateTree stateTree = new(store, LimboLogs.Instance); + using var context = CreateContext(); // generate Remote Tree - for (int accountIndex = 0; accountIndex < 10000; accountIndex++) + using (var batch = context.BeginWriteBatch()) { - stateTree.Set(TestItem.GetRandomAddress(), TestItem.GenerateRandomAccount()); + for (int accountIndex = 0; accountIndex < 10000; accountIndex++) + batch.SetAccount(TestItem.GetRandomAddress(), TestItem.GenerateRandomAccount()); } - stateTree.Commit(); List accountWithStorage = new(); - for (int i = 1000; i < 10000; i += 1000) + using (var batch = context.BeginWriteBatch()) { - Address address = TestItem.GetRandomAddress(); - StorageTree storageTree = new(store.GetTrieStore(address), 
LimboLogs.Instance); - for (int j = 0; j < i; j += 1) + for (int i = 1000; i < 10000; i += 1000) { - storageTree.Set(TestItem.GetRandomKeccak(), TestItem.GetRandomKeccak().Bytes.ToArray()); + Address address = TestItem.GetRandomAddress(); + Hash256 storagePath = address.ToAccountPath.ToCommitment(); + for (int j = 0; j < i; j += 1) + batch.SetSlot(storagePath, TestItem.GetRandomKeccak(), TestItem.GetRandomKeccak().Bytes.ToArray()); + batch.SetAccount(address, TestItem.GenerateRandomAccount()); + accountWithStorage.Add(new PathWithAccount(address.ToAccountPath, new Account(0))); } - storageTree.Commit(); - var account = TestItem.GenerateRandomAccount().WithChangedStorageRoot(storageTree.RootHash); - stateTree.Set(address, account); - accountWithStorage.Add(new PathWithAccount() { Path = Keccak.Compute(address.Bytes), Account = account }); } - stateTree.Commit(); - - SnapServer server = new(store.AsReadOnly(), codeDb, LimboLogs.Instance); // size of one PathWithAccount ranges from 39 -> 72 (IOwnedReadOnlyList accounts, IOwnedReadOnlyList accountProofs) - = server.GetAccountRanges(stateTree.RootHash, Keccak.Zero, Keccak.MaxValue, 10, CancellationToken.None); + = context.Server.GetAccountRanges(context.RootHash, Keccak.Zero, Keccak.MaxValue, 10, CancellationToken.None); accounts.Count.Should().Be(1); accounts.Dispose(); accountProofs.Dispose(); (accounts, accountProofs) = - server.GetAccountRanges(stateTree.RootHash, Keccak.Zero, Keccak.MaxValue, 100, CancellationToken.None); + context.Server.GetAccountRanges(context.RootHash, Keccak.Zero, Keccak.MaxValue, 100, CancellationToken.None); accounts.Count.Should().BeGreaterThan(2); accounts.Dispose(); accountProofs.Dispose(); (accounts, accountProofs) = - server.GetAccountRanges(stateTree.RootHash, Keccak.Zero, Keccak.MaxValue, 10000, CancellationToken.None); + context.Server.GetAccountRanges(context.RootHash, Keccak.Zero, Keccak.MaxValue, 10000, CancellationToken.None); accounts.Count.Should().BeGreaterThan(138); 
accounts.Dispose(); accountProofs.Dispose(); // TODO: Double check the threshold (accounts, accountProofs) = - server.GetAccountRanges(stateTree.RootHash, Keccak.Zero, Keccak.MaxValue, 720000, CancellationToken.None); + context.Server.GetAccountRanges(context.RootHash, Keccak.Zero, Keccak.MaxValue, 720000, CancellationToken.None); accounts.Count.Should().Be(10009); accounts.Dispose(); accountProofs.Dispose(); (accounts, accountProofs) = - server.GetAccountRanges(stateTree.RootHash, Keccak.Zero, Keccak.MaxValue, 10000000, CancellationToken.None); + context.Server.GetAccountRanges(context.RootHash, Keccak.Zero, Keccak.MaxValue, 10000000, CancellationToken.None); accounts.Count.Should().Be(10009); accounts.Dispose(); accountProofs.Dispose(); var accountWithStorageArray = accountWithStorage.ToArray(); - (IOwnedReadOnlyList> slots, IOwnedReadOnlyList? proofs) = server.GetStorageRanges(stateTree.RootHash, accountWithStorageArray[..1], ValueKeccak.Zero, ValueKeccak.MaxValue, 10, CancellationToken.None); + (IOwnedReadOnlyList> slots, IOwnedReadOnlyList? 
proofs) = context.Server.GetStorageRanges(context.RootHash, accountWithStorageArray[..1], ValueKeccak.Zero, ValueKeccak.MaxValue, 10, CancellationToken.None); slots.Count.Should().Be(1); slots[0].Count.Should().Be(1); proofs.Should().NotBeNull(); @@ -448,7 +672,7 @@ public void TestWithHugeTree() slots.DisposeRecursive(); proofs?.Dispose(); - (slots, proofs) = server.GetStorageRanges(stateTree.RootHash, accountWithStorageArray[..1], ValueKeccak.Zero, ValueKeccak.MaxValue, 1000000, CancellationToken.None); + (slots, proofs) = context.Server.GetStorageRanges(context.RootHash, accountWithStorageArray[..1], ValueKeccak.Zero, ValueKeccak.MaxValue, 1000000, CancellationToken.None); slots.Count.Should().Be(1); slots[0].Count.Should().Be(1000); proofs.Should().BeEmpty(); @@ -456,14 +680,14 @@ public void TestWithHugeTree() slots.DisposeRecursive(); proofs?.Dispose(); - (slots, proofs) = server.GetStorageRanges(stateTree.RootHash, accountWithStorageArray[..2], ValueKeccak.Zero, ValueKeccak.MaxValue, 10, CancellationToken.None); + (slots, proofs) = context.Server.GetStorageRanges(context.RootHash, accountWithStorageArray[..2], ValueKeccak.Zero, ValueKeccak.MaxValue, 10, CancellationToken.None); slots.Count.Should().Be(1); slots[0].Count.Should().Be(1); proofs.Should().NotBeNull(); slots.DisposeRecursive(); proofs?.Dispose(); - (slots, proofs) = server.GetStorageRanges(stateTree.RootHash, accountWithStorageArray[..2], ValueKeccak.Zero, ValueKeccak.MaxValue, 100000, CancellationToken.None); + (slots, proofs) = context.Server.GetStorageRanges(context.RootHash, accountWithStorageArray[..2], ValueKeccak.Zero, ValueKeccak.MaxValue, 100000, CancellationToken.None); slots.Count.Should().Be(2); slots[0].Count.Should().Be(1000); slots[1].Count.Should().Be(539); @@ -473,7 +697,7 @@ public void TestWithHugeTree() // incomplete tree will be returned as the hard limit is 2000000 - (slots, proofs) = server.GetStorageRanges(stateTree.RootHash, accountWithStorageArray, ValueKeccak.Zero, 
ValueKeccak.MaxValue, 3000000, CancellationToken.None); + (slots, proofs) = context.Server.GetStorageRanges(context.RootHash, accountWithStorageArray, ValueKeccak.Zero, ValueKeccak.MaxValue, 3000000, CancellationToken.None); slots.Count.Should().Be(8); slots[^1].Count.Should().BeLessThan(8000); proofs.Should().NotBeEmpty(); @@ -481,5 +705,4 @@ public void TestWithHugeTree() slots.DisposeRecursive(); proofs?.Dispose(); } - } diff --git a/src/Nethermind/Nethermind.Synchronization.Test/SnapSync/TestSnapTrieFactory.cs b/src/Nethermind/Nethermind.Synchronization.Test/SnapSync/TestSnapTrieFactory.cs new file mode 100644 index 00000000000..ff84d523885 --- /dev/null +++ b/src/Nethermind/Nethermind.Synchronization.Test/SnapSync/TestSnapTrieFactory.cs @@ -0,0 +1,14 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System; +using Nethermind.Core.Crypto; +using Nethermind.Synchronization.SnapSync; + +namespace Nethermind.Synchronization.Test.SnapSync; + +internal class TestSnapTrieFactory(Func createTree) : ISnapTrieFactory +{ + public ISnapTree CreateStateTree() => createTree(); + public ISnapTree CreateStorageTree(in ValueHash256 accountPath) => createTree(); +} diff --git a/src/Nethermind/Nethermind.Synchronization.Test/TestSynchronizerModule.cs b/src/Nethermind/Nethermind.Synchronization.Test/TestSynchronizerModule.cs index c4f86941504..683c457edaf 100644 --- a/src/Nethermind/Nethermind.Synchronization.Test/TestSynchronizerModule.cs +++ b/src/Nethermind/Nethermind.Synchronization.Test/TestSynchronizerModule.cs @@ -1,6 +1,7 @@ // SPDX-FileCopyrightText: 2024 Demerzel Solutions Limited // SPDX-License-Identifier: LGPL-3.0-only +using System; using Autofac; using Nethermind.Api; using Nethermind.Blockchain; @@ -12,12 +13,15 @@ using Nethermind.Init.Modules; using Nethermind.Logging; using Nethermind.Stats; +using Nethermind.Synchronization.SnapSync; using Nethermind.Trie; using NSubstitute; namespace 
Nethermind.Synchronization.Test; -public class TestSynchronizerModule(ISyncConfig syncConfig) : Module +public class TestSynchronizerModule( + ISyncConfig syncConfig, + Func? factoryCreator = null) : Module { protected override void Load(ContainerBuilder builder) { @@ -35,5 +39,14 @@ protected override void Load(ContainerBuilder builder) .AddSingleton() .AddSingleton() .AddSingleton(LimboLogs.Instance); + + // Override factory if provided (must come after SynchronizerModule to override) + if (factoryCreator is not null) + { + builder.Register(c => factoryCreator( + c.Resolve(), + c.Resolve() + )).As().SingleInstance(); + } } } diff --git a/src/Nethermind/Nethermind.Synchronization/FastSync/ITreeSyncStore.cs b/src/Nethermind/Nethermind.Synchronization/FastSync/ITreeSyncStore.cs new file mode 100644 index 00000000000..67d781e3778 --- /dev/null +++ b/src/Nethermind/Nethermind.Synchronization/FastSync/ITreeSyncStore.cs @@ -0,0 +1,55 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System; +using Nethermind.Core; +using Nethermind.Core.Crypto; +using Nethermind.Trie; + +namespace Nethermind.Synchronization.FastSync; + +/// +/// High-level storage interface for TreeSync that abstracts both storage operations +/// and verification operations. Allows different backends (Patricia, Flat) to provide +/// completely different implementations. +/// +public interface ITreeSyncStore +{ + /// + /// Check if a trie node exists in storage. + /// + bool NodeExists(Hash256? address, in TreePath path, in ValueHash256 hash); + + /// + /// Save a trie node to storage. + /// + /// Storage address for storage tries, null for state trie. + /// The path to this node in the trie. + /// The hash of the node data. + /// The RLP-encoded node data. + void SaveNode(Hash256? address, in TreePath path, in ValueHash256 hash, ReadOnlySpan data); + + /// + /// Called when sync is complete and state should be finalized and flushed. 
+ /// + /// The block header containing the synced state root. + void FinalizeSync(BlockHeader pivotHeader); + + /// + /// Create a verification context for checking storage roots during sync. + /// The context is created with root node data that hasn't been persisted yet. + /// + ITreeSyncVerificationContext CreateVerificationContext(byte[] rootNodeData); +} + +/// +/// Context for verifying storage roots during sync. +/// Allows querying accounts from in-flight (not yet persisted) trie data. +/// +public interface ITreeSyncVerificationContext +{ + /// + /// Get an account by its address hash for verification purposes. + /// + Account? GetAccount(Hash256 addressHash); +} diff --git a/src/Nethermind/Nethermind.Synchronization/FastSync/PatriciaTreeSyncStore.cs b/src/Nethermind/Nethermind.Synchronization/FastSync/PatriciaTreeSyncStore.cs new file mode 100644 index 00000000000..6d65641576b --- /dev/null +++ b/src/Nethermind/Nethermind.Synchronization/FastSync/PatriciaTreeSyncStore.cs @@ -0,0 +1,46 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System; +using Nethermind.Core; +using Nethermind.Core.Crypto; +using Nethermind.Logging; +using Nethermind.State; +using Nethermind.Trie; +using Nethermind.Trie.Pruning; + +namespace Nethermind.Synchronization.FastSync; + +public class PatriciaTreeSyncStore(INodeStorage nodeStorage, ILogManager logManager) : ITreeSyncStore +{ + public bool NodeExists(Hash256? address, in TreePath path, in ValueHash256 hash) => + nodeStorage.KeyExists(address, path, hash); + + public void SaveNode(Hash256? 
address, in TreePath path, in ValueHash256 hash, ReadOnlySpan data) => + nodeStorage.Set(address, path, hash, data); + + public void FinalizeSync(BlockHeader pivotHeader) => + // Patricia trie doesn't need block header info, just flush + nodeStorage.Flush(onlyWal: false); + + public ITreeSyncVerificationContext CreateVerificationContext(byte[] rootNodeData) => + new PatriciaVerificationContext(nodeStorage, rootNodeData, logManager); + + private class PatriciaVerificationContext( + INodeStorage nodeStorage, + byte[] rootNodeData, + ILogManager logManager) : ITreeSyncVerificationContext + { + private readonly StateTree _stateTree = CreateStateTree(nodeStorage, rootNodeData, logManager); + + private static StateTree CreateStateTree(INodeStorage nodeStorage, byte[] rootNodeData, ILogManager logManager) + { + StateTree stateTree = new(new RawScopedTrieStore(nodeStorage, null), logManager); + stateTree.RootRef = new TrieNode(NodeType.Unknown, rootNodeData); + return stateTree; + } + + public Account? 
GetAccount(Hash256 addressHash) => + _stateTree.Get(addressHash); + } +} diff --git a/src/Nethermind/Nethermind.Synchronization/FastSync/TreeSync.cs b/src/Nethermind/Nethermind.Synchronization/FastSync/TreeSync.cs index d675b3dfcb7..f8cc34a28df 100644 --- a/src/Nethermind/Nethermind.Synchronization/FastSync/TreeSync.cs +++ b/src/Nethermind/Nethermind.Synchronization/FastSync/TreeSync.cs @@ -18,7 +18,6 @@ using Nethermind.Db; using Nethermind.Logging; using Nethermind.Serialization.Rlp; -using Nethermind.State; using Nethermind.Synchronization.ParallelSync; using Nethermind.Synchronization.Peers; using Nethermind.Trie; @@ -59,7 +58,7 @@ public class TreeSync : ITreeSync private readonly ILogger _logger; private readonly IDb _codeDb; - private readonly INodeStorage _nodeStorage; + private readonly ITreeSyncStore _store; private readonly IBlockTree _blockTree; private readonly StateSyncPivot _stateSyncPivot; @@ -81,11 +80,11 @@ public class TreeSync : ITreeSync public event EventHandler? SyncCompleted; - public TreeSync([KeyFilter(DbNames.Code)] IDb codeDb, INodeStorage nodeStorage, IBlockTree blockTree, StateSyncPivot stateSyncPivot, ISyncConfig syncConfig, ILogManager logManager) + public TreeSync([KeyFilter(DbNames.Code)] IDb codeDb, ITreeSyncStore store, IBlockTree blockTree, StateSyncPivot stateSyncPivot, ISyncConfig syncConfig, ILogManager logManager) { _syncMode = SyncMode.StateNodes; _codeDb = codeDb ?? throw new ArgumentNullException(nameof(codeDb)); - _nodeStorage = nodeStorage ?? throw new ArgumentNullException(nameof(nodeStorage)); + _store = store ?? throw new ArgumentNullException(nameof(store)); _blockTree = blockTree ?? 
throw new ArgumentNullException(nameof(blockTree)); _stateSyncPivot = stateSyncPivot; @@ -373,7 +372,7 @@ shorter than the request */ try { // it finished downloading - rootNodeKeyExists = _nodeStorage.KeyExists(null, TreePath.Empty, _rootNode); + rootNodeKeyExists = _store.NodeExists(null, TreePath.Empty, _rootNode); } catch (ObjectDisposedException) { @@ -547,7 +546,7 @@ private AddNodeResult AddNodeToPending(StateSyncItem syncItem, DependentItem? de } else { - keyExists = _nodeStorage.KeyExists(syncItem.Address, syncItem.Path, syncItem.Hash); + keyExists = _store.NodeExists(syncItem.Address, syncItem.Path, syncItem.Hash); } if (keyExists) @@ -677,7 +676,7 @@ private void SaveNode(StateSyncItem syncItem, byte[] data) Interlocked.Add(ref _data.DataSize, data.Length); Interlocked.Increment(ref Metrics.SyncedStateTrieNodes); - _nodeStorage.Set(syncItem.Address, syncItem.Path, syncItem.Hash, data); + _store.SaveNode(syncItem.Address, syncItem.Path, syncItem.Hash, data); } finally { @@ -695,7 +694,7 @@ private void SaveNode(StateSyncItem syncItem, byte[] data) { Interlocked.Add(ref _data.DataSize, data.Length); Interlocked.Increment(ref Metrics.SyncedStorageTrieNodes); - _nodeStorage.Set(syncItem.Address, syncItem.Path, syncItem.Hash, data); + _store.SaveNode(syncItem.Address, syncItem.Path, syncItem.Hash, data); } finally { @@ -727,7 +726,10 @@ private void SaveNode(StateSyncItem syncItem, byte[] data) { if (_logger.IsInfo) _logger.Info($"Saving root {syncItem.Hash} of {_branchProgress.CurrentSyncBlock}"); - _nodeStorage.Flush(onlyWal: false); + if (_stateSyncPivot.GetPivotHeader() is { } pivotHeader) + { + _store.FinalizeSync(pivotHeader); + } _codeDb.Flush(); Interlocked.Exchange(ref _rootSaved, 1); @@ -741,16 +743,13 @@ private bool VerifyStorageUpdated(StateSyncItem item, byte[] value) { DependentItem dependentItem = new DependentItem(item, value, _stateSyncPivot.UpdatedStorages.Count); - // Need complete state tree as the correct storage root may be different at 
this point. - StateTree stateTree = new StateTree(new RawScopedTrieStore(_nodeStorage, null), LimboLogs.Instance); - // The root is not persisted at this point yet, so we set it as root ref here. - stateTree.RootRef = new TrieNode(NodeType.Unknown, value); + ITreeSyncVerificationContext verificationContext = _store.CreateVerificationContext(value); if (_logger.IsDebug) _logger.Debug($"Checking {_stateSyncPivot.UpdatedStorages.Count} updated storages"); foreach (Hash256 updatedAddress in _stateSyncPivot.UpdatedStorages) { - Account? account = stateTree.Get(updatedAddress); + Account? account = verificationContext.GetAccount(updatedAddress); if (account?.StorageRoot is not null && AddNodeToPending(new StateSyncItem(account.StorageRoot, updatedAddress, TreePath.Empty, NodeDataType.Storage), dependentItem, "incomplete storage") == AddNodeResult.Added) @@ -759,6 +758,7 @@ private bool VerifyStorageUpdated(StateSyncItem item, byte[] value) } else { + if (_logger.IsDebug) _logger.Debug($"Storage {updatedAddress} is ok"); dependentItem.Counter--; } } diff --git a/src/Nethermind/Nethermind.Synchronization/SnapSync/AddRangeResult.cs b/src/Nethermind/Nethermind.Synchronization/SnapSync/AddRangeResult.cs index 639e4e9fb8a..55d88adaa11 100644 --- a/src/Nethermind/Nethermind.Synchronization/SnapSync/AddRangeResult.cs +++ b/src/Nethermind/Nethermind.Synchronization/SnapSync/AddRangeResult.cs @@ -11,6 +11,6 @@ public enum AddRangeResult ExpiredRootHash, InvalidOrder, OutOfBounds, - EmptySlots + EmptyRange } } diff --git a/src/Nethermind/Nethermind.Synchronization/SnapSync/ISnapTree.cs b/src/Nethermind/Nethermind.Synchronization/SnapSync/ISnapTree.cs new file mode 100644 index 00000000000..a16dfd5ab4e --- /dev/null +++ b/src/Nethermind/Nethermind.Synchronization/SnapSync/ISnapTree.cs @@ -0,0 +1,23 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System; +using Nethermind.Core; +using Nethermind.Core.Collections; 
+using Nethermind.Core.Crypto; +using Nethermind.Trie; + +namespace Nethermind.Synchronization.SnapSync; + +/// +/// Base interface for snap sync tree operations used in FillBoundaryTree. +/// +public interface ISnapTree : IDisposable +{ + Hash256 RootHash { get; } + + void SetRootFromProof(TrieNode root); + bool IsPersisted(in TreePath path, in ValueHash256 keccak); + void BulkSetAndUpdateRootHash(in ArrayPoolListRef entries); + void Commit(ValueHash256 upperBound); +} diff --git a/src/Nethermind/Nethermind.Synchronization/SnapSync/ISnapTrieFactory.cs b/src/Nethermind/Nethermind.Synchronization/SnapSync/ISnapTrieFactory.cs new file mode 100644 index 00000000000..9c11eb2224f --- /dev/null +++ b/src/Nethermind/Nethermind.Synchronization/SnapSync/ISnapTrieFactory.cs @@ -0,0 +1,12 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using Nethermind.Core.Crypto; + +namespace Nethermind.Synchronization.SnapSync; + +public interface ISnapTrieFactory +{ + ISnapTree CreateStateTree(); + ISnapTree CreateStorageTree(in ValueHash256 accountPath); +} diff --git a/src/Nethermind/Nethermind.Synchronization/SnapSync/PatriciaSnapStateTree.cs b/src/Nethermind/Nethermind.Synchronization/SnapSync/PatriciaSnapStateTree.cs new file mode 100644 index 00000000000..d47268291ae --- /dev/null +++ b/src/Nethermind/Nethermind.Synchronization/SnapSync/PatriciaSnapStateTree.cs @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using Nethermind.Core; +using Nethermind.Core.Collections; +using Nethermind.Core.Crypto; +using Nethermind.State; +using Nethermind.Trie; + +namespace Nethermind.Synchronization.SnapSync; + +public class PatriciaSnapStateTree(StateTree tree, SnapUpperBoundAdapter adapter) : ISnapTree +{ + public Hash256 RootHash => tree.RootHash; + + public void SetRootFromProof(TrieNode root) => tree.RootRef = root; + + public bool IsPersisted(in TreePath 
path, in ValueHash256 keccak) => + adapter.IsPersisted(path, keccak); + + public void BulkSetAndUpdateRootHash(in ArrayPoolListRef entries) + { + tree.BulkSet(entries, PatriciaTree.Flags.WasSorted); + tree.UpdateRootHash(); + } + + public void Commit(ValueHash256 upperBound) + { + adapter.UpperBound = upperBound; + tree.Commit(skipRoot: true, WriteFlags.DisableWAL); + } + + public void Dispose() { } // No-op - Patricia doesn't own resources +} diff --git a/src/Nethermind/Nethermind.Synchronization/SnapSync/PatriciaSnapStorageTree.cs b/src/Nethermind/Nethermind.Synchronization/SnapSync/PatriciaSnapStorageTree.cs new file mode 100644 index 00000000000..7a27197d92c --- /dev/null +++ b/src/Nethermind/Nethermind.Synchronization/SnapSync/PatriciaSnapStorageTree.cs @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using Nethermind.Core; +using Nethermind.Core.Collections; +using Nethermind.Core.Crypto; +using Nethermind.State; +using Nethermind.Trie; + +namespace Nethermind.Synchronization.SnapSync; + +public class PatriciaSnapStorageTree(StorageTree tree, SnapUpperBoundAdapter adapter) : ISnapTree +{ + public Hash256 RootHash => tree.RootHash; + + public void SetRootFromProof(TrieNode root) => tree.RootRef = root; + + public bool IsPersisted(in TreePath path, in ValueHash256 keccak) => + adapter.IsPersisted(path, keccak); + + public void BulkSetAndUpdateRootHash(in ArrayPoolListRef entries) + { + tree.BulkSet(entries, PatriciaTree.Flags.WasSorted); + tree.UpdateRootHash(); + } + + public void Commit(ValueHash256 upperBound) + { + adapter.UpperBound = upperBound; + tree.Commit(writeFlags: WriteFlags.DisableWAL); + } + + public void Dispose() { } // No-op - Patricia doesn't own resources +} diff --git a/src/Nethermind/Nethermind.Synchronization/SnapSync/PatriciaSnapTrieFactory.cs b/src/Nethermind/Nethermind.Synchronization/SnapSync/PatriciaSnapTrieFactory.cs new file mode 100644 index 
00000000000..5c6881104e9 --- /dev/null +++ b/src/Nethermind/Nethermind.Synchronization/SnapSync/PatriciaSnapTrieFactory.cs @@ -0,0 +1,27 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using Nethermind.Core.Crypto; +using Nethermind.Logging; +using Nethermind.State; +using Nethermind.Trie.Pruning; + +namespace Nethermind.Synchronization.SnapSync; + +public class PatriciaSnapTrieFactory(INodeStorage nodeStorage, ILogManager logManager) : ISnapTrieFactory +{ + private readonly RawScopedTrieStore _stateTrieStore = new(nodeStorage, null); + + public ISnapTree CreateStateTree() + { + var adapter = new SnapUpperBoundAdapter(_stateTrieStore); + return new PatriciaSnapStateTree(new StateTree(adapter, logManager), adapter); + } + + public ISnapTree CreateStorageTree(in ValueHash256 accountPath) + { + var adapter = new SnapUpperBoundAdapter(new RawScopedTrieStore(nodeStorage, accountPath.ToCommitment())); + return new PatriciaSnapStorageTree(new StorageTree(adapter, logManager), adapter); + } + +} diff --git a/src/Nethermind/Nethermind.Synchronization/SnapSync/ProgressTracker.cs b/src/Nethermind/Nethermind.Synchronization/SnapSync/ProgressTracker.cs index 6037e1b53f7..a7f02334cb0 100644 --- a/src/Nethermind/Nethermind.Synchronization/SnapSync/ProgressTracker.cs +++ b/src/Nethermind/Nethermind.Synchronization/SnapSync/ProgressTracker.cs @@ -60,7 +60,7 @@ public class ProgressTracker : IDisposable // partition is taking up most of the time at the end of the sync. 
private ConcurrentQueue AccountRangeReadyForRequest { get; set; } = new(); private ConcurrentQueue NextSlotRange { get; set; } = new(); - private ConcurrentQueue StoragesToRetrieve { get; set; } = new(); + public ConcurrentQueue StoragesToRetrieve { get; set; } = new(); private ConcurrentQueue CodesToRetrieve { get; set; } = new(); private ConcurrentQueue AccountsToRefresh { get; set; } = new(); @@ -104,7 +104,7 @@ private void SetupAccountRangePartition() curStartingPath += partitionSize; - Hash256 limitPath; + ValueHash256 limitPath; // Special case for the last partition if (i == _accountRangePartitionCount - 1) @@ -114,7 +114,8 @@ private void SetupAccountRangePartition() else { limitPath = new Hash256(Keccak.Zero.Bytes); - BinaryPrimitives.WriteUInt32BigEndian(limitPath.Bytes, curStartingPath); + BinaryPrimitives.WriteUInt32BigEndian(limitPath.BytesAsSpan, curStartingPath); + limitPath = limitPath.DecrementPath(); // Limit is inclusive } partition.AccountPathLimit = limitPath; @@ -331,7 +332,6 @@ public void EnqueueAccountStorage(PathWithAccount pwa) public void EnqueueAccountRefresh(PathWithAccount pathWithAccount, in ValueHash256? startingHash, in ValueHash256? hashLimit) { - _pivot.UpdatedStorages.Add(pathWithAccount.Path.ToCommitment()); AccountsToRefresh.Enqueue(new AccountWithStorageStartingHash() { PathAndAccount = pathWithAccount, StorageStartingHash = startingHash.GetValueOrDefault(), StorageHashLimit = hashLimit ?? Keccak.MaxValue }); } @@ -356,7 +356,7 @@ public void EnqueueNextSlot(StorageRange? storageRange) } } - public void EnqueueNextSlot(StorageRange parentRequest, int accountIndex, ValueHash256 lastProcessedHash) + public void EnqueueNextSlot(StorageRange parentRequest, int accountIndex, ValueHash256 lastProcessedHash, int slotCount) { ValueHash256 limitHash = parentRequest.LimitHash ?? 
Keccak.MaxValue; if (lastProcessedHash > limitHash) @@ -368,23 +368,30 @@ public void EnqueueNextSlot(StorageRange parentRequest, int accountIndex, ValueH UInt256 lastProcessed = new UInt256(lastProcessedHash.Bytes, true); UInt256 start = startingHash.HasValue ? new UInt256(startingHash.Value.Bytes, true) : UInt256.Zero; + // Splitting storage will cause the storage proof to not get stitched completely, causing more healing time and + // causes it to be tracked for healing, also, one more slot range to keep in memory. + // So we only split if the estimated remaining slot count is large enough. This is recursive, so large + // contract will continue getting split until the remaining slot count is low enough. + double slotSize = lastProcessed == start ? 0 : (double)(lastProcessed - start) / slotCount; + int estimatedRemainingSlotCount = slotSize == 0 ? 0 : (int)((double)(limit - lastProcessed) / slotSize); + UInt256 fullRange = limit - start; - if (_enableStorageRangeSplit && lastProcessed < fullRange / StorageRangeSplitFactor + start) + if (estimatedRemainingSlotCount > 10_000_000 && _enableStorageRangeSplit && lastProcessed < fullRange / StorageRangeSplitFactor + start) { ValueHash256 halfOfLeftHash = ((limit - lastProcessed) / 2 + lastProcessed).ToValueHash(); NextSlotRange.Enqueue(new StorageRange { Accounts = new ArrayPoolList(1) { account }, - StartingHash = lastProcessedHash, + StartingHash = lastProcessedHash.IncrementPath(), LimitHash = halfOfLeftHash }); NextSlotRange.Enqueue(new StorageRange { Accounts = new ArrayPoolList(1) { account }, - StartingHash = halfOfLeftHash, + StartingHash = halfOfLeftHash.IncrementPath(), LimitHash = limitHash }); @@ -397,7 +404,7 @@ public void EnqueueNextSlot(StorageRange parentRequest, int accountIndex, ValueH var storageRange = new StorageRange { Accounts = new ArrayPoolList(1) { account }, - StartingHash = lastProcessedHash, + StartingHash = lastProcessedHash.IncrementPath(), LimitHash = limitHash }; 
NextSlotRange.Enqueue(storageRange); @@ -488,6 +495,12 @@ private void FinishRangePhase() _db.Flush(); } + public void TrackAccountToHeal(ValueHash256 path) + { + _logger.Warn($"Tracked {path} for healing"); + _pivot.UpdatedStorages.Add(path.ToCommitment()); + } + private void LogRequest(string reqType) { if (_reqCount % 100 == 0 || _lastLogTime < DateTimeOffset.Now - _maxTimeBetweenLog) diff --git a/src/Nethermind/Nethermind.Synchronization/SnapSync/SnapProvider.cs b/src/Nethermind/Nethermind.Synchronization/SnapSync/SnapProvider.cs index 26217d22f52..16eaf4ea86d 100644 --- a/src/Nethermind/Nethermind.Synchronization/SnapSync/SnapProvider.cs +++ b/src/Nethermind/Nethermind.Synchronization/SnapSync/SnapProvider.cs @@ -15,34 +15,26 @@ using Nethermind.Db; using Nethermind.Int256; using Nethermind.Logging; -using Nethermind.State; using Nethermind.State.Snap; -using Nethermind.Trie; -using Nethermind.Trie.Pruning; namespace Nethermind.Synchronization.SnapSync { public class SnapProvider : ISnapProvider { private readonly IDb _codeDb; - private readonly ILogManager _logManager; private readonly ILogger _logger; private readonly ProgressTracker _progressTracker; - private readonly INodeStorage _nodeStorage; + private readonly ISnapTrieFactory _trieFactory; // This is actually close to 97% effective. 
private readonly ClockKeyCache _codeExistKeyCache = new(1024 * 16); - private readonly RawScopedTrieStore _stateTrieStore; - public SnapProvider(ProgressTracker progressTracker, [KeyFilter(DbNames.Code)] IDb codeDb, INodeStorage nodeStorage, ILogManager logManager) + public SnapProvider(ProgressTracker progressTracker, [KeyFilter(DbNames.Code)] IDb codeDb, ISnapTrieFactory trieFactory, ILogManager logManager) { _codeDb = codeDb; _progressTracker = progressTracker; - _nodeStorage = nodeStorage; - _stateTrieStore = new RawScopedTrieStore(_nodeStorage, null); - - _logManager = logManager; + _trieFactory = trieFactory; _logger = logManager.GetClassLogger(); } @@ -93,12 +85,10 @@ public AddRangeResult AddAccountRange( { if (accounts.Count == 0) throw new ArgumentException("Cannot be empty.", nameof(accounts)); - StateTree tree = new(_stateTrieStore, _logManager); - ValueHash256 effectiveHashLimit = hashLimit ?? ValueKeccak.MaxValue; - (AddRangeResult result, bool moreChildrenToRight, List accountsWithStorage, List codeHashes) = - SnapProviderHelper.AddAccountRange(tree, blockNumber, expectedRootHash, startingHash, effectiveHashLimit, accounts, proofs); + (AddRangeResult result, bool moreChildrenToRight, List accountsWithStorage, List codeHashes, Hash256 actualRootHash) = + SnapProviderHelper.AddAccountRange(_trieFactory, blockNumber, expectedRootHash, startingHash, effectiveHashLimit, accounts, proofs); if (result == AddRangeResult.OK) { @@ -118,17 +108,16 @@ public AddRangeResult AddAccountRange( _progressTracker.EnqueueCodeHashes(filteredCodeHashes.AsSpan()); - UInt256 nextPath = accounts[^1].Path.ToUInt256(); - nextPath += UInt256.One; - _progressTracker.UpdateAccountRangePartitionProgress(effectiveHashLimit, nextPath.ToValueHash(), moreChildrenToRight); + ValueHash256 nextPath = accounts[^1].Path.IncrementPath(); + _progressTracker.UpdateAccountRangePartitionProgress(effectiveHashLimit, nextPath, moreChildrenToRight); } else if (result == 
AddRangeResult.MissingRootHashInProofs) { - _logger.Trace($"SNAP - AddAccountRange failed, missing root hash {tree.RootHash} in the proofs, startingHash:{startingHash}"); + _logger.Trace($"SNAP - AddAccountRange failed, missing root hash {actualRootHash} in the proofs, startingHash:{startingHash}"); } else if (result == AddRangeResult.DifferentRootHash) { - _logger.Trace($"SNAP - AddAccountRange failed, expected {blockNumber}:{expectedRootHash} but was {tree.RootHash}, startingHash:{startingHash}"); + _logger.Trace($"SNAP - AddAccountRange failed, expected {blockNumber}:{expectedRootHash} but was {actualRootHash}, startingHash:{startingHash}"); } else if (result == AddRangeResult.InvalidOrder) { @@ -138,6 +127,10 @@ public AddRangeResult AddAccountRange( { if (_logger.IsTrace) _logger.Trace($"SNAP - AddAccountRange failed, accounts are out of bounds, startingHash:{startingHash}"); } + else if (result == AddRangeResult.EmptyRange) + { + if (_logger.IsTrace) _logger.Trace($"SNAP - AddAccountRange failed, empty accounts, startingHash:{startingHash}"); + } return result; } @@ -199,61 +192,75 @@ public AddRangeResult AddStorageRange(StorageRange request, SlotsAndProofs respo public AddRangeResult AddStorageRangeForAccount(StorageRange request, int accountIndex, IReadOnlyList slots, IReadOnlyList? 
proofs = null) { PathWithAccount pathWithAccount = request.Accounts[accountIndex]; - StorageTree tree = new(new RawScopedTrieStore(_nodeStorage, pathWithAccount.Path.ToCommitment()), _logManager); - (AddRangeResult result, bool moreChildrenToRight) = SnapProviderHelper.AddStorageRange(tree, pathWithAccount, slots, request.StartingHash, request.LimitHash, proofs); - - if (result == AddRangeResult.OK) + try { - if (moreChildrenToRight) + (AddRangeResult result, bool moreChildrenToRight, Hash256 actualRootHash, bool isRootPersisted) = SnapProviderHelper.AddStorageRange(_trieFactory, pathWithAccount, slots, request.StartingHash, request.LimitHash, proofs); + if (result == AddRangeResult.OK) { - _progressTracker.EnqueueNextSlot(request, accountIndex, slots[^1].Path); + if (moreChildrenToRight) + { + _progressTracker.EnqueueNextSlot(request, accountIndex, slots[^1].Path, slots.Count); + } + else if (accountIndex == 0 && request.Accounts.Count == 1) + { + _progressTracker.OnCompletedLargeStorage(pathWithAccount); + } + + if (!moreChildrenToRight && (request.LimitHash == null || request.LimitHash == ValueKeccak.MaxValue) && !isRootPersisted) + { + // Sometimes the stitching does not work. Likely because part of the storage is using different + // pivot, sometimes the proof is in a form that we cannot cleanly verify if it should persist or not, + // but also because of stitching bug. So we just force trigger healing and continue on with our lives. 
+ _progressTracker.TrackAccountToHeal(request.Accounts[accountIndex].Path); + } + + return result; + } + + if (result == AddRangeResult.MissingRootHashInProofs) + { + _logger.Trace( + $"SNAP - AddStorageRange failed, missing root hash {actualRootHash} in the proofs, startingHash:{request.StartingHash}"); } - else if (accountIndex == 0 && request.Accounts.Count == 1) + else if (result == AddRangeResult.DifferentRootHash) { - _progressTracker.OnCompletedLargeStorage(pathWithAccount); + _logger.Trace( + $"SNAP - AddStorageRange failed, expected storage root hash:{pathWithAccount.Account.StorageRoot} but was {actualRootHash}, startingHash:{request.StartingHash}"); + } + else if (result == AddRangeResult.InvalidOrder) + { + if (_logger.IsTrace) + _logger.Trace( + $"SNAP - AddStorageRange failed, slots are not in sorted order, startingHash:{request.StartingHash}"); + } + else if (result == AddRangeResult.OutOfBounds) + { + if (_logger.IsTrace) + _logger.Trace( + $"SNAP - AddStorageRange failed, slots are out of bounds, startingHash:{request.StartingHash}"); + } + else if (result == AddRangeResult.EmptyRange) + { + if (_logger.IsTrace) + _logger.Trace( + $"SNAP - AddStorageRange failed, slots list is empty, startingHash:{request.StartingHash}"); } + _progressTracker.EnqueueAccountRefresh(pathWithAccount, request.StartingHash, request.LimitHash); return result; - } - if (result == AddRangeResult.MissingRootHashInProofs) - { - _logger.Trace( - $"SNAP - AddStorageRange failed, missing root hash {pathWithAccount.Account.StorageRoot} in the proofs, startingHash:{request.StartingHash}"); - } - else if (result == AddRangeResult.DifferentRootHash) - { - _logger.Trace( - $"SNAP - AddStorageRange failed, expected storage root hash:{pathWithAccount.Account.StorageRoot} but was {tree.RootHash}, startingHash:{request.StartingHash}"); - } - else if (result == AddRangeResult.InvalidOrder) - { - if (_logger.IsTrace) - _logger.Trace( - $"SNAP - AddStorageRange failed, slots are not in 
sorted order, startingHash:{request.StartingHash}"); - } - else if (result == AddRangeResult.OutOfBounds) - { - if (_logger.IsTrace) - _logger.Trace( - $"SNAP - AddStorageRange failed, slots are out of bounds, startingHash:{request.StartingHash}"); } - else if (result == AddRangeResult.EmptySlots) + catch (Exception e) { - if (_logger.IsTrace) - _logger.Trace( - $"SNAP - AddStorageRange failed, slots list is empty, startingHash:{request.StartingHash}"); + _logger.Warn($"Error in storage {e}"); + throw; } - - _progressTracker.EnqueueAccountRefresh(pathWithAccount, request.StartingHash, request.LimitHash); - return result; } public void RefreshAccounts(AccountsToRefreshRequest request, IOwnedReadOnlyList response) { int respLength = response.Count; - IScopedTrieStore stateStore = _stateTrieStore; for (int reqIndex = 0; reqIndex < request.Paths.Count; reqIndex++) { var requestedPath = request.Paths[reqIndex]; @@ -269,35 +276,22 @@ public void RefreshAccounts(AccountsToRefreshRequest request, IOwnedReadOnlyList continue; } - try - { - TreePath emptyTreePath = TreePath.Empty; - TrieNode node = new(NodeType.Unknown, nodeData, isDirty: true); - node.ResolveNode(stateStore, emptyTreePath); - node.ResolveKey(stateStore, ref emptyTreePath); - - requestedPath.PathAndAccount.Account = requestedPath.PathAndAccount.Account.WithChangedStorageRoot(node.Keccak); + requestedPath.PathAndAccount.Account = requestedPath.PathAndAccount.Account.WithChangedStorageRoot(Keccak.Compute(nodeData)); - if (requestedPath.StorageStartingHash > ValueKeccak.Zero) - { - StorageRange range = new() - { - Accounts = new ArrayPoolList(1) { requestedPath.PathAndAccount }, - StartingHash = requestedPath.StorageStartingHash, - LimitHash = requestedPath.StorageHashLimit - }; - - _progressTracker.EnqueueNextSlot(range); - } - else + if (requestedPath.StorageStartingHash > ValueKeccak.Zero) + { + StorageRange range = new() { - _progressTracker.EnqueueAccountStorage(requestedPath.PathAndAccount); - } + 
Accounts = new ArrayPoolList(1) { requestedPath.PathAndAccount }, + StartingHash = requestedPath.StorageStartingHash, + LimitHash = requestedPath.StorageHashLimit + }; + + _progressTracker.EnqueueNextSlot(range); } - catch (Exception exc) + else { - RetryAccountRefresh(requestedPath); - _logger.Warn($"SNAP - {exc.Message}:{requestedPath.PathAndAccount.Path}:{Bytes.ToHexString(nodeData)}"); + _progressTracker.EnqueueAccountStorage(requestedPath.PathAndAccount); } } else @@ -369,5 +363,6 @@ public void Dispose() { _codeExistKeyCache.Clear(); } + } } diff --git a/src/Nethermind/Nethermind.Synchronization/SnapSync/SnapProviderHelper.cs b/src/Nethermind/Nethermind.Synchronization/SnapSync/SnapProviderHelper.cs index 60877e0539f..2430c54f72e 100644 --- a/src/Nethermind/Nethermind.Synchronization/SnapSync/SnapProviderHelper.cs +++ b/src/Nethermind/Nethermind.Synchronization/SnapSync/SnapProviderHelper.cs @@ -20,8 +20,8 @@ public static class SnapProviderHelper { private const int ExtensionRlpChildIndex = 1; - public static (AddRangeResult result, bool moreChildrenToRight, List storageRoots, List codeHashes) AddAccountRange( - StateTree tree, + public static (AddRangeResult result, bool moreChildrenToRight, List storageRoots, List codeHashes, Hash256 actualRootHash) AddAccountRange( + ISnapTrieFactory factory, long blockNumber, in ValueHash256 expectedRootHash, in ValueHash256 startingHash, @@ -30,100 +30,41 @@ public static (AddRangeResult result, bool moreChildrenToRight, List proofs = null ) { - // TODO: Check the accounts boundaries and sorting - if (accounts.Count == 0) - throw new ArgumentException("Cannot be empty.", nameof(accounts)); - - // Validate sorting order - for (int i = 1; i < accounts.Count; i++) - { - if (accounts[i - 1].Path.CompareTo(accounts[i].Path) >= 0) - { - return (AddRangeResult.InvalidOrder, true, null, null); - } - } - - ValueHash256 lastHash = accounts[^1].Path; - - (AddRangeResult result, List<(TrieNode, TreePath)> sortedBoundaryList, bool 
moreChildrenToRight) = - FillBoundaryTree(tree, startingHash, lastHash, limitHash, expectedRootHash, proofs); - - if (result != AddRangeResult.OK) - { - return (result, true, null, null); - } - - List accountsWithStorage = new(); - List codeHashes = new(); - bool hasExtraStorage = false; + using ISnapTree tree = factory.CreateStateTree(); using ArrayPoolListRef entries = new(accounts.Count); for (var index = 0; index < accounts.Count; index++) { PathWithAccount account = accounts[index]; - if (account.Account.HasStorage) - { - if (account.Path >= limitHash || account.Path < startingHash) - { - hasExtraStorage = true; - } - else - { - accountsWithStorage.Add(account); - } - } - - if (account.Account.HasCode) - { - codeHashes.Add(account.Account.CodeHash); - } - Account accountValue = account.Account; Rlp rlp = accountValue.IsTotallyEmpty ? StateTree.EmptyAccountRlp : Rlp.Encode(accountValue); entries.Add(new PatriciaTree.BulkSetEntry(account.Path, rlp.Bytes)); Interlocked.Add(ref Metrics.SnapStateSynced, rlp.Bytes.Length); } - tree.BulkSet(entries, PatriciaTree.Flags.WasSorted); - tree.UpdateRootHash(); + var (result, moreChildrenToRight, _) = CommitRange( + tree, entries, startingHash, limitHash, expectedRootHash, proofs); + if (result != AddRangeResult.OK) + return (result, true, null, null, tree.RootHash); - if (tree.RootHash.ValueHash256 != expectedRootHash) + List accountsWithStorage = new(); + List codeHashes = new(); + for (var index = 0; index < accounts.Count; index++) { - return (AddRangeResult.DifferentRootHash, true, null, null); - } + PathWithAccount account = accounts[index]; - if (hasExtraStorage) - { - // The server will always give one node extra after the limit path if it can fit in the response. - // When we have extra storage, the extra storage must not be re-stored as it may have already been set - // by another top level partition. 
If the sync pivot moved and the storage was modified, it must not be saved - // here along with updated ancestor so that healing can detect that the storage need to be healed. - // - // Unfortunately, without introducing large change to the tree, the easiest way to - // exclude the extra storage is to just rebuild the whole tree and also skip stitching. - // Fortunately, this should only happen n-1 time where n is the number of top level - // partition count. - - tree.RootHash = Keccak.EmptyTreeHash; - for (var index = 0; index < accounts.Count; index++) - { - PathWithAccount account = accounts[index]; - if (account.Path >= limitHash || account.Path < startingHash) continue; - _ = tree.Set(account.Path, account.Account); - } - } - else - { - StitchBoundaries(sortedBoundaryList, tree.TrieStore); - } + if (account.Account.HasStorage && account.Path <= limitHash) + accountsWithStorage.Add(account); - tree.Commit(skipRoot: true, writeFlags: WriteFlags.DisableWAL); + if (account.Account.HasCode) + codeHashes.Add(account.Account.CodeHash); + } - return (AddRangeResult.OK, moreChildrenToRight, accountsWithStorage, codeHashes); + return (AddRangeResult.OK, moreChildrenToRight, accountsWithStorage, codeHashes, null); } - public static (AddRangeResult result, bool moreChildrenToRight) AddStorageRange( - StorageTree tree, + public static (AddRangeResult result, bool moreChildrenToRight, Hash256 actualRootHash, bool isRootPersisted) AddStorageRange( + ISnapTrieFactory factory, PathWithAccount account, IReadOnlyList slots, in ValueHash256? startingHash, @@ -131,59 +72,84 @@ public static (AddRangeResult result, bool moreChildrenToRight) AddStorageRange( IReadOnlyList? 
proofs = null ) { - if (slots.Count == 0) - return (AddRangeResult.EmptySlots, false); - - // Validate sorting order - for (int i = 1; i < slots.Count; i++) - { - if (slots[i - 1].Path.CompareTo(slots[i].Path) >= 0) - { - return (AddRangeResult.InvalidOrder, true); - } - } - - ValueHash256 lastHash = slots[^1].Path; - - (AddRangeResult result, List<(TrieNode, TreePath)> sortedBoundaryList, bool moreChildrenToRight) = FillBoundaryTree( - tree, startingHash, lastHash, limitHash ?? Keccak.MaxValue, account.Account.StorageRoot, proofs); + using ISnapTree tree = factory.CreateStorageTree(account.Path); - if (result != AddRangeResult.OK) - { - return (result, true); - } + ValueHash256 effectiveLimitHash = limitHash ?? Keccak.MaxValue; + ValueHash256 effectiveStartingHash = startingHash ?? ValueKeccak.Zero; using ArrayPoolListRef entries = new(slots.Count); for (var index = 0; index < slots.Count; index++) { PathWithStorageSlot slot = slots[index]; + Interlocked.Add(ref Metrics.SnapStateSynced, slot.SlotRlpValue.Length); entries.Add(new PatriciaTree.BulkSetEntry(slot.Path, slot.SlotRlpValue)); } - tree.BulkSet(entries, PatriciaTree.Flags.WasSorted); - tree.UpdateRootHash(); + var (result, moreChildrenToRight, isRootPersisted) = CommitRange( + tree, entries, effectiveStartingHash, effectiveLimitHash, account.Account.StorageRoot, proofs); + if (result != AddRangeResult.OK) + return (result, true, tree.RootHash, false); + return (AddRangeResult.OK, moreChildrenToRight, null, isRootPersisted); + } + + private static (AddRangeResult result, bool moreChildrenToRight, bool isRootPersisted) CommitRange( + ISnapTree tree, + in ArrayPoolListRef entries, + in ValueHash256 startingHash, + in ValueHash256 limitHash, + in ValueHash256 expectedRootHash, + IReadOnlyList? 
proofs) + { + if (entries.Count == 0) + return (AddRangeResult.EmptyRange, true, false); - if (tree.RootHash.ValueHash256 != account.Account.StorageRoot) + // Validate sorting order + for (int i = 1; i < entries.Count; i++) { - return (AddRangeResult.DifferentRootHash, true); + if (entries[i - 1].Path.CompareTo(entries[i].Path) >= 0) + return (AddRangeResult.InvalidOrder, true, false); } - // This will work if all StorageRange requests share the same AccountWithPath object, which seems to be the case. - // If this is not true, the StorageRange request should be extended with a lock object. - // That lock object should be shared between all other StorageRange requests for the same account. - lock (account.Account) + if (entries[0].Path < startingHash) + return (AddRangeResult.InvalidOrder, true, false); + + ValueHash256 lastPath = entries[entries.Count - 1].Path; + + (AddRangeResult result, List<(TrieNode, TreePath)> sortedBoundaryList, bool moreChildrenToRight) = + FillBoundaryTree(tree, startingHash, lastPath, limitHash, expectedRootHash, proofs); + + if (result != AddRangeResult.OK) + return (result, true, false); + + tree.BulkSetAndUpdateRootHash(entries); + + if (tree.RootHash.ValueHash256 != expectedRootHash) + return (AddRangeResult.DifferentRootHash, true, false); + + StitchBoundaries(sortedBoundaryList, tree, startingHash); + + // The upper bound is used to prevent proof nodes that covers next range from being persisted, except if + // this is the last range. This prevent double node writes per path which break flat. It also prevent leaf o + // that is after the range from being persisted, which prevent double write again. 
+ ValueHash256 upperBound = lastPath; + if (upperBound > limitHash) + { + upperBound = limitHash; + } + else { - StitchBoundaries(sortedBoundaryList, tree.TrieStore); - tree.Commit(writeFlags: WriteFlags.DisableWAL); + if (!moreChildrenToRight) upperBound = ValueKeccak.MaxValue; } + tree.Commit(upperBound); - return (AddRangeResult.OK, moreChildrenToRight); + bool isRootPersisted = sortedBoundaryList is not { Count: > 0 } || sortedBoundaryList[0].Item1.IsPersisted; + return (AddRangeResult.OK, moreChildrenToRight, isRootPersisted); } [SkipLocalsInit] private static (AddRangeResult result, List<(TrieNode, TreePath)> sortedBoundaryList, bool moreChildrenToRight) FillBoundaryTree( - PatriciaTree tree, + ISnapTree tree, in ValueHash256? startingHash, in ValueHash256 endHash, in ValueHash256 limitHash, @@ -201,7 +167,7 @@ private static (AddRangeResult result, List<(TrieNode, TreePath)> sortedBoundary ValueHash256 effectiveStartingHash = startingHash ?? ValueKeccak.Zero; List<(TrieNode, TreePath)> sortedBoundaryList = new(); - Dictionary dict = CreateProofDict(proofs, tree.TrieStore); + Dictionary dict = CreateProofDict(proofs); if (!dict.TryGetValue(expectedRootHash, out TrieNode root)) { @@ -221,7 +187,6 @@ private static (AddRangeResult result, List<(TrieNode, TreePath)> sortedBoundary TreePath leftBoundaryPath = TreePath.FromPath(effectiveStartingHash.Bytes); TreePath rightBoundaryPath = TreePath.FromPath(endHash.Bytes); - TreePath rightLimitPath = TreePath.FromPath(limitHash.Bytes); // For when in very-very unlikely case where the last remaining address is Keccak.MaxValue, (who knows why, // the chain have special handling for it maybe) and it is not included the returned account range, (again, @@ -233,7 +198,7 @@ private static (AddRangeResult result, List<(TrieNode, TreePath)> sortedBoundary // hash will not match. 
Stack<(TrieNode node, TreePath path)> proofNodesToProcess = new(); - tree.RootRef = root; + tree.SetRootFromProof(root); proofNodesToProcess.Push((root, TreePath.Empty)); sortedBoundaryList.Add((root, TreePath.Empty)); @@ -261,15 +226,33 @@ private static (AddRangeResult result, List<(TrieNode, TreePath)> sortedBoundary { int left = leftBoundaryPath.CompareToTruncated(path, path.Length) == 0 ? leftBoundaryPath[path.Length] : 0; int right = rightBoundaryPath.CompareToTruncated(path, path.Length) == 0 ? rightBoundaryPath[path.Length] : 15; - int limit = rightLimitPath.CompareToTruncated(path, path.Length) == 0 ? rightLimitPath[path.Length] : 15; int maxIndex = moreChildrenToRight ? right : 15; for (int ci = left; ci <= maxIndex; ci++) { bool hasKeccak = node.GetChildHashAsValueKeccak(ci, out ValueHash256 childKeccak); + TrieNode? child = null; + if (hasKeccak) + { + dict.TryGetValue(childKeccak, out child); + } - moreChildrenToRight |= hasKeccak && (ci > right && (ci <= limit || noLimit)); + if (child is null) + { + // Note: be careful with inline node. Inline node is not set in the proof dictionary + byte[]? inlineRlp = node.GetInlineNodeRlp(ci); + if (inlineRlp is not null) + { + child = new TrieNode(NodeType.Unknown, inlineRlp); + child.ResolveNode(NullTrieNodeResolver.Instance, path.Append(ci)); + } + } + + // The limit may have lower nibble that is less than the path's current nibble, even if upper + // nibble is higher. 
So need to check whole path + TreePath childPath = path.Append(ci); + moreChildrenToRight |= (hasKeccak || child is not null) && (ci > right && (childPath.Path < limitHash || noLimit)); if (ci >= left && ci <= right) { @@ -277,10 +260,19 @@ private static (AddRangeResult result, List<(TrieNode, TreePath)> sortedBoundary node.SetChild(ci, null); } - if (hasKeccak && (ci == left || ci == right) && dict.TryGetValue(childKeccak, out TrieNode child)) + if (child is not null && !hasKeccak && (ci == left || ci == right)) { - TreePath childPath = path.Append(ci); + // Inline node at boundary. Need to be set back or keccak will be incorrect. + // but must not be set as part of boundary list or break stitching. + TreePath wholePath = childPath.Append(child.Key); + if (leftBoundaryPath.CompareToTruncated(wholePath, wholePath.Length) > 0 || rightBoundaryPath.CompareToTruncated(wholePath, wholePath.Length) < 0) + { + node.SetChild(ci, child); + } + } + if (hasKeccak && (ci == left || ci == right) && child is not null) + { if (child.IsBranch) { node.SetChild(ci, child); @@ -318,7 +310,7 @@ private static (AddRangeResult result, List<(TrieNode, TreePath)> sortedBoundary return (AddRangeResult.OK, sortedBoundaryList, moreChildrenToRight); } - private static Dictionary CreateProofDict(IReadOnlyList proofs, IScopedTrieStore store) + private static Dictionary CreateProofDict(IReadOnlyList proofs) { Dictionary dict = new(); @@ -329,8 +321,8 @@ private static Dictionary CreateProofDict(IReadOnlyList< node.IsBoundaryProofNode = true; TreePath emptyPath = TreePath.Empty; - node.ResolveNode(store, emptyPath); - node.ResolveKey(store, ref emptyPath); + node.ResolveNode(UnknownNodeResolver.Instance, emptyPath); + node.ResolveKey(UnknownNodeResolver.Instance, ref emptyPath); dict[node.Keccak] = node; } @@ -338,23 +330,24 @@ private static Dictionary CreateProofDict(IReadOnlyList< return dict; } - private static void StitchBoundaries(List<(TrieNode, TreePath)> sortedBoundaryList, 
IScopedTrieStore store) + private static bool StitchBoundaries(List<(TrieNode, TreePath)>? sortedBoundaryList, ISnapTree tree, ValueHash256 startPath) { if (sortedBoundaryList is null || sortedBoundaryList.Count == 0) { - return; + // No proof means the tree is complete on its own + return true; } + bool stitchToTheRoot = false; for (int i = sortedBoundaryList.Count - 1; i >= 0; i--) { (TrieNode node, TreePath path) = sortedBoundaryList[i]; - if (!node.IsPersisted) { INodeData nodeData = node.NodeData; if (nodeData is ExtensionData extensionData) { - if (IsChildPersisted(node, ref path, extensionData._value, ExtensionRlpChildIndex, store)) + if (IsChildPersisted(node, ref path, extensionData._value, ExtensionRlpChildIndex, tree, startPath)) { node.IsBoundaryProofNode = false; } @@ -365,7 +358,7 @@ private static void StitchBoundaries(List<(TrieNode, TreePath)> sortedBoundaryLi int ci = 0; foreach (object? o in branchData.Branches) { - if (!IsChildPersisted(node, ref path, o, ci, store)) + if (!IsChildPersisted(node, ref path, o, ci, tree, startPath)) { isBoundaryProofNode = true; break; @@ -383,17 +376,44 @@ private static void StitchBoundaries(List<(TrieNode, TreePath)> sortedBoundaryLi //leading to TrieNodeException after sync (as healing may not get to heal the particular storage trie) if (node.IsLeaf) { - node.IsPersisted = store.IsPersisted(path, node.Keccak); + node.IsPersisted = tree.IsPersisted(path, node.Keccak); node.IsBoundaryProofNode = !node.IsPersisted; } } + + if (path.Length == 0 && !node.IsBoundaryProofNode) + { + stitchToTheRoot = true; + + foreach (var valueTuple in sortedBoundaryList) + { + if (valueTuple.Item1.IsBoundaryProofNode) + { + Console.Error.WriteLine($"Root can be persisted but child {valueTuple.Item2}, {valueTuple.Item1} cannot"); + } + } + } } + + return stitchToTheRoot; } - private static bool IsChildPersisted(TrieNode node, ref TreePath nodePath, object? 
child, int childIndex, IScopedTrieStore store) + private static bool IsChildPersisted(TrieNode node, ref TreePath nodePath, object? child, int childIndex, ISnapTree tree, ValueHash256 startPath) { if (child is TrieNode childNode) { + if (childNode.FullRlp.Length < 32) + { + TreePath childPath = nodePath.Append(childIndex); + TreePath fullPath = childPath.Append(childNode.Key); + if (fullPath.Path < startPath) + { + // When a branch have an inline leaf whose full path is < startPath, + // we cannot mark it as persisted and cause the branch proof to be persisted. This is because + // we dont know if the inline leaf is part of a different storage root or not. + return false; + } + } return childNode.IsBoundaryProofNode == false; } @@ -410,7 +430,7 @@ private static bool IsChildPersisted(TrieNode node, ref TreePath nodePath, objec int previousPathLength = node.AppendChildPath(ref nodePath, childIndex); try { - return store.IsPersisted(nodePath, childKeccak); + return tree.IsPersisted(nodePath, childKeccak); } finally { diff --git a/src/Nethermind/Nethermind.Synchronization/SnapSync/SnapUpperBoundAdapter.cs b/src/Nethermind/Nethermind.Synchronization/SnapSync/SnapUpperBoundAdapter.cs new file mode 100644 index 00000000000..68a4b2bd45f --- /dev/null +++ b/src/Nethermind/Nethermind.Synchronization/SnapSync/SnapUpperBoundAdapter.cs @@ -0,0 +1,52 @@ +// SPDX-FileCopyrightText: 2026 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System; +using Nethermind.Core; +using Nethermind.Core.Crypto; +using Nethermind.Trie; +using Nethermind.Trie.Pruning; + +namespace Nethermind.Synchronization.SnapSync; + +/// +/// A wrapper to trie store that prevent committing boundary proof node and nodes whose subtree extend beyond +/// UpperBound. This is to prevent double writes on partitioned snap ranges. 
+/// +/// +public class SnapUpperBoundAdapter(IScopedTrieStore baseTrieStore): IScopedTrieStore +{ + public ValueHash256 UpperBound = ValueKeccak.MaxValue; + + public TrieNode FindCachedOrUnknown(in TreePath path, Hash256 hash) => baseTrieStore.FindCachedOrUnknown(in path, hash); + + public byte[]? LoadRlp(in TreePath path, Hash256 hash, ReadFlags flags = ReadFlags.None) => baseTrieStore.LoadRlp(in path, hash, flags); + + public byte[]? TryLoadRlp(in TreePath path, Hash256 hash, ReadFlags flags = ReadFlags.None) => baseTrieStore.TryLoadRlp(in path, hash, flags); + + public ITrieNodeResolver GetStorageTrieNodeResolver(Hash256? address) => throw new NotSupportedException("Get storage trie node resolver not supported"); + + public INodeStorage.KeyScheme Scheme => baseTrieStore.Scheme; + + public ICommitter BeginCommit(TrieNode? root, WriteFlags writeFlags = WriteFlags.None) => new BoundedSnapCommitter(baseTrieStore.BeginCommit(root, writeFlags), UpperBound); + + public bool IsPersisted(in TreePath path, in ValueHash256 keccak) => baseTrieStore.IsPersisted(in path, in keccak); + + private sealed class BoundedSnapCommitter(ICommitter baseCommitter, ValueHash256 subtreeLimit) : ICommitter + { + public void Dispose() => baseCommitter.Dispose(); + + public TrieNode CommitNode(ref TreePath path, TrieNode node) + { + if (node.IsBoundaryProofNode) return node; + if (node.IsPersisted) return node; + + ValueHash256 subtreeUpperRange = node.IsBranch ? 
path.ToUpperBoundPath() : path.Append(node.Key).ToUpperBoundPath(); + if (subtreeUpperRange > subtreeLimit) return node; + + node = baseCommitter.CommitNode(ref path, node); + node.IsPersisted = true; + return node; + } + } +} diff --git a/src/Nethermind/Nethermind.Synchronization/SnapSync/UnknownNodeResolver.cs b/src/Nethermind/Nethermind.Synchronization/SnapSync/UnknownNodeResolver.cs new file mode 100644 index 00000000000..462749a6210 --- /dev/null +++ b/src/Nethermind/Nethermind.Synchronization/SnapSync/UnknownNodeResolver.cs @@ -0,0 +1,33 @@ +// SPDX-FileCopyrightText: 2025 Demerzel Solutions Limited +// SPDX-License-Identifier: LGPL-3.0-only + +using System; +using Nethermind.Core; +using Nethermind.Core.Crypto; +using Nethermind.Trie; +using Nethermind.Trie.Pruning; + +namespace Nethermind.Synchronization.SnapSync; + +/// +/// A simple ITrieNodeResolver that creates unknown nodes from hashes. +/// Used for proof resolution where RLP is already provided. +/// +internal sealed class UnknownNodeResolver : ITrieNodeResolver +{ + public static readonly UnknownNodeResolver Instance = new(); + + private UnknownNodeResolver() { } + + public TrieNode FindCachedOrUnknown(in TreePath path, Hash256 hash) => + new(NodeType.Unknown, hash); + + public byte[]? LoadRlp(in TreePath path, Hash256 hash, ReadFlags flags) => + throw new NotSupportedException("Proof nodes have RLP embedded"); + + public byte[]? TryLoadRlp(in TreePath path, Hash256 hash, ReadFlags flags) => null; + + public ITrieNodeResolver GetStorageTrieNodeResolver(Hash256? 
address) => this; + + public INodeStorage.KeyScheme Scheme => INodeStorage.KeyScheme.Hash; +} diff --git a/src/Nethermind/Nethermind.Synchronization/Synchronizer.cs b/src/Nethermind/Nethermind.Synchronization/Synchronizer.cs index c4398fa29cc..849c6f5946a 100644 --- a/src/Nethermind/Nethermind.Synchronization/Synchronizer.cs +++ b/src/Nethermind/Nethermind.Synchronization/Synchronizer.cs @@ -412,6 +412,7 @@ private void ConfigureSnapComponent(ContainerBuilder serviceCollection) { serviceCollection .AddSingleton() + .AddSingleton() .AddSingleton(); ConfigureSingletonSyncFeed(serviceCollection); @@ -450,6 +451,7 @@ private void ConfigureStateSyncComponent(ContainerBuilder serviceCollection) { serviceCollection .AddSingleton() + .AddSingleton() .AddSingleton(); ConfigureSingletonSyncFeed(serviceCollection); diff --git a/src/Nethermind/Nethermind.Trie/PatriciaTree.cs b/src/Nethermind/Nethermind.Trie/PatriciaTree.cs index 2bc7c2188a4..9c9658b030f 100644 --- a/src/Nethermind/Nethermind.Trie/PatriciaTree.cs +++ b/src/Nethermind/Nethermind.Trie/PatriciaTree.cs @@ -817,6 +817,13 @@ internal bool ShouldUpdateChild(TrieNode? parent, TrieNode? oldChild, TrieNode? { return originalNode; } + + if (!originalNode.IsSealed) + { + // Use the original where possible. This is actually needed for snapsync because of the BoundaryProofNode flag + originalNode.SetChild(0, onlyChildNode); + return originalNode; + } } return TrieNodeFactory.CreateExtension(extensionKey, onlyChildNode); @@ -840,6 +847,13 @@ internal bool ShouldUpdateChild(TrieNode? parent, TrieNode? oldChild, TrieNode? { return originalNode; } + + if (!originalNode.IsSealed) + { + // Use the original where possible. 
This is actually needed for snapsync because of the BoundaryProofNode flag + originalNode.SetChild(0, newChild); + return originalNode; + } } } diff --git a/src/Nethermind/Nethermind.Trie/TrieNode.cs b/src/Nethermind/Nethermind.Trie/TrieNode.cs index ea7885709dc..4faa5596eac 100644 --- a/src/Nethermind/Nethermind.Trie/TrieNode.cs +++ b/src/Nethermind/Nethermind.Trie/TrieNode.cs @@ -592,6 +592,29 @@ static SpanSource ThrowUnhandledNodeType(TrieNode item) } } + public byte[]? GetInlineNodeRlp(int i) + { + SpanSource rlp = _rlp; + if (rlp.IsNull) + { + return null; + } + + ValueRlpStream rlpStream = new(rlp); + SeekChild(ref rlpStream, i); + + int prefixValue = rlpStream.PeekByte(); + if (prefixValue < 192) + { + return null; + } + else + { + int length = rlpStream.PeekNextRlpLength(); + return rlpStream.Read(length).ToArray(); + } + } + public bool GetChildHashAsValueKeccak(int i, out ValueHash256 keccak) { Unsafe.SkipInit(out keccak);