Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions cspell.json
Original file line number Diff line number Diff line change
Expand Up @@ -790,6 +790,7 @@
"unreferred",
"unrequested",
"unresolve",
"unshifted",
"unsub",
"unsubscription",
"unsynchronized",
Expand Down
345 changes: 345 additions & 0 deletions src/Nethermind/Nethermind.Benchmark/Core/SeqlockCacheBenchmarks.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,345 @@
// SPDX-FileCopyrightText: 2026 Demerzel Solutions Limited
// SPDX-License-Identifier: LGPL-3.0-only

#nullable enable

using System;
using System.Collections.Concurrent;
using BenchmarkDotNet.Attributes;
using Nethermind.Core;
using Nethermind.Core.Collections;
using Nethermind.Int256;

namespace Nethermind.Benchmarks.Core;

[MemoryDiagnoser]
[DisassemblyDiagnoser(maxDepth: 3)]
public class SeqlockCacheBenchmarks
{
    // Caches under comparison: the lock-free seqlock cache vs the BCL baseline.
    private SeqlockCache<StorageCell, byte[]> _seqlockCache = null!;
    private ConcurrentDictionary<StorageCell, byte[]> _concurrentDict = null!;

    private StorageCell[] _keys = null!;
    private byte[][] _values = null!;
    private StorageCell _missKey;

    // Counter used to fabricate a fresh (always-missing) key per miss-path invocation.
    private int _missCounter;

    [Params(1000)]
    public int KeyCount { get; set; }

    [GlobalSetup]
    public void Setup()
    {
        _seqlockCache = new SeqlockCache<StorageCell, byte[]>();
        _concurrentDict = new ConcurrentDictionary<StorageCell, byte[]>();

        _keys = new StorageCell[KeyCount];
        _values = new byte[KeyCount][];

        // Fixed seed keeps the generated keys/values identical across runs.
        Random rng = new Random(42);
        for (int slot = 0; slot < KeyCount; slot++)
        {
            byte[] rawAddress = new byte[20];
            rng.NextBytes(rawAddress);

            _keys[slot] = new StorageCell(new Address(rawAddress), new UInt256((ulong)slot));

            byte[] payload = new byte[32];
            rng.NextBytes(payload);
            _values[slot] = payload;

            // Both caches start warm with identical contents.
            _seqlockCache.Set(in _keys[slot], payload);
            _concurrentDict[_keys[slot]] = payload;
        }

        // A key guaranteed absent: a fresh random address plus UInt256.MaxValue index.
        byte[] absentAddress = new byte[20];
        rng.NextBytes(absentAddress);
        _missKey = new StorageCell(new Address(absentAddress), UInt256.MaxValue);
    }

    // ==================== TryGetValue (Hit) ====================

    [Benchmark(Baseline = true)]
    public bool SeqlockCache_TryGetValue_Hit() => _seqlockCache.TryGetValue(in _keys[500], out _);

    [Benchmark]
    public bool ConcurrentDict_TryGetValue_Hit() => _concurrentDict.TryGetValue(_keys[500], out _);

    // ==================== TryGetValue (Miss) ====================

    [Benchmark]
    public bool SeqlockCache_TryGetValue_Miss() => _seqlockCache.TryGetValue(in _missKey, out _);

    [Benchmark]
    public bool ConcurrentDict_TryGetValue_Miss() => _concurrentDict.TryGetValue(_missKey, out _);

    // ==================== Set (Existing Key) ====================

    [Benchmark]
    public void SeqlockCache_Set_Existing() => _seqlockCache.Set(in _keys[500], _values[500]);

    [Benchmark]
    public void ConcurrentDict_Set_Existing() => _concurrentDict[_keys[500]] = _values[500];

    // ==================== GetOrAdd (Hit) ====================

    [Benchmark]
    public byte[]? SeqlockCache_GetOrAdd_Hit()
        => _seqlockCache.GetOrAdd(in _keys[500], static (in StorageCell _) => new byte[32]);

    [Benchmark]
    public byte[] ConcurrentDict_GetOrAdd_Hit()
        => _concurrentDict.GetOrAdd(_keys[500], static _ => new byte[32]);

    // ==================== GetOrAdd (Miss - measures factory overhead) ====================

    [Benchmark]
    public byte[]? SeqlockCache_GetOrAdd_Miss()
    {
        StorageCell freshKey = NextMissKey();
        return _seqlockCache.GetOrAdd(in freshKey, static (in StorageCell _) => new byte[32]);
    }

    [Benchmark]
    public byte[] ConcurrentDict_GetOrAdd_Miss()
    {
        StorageCell freshKey = NextMissKey();
        return _concurrentDict.GetOrAdd(freshKey, static _ => new byte[32]);
    }

    // Builds a key with a monotonically increasing index above the populated
    // range so every miss-path invocation really misses.
    private StorageCell NextMissKey()
        => new StorageCell(_keys[0].Address, new UInt256((ulong)(KeyCount + _missCounter++)));
}

/// <summary>
/// Benchmark comparing read-heavy workloads (90% reads, 10% writes)
/// </summary>
[MemoryDiagnoser]
public class SeqlockCacheMixedWorkloadBenchmarks
{
    private SeqlockCache<StorageCell, byte[]> _seqlockCache = null!;
    private ConcurrentDictionary<StorageCell, byte[]> _concurrentDict = null!;

    private StorageCell[] _keys = null!;
    private byte[][] _values = null!;

    private const int KeyCount = 10000;
    private const int OperationsPerInvoke = 1000;

    [GlobalSetup]
    public void Setup()
    {
        _seqlockCache = new SeqlockCache<StorageCell, byte[]>();
        _concurrentDict = new ConcurrentDictionary<StorageCell, byte[]>();

        _keys = new StorageCell[KeyCount];
        _values = new byte[KeyCount][];

        // Deterministic seed so both caches hold the same warm data each run.
        Random rng = new Random(42);
        for (int slot = 0; slot < KeyCount; slot++)
        {
            byte[] rawAddress = new byte[20];
            rng.NextBytes(rawAddress);

            _keys[slot] = new StorageCell(new Address(rawAddress), new UInt256((ulong)slot));

            byte[] payload = new byte[32];
            rng.NextBytes(payload);
            _values[slot] = payload;

            // Pre-populate both caches identically.
            _seqlockCache.Set(in _keys[slot], payload);
            _concurrentDict[_keys[slot]] = payload;
        }
    }

    [Benchmark(Baseline = true, OperationsPerInvoke = OperationsPerInvoke)]
    public int SeqlockCache_MixedWorkload_90Read_10Write()
    {
        int found = 0;
        for (int op = 0; op < OperationsPerInvoke; op++)
        {
            int slot = op % KeyCount;
            // Every 10th operation is a write; the remaining 90% are reads.
            if (op % 10 == 0)
            {
                _seqlockCache.Set(in _keys[slot], _values[slot]);
            }
            else if (_seqlockCache.TryGetValue(in _keys[slot], out _))
            {
                found++;
            }
        }
        return found;
    }

    [Benchmark(OperationsPerInvoke = OperationsPerInvoke)]
    public int ConcurrentDict_MixedWorkload_90Read_10Write()
    {
        int found = 0;
        for (int op = 0; op < OperationsPerInvoke; op++)
        {
            int slot = op % KeyCount;
            // Every 10th operation is a write; the remaining 90% are reads.
            if (op % 10 == 0)
            {
                _concurrentDict[_keys[slot]] = _values[slot];
            }
            else if (_concurrentDict.TryGetValue(_keys[slot], out _))
            {
                found++;
            }
        }
        return found;
    }

    [Benchmark(OperationsPerInvoke = OperationsPerInvoke)]
    public int SeqlockCache_ReadOnly()
    {
        int found = 0;
        for (int op = 0; op < OperationsPerInvoke; op++)
        {
            if (_seqlockCache.TryGetValue(in _keys[op % KeyCount], out _))
            {
                found++;
            }
        }
        return found;
    }

    [Benchmark(OperationsPerInvoke = OperationsPerInvoke)]
    public int ConcurrentDict_ReadOnly()
    {
        int found = 0;
        for (int op = 0; op < OperationsPerInvoke; op++)
        {
            if (_concurrentDict.TryGetValue(_keys[op % KeyCount], out _))
            {
                found++;
            }
        }
        return found;
    }
}

/// <summary>
/// Benchmark measuring effective hit rate after populating with N keys.
/// This directly measures the impact of collision rate.
/// </summary>
public class SeqlockCacheHitRateBenchmarks
{
    private SeqlockCache<StorageCell, byte[]> _seqlockCache = null!;
    private StorageCell[] _keys = null!;
    private byte[][] _values = null!;

    [Params(1000, 5000, 10000, 20000)]
    public int KeyCount { get; set; }

    [GlobalSetup]
    public void Setup()
    {
        _seqlockCache = new SeqlockCache<StorageCell, byte[]>();
        _keys = new StorageCell[KeyCount];
        _values = new byte[KeyCount][];

        // Deterministic seed so the populated key set is stable across runs.
        Random rng = new Random(42);
        for (int slot = 0; slot < KeyCount; slot++)
        {
            byte[] rawAddress = new byte[20];
            rng.NextBytes(rawAddress);
            _keys[slot] = new StorageCell(new Address(rawAddress), new UInt256((ulong)slot));

            byte[] payload = new byte[32];
            rng.NextBytes(payload);
            _values[slot] = payload;

            _seqlockCache.Set(in _keys[slot], payload);
        }
    }

    [Benchmark]
    public double MeasureHitRate()
    {
        int retained = 0;
        for (int slot = 0; slot < KeyCount; slot++)
        {
            // Count only entries that come back as the exact array we stored;
            // anything else means the slot was lost (e.g. evicted by a collision).
            bool present = _seqlockCache.TryGetValue(in _keys[slot], out byte[]? stored);
            if (present && ReferenceEquals(stored, _values[slot]))
            {
                retained++;
            }
        }
        return (double)retained / KeyCount * 100;
    }
}

[MemoryDiagnoser]
public class SeqlockCacheCallSiteBenchmarks
{
    private SeqlockCache<StorageCell, byte[]> _cache = null!;
    // Delegate created once in setup, to compare against per-call method-group conversion.
    private SeqlockCache<StorageCell, byte[]>.ValueFactory _cachedFactory = null!;
    private StorageCell _key;
    private byte[] _value = null!;

    [GlobalSetup]
    public void Setup()
    {
        _cache = new SeqlockCache<StorageCell, byte[]>();

        byte[] rawAddress = new byte[20];
        new Random(123).NextBytes(rawAddress);
        _key = new StorageCell(new Address(rawAddress), UInt256.One);
        _value = new byte[32];

        _cache.Set(in _key, _value);
        _cachedFactory = LoadFromBackingStore;
    }

    [Benchmark(Baseline = true)]
    public byte[]? GetOrAdd_Hit_PerCallMethodGroup()
        => _cache.GetOrAdd(in _key, LoadFromBackingStore);

    [Benchmark]
    public byte[]? GetOrAdd_Hit_CachedDelegate()
        => _cache.GetOrAdd(in _key, _cachedFactory);

    [Benchmark]
    public bool TryGetValue_WithIn() => _cache.TryGetValue(in _key, out _);

    [Benchmark]
    public bool TryGetValue_WithoutIn() => _cache.TryGetValue(_key, out _);

    // Stand-in for a real backing-store load; returns the pre-built value.
    private byte[] LoadFromBackingStore(in StorageCell _) => _value;
}
10 changes: 6 additions & 4 deletions src/Nethermind/Nethermind.Blockchain/BlockhashProvider.cs
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ public class BlockhashProvider(
private readonly IBlockhashStore _blockhashStore = new BlockhashStore(worldState);
private readonly ILogger _logger = logManager?.GetClassLogger() ?? throw new ArgumentNullException(nameof(logManager));
private Hash256[]? _hashes;
private long _prefetchVersion;

public Hash256? GetBlockhash(BlockHeader currentBlock, long number, IReleaseSpec spec)
{
Expand All @@ -39,7 +40,7 @@ public class BlockhashProvider(
}

long depth = currentBlock.Number - number;
Hash256[]? hashes = _hashes;
Hash256[]? hashes = Volatile.Read(ref _hashes);

return depth switch
{
Expand All @@ -60,7 +61,8 @@ public class BlockhashProvider(

public async Task Prefetch(BlockHeader currentBlock, CancellationToken token)
{
_hashes = null;
long prefetchVersion = Interlocked.Increment(ref _prefetchVersion);
Volatile.Write(ref _hashes, null);
Hash256[]? hashes = await blockhashCache.Prefetch(currentBlock, token);

// This leverages that branch processing is single threaded
Expand All @@ -69,9 +71,9 @@ public async Task Prefetch(BlockHeader currentBlock, CancellationToken token)
// This allows us to avoid await on Prefetch in BranchProcessor
lock (_blockhashStore)
{
if (!token.IsCancellationRequested)
if (!token.IsCancellationRequested && prefetchVersion == Interlocked.Read(ref _prefetchVersion))
{
_hashes = hashes;
Volatile.Write(ref _hashes, hashes);
}
}
}
Expand Down
Loading