From 7c23def1ffc764cd7cca6dedf7877f3dfd46f7a6 Mon Sep 17 00:00:00 2001 From: Reuben Bond Date: Wed, 28 May 2025 10:31:38 -0700 Subject: [PATCH 1/5] Replace LRU with implementation based on BitFaster.Caching's ConcurrentLru --- .../Caching/ConcurrentLruCache.cs | 956 +++++++++++++ .../Caching/Internal/CacheDebugView.cs | 37 + .../Caching/Internal/CapacityPartition.cs | 72 + .../Internal/ConcurrentDictionarySize.cs | 219 +++ src/Orleans.Core/Caching/Internal/Counter.cs | 70 + src/Orleans.Core/Caching/Internal/ICache.cs | 118 ++ .../Caching/Internal/ICacheMetrics.cs | 39 + .../Caching/Internal/PaddedLong.cs | 32 + .../Caching/Internal/PaddedQueueCount.cs | 14 + src/Orleans.Core/Caching/Internal/Padding.cs | 11 + .../Caching/Internal/Striped64.cs | 277 ++++ .../Caching/Internal/TypeProps.cs | 45 + .../Messaging/CachingIdSpanCodec.cs | 7 +- .../Messaging/CachingSiloAddressCodec.cs | 7 +- src/Orleans.Core/Utils/LRU.cs | 259 ---- .../Options/GrainDirectoryOptions.cs | 6 +- .../AdaptiveGrainDirectoryCache.cs | 15 +- .../GrainDirectoryCacheFactory.cs | 4 +- .../LRUBasedGrainDirectoryCache.cs | 51 - .../GrainDirectory/LruGrainDirectoryCache.cs | 41 + .../Utilities/StripedMpscBuffer.cs | 2 +- .../Caching/ConcurrentLruSoakTests.cs | 463 ++++++ .../Caching/ConcurrentLruTests.cs | 1249 +++++++++++++++++ test/NonSilo.Tests/General/LruTest.cs | 122 -- 24 files changed, 3665 insertions(+), 451 deletions(-) create mode 100644 src/Orleans.Core/Caching/ConcurrentLruCache.cs create mode 100644 src/Orleans.Core/Caching/Internal/CacheDebugView.cs create mode 100644 src/Orleans.Core/Caching/Internal/CapacityPartition.cs create mode 100644 src/Orleans.Core/Caching/Internal/ConcurrentDictionarySize.cs create mode 100644 src/Orleans.Core/Caching/Internal/Counter.cs create mode 100644 src/Orleans.Core/Caching/Internal/ICache.cs create mode 100644 src/Orleans.Core/Caching/Internal/ICacheMetrics.cs create mode 100644 src/Orleans.Core/Caching/Internal/PaddedLong.cs create mode 100644 
src/Orleans.Core/Caching/Internal/PaddedQueueCount.cs create mode 100644 src/Orleans.Core/Caching/Internal/Padding.cs create mode 100644 src/Orleans.Core/Caching/Internal/Striped64.cs create mode 100644 src/Orleans.Core/Caching/Internal/TypeProps.cs delete mode 100644 src/Orleans.Core/Utils/LRU.cs delete mode 100644 src/Orleans.Runtime/GrainDirectory/LRUBasedGrainDirectoryCache.cs create mode 100644 src/Orleans.Runtime/GrainDirectory/LruGrainDirectoryCache.cs create mode 100644 test/NonSilo.Tests/Caching/ConcurrentLruSoakTests.cs create mode 100644 test/NonSilo.Tests/Caching/ConcurrentLruTests.cs delete mode 100644 test/NonSilo.Tests/General/LruTest.cs diff --git a/src/Orleans.Core/Caching/ConcurrentLruCache.cs b/src/Orleans.Core/Caching/ConcurrentLruCache.cs new file mode 100644 index 00000000000..cce9b821f4d --- /dev/null +++ b/src/Orleans.Core/Caching/ConcurrentLruCache.cs @@ -0,0 +1,956 @@ +#nullable enable +using System; +using System.Collections; +using System.Collections.Concurrent; +using System.Collections.Generic; +using System.Diagnostics; +using System.Diagnostics.CodeAnalysis; +using System.Linq; +using System.Runtime.CompilerServices; +using System.Threading; +using Orleans.Caching.Internal; + +namespace Orleans.Caching; + +/// +/// A pseudo LRU based on the TU-Q eviction policy. The LRU list is composed of 3 segments: hot, warm and cold. +/// Cost of maintaining segments is amortized across requests. Items are only cycled when capacity is exceeded. +/// Pure read does not cycle items if all segments are within capacity constraints. There are no global locks. +/// On cache miss, a new item is added. Tail items in each segment are dequeued, examined, and are either enqueued +/// or discarded. 
+/// The TU-Q scheme of hot, warm and cold is similar to that used in MemCached (https://memcached.org/blog/modern-lru/) +/// and OpenBSD (https://flak.tedunangst.com/post/2Q-buffer-cache-algorithm), but does not use a background thread +/// to maintain the internal queues. +/// +/// +/// This implementation is derived from BitFaster.Caching (https://github.com/bitfaster/BitFaster.Caching), removing +/// functionality that is not needed for Orleans (async, custom policies), to reduce the number of source files. +/// +/// Each segment has a capacity. When segment capacity is exceeded, items are moved as follows: +/// +/// New items are added to hot, WasAccessed = false. +/// When items are accessed, update WasAccessed = true. +/// When items are moved WasAccessed is set to false. +/// When hot is full, hot tail is moved to either Warm or Cold depending on WasAccessed. +/// When warm is full, warm tail is moved to warm head or cold depending on WasAccessed. +/// When cold is full, cold tail is moved to warm head or removed from dictionary on depending on WasAccessed. +/// +/// +internal class ConcurrentLruCache : ICache, ICacheMetrics, ConcurrentLruCache.ITestAccessor + where K : notnull +{ + private readonly ConcurrentDictionary _dictionary; + private readonly ConcurrentQueue _hotQueue = new(); + private readonly ConcurrentQueue _warmQueue = new(); + private readonly ConcurrentQueue _coldQueue = new(); + private readonly CapacityPartition _capacity; + private readonly TelemetryPolicy _telemetryPolicy = new(); + + // maintain count outside ConcurrentQueue, since ConcurrentQueue.Count holds a global lock + private PaddedQueueCount _counter; + private bool _isWarm; + + /// + /// Initializes a new instance of the ConcurrentLruCore class with the specified capacity. + /// + /// The capacity. 
+ /// + public ConcurrentLruCache(int capacity) : this(capacity, EqualityComparer.Default) + { + } + + /// + /// Initializes a new instance of the ConcurrentLruCore class with the specified concurrencyLevel, capacity, equality comparer, item policy and telemetry policy. + /// + /// The capacity. + /// The equality comparer. + /// + public ConcurrentLruCache( + int capacity, + IEqualityComparer comparer) + { + ArgumentNullException.ThrowIfNull(comparer); + _capacity = new CapacityPartition(capacity); + + var dictionaryCapacity = ConcurrentDictionarySize.Estimate(Capacity); + _dictionary = new ConcurrentDictionary(Environment.ProcessorCount, dictionaryCapacity, comparer); + } + + // No lock count: https://arbel.net/2013/02/03/best-practices-for-using-concurrentdictionary/ + /// + public int Count => _dictionary.Where(_ => true).Count(); + + /// + public int Capacity => _capacity.Hot + _capacity.Warm + _capacity.Cold; + + /// + public ICacheMetrics Metrics => this; + + /// + /// Gets the number of hot items. + /// + public int HotCount => Volatile.Read(ref _counter.Hot); + + /// + /// Gets the number of warm items. + /// + public int WarmCount => Volatile.Read(ref _counter.Warm); + + /// + /// Gets the number of cold items. + /// + public int ColdCount => Volatile.Read(ref _counter.Cold); + + /// + /// Gets a collection containing the keys in the cache. + /// + public ICollection Keys => _dictionary.Keys; + + /// Returns an enumerator that iterates through the cache. + /// An enumerator for the cache. + /// + /// The enumerator returned from the cache is safe to use concurrently with + /// reads and writes, however it does not represent a moment-in-time snapshot. + /// The contents exposed through the enumerator may contain modifications + /// made after was called. 
+ /// + public IEnumerator> GetEnumerator() + { + foreach (var kvp in _dictionary) + { + yield return new KeyValuePair(kvp.Key, kvp.Value.Value); + } + } + + /// + public V Get(K key) + { + if (!TryGet(key, out var value)) + { + throw new KeyNotFoundException($"Key '{key}' not found in the cache."); + } + + return value; + } + + /// + public bool TryGet(K key, [MaybeNullWhen(false)] out V value) + { + if (_dictionary.TryGetValue(key, out var item)) + { + value = item.Value; + item.MarkAccessed(); + _telemetryPolicy.IncrementHit(); + return true; + } + + value = default; + _telemetryPolicy.IncrementMiss(); + return false; + } + + public bool TryAdd(K key, V value) + { + var newItem = new LruItem(key, value); + + if (_dictionary.TryAdd(key, newItem)) + { + _hotQueue.Enqueue(newItem); + Cycle(Interlocked.Increment(ref _counter.Hot)); + return true; + } + + (newItem.Value as IDisposable)?.Dispose(); + + return false; + } + + /// + public V GetOrAdd(K key, Func valueFactory) + { + while (true) + { + if (TryGet(key, out var value)) + { + return value; + } + + // The value factory may be called concurrently for the same key, but the first write to the dictionary wins. + value = valueFactory(key); + + if (TryAdd(key, value)) + { + return value; + } + } + } + + /// + /// Adds a key/value pair to the cache if the key does not already exist. Returns the new value, or the + /// existing value if the key already exists. + /// + /// The type of an argument to pass into valueFactory. + /// The key of the element to add. + /// The factory function used to generate a value for the key. + /// An argument value to pass into valueFactory. + /// The value for the key. This will be either the existing value for the key if the key is already + /// in the cache, or the new value if the key was not in the cache. 
+ public V GetOrAdd(K key, Func valueFactory, TArg factoryArgument) + { + while (true) + { + if (TryGet(key, out var value)) + { + return value; + } + + // The value factory may be called concurrently for the same key, but the first write to the dictionary wins. + value = valueFactory(key, factoryArgument); + + if (TryAdd(key, value)) + { + return value; + } + } + } + + /// + /// Attempts to remove the specified key value pair. + /// + /// The predicate used to determine if the item should be removed. + /// Argument passed to the predicate. + /// true if the item was removed successfully; otherwise, false. + public bool TryRemove(K key, Func predicate, TArg predicateArgument) + { + if (_dictionary.TryGetValue(key, out var existing)) + { + lock (existing) + { + if (predicate(existing.Value, predicateArgument)) + { + var kvp = new KeyValuePair(key, existing); + if (_dictionary.TryRemove(kvp)) + { + OnRemove(kvp.Value, ItemRemovedReason.Removed); + return true; + } + } + } + + // it existed, but we couldn't remove - this means value was replaced after the TryGetValue (a race) + } + + return false; + } + + /// + /// Attempts to remove the specified key value pair. + /// + /// The item to remove. + /// true if the item was removed successfully; otherwise, false. + public bool TryRemove(KeyValuePair item) + { + if (_dictionary.TryGetValue(item.Key, out var existing)) + { + lock (existing) + { + if (EqualityComparer.Default.Equals(existing.Value, item.Value)) + { + var kvp = new KeyValuePair(item.Key, existing); + if (_dictionary.TryRemove(kvp)) + { + OnRemove(kvp.Value, ItemRemovedReason.Removed); + return true; + } + } + } + + // it existed, but we couldn't remove - this means value was replaced after the TryGetValue (a race) + } + + return false; + } + + /// + /// Attempts to remove and return the value that has the specified key. + /// + /// The key of the element to remove. 
+ /// When this method returns, contains the object removed, or the default value of the value type if key does not exist. + /// true if the object was removed successfully; otherwise, false. + public bool TryRemove(K key, [MaybeNullWhen(false)] out V value) + { + if (_dictionary.TryRemove(key, out var item)) + { + OnRemove(item, ItemRemovedReason.Removed); + value = item.Value; + return true; + } + + value = default; + return false; + } + + /// + public bool TryRemove(K key) => TryRemove(key, out _); + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private void OnRemove(LruItem item, ItemRemovedReason reason) + { + // Mark as not accessed, it will later be cycled out of the queues because it can never be fetched + // from the dictionary. Note: Hot/Warm/Cold count will reflect the removed item until it is cycled + // from the queue. + item.WasAccessed = false; + item.WasRemoved = true; + + if (reason == ItemRemovedReason.Evicted) + { + _telemetryPolicy.IncrementEvicted(); + } + + // serialize dispose (common case dispose not thread safe) + lock (item) + { + (item.Value as IDisposable)?.Dispose(); + } + } + + /// + ///Note: Calling this method does not affect LRU order. + public bool TryUpdate(K key, V value) + { + if (_dictionary.TryGetValue(key, out var existing)) + { + lock (existing) + { + if (!existing.WasRemoved) + { + var oldValue = existing.Value; + + existing.Value = value; + + _telemetryPolicy.IncrementUpdated(); + (oldValue as IDisposable)?.Dispose(); + + return true; + } + } + } + + return false; + } + + /// + ///Note: Updates to existing items do not affect LRU order. Added items are at the top of the LRU. 
+ public void AddOrUpdate(K key, V value) + { + while (true) + { + // first, try to update + if (TryUpdate(key, value)) + { + return; + } + + // then try add + var newItem = new LruItem(key, value); + + if (_dictionary.TryAdd(key, newItem)) + { + _hotQueue.Enqueue(newItem); + Cycle(Interlocked.Increment(ref _counter.Hot)); + return; + } + + // if both update and add failed there was a race, try again + } + } + + /// + public void Clear() + { + // don't overlap Clear/Trim/TrimExpired + lock (_dictionary) + { + // evaluate queue count, remove everything including items removed from the dictionary but + // not the queues. This also avoids the expensive o(n) no lock count, or locking the dictionary. + var queueCount = HotCount + WarmCount + ColdCount; + TrimLiveItems(queueCount, ItemRemovedReason.Cleared); + } + } + + /// + /// Trim the specified number of items from the cache. Removes items in LRU order. + /// + /// The number of items to remove. + /// The number of items removed from the cache. + /// is less than 0./ + /// is greater than capacity./ + /// + /// Note: Trim affects LRU order. Calling Trim resets the internal accessed status of items. + /// + public void Trim(int itemCount) + { + var capacity = Capacity; + ArgumentOutOfRangeException.ThrowIfLessThan(itemCount, 1); + ArgumentOutOfRangeException.ThrowIfGreaterThan(itemCount, capacity); + + // clamp itemCount to number of items actually in the cache + itemCount = Math.Min(itemCount, HotCount + WarmCount + ColdCount); + + // don't overlap Clear/Trim/TrimExpired + lock (_dictionary) + { + TrimLiveItems(itemCount, ItemRemovedReason.Trimmed); + } + } + + private void TrimLiveItems(int itemCount, ItemRemovedReason reason) + { + // When items are touched, they are moved to warm by cycling. Therefore, to guarantee + // that we can remove itemCount items, we must cycle (2 * capacity.Warm) + capacity.Hot times. + // If clear is called during trimming, it would be possible to get stuck in an infinite + // loop here. 
The warm + hot limit also guards against this case. + var trimWarmAttempts = 0; + var itemsRemoved = 0; + var maxWarmHotAttempts = _capacity.Warm * 2 + _capacity.Hot; + + while (itemsRemoved < itemCount && trimWarmAttempts < maxWarmHotAttempts) + { + if (Volatile.Read(ref _counter.Cold) > 0) + { + if (TryRemoveCold(reason) == (ItemDestination.Remove, 0)) + { + itemsRemoved++; + trimWarmAttempts = 0; + } + else + { + TrimWarmOrHot(reason); + } + } + else + { + TrimWarmOrHot(reason); + trimWarmAttempts++; + } + } + + if (Volatile.Read(ref _counter.Warm) < _capacity.Warm) + { + Volatile.Write(ref _isWarm, false); + } + } + + private void TrimWarmOrHot(ItemRemovedReason reason) + { + if (Volatile.Read(ref _counter.Warm) > 0) + { + CycleWarmUnchecked(reason); + } + else if (Volatile.Read(ref _counter.Hot) > 0) + { + CycleHotUnchecked(reason); + } + } + + private void Cycle(int hotCount) + { + if (_isWarm) + { + (var dest, var count) = CycleHot(hotCount); + + var cycles = 0; + while (cycles++ < 3 && dest != ItemDestination.Remove) + { + if (dest == ItemDestination.Warm) + { + (dest, count) = CycleWarm(count); + } + else if (dest == ItemDestination.Cold) + { + (dest, count) = CycleCold(count); + } + } + + // If nothing was removed yet, constrain the size of warm and cold by discarding the coldest item. + if (dest != ItemDestination.Remove) + { + if (dest == ItemDestination.Warm && count > _capacity.Warm) + { + count = LastWarmToCold(); + } + + ConstrainCold(count, ItemRemovedReason.Evicted); + } + } + else + { + // fill up the warm queue with new items until warm is full. + // else during warmup the cache will only use the hot + cold queues until any item is requested twice. 
+ CycleDuringWarmup(hotCount); + } + } + + [MethodImpl(MethodImplOptions.NoInlining)] + private void CycleDuringWarmup(int hotCount) + { + // do nothing until hot is full + if (hotCount > _capacity.Hot) + { + Interlocked.Decrement(ref _counter.Hot); + + if (_hotQueue.TryDequeue(out var item)) + { + // special case: removed during warmup + if (item.WasRemoved) + { + return; + } + + var count = Move(item, ItemDestination.Warm, ItemRemovedReason.Evicted); + + // if warm is now full, overflow to cold and mark as warm + if (count > _capacity.Warm) + { + Volatile.Write(ref _isWarm, true); + count = LastWarmToCold(); + ConstrainCold(count, ItemRemovedReason.Evicted); + } + } + else + { + Interlocked.Increment(ref _counter.Hot); + } + } + } + + private (ItemDestination, int) CycleHot(int hotCount) + { + if (hotCount > _capacity.Hot) + { + return CycleHotUnchecked(ItemRemovedReason.Evicted); + } + + return (ItemDestination.Remove, 0); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private (ItemDestination, int) CycleHotUnchecked(ItemRemovedReason removedReason) + { + Interlocked.Decrement(ref _counter.Hot); + + if (_hotQueue.TryDequeue(out var item)) + { + var where = RouteHot(item); + return (where, Move(item, where, removedReason)); + } + else + { + Interlocked.Increment(ref _counter.Hot); + return (ItemDestination.Remove, 0); + } + } + + private (ItemDestination, int) CycleWarm(int count) + { + if (count > _capacity.Warm) + { + return CycleWarmUnchecked(ItemRemovedReason.Evicted); + } + + return (ItemDestination.Remove, 0); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private (ItemDestination, int) CycleWarmUnchecked(ItemRemovedReason removedReason) + { + var wc = Interlocked.Decrement(ref _counter.Warm); + + if (_warmQueue.TryDequeue(out var item)) + { + if (item.WasRemoved) + { + return (ItemDestination.Remove, 0); + } + + var where = RouteWarm(item); + + // When the warm queue is full, we allow an overflow of 1 item before redirecting 
warm items to cold. + // This only happens when hit rate is high, in which case we can consider all items relatively equal in + // terms of which was least recently used. + if (where == ItemDestination.Warm && wc <= _capacity.Warm) + { + return (ItemDestination.Warm, Move(item, where, removedReason)); + } + else + { + return (ItemDestination.Cold, Move(item, ItemDestination.Cold, removedReason)); + } + } + else + { + Interlocked.Increment(ref _counter.Warm); + return (ItemDestination.Remove, 0); + } + } + + private (ItemDestination, int) CycleCold(int count) + { + if (count > _capacity.Cold) + { + return TryRemoveCold(ItemRemovedReason.Evicted); + } + + return (ItemDestination.Remove, 0); + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private (ItemDestination, int) TryRemoveCold(ItemRemovedReason removedReason) + { + Interlocked.Decrement(ref _counter.Cold); + + if (_coldQueue.TryDequeue(out var item)) + { + var where = RouteCold(item); + if (where == ItemDestination.Warm && Volatile.Read(ref _counter.Warm) <= _capacity.Warm) + { + return (ItemDestination.Warm, Move(item, where, removedReason)); + } + else + { + Move(item, ItemDestination.Remove, removedReason); + return (ItemDestination.Remove, 0); + } + } + else + { + return (ItemDestination.Cold, Interlocked.Increment(ref _counter.Cold)); + } + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private int LastWarmToCold() + { + Interlocked.Decrement(ref _counter.Warm); + + if (_warmQueue.TryDequeue(out var item)) + { + var destination = item.WasRemoved ? 
ItemDestination.Remove : ItemDestination.Cold; + return Move(item, destination, ItemRemovedReason.Evicted); + } + else + { + Interlocked.Increment(ref _counter.Warm); + return 0; + } + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private void ConstrainCold(int coldCount, ItemRemovedReason removedReason) + { + if (coldCount > _capacity.Cold && _coldQueue.TryDequeue(out var item)) + { + Interlocked.Decrement(ref _counter.Cold); + Move(item, ItemDestination.Remove, removedReason); + } + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private int Move(LruItem item, ItemDestination where, ItemRemovedReason removedReason) + { + item.WasAccessed = false; + + switch (where) + { + case ItemDestination.Warm: + _warmQueue.Enqueue(item); + return Interlocked.Increment(ref _counter.Warm); + case ItemDestination.Cold: + _coldQueue.Enqueue(item); + return Interlocked.Increment(ref _counter.Cold); + case ItemDestination.Remove: + + var kvp = new KeyValuePair(item.Key, item); + + if (_dictionary.TryRemove(kvp)) + { + OnRemove(item, removedReason); + } + + break; + } + + return 0; + } + + /// Returns an enumerator that iterates through the cache. + /// An enumerator for the cache. + /// + /// The enumerator returned from the cache is safe to use concurrently with + /// reads and writes, however it does not represent a moment-in-time snapshot. + /// The contents exposed through the enumerator may contain modifications + /// made after was called. + /// + IEnumerator IEnumerable.GetEnumerator() => GetEnumerator(); + +#if DEBUG + /// + /// Format the LRU as a string by converting all the keys to strings. + /// + /// The LRU formatted as a string. 
+ internal string FormatLruString() + { + var sb = new System.Text.StringBuilder(); + + sb.Append("Hot ["); + sb.Append(string.Join(",", _hotQueue.Select(n => n.Key.ToString()))); + sb.Append("] Warm ["); + sb.Append(string.Join(",", _warmQueue.Select(n => n.Key.ToString()))); + sb.Append("] Cold ["); + sb.Append(string.Join(",", _coldQueue.Select(n => n.Key.ToString()))); + sb.Append(']'); + + return sb.ToString(); + } +#endif + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ItemDestination RouteHot(LruItem item) + { + if (item.WasRemoved) + { + return ItemDestination.Remove; + } + + if (item.WasAccessed) + { + return ItemDestination.Warm; + } + + return ItemDestination.Cold; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ItemDestination RouteWarm(LruItem item) + { + if (item.WasRemoved) + { + return ItemDestination.Remove; + } + + if (item.WasAccessed) + { + return ItemDestination.Warm; + } + + return ItemDestination.Cold; + } + + [MethodImpl(MethodImplOptions.AggressiveInlining)] + private static ItemDestination RouteCold(LruItem item) + { + if (item.WasAccessed & !item.WasRemoved) + { + return ItemDestination.Warm; + } + + return ItemDestination.Remove; + } + + double ICacheMetrics.HitRatio => _telemetryPolicy.HitRatio; + + long ICacheMetrics.Total => _telemetryPolicy.Total; + + long ICacheMetrics.Hits => _telemetryPolicy.Hits; + + long ICacheMetrics.Misses => _telemetryPolicy.Misses; + + long ICacheMetrics.Evicted => _telemetryPolicy.Evicted; + + long ICacheMetrics.Updated => _telemetryPolicy.Updated; + + ConcurrentQueue ITestAccessor.HotQueue => _hotQueue; + ConcurrentQueue ITestAccessor.WarmQueue => _warmQueue; + ConcurrentQueue ITestAccessor.ColdQueue => _coldQueue; + ConcurrentDictionary ITestAccessor.Dictionary => _dictionary; + bool ITestAccessor.IsWarm => _isWarm; + + /// + /// Represents an LRU item. + /// + /// + /// Initializes a new instance of the LruItem class with the specified key and value. 
+ /// + /// The key. + /// The value. + // NOTE: Internal for testing + [DebuggerDisplay("[{Key}] = {Value}")] + internal sealed class LruItem(K key, V value) + { + private V _data = value; + + // only used when V is a non-atomic value type to prevent torn reads + private int _sequence; + + /// + /// Gets the key. + /// + public readonly K Key = key; + + /// + /// Gets or sets the value. + /// + public V Value + { + [MethodImpl(MethodImplOptions.AggressiveInlining)] + get + { + if (TypeProps.IsWriteAtomic) + { + return _data; + } + else + { + return SeqLockRead(); + } + } + [MethodImpl(MethodImplOptions.AggressiveInlining)] + set + { + if (TypeProps.IsWriteAtomic) + { + _data = value; + } + else + { + SeqLockWrite(value); + } + } + } + + /// + /// Gets or sets a value indicating whether the item was accessed. + /// + public bool WasAccessed { get; set; } + + /// + /// Gets or sets a value indicating whether the item was removed. + /// + public bool WasRemoved { get; set; } + + /// + /// Marks the item as accessed, if it was not already accessed. + /// + public void MarkAccessed() + { + if (!WasAccessed) + { + WasAccessed = true; + } + } + + internal V SeqLockRead() + { + var spin = new SpinWait(); + while (true) + { + var start = Volatile.Read(ref _sequence); + + if ((start & 1) == 1) + { + // A write is in progress, spin. + spin.SpinOnce(); + continue; + } + + var copy = _data; + + var end = Volatile.Read(ref _sequence); + if (start == end) + { + return copy; + } + } + } + + // Note: LruItem should be locked while invoking this method. Multiple writer threads are not supported. + internal void SeqLockWrite(V value) + { + Interlocked.Increment(ref _sequence); + + _data = value; + + Interlocked.Increment(ref _sequence); + } + } + + /// + /// Represents a telemetry policy with counters and events. 
+ /// + [DebuggerDisplay("Hit = {Hits}, Miss = {Misses}, Upd = {Updated}, Evict = {Evicted}")] + internal readonly struct TelemetryPolicy + { + private readonly Counter _hitCount = new(); + private readonly Counter _missCount = new(); + private readonly Counter _evictedCount = new(); + private readonly Counter _updatedCount = new(); + + public TelemetryPolicy() + { + } + + /// + public readonly double HitRatio => Total == 0 ? 0 : Hits / (double)Total; + + /// + public readonly long Total => _hitCount.Count() + _missCount.Count(); + + /// + public readonly long Hits => _hitCount.Count(); + + /// + public readonly long Misses => _missCount.Count(); + + /// + public readonly long Evicted => _evictedCount.Count(); + + /// + public readonly long Updated => _updatedCount.Count(); + + /// + public readonly void IncrementMiss() => _missCount.Increment(); + + /// + public readonly void IncrementHit() => _hitCount.Increment(); + + /// + public readonly void IncrementEvicted() => _evictedCount.Increment(); + + /// + public readonly void IncrementUpdated() => _updatedCount.Increment(); + } + + private enum ItemDestination + { + Warm, + Cold, + Remove + } + + private enum ItemRemovedReason + { + Removed, + Evicted, + Cleared, + Trimmed, + } + + internal interface ITestAccessor + { + public ConcurrentQueue HotQueue { get; } + public ConcurrentQueue WarmQueue { get; } + public ConcurrentQueue ColdQueue { get; } + public ConcurrentDictionary Dictionary { get; } + public bool IsWarm { get; } + } +} diff --git a/src/Orleans.Core/Caching/Internal/CacheDebugView.cs b/src/Orleans.Core/Caching/Internal/CacheDebugView.cs new file mode 100644 index 00000000000..fbdf8c3c698 --- /dev/null +++ b/src/Orleans.Core/Caching/Internal/CacheDebugView.cs @@ -0,0 +1,37 @@ +#nullable enable +using System; +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; + +namespace Orleans.Caching.Internal; + +// Source: 
https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/CacheDebugView.cs +[ExcludeFromCodeCoverage] +internal sealed class CacheDebugView + where K : notnull +{ + private readonly ICache _cache; + + public CacheDebugView(ICache cache) + { + ArgumentNullException.ThrowIfNull(cache); + _cache = cache; + } + + public KeyValuePair[] Items + { + get + { + var items = new KeyValuePair[_cache.Count]; + + var index = 0; + foreach (var kvp in _cache) + { + items[index++] = kvp; + } + return items; + } + } + + public ICacheMetrics? Metrics => _cache.Metrics; +} diff --git a/src/Orleans.Core/Caching/Internal/CapacityPartition.cs b/src/Orleans.Core/Caching/Internal/CapacityPartition.cs new file mode 100644 index 00000000000..33f0e553b64 --- /dev/null +++ b/src/Orleans.Core/Caching/Internal/CapacityPartition.cs @@ -0,0 +1,72 @@ +using System; +using System.Diagnostics; + +namespace Orleans.Caching.Internal; + +/// +/// A capacity partitioning scheme that favors frequently accessed items by allocating 80% +/// capacity to the warm queue. +/// +// Source: https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/Lru/FavorWarmPartition.cs +[DebuggerDisplay("{Hot}/{Warm}/{Cold}")] +internal readonly struct CapacityPartition +{ + /// + /// Default to 80% capacity allocated to warm queue, 20% split equally for hot and cold. + /// This favors frequently accessed items. + /// + public const double DefaultWarmRatio = 0.8; + + /// + /// Initializes a new instance of the CapacityPartition class with the specified capacity and the default warm ratio. + /// + /// The total capacity. + public CapacityPartition(int totalCapacity) + : this(totalCapacity, DefaultWarmRatio) + { + } + + /// + /// Initializes a new instance of the CapacityPartition class with the specified capacity and warm ratio. + /// + /// The total capacity. + /// The ratio of warm items to hot and cold items. 
+ public CapacityPartition(int totalCapacity, double warmRatio) + { + ArgumentOutOfRangeException.ThrowIfLessThan(totalCapacity, 3); + ArgumentOutOfRangeException.ThrowIfLessThan(warmRatio, 0.0); + ArgumentOutOfRangeException.ThrowIfGreaterThanOrEqual(warmRatio, 1.0); + + var (hot, warm, cold) = ComputeQueueCapacity(totalCapacity, warmRatio); + Debug.Assert(cold >= 1); + Debug.Assert(warm >= 1); + Debug.Assert(hot >= 1); + Hot = hot; + Warm = warm; + Cold = cold; + } + + public int Cold { get; } + + public int Warm { get; } + + public int Hot { get; } + + private static (int hot, int warm, int cold) ComputeQueueCapacity(int capacity, double warmRatio) + { + var warm2 = (int)(capacity * warmRatio); + var hot2 = (capacity - warm2) / 2; + + if (hot2 < 1) + { + hot2 = 1; + } + + var cold2 = hot2; + + var overflow = warm2 + hot2 + cold2 - capacity; + warm2 -= overflow; + + return (hot2, warm2, cold2); + } +} diff --git a/src/Orleans.Core/Caching/Internal/ConcurrentDictionarySize.cs b/src/Orleans.Core/Caching/Internal/ConcurrentDictionarySize.cs new file mode 100644 index 00000000000..2b4158ecb9b --- /dev/null +++ b/src/Orleans.Core/Caching/Internal/ConcurrentDictionarySize.cs @@ -0,0 +1,219 @@ +using System; +using System.Collections.Generic; + +namespace Orleans.Caching.Internal; + +// Source: https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/ConcurrentDictionarySize.cs +internal static class ConcurrentDictionarySize +{ + private static int NextPrimeGreaterThan(int min) + { + foreach (var prime in Primes) + { + if (prime > min) + { + return prime; + } + } + + return min; + } + + /// + /// Estimate the size of the ConcurrentDictionary constructor capacity arg to use for the given desired cache size. 
+ /// + /// + /// To minimize collisions, ideal case is is for ConcurrentDictionary to have a prime number of buckets, and + /// for the bucket count to be about 33% greater than the cache capacity (load factor of 0.75). + /// See load factor here: https://en.wikipedia.org/wiki/Hash_table + /// + /// The desired cache size + /// The estimated optimal ConcurrentDictionary capacity + internal static int Estimate(int desiredSize) + { + // Size map entries are approx 4% apart in the worst case, so increase by 29% to target 33%. + // In practice, this leads to the number of buckets being somewhere between 29% and 40% greater + // than cache capacity. + try + { + checked + { + desiredSize = (int)(desiredSize * 1.29); + } + + // When small, exact size hashtable to nearest larger prime number + if (desiredSize < 197) + { + return NextPrimeGreaterThan(desiredSize); + } + + // When large, size to approx 10% of desired size to save memory. Initial value is chosen such + // that 4x ConcurrentDictionary grow operations will select a prime number slightly larger + // than desired size. + foreach (var pair in SizeMap) + { + if (pair.Key > desiredSize) + { + return pair.Value; + } + } + } + catch (OverflowException) + { + // return largest + } + + // Use largest mapping: ConcurrentDictionary will resize to max array size after 4x grow calls. 
+ return SizeMap[^1].Value; + } + +#if NETSTANDARD2_0 + internal static int[] Primes = new int[] { +#else + private static ReadOnlySpan Primes => new int[] { +#endif + 3, 7, 11, 17, 23, 29, 37, 47, 59, 71, 89, 107, 131, 163, 197, 239, 293, 353, 431, 521, 631, 761, 919, + 1103, 1327, 1597, 1931, 2333, 2801, 3371, 4049, 4861, 5839, 7013, 8419, 10103, 12143, 14591, + 17519, 21023, 25229, 30293, 36353, 43627, 52361, 62851, 75431, 90523, 108631, 130363, 156437, + 187751, 225307, 270371, 324449, 389357, 467237, 560689, 672827, 807403, 968897, 1162687, 1395263, + 1674319, 2009191, 2411033, 2893249, 3471899, 4166287, 4999559, 5999471, 7199369 + }; + +#if NETSTANDARD2_0 + internal static KeyValuePair[] SizeMap = +#else + private static ReadOnlySpan> SizeMap => +#endif + new KeyValuePair[129] + { + new(197, 197), + new(277, 137), + new(331, 163), + new(359, 179), + new(397, 197), + new(443, 221), + new(499, 247), + new(557, 137), + new(599, 149), + new(677, 167), + new(719, 179), + new(797, 197), + new(839, 209), + new(887, 221), + new(1061, 131), + new(1117, 137), + new(1237, 151), + new(1439, 179), + new(1559, 193), + new(1777, 221), + new(2011, 247), + new(2179, 269), + new(2347, 289), + new(2683, 331), + new(2797, 347), + new(3359, 419), + new(3917, 487), + new(4363, 541), + new(4597, 571), + new(5879, 733), + new(7517, 937), + new(8731, 1087), + new(9839, 1229), + new(17467, 2179), + new(18397, 2297), + new(20357, 2543), + new(24317, 3037), + new(25919, 3239), + new(29759, 3719), + new(31357, 3917), + new(33599, 4199), + new(38737, 4841), + new(41117, 5137), + new(48817, 6101), + new(61819, 7723), + new(72959, 9119), + new(86011, 10747), + new(129277, 16157), + new(140797, 17597), + new(164477, 20557), + new(220411, 27547), + new(233851, 29227), + new(294397, 36797), + new(314879, 39359), + new(338683, 42331), + new(389117, 48637), + new(409597, 51197), + new(436477, 54557), + new(609277, 76157), + new(651517, 81437), + new(737279, 92159), + new(849917, 106237), + 
new(1118203, 139771), + new(1269757, 158717), + new(1440763, 180091), + new(1576957, 197117), + new(1684477, 210557), + new(2293757, 286717), + new(2544637, 318077), + new(2666491, 333307), + new(2846717, 355837), + new(3368957, 421117), + new(3543037, 442877), + new(4472827, 559099), + new(4710397, 588797), + new(5038079, 629759), + new(5763067, 720379), + new(6072317, 759037), + new(6594557, 824317), + new(7913467, 989179), + new(8257531, 1032187), + new(9175037, 1146877), + new(9633787, 1204219), + new(10076159, 1259519), + new(11386877, 1423357), + new(14020603, 1752571), + new(16056317, 2007037), + new(19496957, 2437117), + new(20848637, 2606077), + new(24084479, 3010559), + new(27934717, 3491837), + new(29589499, 3698683), + new(32788477, 4098557), + new(36044797, 4505597), + new(38051837, 4756477), + new(43581437, 5447677), + new(51814397, 6476797), + new(56688637, 7086077), + new(60948479, 7618559), + new(69631997, 8703997), + new(75366397, 9420797), + new(78643199, 9830399), + new(96337919, 12042239), + new(106168319, 13271039), + new(115671037, 14458877), + new(132382717, 16547837), + new(144179197, 18022397), + new(165150719, 20643839), + new(178257917, 22282237), + new(188743679, 23592959), + new(209715197, 26214397), + new(254279677, 31784957), + new(297271291, 37158907), + new(314572799, 39321599), + new(385351679, 48168959), + new(453509117, 56688637), + new(517472251, 64684027), + new(644874239, 80609279), + new(673710077, 84213757), + new(770703359, 96337919), + new(849346559, 106168319), + new(903086077, 112885757), + new(1145044987, 143130619), + new(1233125371, 154140667), + new(1321205759, 165150719), + new(1394606077, 174325757), + new(1635778559, 204472319), + new(1855979519, 231997439), + new(2003828731, 250478587), + }; +} diff --git a/src/Orleans.Core/Caching/Internal/Counter.cs b/src/Orleans.Core/Caching/Internal/Counter.cs new file mode 100644 index 00000000000..e9cb76a502a --- /dev/null +++ b/src/Orleans.Core/Caching/Internal/Counter.cs 
@@ -0,0 +1,70 @@ +#nullable enable +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +using Orleans; + +namespace Orleans.Caching.Internal; + +/// +/// A thread-safe counter suitable for high throughput counting across many concurrent threads. +/// +/// Based on the LongAdder class by Doug Lea. +// Source: https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/Counters/Counter.cs +internal sealed class Counter : Striped64 +{ + /// + /// Creates a new Counter with an initial sum of zero. + /// + public Counter() { } + + /// + /// Computes the current count. + /// + /// The current sum. + public long Count() + { + var @as = cells; Cell a; + var sum = @base.VolatileRead(); + if (@as != null) + { + for (var i = 0; i < @as.Length; ++i) + { + if ((a = @as[i]) != null) + sum += a.Value.VolatileRead(); + } + } + return sum; + } + + /// + /// Increment by 1. + /// + public void Increment() + { + Add(1L); + } + + /// + /// Adds the specified value. + /// + /// The value to add. + public void Add(long value) + { + Cell[]? 
@as; + long b, v; + int m; + Cell a; + if ((@as = cells) != null || !@base.CompareAndSwap(b = @base.VolatileRead(), b + value)) + { + var uncontended = true; + if (@as == null || (m = @as.Length - 1) < 0 || (a = @as[GetProbe() & m]) == null || !(uncontended = a.Value.CompareAndSwap(v = a.Value.VolatileRead(), v + value))) + { + LongAccumulate(value, uncontended); + } + } + } +} diff --git a/src/Orleans.Core/Caching/Internal/ICache.cs b/src/Orleans.Core/Caching/Internal/ICache.cs new file mode 100644 index 00000000000..bc6a7987a8b --- /dev/null +++ b/src/Orleans.Core/Caching/Internal/ICache.cs @@ -0,0 +1,118 @@ +using System; +using System.Collections.Generic; +using System.Diagnostics.CodeAnalysis; + +namespace Orleans.Caching.Internal; + +/// +/// Represents a generic cache of key/value pairs. +/// +/// The type of keys in the cache. +/// The type of values in the cache. +// Source: https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/ICache.cs +internal interface ICache : IEnumerable> +{ + /// + /// Gets the number of items currently held in the cache. + /// + int Count { get; } + + /// + /// Gets the cache metrics, if configured. + /// + ICacheMetrics Metrics { get; } + + /// + /// Gets a collection containing the keys in the cache. + /// + ICollection Keys { get; } + + /// + /// Attempts to add the specified key and value to the cache if the key does not already exist. + /// + /// The key of the element to add. + /// The value of the element to add. + /// true if the key/value pair was added to the cache; otherwise, false. + bool TryAdd(K key, V value); + + /// + /// Attempts to get the value associated with the specified key from the cache. + /// + /// The key of the value to get. + /// When this method returns, contains the object from the cache that has the specified key, or the default value of the type if the operation failed. + /// true if the key was found in the cache; otherwise, false. 
+ bool TryGet(K key, [MaybeNullWhen(false)] out V value); + + /// + /// Gets the value associated with the specified key from the cache. + /// + /// The key of the value to get. + /// The value. + V Get(K key); + + /// + /// Adds a key/value pair to the cache if the key does not already exist. Returns the new value, or the + /// existing value if the key already exists. + /// + /// The key of the element to add. + /// The factory function used to generate a value for the key. + /// The value for the key. This will be either the existing value for the key if the key is already + /// in the cache, or the new value if the key was not in the cache. + V GetOrAdd(K key, Func valueFactory); + + /// + /// Adds a key/value pair to the cache if the key does not already exist. Returns the new value, or the + /// existing value if the key already exists. + /// + /// The type of an argument to pass into valueFactory. + /// The key of the element to add. + /// The factory function used to generate a value for the key. + /// An argument value to pass into valueFactory. + /// The value for the key. This will be either the existing value for the key if the key is already + /// in the cache, or the new value if the key was not in the cache. + /// The default implementation given here is the fallback that provides backwards compatibility for classes that implement ICache on prior versions + V GetOrAdd(K key, Func valueFactory, TArg factoryArgument) => GetOrAdd(key, k => valueFactory(k, factoryArgument)); + + /// + /// Attempts to remove and return the value that has the specified key. + /// + /// The key of the element to remove. + /// When this method returns, contains the object removed, or the default value of the value type if key does not exist. + /// true if the object was removed successfully; otherwise, false. + bool TryRemove(K key, [MaybeNullWhen(false)] out V value); + + /// + /// Attempts to remove the specified key value pair. + /// + /// The item to remove. 
+ /// true if the item was removed successfully; otherwise, false. + bool TryRemove(KeyValuePair item); + + /// + /// Attempts to remove the value that has the specified key. + /// + /// The key of the element to remove. + /// true if the object was removed successfully; otherwise, false. + bool TryRemove(K key); + + /// + /// Attempts to update the value that has the specified key. + /// + /// The key of the element to update. + /// The new value. + /// true if the object was updated successfully; otherwise, false. + bool TryUpdate(K key, V value); + + /// + /// Adds a key/value pair to the cache if the key does not already exist, or updates a key/value pair if the + /// key already exists. + /// + /// The key of the element to update. + /// The new value. + void AddOrUpdate(K key, V value); + + /// + /// Removes all keys and values from the cache. + /// + void Clear(); +} diff --git a/src/Orleans.Core/Caching/Internal/ICacheMetrics.cs b/src/Orleans.Core/Caching/Internal/ICacheMetrics.cs new file mode 100644 index 00000000000..eccdedd1fba --- /dev/null +++ b/src/Orleans.Core/Caching/Internal/ICacheMetrics.cs @@ -0,0 +1,39 @@ +namespace Orleans.Caching.Internal; + +/// +/// Represents cache metrics collected over the lifetime of the cache. +/// If metrics are disabled. +/// +// Source: https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/ICacheMetrics.cs?plain=1#L8C22-L8C35 +internal interface ICacheMetrics +{ + /// + /// Gets the ratio of hits to misses, where a value of 1 indicates 100% hits. + /// + double HitRatio { get; } + + /// + /// Gets the total number of requests made to the cache. + /// + long Total { get; } + + /// + /// Gets the total number of cache hits. + /// + long Hits { get; } + + /// + /// Gets the total number of cache misses. + /// + long Misses { get; } + + /// + /// Gets the total number of evicted items. 
+ /// + long Evicted { get; } + + /// + /// Gets the total number of updated items. + /// + long Updated { get; } +} diff --git a/src/Orleans.Core/Caching/Internal/PaddedLong.cs b/src/Orleans.Core/Caching/Internal/PaddedLong.cs new file mode 100644 index 00000000000..2f3089a41d7 --- /dev/null +++ b/src/Orleans.Core/Caching/Internal/PaddedLong.cs @@ -0,0 +1,32 @@ +using System.Runtime.InteropServices; +using System.Threading; + +namespace Orleans.Caching.Internal; + +/// +/// A long value padded by the size of a CPU cache line to mitigate false sharing. +/// +// Source: https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/Counters/PaddedLong.cs +[StructLayout(LayoutKind.Explicit, Size = 2 * Padding.CACHE_LINE_SIZE)] // padding before/between/after fields +internal struct PaddedLong +{ + /// + /// The value. + /// + [FieldOffset(Padding.CACHE_LINE_SIZE)] public long Value; + + /// + /// Reads the value of the field, and on systems that require it inserts a memory barrier to + /// prevent reordering of memory operations. + /// + /// The value that was read. + public long VolatileRead() => Volatile.Read(ref Value); + + /// + /// Compares the current value with an expected value, if they are equal replaces the current value. + /// + /// The expected value. + /// The updated value. + /// True if the value is updated, otherwise false. 
+ public bool CompareAndSwap(long expected, long updated) => Interlocked.CompareExchange(ref Value, updated, expected) == expected; +} diff --git a/src/Orleans.Core/Caching/Internal/PaddedQueueCount.cs b/src/Orleans.Core/Caching/Internal/PaddedQueueCount.cs new file mode 100644 index 00000000000..82debf2673d --- /dev/null +++ b/src/Orleans.Core/Caching/Internal/PaddedQueueCount.cs @@ -0,0 +1,14 @@ +using System.Diagnostics; +using System.Runtime.InteropServices; + +namespace Orleans.Caching.Internal; + +// Source: https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/Lru/PaddedQueueCount.cs +[DebuggerDisplay("Hot = {Hot}, Warm = {Warm}, Cold = {Cold}")] +[StructLayout(LayoutKind.Explicit, Size = 4 * Padding.CACHE_LINE_SIZE)] // padding before/between/after fields +internal struct PaddedQueueCount +{ + [FieldOffset(1 * Padding.CACHE_LINE_SIZE)] public int Hot; + [FieldOffset(2 * Padding.CACHE_LINE_SIZE)] public int Warm; + [FieldOffset(3 * Padding.CACHE_LINE_SIZE)] public int Cold; +} diff --git a/src/Orleans.Core/Caching/Internal/Padding.cs b/src/Orleans.Core/Caching/Internal/Padding.cs new file mode 100644 index 00000000000..33223df63f2 --- /dev/null +++ b/src/Orleans.Core/Caching/Internal/Padding.cs @@ -0,0 +1,11 @@ +namespace Orleans.Caching.Internal; + +// Source: https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/Padding.cs +internal static class Padding +{ +#if TARGET_ARM64 || TARGET_LOONGARCH64 + internal const int CACHE_LINE_SIZE = 128; +#else + internal const int CACHE_LINE_SIZE = 64; +#endif +} diff --git a/src/Orleans.Core/Caching/Internal/Striped64.cs b/src/Orleans.Core/Caching/Internal/Striped64.cs new file mode 100644 index 00000000000..3eede1724f1 --- /dev/null +++ b/src/Orleans.Core/Caching/Internal/Striped64.cs @@ -0,0 +1,277 @@ +#nullable enable +using System; +using System.Diagnostics.CodeAnalysis; +using System.Threading; + +/* + 
* Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +namespace Orleans.Caching.Internal; + +/* + * This class maintains a lazily-initialized table of atomically + * updated variables, plus an extra "base" field. The table size + * is a power of two. Indexing uses masked per-thread hash codes. + * Nearly all declarations in this class are package-private, + * accessed directly by subclasses. + * + * Table entries are of class Cell; a variant of AtomicLong padded + * to reduce cache contention on most processors. Padding is + * overkill for most Atomics because they are usually irregularly + * scattered in memory and thus don't interfere much with each + * other. But Atomic objects residing in arrays will tend to be + * placed adjacent to each other, and so will most often share + * cache lines (with a huge negative performance impact) without + * this precaution. + * + * In part because Cells are relatively large, we avoid creating + * them until they are needed. When there is no contention, all + * updates are made to the base field. Upon first contention (a + * failed CAS on base update), the table is initialized to size 2. + * The table size is doubled upon further contention until + * reaching the nearest power of two greater than or equal to the + * number of CPUS. Table slots remain empty (null) until they are + * needed. + * + * A single spinlock ("busy") is used for initializing and + * resizing the table, as well as populating slots with new Cells. + * There is no need for a blocking lock; when the lock is not + * available, threads try other slots (or the base). During these + * retries, there is increased contention and reduced locality, + * which is still better than alternatives. + * + * Per-thread hash codes are initialized to random values. 
+ * Contention and/or table collisions are indicated by failed + * CASes when performing an update operation (see method + * retryUpdate). Upon a collision, if the table size is less than + * the capacity, it is doubled in size unless some other thread + * holds the lock. If a hashed slot is empty, and lock is + * available, a new Cell is created. Otherwise, if the slot + * exists, a CAS is tried. Retries proceed by "double hashing", + * using a secondary hash (Marsaglia XorShift) to try to find a + * free slot. + * + * The table size is capped because, when there are more threads + * than CPUs, supposing that each thread were bound to a CPU, + * there would exist a perfect hash function mapping threads to + * slots that eliminates collisions. When we reach capacity, we + * search for this mapping by randomly varying the hash codes of + * colliding threads. Because search is random, and collisions + * only become known via CAS failures, convergence can be slow, + * and because threads are typically not bound to CPUS forever, + * may not occur at all. However, despite these limitations, + * observed contention rates are typically low in these cases. + * + * It is possible for a Cell to become unused when threads that + * once hashed to it terminate, as well as in the case where + * doubling the table causes no thread to hash to it under + * expanded mask. We do not try to detect or remove such cells, + * under the assumption that for long-running instances, observed + * contention levels will recur, so the cells will eventually be + * needed again; and for short-lived ones, it does not matter. + */ + +/// +/// Maintains a lazily-initialized table of atomically updated variables, plus an extra +/// "base" field. The table size is a power of two. Indexing uses masked thread IDs. 
+/// +// Source: https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/Counters/Striped64.cs +[ExcludeFromCodeCoverage] +internal abstract class Striped64 +{ + // Number of CPUS, to place bound on table size + private static readonly int MaxBuckets = Environment.ProcessorCount * 4; + + /// + /// The base value used mainly when there is no contention, but also as a fallback + /// during table initialization races. Updated via CAS. + /// + protected PaddedLong @base; + + /// + /// When non-null, size is a power of 2. + /// + protected Cell[]? cells; + private int _cellsBusy; + + /// + /// A wrapper for PaddedLong. + /// + protected sealed class Cell + { + /// + /// The value of the cell. + /// + public PaddedLong Value; + + /// + /// Initializes a new cell with the specified value. + /// + /// The value. + public Cell(long x) + { + Value = new PaddedLong() { Value = x }; + } + } + + /** + * CASes the cellsBusy field from 0 to 1 to acquire lock. + */ + private bool CasCellsBusy() => Interlocked.CompareExchange(ref _cellsBusy, 1, 0) == 0; + + private void VolatileWriteNotBusy() => Volatile.Write(ref _cellsBusy, 0); + + /** + * Returns the probe value for the current thread. + * Duplicated from ThreadLocalRandom because of packaging restrictions. + */ + protected static int GetProbe() => + // Note: this results in higher throughput than introducing a random. + Environment.CurrentManagedThreadId; + + /** + * Pseudo-randomly advances and records the given probe value for the + * given thread. + * Duplicated from ThreadLocalRandom because of packaging restrictions. + */ + private static int AdvanceProbe(int probe) + { + // xorshift + probe ^= probe << 13; + probe ^= (int)((uint)probe >> 17); + probe ^= probe << 5; + return probe; + } + + /** + * Handles cases of updates involving initialization, resizing, + * creating new Cells, and/or contention. See above for + * explanation. 
This method suffers the usual non-modularity + * problems of optimistic retry code, relying on rechecked sets of + * reads. + * + * @param x the value + * @param wasUncontended false if CAS failed before call + */ + protected void LongAccumulate(long x, bool wasUncontended) + { + var h = GetProbe(); + + // True if last slot nonempty + var collide = false; + while (true) + { + Cell[]? @as; Cell a; int n; long v; + if ((@as = cells) != null && (n = @as.Length) > 0) + { + if ((a = @as[n - 1 & h]) == null) + { + if (_cellsBusy == 0) + { + // Try to attach new Cell + // Optimistically create + var r = new Cell(x); + if (_cellsBusy == 0 && CasCellsBusy()) + { + try + { + // Recheck under lock + Cell[]? rs; int m, j; + if ((rs = cells) != null && + (m = rs.Length) > 0 && + rs[j = m - 1 & h] == null) + { + rs[j] = r; + break; + } + } + finally + { + VolatileWriteNotBusy(); + } + + // Slot is now non-empty + continue; + } + } + collide = false; + } + else if (!wasUncontended) + { + // CAS already known to fail + // Continue after rehash + wasUncontended = true; + } + else if (a.Value.CompareAndSwap(v = a.Value.VolatileRead(), v + x)) + { + break; + } + else if (n >= MaxBuckets || cells != @as) + { + // At max size or stale + collide = false; + } + else if (!collide) + { + collide = true; + } + else if (_cellsBusy == 0 && CasCellsBusy()) + { + try + { + if (cells == @as) + { + // Expand table unless stale + var rs = new Cell[n << 1]; + for (var i = 0; i < n; ++i) + { + rs[i] = @as[i]; + } + + cells = rs; + } + } + finally + { + VolatileWriteNotBusy(); + } + + collide = false; + + // Retry with expanded table + continue; + } + + // Rehash + h = AdvanceProbe(h); + } + else if (_cellsBusy == 0 && cells == @as && CasCellsBusy()) + { + try + { + // Initialize table + if (cells == @as) + { + var rs = new Cell[2]; + rs[h & 1] = new Cell(x); + cells = rs; + break; + } + } + finally + { + VolatileWriteNotBusy(); + } + } + + // Fall back on using base + else if (@base.CompareAndSwap(v 
= @base.VolatileRead(), v + x)) + { + break; + } + } + } +} diff --git a/src/Orleans.Core/Caching/Internal/TypeProps.cs b/src/Orleans.Core/Caching/Internal/TypeProps.cs new file mode 100644 index 00000000000..80ed8ca728d --- /dev/null +++ b/src/Orleans.Core/Caching/Internal/TypeProps.cs @@ -0,0 +1,45 @@ +using System; + +namespace Orleans.Caching.Internal; + +// https://source.dot.net/#System.Collections.Concurrent/System/Collections/Concurrent/ConcurrentDictionary.cs,2293 +internal static class TypeProps +{ + /// Whether T's type can be written atomically (i.e., with no danger of torn reads). + internal static readonly bool IsWriteAtomic = IsWriteAtomicPrivate(); + + private static bool IsWriteAtomicPrivate() + { + // Section 12.6.6 of ECMA CLI explains which types can be read and written atomically without + // the risk of tearing. See https://www.ecma-international.org/publications/files/ECMA-ST/ECMA-335.pdf + + if (!typeof(T).IsValueType || + typeof(T) == typeof(nint) || + typeof(T) == typeof(nuint)) + { + return true; + } + + switch (Type.GetTypeCode(typeof(T))) + { + case TypeCode.Boolean: + case TypeCode.Byte: + case TypeCode.Char: + case TypeCode.Int16: + case TypeCode.Int32: + case TypeCode.SByte: + case TypeCode.Single: + case TypeCode.UInt16: + case TypeCode.UInt32: + return true; + + case TypeCode.Double: + case TypeCode.Int64: + case TypeCode.UInt64: + return nint.Size == 8; + + default: + return false; + } + } +} diff --git a/src/Orleans.Core/Messaging/CachingIdSpanCodec.cs b/src/Orleans.Core/Messaging/CachingIdSpanCodec.cs index ef142e66810..212de5d858d 100644 --- a/src/Orleans.Core/Messaging/CachingIdSpanCodec.cs +++ b/src/Orleans.Core/Messaging/CachingIdSpanCodec.cs @@ -4,6 +4,7 @@ using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using Orleans.Serialization.Buffers; +using Orleans.Caching; namespace Orleans.Runtime.Messaging { @@ -12,7 +13,7 @@ namespace Orleans.Runtime.Messaging /// internal sealed class 
CachingIdSpanCodec { - private static readonly LRU SharedCache = new(maxSize: 128_000, maxAge: TimeSpan.FromHours(1)); + private static readonly ConcurrentLruCache SharedCache = new(capacity: 128_000); // Purge entries which have not been accessed in over 2 minutes. private const long PurgeAfterMilliseconds = 2 * 60 * 1000; @@ -55,7 +56,7 @@ public IdSpan ReadRaw(ref Reader reader) result = IdSpan.UnsafeCreate(payloadArray ?? payloadSpan.ToArray(), hashCode); // Before adding this value to the private cache and returning it, intern it via the shared cache to hopefully reduce duplicates. - result = SharedCache.GetOrAdd(result, static (_, key) => key, (object)null); + result = SharedCache.GetOrAdd(result, static (key, _) => key, (object)null); // Update the cache. If there is a hash collision, the last entry wins. cacheEntry.Value = IdSpan.UnsafeGetArray(result); @@ -88,7 +89,7 @@ private void PurgeStaleEntries() public void WriteRaw(ref Writer writer, IdSpan value) where TBufferWriter : IBufferWriter { IdSpanCodec.WriteRaw(ref writer, value); - SharedCache.GetOrAdd(value, static (_, key) => key, (object)null); + SharedCache.GetOrAdd(value, static (key, _) => key, (object)null); } } } diff --git a/src/Orleans.Core/Messaging/CachingSiloAddressCodec.cs b/src/Orleans.Core/Messaging/CachingSiloAddressCodec.cs index e9a595e61ba..04e9b3c3a79 100644 --- a/src/Orleans.Core/Messaging/CachingSiloAddressCodec.cs +++ b/src/Orleans.Core/Messaging/CachingSiloAddressCodec.cs @@ -3,6 +3,7 @@ using System.Collections.Generic; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; +using Orleans.Caching; using Orleans.Serialization.Buffers; using Orleans.Serialization.Codecs; @@ -13,7 +14,7 @@ namespace Orleans.Runtime.Messaging /// internal sealed class CachingSiloAddressCodec { - internal static LRU SharedCache { get; } = new(maxSize: 128_000, maxAge: TimeSpan.FromHours(1)); + internal static ConcurrentLruCache SharedCache { get; } = new(capacity: 1024); // 
Purge entries which have not been accessed in over 2 minutes. private const long PurgeAfterMilliseconds = 2 * 60 * 1000; @@ -62,7 +63,7 @@ public SiloAddress ReadRaw(ref Reader reader) // Before adding this value to the private cache and returning it, intern it via the shared cache to hopefully reduce duplicates. payloadArray ??= payloadSpan.ToArray(); - (result, payloadArray) = SharedCache.GetOrAdd(result, static (encoded, key) => (key, encoded), payloadArray); + (result, payloadArray) = SharedCache.GetOrAdd(result, static (key, encoded) => (key, encoded), payloadArray); // If there is a hash collision, then the last seen entry will always win. cacheEntry.Encoded = payloadArray; @@ -141,7 +142,7 @@ public void WriteRaw(ref Writer writer, SiloAddres innerWriter.Dispose(); // Before adding this value to the private cache, intern it via the shared cache to hopefully reduce duplicates. - (_, payloadArray) = SharedCache.GetOrAdd(value, static (encoded, key) => (key, encoded), payloadArray); + (_, payloadArray) = SharedCache.GetOrAdd(value, static (key, encoded) => (key, encoded), payloadArray); // If there is a hash collision, then the last seen entry will always win. cacheEntry.Encoded = payloadArray; diff --git a/src/Orleans.Core/Utils/LRU.cs b/src/Orleans.Core/Utils/LRU.cs deleted file mode 100644 index 12a1fd1239c..00000000000 --- a/src/Orleans.Core/Utils/LRU.cs +++ /dev/null @@ -1,259 +0,0 @@ -using System; -using System.Collections; -using System.Collections.Concurrent; -using System.Collections.Generic; -using System.Linq; -using System.Threading; - -namespace Orleans.Runtime -{ - // This class implements an LRU (Least Recently Used) cache of values. It keeps a bounded set of values and will - // flush "old" values - internal sealed class LRU : IEnumerable> - { - // The following machinery is used to notify client objects when a key and its value - // is being flushed from the cache. 
- // The client's event handler is called after the key has been removed from the cache, - // but when the cache is in a consistent state so that other methods on the cache may freely - // be invoked. - public event Action RaiseFlushEvent; - - private long nextGeneration = 0; - private long generationToFree = 0; - private readonly TimeSpan requiredFreshness; - // We want this to be a reference type so that we can update the values in the cache - // without having to call AddOrUpdate, which is a nuisance - private sealed class TimestampedValue : IEquatable - { - public readonly TValue Value; - public CoarseStopwatch Age; - public long Generation; - - public TimestampedValue(TValue v, long generation) - { - Generation = generation; - Value = v; - Age = CoarseStopwatch.StartNew(); - } - - public override bool Equals(object obj) => obj is TimestampedValue value && Equals(value); - public bool Equals(TimestampedValue other) => ReferenceEquals(this, other) || Generation == other.Generation && EqualityComparer.Default.Equals(Value, other.Value); - public override int GetHashCode() => HashCode.Combine(Value, Generation); - } - - private readonly ConcurrentDictionary cache = new(); - private int count; - - public int Count => count; - public int MaximumSize { get; } - - /// - /// Creates a new LRU (Least Recently Used) cache. - /// - /// Maximum number of entries to allow. - /// Maximum age of an entry. 
- public LRU(int maxSize, TimeSpan maxAge) - { - if (maxSize <= 0) - { - throw new ArgumentOutOfRangeException(nameof(maxSize), "LRU maxSize must be greater than 0"); - } - MaximumSize = maxSize; - requiredFreshness = maxAge; - } - - public TValue GetOrAdd(TKey key, Func addFunc, TState state) - { - var generation = GetNewGeneration(); - var storedValue = cache.AddOrUpdate( - key, - static (key, state) => - { - var (_, outerState, addFunc, generation) = state; - return new TimestampedValue(addFunc(outerState, key), generation); - }, - static (key, existing, state) => - { - var (self, _, _, _) = state; - existing.Age.Restart(); - existing.Generation = self.GetNewGeneration(); - return existing; - }, - (Self: this, State: state, AddFunc: addFunc, Generation: generation)); - - var result = storedValue.Value; - - if (storedValue.Generation == generation) - { - Interlocked.Increment(ref count); - AdjustSize(); - } - - return result; - } - - public TValue AddOrUpdate(TKey key, Func addFunc, TState state) - { - var generation = GetNewGeneration(); - var storedValue = cache.AddOrUpdate( - key, - static (key, state) => - { - var (_, outerState, addFunc, generation) = state; - return new TimestampedValue(addFunc(outerState, key), generation); - }, - static (key, existing, state) => - { - var (self, outerState, addFunc, generation) = state; - return new TimestampedValue(addFunc(outerState, key), self.GetNewGeneration()); - }, - (Self: this, State: state, AddFunc: addFunc, Generation: generation)); - - var result = storedValue.Value; - - if (storedValue.Generation == generation) - { - Interlocked.Increment(ref count); - AdjustSize(); - } - - return result; - } - - public void Add(TKey key, TValue value) - { - GetOrAdd(key, static (value, key) => value, value); - } - - public void AddOrUpdate(TKey key, TValue value) - { - AddOrUpdate(key, static (value, key) => value, value); - } - - public bool ContainsKey(TKey key) => cache.ContainsKey(key); - - public bool RemoveKey(TKey 
key) - { - if (!cache.TryRemove(key, out _)) return false; - - Interlocked.Decrement(ref count); - return true; - } - - public bool TryRemove(TKey key, Func predicate, T context) - { - if (!cache.TryGetValue(key, out var timestampedValue)) - { - return false; - } - - if (predicate(context, timestampedValue.Value) && cache.TryRemove(KeyValuePair.Create(key, timestampedValue))) - { - Interlocked.Decrement(ref count); - return true; - } - - return false; - } - - private long GetNewGeneration() => Interlocked.Increment(ref nextGeneration); - - public void Clear() - { - if (RaiseFlushEvent is { } FlushEvent) - { - foreach (var _ in cache) FlushEvent(); - } - - // not thread-safe: if anything is added, or even removed after addition, between Clear and Count, count may be off - cache.Clear(); - Interlocked.Exchange(ref count, 0); - } - - public bool TryGetValue(TKey key, out TValue value) - { - if (cache.TryGetValue(key, out var result)) - { - var age = result.Age.Elapsed; - if (age > requiredFreshness) - { - if (RemoveKey(key)) RaiseFlushEvent?.Invoke(); - } - else - { - result.Age.Restart(); - result.Generation = GetNewGeneration(); - value = result.Value; - return true; - } - } - - value = default; - return false; - } - - public TValue Get(TKey key) - { - TryGetValue(key, out var value); - return value; - } - - /// - /// Remove all expired values from the LRU (Least Recently Used) instance. 
- /// - public void RemoveExpired() - { - foreach (var entry in this.cache) - { - if (entry.Value.Age.Elapsed > requiredFreshness) - { - if (RemoveKey(entry.Key)) RaiseFlushEvent?.Invoke(); - } - } - } - - private void AdjustSize() - { - if (Count <= MaximumSize) - { - return; - } - - RemoveExpired(); - - var minGeneration = long.MaxValue; - while (Count > MaximumSize) - { - var targetGeneration = Interlocked.Increment(ref generationToFree); - - foreach (var e in cache) - { - var entryGeneration = e.Value.Generation; - if (minGeneration > entryGeneration) - { - minGeneration = entryGeneration; - } - - if (entryGeneration <= targetGeneration) - { - if (RemoveKey(e.Key)) RaiseFlushEvent?.Invoke(); - } - } - - // Skip forward to the minimum present generation. - var diff = minGeneration - generationToFree - 1; - if (minGeneration < long.MaxValue && diff > 0) - { - Interlocked.Add(ref generationToFree, diff); - } - } - } - - public IEnumerator> GetEnumerator() - { - return cache.Select(p => new KeyValuePair(p.Key, p.Value.Value)).GetEnumerator(); - } - - IEnumerator IEnumerable.GetEnumerator() => GetEnumerator(); - } -} diff --git a/src/Orleans.Runtime/Configuration/Options/GrainDirectoryOptions.cs b/src/Orleans.Runtime/Configuration/Options/GrainDirectoryOptions.cs index 4043ad172f0..dc9ffcb47ec 100644 --- a/src/Orleans.Runtime/Configuration/Options/GrainDirectoryOptions.cs +++ b/src/Orleans.Runtime/Configuration/Options/GrainDirectoryOptions.cs @@ -26,14 +26,14 @@ public enum CachingStrategyType /// The options are None, which means don't cache directory entries locally; /// LRU, which indicates that a standard fixed-size least recently used strategy should be used; and /// Adaptive, which indicates that an adaptive strategy with a fixed maximum size should be used. - /// The Adaptive strategy is used by default. + /// The LRU strategy is used by default. 
/// public CachingStrategyType CachingStrategy { get; set; } = DEFAULT_CACHING_STRATEGY; /// /// The default value for . /// - public const CachingStrategyType DEFAULT_CACHING_STRATEGY = CachingStrategyType.Adaptive; + public const CachingStrategyType DEFAULT_CACHING_STRATEGY = CachingStrategyType.LRU; /// /// Gets or sets the maximum number of grains to cache directory information for. @@ -43,7 +43,7 @@ public enum CachingStrategyType /// /// The default value for . /// - public const int DEFAULT_CACHE_SIZE = 1000000; + public const int DEFAULT_CACHE_SIZE = 1_000_000; /// /// Gets or sets the initial (minimum) time, in seconds, to keep a cache entry before revalidating. diff --git a/src/Orleans.Runtime/GrainDirectory/AdaptiveGrainDirectoryCache.cs b/src/Orleans.Runtime/GrainDirectory/AdaptiveGrainDirectoryCache.cs index 14f6b659f69..369640e368f 100644 --- a/src/Orleans.Runtime/GrainDirectory/AdaptiveGrainDirectoryCache.cs +++ b/src/Orleans.Runtime/GrainDirectory/AdaptiveGrainDirectoryCache.cs @@ -1,6 +1,7 @@ using System; using System.Collections.Generic; using System.Text; +using Orleans.Caching; using Orleans.Internal; namespace Orleans.Runtime.GrainDirectory @@ -41,9 +42,9 @@ internal void Refresh(TimeSpan newExpirationTimer) } } - private static readonly Func ActivationAddressesMatches = (addr, entry) => GrainAddress.MatchesGrainIdAndSilo(addr, entry.Address); + private static readonly Func ActivationAddressesMatches = (entry, addr) => GrainAddress.MatchesGrainIdAndSilo(addr, entry.Address); - private readonly LRU cache; + private readonly ConcurrentLruCache cache; /// controls the time the new entry is considered "fresh" (unit: ms) private readonly TimeSpan initialExpirationTimer; /// controls the exponential growth factor (i.e., x2, x4) for the freshness timer (unit: none) @@ -59,7 +60,7 @@ internal void Refresh(TimeSpan newExpirationTimer) public AdaptiveGrainDirectoryCache(TimeSpan initialExpirationTimer, TimeSpan maxExpirationTimer, double 
exponentialTimerGrowth, int maxCacheSize) { - cache = new(maxCacheSize, TimeSpan.MaxValue); + cache = new(maxCacheSize); this.initialExpirationTimer = initialExpirationTimer; this.maxExpirationTimer = maxExpirationTimer; @@ -76,7 +77,7 @@ public void AddOrUpdate(GrainAddress value, int version) cache.AddOrUpdate(value.GrainId, entry); } - public bool Remove(GrainId key) => cache.RemoveKey(key); + public bool Remove(GrainId key) => cache.TryRemove(key); public bool Remove(GrainAddress key) => cache.TryRemove(key.GrainId, ActivationAddressesMatches, key); @@ -89,7 +90,7 @@ public bool LookUp(GrainId key, out GrainAddress result, out int version) // Here we do not check whether the found entry is expired. // It will be done by the thread managing the cache. // This is to avoid situation where the entry was just expired, but the manager still have not run and have not refreshed it. - if (!cache.TryGetValue(key, out var tmp)) + if (!cache.TryGet(key, out var tmp)) { result = default; version = default; @@ -117,7 +118,7 @@ public bool LookUp(GrainId key, out GrainAddress result, out int version) public bool MarkAsFresh(GrainId key) { GrainDirectoryCacheEntry result; - if (!cache.TryGetValue(key, out result)) return false; + if (!cache.TryGet(key, out result)) return false; TimeSpan newExpirationTimer = StandardExtensions.Min(maxExpirationTimer, result.ExpirationTimer.Multiply(exponentialTimerGrowth)); result.Refresh(newExpirationTimer); @@ -145,7 +146,7 @@ public override string ToString() LastNumHits = NumHits; sb.Append("Adaptive cache statistics:").AppendLine(); - sb.AppendFormat(" Cache size: {0} entries ({1} maximum)", cache.Count, cache.MaximumSize).AppendLine(); + sb.AppendFormat(" Cache size: {0} entries ({1} maximum)", cache.Count, cache.Capacity).AppendLine(); sb.AppendFormat(" Since last call:").AppendLine(); sb.AppendFormat(" Accesses: {0}", curNumAccesses); sb.AppendFormat(" Hits: {0}", curNumHits); diff --git 
a/src/Orleans.Runtime/GrainDirectory/GrainDirectoryCacheFactory.cs b/src/Orleans.Runtime/GrainDirectory/GrainDirectoryCacheFactory.cs index 100982050f3..6ec31feb2b6 100644 --- a/src/Orleans.Runtime/GrainDirectory/GrainDirectoryCacheFactory.cs +++ b/src/Orleans.Runtime/GrainDirectory/GrainDirectoryCacheFactory.cs @@ -27,7 +27,7 @@ public static IGrainDirectoryCache CreateGrainDirectoryCache(IServiceProvider se case GrainDirectoryOptions.CachingStrategyType.None: return new NullGrainDirectoryCache(); case GrainDirectoryOptions.CachingStrategyType.LRU: - return new LRUBasedGrainDirectoryCache(options.CacheSize, options.MaximumCacheTTL); + return new LruGrainDirectoryCache(options.CacheSize); case GrainDirectoryOptions.CachingStrategyType.Adaptive: return new AdaptiveGrainDirectoryCache(options.InitialCacheTTL, options.MaximumCacheTTL, options.CacheTTLExtensionFactor, options.CacheSize); case GrainDirectoryOptions.CachingStrategyType.Custom: @@ -45,7 +45,7 @@ internal static IGrainDirectoryCache CreateCustomGrainDirectoryCache(IServicePro } else { - return new LRUBasedGrainDirectoryCache(options.CacheSize, options.MaximumCacheTTL); + return new LruGrainDirectoryCache(options.CacheSize); } } diff --git a/src/Orleans.Runtime/GrainDirectory/LRUBasedGrainDirectoryCache.cs b/src/Orleans.Runtime/GrainDirectory/LRUBasedGrainDirectoryCache.cs deleted file mode 100644 index 84845a78825..00000000000 --- a/src/Orleans.Runtime/GrainDirectory/LRUBasedGrainDirectoryCache.cs +++ /dev/null @@ -1,51 +0,0 @@ -using System; -using System.Collections.Generic; - - -namespace Orleans.Runtime.GrainDirectory -{ - internal class LRUBasedGrainDirectoryCache : IGrainDirectoryCache - { - private static readonly Func ActivationAddressesMatch = (a, b) => GrainAddress.MatchesGrainIdAndSilo(a, b.Address); - private readonly LRU cache; - - public LRUBasedGrainDirectoryCache(int maxCacheSize, TimeSpan maxEntryAge) => cache = new(maxCacheSize, maxEntryAge); - - public void AddOrUpdate(GrainAddress 
activationAddress, int version) - { - // ignore the version number - cache.AddOrUpdate(activationAddress.GrainId, (activationAddress, version)); - } - - public bool Remove(GrainId key) => cache.RemoveKey(key); - - public bool Remove(GrainAddress grainAddress) => cache.TryRemove(grainAddress.GrainId, ActivationAddressesMatch, grainAddress); - - public void Clear() => cache.Clear(); - - public bool LookUp(GrainId key, out GrainAddress result, out int version) - { - if (cache.TryGetValue(key, out var entry)) - { - version = entry.Version; - result = entry.ActivationAddress; - return true; - } - - version = default; - result = default; - return false; - } - - public IEnumerable<(GrainAddress ActivationAddress, int Version)> KeyValues - { - get - { - foreach (var entry in cache) - { - yield return (entry.Value.ActivationAddress, entry.Value.Version); - } - } - } - } -} diff --git a/src/Orleans.Runtime/GrainDirectory/LruGrainDirectoryCache.cs b/src/Orleans.Runtime/GrainDirectory/LruGrainDirectoryCache.cs new file mode 100644 index 00000000000..b95f92168e9 --- /dev/null +++ b/src/Orleans.Runtime/GrainDirectory/LruGrainDirectoryCache.cs @@ -0,0 +1,41 @@ +using System; +using System.Collections.Generic; +using Orleans.Caching; + +namespace Orleans.Runtime.GrainDirectory; + +internal sealed class LruGrainDirectoryCache(int maxCacheSize) : ConcurrentLruCache(capacity: maxCacheSize), IGrainDirectoryCache +{ + private static readonly Func<(GrainAddress Address, int Version), GrainAddress, bool> ActivationAddressesMatch = (value, state) => GrainAddress.MatchesGrainIdAndSilo(state, value.Address); + + public void AddOrUpdate(GrainAddress activationAddress, int version) => AddOrUpdate(activationAddress.GrainId, (activationAddress, version)); + + public bool Remove(GrainId key) => TryRemove(key); + + public bool Remove(GrainAddress grainAddress) => TryRemove(grainAddress.GrainId, ActivationAddressesMatch, grainAddress); + + public bool LookUp(GrainId key, out GrainAddress result, 
out int version) + { + if (TryGet(key, out var entry)) + { + version = entry.Version; + result = entry.ActivationAddress; + return true; + } + + version = default; + result = default; + return false; + } + + public IEnumerable<(GrainAddress ActivationAddress, int Version)> KeyValues + { + get + { + foreach (var entry in this) + { + yield return (entry.Value.ActivationAddress, entry.Value.Version); + } + } + } +} diff --git a/src/Orleans.Runtime/Utilities/StripedMpscBuffer.cs b/src/Orleans.Runtime/Utilities/StripedMpscBuffer.cs index 57420c90d68..3633797c137 100644 --- a/src/Orleans.Runtime/Utilities/StripedMpscBuffer.cs +++ b/src/Orleans.Runtime/Utilities/StripedMpscBuffer.cs @@ -417,7 +417,7 @@ internal struct PaddedHeadAndTail [FieldOffset(2 * Padding.CACHE_LINE_SIZE)] public int Tail; } -internal class Padding +internal static class Padding { #if TARGET_ARM64 || TARGET_LOONGARCH64 internal const int CACHE_LINE_SIZE = 128; diff --git a/test/NonSilo.Tests/Caching/ConcurrentLruSoakTests.cs b/test/NonSilo.Tests/Caching/ConcurrentLruSoakTests.cs new file mode 100644 index 00000000000..1ea77f0f5a1 --- /dev/null +++ b/test/NonSilo.Tests/Caching/ConcurrentLruSoakTests.cs @@ -0,0 +1,463 @@ +using System.Collections.Concurrent; +using System.Reflection; +using FluentAssertions; +using Orleans.Caching; +using Orleans.Caching.Internal; +using Xunit; +using Xunit.Abstractions; + +namespace NonSilo.Tests.Caching; + +[TestCategory("BVT")] +public sealed class ConcurrentLruCacheSoakTests +{ + private readonly ITestOutputHelper testOutputHelper; + private const int HotCap = 3; + private const int WarmCap = 3; + private const int ColdCap = 3; + + private const int Capacity = HotCap + WarmCap + ColdCap; + + private ConcurrentLruCache lru = new ConcurrentLruCache(Capacity, EqualityComparer.Default); + + public ConcurrentLruCacheSoakTests(ITestOutputHelper testOutputHelper) + { + this.testOutputHelper = testOutputHelper; + } + + [Fact] + public async Task 
WhenSoakConcurrentGetCacheEndsInConsistentState() + { + for (var i = 0; i < 10; i++) + { + await Threaded.Run(4, () => + { + for (var i = 0; i < 100000; i++) + { + lru.GetOrAdd(i + 1, i => i.ToString()); + } + }); + + testOutputHelper.WriteLine($"{lru.HotCount} {lru.WarmCount} {lru.ColdCount}"); + testOutputHelper.WriteLine(string.Join(" ", lru.Keys)); + + // allow +/- 1 variance for capacity + lru.Count.Should().BeInRange(7, 10); + RunIntegrityCheck(); + } + } + + [Fact] + public async Task WhenSoakConcurrentGetWithArgCacheEndsInConsistentState() + { + for (var i = 0; i < 10; i++) + { + await Threaded.Run(4, () => + { + for (var i = 0; i < 100000; i++) + { + // use the arg overload + lru.GetOrAdd(i + 1, (i, s) => i.ToString(), "Foo"); + } + }); + + testOutputHelper.WriteLine($"{lru.HotCount} {lru.WarmCount} {lru.ColdCount}"); + testOutputHelper.WriteLine(string.Join(" ", lru.Keys)); + + // allow +/- 1 variance for capacity + lru.Count.Should().BeInRange(7, 10); + RunIntegrityCheck(); + } + } + + [Fact] + public async Task WhenSoakConcurrentGetAndRemoveCacheEndsInConsistentState() + { + for (var i = 0; i < 10; i++) + { + await Threaded.Run(4, () => + { + for (var i = 0; i < 100000; i++) + { + lru.TryRemove(i + 1); + lru.GetOrAdd(i + 1, i => i.ToString()); + } + }); + + testOutputHelper.WriteLine($"{lru.HotCount} {lru.WarmCount} {lru.ColdCount}"); + testOutputHelper.WriteLine(string.Join(" ", lru.Keys)); + + RunIntegrityCheck(); + } + } + + [Fact] + public async Task WhenSoakConcurrentGetAndRemoveKvpCacheEndsInConsistentState() + { + for (var i = 0; i < 10; i++) + { + await Threaded.Run(4, () => + { + for (var i = 0; i < 100000; i++) + { + lru.TryRemove(new KeyValuePair(i + 1, (i + 1).ToString())); + lru.GetOrAdd(i + 1, i => i.ToString()); + } + }); + + testOutputHelper.WriteLine($"{lru.HotCount} {lru.WarmCount} {lru.ColdCount}"); + testOutputHelper.WriteLine(string.Join(" ", lru.Keys)); + + RunIntegrityCheck(); + } + } + + [Fact] + public async Task 
WhenSoakConcurrentGetAndUpdateCacheEndsInConsistentState() + { + for (var i = 0; i < 10; i++) + { + await Threaded.Run(4, () => + { + for (var i = 0; i < 100000; i++) + { + lru.TryUpdate(i + 1, i.ToString()); + lru.GetOrAdd(i + 1, i => i.ToString()); + } + }); + + testOutputHelper.WriteLine($"{lru.HotCount} {lru.WarmCount} {lru.ColdCount}"); + testOutputHelper.WriteLine(string.Join(" ", lru.Keys)); + + RunIntegrityCheck(); + } + } + + [Fact] + public async Task WhenSoakConcurrentGetAndAddCacheEndsInConsistentState() + { + for (var i = 0; i < 10; i++) + { + await Threaded.Run(4, () => + { + for (var i = 0; i < 100000; i++) + { + lru.AddOrUpdate(i + 1, i.ToString()); + lru.GetOrAdd(i + 1, i => i.ToString()); + } + }); + + testOutputHelper.WriteLine($"{lru.HotCount} {lru.WarmCount} {lru.ColdCount}"); + testOutputHelper.WriteLine(string.Join(" ", lru.Keys)); + + RunIntegrityCheck(); + } + } + + [Fact] + public async Task WhenSoakConcurrentGetAndUpdateValueTypeCacheEndsInConsistentState() + { + var lruVT = new ConcurrentLruCache(Capacity, EqualityComparer.Default); + + for (var i = 0; i < 10; i++) + { + await Threaded.Run(4, () => + { + var b = new byte[8]; + for (var i = 0; i < 100000; i++) + { + lruVT.TryUpdate(i + 1, new Guid(i, 0, 0, b)); + lruVT.GetOrAdd(i + 1, x => new Guid(x, 0, 0, b)); + } + }); + + testOutputHelper.WriteLine($"{lruVT.HotCount} {lruVT.WarmCount} {lruVT.ColdCount}"); + testOutputHelper.WriteLine(string.Join(" ", lruVT.Keys)); + + new ConcurrentLruCacheIntegrityChecker(lruVT).Validate(); + } + } + + [Fact] + public async Task WhenAddingCacheSizeItemsNothingIsEvicted() + { + const int size = 1024; + + var cache = new ConcurrentLruCache(size); + + await Threaded.Run(4, () => + { + for (var i = 0; i < size; i++) + { + cache.GetOrAdd(i, k => k); + } + }); + + cache.Metrics.Evicted.Should().Be(0); + } + + [Fact] + public async Task WhenConcurrentUpdateAndRemoveKvp() + { + var tcs = new TaskCompletionSource(); + + var removal = Task.Run(() => + { + 
while (!tcs.Task.IsCompleted) + { + lru.TryRemove(new KeyValuePair(5, "x")); + } + }); + + for (var i = 0; i < 100_000; i++) + { + lru.AddOrUpdate(5, "a"); + lru.TryGet(5, out _).Should().BeTrue("key 'a' should not be deleted"); + lru.AddOrUpdate(5, "x"); + } + + tcs.SetResult(int.MaxValue); + + await removal; + } + + [Theory] + [Repeat(10)] + public async Task WhenConcurrentGetAndClearCacheEndsInConsistentState(int iteration) + { + await Threaded.Run(4, r => + { + for (var i = 0; i < 100000; i++) + { + // clear 6,250 times per 1_000_000 iters + if (r == 0 && (i & 15) == 15) + { + lru.Clear(); + } + + lru.GetOrAdd(i + 1, i => i.ToString()); + } + }); + + testOutputHelper.WriteLine($"{iteration} {lru.HotCount} {lru.WarmCount} {lru.ColdCount}"); + testOutputHelper.WriteLine(string.Join(" ", lru.Keys)); + + RunIntegrityCheck(); + } + + [Theory] + [Repeat(10)] + public async Task WhenConcurrentGetAndClearDuringWarmupCacheEndsInConsistentState(int iteration) + { + await Threaded.Run(4, r => + { + for (var i = 0; i < 100000; i++) + { + // clear 25,000 times per 1_000_000 iters + // capacity is 9, so we will try to clear before warmup is done + if (r == 0 && (i & 3) == 3) + { + lru.Clear(); + } + + lru.GetOrAdd(i + 1, i => i.ToString()); + } + }); + + testOutputHelper.WriteLine($"{iteration} {lru.HotCount} {lru.WarmCount} {lru.ColdCount}"); + testOutputHelper.WriteLine(string.Join(" ", lru.Keys)); + + RunIntegrityCheck(); + } + + // This test will run forever if there is a live lock. + // Since the cache bookkeeping has some overhead, it is harder to provoke + // spinning inside the reader thread compared to LruItemSoakTests.DetectTornStruct. 
+ [Theory] + [Repeat(10)] + public async Task WhenValueIsBigStructNoLiveLock(int _) + { + using var source = new CancellationTokenSource(); + var started = new TaskCompletionSource(); + var cache = new ConcurrentLruCache(Capacity, EqualityComparer.Default); + + var setTask = Task.Run(() => Setter(cache, source.Token, started)); + await started.Task; + Checker(cache, source); + + await setTask; + } + + private void Setter(ICache cache, CancellationToken cancelToken, TaskCompletionSource started) + { + started.SetResult(true); + + while (true) + { + cache.AddOrUpdate(1, Guid.NewGuid()); + cache.AddOrUpdate(1, Guid.NewGuid()); + + if (cancelToken.IsCancellationRequested) + { + return; + } + } + } + + private void Checker(ICache cache, CancellationTokenSource source) + { + for (var count = 0; count < 100_000; ++count) + { + cache.TryGet(1, out _); + } + + source.Cancel(); + } + + private void RunIntegrityCheck() => new ConcurrentLruCacheIntegrityChecker(lru).Validate(); + + private class ConcurrentLruCacheIntegrityChecker + { + private readonly ConcurrentLruCache _cache; + + private readonly ConcurrentDictionary.LruItem> dictionary; + private readonly ConcurrentQueue.LruItem> hotQueue; + private readonly ConcurrentQueue.LruItem> warmQueue; + private readonly ConcurrentQueue.LruItem> coldQueue; + + private static FieldInfo dictionaryField = typeof(ConcurrentLruCache).GetField("_dictionary", BindingFlags.NonPublic | BindingFlags.Instance); + + private static FieldInfo hotQueueField = typeof(ConcurrentLruCache).GetField("_hotQueue", BindingFlags.NonPublic | BindingFlags.Instance); + private static FieldInfo warmQueueField = typeof(ConcurrentLruCache).GetField("_warmQueue", BindingFlags.NonPublic | BindingFlags.Instance); + private static FieldInfo coldQueueField = typeof(ConcurrentLruCache).GetField("_coldQueue", BindingFlags.NonPublic | BindingFlags.Instance); + + public ConcurrentLruCacheIntegrityChecker(ConcurrentLruCache cache) + { + this._cache = cache; + + // get 
queues via reflection + this.dictionary = (ConcurrentDictionary.LruItem>)dictionaryField.GetValue(cache); + this.hotQueue = (ConcurrentQueue.LruItem>)hotQueueField.GetValue(cache); + this.warmQueue = (ConcurrentQueue.LruItem>)warmQueueField.GetValue(cache); + this.coldQueue = (ConcurrentQueue.LruItem>)coldQueueField.GetValue(cache); + } + + public void Validate() + { + // queue counters must be consistent with queues + this.hotQueue.Count.Should().Be(_cache.HotCount, "hot queue has a corrupted count"); + this.warmQueue.Count.Should().Be(_cache.WarmCount, "warm queue has a corrupted count"); + this.coldQueue.Count.Should().Be(_cache.ColdCount, "cold queue has a corrupted count"); + + // cache contents must be consistent with queued items + ValidateQueue(_cache, this.hotQueue, "hot"); + ValidateQueue(_cache, this.warmQueue, "warm"); + ValidateQueue(_cache, this.coldQueue, "cold"); + + // cache must be within capacity + _cache.Count.Should().BeLessThanOrEqualTo(_cache.Capacity + 1, "capacity out of valid range"); + } + + private void ValidateQueue(ConcurrentLruCache cache, ConcurrentQueue.LruItem> queue, string queueName) + { + foreach (var item in queue) + { + if (item.WasRemoved) + { + // It is possible for the queues to contain 2 (or more) instances of the same key/item. One that was removed, + // and one that was added after the other was removed. + // In this case, the dictionary may contain the value only if the queues contain an entry for that key marked as WasRemoved == false. 
+ if (dictionary.TryGetValue(item.Key, out var value)) + { + hotQueue.Union(warmQueue).Union(coldQueue) + .Any(i => i.Key.Equals(item.Key) && !i.WasRemoved) + .Should().BeTrue($"{queueName} removed item {item.Key} was not removed"); + } + } + else + { + dictionary.TryGetValue(item.Key, out var value).Should().BeTrue($"{queueName} item {item.Key} was not present"); + } + } + } + } + + private sealed class RepeatAttribute : Xunit.Sdk.DataAttribute + { + private readonly int _count; + + public RepeatAttribute(int count) + { + if (count < 1) + { + throw new ArgumentOutOfRangeException( + paramName: nameof(count), + message: "Repeat count must be greater than 0." + ); + } + + _count = count; + } + + public override IEnumerable GetData(System.Reflection.MethodInfo testMethod) + { + foreach (var iterationNumber in Enumerable.Range(start: 1, count: _count)) + { + yield return new object[] { iterationNumber }; + } + } + } + + private class Threaded + { + public static Task Run(int threadCount, Action action) + { + return Run(threadCount, i => action()); + } + + public static async Task Run(int threadCount, Action action) + { + var tasks = new Task[threadCount]; + ManualResetEvent mre = new ManualResetEvent(false); + + for (int i = 0; i < threadCount; i++) + { + int run = i; + tasks[i] = Task.Run(() => + { + mre.WaitOne(); + action(run); + }); + } + + mre.Set(); + + await Task.WhenAll(tasks); + } + + public static Task RunAsync(int threadCount, Func action) + { + return Run(threadCount, i => action()); + } + + public static async Task RunAsync(int threadCount, Func action) + { + var tasks = new Task[threadCount]; + ManualResetEvent mre = new ManualResetEvent(false); + + for (int i = 0; i < threadCount; i++) + { + int run = i; + tasks[i] = Task.Run(async () => + { + mre.WaitOne(); + await action(run); + }); + } + + mre.Set(); + + await Task.WhenAll(tasks); + } + } +} diff --git a/test/NonSilo.Tests/Caching/ConcurrentLruTests.cs 
b/test/NonSilo.Tests/Caching/ConcurrentLruTests.cs new file mode 100644 index 00000000000..f1fb84038db --- /dev/null +++ b/test/NonSilo.Tests/Caching/ConcurrentLruTests.cs @@ -0,0 +1,1249 @@ +using FluentAssertions; +using System.Collections; +using Xunit; +using Xunit.Abstractions; +using Orleans.Caching; +using Orleans.Caching.Internal; + +namespace NonSilo.Tests.Caching; + +public class ConcurrentLruTests(ITestOutputHelper testOutputHelper) +{ + private readonly ITestOutputHelper _testOutputHelper = testOutputHelper; + private const int Capacity = 100; + private readonly CapacityPartition _capacityPartition = new(Capacity); + private int HotCap => _capacityPartition.Hot; + private int WarmCap => _capacityPartition.Warm; + private int ColdCap => _capacityPartition.Cold; + + private readonly ConcurrentLruCache _lru = new(Capacity); + private readonly ValueFactory _valueFactory = new(); + + private static ConcurrentLruCache.ITestAccessor GetTestAccessor(ConcurrentLruCache lru) => lru; + + [Fact] + public void WhenCapacityIsLessThan3CtorThrows() + { + Action constructor = () => { var x = new ConcurrentLruCache(2, EqualityComparer.Default); }; + + constructor.Should().Throw(); + } + + [Fact] + public void WhenComparerIsNullCtorThrows() + { + Action constructor = () => { var x = new ConcurrentLruCache(3, null); }; + + constructor.Should().Throw(); + } + + [Fact] + public void WhenCapacityIs4HotHasCapacity1AndColdHasCapacity1() + { + var lru = new ConcurrentLruCache(4, EqualityComparer.Default); + + for (var i = 0; i < 5; i++) + { + lru.GetOrAdd(i, x => x); + } + + lru.HotCount.Should().Be(1); + lru.ColdCount.Should().Be(1); + lru.Capacity.Should().Be(4); + } + + [Fact] + public void WhenCapacityIs10HotHasCapacity1AndWarmHasCapacity8AndColdHasCapacity1() + { + var lru = new ConcurrentLruCache(10, EqualityComparer.Default); + + for (var i = 0; i < lru.Capacity; i++) + { + lru.GetOrAdd(i, x => x); + } + + lru.HotCount.Should().Be(1); + lru.WarmCount.Should().Be(8); + 
lru.ColdCount.Should().Be(1); + lru.Capacity.Should().Be(10); + } + + [Fact] + public void ConstructAddAndRetrieveWithDefaultCtorReturnsValue() + { + var x = new ConcurrentLruCache(3); + + x.GetOrAdd(1, k => k).Should().Be(1); + } + + [Fact] + public void WhenItemIsAddedCountIsCorrect() + { + _lru.Count.Should().Be(0); + _lru.GetOrAdd(1, _valueFactory.Create); + _lru.Count.Should().Be(1); + } + + [Fact] + public void WhenItemsAddedKeysContainsTheKeys() + { + _lru.Count.Should().Be(0); + _lru.GetOrAdd(1, _valueFactory.Create); + _lru.GetOrAdd(2, _valueFactory.Create); + _lru.Keys.Should().BeEquivalentTo(new[] { 1, 2 }); + } + + [Fact] + public void WhenItemsAddedGenericEnumerateContainsKvps() + { + _lru.Count.Should().Be(0); + _lru.GetOrAdd(1, _valueFactory.Create); + _lru.GetOrAdd(2, _valueFactory.Create); + _lru.Should().BeEquivalentTo(new[] { new KeyValuePair(1, "1"), new KeyValuePair(2, "2") }); + } + + [Fact] + public void WhenItemsAddedEnumerateContainsKvps() + { + _lru.Count.Should().Be(0); + _lru.GetOrAdd(1, _valueFactory.Create); + _lru.GetOrAdd(2, _valueFactory.Create); + + var enumerable = (IEnumerable)_lru; + enumerable.Should().BeEquivalentTo(new[] { new KeyValuePair(1, "1"), new KeyValuePair(2, "2") }); + } + + [Fact] + public void FromColdWarmupFillsWarmQueue() + { + FillCache(); + + _lru.Count.Should().Be(Capacity); + } + + [Fact] + public void WhenItemExistsTryGetReturnsValueAndTrue() + { + _lru.GetOrAdd(1, _valueFactory.Create); + var result = _lru.TryGet(1, out var value); + + result.Should().Be(true); + value.Should().Be("1"); + } + + [Fact] + public void WhenItemDoesNotExistTryGetReturnsNullAndFalse() + { + _lru.GetOrAdd(1, _valueFactory.Create); + var result = _lru.TryGet(2, out var value); + + result.Should().Be(false); + value.Should().BeNull(); + } + + [Fact] + public void WhenItemIsAddedThenRetrievedMetricHitRatioIsHalf() + { + _lru.GetOrAdd(1, _valueFactory.Create); + var result = _lru.TryGet(1, out var value); + + 
_lru.Metrics.HitRatio.Should().Be(0.5); + } + + [Fact] + public void WhenItemIsAddedThenRetrievedTotalIs2() + { + _lru.GetOrAdd(1, _valueFactory.Create); + var result = _lru.TryGet(1, out var value); + + _lru.Metrics.Total.Should().Be(2); + } + + [Fact] + public void WhenRefToMetricsIsCapturedResultIsCorrect() + { + // this detects the case where the struct is copied. If the internal Data class + // doesn't work, this test fails. + var m = _lru.Metrics; + + _lru.GetOrAdd(1, _valueFactory.Create); + var result = _lru.TryGet(1, out var value); + + m.HitRatio.Should().Be(0.5); + } + + [Fact] + public void WhenKeyIsRequestedItIsCreatedAndCached() + { + var result1 = _lru.GetOrAdd(1, _valueFactory.Create); + var result2 = _lru.GetOrAdd(1, _valueFactory.Create); + + _valueFactory.TimesCalled.Should().Be(1); + result1.Should().Be(result2); + } + + [Fact] + public void WhenKeyIsRequestedWithArgItIsCreatedAndCached() + { + var result1 = _lru.GetOrAdd(1, _valueFactory.Create, "x"); + var result2 = _lru.GetOrAdd(1, _valueFactory.Create, "y"); + + _valueFactory.TimesCalled.Should().Be(1); + result1.Should().Be(result2); + } + + [Fact] + public void WhenDifferentKeysAreRequestedValueIsCreatedForEach() + { + var result1 = _lru.GetOrAdd(1, _valueFactory.Create); + var result2 = _lru.GetOrAdd(2, _valueFactory.Create); + + _valueFactory.TimesCalled.Should().Be(2); + + result1.Should().Be("1"); + result2.Should().Be("2"); + } + + [Fact] + public void WhenValuesAreNotReadAndMoreKeysRequestedThanCapacityCountDoesNotIncrease() + { + FillCache(); + + var result = _lru.GetOrAdd(1, _valueFactory.Create); + + _lru.Count.Should().Be(Capacity); + _valueFactory.TimesCalled.Should().Be(Capacity + 1); + } + + [Fact] + public void WhenValuesAreReadAndMoreKeysRequestedThanCapacityCountIsBounded() + { + for (var i = 0; i < Capacity + 1; i++) + { + _lru.GetOrAdd(i, _valueFactory.Create); + + // touch items already cached when they are still in hot + if (i > 0) + { + _lru.GetOrAdd(i - 1, 
_valueFactory.Create); + } + } + + _lru.Count.Should().Be(Capacity); + _valueFactory.TimesCalled.Should().Be(Capacity + 1); + } + + [Fact] + public void WhenKeysAreContinuouslyRequestedInTheOrderTheyAreAddedCountIsBounded() + { + for (var i = 0; i < Capacity + 10; i++) + { + _lru.GetOrAdd(i, _valueFactory.Create); + + // Touch all items already cached in hot, warm and cold. + // This is worst case scenario, since we touch them in the exact order they + // were added. + for (var j = 0; j < i; j++) + { + _lru.GetOrAdd(j, _valueFactory.Create); + } + + _testOutputHelper.WriteLine($"Total: {_lru.Count} Hot: {_lru.HotCount} Warm: {_lru.WarmCount} Cold: {_lru.ColdCount}"); + _lru.Count.Should().BeLessThanOrEqualTo(Capacity + 1); + } + } + + [Fact] + public void WhenKeysAreContinuouslyRequestedInTheOrderTheyAreAddedCountIsBounded2() + { + var lru = new ConcurrentLruCache(128, EqualityComparer.Default); + + for (var i = 0; i < 128 + 10; i++) + { + lru.GetOrAdd(i, _valueFactory.Create); + + // Touch all items already cached in hot, warm and cold. + // This is worst case scenario, since we touch them in the exact order they + // were added. 
+ for (var j = 0; j < i; j++) + { + lru.GetOrAdd(j, _valueFactory.Create); + } + + lru.Count.Should().BeLessThanOrEqualTo(128 + 1, $"Total: {lru.Count} Hot: {lru.HotCount} Warm: {lru.WarmCount} Cold: {lru.ColdCount}"); + } + } + + [Fact] + public void WhenValueIsNotTouchedAndExpiresFromHotValueIsBumpedToCold() + { + FillCache(); + + // Insert a value, making it hot + Touch(0); + + // Insert more values, demoting it to cold + GetOrAddRangeInclusive(1, Capacity - 1); + + IsInCache(0); // Value should be cold + + GetOrAddRangeInclusive(Capacity, Capacity + 1); + + // Value should have been evicted + _lru.TryGet(0, out var value).Should().BeFalse(); + } + + private bool IsInCache(int key) => _lru.Keys.Contains(key); + + private void Touch(int key) + { + _lru.GetOrAdd(key, _valueFactory.Create); + } + + private void GetOrAddRangeInclusive(int start, int end) + { + if (start <= end) + { + for (var i = start; i <= end; i++) + { + Touch(i); + } + } + else + { + for (var i = start; i >= end; i--) + { + Touch(i); + } + } + } + + private void AddOrUpdateRangeInclusive(int start, int end) + { + if (start <= end) + { + for (var i = start; i <= end; i++) + { + _lru.AddOrUpdate(i, _valueFactory.Create(i)); + } + } + else + { + for (var i = start; i >= end; i--) + { + _lru.AddOrUpdate(i, _valueFactory.Create(i)); + } + } + } + + [Fact] + public void WhenValueIsTouchedAndExpiresFromHotValueIsBumpedToWarm() + { + FillCache(); + + // Promote to hot + Touch(0); + Touch(0); + + GetOrAddRangeInclusive(1, 9); + + _lru.TryGet(0, out var value).Should().BeTrue(); + } + + [Fact] + public void WhenValueIsTouchedAndExpiresFromColdItIsBumpedToWarm() + { + FillCache(); + + _lru.GetOrAdd(0, _valueFactory.Create); + + IsInCache(0).Should().BeTrue(); + + + _lru.GetOrAdd(1, _valueFactory.Create); + _lru.GetOrAdd(2, _valueFactory.Create); + _lru.GetOrAdd(3, _valueFactory.Create); // push 0 to cold (not touched in hot) + + IsInCache(0).Should().BeTrue(); + + _lru.GetOrAdd(0, _valueFactory.Create); // 
Touch 0 in cold + + IsInCache(0).Should().BeTrue(); + + GetOrAddRangeInclusive(4, 9); + _lru.GetOrAdd(4, _valueFactory.Create); // fully cycle cold, this will evict 0 if it is not moved to warm + _lru.GetOrAdd(5, _valueFactory.Create); + _lru.GetOrAdd(6, _valueFactory.Create); + _lru.GetOrAdd(7, _valueFactory.Create); + _lru.GetOrAdd(8, _valueFactory.Create); + _lru.GetOrAdd(9, _valueFactory.Create); + + _lru.TryGet(0, out var value).Should().BeTrue(); + } + + [Fact] + public void WhenValueIsNotTouchedAndExpiresFromColdItIsRemoved() + { + FillCache(); + + _lru.GetOrAdd(0, _valueFactory.Create); + + AddOrUpdateRangeInclusive(1, Capacity); + + _lru.TryGet(0, out var value).Should().BeFalse(); + } + + [Fact] + public void WhenValueIsNotTouchedAndExpiresFromWarmValueIsBumpedToCold() + { + FillCache(); + + // Insert hot + Touch(0); + + // Touch it again so that it will be promoted to 'warm' + Touch(0); + + GetOrAddRangeInclusive(1, Capacity - 1); + + IsInCache(0).Should().BeTrue(); + + // Touch more values to have it evicted. 
+ for (var i = 1; i < 1 + Capacity; i++) + { + Touch(i); + Touch(i); + } + + _lru.TryGet(0, out var value).Should().BeFalse(); + } + + [Fact] + public void WhenValueIsTouchedAndExpiresFromWarmValueIsBumpedBackIntoWarm() + { + FillCache(); + + _lru.GetOrAdd(0, _valueFactory.Create); + _lru.GetOrAdd(0, _valueFactory.Create); // Touch 0 in hot, it will promote to warm + + _lru.GetOrAdd(1, _valueFactory.Create); + _lru.GetOrAdd(2, _valueFactory.Create); + _lru.GetOrAdd(3, _valueFactory.Create); // push 0 to warm + + // touch next 3 values, so they will promote to warm + _lru.GetOrAdd(4, _valueFactory.Create); _lru.GetOrAdd(4, _valueFactory.Create); + _lru.GetOrAdd(5, _valueFactory.Create); _lru.GetOrAdd(5, _valueFactory.Create); + _lru.GetOrAdd(6, _valueFactory.Create); _lru.GetOrAdd(6, _valueFactory.Create); + + // push 4,5,6 to warm, 0 to cold + _lru.GetOrAdd(7, _valueFactory.Create); + _lru.GetOrAdd(8, _valueFactory.Create); + _lru.GetOrAdd(9, _valueFactory.Create); + + // Touch 0 + _lru.TryGet(0, out var value).Should().BeTrue(); + + // push 7,8,9 to cold, cycle 0 back to warm + _lru.GetOrAdd(10, _valueFactory.Create); + _lru.GetOrAdd(11, _valueFactory.Create); + _lru.GetOrAdd(12, _valueFactory.Create); + + _lru.TryGet(0, out value).Should().BeTrue(); + } + + [Fact] + public void WhenValueExpiresItIsDisposed() + { + var lruOfDisposable = new ConcurrentLruCache(6, EqualityComparer.Default); + var disposableValueFactory = new DisposableValueFactory(); + + for (var i = 0; i < 7; i++) + { + lruOfDisposable.GetOrAdd(i, disposableValueFactory.Create); + } + + disposableValueFactory.Items[0].IsDisposed.Should().BeTrue(); + + disposableValueFactory.Items[1].IsDisposed.Should().BeFalse(); + disposableValueFactory.Items[2].IsDisposed.Should().BeFalse(); + disposableValueFactory.Items[3].IsDisposed.Should().BeFalse(); + disposableValueFactory.Items[4].IsDisposed.Should().BeFalse(); + disposableValueFactory.Items[5].IsDisposed.Should().BeFalse(); + 
disposableValueFactory.Items[6].IsDisposed.Should().BeFalse(); + } + + [Fact] + public void WhenAddingNullValueCanBeAddedAndRemoved() + { + _lru.GetOrAdd(1, _ => null).Should().BeNull(); + _lru.AddOrUpdate(1, null); + _lru.TryRemove(1).Should().BeTrue(); + } + + [Fact] + public void WhenValuesAreEvictedEvictionMetricCountsEvicted() + { + FillCache(); + + _lru.GetOrAdd(1, _valueFactory.Create); + + _lru.Metrics.Evicted.Should().Be(1); + } + + [Fact] + public void WhenKeyExistsTryRemoveRemovesItemAndReturnsTrue() + { + _lru.GetOrAdd(1, _valueFactory.Create); + + _lru.TryRemove(1).Should().BeTrue(); + _lru.TryGet(1, out var value).Should().BeFalse(); + } + + [Fact] + public void WhenKeyExistsTryRemoveReturnsValue() + { + _lru.GetOrAdd(1, _valueFactory.Create); + + _lru.TryRemove(1, out var value).Should().BeTrue(); + value.Should().Be("1"); + } + + [Fact] + public void WhenItemIsRemovedItIsDisposed() + { + var lruOfDisposable = new ConcurrentLruCache(6, EqualityComparer.Default); + var disposableValueFactory = new DisposableValueFactory(); + + lruOfDisposable.GetOrAdd(1, disposableValueFactory.Create); + lruOfDisposable.TryRemove(1); + + disposableValueFactory.Items[1].IsDisposed.Should().BeTrue(); + } + + [Fact] + public void WhenItemRemovedFromHotDuringWarmupItIsEagerlyCycledOut() + { + _lru.GetOrAdd(1, _valueFactory.Create); + + _lru.TryRemove(1); + Print(); // Hot [1] Warm [] Cold [] + + _lru.GetOrAdd(1, _valueFactory.Create); + _lru.GetOrAdd(2, _valueFactory.Create); + _lru.GetOrAdd(3, _valueFactory.Create); + Print(); // Hot [1,2,3] Warm [] Cold [] + + _lru.WarmCount.Should().Be(0); + _lru.ColdCount.Should().Be(0); + } + + [Fact] + public void WhenItemRemovedFromHotAfterWarmupItIsEagerlyCycledOut() + { + for (var i = 0; i < _lru.Capacity; i++) + { + _lru.GetOrAdd(i, _valueFactory.Create); + } + + _lru.Metrics.Evicted.Should().Be(0); + + _lru.GetOrAdd(-1, _valueFactory.Create); + + _lru.TryRemove(-1); + + // fully cycle hot, which is 3 items + foreach (var item 
in Enumerable.Range(1000, HotCap)) + { + _lru.GetOrAdd(item, _valueFactory.Create); + } + + // without eager eviction as -1 is purged from hot, a 4th item will be pushed out since hot queue is full + _lru.Metrics.Evicted.Should().Be(HotCap); + } + + [Fact] + public void WhenItemRemovedFromWarmDuringWarmupItIsEagerlyCycledOut() + { + foreach (var item in Enumerable.Range(1, HotCap)) + { + _lru.GetOrAdd(item, _valueFactory.Create); + } + + _lru.TryRemove(1); + + foreach (var item in Enumerable.Range(1000, HotCap)) + { + _lru.GetOrAdd(item, _valueFactory.Create); + } + + // Items are cycled from Hot to Warm, since Warm is not yet filled. + // The previously removed item is skipped. + _lru.WarmCount.Should().Be(HotCap - 1); + _lru.ColdCount.Should().Be(0); + } + + + [Fact] + public void WhenItemRemovedFromWarmAfterWarmupItIsEagerlyCycledOut() + { + for (var i = 0; i < _lru.Capacity; i++) + { + _lru.GetOrAdd(i, _valueFactory.Create); + } + + Print(); // Hot [6,7,8] Warm [1,2,3] Cold [0,4,5] + _lru.Metrics.Evicted.Should().Be(0); + + _lru.TryRemove(1); + + _lru.GetOrAdd(6, _valueFactory.Create); // 6 -> W + _lru.GetOrAdd(9, _valueFactory.Create); + + Print(); // Hot [7,8,9] Warm [2,3,6] Cold [0,4,5] + + _lru.Metrics.Evicted.Should().Be(0); + } + + [Fact] + public void WhenItemRemovedFromColdAfterWarmupItIsEagerlyCycledOut() + { + for (var i = 0; i < _lru.Capacity; i++) + { + _lru.GetOrAdd(i, _valueFactory.Create); + } + + Print(); // Hot [6,7,8] Warm [1,2,3] Cold [0,4,5] + _lru.Metrics.Evicted.Should().Be(0); + + _lru.GetOrAdd(0, _valueFactory.Create); + _lru.TryRemove(0); + + _lru.GetOrAdd(9, _valueFactory.Create); + + Print(); // Hot [7,8,9] Warm [1,2,3] Cold [4,5,6] + + _lru.Metrics.Evicted.Should().Be(0); + } + + [Fact] + public void WhenKeyDoesNotExistTryRemoveReturnsFalse() + { + _lru.GetOrAdd(1, _valueFactory.Create); + + _lru.TryRemove(2).Should().BeFalse(); + } + + [Fact] + public void WhenItemsAreRemovedTrimRemovesDeletedItemsFromQueues() + { + for (var i = 0; i < 
_lru.Capacity; i++) + { + _lru.GetOrAdd(i, _valueFactory.Create); + } + + _lru.TryRemove(0); + _lru.TryRemove(1); + _lru.TryRemove(6); + + _lru.Trim(1); + + _lru.HotCount.Should().Be(HotCap); + _lru.WarmCount.Should().Be(WarmCap); + _lru.ColdCount.Should().Be(ColdCap - 1); + } + + [Fact] + public void WhenRepeatedlyAddingAndRemovingSameValueLruRemainsInConsistentState() + { + for (var i = 0; i < Capacity; i++) + { + // Because TryRemove leaves the item in the queue, when it is eventually removed + // from the cold queue, it should not remove the newly created value. + _lru.GetOrAdd(1, _valueFactory.Create); + _lru.TryGet(1, out var value).Should().BeTrue(); + _lru.TryRemove(1); + } + } + + [Fact] + public void WhenKeyExistsTryUpdateUpdatesValueAndReturnsTrue() + { + _lru.GetOrAdd(1, _valueFactory.Create); + + _lru.TryUpdate(1, "2").Should().BeTrue(); + + _lru.TryGet(1, out var value); + value.Should().Be("2"); + } + + [Fact] + public void WhenKeyExistsTryUpdateDisposesOldValue() + { + var lruOfDisposable = new ConcurrentLruCache(6, EqualityComparer.Default); + var disposableValueFactory = new DisposableValueFactory(); + var newValue = new DisposableItem(); + + lruOfDisposable.GetOrAdd(1, disposableValueFactory.Create); + lruOfDisposable.TryUpdate(1, newValue); + + disposableValueFactory.Items[1].IsDisposed.Should().BeTrue(); + } + + [Fact] + public void WhenKeyDoesNotExistTryUpdateReturnsFalse() + { + _lru.GetOrAdd(1, _valueFactory.Create); + + _lru.TryUpdate(2, "3").Should().BeFalse(); + } + + // backcompat: remove conditional compile +#if NETCOREAPP3_0_OR_GREATER + [Fact] + public void WhenKeyExistsTryUpdateIncrementsUpdateCount() + { + _lru.GetOrAdd(1, _valueFactory.Create); + + _lru.TryUpdate(1, "2").Should().BeTrue(); + + _lru.Metrics.Updated.Should().Be(1); + } + + [Fact] + public void WhenKeyDoesNotExistTryUpdateDoesNotIncrementCounter() + { + _lru.GetOrAdd(1, _valueFactory.Create); + + _lru.TryUpdate(2, "3").Should().BeFalse(); + + 
_lru.Metrics.Updated.Should().Be(0); + } +#endif + [Fact] + public void WhenKeyDoesNotExistAddOrUpdateAddsNewItem() + { + _lru.AddOrUpdate(1, "1"); + + _lru.TryGet(1, out var value).Should().BeTrue(); + value.Should().Be("1"); + } + + [Fact] + public void WhenKeyExistsAddOrUpdateUpdatesExistingItem() + { + _lru.AddOrUpdate(1, "1"); + _lru.AddOrUpdate(1, "2"); + + _lru.TryGet(1, out var value).Should().BeTrue(); + value.Should().Be("2"); + } + + [Fact] + public void WhenKeyExistsAddOrUpdateGuidUpdatesExistingItem() + { + var lru2 = new ConcurrentLruCache(Capacity, EqualityComparer.Default); + + var b = new byte[8]; + lru2.AddOrUpdate(1, new Guid(1, 0, 0, b)); + lru2.AddOrUpdate(1, new Guid(2, 0, 0, b)); + + lru2.TryGet(1, out var value).Should().BeTrue(); + value.Should().Be(new Guid(2, 0, 0, b)); + } + + [Fact] + public void WhenKeyExistsAddOrUpdateDisposesOldValue() + { + var lruOfDisposable = new ConcurrentLruCache(6, EqualityComparer.Default); + var disposableValueFactory = new DisposableValueFactory(); + var newValue = new DisposableItem(); + + lruOfDisposable.GetOrAdd(1, disposableValueFactory.Create); + lruOfDisposable.AddOrUpdate(1, newValue); + + disposableValueFactory.Items[1].IsDisposed.Should().BeTrue(); + } + + [Fact] + public void WhenKeyDoesNotExistAddOrUpdateMaintainsLruOrder() + { + AddOrUpdateRangeInclusive(1, Capacity + 1); + + _lru.HotCount.Should().Be(HotCap); + _lru.WarmCount.Should().Be(WarmCap); + _lru.TryGet(0, out _).Should().BeFalse(); + } + + [Fact] + public void WhenCacheIsEmptyClearIsNoOp() + { + _lru.Clear(); + _lru.Count.Should().Be(0); + } + + [Fact] + public void WhenItemsExistClearRemovesAllItems() + { + _lru.AddOrUpdate(1, "1"); + _lru.AddOrUpdate(2, "2"); + + _lru.Clear(); + + _lru.Count.Should().Be(0); + + // verify queues are purged + _lru.HotCount.Should().Be(0); + _lru.WarmCount.Should().Be(0); + _lru.ColdCount.Should().Be(0); + } + + // This is a special case: + // Cycle 1: hot => warm + // Cycle 2: warm => warm + // Cycle 
3: warm => cold + // Cycle 4: cold => remove + // Cycle 5: cold => remove + [Fact] + public void WhenCacheIsSize3ItemsExistAndItemsAccessedClearRemovesAllItems() + { + var lru = new ConcurrentLruCache(3); + + lru.AddOrUpdate(1, "1"); + lru.AddOrUpdate(2, "1"); + + lru.TryGet(1, out _); + lru.TryGet(2, out _); + + lru.Clear(); + + lru.Count.Should().Be(0); + } + + [Theory] + [InlineData(1)] + [InlineData(2)] + [InlineData(3)] + [InlineData(4)] + [InlineData(5)] + [InlineData(6)] + [InlineData(7)] + [InlineData(8)] + [InlineData(9)] + [InlineData(10)] + public void WhenItemsExistAndItemsAccessedClearRemovesAllItems(int itemCount) + { + // By default capacity is 9. Test all possible states of touched items + // in the cache. + + for (var i = 0; i < itemCount; i++) + { + _lru.AddOrUpdate(i, "1"); + } + + // touch n items + for (var i = 0; i < itemCount; i++) + { + _lru.TryGet(i, out _); + } + + _lru.Clear(); + + _testOutputHelper.WriteLine("LRU " + string.Join(" ", _lru.Keys)); + + _lru.Count.Should().Be(0); + + // verify queues are purged + _lru.HotCount.Should().Be(0); + _lru.WarmCount.Should().Be(0); + _lru.ColdCount.Should().Be(0); + } + + [Fact] + public void WhenWarmThenClearedIsWarmIsReset() + { + for (var i = 0; i < Capacity; i++) + { + Touch(i); + } + + var testAccessor = GetTestAccessor(_lru); + testAccessor.IsWarm.Should().BeTrue(); + + _lru.Clear(); + _lru.Count.Should().Be(0); + testAccessor.IsWarm.Should().BeFalse(); + + for (var i = 0; i < Capacity; i++) + { + Touch(i); + } + + testAccessor.IsWarm.Should().BeTrue(); + _lru.Count.Should().Be(_lru.Capacity); + } + + [Fact] + public void WhenWarmThenTrimIsWarmIsReset() + { + GetOrAddRangeInclusive(1, Capacity); + + var testAccessor = GetTestAccessor(_lru); + testAccessor.IsWarm.Should().BeTrue(); + _lru.Trim(Capacity / 2); + + testAccessor.IsWarm.Should().BeFalse(); + _lru.Count.Should().Be(Capacity / 2); + + for (var i = 0; i < Capacity; i++) + { + Touch(i); + } + + testAccessor.IsWarm.Should().BeTrue(); + 
_lru.Count.Should().Be(_lru.Capacity); + } + + [Fact] + public void WhenItemsAreDisposableClearDisposesItemsOnRemove() + { + var lruOfDisposable = new ConcurrentLruCache(6, EqualityComparer.Default); + + var items = Enumerable.Range(1, 4).Select(i => new DisposableItem()).ToList(); + + for (var i = 0; i < 4; i++) + { + lruOfDisposable.AddOrUpdate(i, items[i]); + } + + lruOfDisposable.Clear(); + + items.All(i => i.IsDisposed == true).Should().BeTrue(); + } + + [Fact] + public void WhenTrimCountIsZeroThrows() + { + _lru.Invoking(l => _lru.Trim(0)).Should().Throw(); + } + + [Fact] + public void WhenTrimCountIsMoreThanCapacityThrows() + { + _lru.Invoking(l => _lru.Trim(HotCap + WarmCap + ColdCap + 1)).Should().Throw(); + } + + [Theory] + [InlineData(10)] + [InlineData(20)] + [InlineData(30)] + [InlineData(40)] + [InlineData(50)] + [InlineData(60)] + [InlineData(70)] + [InlineData(80)] + [InlineData(90)] + public void WhenColdItemsExistTrimRemovesExpectedItemCount(int trimCount) + { + FillCache(); + + // Warm items + var warmItems = Enumerable.Range(1, WarmCap).ToArray(); + foreach (var item in warmItems) + { + _lru.AddOrUpdate(item, item.ToString()); + _lru.GetOrAdd(item, _valueFactory.Create); + } + + // Cold items (added but untouched) + var coldItems = Enumerable.Range(1000, ColdCap).ToArray(); + foreach (var item in coldItems) + { + _lru.AddOrUpdate(item, item.ToString()); + } + + // Hot Items (evict the previous hot items to cold) + var hotItems = Enumerable.Range(2000, HotCap).ToArray(); + foreach (var item in hotItems) + { + _lru.AddOrUpdate(item, item.ToString()); + } + + _lru.Trim(trimCount); + + int[] expected = [ + .. warmItems.Skip(Math.Max(0, trimCount - coldItems.Length)), + .. hotItems, + .. 
coldItems.Skip(trimCount)]; + _lru.Keys.Order().Should().BeEquivalentTo(expected.Order()); + } + + [Theory] + [InlineData(1, new[] { 6, 5, 4, 3, 2 })] + [InlineData(2, new[] { 6, 5, 4, 3 })] + [InlineData(3, new[] { 6, 5, 4 })] + [InlineData(4, new[] { 6, 5 })] + [InlineData(5, new[] { 6 })] + [InlineData(6, new int[] { })] + [InlineData(7, new int[] { })] + [InlineData(8, new int[] { })] + [InlineData(9, new int[] { })] + public void WhenHotAndWarmItemsExistTrimRemovesExpectedItemCount(int itemCount, int[] expected) + { + // initial state: + // Hot = 6, 5, 4 + // Warm = 3, 2, 1 + // Cold = - + _lru.AddOrUpdate(1, "1"); + _lru.AddOrUpdate(2, "2"); + _lru.AddOrUpdate(3, "3"); + _lru.GetOrAdd(1, i => i.ToString()); + _lru.GetOrAdd(2, i => i.ToString()); + _lru.GetOrAdd(3, i => i.ToString()); + + _lru.AddOrUpdate(4, "4"); + _lru.AddOrUpdate(5, "5"); + _lru.AddOrUpdate(6, "6"); + + _lru.Trim(itemCount); + + _lru.Keys.Should().BeEquivalentTo(expected); + } + + [Theory] + [InlineData(1, new[] { 3, 2 })] + [InlineData(2, new[] { 3 })] + [InlineData(3, new int[] { })] + [InlineData(4, new int[] { })] + [InlineData(5, new int[] { })] + [InlineData(6, new int[] { })] + [InlineData(7, new int[] { })] + [InlineData(8, new int[] { })] + [InlineData(9, new int[] { })] + public void WhenHotItemsExistTrimRemovesExpectedItemCount(int itemCount, int[] expected) + { + // initial state: + // Hot = 3, 2, 1 + // Warm = - + // Cold = - + _lru.AddOrUpdate(1, "1"); + _lru.AddOrUpdate(2, "2"); + _lru.AddOrUpdate(3, "3"); + + _lru.Trim(itemCount); + + _lru.Keys.Should().BeEquivalentTo(expected); + } + + [Theory] + [InlineData(10)] + [InlineData(20)] + [InlineData(30)] + [InlineData(40)] + [InlineData(50)] + [InlineData(60)] + [InlineData(70)] + [InlineData(80)] + public void WhenColdItemsAreTouchedTrimRemovesExpectedItemCount(int trimCount) + { + FillCache(); + + // Warm items + var warmItems = Enumerable.Range(1, WarmCap).ToArray(); + foreach (var item in warmItems) + { + 
_lru.AddOrUpdate(item, item.ToString()); + _lru.GetOrAdd(item, _valueFactory.Create); + } + + // Cold items (added but untouched) + var coldItems = Enumerable.Range(1000, ColdCap).ToArray(); + foreach (var item in coldItems) + { + _lru.AddOrUpdate(item, item.ToString()); + } + + // Hot Items (evict the previous hot items to cold) + var hotItems = Enumerable.Range(2000, HotCap).ToArray(); + foreach (var item in hotItems) + { + _lru.AddOrUpdate(item, item.ToString()); + } + + // Touch cold items to promote them to warm + foreach (var item in coldItems) + { + _lru.GetOrAdd(item, _valueFactory.Create); + } + + _lru.Trim(trimCount); + + int[] expected = [ + .. warmItems.Skip(Math.Max(0, trimCount)), + .. hotItems, + .. coldItems]; + _lru.Keys.Order().Should().BeEquivalentTo(expected.Order()); + _testOutputHelper.WriteLine("LRU " + string.Join(" ", _lru.Keys)); + _testOutputHelper.WriteLine("exp " + string.Join(" ", expected)); + + _lru.Keys.Should().BeEquivalentTo(expected); + } + + [Theory] + [InlineData(1)] + [InlineData(2)] + [InlineData(3)] + [InlineData(4)] + [InlineData(5)] + [InlineData(6)] + [InlineData(7)] + [InlineData(8)] + [InlineData(9)] + [InlineData(10)] + public void WhenItemsExistAndItemsAccessedTrimRemovesAllItems(int itemCount) + { + // By default capacity is 9. Test all possible states of touched items + // in the cache. 
+ + for (var i = 0; i < itemCount; i++) + { + _lru.AddOrUpdate(i, "1"); + } + + // touch n items + for (var i = 0; i < itemCount; i++) + { + _lru.TryGet(i, out _); + } + + _lru.Trim(Math.Min(itemCount, _lru.Capacity)); + + _testOutputHelper.WriteLine("LRU " + string.Join(" ", _lru.Keys)); + + _lru.Count.Should().Be(0); + + // verify queues are purged + _lru.HotCount.Should().Be(0); + _lru.WarmCount.Should().Be(0); + _lru.ColdCount.Should().Be(0); + } + + [Theory] + [InlineData(1)] + [InlineData(2)] + [InlineData(3)] + [InlineData(4)] + [InlineData(5)] + [InlineData(6)] + [InlineData(7)] + [InlineData(8)] + [InlineData(9)] + [InlineData(10)] + public void WhenItemsRemovedClearRemovesAllItems(int itemCount) + { + for (var i = 0; i < itemCount; i++) + { + _lru.AddOrUpdate(i, "1"); + } + + // this leaves an item in the queue but not the dictionary + _lru.TryRemove(0, out _); + + _lru.Clear(); + + _testOutputHelper.WriteLine("LRU " + string.Join(" ", _lru.Keys)); + + _lru.Count.Should().Be(0); + + // verify queues are purged + _lru.HotCount.Should().Be(0); + _lru.WarmCount.Should().Be(0); + _lru.ColdCount.Should().Be(0); + } + + [Fact] + public void WhenItemsAreDisposableTrimDisposesItems() + { + var lruOfDisposable = new ConcurrentLruCache(6, EqualityComparer.Default); + + var items = Enumerable.Range(1, 4).Select(i => new DisposableItem()).ToList(); + + for (var i = 0; i < 4; i++) + { + lruOfDisposable.AddOrUpdate(i, items[i]); + } + + lruOfDisposable.Trim(2); + + items[0].IsDisposed.Should().BeTrue(); + items[1].IsDisposed.Should().BeTrue(); + items[2].IsDisposed.Should().BeFalse(); + items[3].IsDisposed.Should().BeFalse(); + } + + private void FillCache() => GetOrAddRangeInclusive(-1, -Capacity); + + private void Print() + { +#if DEBUG + _testOutputHelper.WriteLine(_lru.FormatLruString()); +#endif + } + + private class ValueFactory + { + public int TimesCalled; + + public string Create(int key) + { + TimesCalled++; + return key.ToString(); + } + + public string 
Create(int key, TArg arg) + { + TimesCalled++; + return $"{key}{arg}"; + } + + public Task CreateAsync(int key) + { + TimesCalled++; + return Task.FromResult(key.ToString()); + } + + public Task CreateAsync(int key, TArg arg) + { + TimesCalled++; + return Task.FromResult($"{key}{arg}"); + } + } + + private class DisposableItem : IDisposable + { + public bool IsDisposed { get; private set; } + + public void Dispose() + { + IsDisposed = true; + } + } + + private class DisposableValueFactory + { + public Dictionary Items { get; } = []; + + public DisposableItem Create(int key) + { + var item = new DisposableItem(); + Items.Add(key, item); + return item; + } + + public Task CreateAsync(int key) + { + var item = new DisposableItem(); + Items.Add(key, item); + return Task.FromResult(item); + } + } +} diff --git a/test/NonSilo.Tests/General/LruTest.cs b/test/NonSilo.Tests/General/LruTest.cs deleted file mode 100644 index dc0ecf764c9..00000000000 --- a/test/NonSilo.Tests/General/LruTest.cs +++ /dev/null @@ -1,122 +0,0 @@ -using Orleans.Runtime; -using Xunit; - -namespace UnitTests -{ - /// - ///This is a test class for the LRU class and is intended - ///to contain all LRU Unit Tests - /// - public class LruTest - { - [Fact, TestCategory("BVT"), TestCategory("LRU")] - public void LruCountTest() - { - const int maxSize = 10; - var maxAge = new TimeSpan(0, 1, 0, 0); - - var target = new LRU(maxSize, maxAge); - Assert.Equal(0, target.Count); // "Count wrong after construction" - - target.Add("1", "one"); - Assert.Equal(1, target.Count); // "Count wrong after adding one item" - - target.Add("2", "two"); - Assert.Equal(2, target.Count); // "Count wrong after adding two items" - - target.AddOrUpdate("2", "two"); - Assert.Equal(2, target.Count); // "Count wrong after updating existing item" - } - - [Fact, TestCategory("BVT"), TestCategory("LRU")] - public void LruMaximumSizeTest() - { - const int maxSize = 10; - var maxAge = new TimeSpan(0, 1, 0, 0); - - var target = new 
LRU(maxSize, maxAge); - for (var i = 1; i <= maxSize + 5; i++) - { - var s = i.ToString(); - target.Add(s, "item " + s); - Thread.Sleep(10); - } - - Assert.Equal(maxSize, target.Count); // "LRU grew larger than maximum size" - for (var i = 1; i <= 5; i++) - { - var s = i.ToString(); - Assert.False(target.ContainsKey(s), "'Older' entry is still in cache"); - } - } - - [Fact, TestCategory("BVT"), TestCategory("LRU")] - public void LruUsageTest() - { - const int maxSize = 10; - var maxAge = new TimeSpan(0, 1, 0, 0); - - var target = new LRU(maxSize, maxAge); - - // Fill the LRU with "1" through "10" - for (var i = 1; i <= maxSize; i++) - { - var s = i.ToString(); - target.Add(s, "item " + s); - Thread.Sleep(10); - } - - // Use "10", then "9", etc. - for (var i = maxSize; i >= 1; i--) - { - var s = i.ToString(); - target.TryGetValue(s, out _); - } - - // Add a new item to push the least recently used out -- which should be item "10" - var s1 = (maxSize + 1).ToString(); - target.Add(s1, "item " + s1); - - Assert.Equal(maxSize, target.Count); // "Cache has exceeded maximum size" - var s0 = maxSize.ToString(); - Assert.False(target.ContainsKey(s0), "Least recently used item was not expelled"); - for (var i = 1; i < maxSize; i++) - { - var s = i.ToString(); - Assert.True(target.ContainsKey(s), "Recently used item " + s + " was incorrectly expelled"); - } - } - - [Fact, TestCategory("BVT"), TestCategory("LRU")] - public async Task LruRemoveExpired() - { - const int n = 10; - const int maxSize = n*2; - var maxAge = TimeSpan.FromMilliseconds(500); - var flushCounter = 0; - - var target = new LRU(maxSize, maxAge); - target.RaiseFlushEvent += () => flushCounter++; - - for (int i = 0; i < n; i++) - { - var s = i.ToString(); - target.Add(s, $"item {s}"); - } - - target.RemoveExpired(); - Assert.Equal(0, flushCounter); - Assert.Equal(n, target.Count); - - await Task.Delay(maxAge.Add(maxAge)); - - target.Add("expected", "value"); - target.RemoveExpired(); - - Assert.Equal(n, 
flushCounter); - Assert.Equal(1, target.Count); - Assert.True(target.TryGetValue("expected", out var value)); - Assert.Equal("value", value); - } - } -} From 8c531c2e127178bd9936de6a61e9b414913c63b9 Mon Sep 17 00:00:00 2001 From: Reuben Bond Date: Thu, 29 May 2025 08:52:10 -0700 Subject: [PATCH 2/5] Add more explicit attribution --- src/Orleans.Core/Caching/Internal/CacheDebugView.cs | 5 +++-- src/Orleans.Core/Caching/Internal/CapacityPartition.cs | 3 ++- .../Caching/Internal/ConcurrentDictionarySize.cs | 3 ++- src/Orleans.Core/Caching/Internal/Counter.cs | 3 ++- src/Orleans.Core/Caching/Internal/ICache.cs | 3 ++- src/Orleans.Core/Caching/Internal/ICacheMetrics.cs | 3 ++- src/Orleans.Core/Caching/Internal/PaddedLong.cs | 3 ++- src/Orleans.Core/Caching/Internal/PaddedQueueCount.cs | 3 ++- src/Orleans.Core/Caching/Internal/Padding.cs | 3 ++- src/Orleans.Core/Caching/Internal/Striped64.cs | 3 ++- src/Orleans.Runtime/Utilities/StripedMpscBuffer.cs | 2 +- 11 files changed, 22 insertions(+), 12 deletions(-) diff --git a/src/Orleans.Core/Caching/Internal/CacheDebugView.cs b/src/Orleans.Core/Caching/Internal/CacheDebugView.cs index fbdf8c3c698..8cfc1f4bc3e 100644 --- a/src/Orleans.Core/Caching/Internal/CacheDebugView.cs +++ b/src/Orleans.Core/Caching/Internal/CacheDebugView.cs @@ -5,7 +5,8 @@ namespace Orleans.Caching.Internal; -// Source: https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/CacheDebugView.cs +// Derived from BitFaster.Caching by Alex Peck +// https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/CacheDebugView.cs [ExcludeFromCodeCoverage] internal sealed class CacheDebugView where K : notnull @@ -33,5 +34,5 @@ public KeyValuePair[] Items } } - public ICacheMetrics? Metrics => Metrics; + public ICacheMetrics? 
Metrics => _cache.Metrics; } diff --git a/src/Orleans.Core/Caching/Internal/CapacityPartition.cs b/src/Orleans.Core/Caching/Internal/CapacityPartition.cs index 33f0e553b64..0aa8bc8ed58 100644 --- a/src/Orleans.Core/Caching/Internal/CapacityPartition.cs +++ b/src/Orleans.Core/Caching/Internal/CapacityPartition.cs @@ -7,7 +7,8 @@ namespace Orleans.Caching.Internal; /// A capacity partitioning scheme that favors frequently accessed items by allocating 80% /// capacity to the warm queue. /// -// Source: https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/Lru/FavorWarmPartition.cs +// Derived from BitFaster.Caching by Alex Peck +// https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/Lru/FavorWarmPartition.cs [DebuggerDisplay("{Hot}/{Warm}/{Cold}")] internal readonly struct CapacityPartition { diff --git a/src/Orleans.Core/Caching/Internal/ConcurrentDictionarySize.cs b/src/Orleans.Core/Caching/Internal/ConcurrentDictionarySize.cs index 2b4158ecb9b..09734c83384 100644 --- a/src/Orleans.Core/Caching/Internal/ConcurrentDictionarySize.cs +++ b/src/Orleans.Core/Caching/Internal/ConcurrentDictionarySize.cs @@ -3,7 +3,8 @@ namespace Orleans.Caching.Internal; -// Source: https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/ConcurrentDictionarySize.cs +// Derived from BitFaster.Caching by Alex Peck +// https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/ConcurrentDictionarySize.cs internal static class ConcurrentDictionarySize { private static int NextPrimeGreaterThan(int min) diff --git a/src/Orleans.Core/Caching/Internal/Counter.cs b/src/Orleans.Core/Caching/Internal/Counter.cs index e9cb76a502a..67c207f5be2 100644 --- a/src/Orleans.Core/Caching/Internal/Counter.cs +++ b/src/Orleans.Core/Caching/Internal/Counter.cs @@ -13,7 +13,8 @@ namespace 
Orleans.Caching.Internal; /// A thread-safe counter suitable for high throughput counting across many concurrent threads. /// /// Based on the LongAdder class by Doug Lea. -// Source: https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/Counters/Counter.cs +// Derived from BitFaster.Caching by Alex Peck +// https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/Counters/Counter.cs internal sealed class Counter : Striped64 { /// diff --git a/src/Orleans.Core/Caching/Internal/ICache.cs b/src/Orleans.Core/Caching/Internal/ICache.cs index bc6a7987a8b..c77cd100a66 100644 --- a/src/Orleans.Core/Caching/Internal/ICache.cs +++ b/src/Orleans.Core/Caching/Internal/ICache.cs @@ -9,7 +9,8 @@ namespace Orleans.Caching.Internal; /// /// The type of keys in the cache. /// The type of values in the cache. -// Source: https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/ICache.cs +// Derived from BitFaster.Caching by Alex Peck +// https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/ICache.cs internal interface ICache : IEnumerable> { /// diff --git a/src/Orleans.Core/Caching/Internal/ICacheMetrics.cs b/src/Orleans.Core/Caching/Internal/ICacheMetrics.cs index eccdedd1fba..749b60b5a33 100644 --- a/src/Orleans.Core/Caching/Internal/ICacheMetrics.cs +++ b/src/Orleans.Core/Caching/Internal/ICacheMetrics.cs @@ -4,7 +4,8 @@ namespace Orleans.Caching.Internal; /// Represents cache metrics collected over the lifetime of the cache. /// If metrics are disabled. 
/// -// Source: https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/ICacheMetrics.cs?plain=1#L8C22-L8C35 +// Derived from BitFaster.Caching by Alex Peck +// https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/ICacheMetrics.cs?plain=1#L8C22-L8C35 internal interface ICacheMetrics { /// diff --git a/src/Orleans.Core/Caching/Internal/PaddedLong.cs b/src/Orleans.Core/Caching/Internal/PaddedLong.cs index 2f3089a41d7..9f551f40f2b 100644 --- a/src/Orleans.Core/Caching/Internal/PaddedLong.cs +++ b/src/Orleans.Core/Caching/Internal/PaddedLong.cs @@ -6,7 +6,8 @@ namespace Orleans.Caching.Internal; /// /// A long value padded by the size of a CPU cache line to mitigate false sharing. /// -// Source: https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/Counters/PaddedLong.cs +// Derived from BitFaster.Caching by Alex Peck +// https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/Counters/PaddedLong.cs [StructLayout(LayoutKind.Explicit, Size = 2 * Padding.CACHE_LINE_SIZE)] // padding before/between/after fields internal struct PaddedLong { diff --git a/src/Orleans.Core/Caching/Internal/PaddedQueueCount.cs b/src/Orleans.Core/Caching/Internal/PaddedQueueCount.cs index 82debf2673d..ece384f85c9 100644 --- a/src/Orleans.Core/Caching/Internal/PaddedQueueCount.cs +++ b/src/Orleans.Core/Caching/Internal/PaddedQueueCount.cs @@ -3,7 +3,8 @@ namespace Orleans.Caching.Internal; -// Source: https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/Lru/PaddedQueueCount.cs +// Derived from BitFaster.Caching by Alex Peck +// https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/Lru/PaddedQueueCount.cs [DebuggerDisplay("Hot = {Hot}, Warm = {Warm}, Cold = 
{Cold}")] [StructLayout(LayoutKind.Explicit, Size = 4 * Padding.CACHE_LINE_SIZE)] // padding before/between/after fields internal struct PaddedQueueCount diff --git a/src/Orleans.Core/Caching/Internal/Padding.cs b/src/Orleans.Core/Caching/Internal/Padding.cs index 33223df63f2..e76ca399243 100644 --- a/src/Orleans.Core/Caching/Internal/Padding.cs +++ b/src/Orleans.Core/Caching/Internal/Padding.cs @@ -1,6 +1,7 @@ namespace Orleans.Caching.Internal; -// Source: https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/Padding.cs +// Derived from BitFaster.Caching by Alex Peck +// https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/Padding.cs internal static class Padding { #if TARGET_ARM64 || TARGET_LOONGARCH64 diff --git a/src/Orleans.Core/Caching/Internal/Striped64.cs b/src/Orleans.Core/Caching/Internal/Striped64.cs index 3eede1724f1..65550afed3f 100644 --- a/src/Orleans.Core/Caching/Internal/Striped64.cs +++ b/src/Orleans.Core/Caching/Internal/Striped64.cs @@ -78,7 +78,8 @@ namespace Orleans.Caching.Internal; /// Maintains a lazily-initialized table of atomically updated variables, plus an extra /// "base" field. The table size is a power of two. Indexing uses masked thread IDs. 
/// -// Source: https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/Counters/Striped64.cs +// Derived from BitFaster.Caching by Alex Peck +// https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/Counters/Striped64.cs [ExcludeFromCodeCoverage] internal abstract class Striped64 { diff --git a/src/Orleans.Runtime/Utilities/StripedMpscBuffer.cs b/src/Orleans.Runtime/Utilities/StripedMpscBuffer.cs index 3633797c137..959645ca102 100644 --- a/src/Orleans.Runtime/Utilities/StripedMpscBuffer.cs +++ b/src/Orleans.Runtime/Utilities/StripedMpscBuffer.cs @@ -15,7 +15,7 @@ namespace Orleans.Runtime.Utilities; /// writes scale linearly with number of concurrent threads. /// /// -/// Note: this implementation was originally authored by Alex Peck and was copied from BitFaster.Caching: https://github.com/bitfaster/BitFaster.Caching/blob/275b9b072c0218e20f549b769cd183df1374e2ee/BitFaster.Caching/Buffers/StripedMpscBuffer.cs +/// Derived from BitFaster.Caching by Alex Peck: https://github.com/bitfaster/BitFaster.Caching/blob/275b9b072c0218e20f549b769cd183df1374e2ee/BitFaster.Caching/Buffers/StripedMpscBuffer.cs /// [DebuggerDisplay("Count = {Count}/{Capacity}")] internal sealed class StripedMpscBuffer where T : class From 7c0fddccbdeee9dcf5cd4ac5ef0b0cfd6e8d1dbf Mon Sep 17 00:00:00 2001 From: Reuben Bond Date: Thu, 29 May 2025 13:02:55 -0700 Subject: [PATCH 3/5] Remove unnecessary ConcurrentDictionarySize calculation --- .../Caching/ConcurrentLruCache.cs | 7 +- .../Internal/ConcurrentDictionarySize.cs | 220 ------------------ 2 files changed, 2 insertions(+), 225 deletions(-) delete mode 100644 src/Orleans.Core/Caching/Internal/ConcurrentDictionarySize.cs diff --git a/src/Orleans.Core/Caching/ConcurrentLruCache.cs b/src/Orleans.Core/Caching/ConcurrentLruCache.cs index cce9b821f4d..99c1933033d 100644 --- a/src/Orleans.Core/Caching/ConcurrentLruCache.cs +++ 
b/src/Orleans.Core/Caching/ConcurrentLruCache.cs @@ -54,7 +54,6 @@ internal class ConcurrentLruCache : ICache, ICacheMetrics, Concurren /// Initializes a new instance of the ConcurrentLruCore class with the specified capacity. /// /// The capacity. - /// public ConcurrentLruCache(int capacity) : this(capacity, EqualityComparer.Default) { } @@ -64,16 +63,14 @@ public ConcurrentLruCache(int capacity) : this(capacity, EqualityComparer.Def /// /// The capacity. /// The equality comparer. - /// + /// One of the provided arguments was . public ConcurrentLruCache( int capacity, IEqualityComparer comparer) { ArgumentNullException.ThrowIfNull(comparer); _capacity = new CapacityPartition(capacity); - - var dictionaryCapacity = ConcurrentDictionarySize.Estimate(Capacity); - _dictionary = new ConcurrentDictionary(Environment.ProcessorCount, dictionaryCapacity, comparer); + _dictionary = new ConcurrentDictionary(comparer); } // No lock count: https://arbel.net/2013/02/03/best-practices-for-using-concurrentdictionary/ diff --git a/src/Orleans.Core/Caching/Internal/ConcurrentDictionarySize.cs b/src/Orleans.Core/Caching/Internal/ConcurrentDictionarySize.cs deleted file mode 100644 index 09734c83384..00000000000 --- a/src/Orleans.Core/Caching/Internal/ConcurrentDictionarySize.cs +++ /dev/null @@ -1,220 +0,0 @@ -using System; -using System.Collections.Generic; - -namespace Orleans.Caching.Internal; - -// Derived from BitFaster.Caching by Alex Peck -// https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/ConcurrentDictionarySize.cs -internal static class ConcurrentDictionarySize -{ - private static int NextPrimeGreaterThan(int min) - { - foreach (var prime in Primes) - { - if (prime > min) - { - return prime; - } - } - - return min; - } - - /// - /// Estimate the size of the ConcurrentDictionary constructor capacity arg to use for the given desired cache size. 
- /// - /// - /// To minimize collisions, ideal case is is for ConcurrentDictionary to have a prime number of buckets, and - /// for the bucket count to be about 33% greater than the cache capacity (load factor of 0.75). - /// See load factor here: https://en.wikipedia.org/wiki/Hash_table - /// - /// The desired cache size - /// The estimated optimal ConcurrentDictionary capacity - internal static int Estimate(int desiredSize) - { - // Size map entries are approx 4% apart in the worst case, so increase by 29% to target 33%. - // In practice, this leads to the number of buckets being somewhere between 29% and 40% greater - // than cache capacity. - try - { - checked - { - desiredSize = (int)(desiredSize * 1.29); - } - - // When small, exact size hashtable to nearest larger prime number - if (desiredSize < 197) - { - return NextPrimeGreaterThan(desiredSize); - } - - // When large, size to approx 10% of desired size to save memory. Initial value is chosen such - // that 4x ConcurrentDictionary grow operations will select a prime number slightly larger - // than desired size. - foreach (var pair in SizeMap) - { - if (pair.Key > desiredSize) - { - return pair.Value; - } - } - } - catch (OverflowException) - { - // return largest - } - - // Use largest mapping: ConcurrentDictionary will resize to max array size after 4x grow calls. 
- return SizeMap[^1].Value; - } - -#if NETSTANDARD2_0 - internal static int[] Primes = new int[] { -#else - private static ReadOnlySpan Primes => new int[] { -#endif - 3, 7, 11, 17, 23, 29, 37, 47, 59, 71, 89, 107, 131, 163, 197, 239, 293, 353, 431, 521, 631, 761, 919, - 1103, 1327, 1597, 1931, 2333, 2801, 3371, 4049, 4861, 5839, 7013, 8419, 10103, 12143, 14591, - 17519, 21023, 25229, 30293, 36353, 43627, 52361, 62851, 75431, 90523, 108631, 130363, 156437, - 187751, 225307, 270371, 324449, 389357, 467237, 560689, 672827, 807403, 968897, 1162687, 1395263, - 1674319, 2009191, 2411033, 2893249, 3471899, 4166287, 4999559, 5999471, 7199369 - }; - -#if NETSTANDARD2_0 - internal static KeyValuePair[] SizeMap = -#else - private static ReadOnlySpan> SizeMap => -#endif - new KeyValuePair[129] - { - new(197, 197), - new(277, 137), - new(331, 163), - new(359, 179), - new(397, 197), - new(443, 221), - new(499, 247), - new(557, 137), - new(599, 149), - new(677, 167), - new(719, 179), - new(797, 197), - new(839, 209), - new(887, 221), - new(1061, 131), - new(1117, 137), - new(1237, 151), - new(1439, 179), - new(1559, 193), - new(1777, 221), - new(2011, 247), - new(2179, 269), - new(2347, 289), - new(2683, 331), - new(2797, 347), - new(3359, 419), - new(3917, 487), - new(4363, 541), - new(4597, 571), - new(5879, 733), - new(7517, 937), - new(8731, 1087), - new(9839, 1229), - new(17467, 2179), - new(18397, 2297), - new(20357, 2543), - new(24317, 3037), - new(25919, 3239), - new(29759, 3719), - new(31357, 3917), - new(33599, 4199), - new(38737, 4841), - new(41117, 5137), - new(48817, 6101), - new(61819, 7723), - new(72959, 9119), - new(86011, 10747), - new(129277, 16157), - new(140797, 17597), - new(164477, 20557), - new(220411, 27547), - new(233851, 29227), - new(294397, 36797), - new(314879, 39359), - new(338683, 42331), - new(389117, 48637), - new(409597, 51197), - new(436477, 54557), - new(609277, 76157), - new(651517, 81437), - new(737279, 92159), - new(849917, 106237), - 
new(1118203, 139771), - new(1269757, 158717), - new(1440763, 180091), - new(1576957, 197117), - new(1684477, 210557), - new(2293757, 286717), - new(2544637, 318077), - new(2666491, 333307), - new(2846717, 355837), - new(3368957, 421117), - new(3543037, 442877), - new(4472827, 559099), - new(4710397, 588797), - new(5038079, 629759), - new(5763067, 720379), - new(6072317, 759037), - new(6594557, 824317), - new(7913467, 989179), - new(8257531, 1032187), - new(9175037, 1146877), - new(9633787, 1204219), - new(10076159, 1259519), - new(11386877, 1423357), - new(14020603, 1752571), - new(16056317, 2007037), - new(19496957, 2437117), - new(20848637, 2606077), - new(24084479, 3010559), - new(27934717, 3491837), - new(29589499, 3698683), - new(32788477, 4098557), - new(36044797, 4505597), - new(38051837, 4756477), - new(43581437, 5447677), - new(51814397, 6476797), - new(56688637, 7086077), - new(60948479, 7618559), - new(69631997, 8703997), - new(75366397, 9420797), - new(78643199, 9830399), - new(96337919, 12042239), - new(106168319, 13271039), - new(115671037, 14458877), - new(132382717, 16547837), - new(144179197, 18022397), - new(165150719, 20643839), - new(178257917, 22282237), - new(188743679, 23592959), - new(209715197, 26214397), - new(254279677, 31784957), - new(297271291, 37158907), - new(314572799, 39321599), - new(385351679, 48168959), - new(453509117, 56688637), - new(517472251, 64684027), - new(644874239, 80609279), - new(673710077, 84213757), - new(770703359, 96337919), - new(849346559, 106168319), - new(903086077, 112885757), - new(1145044987, 143130619), - new(1233125371, 154140667), - new(1321205759, 165150719), - new(1394606077, 174325757), - new(1635778559, 204472319), - new(1855979519, 231997439), - new(2003828731, 250478587), - }; -} From 74c8d81f45ca0b32a59220b0c98c08a3e253b35e Mon Sep 17 00:00:00 2001 From: Reuben Bond Date: Thu, 29 May 2025 13:06:39 -0700 Subject: [PATCH 4/5] Remove unnecessary ConcurrentDictionarySize calculation --- 
.../Caching/ConcurrentLruCache.cs | 30 +++++++------------ 1 file changed, 11 insertions(+), 19 deletions(-) diff --git a/src/Orleans.Core/Caching/ConcurrentLruCache.cs b/src/Orleans.Core/Caching/ConcurrentLruCache.cs index 99c1933033d..673bbba872b 100644 --- a/src/Orleans.Core/Caching/ConcurrentLruCache.cs +++ b/src/Orleans.Core/Caching/ConcurrentLruCache.cs @@ -36,14 +36,21 @@ namespace Orleans.Caching; /// When cold is full, cold tail is moved to warm head or removed from dictionary on depending on WasAccessed. /// /// -internal class ConcurrentLruCache : ICache, ICacheMetrics, ConcurrentLruCache.ITestAccessor +/// +/// Initializes a new instance of the ConcurrentLruCore class with the specified concurrencyLevel, capacity, equality comparer, item policy and telemetry policy. +/// +/// The capacity. +/// The equality comparer. +internal class ConcurrentLruCache( + int capacity, + IEqualityComparer? comparer) : ICache, ICacheMetrics, ConcurrentLruCache.ITestAccessor where K : notnull { - private readonly ConcurrentDictionary _dictionary; + private readonly ConcurrentDictionary _dictionary = new(concurrencyLevel: -1, capacity: capacity, comparer: comparer); private readonly ConcurrentQueue _hotQueue = new(); private readonly ConcurrentQueue _warmQueue = new(); private readonly ConcurrentQueue _coldQueue = new(); - private readonly CapacityPartition _capacity; + private readonly CapacityPartition _capacity = new(capacity); private readonly TelemetryPolicy _telemetryPolicy = new(); // maintain count outside ConcurrentQueue, since ConcurrentQueue.Count holds a global lock @@ -54,25 +61,10 @@ internal class ConcurrentLruCache : ICache, ICacheMetrics, Concurren /// Initializes a new instance of the ConcurrentLruCore class with the specified capacity. /// /// The capacity. 
- public ConcurrentLruCache(int capacity) : this(capacity, EqualityComparer.Default) + public ConcurrentLruCache(int capacity) : this(capacity, comparer: null) { } - /// - /// Initializes a new instance of the ConcurrentLruCore class with the specified concurrencyLevel, capacity, equality comparer, item policy and telemetry policy. - /// - /// The capacity. - /// The equality comparer. - /// One of the provided arguments was . - public ConcurrentLruCache( - int capacity, - IEqualityComparer comparer) - { - ArgumentNullException.ThrowIfNull(comparer); - _capacity = new CapacityPartition(capacity); - _dictionary = new ConcurrentDictionary(comparer); - } - // No lock count: https://arbel.net/2013/02/03/best-practices-for-using-concurrentdictionary/ /// public int Count => _dictionary.Where(_ => true).Count(); From ad38fa6d8f5fc701df78d403aae61e5f50eb6b41 Mon Sep 17 00:00:00 2001 From: Reuben Bond Date: Thu, 29 May 2025 13:51:54 -0700 Subject: [PATCH 5/5] Remove unused interface & clean up --- .../Caching/ConcurrentLruCache.cs | 2 +- .../Caching/Internal/CacheDebugView.cs | 5 +- src/Orleans.Core/Caching/Internal/ICache.cs | 119 ------------------ .../Caching/Internal/Striped64.cs | 14 +-- .../Caching/ConcurrentLruSoakTests.cs | 83 +++++------- .../Caching/ConcurrentLruTests.cs | 9 +- 6 files changed, 38 insertions(+), 194 deletions(-) delete mode 100644 src/Orleans.Core/Caching/Internal/ICache.cs diff --git a/src/Orleans.Core/Caching/ConcurrentLruCache.cs b/src/Orleans.Core/Caching/ConcurrentLruCache.cs index 673bbba872b..88d64cbfc66 100644 --- a/src/Orleans.Core/Caching/ConcurrentLruCache.cs +++ b/src/Orleans.Core/Caching/ConcurrentLruCache.cs @@ -43,7 +43,7 @@ namespace Orleans.Caching; /// The equality comparer. internal class ConcurrentLruCache( int capacity, - IEqualityComparer? comparer) : ICache, ICacheMetrics, ConcurrentLruCache.ITestAccessor + IEqualityComparer? 
comparer) : IEnumerable>, ICacheMetrics, ConcurrentLruCache.ITestAccessor where K : notnull { private readonly ConcurrentDictionary _dictionary = new(concurrencyLevel: -1, capacity: capacity, comparer: comparer); diff --git a/src/Orleans.Core/Caching/Internal/CacheDebugView.cs b/src/Orleans.Core/Caching/Internal/CacheDebugView.cs index 8cfc1f4bc3e..3c83d0d9e95 100644 --- a/src/Orleans.Core/Caching/Internal/CacheDebugView.cs +++ b/src/Orleans.Core/Caching/Internal/CacheDebugView.cs @@ -1,5 +1,6 @@ #nullable enable using System; +using System.Collections.Concurrent; using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; @@ -11,9 +12,9 @@ namespace Orleans.Caching.Internal; internal sealed class CacheDebugView where K : notnull { - private readonly ICache _cache; + private readonly ConcurrentLruCache _cache; - public CacheDebugView(ICache cache) + public CacheDebugView(ConcurrentLruCache cache) { ArgumentNullException.ThrowIfNull(cache); _cache = cache; diff --git a/src/Orleans.Core/Caching/Internal/ICache.cs b/src/Orleans.Core/Caching/Internal/ICache.cs deleted file mode 100644 index c77cd100a66..00000000000 --- a/src/Orleans.Core/Caching/Internal/ICache.cs +++ /dev/null @@ -1,119 +0,0 @@ -using System; -using System.Collections.Generic; -using System.Diagnostics.CodeAnalysis; - -namespace Orleans.Caching.Internal; - -/// -/// Represents a generic cache of key/value pairs. -/// -/// The type of keys in the cache. -/// The type of values in the cache. -// Derived from BitFaster.Caching by Alex Peck -// https://github.com/bitfaster/BitFaster.Caching/blob/5b2d64a1afcc251787fbe231c6967a62820fc93c/BitFaster.Caching/ICache.cs -internal interface ICache : IEnumerable> -{ - /// - /// Gets the number of items currently held in the cache. - /// - int Count { get; } - - /// - /// Gets the cache metrics, if configured. - /// - ICacheMetrics Metrics { get; } - - /// - /// Gets a collection containing the keys in the cache. 
- /// - ICollection Keys { get; } - - /// - /// Attempts to add the specified key and value to the cache if the key does not already exist. - /// - /// The key of the element to add. - /// The value of the element to add. - /// true if the key/value pair was added to the cache; otherwise, false. - bool TryAdd(K key, V value); - - /// - /// Attempts to get the value associated with the specified key from the cache. - /// - /// The key of the value to get. - /// When this method returns, contains the object from the cache that has the specified key, or the default value of the type if the operation failed. - /// true if the key was found in the cache; otherwise, false. - bool TryGet(K key, [MaybeNullWhen(false)] out V value); - - /// - /// Gets the value associated with the specified key from the cache. - /// - /// The key of the value to get. - /// The value. - V Get(K key); - - /// - /// Adds a key/value pair to the cache if the key does not already exist. Returns the new value, or the - /// existing value if the key already exists. - /// - /// The key of the element to add. - /// The factory function used to generate a value for the key. - /// The value for the key. This will be either the existing value for the key if the key is already - /// in the cache, or the new value if the key was not in the cache. - V GetOrAdd(K key, Func valueFactory); - - /// - /// Adds a key/value pair to the cache if the key does not already exist. Returns the new value, or the - /// existing value if the key already exists. - /// - /// The type of an argument to pass into valueFactory. - /// The key of the element to add. - /// The factory function used to generate a value for the key. - /// An argument value to pass into valueFactory. - /// The value for the key. This will be either the existing value for the key if the key is already - /// in the cache, or the new value if the key was not in the cache. 
- /// The default implementation given here is the fallback that provides backwards compatibility for classes that implement ICache on prior versions - V GetOrAdd(K key, Func valueFactory, TArg factoryArgument) => GetOrAdd(key, k => valueFactory(k, factoryArgument)); - - /// - /// Attempts to remove and return the value that has the specified key. - /// - /// The key of the element to remove. - /// When this method returns, contains the object removed, or the default value of the value type if key does not exist. - /// true if the object was removed successfully; otherwise, false. - bool TryRemove(K key, [MaybeNullWhen(false)] out V value); - - /// - /// Attempts to remove the specified key value pair. - /// - /// The item to remove. - /// true if the item was removed successfully; otherwise, false. - bool TryRemove(KeyValuePair item); - - /// - /// Attempts to remove the value that has the specified key. - /// - /// The key of the element to remove. - /// true if the object was removed successfully; otherwise, false. - bool TryRemove(K key); - - /// - /// Attempts to update the value that has the specified key. - /// - /// The key of the element to update. - /// The new value. - /// true if the object was updated successfully; otherwise, false. - bool TryUpdate(K key, V value); - - /// - /// Adds a key/value pair to the cache if the key does not already exist, or updates a key/value pair if the - /// key already exists. - /// - /// The key of the element to update. - /// The new value. - void AddOrUpdate(K key, V value); - - /// - /// Removes all keys and values from the cache. - /// - void Clear(); -} diff --git a/src/Orleans.Core/Caching/Internal/Striped64.cs b/src/Orleans.Core/Caching/Internal/Striped64.cs index 65550afed3f..7f329ac8ef6 100644 --- a/src/Orleans.Core/Caching/Internal/Striped64.cs +++ b/src/Orleans.Core/Caching/Internal/Striped64.cs @@ -101,21 +101,13 @@ internal abstract class Striped64 /// /// A wrapper for PaddedLong. 
/// - protected sealed class Cell + /// The value. + protected sealed class Cell(long value) { /// /// The value of the cell. /// - public PaddedLong Value; - - /// - /// Initializes a new cell with the specified value. - /// - /// The value. - public Cell(long x) - { - Value = new PaddedLong() { Value = x }; - } + public PaddedLong Value = new() { Value = value }; } /** diff --git a/test/NonSilo.Tests/Caching/ConcurrentLruSoakTests.cs b/test/NonSilo.Tests/Caching/ConcurrentLruSoakTests.cs index 1ea77f0f5a1..16ad7407a16 100644 --- a/test/NonSilo.Tests/Caching/ConcurrentLruSoakTests.cs +++ b/test/NonSilo.Tests/Caching/ConcurrentLruSoakTests.cs @@ -174,7 +174,7 @@ await Threaded.Run(4, () => testOutputHelper.WriteLine($"{lruVT.HotCount} {lruVT.WarmCount} {lruVT.ColdCount}"); testOutputHelper.WriteLine(string.Join(" ", lruVT.Keys)); - new ConcurrentLruCacheIntegrityChecker(lruVT).Validate(); + ConcurrentLruCacheIntegrityChecker.Validate(lruVT); } } @@ -288,7 +288,7 @@ public async Task WhenValueIsBigStructNoLiveLock(int _) await setTask; } - private void Setter(ICache cache, CancellationToken cancelToken, TaskCompletionSource started) + private void Setter(ConcurrentLruCache cache, CancellationToken cancelToken, TaskCompletionSource started) { started.SetResult(true); @@ -304,7 +304,7 @@ private void Setter(ICache cache, CancellationToken cancelToken, Task } } - private void Checker(ICache cache, CancellationTokenSource source) + private void Checker(ConcurrentLruCache cache, CancellationTokenSource source) { for (var count = 0; count < 100_000; ++count) { @@ -314,69 +314,46 @@ private void Checker(ICache cache, CancellationTokenSource source) source.Cancel(); } - private void RunIntegrityCheck() => new ConcurrentLruCacheIntegrityChecker(lru).Validate(); + private void RunIntegrityCheck() => ConcurrentLruCacheIntegrityChecker.Validate(lru); - private class ConcurrentLruCacheIntegrityChecker + private static class ConcurrentLruCacheIntegrityChecker { - private readonly 
ConcurrentLruCache _cache; - - private readonly ConcurrentDictionary.LruItem> dictionary; - private readonly ConcurrentQueue.LruItem> hotQueue; - private readonly ConcurrentQueue.LruItem> warmQueue; - private readonly ConcurrentQueue.LruItem> coldQueue; - - private static FieldInfo dictionaryField = typeof(ConcurrentLruCache).GetField("_dictionary", BindingFlags.NonPublic | BindingFlags.Instance); - - private static FieldInfo hotQueueField = typeof(ConcurrentLruCache).GetField("_hotQueue", BindingFlags.NonPublic | BindingFlags.Instance); - private static FieldInfo warmQueueField = typeof(ConcurrentLruCache).GetField("_warmQueue", BindingFlags.NonPublic | BindingFlags.Instance); - private static FieldInfo coldQueueField = typeof(ConcurrentLruCache).GetField("_coldQueue", BindingFlags.NonPublic | BindingFlags.Instance); - - public ConcurrentLruCacheIntegrityChecker(ConcurrentLruCache cache) - { - this._cache = cache; - - // get queues via reflection - this.dictionary = (ConcurrentDictionary.LruItem>)dictionaryField.GetValue(cache); - this.hotQueue = (ConcurrentQueue.LruItem>)hotQueueField.GetValue(cache); - this.warmQueue = (ConcurrentQueue.LruItem>)warmQueueField.GetValue(cache); - this.coldQueue = (ConcurrentQueue.LruItem>)coldQueueField.GetValue(cache); - } - - public void Validate() + public static void Validate(ConcurrentLruCache cache) { + ConcurrentLruCache.ITestAccessor testAccessor = cache; // queue counters must be consistent with queues - this.hotQueue.Count.Should().Be(_cache.HotCount, "hot queue has a corrupted count"); - this.warmQueue.Count.Should().Be(_cache.WarmCount, "warm queue has a corrupted count"); - this.coldQueue.Count.Should().Be(_cache.ColdCount, "cold queue has a corrupted count"); + testAccessor.HotQueue.Count.Should().Be(cache.HotCount, "hot queue has a corrupted count"); + testAccessor.WarmQueue.Count.Should().Be(cache.WarmCount, "warm queue has a corrupted count"); + testAccessor.ColdQueue.Count.Should().Be(cache.ColdCount, "cold queue 
has a corrupted count"); // cache contents must be consistent with queued items - ValidateQueue(_cache, this.hotQueue, "hot"); - ValidateQueue(_cache, this.warmQueue, "warm"); - ValidateQueue(_cache, this.coldQueue, "cold"); + ValidateQueue(testAccessor.HotQueue, "hot"); + ValidateQueue(testAccessor.WarmQueue, "warm"); + ValidateQueue(testAccessor.ColdQueue, "cold"); // cache must be within capacity - _cache.Count.Should().BeLessThanOrEqualTo(_cache.Capacity + 1, "capacity out of valid range"); - } + cache.Count.Should().BeLessThanOrEqualTo(cache.Capacity + 1, "capacity out of valid range"); - private void ValidateQueue(ConcurrentLruCache cache, ConcurrentQueue.LruItem> queue, string queueName) - { - foreach (var item in queue) + void ValidateQueue(ConcurrentQueue.LruItem> queue, string queueName) { - if (item.WasRemoved) + foreach (var item in queue) { - // It is possible for the queues to contain 2 (or more) instances of the same key/item. One that was removed, - // and one that was added after the other was removed. - // In this case, the dictionary may contain the value only if the queues contain an entry for that key marked as WasRemoved == false. - if (dictionary.TryGetValue(item.Key, out var value)) + if (item.WasRemoved) { - hotQueue.Union(warmQueue).Union(coldQueue) - .Any(i => i.Key.Equals(item.Key) && !i.WasRemoved) - .Should().BeTrue($"{queueName} removed item {item.Key} was not removed"); + // It is possible for the queues to contain 2 (or more) instances of the same key/item. One that was removed, + // and one that was added after the other was removed. + // In this case, the dictionary may contain the value only if the queues contain an entry for that key marked as WasRemoved == false. 
+ if (testAccessor.Dictionary.TryGetValue(item.Key, out var value)) + { + testAccessor.HotQueue.Union(testAccessor.WarmQueue).Union(testAccessor.ColdQueue) + .Any(i => i.Key.Equals(item.Key) && !i.WasRemoved) + .Should().BeTrue($"{queueName} removed item {item.Key} was not removed"); + } + } + else + { + testAccessor.Dictionary.TryGetValue(item.Key, out var value).Should().BeTrue($"{queueName} item {item.Key} was not present"); } - } - else - { - dictionary.TryGetValue(item.Key, out var value).Should().BeTrue($"{queueName} item {item.Key} was not present"); } } } diff --git a/test/NonSilo.Tests/Caching/ConcurrentLruTests.cs b/test/NonSilo.Tests/Caching/ConcurrentLruTests.cs index f1fb84038db..745d6537a6c 100644 --- a/test/NonSilo.Tests/Caching/ConcurrentLruTests.cs +++ b/test/NonSilo.Tests/Caching/ConcurrentLruTests.cs @@ -7,6 +7,7 @@ namespace NonSilo.Tests.Caching; +[TestCategory("BVT")] public class ConcurrentLruTests(ITestOutputHelper testOutputHelper) { private readonly ITestOutputHelper _testOutputHelper = testOutputHelper; @@ -29,14 +30,6 @@ public void WhenCapacityIsLessThan3CtorThrows() constructor.Should().Throw(); } - [Fact] - public void WhenComparerIsNullCtorThrows() - { - Action constructor = () => { var x = new ConcurrentLruCache(3, null); }; - - constructor.Should().Throw(); - } - [Fact] public void WhenCapacityIs4HotHasCapacity1AndColdHasCapacity1() {