diff --git a/eng/MSBuild/LegacySupport.props b/eng/MSBuild/LegacySupport.props
index 8ebacbd60f7..7f1f9292431 100644
--- a/eng/MSBuild/LegacySupport.props
+++ b/eng/MSBuild/LegacySupport.props
@@ -15,7 +15,7 @@
-
+
diff --git a/eng/packages/TestOnly.props b/eng/packages/TestOnly.props
index 1956cb4d6d2..3aa569bd7fc 100644
--- a/eng/packages/TestOnly.props
+++ b/eng/packages/TestOnly.props
@@ -16,6 +16,7 @@
+
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/HybridCacheOptions.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/HybridCacheOptions.cs
index 982ea55a6af..473f1e3c46d 100644
--- a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/HybridCacheOptions.cs
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/HybridCacheOptions.cs
@@ -11,11 +11,13 @@ public class HybridCacheOptions
private const int ShiftBytesToMibiBytes = 20;
///
- /// Gets or sets the default global options to be applied to operations; if options are
- /// specified at the individual call level, the non-null values are merged (with the per-call
- /// options being used in preference to the global options). If no value is specified for a given
- /// option (globally or per-call), the implementation may choose a reasonable default.
+ /// Gets or sets the default global options to be applied to operations.
///
+ ///
+ /// If options are specified at the individual call level, the non-null values are merged
+ /// (with the per-call options being used in preference to the global options). If no value is
+ /// specified for a given option (globally or per-call), the implementation can choose a reasonable default.
+ ///
public HybridCacheEntryOptions? DefaultEntryOptions { get; set; }
///
@@ -24,21 +26,35 @@ public class HybridCacheOptions
public bool DisableCompression { get; set; }
///
- /// Gets or sets the maximum size of cache items; attempts to store values over this size will be logged
- /// and the value will not be stored in cache.
+ /// Gets or sets the maximum size of cache items.
///
- /// The default value is 1 MiB.
+ ///
+ /// The maximum size of cache items. The default value is 1 MiB.
+ ///
+ ///
+ /// Attempts to store values over this size are logged,
+ /// and the value isn't stored in the cache.
+ ///
public long MaximumPayloadBytes { get; set; } = 1 << ShiftBytesToMibiBytes; // 1MiB
///
- /// Gets or sets the maximum permitted length (in characters) of keys; attempts to use keys over this size will be logged.
+ /// Gets or sets the maximum permitted length (in characters) of keys.
///
- /// The default value is 1024 characters.
+ ///
+ /// The maximum permitted length of keys, in characters. The default value is 1024 characters.
+ ///
+ /// Attempts to use keys over this size are logged.
public int MaximumKeyLength { get; set; } = 1024; // characters
///
- /// Gets or sets a value indicating whether to use "tags" data as dimensions on metric reporting; if enabled, care should be used to ensure that
- /// tags do not contain data that should not be visible in metrics systems.
+ /// Gets or sets a value indicating whether to use "tags" data as dimensions on metric reporting.
///
+ ///
+ /// to use "tags" data as dimensions on metric reporting; otherwise, .
+ ///
+ ///
+ /// If enabled, take care to ensure that tags don't contain data that
+ /// should not be visible in metrics systems.
+ ///
public bool ReportTagMetrics { get; set; }
}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/BufferChunk.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/BufferChunk.cs
index 0d7d54cfdd6..d17eacb3484 100644
--- a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/BufferChunk.cs
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/BufferChunk.cs
@@ -15,11 +15,13 @@ namespace Microsoft.Extensions.Caching.Hybrid.Internal;
internal readonly struct BufferChunk
{
private const int FlagReturnToPool = (1 << 31);
-
private readonly int _lengthAndPoolFlag;
- public byte[]? Array { get; } // null for default
+ public byte[]? OversizedArray { get; } // null for default
+
+ public bool HasValue => OversizedArray is not null;
+ public int Offset { get; }
public int Length => _lengthAndPoolFlag & ~FlagReturnToPool;
public bool ReturnToPool => (_lengthAndPoolFlag & FlagReturnToPool) != 0;
@@ -27,8 +29,9 @@ internal readonly struct BufferChunk
public BufferChunk(byte[] array)
{
Debug.Assert(array is not null, "expected valid array input");
- Array = array;
+ OversizedArray = array;
_lengthAndPoolFlag = array!.Length;
+ Offset = 0;
// assume not pooled, if exact-sized
// (we don't expect array.Length to be negative; we're really just saying
@@ -39,11 +42,12 @@ public BufferChunk(byte[] array)
Debug.Assert(Length == array.Length, "array length not respected");
}
- public BufferChunk(byte[] array, int length, bool returnToPool)
+ public BufferChunk(byte[] array, int offset, int length, bool returnToPool)
{
Debug.Assert(array is not null, "expected valid array input");
Debug.Assert(length >= 0, "expected valid length");
- Array = array;
+ OversizedArray = array;
+ Offset = offset;
_lengthAndPoolFlag = length | (returnToPool ? FlagReturnToPool : 0);
Debug.Assert(ReturnToPool == returnToPool, "return-to-pool not respected");
Debug.Assert(Length == length, "length not respected");
@@ -58,7 +62,7 @@ public byte[] ToArray()
}
var copy = new byte[length];
- Buffer.BlockCopy(Array!, 0, copy, 0, length);
+ Buffer.BlockCopy(OversizedArray!, Offset, copy, 0, length);
return copy;
// Note on nullability of Array; the usage here is that a non-null array
@@ -73,15 +77,19 @@ internal void RecycleIfAppropriate()
{
if (ReturnToPool)
{
- ArrayPool.Shared.Return(Array!);
+ ArrayPool.Shared.Return(OversizedArray!);
}
Unsafe.AsRef(in this) = default; // anti foot-shotgun double-return guard; not 100%, but worth doing
- Debug.Assert(Array is null && !ReturnToPool, "expected clean slate after recycle");
+ Debug.Assert(OversizedArray is null && !ReturnToPool, "expected clean slate after recycle");
}
+ internal ArraySegment AsArraySegment() => Length == 0 ? default! : new(OversizedArray!, Offset, Length);
+
+ internal ReadOnlySpan AsSpan() => Length == 0 ? default : new(OversizedArray!, Offset, Length);
+
// get the data as a ROS; for note on null-logic of Array!, see comment in ToArray
- internal ReadOnlySequence AsSequence() => Length == 0 ? default : new ReadOnlySequence(Array!, 0, Length);
+ internal ReadOnlySequence AsSequence() => Length == 0 ? default : new ReadOnlySequence(OversizedArray!, Offset, Length);
internal BufferChunk DoNotReturnToPool()
{
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.CacheItem.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.CacheItem.cs
index 5585b9b2a29..59d2f59df41 100644
--- a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.CacheItem.cs
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.CacheItem.cs
@@ -3,8 +3,10 @@
using System;
using System.Diagnostics;
+using System.Runtime.CompilerServices;
using System.Threading;
using Microsoft.Extensions.Caching.Memory;
+using Microsoft.Extensions.Logging;
namespace Microsoft.Extensions.Caching.Hybrid.Internal;
@@ -12,20 +14,35 @@ internal partial class DefaultHybridCache
{
internal abstract class CacheItem
{
+ private readonly long _creationTimestamp;
+
+ protected CacheItem(long creationTimestamp, TagSet tags)
+ {
+ Tags = tags;
+ _creationTimestamp = creationTimestamp;
+ }
+
private int _refCount = 1; // the number of pending operations against this cache item
public abstract bool DebugIsImmutable { get; }
+ public long CreationTimestamp => _creationTimestamp;
+
+ public TagSet Tags { get; }
+
// Note: the ref count is the number of callers anticipating this value at any given time. Initially,
// it is one for a simple "get the value" flow, but if another call joins with us, it'll be incremented.
// If either cancels, it will get decremented, with the entire flow being cancelled if it ever becomes
// zero.
// This counter also drives cache lifetime, with the cache itself incrementing the count by one. In the
// case of mutable data, cache eviction may reduce this to zero (in cooperation with any concurrent readers,
- // who incr/decr around their fetch), allowing safe buffer recycling.
+ // who increment/decrement around their fetch), allowing safe buffer recycling.
internal int RefCount => Volatile.Read(ref _refCount);
+ internal void UnsafeSetCreationTimestamp(long timestamp)
+ => Unsafe.AsRef(in _creationTimestamp) = timestamp;
+
internal static readonly PostEvictionDelegate SharedOnEviction = static (key, value, reason, state) =>
{
if (value is CacheItem item)
@@ -87,15 +104,25 @@ protected virtual void OnFinalRelease() // any required release semantics
internal abstract class CacheItem : CacheItem
{
+ protected CacheItem(long creationTimestamp, TagSet tags)
+ : base(creationTimestamp, tags)
+ {
+ }
+
public abstract bool TryGetSize(out long size);
- // attempt to get a value that was *not* previously reserved
- public abstract bool TryGetValue(out T value);
+ // Attempt to get a value that was *not* previously reserved.
+ // Note on ILogger usage: we don't want to propagate and store this everywhere.
+ // It is used for reporting deserialization problems - pass it as needed.
+ // (CacheItem gets into the IMemoryCache - let's minimize the onward reachable set
+ // of that cache, by only handing it leaf nodes of a "tree", not a "graph" with
+ // backwards access - we can also limit object size at the same time)
+ public abstract bool TryGetValue(ILogger log, out T value);
// get a value that *was* reserved, countermanding our reservation in the process
- public T GetReservedValue()
+ public T GetReservedValue(ILogger log)
{
- if (!TryGetValue(out var value))
+ if (!TryGetValue(log, out var value))
{
Throw();
}
@@ -106,6 +133,7 @@ public T GetReservedValue()
static void Throw() => throw new ObjectDisposedException("The cache item has been recycled before the value was obtained");
}
- internal static CacheItem Create() => ImmutableTypeCache.IsImmutable ? new ImmutableCacheItem() : new MutableCacheItem();
+ internal static CacheItem Create(long creationTimestamp, TagSet tags) => ImmutableTypeCache.IsImmutable
+ ? new ImmutableCacheItem(creationTimestamp, tags) : new MutableCacheItem(creationTimestamp, tags);
}
}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.Debug.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.Debug.cs
index a9901103555..e5125fb8acf 100644
--- a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.Debug.cs
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.Debug.cs
@@ -54,7 +54,6 @@ private partial class MutableCacheItem
#endif
[Conditional("DEBUG")]
- [SuppressMessage("Performance", "CA1822:Mark members as static", Justification = "Instance state used in debug")]
internal void DebugOnlyTrackBuffer(DefaultHybridCache cache)
{
#if DEBUG
@@ -63,11 +62,12 @@ internal void DebugOnlyTrackBuffer(DefaultHybridCache cache)
{
_cache?.DebugOnlyIncrementOutstandingBuffers();
}
+#else
+ _ = this; // dummy just to prevent CA1822, never hit
#endif
}
[Conditional("DEBUG")]
- [SuppressMessage("Performance", "CA1822:Mark members as static", Justification = "Instance state used in debug")]
private void DebugOnlyDecrementOutstandingBuffers()
{
#if DEBUG
@@ -75,6 +75,8 @@ private void DebugOnlyDecrementOutstandingBuffers()
{
_cache?.DebugOnlyDecrementOutstandingBuffers();
}
+#else
+ _ = this; // dummy just to prevent CA1822, never hit
#endif
}
}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.ImmutableCacheItem.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.ImmutableCacheItem.cs
index 9ae8468ba29..fa996ee41bc 100644
--- a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.ImmutableCacheItem.cs
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.ImmutableCacheItem.cs
@@ -2,6 +2,7 @@
// The .NET Foundation licenses this file to you under the MIT license.
using System.Threading;
+using Microsoft.Extensions.Logging;
namespace Microsoft.Extensions.Caching.Hybrid.Internal;
@@ -11,6 +12,11 @@ internal partial class DefaultHybridCache
{
private static ImmutableCacheItem? _sharedDefault;
+ public ImmutableCacheItem(long creationTimestamp, TagSet tags)
+ : base(creationTimestamp, tags)
+ {
+ }
+
private T _value = default!; // deferred until SetValue
public long Size { get; private set; } = -1;
@@ -24,7 +30,7 @@ public static ImmutableCacheItem GetReservedShared()
ImmutableCacheItem? obj = Volatile.Read(ref _sharedDefault);
if (obj is null || !obj.TryReserve())
{
- obj = new();
+ obj = new(0, TagSet.Empty); // timestamp doesn't matter - not used in L1/L2
_ = obj.TryReserve(); // this is reliable on a new instance
Volatile.Write(ref _sharedDefault, obj);
}
@@ -38,7 +44,7 @@ public void SetValue(T value, long size)
Size = size;
}
- public override bool TryGetValue(out T value)
+ public override bool TryGetValue(ILogger log, out T value)
{
value = _value;
return true; // always available
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.L2.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.L2.cs
index 1e694448737..c5182035330 100644
--- a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.L2.cs
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.L2.cs
@@ -2,6 +2,8 @@
// The .NET Foundation licenses this file to you under the MIT license.
using System;
+using System.Buffers;
+using System.Buffers.Binary;
using System.Diagnostics;
using System.Diagnostics.CodeAnalysis;
using System.Runtime.CompilerServices;
@@ -14,14 +16,24 @@ namespace Microsoft.Extensions.Caching.Hybrid.Internal;
internal partial class DefaultHybridCache
{
+ private const int MaxCacheDays = 1000;
+ private const string TagKeyPrefix = "__MSFT_HCT__";
+ private static readonly DistributedCacheEntryOptions _tagInvalidationEntryOptions = new() { AbsoluteExpirationRelativeToNow = TimeSpan.FromDays(MaxCacheDays) };
+
+ private static readonly TimeSpan _defaultTimeout = TimeSpan.FromHours(1);
+
[SuppressMessage("Performance", "CA1849:Call async methods when in an async method", Justification = "Manual sync check")]
[SuppressMessage("Usage", "VSTHRD003:Avoid awaiting foreign Tasks", Justification = "Manual sync check")]
- internal ValueTask GetFromL2Async(string key, CancellationToken token)
+ [SuppressMessage("Design", "CA1031:Do not catch general exception types", Justification = "Explicit async exception handling")]
+ [SuppressMessage("Reliability", "CA2000:Dispose objects before losing scope", Justification = "Deliberate recycle only on success")]
+ internal ValueTask GetFromL2DirectAsync(string key, CancellationToken token)
{
switch (GetFeatures(CacheFeatures.BackendCache | CacheFeatures.BackendBuffers))
{
case CacheFeatures.BackendCache: // legacy byte[]-based
+
var pendingLegacy = _backendCache!.GetAsync(key, token);
+
#if NETCOREAPP2_0_OR_GREATER || NETSTANDARD2_1_OR_GREATER
if (!pendingLegacy.IsCompletedSuccessfully)
#else
@@ -36,6 +48,7 @@ internal ValueTask GetFromL2Async(string key, CancellationToken tok
case CacheFeatures.BackendCache | CacheFeatures.BackendBuffers: // IBufferWriter-based
RecyclableArrayBufferWriter writer = RecyclableArrayBufferWriter.Create(MaximumPayloadBytes);
var cache = Unsafe.As(_backendCache!); // type-checked already
+
var pendingBuffers = cache.TryGetAsync(key, writer, token);
if (!pendingBuffers.IsCompletedSuccessfully)
{
@@ -43,13 +56,13 @@ internal ValueTask GetFromL2Async(string key, CancellationToken tok
}
BufferChunk result = pendingBuffers.GetAwaiter().GetResult()
- ? new(writer.DetachCommitted(out var length), length, returnToPool: true)
+ ? new(writer.DetachCommitted(out var length), 0, length, returnToPool: true)
: default;
writer.Dispose(); // it is not accidental that this isn't "using"; avoid recycling if not 100% sure what happened
return new(result);
}
- return default;
+ return default; // treat as a "miss"
static async Task AwaitedLegacyAsync(Task pending, DefaultHybridCache @this)
{
@@ -60,35 +73,114 @@ static async Task AwaitedLegacyAsync(Task pending, Default
static async Task AwaitedBuffersAsync(ValueTask pending, RecyclableArrayBufferWriter writer)
{
BufferChunk result = await pending.ConfigureAwait(false)
- ? new(writer.DetachCommitted(out var length), length, returnToPool: true)
+ ? new(writer.DetachCommitted(out var length), 0, length, returnToPool: true)
: default;
writer.Dispose(); // it is not accidental that this isn't "using"; avoid recycling if not 100% sure what happened
return result;
}
}
- internal ValueTask SetL2Async(string key, in BufferChunk buffer, HybridCacheEntryOptions? options, CancellationToken token)
+ internal ValueTask SetL2Async(string key, CacheItem cacheItem, in BufferChunk buffer, HybridCacheEntryOptions? options, CancellationToken token)
+ => HasBackendCache ? WritePayloadAsync(key, cacheItem, buffer, options, token) : default;
+
+ internal ValueTask SetDirectL2Async(string key, in BufferChunk buffer, DistributedCacheEntryOptions options, CancellationToken token)
{
- Debug.Assert(buffer.Array is not null, "array should be non-null");
+ Debug.Assert(buffer.OversizedArray is not null, "array should be non-null");
switch (GetFeatures(CacheFeatures.BackendCache | CacheFeatures.BackendBuffers))
{
case CacheFeatures.BackendCache: // legacy byte[]-based
- var arr = buffer.Array!;
- if (arr.Length != buffer.Length)
+ var arr = buffer.OversizedArray!;
+ if (buffer.Offset != 0 || arr.Length != buffer.Length)
{
// we'll need a right-sized snapshot
arr = buffer.ToArray();
}
- return new(_backendCache!.SetAsync(key, arr, GetOptions(options), token));
+ return new(_backendCache!.SetAsync(key, arr, options, token));
case CacheFeatures.BackendCache | CacheFeatures.BackendBuffers: // ReadOnlySequence-based
var cache = Unsafe.As(_backendCache!); // type-checked already
- return cache.SetAsync(key, buffer.AsSequence(), GetOptions(options), token);
+ return cache.SetAsync(key, buffer.AsSequence(), options, token);
}
return default;
}
+ [SuppressMessage("Performance", "CA1849:Call async methods when in an async method", Justification = "Manual async core implementation")]
+ internal ValueTask InvalidateL2TagAsync(string tag, long timestamp, CancellationToken token)
+ {
+ if (!HasBackendCache)
+ {
+ return default; // no L2
+ }
+
+ byte[] oversized = ArrayPool.Shared.Rent(sizeof(long));
+ BinaryPrimitives.WriteInt64LittleEndian(oversized, timestamp);
+ var pending = SetDirectL2Async(TagKeyPrefix + tag, new BufferChunk(oversized, 0, sizeof(long), false), _tagInvalidationEntryOptions, token);
+
+ if (pending.IsCompletedSuccessfully)
+ {
+ pending.GetAwaiter().GetResult(); // ensure observed (IVTS etc)
+ ArrayPool.Shared.Return(oversized);
+ return default;
+ }
+ else
+ {
+ return AwaitedAsync(pending, oversized);
+ }
+
+ static async ValueTask AwaitedAsync(ValueTask pending, byte[] oversized)
+ {
+ await pending.ConfigureAwait(false);
+ ArrayPool.Shared.Return(oversized);
+ }
+ }
+
+ [SuppressMessage("Resilience", "EA0014:The async method doesn't support cancellation", Justification = "Cancellation handled internally")]
+ [SuppressMessage("Design", "CA1031:Do not catch general exception types", Justification = "All failure is critical")]
+ internal async Task SafeReadTagInvalidationAsync(string tag)
+ {
+ Debug.Assert(HasBackendCache, "shouldn't be here without L2");
+
+ const int READ_TIMEOUT = 4000;
+
+ try
+ {
+ using var cts = new CancellationTokenSource(millisecondsDelay: READ_TIMEOUT);
+ var buffer = await GetFromL2DirectAsync(TagKeyPrefix + tag, cts.Token).ConfigureAwait(false);
+
+ long timestamp;
+ if (buffer.OversizedArray is not null)
+ {
+ if (buffer.Length == sizeof(long))
+ {
+ timestamp = BinaryPrimitives.ReadInt64LittleEndian(buffer.AsSpan());
+ }
+ else
+ {
+ // not what we expected! assume invalid
+ timestamp = CurrentTimestamp();
+ }
+
+ buffer.RecycleIfAppropriate();
+ }
+ else
+ {
+ timestamp = 0; // never invalidated
+ }
+
+ buffer.RecycleIfAppropriate();
+ return timestamp;
+ }
+ catch (Exception ex)
+ {
+ // ^^^ this catch is the "Safe" in "SafeReadTagInvalidationAsync"
+ Debug.WriteLine(ex.Message);
+
+            // if anything goes wrong reading tag invalidations, we have to assume the tag is invalid
+ return CurrentTimestamp();
+ }
+ }
+
internal void SetL1(string key, CacheItem value, HybridCacheEntryOptions? options)
{
// incr ref-count for the the cache itself; this *may* be released via the NeedsEvictionCallback path
@@ -115,9 +207,28 @@ internal void SetL1(string key, CacheItem value, HybridCacheEntryOptions?
// commit
cacheEntry.Dispose();
+
+ if (HybridCacheEventSource.Log.IsEnabled())
+ {
+ HybridCacheEventSource.Log.LocalCacheWrite();
+ }
}
}
+ private async ValueTask WritePayloadAsync(string key, CacheItem cacheItem, BufferChunk payload, HybridCacheEntryOptions? options, CancellationToken token)
+ {
+ // bundle a serialized payload inside the wrapper used at the DC layer
+ var maxLength = HybridCachePayload.GetMaxBytes(key, cacheItem.Tags, payload.Length);
+ var oversized = ArrayPool.Shared.Rent(maxLength);
+
+ var length = HybridCachePayload.Write(oversized, key, cacheItem.CreationTimestamp, options?.Expiration ?? _defaultTimeout,
+ HybridCachePayload.PayloadFlags.None, cacheItem.Tags, payload.AsSequence());
+
+ await SetDirectL2Async(key, new(oversized, 0, length, true), GetOptions(options), token).ConfigureAwait(false);
+
+ ArrayPool.Shared.Return(oversized);
+ }
+
private BufferChunk GetValidPayloadSegment(byte[]? payload)
{
if (payload is not null)
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.MutableCacheItem.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.MutableCacheItem.cs
index 2d02c23b6d8..e19279656c7 100644
--- a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.MutableCacheItem.cs
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.MutableCacheItem.cs
@@ -1,14 +1,23 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
+using System;
+using Microsoft.Extensions.Logging;
+
namespace Microsoft.Extensions.Caching.Hybrid.Internal;
internal partial class DefaultHybridCache
{
private sealed partial class MutableCacheItem : CacheItem // used to hold types that require defensive copies
{
- private IHybridCacheSerializer _serializer = null!; // deferred until SetValue
+ private IHybridCacheSerializer? _serializer;
private BufferChunk _buffer;
+ private T? _fallbackValue; // only used in the case of serialization failures
+
+ public MutableCacheItem(long creationTimestamp, TagSet tags)
+ : base(creationTimestamp, tags)
+ {
+ }
public override bool NeedsEvictionCallback => _buffer.ReturnToPool;
@@ -21,16 +30,27 @@ public void SetValue(ref BufferChunk buffer, IHybridCacheSerializer serialize
buffer = default; // we're taking over the lifetime; the caller no longer has it!
}
- public override bool TryGetValue(out T value)
+ public void SetFallbackValue(T fallbackValue)
+ {
+ _fallbackValue = fallbackValue;
+ }
+
+ public override bool TryGetValue(ILogger log, out T value)
{
// only if we haven't already burned
if (TryReserve())
{
try
{
- value = _serializer.Deserialize(_buffer.AsSequence());
+ var serializer = _serializer;
+ value = serializer is null ? _fallbackValue! : serializer.Deserialize(_buffer.AsSequence());
return true;
}
+ catch (Exception ex)
+ {
+ log.DeserializationFailure(ex);
+ throw;
+ }
finally
{
_ = Release();
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.Serialization.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.Serialization.cs
index 523a95e279a..cb39696d532 100644
--- a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.Serialization.cs
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.Serialization.cs
@@ -3,7 +3,7 @@
using System;
using System.Collections.Concurrent;
-using System.Reflection;
+using System.Diagnostics.CodeAnalysis;
using System.Runtime.CompilerServices;
using Microsoft.Extensions.DependencyInjection;
@@ -51,4 +51,54 @@ static IHybridCacheSerializer ResolveAndAddSerializer(DefaultHybridCache @thi
return serializer;
}
}
+
+ [SuppressMessage("Design", "CA1031:Do not catch general exception types", Justification = "Intentional for logged failure mode")]
+ private bool TrySerialize(T value, out BufferChunk buffer, out IHybridCacheSerializer? serializer)
+ {
+ // note: also returns the serializer we resolved, because most-any time we want to serialize, we'll also want
+ // to make sure we use that same instance later (without needing to re-resolve and/or store the entire HC machinery)
+
+ RecyclableArrayBufferWriter? writer = null;
+ buffer = default;
+ try
+ {
+ writer = RecyclableArrayBufferWriter.Create(MaximumPayloadBytes); // note this lifetime spans the SetL2Async
+ serializer = GetSerializer();
+
+ serializer.Serialize(value, writer);
+
+ buffer = new(writer.DetachCommitted(out var length), 0, length, returnToPool: true); // remove buffer ownership from the writer
+ writer.Dispose(); // we're done with the writer
+ return true;
+ }
+ catch (Exception ex)
+ {
+ bool knownCause = false;
+
+ // ^^^ if we know what happened, we can record directly via cause-specific events
+ // and treat as a handled failure (i.e. return false) - otherwise, we'll bubble
+ // the fault up a few layers *in addition to* logging in a failure event
+
+ if (writer is not null)
+ {
+ if (writer.QuotaExceeded)
+ {
+ _logger.MaximumPayloadBytesExceeded(ex, MaximumPayloadBytes);
+ knownCause = true;
+ }
+
+ writer.Dispose();
+ }
+
+ if (!knownCause)
+ {
+ _logger.SerializationFailure(ex);
+ throw;
+ }
+
+ buffer = default;
+ serializer = null;
+ return false;
+ }
+ }
}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.Stampede.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.Stampede.cs
index ef5c570c670..660233e41ef 100644
--- a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.Stampede.cs
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.Stampede.cs
@@ -3,6 +3,7 @@
using System;
using System.Collections.Concurrent;
+using System.Collections.Generic;
using System.Diagnostics;
using System.Diagnostics.CodeAnalysis;
@@ -13,7 +14,7 @@ internal partial class DefaultHybridCache
private readonly ConcurrentDictionary _currentOperations = new();
// returns true for a new session (in which case: we need to start the work), false for a pre-existing session
- public bool GetOrCreateStampedeState(string key, HybridCacheEntryFlags flags, out StampedeState stampedeState, bool canBeCanceled)
+ public bool GetOrCreateStampedeState(string key, HybridCacheEntryFlags flags, out StampedeState stampedeState, bool canBeCanceled, IEnumerable? tags)
{
var stampedeKey = new StampedeKey(key, flags);
@@ -27,7 +28,7 @@ public bool GetOrCreateStampedeState(string key, HybridCacheEntryFlag
// Most common scenario here, then, is that we're not fighting with anyone else
// go ahead and create a placeholder state object and *try* to add it.
- stampedeState = new StampedeState(this, stampedeKey, canBeCanceled);
+ stampedeState = new StampedeState(this, stampedeKey, TagSet.Create(tags), canBeCanceled);
if (_currentOperations.TryAdd(stampedeKey, stampedeState))
{
// successfully added; indeed, no-one else was fighting: we're done
@@ -56,8 +57,9 @@ public bool GetOrCreateStampedeState(string key, HybridCacheEntryFlag
// Check whether the value was L1-cached by an outgoing operation (for *us* to check needs local-cache-read,
// and for *them* to have updated needs local-cache-write, but since the shared us/them key includes flags,
// we can skip this if *either* flag is set).
- if ((flags & HybridCacheEntryFlags.DisableLocalCache) == 0 && _localCache.TryGetValue(key, out var untyped)
- && untyped is CacheItem typed && typed.TryReserve())
+ if ((flags & HybridCacheEntryFlags.DisableLocalCache) == 0
+ && TryGetExisting(key, out var typed)
+ && typed.TryReserve())
{
stampedeState.SetResultDirect(typed);
return false; // the work has ALREADY been done
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.StampedeState.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.StampedeState.cs
index eba71774395..e2439357f26 100644
--- a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.StampedeState.cs
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.StampedeState.cs
@@ -74,8 +74,6 @@ protected StampedeState(DefaultHybridCache cache, in StampedeKey key, CacheItem
public abstract void Execute();
- protected int MaximumPayloadBytes => _cache.MaximumPayloadBytes;
-
public override string ToString() => Key.ToString();
public abstract void SetCanceled();
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.StampedeStateT.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.StampedeStateT.cs
index 4e45acae930..992c03c24cb 100644
--- a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.StampedeStateT.cs
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.StampedeStateT.cs
@@ -6,6 +6,7 @@
using System.Diagnostics.CodeAnalysis;
using System.Threading;
using System.Threading.Tasks;
+using Microsoft.Extensions.Logging;
using static Microsoft.Extensions.Caching.Hybrid.Internal.DefaultHybridCache;
namespace Microsoft.Extensions.Caching.Hybrid.Internal;
@@ -14,7 +15,8 @@ internal partial class DefaultHybridCache
{
internal sealed class StampedeState : StampedeState
{
- private const HybridCacheEntryFlags FlagsDisableL1AndL2 = HybridCacheEntryFlags.DisableLocalCacheWrite | HybridCacheEntryFlags.DisableDistributedCacheWrite;
+ // note on terminology: L1 and L2 are, for brevity, used interchangeably with "local" and "distributed" cache, i.e. `IMemoryCache` and `IDistributedCache`
+ private const HybridCacheEntryFlags FlagsDisableL1AndL2Write = HybridCacheEntryFlags.DisableLocalCacheWrite | HybridCacheEntryFlags.DisableDistributedCacheWrite;
private readonly TaskCompletionSource>? _result;
private TState? _state;
@@ -26,14 +28,14 @@ internal sealed class StampedeState : StampedeState
internal void SetResultDirect(CacheItem value)
=> _result?.TrySetResult(value);
- public StampedeState(DefaultHybridCache cache, in StampedeKey key, bool canBeCanceled)
- : base(cache, key, CacheItem.Create(), canBeCanceled)
+ public StampedeState(DefaultHybridCache cache, in StampedeKey key, TagSet tags, bool canBeCanceled)
+ : base(cache, key, CacheItem.Create(cache.CurrentTimestamp(), tags), canBeCanceled)
{
_result = new(TaskCreationOptions.RunContinuationsAsynchronously);
}
- public StampedeState(DefaultHybridCache cache, in StampedeKey key, CancellationToken token)
- : base(cache, key, CacheItem.Create(), token)
+ public StampedeState(DefaultHybridCache cache, in StampedeKey key, TagSet tags, CancellationToken token)
+ : base(cache, key, CacheItem.Create(cache.CurrentTimestamp(), tags), token)
{
// no TCS in this case - this is for SetValue only
}
@@ -76,13 +78,13 @@ public Task ExecuteDirectAsync(in TState state, Func _result?.TrySetCanceled(SharedToken);
[SuppressMessage("Usage", "VSTHRD003:Avoid awaiting foreign Tasks", Justification = "Custom task management")]
- public ValueTask JoinAsync(CancellationToken token)
+ public ValueTask JoinAsync(ILogger log, CancellationToken token)
{
// If the underlying has already completed, and/or our local token can't cancel: we
// can simply wrap the shared task; otherwise, we need our own cancellation state.
- return token.CanBeCanceled && !Task.IsCompleted ? WithCancellationAsync(this, token) : UnwrapReservedAsync();
+ return token.CanBeCanceled && !Task.IsCompleted ? WithCancellationAsync(log, this, token) : UnwrapReservedAsync(log);
- static async ValueTask WithCancellationAsync(StampedeState stampede, CancellationToken token)
+ static async ValueTask WithCancellationAsync(ILogger log, StampedeState stampede, CancellationToken token)
{
var cancelStub = new TaskCompletionSource(TaskCreationOptions.RunContinuationsAsynchronously);
using var reg = token.Register(static obj =>
@@ -112,7 +114,7 @@ static async ValueTask WithCancellationAsync(StampedeState stamped
}
// outside the catch, so we know we only decrement one way or the other
- return result.GetReservedValue();
+ return result.GetReservedValue(log);
}
}
@@ -133,7 +135,7 @@ static Task> InvalidAsync() => System.Threading.Tasks.Task.FromExce
[SuppressMessage("Performance", "CA1849:Call async methods when in an async method", Justification = "Checked manual unwrap")]
[SuppressMessage("Usage", "VSTHRD003:Avoid awaiting foreign Tasks", Justification = "Checked manual unwrap")]
[SuppressMessage("Major Code Smell", "S1121:Assignments should not be made from within sub-expressions", Justification = "Unusual, but legit here")]
- internal ValueTask UnwrapReservedAsync()
+ internal ValueTask UnwrapReservedAsync(ILogger log)
{
var task = Task;
#if NETCOREAPP2_0_OR_GREATER || NETSTANDARD2_1_OR_GREATER
@@ -142,16 +144,16 @@ internal ValueTask UnwrapReservedAsync()
if (task.Status == TaskStatus.RanToCompletion)
#endif
{
- return new(task.Result.GetReservedValue());
+ return new(task.Result.GetReservedValue(log));
}
// if the type is immutable, callers can share the final step too (this may leave dangling
// reservation counters, but that's OK)
- var result = ImmutableTypeCache.IsImmutable ? (_sharedUnwrap ??= AwaitedAsync(Task)) : AwaitedAsync(Task);
+ var result = ImmutableTypeCache.IsImmutable ? (_sharedUnwrap ??= AwaitedAsync(log, Task)) : AwaitedAsync(log, Task);
return new(result);
- static async Task AwaitedAsync(Task> task)
- => (await task.ConfigureAwait(false)).GetReservedValue();
+ static async Task AwaitedAsync(ILogger log, Task> task)
+ => (await task.ConfigureAwait(false)).GetReservedValue(log);
}
[DoesNotReturn]
@@ -159,19 +161,79 @@ static async Task AwaitedAsync(Task> task)
[SuppressMessage("Resilience", "EA0014:The async method doesn't support cancellation", Justification = "In this case the cancellation token is provided internally via SharedToken")]
[SuppressMessage("Design", "CA1031:Do not catch general exception types", Justification = "Exception is passed through to faulted task result")]
+ [SuppressMessage("Reliability", "EA0002:Use 'System.TimeProvider' to make the code easier to test", Justification = "Does not apply")]
private async Task BackgroundFetchAsync()
{
+ bool eventSourceEnabled = HybridCacheEventSource.Log.IsEnabled();
try
{
// read from L2 if appropriate
if ((Key.Flags & HybridCacheEntryFlags.DisableDistributedCacheRead) == 0)
{
- var result = await Cache.GetFromL2Async(Key.Key, SharedToken).ConfigureAwait(false);
+ // kick off any necessary tag invalidation fetches
+ Cache.PrefetchTags(CacheItem.Tags);
- if (result.Array is not null)
+ BufferChunk result;
+ try
{
- SetResultAndRecycleIfAppropriate(ref result);
- return;
+ if (eventSourceEnabled)
+ {
+ HybridCacheEventSource.Log.DistributedCacheGet();
+ }
+
+ result = await Cache.GetFromL2DirectAsync(Key.Key, SharedToken).ConfigureAwait(false);
+ if (eventSourceEnabled)
+ {
+ if (result.HasValue)
+ {
+ HybridCacheEventSource.Log.DistributedCacheHit();
+ }
+ else
+ {
+ HybridCacheEventSource.Log.DistributedCacheMiss();
+ }
+ }
+ }
+ catch (OperationCanceledException) when (SharedToken.IsCancellationRequested)
+ {
+ if (eventSourceEnabled)
+ {
+ HybridCacheEventSource.Log.DistributedCacheCanceled();
+ }
+
+ throw; // don't just treat as miss - exit ASAP
+ }
+ catch (Exception ex)
+ {
+ if (eventSourceEnabled)
+ {
+ HybridCacheEventSource.Log.DistributedCacheFailed();
+ }
+
+ Cache._logger.CacheUnderlyingDataQueryFailure(ex);
+ result = default; // treat as "miss"
+ }
+
+ if (result.HasValue)
+ {
+ // result is the wider payload including HC headers; unwrap it:
+ switch (HybridCachePayload.TryParse(result.AsArraySegment(), Key.Key, CacheItem.Tags, Cache, out var payload,
+ out var flags, out var entropy, out var pendingTags))
+ {
+ case HybridCachePayload.ParseResult.Success:
+ // check any pending expirations, if necessary
+ if (pendingTags.IsEmpty || !await Cache.IsAnyTagExpiredAsync(pendingTags, CacheItem.CreationTimestamp).ConfigureAwait(false))
+ {
+ // move into the payload segment (minus any framing/header/etc data)
+ result = new(payload.Array!, payload.Offset, payload.Count, result.ReturnToPool);
+ SetResultAndRecycleIfAppropriate(ref result);
+ return;
+ }
+
+ break;
+ }
+
+ result.RecycleIfAppropriate();
}
}
@@ -179,7 +241,66 @@ private async Task BackgroundFetchAsync()
if ((Key.Flags & HybridCacheEntryFlags.DisableUnderlyingData) == 0)
{
// invoke the callback supplied by the caller
- T newValue = await _underlying!(_state!, SharedToken).ConfigureAwait(false);
+ T newValue;
+ try
+ {
+ if (eventSourceEnabled)
+ {
+ HybridCacheEventSource.Log.UnderlyingDataQueryStart();
+ }
+
+ newValue = await _underlying!(_state!, SharedToken).ConfigureAwait(false);
+
+ if (eventSourceEnabled)
+ {
+ HybridCacheEventSource.Log.UnderlyingDataQueryComplete();
+ }
+ }
+ catch (Exception ex)
+ {
+ if (eventSourceEnabled)
+ {
+ if (ex is OperationCanceledException && SharedToken.IsCancellationRequested)
+ {
+ HybridCacheEventSource.Log.UnderlyingDataQueryCanceled();
+ }
+ else
+ {
+ HybridCacheEventSource.Log.UnderlyingDataQueryFailed();
+ }
+ }
+
+ throw;
+ }
+
+ // check whether we're going to hit a timing problem with tag invalidation
+ if (!Cache.IsValid(CacheItem))
+ {
+ // When writing to L1, we need to avoid a problem where either "*" or one of
+ // the active tags matches "now" - we get into a problem whereby it is
+ // ambiguous whether the data is invalidated; consider that all of the following happen
+ // *in the same measured instant*:
+ // - write with value A
+ // - invalidate by tag (or wildcard)
+ // - write with value B
+ // Both A and B have the same timestamp as the invalidated one; to avoid this problem,
+ // we need to detect this (very rare) scenario, and inject an artificial delay, such that
+ // B effectively gets written at a later time.
+ var time = Cache.CurrentTimestamp();
+ if (time <= CacheItem.CreationTimestamp)
+ {
+ // Clock hasn't changed; this is *very rare*, and honestly mostly applies to
+ // tests with dummy fetch calls; inject an artificial delay and re-fetch
+ // the time.
+ await System.Threading.Tasks.Task.Delay(1, CancellationToken.None).ConfigureAwait(false);
+ time = Cache.CurrentTimestamp();
+ }
+
+ // We can safely update the timestamp without fear of torn values etc; no competing code
+ // will access this until we set it into L1, which happens towards the *end* of this method,
+ // and we (the current thread/path) are the only execution for this instance.
+ CacheItem.UnsafeSetCreationTimestamp(time);
+ }
// If we're writing this value *anywhere*, we're going to need to serialize; this is obvious
// in the case of L2, but we also need it for L1, because MemoryCache might be enforcing
@@ -187,11 +308,10 @@ private async Task BackgroundFetchAsync()
// Likewise, if we're writing to a MutableCacheItem, we'll be serializing *anyway* for the payload.
//
// Rephrasing that: the only scenario in which we *do not* need to serialize is if:
- // - it is an ImmutableCacheItem
- // - we're writing neither to L1 nor L2
-
+ // - it is an ImmutableCacheItem (so we don't need bytes for the CacheItem, L1)
+ // - we're not writing to L2
CacheItem cacheItem = CacheItem;
- bool skipSerialize = cacheItem is ImmutableCacheItem && (Key.Flags & FlagsDisableL1AndL2) == FlagsDisableL1AndL2;
+ bool skipSerialize = cacheItem is ImmutableCacheItem && (Key.Flags & FlagsDisableL1AndL2Write) == FlagsDisableL1AndL2Write;
if (skipSerialize)
{
@@ -202,33 +322,54 @@ private async Task BackgroundFetchAsync()
// ^^^ The first thing we need to do is make sure we're not getting into a thread race over buffer disposal.
// In particular, if this cache item is somehow so short-lived that the buffers would be released *before* we're
// done writing them to L2, which happens *after* we've provided the value to consumers.
- RecyclableArrayBufferWriter writer = RecyclableArrayBufferWriter.Create(MaximumPayloadBytes); // note this lifetime spans the SetL2Async
- IHybridCacheSerializer serializer = Cache.GetSerializer();
- serializer.Serialize(newValue, writer);
- BufferChunk buffer = new(writer.DetachCommitted(out var length), length, returnToPool: true); // remove buffer ownership from the writer
- writer.Dispose(); // we're done with the writer
-
- // protect "buffer" (this is why we "reserved") for writing to L2 if needed; SetResultPreSerialized
- // *may* (depending on context) claim this buffer, in which case "bufferToRelease" gets reset, and
- // the final RecycleIfAppropriate() is a no-op; however, the buffer is valid in either event,
- // (with TryReserve above guaranteeing that we aren't in a race condition).
- BufferChunk bufferToRelease = buffer;
-
- // and since "bufferToRelease" is the thing that will be returned at some point, we can make it explicit
- // that we do not need or want "buffer" to do any recycling (they're the same memory)
- buffer = buffer.DoNotReturnToPool();
-
- // set the underlying result for this operation (includes L1 write if appropriate)
- SetResultPreSerialized(newValue, ref bufferToRelease, serializer);
-
- // Note that at this point we've already released most or all of the waiting callers. Everything
- // from this point onwards happens in the background, from the perspective of the calling code.
-
- // Write to L2 if appropriate.
- if ((Key.Flags & HybridCacheEntryFlags.DisableDistributedCacheWrite) == 0)
+ BufferChunk bufferToRelease = default;
+ if (Cache.TrySerialize(newValue, out var buffer, out var serializer))
{
- // We already have the payload serialized, so this is trivial to do.
- await Cache.SetL2Async(Key.Key, in buffer, _options, SharedToken).ConfigureAwait(false);
+ // note we also capture the resolved serializer ^^^ - we'll need it again later
+
+ // protect "buffer" (this is why we "reserved") for writing to L2 if needed; SetResultPreSerialized
+ // *may* (depending on context) claim this buffer, in which case "bufferToRelease" gets reset, and
+ // the final RecycleIfAppropriate() is a no-op; however, the buffer is valid in either event,
+ // (with TryReserve above guaranteeing that we aren't in a race condition).
+ bufferToRelease = buffer;
+
+ // and since "bufferToRelease" is the thing that will be returned at some point, we can make it explicit
+ // that we do not need or want "buffer" to do any recycling (they're the same memory)
+ buffer = buffer.DoNotReturnToPool();
+
+ // set the underlying result for this operation (includes L1 write if appropriate)
+ SetResultPreSerialized(newValue, ref bufferToRelease, serializer);
+
+ // Note that at this point we've already released most or all of the waiting callers. Everything
+ // from this point onwards happens in the background, from the perspective of the calling code.
+
+ // Write to L2 if appropriate.
+ if ((Key.Flags & HybridCacheEntryFlags.DisableDistributedCacheWrite) == 0)
+ {
+ // We already have the payload serialized, so this is trivial to do.
+ try
+ {
+ await Cache.SetL2Async(Key.Key, cacheItem, in buffer, _options, SharedToken).ConfigureAwait(false);
+
+ if (eventSourceEnabled)
+ {
+ HybridCacheEventSource.Log.DistributedCacheWrite();
+ }
+ }
+ catch (Exception ex)
+ {
+ // log the L2 write failure, but that doesn't need to interrupt the app flow (so:
+ // don't rethrow); L1 will still reduce impact, and L1 without L2 is better than
+ // hard failure every time
+ Cache._logger.CacheBackendWriteFailure(ex);
+ }
+ }
+ }
+ else
+ {
+ // unable to serialize (or quota exceeded); try to at least store the onwards value; this is
+ // especially useful for immutable data types
+ SetResultPreSerialized(newValue, ref bufferToRelease, serializer);
}
// Release our hook on the CacheItem (only really important for "mutable").
@@ -281,7 +422,7 @@ private void SetDefaultResult()
private void SetResultAndRecycleIfAppropriate(ref BufferChunk value)
{
// set a result from L2 cache
- Debug.Assert(value.Array is not null, "expected buffer");
+ Debug.Assert(value.OversizedArray is not null, "expected buffer");
IHybridCacheSerializer serializer = Cache.GetSerializer();
CacheItem cacheItem;
@@ -289,7 +430,7 @@ private void SetResultAndRecycleIfAppropriate(ref BufferChunk value)
{
case ImmutableCacheItem immutable:
// deserialize; and store object; buffer can be recycled now
- immutable.SetValue(serializer.Deserialize(new(value.Array!, 0, value.Length)), value.Length);
+ immutable.SetValue(serializer.Deserialize(new(value.OversizedArray!, value.Offset, value.Length)), value.Length);
value.RecycleIfAppropriate();
cacheItem = immutable;
break;
@@ -309,7 +450,7 @@ private void SetResultAndRecycleIfAppropriate(ref BufferChunk value)
private void SetImmutableResultWithoutSerialize(T value)
{
- Debug.Assert((Key.Flags & FlagsDisableL1AndL2) == FlagsDisableL1AndL2, "Only expected if L1+L2 disabled");
+ Debug.Assert((Key.Flags & FlagsDisableL1AndL2Write) == FlagsDisableL1AndL2Write, "Only expected if L1+L2 disabled");
// set a result from a value we calculated directly
CacheItem cacheItem;
@@ -328,7 +469,7 @@ private void SetImmutableResultWithoutSerialize(T value)
SetResult(cacheItem);
}
- private void SetResultPreSerialized(T value, ref BufferChunk buffer, IHybridCacheSerializer serializer)
+ private void SetResultPreSerialized(T value, ref BufferChunk buffer, IHybridCacheSerializer? serializer)
{
// set a result from a value we calculated directly that
// has ALREADY BEEN SERIALIZED (we can optionally consume this buffer)
@@ -343,8 +484,17 @@ private void SetResultPreSerialized(T value, ref BufferChunk buffer, IHybridCach
// (but leave the buffer alone)
break;
case MutableCacheItem mutable:
- mutable.SetValue(ref buffer, serializer);
- mutable.DebugOnlyTrackBuffer(Cache);
+ if (serializer is null)
+ {
+ // serialization is failing; set fallback value
+ mutable.SetFallbackValue(value);
+ }
+ else
+ {
+ mutable.SetValue(ref buffer, serializer);
+ mutable.DebugOnlyTrackBuffer(Cache);
+ }
+
cacheItem = mutable;
break;
default:
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.TagInvalidation.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.TagInvalidation.cs
new file mode 100644
index 00000000000..1c46dafe352
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.TagInvalidation.cs
@@ -0,0 +1,257 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Collections.Concurrent;
+using System.Threading;
+using System.Threading.Tasks;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+internal partial class DefaultHybridCache
+{
+ private static readonly Task _zeroTimestamp = Task.FromResult(0L);
+
+ private readonly ConcurrentDictionary> _tagInvalidationTimes = [];
+
+#if NET9_0_OR_GREATER
+ private readonly ConcurrentDictionary>.AlternateLookup> _tagInvalidationTimesBySpan;
+ private readonly bool _tagInvalidationTimesUseAltLookup;
+#endif
+
+ private Task _globalInvalidateTimestamp;
+
+ public override ValueTask RemoveByTagAsync(string tag, CancellationToken token = default)
+ {
+ if (string.IsNullOrWhiteSpace(tag))
+ {
+ return default; // nothing sensible to do
+ }
+
+ var now = CurrentTimestamp();
+ InvalidateTagLocalCore(tag, now, isNow: true); // isNow to be 100% explicit
+ return InvalidateL2TagAsync(tag, now, token);
+ }
+
+ public bool IsValid(CacheItem cacheItem)
+ {
+ var timestamp = cacheItem.CreationTimestamp;
+
+ if (IsWildcardExpired(timestamp))
+ {
+ return false;
+ }
+
+ var tags = cacheItem.Tags;
+ switch (tags.Count)
+ {
+ case 0:
+ return true;
+
+ case 1:
+ return !IsTagExpired(tags.GetSinglePrechecked(), timestamp, out _);
+
+ default:
+ bool allValid = true;
+ foreach (var tag in tags.GetSpanPrechecked())
+ {
+ if (IsTagExpired(tag, timestamp, out _))
+ {
+ allValid = false; // but check them all, to kick-off tag fetch
+ }
+ }
+
+ return allValid;
+ }
+ }
+
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Usage", "VSTHRD002:Avoid problematic synchronous waits", Justification = "Completion-checked")]
+ public bool IsWildcardExpired(long timestamp)
+ {
+ if (_globalInvalidateTimestamp.IsCompleted)
+ {
+ if (timestamp <= _globalInvalidateTimestamp.Result)
+ {
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Usage", "VSTHRD002:Avoid problematic synchronous waits", Justification = "Completion-checked")]
+ public bool IsTagExpired(ReadOnlySpan tag, long timestamp, out bool isPending)
+ {
+ isPending = false;
+#if NET9_0_OR_GREATER
+ if (_tagInvalidationTimesUseAltLookup && _tagInvalidationTimesBySpan.TryGetValue(tag, out var pending))
+ {
+ if (pending.IsCompleted)
+ {
+ return timestamp <= pending.Result;
+ }
+ else
+ {
+ isPending = true;
+ return true; // assume invalid until completed
+ }
+ }
+ else if (!HasBackendCache)
+ {
+ // not invalidated, and no L2 to check
+ return false;
+ }
+#endif
+
+ // fallback to using a string
+ return IsTagExpired(tag.ToString(), timestamp, out isPending);
+ }
+
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Usage", "VSTHRD002:Avoid problematic synchronous waits", Justification = "Completion-checked")]
+ public bool IsTagExpired(string tag, long timestamp, out bool isPending)
+ {
+ isPending = false;
+ if (!_tagInvalidationTimes.TryGetValue(tag, out var pending))
+ {
+ // not in the tag invalidation cache; if we have L2, need to check there
+ if (HasBackendCache)
+ {
+ pending = SafeReadTagInvalidationAsync(tag);
+ _ = _tagInvalidationTimes.TryAdd(tag, pending);
+ }
+ else
+ {
+ // not invalidated, and no L2 to check
+ return false;
+ }
+ }
+
+ if (pending.IsCompleted)
+ {
+ return timestamp <= pending.Result;
+ }
+ else
+ {
+ isPending = true;
+ return true; // assume invalid until completed
+ }
+ }
+
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Resilience", "EA0014:The async method doesn't support cancellation", Justification = "Ack")]
+ public ValueTask IsAnyTagExpiredAsync(TagSet tags, long timestamp)
+ {
+ return tags.Count switch
+ {
+ 0 => new(false),
+ 1 => IsTagExpiredAsync(tags.GetSinglePrechecked(), timestamp),
+ _ => SlowAsync(this, tags, timestamp),
+ };
+
+ static async ValueTask SlowAsync(DefaultHybridCache @this, TagSet tags, long timestamp)
+ {
+ int count = tags.Count;
+ for (int i = 0; i < count; i++)
+ {
+ if (await @this.IsTagExpiredAsync(tags[i], timestamp).ConfigureAwait(false))
+ {
+ return true;
+ }
+ }
+
+ return false;
+ }
+ }
+
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Resilience", "EA0014:The async method doesn't support cancellation", Justification = "Ack")]
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Performance", "CA1849:Call async methods when in an async method", Justification = "Completion-checked")]
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Usage", "VSTHRD003:Avoid awaiting foreign Tasks", Justification = "Manual async unwrap")]
+ public ValueTask IsTagExpiredAsync(string tag, long timestamp)
+ {
+ if (!_tagInvalidationTimes.TryGetValue(tag, out var pending))
+ {
+ // not in the tag invalidation cache; if we have L2, need to check there
+ if (HasBackendCache)
+ {
+ pending = SafeReadTagInvalidationAsync(tag);
+ _ = _tagInvalidationTimes.TryAdd(tag, pending);
+ }
+ else
+ {
+ // not invalidated, and no L2 to check
+ return new(false);
+ }
+ }
+
+ if (pending.IsCompleted)
+ {
+ return new(timestamp <= pending.Result);
+ }
+ else
+ {
+ return AwaitedAsync(pending, timestamp);
+ }
+
+ static async ValueTask AwaitedAsync(Task pending, long timestamp) => timestamp <= await pending.ConfigureAwait(false);
+ }
+
+ internal void DebugInvalidateTag(string tag, Task pending)
+ {
+ if (tag == TagSet.WildcardTag)
+ {
+ _globalInvalidateTimestamp = pending;
+ }
+ else
+ {
+ _tagInvalidationTimes[tag] = pending;
+ }
+ }
+
+ internal long CurrentTimestamp() => _clock.GetUtcNow().UtcTicks;
+
+ internal void PrefetchTags(TagSet tags)
+ {
+ if (HasBackendCache && !tags.IsEmpty)
+ {
+ // only needed if L2 exists
+ switch (tags.Count)
+ {
+ case 1:
+ PrefetchTagWithBackendCache(tags.GetSinglePrechecked());
+ break;
+ default:
+ foreach (var tag in tags.GetSpanPrechecked())
+ {
+ PrefetchTagWithBackendCache(tag);
+ }
+
+ break;
+ }
+ }
+ }
+
+ private void PrefetchTagWithBackendCache(string tag)
+ {
+ if (!_tagInvalidationTimes.TryGetValue(tag, out var pending))
+ {
+ _ = _tagInvalidationTimes.TryAdd(tag, SafeReadTagInvalidationAsync(tag));
+ }
+ }
+
+ private void InvalidateTagLocalCore(string tag, long timestamp, bool isNow)
+ {
+ var timestampTask = Task.FromResult(timestamp);
+ if (tag == TagSet.WildcardTag)
+ {
+ _globalInvalidateTimestamp = timestampTask;
+ if (isNow && !HasBackendCache)
+ {
+ // no L2, so we don't need any prior invalidated tags any more; can clear
+ _tagInvalidationTimes.Clear();
+ }
+ }
+ else
+ {
+ _tagInvalidationTimes[tag] = timestampTask;
+ }
+ }
+}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.cs
index c789e7c6652..9ee647cf07d 100644
--- a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.cs
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.cs
@@ -3,6 +3,7 @@
using System;
using System.Collections.Generic;
+using System.Diagnostics.CodeAnalysis;
using System.Linq;
using System.Runtime.CompilerServices;
using System.Threading;
@@ -20,8 +21,12 @@ namespace Microsoft.Extensions.Caching.Hybrid.Internal;
///
/// The inbuilt implementation of , as registered via .
///
+[SkipLocalsInit]
internal sealed partial class DefaultHybridCache : HybridCache
{
+ // reserve non-printable characters from keys, to prevent potential L2 abuse
+ private static readonly char[] _keyReservedCharacters = Enumerable.Range(0, 32).Select(i => (char)i).ToArray();
+
[System.Diagnostics.CodeAnalysis.SuppressMessage("Style", "IDE0032:Use auto property", Justification = "Keep usage explicit")]
private readonly IDistributedCache? _backendCache;
[System.Diagnostics.CodeAnalysis.SuppressMessage("Style", "IDE0032:Use auto property", Justification = "Keep usage explicit")]
@@ -32,11 +37,13 @@ internal sealed partial class DefaultHybridCache : HybridCache
private readonly HybridCacheOptions _options;
private readonly ILogger _logger;
private readonly CacheFeatures _features; // used to avoid constant type-testing
+ private readonly TimeProvider _clock;
private readonly HybridCacheEntryFlags _hardFlags; // *always* present (for example, because no L2)
private readonly HybridCacheEntryFlags _defaultFlags; // note this already includes hardFlags
private readonly TimeSpan _defaultExpiration;
private readonly TimeSpan _defaultLocalCacheExpiration;
+ private readonly int _maximumKeyLength;
private readonly DistributedCacheEntryOptions _defaultDistributedCacheExpiration;
@@ -56,13 +63,15 @@ internal enum CacheFeatures
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private CacheFeatures GetFeatures(CacheFeatures mask) => _features & mask;
+ internal bool HasBackendCache => (_features & CacheFeatures.BackendCache) != 0;
+
public DefaultHybridCache(IOptions options, IServiceProvider services)
{
_services = Throw.IfNull(services);
_localCache = services.GetRequiredService();
_options = options.Value;
_logger = services.GetService()?.CreateLogger(typeof(HybridCache)) ?? NullLogger.Instance;
-
+ _clock = services.GetService() ?? TimeProvider.System;
_backendCache = services.GetService(); // note optional
// ignore L2 if it is really just the same L1, wrapped
@@ -90,6 +99,7 @@ public DefaultHybridCache(IOptions options, IServiceProvider
_serializerFactories = factories;
MaximumPayloadBytes = checked((int)_options.MaximumPayloadBytes); // for now hard-limit to 2GiB
+ _maximumKeyLength = _options.MaximumKeyLength;
var defaultEntryOptions = _options.DefaultEntryOptions;
@@ -102,6 +112,13 @@ public DefaultHybridCache(IOptions options, IServiceProvider
_defaultExpiration = defaultEntryOptions?.Expiration ?? TimeSpan.FromMinutes(5);
_defaultLocalCacheExpiration = defaultEntryOptions?.LocalCacheExpiration ?? TimeSpan.FromMinutes(1);
_defaultDistributedCacheExpiration = new DistributedCacheEntryOptions { AbsoluteExpirationRelativeToNow = _defaultExpiration };
+
+#if NET9_0_OR_GREATER
+ _tagInvalidationTimesUseAltLookup = _tagInvalidationTimes.TryGetAlternateLookup(out _tagInvalidationTimesBySpan);
+#endif
+
+ // do this last
+ _globalInvalidateTimestamp = _backendCache is null ? _zeroTimestamp : SafeReadTagInvalidationAsync(TagSet.WildcardTag);
}
internal IDistributedCache? BackendCache => _backendCache;
@@ -119,14 +136,37 @@ public override ValueTask GetOrCreateAsync(string key, TState stat
}
var flags = GetEffectiveFlags(options);
- if ((flags & HybridCacheEntryFlags.DisableLocalCacheRead) == 0 && _localCache.TryGetValue(key, out var untyped)
- && untyped is CacheItem typed && typed.TryGetValue(out var value))
+ if (!ValidateKey(key))
{
- // short-circuit
- return new(value);
+ // we can't use cache, but we can still provide the data
+ return RunWithoutCacheAsync(flags, state, underlyingDataCallback, cancellationToken);
+ }
+
+ bool eventSourceEnabled = HybridCacheEventSource.Log.IsEnabled();
+
+ if ((flags & HybridCacheEntryFlags.DisableLocalCacheRead) == 0)
+ {
+ if (TryGetExisting(key, out var typed)
+ && typed.TryGetValue(_logger, out var value))
+ {
+ // short-circuit
+ if (eventSourceEnabled)
+ {
+ HybridCacheEventSource.Log.LocalCacheHit();
+ }
+
+ return new(value);
+ }
+ else
+ {
+ if (eventSourceEnabled)
+ {
+ HybridCacheEventSource.Log.LocalCacheMiss();
+ }
+ }
}
- if (GetOrCreateStampedeState(key, flags, out var stampede, canBeCanceled))
+ if (GetOrCreateStampedeState(key, flags, out var stampede, canBeCanceled, tags))
{
// new query; we're responsible for making it happen
if (canBeCanceled)
@@ -139,11 +179,19 @@ public override ValueTask GetOrCreateAsync(string key, TState stat
{
// we're going to run to completion; no need to get complicated
_ = stampede.ExecuteDirectAsync(in state, underlyingDataCallback, options); // this larger task includes L2 write etc
- return stampede.UnwrapReservedAsync();
+ return stampede.UnwrapReservedAsync(_logger);
+ }
+ }
+ else
+ {
+ // pre-existing query
+ if (eventSourceEnabled)
+ {
+ HybridCacheEventSource.Log.StampedeJoin();
}
}
- return stampede.JoinAsync(cancellationToken);
+ return stampede.JoinAsync(_logger, cancellationToken);
}
public override ValueTask RemoveAsync(string key, CancellationToken token = default)
@@ -152,19 +200,69 @@ public override ValueTask RemoveAsync(string key, CancellationToken token = defa
return _backendCache is null ? default : new(_backendCache.RemoveAsync(key, token));
}
- public override ValueTask RemoveByTagAsync(string tag, CancellationToken token = default)
- => default; // tags not yet implemented
-
public override ValueTask SetAsync(string key, T value, HybridCacheEntryOptions? options = null, IEnumerable? tags = null, CancellationToken token = default)
{
// since we're forcing a write: disable L1+L2 read; we'll use a direct pass-thru of the value as the callback, to reuse all the code
// note also that stampede token is not shared with anyone else
var flags = GetEffectiveFlags(options) | (HybridCacheEntryFlags.DisableLocalCacheRead | HybridCacheEntryFlags.DisableDistributedCacheRead);
- var state = new StampedeState(this, new StampedeKey(key, flags), token);
+ var state = new StampedeState(this, new StampedeKey(key, flags), TagSet.Create(tags), token);
return new(state.ExecuteDirectAsync(value, static (state, _) => new(state), options)); // note this spans L2 write etc
}
+ private static ValueTask RunWithoutCacheAsync(HybridCacheEntryFlags flags, TState state,
+ Func> underlyingDataCallback,
+ CancellationToken cancellationToken)
+ {
+ return (flags & HybridCacheEntryFlags.DisableUnderlyingData) == 0
+ ? underlyingDataCallback(state, cancellationToken) : default;
+ }
+
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private HybridCacheEntryFlags GetEffectiveFlags(HybridCacheEntryOptions? options)
- => (options?.Flags | _hardFlags) ?? _defaultFlags;
+ => (options?.Flags | _hardFlags) ?? _defaultFlags;
+
+ private bool ValidateKey(string key)
+ {
+ if (string.IsNullOrWhiteSpace(key))
+ {
+ _logger.KeyEmptyOrWhitespace();
+ return false;
+ }
+
+ if (key.Length > _maximumKeyLength)
+ {
+ _logger.MaximumKeyLengthExceeded(_maximumKeyLength, key.Length);
+ return false;
+ }
+
+ if (key.IndexOfAny(_keyReservedCharacters) >= 0)
+ {
+ _logger.KeyInvalidContent();
+ return false;
+ }
+
+ // nothing to complain about
+ return true;
+ }
+
+ private bool TryGetExisting(string key, [NotNullWhen(true)] out CacheItem? value)
+ {
+ if (_localCache.TryGetValue(key, out var untyped) && untyped is CacheItem typed)
+ {
+ // check tag-based and global invalidation
+ if (IsValid(typed))
+ {
+ value = typed;
+ return true;
+ }
+
+ // remove from L1; note there's a little unavoidable race here; worst case is that
+ // a fresher value gets dropped - we'll have to accept it
+ _localCache.Remove(key);
+ }
+
+ // failure
+ value = null;
+ return false;
+ }
}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/HybridCacheEventSource.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/HybridCacheEventSource.cs
new file mode 100644
index 00000000000..2db179cfc4c
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/HybridCacheEventSource.cs
@@ -0,0 +1,221 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Diagnostics;
+using System.Diagnostics.Tracing;
+using System.Runtime.CompilerServices;
+using System.Threading;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+[EventSource(Name = "Microsoft-Extensions-HybridCache")]
+internal sealed class HybridCacheEventSource : EventSource
+{
+ public static readonly HybridCacheEventSource Log = new();
+
+ internal const int EventIdLocalCacheHit = 1;
+ internal const int EventIdLocalCacheMiss = 2;
+ internal const int EventIdDistributedCacheGet = 3;
+ internal const int EventIdDistributedCacheHit = 4;
+ internal const int EventIdDistributedCacheMiss = 5;
+ internal const int EventIdDistributedCacheFailed = 6;
+ internal const int EventIdUnderlyingDataQueryStart = 7;
+ internal const int EventIdUnderlyingDataQueryComplete = 8;
+ internal const int EventIdUnderlyingDataQueryFailed = 9;
+ internal const int EventIdLocalCacheWrite = 10;
+ internal const int EventIdDistributedCacheWrite = 11;
+ internal const int EventIdStampedeJoin = 12;
+ internal const int EventIdUnderlyingDataQueryCanceled = 13;
+ internal const int EventIdDistributedCacheCanceled = 14;
+
+ // fast local counters
+ private long _totalLocalCacheHit;
+ private long _totalLocalCacheMiss;
+ private long _totalDistributedCacheHit;
+ private long _totalDistributedCacheMiss;
+ private long _totalUnderlyingDataQuery;
+ private long _currentUnderlyingDataQuery;
+ private long _currentDistributedFetch;
+ private long _totalLocalCacheWrite;
+ private long _totalDistributedCacheWrite;
+ private long _totalStampedeJoin;
+
+#if !(NETSTANDARD2_0 || NET462)
+ // full Counter infrastructure
+ private DiagnosticCounter[]? _counters;
+#endif
+
+ [NonEvent]
+ public void ResetCounters()
+ {
+ Debug.WriteLine($"{nameof(HybridCacheEventSource)} counters reset!");
+
+ Volatile.Write(ref _totalLocalCacheHit, 0);
+ Volatile.Write(ref _totalLocalCacheMiss, 0);
+ Volatile.Write(ref _totalDistributedCacheHit, 0);
+ Volatile.Write(ref _totalDistributedCacheMiss, 0);
+ Volatile.Write(ref _totalUnderlyingDataQuery, 0);
+ Volatile.Write(ref _currentUnderlyingDataQuery, 0);
+ Volatile.Write(ref _currentDistributedFetch, 0);
+ Volatile.Write(ref _totalLocalCacheWrite, 0);
+ Volatile.Write(ref _totalDistributedCacheWrite, 0);
+ Volatile.Write(ref _totalStampedeJoin, 0);
+ }
+
+ [Event(EventIdLocalCacheHit, Level = EventLevel.Verbose)]
+ public void LocalCacheHit()
+ {
+ DebugAssertEnabled();
+ _ = Interlocked.Increment(ref _totalLocalCacheHit);
+ WriteEvent(EventIdLocalCacheHit);
+ }
+
+ [Event(EventIdLocalCacheMiss, Level = EventLevel.Verbose)]
+ public void LocalCacheMiss()
+ {
+ DebugAssertEnabled();
+ _ = Interlocked.Increment(ref _totalLocalCacheMiss);
+ WriteEvent(EventIdLocalCacheMiss);
+ }
+
+ [Event(EventIdDistributedCacheGet, Level = EventLevel.Verbose)]
+ public void DistributedCacheGet()
+ {
+ // should be followed by DistributedCacheHit, DistributedCacheMiss or DistributedCacheFailed
+ DebugAssertEnabled();
+ _ = Interlocked.Increment(ref _currentDistributedFetch);
+ WriteEvent(EventIdDistributedCacheGet);
+ }
+
+ [Event(EventIdDistributedCacheHit, Level = EventLevel.Verbose)]
+ public void DistributedCacheHit()
+ {
+ DebugAssertEnabled();
+
+ // note: not concerned about off-by-one here, i.e. don't panic
+ // about these two being atomic with respect to each other - just the overall shape
+ _ = Interlocked.Increment(ref _totalDistributedCacheHit);
+ _ = Interlocked.Decrement(ref _currentDistributedFetch);
+ WriteEvent(EventIdDistributedCacheHit);
+ }
+
+ [Event(EventIdDistributedCacheMiss, Level = EventLevel.Verbose)]
+ public void DistributedCacheMiss()
+ {
+ DebugAssertEnabled();
+
+ // note: not concerned about off-by-one here, i.e. don't panic
+ // about these two being atomic with respect to each other - just the overall shape
+ _ = Interlocked.Increment(ref _totalDistributedCacheMiss);
+ _ = Interlocked.Decrement(ref _currentDistributedFetch);
+ WriteEvent(EventIdDistributedCacheMiss);
+ }
+
+ [Event(EventIdDistributedCacheFailed, Level = EventLevel.Error)]
+ public void DistributedCacheFailed()
+ {
+ DebugAssertEnabled();
+ _ = Interlocked.Decrement(ref _currentDistributedFetch);
+ WriteEvent(EventIdDistributedCacheFailed);
+ }
+
+ [Event(EventIdDistributedCacheCanceled, Level = EventLevel.Verbose)]
+ public void DistributedCacheCanceled()
+ {
+ DebugAssertEnabled();
+ _ = Interlocked.Decrement(ref _currentDistributedFetch);
+ WriteEvent(EventIdDistributedCacheCanceled);
+ }
+
+ [Event(EventIdUnderlyingDataQueryStart, Level = EventLevel.Verbose)]
+ public void UnderlyingDataQueryStart()
+ {
+ // should be followed by UnderlyingDataQueryComplete or UnderlyingDataQueryFailed
+ DebugAssertEnabled();
+ _ = Interlocked.Increment(ref _totalUnderlyingDataQuery);
+ _ = Interlocked.Increment(ref _currentUnderlyingDataQuery);
+ WriteEvent(EventIdUnderlyingDataQueryStart);
+ }
+
+ [Event(EventIdUnderlyingDataQueryComplete, Level = EventLevel.Verbose)]
+ public void UnderlyingDataQueryComplete()
+ {
+ DebugAssertEnabled();
+ _ = Interlocked.Decrement(ref _currentUnderlyingDataQuery);
+ WriteEvent(EventIdUnderlyingDataQueryComplete);
+ }
+
+ [Event(EventIdUnderlyingDataQueryFailed, Level = EventLevel.Error)]
+ public void UnderlyingDataQueryFailed()
+ {
+ DebugAssertEnabled();
+ _ = Interlocked.Decrement(ref _currentUnderlyingDataQuery);
+ WriteEvent(EventIdUnderlyingDataQueryFailed);
+ }
+
+ [Event(EventIdUnderlyingDataQueryCanceled, Level = EventLevel.Verbose)]
+ public void UnderlyingDataQueryCanceled()
+ {
+ DebugAssertEnabled();
+ _ = Interlocked.Decrement(ref _currentUnderlyingDataQuery);
+ WriteEvent(EventIdUnderlyingDataQueryCanceled);
+ }
+
+ [Event(EventIdLocalCacheWrite, Level = EventLevel.Verbose)]
+ public void LocalCacheWrite()
+ {
+ DebugAssertEnabled();
+ _ = Interlocked.Increment(ref _totalLocalCacheWrite);
+ WriteEvent(EventIdLocalCacheWrite);
+ }
+
+ [Event(EventIdDistributedCacheWrite, Level = EventLevel.Verbose)]
+ public void DistributedCacheWrite()
+ {
+ DebugAssertEnabled();
+ _ = Interlocked.Increment(ref _totalDistributedCacheWrite);
+ WriteEvent(EventIdDistributedCacheWrite);
+ }
+
+ [Event(EventIdStampedeJoin, Level = EventLevel.Verbose)]
+ internal void StampedeJoin()
+ {
+ DebugAssertEnabled();
+ _ = Interlocked.Increment(ref _totalStampedeJoin);
+ WriteEvent(EventIdStampedeJoin);
+ }
+
+#if !(NETSTANDARD2_0 || NET462)
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Reliability", "CA2000:Dispose objects before losing scope", Justification = "Lifetime exceeds obvious scope; handed to event source")]
+ [NonEvent]
+ protected override void OnEventCommand(EventCommandEventArgs command)
+ {
+ if (command.Command == EventCommand.Enable)
+ {
+ // lazily create counters on first Enable
+ _counters ??= [
+ new PollingCounter("total-local-cache-hits", this, () => Volatile.Read(ref _totalLocalCacheHit)) { DisplayName = "Total Local Cache Hits" },
+ new PollingCounter("total-local-cache-misses", this, () => Volatile.Read(ref _totalLocalCacheMiss)) { DisplayName = "Total Local Cache Misses" },
+ new PollingCounter("total-distributed-cache-hits", this, () => Volatile.Read(ref _totalDistributedCacheHit)) { DisplayName = "Total Distributed Cache Hits" },
+ new PollingCounter("total-distributed-cache-misses", this, () => Volatile.Read(ref _totalDistributedCacheMiss)) { DisplayName = "Total Distributed Cache Misses" },
+ new PollingCounter("total-data-query", this, () => Volatile.Read(ref _totalUnderlyingDataQuery)) { DisplayName = "Total Data Queries" },
+ new PollingCounter("current-data-query", this, () => Volatile.Read(ref _currentUnderlyingDataQuery)) { DisplayName = "Current Data Queries" },
+ new PollingCounter("current-distributed-cache-fetches", this, () => Volatile.Read(ref _currentDistributedFetch)) { DisplayName = "Current Distributed Cache Fetches" },
+ new PollingCounter("total-local-cache-writes", this, () => Volatile.Read(ref _totalLocalCacheWrite)) { DisplayName = "Total Local Cache Writes" },
+ new PollingCounter("total-distributed-cache-writes", this, () => Volatile.Read(ref _totalDistributedCacheWrite)) { DisplayName = "Total Distributed Cache Writes" },
+ new PollingCounter("total-stampede-joins", this, () => Volatile.Read(ref _totalStampedeJoin)) { DisplayName = "Total Stampede Joins" },
+ ];
+ }
+
+ base.OnEventCommand(command);
+ }
+#endif
+
+ [NonEvent]
+ [Conditional("DEBUG")]
+ private void DebugAssertEnabled([CallerMemberName] string caller = "")
+ {
+ Debug.Assert(IsEnabled(), $"Missing check to {nameof(HybridCacheEventSource)}.{nameof(Log)}.{nameof(IsEnabled)} from {caller}");
+ Debug.WriteLine($"{nameof(HybridCacheEventSource)}: {caller}"); // also log all event calls, for visibility
+ }
+}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/HybridCachePayload.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/HybridCachePayload.cs
new file mode 100644
index 00000000000..50edf21dff9
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/HybridCachePayload.cs
@@ -0,0 +1,407 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Buffers;
+using System.Buffers.Binary;
+using System.Diagnostics;
+using System.Text;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+// logic related to the payload that we send to IDistributedCache
+internal static class HybridCachePayload
+{
+ // FORMAT (v1):
+ // fixed-size header (so that it can be reliably broadcast)
+ // 2 bytes: sentinel+version
+ // 2 bytes: entropy (this is a random value, to help with multi-node collisions at the same time)
+ // 8 bytes: creation time (UTC ticks, little-endian)
+
+ // and the dynamic part
+ // varint: flags (little-endian)
+ // varint: payload size
+ // varint: duration (ticks relative to creation time)
+ // varint: tag count
+ // varint+utf8: key
+ // (for each tag): varint+utf8: tagN
+ // (payload-size bytes): payload
+ // 2 bytes: sentinel+version (repeated, for reliability)
+ // (at this point, all bytes *must* be exhausted, or it is treated as failure)
+
+ // the encoding for varint etc is akin to BinaryWriter, also comparable to FormatterBinaryWriter in OutputCaching
+
+ private const int MaxVarint64Length = 10;
+ private const byte SentinelPrefix = 0x03;
+ private const byte ProtocolVersion = 0x01;
+ private const ushort UInt16SentinelPrefixPair = (ProtocolVersion << 8) | SentinelPrefix;
+
+ private static readonly Random _entropySource = new(); // doesn't need to be cryptographic
+ private static readonly UTF8Encoding _utf8NoBom = new(false);
+
+ [Flags]
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Minor Code Smell", "S2344:Enumeration type names should not have \"Flags\" or \"Enum\" suffixes", Justification = "Clarity")]
+ internal enum PayloadFlags : uint
+ {
+ None = 0,
+ }
+
+ internal enum ParseResult
+ {
+ Success = 0,
+ NotRecognized = 1,
+ InvalidData = 2,
+ InvalidKey = 3,
+ ExpiredSelf = 4,
+ ExpiredTag = 5,
+ ExpiredWildcard = 6,
+ }
+
+ public static int GetMaxBytes(string key, TagSet tags, int payloadSize)
+ {
+ int length =
+ 2 // sentinel+version
+ + 2 // entropy
+ + 8 // creation time
+ + MaxVarint64Length // flags
+ + MaxVarint64Length // payload size
+ + MaxVarint64Length // duration
+ + MaxVarint64Length // tag count
+ + 2 // trailing sentinel + version
+ + GetMaxStringLength(key.Length) // key
+ + payloadSize; // the payload itself
+
+ // keys
+ switch (tags.Count)
+ {
+ case 0:
+ break;
+ case 1:
+ length += GetMaxStringLength(tags.GetSinglePrechecked().Length);
+ break;
+ default:
+ foreach (var tag in tags.GetSpanPrechecked())
+ {
+ length += GetMaxStringLength(tag.Length);
+ }
+
+ break;
+ }
+
+ return length;
+
+ // pay the cost to get the actual length, to avoid significant
+ // over-estimate in ASCII cases
+ static int GetMaxStringLength(int charCount) =>
+ MaxVarint64Length + _utf8NoBom.GetMaxByteCount(charCount);
+ }
+
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Major Code Smell", "S109:Magic numbers should not be used", Justification = "Encoding details; clear in context")]
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Security", "CA5394:Do not use insecure randomness", Justification = "Not cryptographic")]
+ public static int Write(byte[] destination,
+ string key, long creationTime, TimeSpan duration, PayloadFlags flags, TagSet tags, ReadOnlySequence<byte> payload)
+ {
+ var payloadLength = checked((int)payload.Length);
+
+ BinaryPrimitives.WriteUInt16LittleEndian(destination.AsSpan(0, 2), UInt16SentinelPrefixPair);
+ BinaryPrimitives.WriteUInt16LittleEndian(destination.AsSpan(2, 2), (ushort)_entropySource.Next(0, 0x010000)); // Next is exclusive at RHS
+ BinaryPrimitives.WriteInt64LittleEndian(destination.AsSpan(4, 8), creationTime);
+ var len = 12;
+
+ long durationTicks = duration.Ticks;
+ if (durationTicks < 0)
+ {
+ durationTicks = 0;
+ }
+
+ Write7BitEncodedInt64(destination, ref len, (uint)flags);
+ Write7BitEncodedInt64(destination, ref len, (ulong)payloadLength);
+ Write7BitEncodedInt64(destination, ref len, (ulong)durationTicks);
+ Write7BitEncodedInt64(destination, ref len, (ulong)tags.Count);
+ WriteString(destination, ref len, key);
+ switch (tags.Count)
+ {
+ case 0:
+ break;
+ case 1:
+ WriteString(destination, ref len, tags.GetSinglePrechecked());
+ break;
+ default:
+ foreach (var tag in tags.GetSpanPrechecked())
+ {
+ WriteString(destination, ref len, tag);
+ }
+
+ break;
+ }
+
+ payload.CopyTo(destination.AsSpan(len, payloadLength));
+ len += payloadLength;
+ BinaryPrimitives.WriteUInt16LittleEndian(destination.AsSpan(len, 2), UInt16SentinelPrefixPair);
+ return len + 2;
+
+ static void Write7BitEncodedInt64(byte[] target, ref int offset, ulong value)
+ {
+ // Write out an int 7 bits at a time. The high bit of the byte,
+ // when on, tells reader to continue reading more bytes.
+ //
+ // Using the constants 0x7F and ~0x7F below offers smaller
+ // codegen than using the constant 0x80.
+
+ while (value > 0x7Fu)
+ {
+ target[offset++] = (byte)((uint)value | ~0x7Fu);
+ value >>= 7;
+ }
+
+ target[offset++] = (byte)value;
+ }
+
+ static void WriteString(byte[] target, ref int offset, string value)
+ {
+ var len = _utf8NoBom.GetByteCount(value);
+ Write7BitEncodedInt64(target, ref offset, (ulong)len);
+ offset += _utf8NoBom.GetBytes(value, 0, value.Length, target, offset);
+ }
+ }
+
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("StyleCop.CSharp.ReadabilityRules",
+ "SA1108:Block statements should not contain embedded comments", Justification = "Byte offset comments for clarity")]
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("StyleCop.CSharp.ReadabilityRules",
+ "SA1122:Use string.Empty for empty strings", Justification = "Subjective, but; ugly")]
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("StyleCop.CSharp.OrderingRules", "SA1204:Static elements should appear before instance elements", Justification = "False positive?")]
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Major Code Smell", "S109:Magic numbers should not be used", Justification = "Encoding details; clear in context")]
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Major Code Smell", "S107:Methods should not have too many parameters", Justification = "Borderline")]
+ public static ParseResult TryParse(ArraySegment<byte> source, string key, TagSet knownTags, DefaultHybridCache cache,
+ out ArraySegment<byte> payload, out PayloadFlags flags, out ushort entropy, out TagSet pendingTags)
+ {
+ // note "cache" is used primarily for expiration checks; we don't automatically add etc
+ entropy = 0;
+ payload = default;
+ flags = 0;
+ string[] pendingTagBuffer = [];
+ int pendingTagsCount = 0;
+
+ pendingTags = TagSet.Empty;
+ ReadOnlySpan<byte> bytes = new(source.Array!, source.Offset, source.Count);
+ if (bytes.Length < 19) // minimum needed for empty payload and zero tags
+ {
+ return ParseResult.NotRecognized;
+ }
+
+ var now = cache.CurrentTimestamp();
+ char[] scratch = [];
+ try
+ {
+ switch (BinaryPrimitives.ReadUInt16LittleEndian(bytes))
+ {
+ case UInt16SentinelPrefixPair:
+ entropy = BinaryPrimitives.ReadUInt16LittleEndian(bytes.Slice(2));
+ var creationTime = BinaryPrimitives.ReadInt64LittleEndian(bytes.Slice(4));
+ bytes = bytes.Slice(12); // the end of the fixed part
+
+ if (cache.IsWildcardExpired(creationTime))
+ {
+ return ParseResult.ExpiredWildcard;
+ }
+
+ if (!TryRead7BitEncodedInt64(ref bytes, out var u64)) // flags
+ {
+ return ParseResult.InvalidData;
+ }
+
+ flags = (PayloadFlags)u64;
+
+ if (!TryRead7BitEncodedInt64(ref bytes, out u64) || u64 > int.MaxValue) // payload length
+ {
+ return ParseResult.InvalidData;
+ }
+
+ var payloadLength = (int)u64;
+
+ if (!TryRead7BitEncodedInt64(ref bytes, out var duration)) // duration
+ {
+ return ParseResult.InvalidData;
+ }
+
+ if ((creationTime + (long)duration) <= now)
+ {
+ return ParseResult.ExpiredSelf;
+ }
+
+ if (!TryRead7BitEncodedInt64(ref bytes, out u64) || u64 > int.MaxValue) // tag count
+ {
+ return ParseResult.InvalidData;
+ }
+
+ var tagCount = (int)u64;
+
+ if (!TryReadString(ref bytes, ref scratch, out var stringSpan))
+ {
+ return ParseResult.InvalidData;
+ }
+
+ if (!stringSpan.SequenceEqual(key.AsSpan()))
+ {
+ return ParseResult.InvalidKey; // key must match!
+ }
+
+ for (int i = 0; i < tagCount; i++)
+ {
+ if (!TryReadString(ref bytes, ref scratch, out stringSpan))
+ {
+ return ParseResult.InvalidData;
+ }
+
+ bool isTagExpired;
+ bool isPending;
+ if (knownTags.TryFind(stringSpan, out var tagString))
+ {
+ // prefer to re-use existing tag strings when they exist
+ isTagExpired = cache.IsTagExpired(tagString, creationTime, out isPending);
+ }
+ else
+ {
+ // if an unknown tag; we might need to juggle
+ isTagExpired = cache.IsTagExpired(stringSpan, creationTime, out isPending);
+ }
+
+ if (isPending)
+ {
+ // might be expired, but the operation is still in-flight
+ if (pendingTagsCount == pendingTagBuffer.Length)
+ {
+ var newBuffer = ArrayPool<string>.Shared.Rent(Math.Max(4, pendingTagsCount * 2));
+ pendingTagBuffer.CopyTo(newBuffer, 0);
+ ArrayPool<string>.Shared.Return(pendingTagBuffer);
+ pendingTagBuffer = newBuffer;
+ }
+
+ pendingTagBuffer[pendingTagsCount++] = tagString ?? stringSpan.ToString();
+ }
+ else if (isTagExpired)
+ {
+ // definitely an expired tag
+ return ParseResult.ExpiredTag;
+ }
+ }
+
+ if (bytes.Length != payloadLength + 2
+ || BinaryPrimitives.ReadUInt16LittleEndian(bytes.Slice(payloadLength)) != UInt16SentinelPrefixPair)
+ {
+ return ParseResult.InvalidData;
+ }
+
+ var start = source.Offset + source.Count - (payloadLength + 2);
+ payload = new(source.Array!, start, payloadLength);
+
+ // finalize the pending tag buffer (in-flight tag expirations)
+ switch (pendingTagsCount)
+ {
+ case 0:
+ break;
+ case 1:
+ pendingTags = new(pendingTagBuffer[0]);
+ break;
+ default:
+ var final = new string[pendingTagsCount];
+ pendingTagBuffer.CopyTo(final, 0);
+ pendingTags = new(final);
+ break;
+ }
+
+ return ParseResult.Success;
+ default:
+ return ParseResult.NotRecognized;
+ }
+ }
+ finally
+ {
+ ArrayPool<char>.Shared.Return(scratch);
+ ArrayPool<string>.Shared.Return(pendingTagBuffer);
+ }
+
+ static bool TryReadString(ref ReadOnlySpan<byte> buffer, ref char[] scratch, out ReadOnlySpan<char> value)
+ {
+ int length;
+ if (!TryRead7BitEncodedInt64(ref buffer, out var u64Length)
+ || u64Length > int.MaxValue
+ || buffer.Length < (length = (int)u64Length)) // note buffer is now past the prefix via "ref"
+ {
+ value = default;
+ return false;
+ }
+
+ // make sure we have enough buffer space
+ var maxChars = _utf8NoBom.GetMaxCharCount(length);
+ if (scratch.Length < maxChars)
+ {
+ ArrayPool<char>.Shared.Return(scratch);
+ scratch = ArrayPool<char>.Shared.Rent(maxChars);
+ }
+
+ // decode
+#if NETCOREAPP3_1_OR_GREATER
+ var charCount = _utf8NoBom.GetChars(buffer.Slice(0, length), scratch);
+#else
+ int charCount;
+ unsafe
+ {
+ fixed (byte* bPtr = buffer)
+ {
+ fixed (char* cPtr = scratch)
+ {
+ charCount = _utf8NoBom.GetChars(bPtr, length, cPtr, scratch.Length);
+ }
+ }
+ }
+#endif
+ value = new(scratch, 0, charCount);
+ buffer = buffer.Slice(length);
+ return true;
+ }
+
+ static bool TryRead7BitEncodedInt64(ref ReadOnlySpan<byte> buffer, out ulong result)
+ {
+ byte byteReadJustNow;
+
+ // Read the integer 7 bits at a time. The high bit
+ // of the byte when on means to continue reading more bytes.
+ //
+ // There are two failure cases: we've read more than 10 bytes,
+ // or the tenth byte is about to cause integer overflow.
+ // This means that we can read the first 9 bytes without
+ // worrying about integer overflow.
+
+ const int MaxBytesWithoutOverflow = 9;
+ result = 0;
+ int index = 0;
+ for (int shift = 0; shift < MaxBytesWithoutOverflow * 7; shift += 7)
+ {
+ // ReadByte handles end of stream cases for us.
+ byteReadJustNow = buffer[index++];
+ result |= (byteReadJustNow & 0x7Ful) << shift;
+
+ if (byteReadJustNow <= 0x7Fu)
+ {
+ buffer = buffer.Slice(index);
+ return true; // early exit
+ }
+ }
+
+ // Read the 10th byte. Since we already read 63 bits,
+ // the value of this byte must fit within 1 bit (64 - 63),
+ // and it must not have the high bit set.
+
+ byteReadJustNow = buffer[index++];
+ if (byteReadJustNow > 0b_1u)
+ {
+ throw new OverflowException();
+ }
+
+ result |= (ulong)byteReadJustNow << (MaxBytesWithoutOverflow * 7);
+ buffer = buffer.Slice(index);
+ return true;
+ }
+ }
+}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/InbuiltTypeSerializer.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/InbuiltTypeSerializer.cs
index 3ef26341433..4800428a88f 100644
--- a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/InbuiltTypeSerializer.cs
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/InbuiltTypeSerializer.cs
@@ -17,6 +17,18 @@ internal sealed class InbuiltTypeSerializer : IHybridCacheSerializer, IH
public static InbuiltTypeSerializer Instance { get; } = new();
string IHybridCacheSerializer<string>.Deserialize(ReadOnlySequence<byte> source)
+ => DeserializeString(source);
+
+ void IHybridCacheSerializer<string>.Serialize(string value, IBufferWriter<byte> target)
+ => SerializeString(value, target);
+
+ byte[] IHybridCacheSerializer<byte[]>.Deserialize(ReadOnlySequence<byte> source)
+ => source.ToArray();
+
+ void IHybridCacheSerializer<byte[]>.Serialize(byte[] value, IBufferWriter<byte> target)
+ => target.Write(value);
+
+ internal static string DeserializeString(ReadOnlySequence<byte> source)
{
#if NET5_0_OR_GREATER
return Encoding.UTF8.GetString(source);
@@ -36,7 +48,7 @@ string IHybridCacheSerializer.Deserialize(ReadOnlySequence source)
#endif
}
- void IHybridCacheSerializer<string>.Serialize(string value, IBufferWriter<byte> target)
+ internal static void SerializeString(string value, IBufferWriter<byte> target)
{
#if NET5_0_OR_GREATER
Encoding.UTF8.GetBytes(value, target);
@@ -49,10 +61,4 @@ void IHybridCacheSerializer.Serialize(string value, IBufferWriter
ArrayPool.Shared.Return(oversized);
#endif
}
-
- byte[] IHybridCacheSerializer<byte[]>.Deserialize(ReadOnlySequence<byte> source)
- => source.ToArray();
-
- void IHybridCacheSerializer<byte[]>.Serialize(byte[] value, IBufferWriter<byte> target)
- => target.Write(value);
}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/Log.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/Log.cs
new file mode 100644
index 00000000000..785107c32ec
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/Log.cs
@@ -0,0 +1,49 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using Microsoft.Extensions.Logging;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+internal static partial class Log
+{
+ internal const int IdMaximumPayloadBytesExceeded = 1;
+ internal const int IdSerializationFailure = 2;
+ internal const int IdDeserializationFailure = 3;
+ internal const int IdKeyEmptyOrWhitespace = 4;
+ internal const int IdMaximumKeyLengthExceeded = 5;
+ internal const int IdCacheBackendReadFailure = 6;
+ internal const int IdCacheBackendWriteFailure = 7;
+ internal const int IdKeyInvalidContent = 8;
+
+ [LoggerMessage(LogLevel.Error, "Cache MaximumPayloadBytes ({Bytes}) exceeded.", EventName = "MaximumPayloadBytesExceeded", EventId = IdMaximumPayloadBytesExceeded, SkipEnabledCheck = false)]
+ internal static partial void MaximumPayloadBytesExceeded(this ILogger logger, Exception e, int bytes);
+
+ // note that serialization is critical enough that we perform hard failures in addition to logging; serialization
+ // failures are unlikely to be transient (i.e. connectivity); we would rather this shows up in QA, rather than
+ // being invisible and people *thinking* they're using cache, when actually they are not
+
+ [LoggerMessage(LogLevel.Error, "Cache serialization failure.", EventName = "SerializationFailure", EventId = IdSerializationFailure, SkipEnabledCheck = false)]
+ internal static partial void SerializationFailure(this ILogger logger, Exception e);
+
+ // (see same notes per SerializationFailure)
+ [LoggerMessage(LogLevel.Error, "Cache deserialization failure.", EventName = "DeserializationFailure", EventId = IdDeserializationFailure, SkipEnabledCheck = false)]
+ internal static partial void DeserializationFailure(this ILogger logger, Exception e);
+
+ [LoggerMessage(LogLevel.Error, "Cache key empty or whitespace.", EventName = "KeyEmptyOrWhitespace", EventId = IdKeyEmptyOrWhitespace, SkipEnabledCheck = false)]
+ internal static partial void KeyEmptyOrWhitespace(this ILogger logger);
+
+ [LoggerMessage(LogLevel.Error, "Cache key maximum length exceeded (maximum: {MaxLength}, actual: {KeyLength}).", EventName = "MaximumKeyLengthExceeded",
+ EventId = IdMaximumKeyLengthExceeded, SkipEnabledCheck = false)]
+ internal static partial void MaximumKeyLengthExceeded(this ILogger logger, int maxLength, int keyLength);
+
+ [LoggerMessage(LogLevel.Error, "Cache backend read failure.", EventName = "CacheBackendReadFailure", EventId = IdCacheBackendReadFailure, SkipEnabledCheck = false)]
+ internal static partial void CacheUnderlyingDataQueryFailure(this ILogger logger, Exception ex);
+
+ [LoggerMessage(LogLevel.Error, "Cache backend write failure.", EventName = "CacheBackendWriteFailure", EventId = IdCacheBackendWriteFailure, SkipEnabledCheck = false)]
+ internal static partial void CacheBackendWriteFailure(this ILogger logger, Exception ex);
+
+ [LoggerMessage(LogLevel.Error, "Cache key contains invalid content.", EventName = "KeyInvalidContent", EventId = IdKeyInvalidContent, SkipEnabledCheck = false)]
+ internal static partial void KeyInvalidContent(this ILogger logger); // for PII etc reasons, we won't include the actual key
+}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/RecyclableArrayBufferWriter.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/RecyclableArrayBufferWriter.cs
index 2f2da2c7019..82d7fba4755 100644
--- a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/RecyclableArrayBufferWriter.cs
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/RecyclableArrayBufferWriter.cs
@@ -46,20 +46,20 @@ internal sealed class RecyclableArrayBufferWriter : IBufferWriter, IDispos
public int CommittedBytes => _index;
public int FreeCapacity => _buffer.Length - _index;
+ public bool QuotaExceeded { get; private set; }
+
private static RecyclableArrayBufferWriter<T>? _spare;
+
public static RecyclableArrayBufferWriter<T> Create(int maxLength)
{
var obj = Interlocked.Exchange(ref _spare, null) ?? new();
- Debug.Assert(obj._index == 0, "index should be zero initially");
- obj._maxLength = maxLength;
+ obj.Initialize(maxLength);
return obj;
}
private RecyclableArrayBufferWriter()
{
_buffer = [];
- _index = 0;
- _maxLength = int.MaxValue;
}
public void Dispose()
@@ -91,6 +91,7 @@ public void Advance(int count)
if (_index + count > _maxLength)
{
+ QuotaExceeded = true;
ThrowQuota();
}
@@ -130,6 +131,8 @@ public Span GetSpan(int sizeHint = 0)
// create a standalone isolated copy of the buffer
public T[] ToArray() => _buffer.AsSpan(0, _index).ToArray();
+ public ReadOnlySequence<T> AsSequence() => new(_buffer, 0, _index);
+
///
/// Disconnect the current buffer so that we can store it without it being recycled.
///
@@ -199,4 +202,12 @@ private void CheckAndResizeBuffer(int sizeHint)
static void ThrowOutOfMemoryException() => throw new InvalidOperationException("Unable to grow buffer as requested");
}
+
+ private void Initialize(int maxLength)
+ {
+ // think .ctor, but with pooled object re-use
+ _index = 0;
+ _maxLength = maxLength;
+ QuotaExceeded = false;
+ }
}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/TagSet.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/TagSet.cs
new file mode 100644
index 00000000000..66ccbd29926
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/TagSet.cs
@@ -0,0 +1,216 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Buffers;
+using System.Collections.Generic;
+using System.Diagnostics;
+using System.Diagnostics.CodeAnalysis;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+///
+/// Represents zero (null), one (string) or more (string[]) tags, avoiding the additional array overhead where possible.
+///
+[System.Diagnostics.CodeAnalysis.SuppressMessage("Design", "CA1066:Implement IEquatable when overriding Object.Equals", Justification = "Equals throws by intent")]
+internal readonly struct TagSet
+{
+ public static readonly TagSet Empty = default!;
+
+ private readonly object? _tagOrTags;
+
+ internal TagSet(string tag)
+ {
+ Validate(tag);
+ _tagOrTags = tag;
+ }
+
+ internal TagSet(string[] tags)
+ {
+ Debug.Assert(tags is { Length: > 1 }, "should be non-trivial array");
+ foreach (var tag in tags)
+ {
+ Validate(tag);
+ }
+
+ Array.Sort(tags, StringComparer.InvariantCulture);
+ _tagOrTags = tags;
+ }
+
+ public string GetSinglePrechecked() => (string)_tagOrTags!; // we expect this to fail if used on incorrect types
+ public Span GetSpanPrechecked() => (string[])_tagOrTags!; // we expect this to fail if used on incorrect types
+
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Design", "CA1065:Do not raise exceptions in unexpected locations", Justification = "Intentional; should not be used")]
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Blocker Code Smell", "S3877:Exceptions should not be thrown from unexpected methods", Justification = "Intentional; should not be used")]
+ public override bool Equals(object? obj) => throw new NotSupportedException();
+
+ // [System.Diagnostics.CodeAnalysis.SuppressMessage("Design", "CA1065:Do not raise exceptions in unexpected locations", Justification = "Intentional; should not be used")]
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Blocker Code Smell", "S3877:Exceptions should not be thrown from unexpected methods", Justification = "Intentional; should not be used")]
+ public override int GetHashCode() => throw new NotSupportedException();
+
+ public override string ToString() => _tagOrTags switch
+ {
+ string tag => tag,
+ string[] tags => string.Join(", ", tags),
+ _ => "(no tags)",
+ };
+
+ public bool IsEmpty => _tagOrTags is null;
+
+ public int Count => _tagOrTags switch
+ {
+ null => 0,
+ string => 1,
+ string[] arr => arr.Length,
+ _ => 0, // should never happen, but treat as empty
+ };
+
+ internal bool IsArray => _tagOrTags is string[];
+
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Usage", "CA2201:Do not raise reserved exception types", Justification = "This is the most appropriate exception here.")]
+ public string this[int index] => _tagOrTags switch
+ {
+ string tag when index == 0 => tag,
+ string[] tags => tags[index],
+ _ => throw new IndexOutOfRangeException(nameof(index)),
+ };
+
+ public void CopyTo(Span<string> target)
+ {
+ switch (_tagOrTags)
+ {
+ case string tag:
+ target[0] = tag;
+ break;
+ case string[] tags:
+ tags.CopyTo(target);
+ break;
+ }
+ }
+
+ internal static TagSet Create(IEnumerable<string>? tags)
+ {
+ if (tags is null)
+ {
+ return Empty;
+ }
+
+ // note that in multi-tag scenarios we always create a defensive copy
+ if (tags is ICollection<string> collection)
+ {
+ switch (collection.Count)
+ {
+ case 0:
+ return Empty;
+ case 1 when collection is IList<string> list:
+ return new TagSet(list[0]);
+ case 1:
+ // avoid the GetEnumerator() alloc
+ var arr = ArrayPool<string>.Shared.Rent(1);
+ collection.CopyTo(arr, 0);
+ string tag = arr[0];
+ ArrayPool<string>.Shared.Return(arr);
+ return new TagSet(tag);
+ default:
+ arr = new string[collection.Count];
+ collection.CopyTo(arr, 0);
+ return new TagSet(arr);
+ }
+ }
+
+ // perhaps overkill, but: avoid allocations as much as possible when unrolling
+ using var iterator = tags.GetEnumerator();
+ if (!iterator.MoveNext())
+ {
+ return Empty;
+ }
+
+ var firstTag = iterator.Current;
+ if (!iterator.MoveNext())
+ {
+ return new TagSet(firstTag);
+ }
+
+ string[] oversized = ArrayPool<string>.Shared.Rent(8);
+ oversized[0] = firstTag;
+ int count = 1;
+ do
+ {
+ if (count == oversized.Length)
+ {
+ // grow
+ var bigger = ArrayPool<string>.Shared.Rent(count * 2);
+ oversized.CopyTo(bigger, 0);
+ ArrayPool<string>.Shared.Return(oversized);
+ oversized = bigger;
+ }
+
+ oversized[count++] = iterator.Current;
+ }
+ while (iterator.MoveNext());
+
+ if (count == oversized.Length)
+ {
+ return new TagSet(oversized);
+ }
+ else
+ {
+ var final = oversized.AsSpan(0, count).ToArray();
+ ArrayPool<string>.Shared.Return(oversized);
+ return new TagSet(final);
+ }
+ }
+
+ internal string[] ToArray() // for testing only
+ {
+ var arr = new string[Count];
+ CopyTo(arr);
+ return arr;
+ }
+
+ internal const string WildcardTag = "*";
+
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("StyleCop.CSharp.ReadabilityRules", "SA1122:Use string.Empty for empty strings", Justification = "Not needed")]
+ internal bool TryFind(ReadOnlySpan<char> span, [NotNullWhen(true)] out string? tag)
+ {
+ switch (_tagOrTags)
+ {
+ case string single when span.SequenceEqual(single.AsSpan()):
+ tag = single;
+ return true;
+ case string[] tags:
+ foreach (string test in tags)
+ {
+ if (span.SequenceEqual(test.AsSpan()))
+ {
+ tag = test;
+ return true;
+ }
+ }
+
+ break;
+ }
+
+ tag = null;
+ return false;
+ }
+
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Major Code Smell", "S3928:Parameter names used into ArgumentException constructors should match an existing one ",
+ Justification = "Using parameter name from public callable API")]
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Usage", "CA2208:Instantiate argument exceptions correctly", Justification = "Using parameter name from public callable API")]
+ private static void Validate(string tag)
+ {
+ if (string.IsNullOrWhiteSpace(tag))
+ {
+ ThrowEmpty();
+ }
+
+ if (tag == WildcardTag)
+ {
+ ThrowReserved();
+ }
+
+ static void ThrowEmpty() => throw new ArgumentException("Tags cannot be empty.", "tags");
+ static void ThrowReserved() => throw new ArgumentException($"The tag '{WildcardTag}' is reserved and cannot be used in this context.", "tags");
+ }
+}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Microsoft.Extensions.Caching.Hybrid.csproj b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Microsoft.Extensions.Caching.Hybrid.csproj
index f460c4ee0cc..b8aff39eb98 100644
--- a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Microsoft.Extensions.Caching.Hybrid.csproj
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Microsoft.Extensions.Caching.Hybrid.csproj
@@ -4,7 +4,7 @@
Multi-level caching implementation building on and extending IDistributedCache
$(NetCoreTargetFrameworks)$(ConditionalNet462);netstandard2.0;netstandard2.1
true
- cache;distributedcache;hybrid
+ cache;distributedcache;hybridcache
true
true
true
@@ -12,11 +12,25 @@
true
true
true
+ CachingHybrid
+
+ true
+ true
+ true
+
+
+ false
+
+
+
dev
+ true
EXTEXP0018
- 75
+ 86
50
Fundamentals
+ true
diff --git a/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/BufferReleaseTests.cs b/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/BufferReleaseTests.cs
index 4996406c09a..21b901c9482 100644
--- a/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/BufferReleaseTests.cs
+++ b/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/BufferReleaseTests.cs
@@ -121,7 +121,11 @@ private static bool Write(IBufferWriter destination, byte[]? buffer)
using (RecyclableArrayBufferWriter writer = RecyclableArrayBufferWriter.Create(int.MaxValue))
{
serializer.Serialize(await GetAsync(), writer);
- cache.BackendCache.Set(key, writer.ToArray());
+
+ var arr = ArrayPool.Shared.Rent(HybridCachePayload.GetMaxBytes(key, TagSet.Empty, writer.CommittedBytes));
+ var bytes = HybridCachePayload.Write(arr, key, cache.CurrentTimestamp(), TimeSpan.FromHours(1), 0, TagSet.Empty, writer.AsSequence());
+ cache.BackendCache.Set(key, new ReadOnlySpan<byte>(arr, 0, bytes).ToArray());
+ ArrayPool.Shared.Return(arr);
}
#if DEBUG
cache.DebugOnlyGetOutstandingBuffers(flush: true);
@@ -180,7 +184,11 @@ private static bool Write(IBufferWriter destination, byte[]? buffer)
using (RecyclableArrayBufferWriter writer = RecyclableArrayBufferWriter.Create(int.MaxValue))
{
serializer.Serialize(await GetAsync(), writer);
- cache.BackendCache.Set(key, writer.ToArray());
+
+ var arr = ArrayPool.Shared.Rent(HybridCachePayload.GetMaxBytes(key, TagSet.Empty, writer.CommittedBytes));
+ var bytes = HybridCachePayload.Write(arr, key, cache.CurrentTimestamp(), TimeSpan.FromHours(1), 0, TagSet.Empty, writer.AsSequence());
+ cache.BackendCache.Set(key, new ReadOnlySpan<byte>(arr, 0, bytes).ToArray());
+ ArrayPool.Shared.Return(arr);
}
#if DEBUG
cache.DebugOnlyGetOutstandingBuffers(flush: true);
diff --git a/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/DistributedCacheTests.cs b/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/DistributedCacheTests.cs
index 5a565866f63..5eb015196f2 100644
--- a/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/DistributedCacheTests.cs
+++ b/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/DistributedCacheTests.cs
@@ -26,18 +26,17 @@ protected DistributedCacheTests(ITestOutputHelper log)
protected abstract ValueTask ConfigureAsync(IServiceCollection services);
protected abstract bool CustomClockSupported { get; }
- protected FakeTime Clock { get; } = new();
+ internal FakeTime Clock { get; } = new();
- protected sealed class FakeTime : TimeProvider, ISystemClock
+ internal sealed class FakeTime : TimeProvider, ISystemClock
{
- private DateTimeOffset _now = DateTimeOffset.UtcNow;
- public void Reset() => _now = DateTimeOffset.UtcNow;
+ public void Reset() => UtcNow = DateTimeOffset.UtcNow;
- DateTimeOffset ISystemClock.UtcNow => _now;
+ public DateTimeOffset UtcNow { get; private set; } = DateTimeOffset.UtcNow;
- public override DateTimeOffset GetUtcNow() => _now;
+ public override DateTimeOffset GetUtcNow() => UtcNow;
- public void Add(TimeSpan delta) => _now += delta;
+ public void Add(TimeSpan delta) => UtcNow += delta;
}
private async ValueTask InitAsync()
@@ -185,7 +184,7 @@ public async Task ReadOnlySequenceBufferRoundtrip(int size, SequenceKind kind)
Assert.Equal(size, expected.Length);
cache.Set(key, payload, _fiveMinutes);
- RecyclableArrayBufferWriter writer = RecyclableArrayBufferWriter.Create(int.MaxValue);
+ var writer = RecyclableArrayBufferWriter.Create(int.MaxValue);
Assert.True(cache.TryGet(key, writer));
Assert.True(expected.Span.SequenceEqual(writer.GetCommittedMemory().Span));
writer.ResetInPlace();
@@ -247,7 +246,7 @@ public async Task ReadOnlySequenceBufferRoundtripAsync(int size, SequenceKind ki
Assert.Equal(size, expected.Length);
await cache.SetAsync(key, payload, _fiveMinutes);
- RecyclableArrayBufferWriter writer = RecyclableArrayBufferWriter.Create(int.MaxValue);
+ var writer = RecyclableArrayBufferWriter.Create(int.MaxValue);
Assert.True(await cache.TryGetAsync(key, writer));
Assert.True(expected.Span.SequenceEqual(writer.GetCommittedMemory().Span));
writer.ResetInPlace();
diff --git a/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/HybridCacheEventSourceTests.cs b/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/HybridCacheEventSourceTests.cs
new file mode 100644
index 00000000000..74876053e34
--- /dev/null
+++ b/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/HybridCacheEventSourceTests.cs
@@ -0,0 +1,232 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Diagnostics.Tracing;
+using Microsoft.Extensions.Caching.Hybrid.Internal;
+using Xunit.Abstractions;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Tests;
+
+public class HybridCacheEventSourceTests(ITestOutputHelper log, TestEventListener listener) : IClassFixture
+{
+ // see notes in TestEventListener for context on fixture usage
+
+ [SkippableFact]
+ public void MatchesNameAndGuid()
+ {
+ // Assert
+ Assert.Equal("Microsoft-Extensions-HybridCache", listener.Source.Name);
+ Assert.Equal(Guid.Parse("b3aca39e-5dc9-5e21-f669-b72225b66cfc"), listener.Source.Guid); // from name
+ }
+
+ [SkippableFact]
+ public async Task LocalCacheHit()
+ {
+ AssertEnabled();
+
+ listener.Reset().Source.LocalCacheHit();
+ listener.AssertSingleEvent(HybridCacheEventSource.EventIdLocalCacheHit, "LocalCacheHit", EventLevel.Verbose);
+
+ await AssertCountersAsync();
+ listener.AssertCounter("total-local-cache-hits", "Total Local Cache Hits", 1);
+ listener.AssertRemainingCountersZero();
+ }
+
+ [SkippableFact]
+ public async Task LocalCacheMiss()
+ {
+ AssertEnabled();
+
+ listener.Reset().Source.LocalCacheMiss();
+ listener.AssertSingleEvent(HybridCacheEventSource.EventIdLocalCacheMiss, "LocalCacheMiss", EventLevel.Verbose);
+
+ await AssertCountersAsync();
+ listener.AssertCounter("total-local-cache-misses", "Total Local Cache Misses", 1);
+ listener.AssertRemainingCountersZero();
+ }
+
+ [SkippableFact]
+ public async Task DistributedCacheGet()
+ {
+ AssertEnabled();
+
+ listener.Reset().Source.DistributedCacheGet();
+ listener.AssertSingleEvent(HybridCacheEventSource.EventIdDistributedCacheGet, "DistributedCacheGet", EventLevel.Verbose);
+
+ await AssertCountersAsync();
+ listener.AssertCounter("current-distributed-cache-fetches", "Current Distributed Cache Fetches", 1);
+ listener.AssertRemainingCountersZero();
+ }
+
+ [SkippableFact]
+ public async Task DistributedCacheHit()
+ {
+ AssertEnabled();
+
+ listener.Reset().Source.DistributedCacheGet();
+ listener.Reset(resetCounters: false).Source.DistributedCacheHit();
+ listener.AssertSingleEvent(HybridCacheEventSource.EventIdDistributedCacheHit, "DistributedCacheHit", EventLevel.Verbose);
+
+ await AssertCountersAsync();
+ listener.AssertCounter("total-distributed-cache-hits", "Total Distributed Cache Hits", 1);
+ listener.AssertRemainingCountersZero();
+ }
+
+ [SkippableFact]
+ public async Task DistributedCacheMiss()
+ {
+ AssertEnabled();
+
+ listener.Reset().Source.DistributedCacheGet();
+ listener.Reset(resetCounters: false).Source.DistributedCacheMiss();
+ listener.AssertSingleEvent(HybridCacheEventSource.EventIdDistributedCacheMiss, "DistributedCacheMiss", EventLevel.Verbose);
+
+ await AssertCountersAsync();
+ listener.AssertCounter("total-distributed-cache-misses", "Total Distributed Cache Misses", 1);
+ listener.AssertRemainingCountersZero();
+ }
+
+ [SkippableFact]
+ public async Task DistributedCacheFailed()
+ {
+ AssertEnabled();
+
+ listener.Reset().Source.DistributedCacheGet();
+ listener.Reset(resetCounters: false).Source.DistributedCacheFailed();
+ listener.AssertSingleEvent(HybridCacheEventSource.EventIdDistributedCacheFailed, "DistributedCacheFailed", EventLevel.Error);
+
+ await AssertCountersAsync();
+ listener.AssertRemainingCountersZero();
+ }
+
+ [SkippableFact]
+ public async Task DistributedCacheCanceled()
+ {
+ AssertEnabled();
+
+ listener.Reset().Source.DistributedCacheGet();
+ listener.Reset(resetCounters: false).Source.DistributedCacheCanceled();
+ listener.AssertSingleEvent(HybridCacheEventSource.EventIdDistributedCacheCanceled, "DistributedCacheCanceled", EventLevel.Verbose);
+
+ await AssertCountersAsync();
+ listener.AssertRemainingCountersZero();
+ }
+
+ [SkippableFact]
+ public async Task UnderlyingDataQueryStart()
+ {
+ AssertEnabled();
+
+ listener.Reset().Source.UnderlyingDataQueryStart();
+ listener.AssertSingleEvent(HybridCacheEventSource.EventIdUnderlyingDataQueryStart, "UnderlyingDataQueryStart", EventLevel.Verbose);
+
+ await AssertCountersAsync();
+ listener.AssertCounter("current-data-query", "Current Data Queries", 1);
+ listener.AssertCounter("total-data-query", "Total Data Queries", 1);
+ listener.AssertRemainingCountersZero();
+ }
+
+ [SkippableFact]
+ public async Task UnderlyingDataQueryComplete()
+ {
+ AssertEnabled();
+
+ listener.Reset().Source.UnderlyingDataQueryStart();
+ listener.Reset(resetCounters: false).Source.UnderlyingDataQueryComplete();
+ listener.AssertSingleEvent(HybridCacheEventSource.EventIdUnderlyingDataQueryComplete, "UnderlyingDataQueryComplete", EventLevel.Verbose);
+
+ await AssertCountersAsync();
+ listener.AssertCounter("total-data-query", "Total Data Queries", 1);
+ listener.AssertRemainingCountersZero();
+ }
+
+ [SkippableFact]
+ public async Task UnderlyingDataQueryFailed()
+ {
+ AssertEnabled();
+
+ listener.Reset().Source.UnderlyingDataQueryStart();
+ listener.Reset(resetCounters: false).Source.UnderlyingDataQueryFailed();
+ listener.AssertSingleEvent(HybridCacheEventSource.EventIdUnderlyingDataQueryFailed, "UnderlyingDataQueryFailed", EventLevel.Error);
+
+ await AssertCountersAsync();
+ listener.AssertCounter("total-data-query", "Total Data Queries", 1);
+ listener.AssertRemainingCountersZero();
+ }
+
+ [SkippableFact]
+ public async Task UnderlyingDataQueryCanceled()
+ {
+ AssertEnabled();
+
+ listener.Reset().Source.UnderlyingDataQueryStart();
+ listener.Reset(resetCounters: false).Source.UnderlyingDataQueryCanceled();
+ listener.AssertSingleEvent(HybridCacheEventSource.EventIdUnderlyingDataQueryCanceled, "UnderlyingDataQueryCanceled", EventLevel.Verbose);
+
+ await AssertCountersAsync();
+ listener.AssertCounter("total-data-query", "Total Data Queries", 1);
+ listener.AssertRemainingCountersZero();
+ }
+
+ [SkippableFact]
+ public async Task LocalCacheWrite()
+ {
+ AssertEnabled();
+
+ listener.Reset().Source.LocalCacheWrite();
+ listener.AssertSingleEvent(HybridCacheEventSource.EventIdLocalCacheWrite, "LocalCacheWrite", EventLevel.Verbose);
+
+ await AssertCountersAsync();
+ listener.AssertCounter("total-local-cache-writes", "Total Local Cache Writes", 1);
+ listener.AssertRemainingCountersZero();
+ }
+
+ [SkippableFact]
+ public async Task DistributedCacheWrite()
+ {
+ AssertEnabled();
+
+ listener.Reset().Source.DistributedCacheWrite();
+ listener.AssertSingleEvent(HybridCacheEventSource.EventIdDistributedCacheWrite, "DistributedCacheWrite", EventLevel.Verbose);
+
+ await AssertCountersAsync();
+ listener.AssertCounter("total-distributed-cache-writes", "Total Distributed Cache Writes", 1);
+ listener.AssertRemainingCountersZero();
+ }
+
+ [SkippableFact]
+ public async Task StampedeJoin()
+ {
+ AssertEnabled();
+
+ listener.Reset().Source.StampedeJoin();
+ listener.AssertSingleEvent(HybridCacheEventSource.EventIdStampedeJoin, "StampedeJoin", EventLevel.Verbose);
+
+ await AssertCountersAsync();
+ listener.AssertCounter("total-stampede-joins", "Total Stampede Joins", 1);
+ listener.AssertRemainingCountersZero();
+ }
+
+ private void AssertEnabled()
+ {
+ // including this data for visibility when tests fail - ETW subsystem can be ... weird
+ log.WriteLine($".NET {Environment.Version} on {Environment.OSVersion}, {IntPtr.Size * 8}-bit");
+
+ Skip.IfNot(listener.Source.IsEnabled(), "Event source not enabled");
+ }
+
+ private async Task AssertCountersAsync()
+ {
+ var count = await listener.TryAwaitCountersAsync();
+
+ // ETW counters timing can be painfully unpredictable; generally
+ // it'll work fine locally, especially on modern .NET, but:
+ // CI servers and netfx in particular - not so much. The tests
+ // can still observe and validate the simple events, though, which
+ // should be enough to be credible that the eventing system is
+ // fundamentally working. We're not meant to be testing that
+ // the counters system *itself* works!
+
+ Skip.If(count == 0, "No counters received");
+ }
+}
diff --git a/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/L2Tests.cs b/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/L2Tests.cs
index 850c6a054b9..948df9d8814 100644
--- a/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/L2Tests.cs
+++ b/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/L2Tests.cs
@@ -52,7 +52,7 @@ public async Task AssertL2Operations_Immutable(bool buffers)
var backend = Assert.IsAssignableFrom(cache.BackendCache);
Log.WriteLine("Inventing key...");
var s = await cache.GetOrCreateAsync(Me(), ct => new ValueTask(CreateString(true)));
- Assert.Equal(2, backend.OpCount); // GET, SET
+ Assert.Equal(3, backend.OpCount); // (wildcard timestamp GET), GET, SET
Log.WriteLine("Reading with L1...");
for (var i = 0; i < 5; i++)
@@ -62,7 +62,7 @@ public async Task AssertL2Operations_Immutable(bool buffers)
Assert.Same(s, x);
}
- Assert.Equal(2, backend.OpCount); // shouldn't be hit
+ Assert.Equal(3, backend.OpCount); // shouldn't be hit
Log.WriteLine("Reading without L1...");
for (var i = 0; i < 5; i++)
@@ -72,7 +72,7 @@ public async Task AssertL2Operations_Immutable(bool buffers)
Assert.NotSame(s, x);
}
- Assert.Equal(7, backend.OpCount); // should be read every time
+ Assert.Equal(8, backend.OpCount); // should be read every time
Log.WriteLine("Setting value directly");
s = CreateString(true);
@@ -84,16 +84,16 @@ public async Task AssertL2Operations_Immutable(bool buffers)
Assert.Same(s, x);
}
- Assert.Equal(8, backend.OpCount); // SET
+ Assert.Equal(9, backend.OpCount); // SET
Log.WriteLine("Removing key...");
await cache.RemoveAsync(Me());
- Assert.Equal(9, backend.OpCount); // DEL
+ Assert.Equal(10, backend.OpCount); // DEL
Log.WriteLine("Fetching new...");
var t = await cache.GetOrCreateAsync(Me(), ct => new ValueTask(CreateString(true)));
Assert.NotEqual(s, t);
- Assert.Equal(11, backend.OpCount); // GET, SET
+ Assert.Equal(12, backend.OpCount); // GET, SET
}
public sealed class Foo
@@ -110,7 +110,7 @@ public async Task AssertL2Operations_Mutable(bool buffers)
var backend = Assert.IsAssignableFrom(cache.BackendCache);
Log.WriteLine("Inventing key...");
var s = await cache.GetOrCreateAsync(Me(), ct => new ValueTask(new Foo { Value = CreateString(true) }), _expiry);
- Assert.Equal(2, backend.OpCount); // GET, SET
+ Assert.Equal(3, backend.OpCount); // (wildcard timestamp GET), GET, SET
Log.WriteLine("Reading with L1...");
for (var i = 0; i < 5; i++)
@@ -120,7 +120,7 @@ public async Task AssertL2Operations_Mutable(bool buffers)
Assert.NotSame(s, x);
}
- Assert.Equal(2, backend.OpCount); // shouldn't be hit
+ Assert.Equal(3, backend.OpCount); // shouldn't be hit
Log.WriteLine("Reading without L1...");
for (var i = 0; i < 5; i++)
@@ -130,7 +130,7 @@ public async Task AssertL2Operations_Mutable(bool buffers)
Assert.NotSame(s, x);
}
- Assert.Equal(7, backend.OpCount); // should be read every time
+ Assert.Equal(8, backend.OpCount); // should be read every time
Log.WriteLine("Setting value directly");
s = new Foo { Value = CreateString(true) };
@@ -142,16 +142,16 @@ public async Task AssertL2Operations_Mutable(bool buffers)
Assert.NotSame(s, x);
}
- Assert.Equal(8, backend.OpCount); // SET
+ Assert.Equal(9, backend.OpCount); // SET
Log.WriteLine("Removing key...");
await cache.RemoveAsync(Me());
- Assert.Equal(9, backend.OpCount); // DEL
+ Assert.Equal(10, backend.OpCount); // DEL
Log.WriteLine("Fetching new...");
var t = await cache.GetOrCreateAsync(Me(), ct => new ValueTask(new Foo { Value = CreateString(true) }), _expiry);
Assert.NotEqual(s.Value, t.Value);
- Assert.Equal(11, backend.OpCount); // GET, SET
+ Assert.Equal(12, backend.OpCount); // GET, SET
}
private class BufferLoggingCache : LoggingCache, IBufferDistributedCache
@@ -204,7 +204,7 @@ async ValueTask IBufferDistributedCache.TryGetAsync(string key, IBufferWri
}
}
- private class LoggingCache(ITestOutputHelper log, IDistributedCache tail) : IDistributedCache
+ internal class LoggingCache(ITestOutputHelper log, IDistributedCache tail) : IDistributedCache
{
protected ITestOutputHelper Log => log;
protected IDistributedCache Tail => tail;
diff --git a/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/LocalInvalidationTests.cs b/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/LocalInvalidationTests.cs
new file mode 100644
index 00000000000..cd63e06542e
--- /dev/null
+++ b/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/LocalInvalidationTests.cs
@@ -0,0 +1,147 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using Microsoft.Extensions.Caching.Distributed;
+using Microsoft.Extensions.Caching.Hybrid.Internal;
+using Microsoft.Extensions.Caching.Memory;
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.Options;
+using Xunit.Abstractions;
+using static Microsoft.Extensions.Caching.Hybrid.Tests.L2Tests;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Tests;
+public class LocalInvalidationTests(ITestOutputHelper log)
+{
+ private static ServiceProvider GetDefaultCache(out DefaultHybridCache cache, Action? config = null)
+ {
+ var services = new ServiceCollection();
+ config?.Invoke(services);
+ services.AddHybridCache();
+ ServiceProvider provider = services.BuildServiceProvider();
+ cache = Assert.IsType(provider.GetRequiredService());
+ return provider;
+ }
+
+ [Fact]
+ public async Task GlobalInvalidateNoTags()
+ {
+ using var services = GetDefaultCache(out var cache);
+ var value = await cache.GetOrCreateAsync("abc", ct => new(Guid.NewGuid()));
+
+ // should work immediately as-is
+ Assert.Equal(value, await cache.GetOrCreateAsync("abc", ct => new(Guid.NewGuid())));
+
+ // invalidating a normal tag should have no effect
+ await cache.RemoveByTagAsync("foo");
+ Assert.Equal(value, await cache.GetOrCreateAsync("abc", ct => new(Guid.NewGuid())));
+
+ // invalidating everything should force a re-fetch
+ await cache.RemoveByTagAsync("*");
+ var newValue = await cache.GetOrCreateAsync("abc", ct => new(Guid.NewGuid()));
+ Assert.NotEqual(value, newValue);
+
+ // which should now be repeatable again
+ Assert.Equal(newValue, await cache.GetOrCreateAsync("abc", ct => new(Guid.NewGuid())));
+ }
+
+ private static class Options
+ {
+ public static IOptions Create(T value)
+ where T : class
+ => new OptionsImpl(value);
+
+ private sealed class OptionsImpl : IOptions
+ where T : class
+ {
+ public OptionsImpl(T value)
+ {
+ Value = value;
+ }
+
+ public T Value { get; }
+ }
+ }
+
+ [Theory]
+ [InlineData(false)]
+ [InlineData(true)]
+ public async Task TagBasedInvalidate(bool withL2)
+ {
+ using IMemoryCache l1 = new MemoryCache(new MemoryCacheOptions());
+ IDistributedCache? l2 = null;
+ if (withL2)
+ {
+ MemoryDistributedCacheOptions options = new();
+ MemoryDistributedCache mdc = new(Options.Create(options));
+ l2 = new LoggingCache(log, mdc);
+ }
+
+ Guid lastValue = Guid.Empty;
+
+ // loop because we want to test pre-existing L1/L2 impact
+ for (int i = 0; i < 3; i++)
+ {
+ using var services = GetDefaultCache(out var cache, svc =>
+ {
+ svc.AddSingleton(l1);
+ if (l2 is not null)
+ {
+ svc.AddSingleton(l2);
+ }
+ });
+ var clock = services.GetRequiredService();
+
+ string key = "mykey";
+ string tag = "abc";
+ string[] tags = [tag];
+ var value = await cache.GetOrCreateAsync(key, ct => new(Guid.NewGuid()), tags: tags);
+ log.WriteLine($"First value: {value}");
+ if (lastValue != Guid.Empty)
+ {
+ Assert.Equal(lastValue, value);
+ }
+
+ // should work immediately as-is
+ var tmp = await cache.GetOrCreateAsync(key, ct => new(Guid.NewGuid()), tags: tags);
+ log.WriteLine($"Second value: {tmp} (should be {value})");
+ Assert.Equal(value, tmp);
+
+ // invalidating a normal tag should have no effect
+ await cache.RemoveByTagAsync("foo");
+ tmp = await cache.GetOrCreateAsync(key, ct => new(Guid.NewGuid()), tags: tags);
+ log.WriteLine($"Value after invalidating tag foo: {tmp} (should be {value})");
+ Assert.Equal(value, tmp);
+
+ // invalidating a tag we have should force a re-fetch
+ await cache.RemoveByTagAsync(tag);
+ var newValue = await cache.GetOrCreateAsync