diff --git a/Directory.Build.targets b/Directory.Build.targets
index 956a4cf8078..5ee66133050 100644
--- a/Directory.Build.targets
+++ b/Directory.Build.targets
@@ -22,7 +22,7 @@
$(NoWarn);AD0001
- $(NoWarn);EXTEXP0001;EXTEXP0002;EXTEXP0003;EXTEXP0004;EXTEXP0005;EXTEXP0006;EXTEXP0007;EXTEXP0008;EXTEXP0009;EXTEXP0010;EXTEXP0011;EXTEXP0012;EXTEXP0013;EXTEXP0014;EXTEXP0015;EXTEXP0016;EXTEXP0017
+ $(NoWarn);EXTEXP0001;EXTEXP0002;EXTEXP0003;EXTEXP0004;EXTEXP0005;EXTEXP0006;EXTEXP0007;EXTEXP0008;EXTEXP0009;EXTEXP0010;EXTEXP0011;EXTEXP0012;EXTEXP0013;EXTEXP0014;EXTEXP0015;EXTEXP0016;EXTEXP0017;EXTEXP0018
$(NoWarn);EXTOBS0001;
diff --git a/docs/list-of-diagnostics.md b/docs/list-of-diagnostics.md
index ba8e170a878..4ba19ed1099 100644
--- a/docs/list-of-diagnostics.md
+++ b/docs/list-of-diagnostics.md
@@ -40,6 +40,7 @@ if desired.
| `EXTEXP0015` | Environmental probes experiments |
| `EXTEXP0016` | Hosting integration testing experiments |
| `EXTEXP0017` | Contextual options experiments |
+| `EXTEXP0018` | HybridCache experiments |
# Obsoletions
@@ -81,7 +82,7 @@ You may continue using obsolete APIs in your application, but we advise explorin
| `LOGGEN023` | Tag provider method is inaccessible |
| `LOGGEN024` | Property provider method has an invalid signature |
| `LOGGEN025` | Logging method parameters can't have "ref" or "out" modifiers |
-| `LOGGEN026` | Parameters with a custom tag provider are not subject to redaciton |
+| `LOGGEN026` | Parameters with a custom tag provider are not subject to redaction |
| `LOGGEN027` | Multiple logging methods shouldn't use the same event name |
| `LOGGEN028` | Logging method parameter's type has a hidden property |
| `LOGGEN029` | A logging method parameter causes name conflicts |
diff --git a/eng/MSBuild/LegacySupport.props b/eng/MSBuild/LegacySupport.props
index c96a83d34d6..8ebacbd60f7 100644
--- a/eng/MSBuild/LegacySupport.props
+++ b/eng/MSBuild/LegacySupport.props
@@ -7,11 +7,11 @@
-
+
-
+
@@ -47,7 +47,7 @@
-
+
diff --git a/eng/Version.Details.xml b/eng/Version.Details.xml
index 752392c3455..fefd81bb5c0 100644
--- a/eng/Version.Details.xml
+++ b/eng/Version.Details.xml
@@ -8,6 +8,10 @@
https://github.com/dotnet/runtime
31528d082bd760377b8d818fc839a338cd071b1f
+
+ https://github.com/dotnet/runtime
+ 418c3b9e2753715fa017ace6b3f1f5ec4d4d6aae
+
https://github.com/dotnet/runtime
31528d082bd760377b8d818fc839a338cd071b1f
@@ -144,6 +148,10 @@
https://github.com/dotnet/aspnetcore
fc4f8810d8df45a9f42e02f688041bf592c18138
+
+ https://github.com/dotnet/aspnetcore
+ 2b865e33f2c7c9484c28a3b62e8ff07966e23434
+
https://github.com/dotnet/aspnetcore
fc4f8810d8df45a9f42e02f688041bf592c18138
diff --git a/eng/Versions.props b/eng/Versions.props
index 79a87cb5bd0..3d593320c18 100644
--- a/eng/Versions.props
+++ b/eng/Versions.props
@@ -30,6 +30,7 @@
9.0.0-rc.2.24453.5
9.0.0-rc.2.24453.5
+ 9.0.0-rc.2.24453.5
9.0.0-rc.2.24453.5
9.0.0-rc.2.24453.5
9.0.0-rc.2.24453.5
@@ -64,6 +65,7 @@
9.0.0-rc.2.24460.5
9.0.0-rc.2.24460.5
9.0.0-rc.2.24460.5
+ 9.0.0-rc.2.24460.5
9.0.0-rc.2.24460.5
9.0.0-rc.2.24460.5
9.0.0-rc.2.24460.5
diff --git a/eng/packages/General.props b/eng/packages/General.props
index 8f34cf9271a..00a79452d92 100644
--- a/eng/packages/General.props
+++ b/eng/packages/General.props
@@ -10,6 +10,9 @@
+
+
+
diff --git a/eng/spellchecking_exclusions.dic b/eng/spellchecking_exclusions.dic
index 2f00ad64f92..2fc9b74699b 100644
Binary files a/eng/spellchecking_exclusions.dic and b/eng/spellchecking_exclusions.dic differ
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/HybridCacheBuilderExtensions.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/HybridCacheBuilderExtensions.cs
new file mode 100644
index 00000000000..d8fa3a3a3ad
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/HybridCacheBuilderExtensions.cs
@@ -0,0 +1,62 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Diagnostics.CodeAnalysis;
+using Microsoft.Extensions.Caching.Hybrid;
+using Microsoft.Shared.Diagnostics;
+
+namespace Microsoft.Extensions.DependencyInjection;
+
+///
+/// Configuration extension methods for / .
+///
+public static class HybridCacheBuilderExtensions
+{
+ ///
+ /// Serialize values of type with the specified serializer from .
+ ///
+ /// The type to be serialized.
+ /// The instance.
+ public static IHybridCacheBuilder AddSerializer(this IHybridCacheBuilder builder, IHybridCacheSerializer serializer)
+ {
+ _ = Throw.IfNull(builder).Services.AddSingleton>(serializer);
+ return builder;
+ }
+
+ ///
+ /// Serialize values of type with the serializer of type .
+ ///
+ /// The type to be serialized.
+ /// The serializer to use for this type.
+ /// The instance.
+ public static IHybridCacheBuilder AddSerializer(this IHybridCacheBuilder builder)
+ where TImplementation : class, IHybridCacheSerializer
+ {
+ _ = Throw.IfNull(builder).Services.AddSingleton, TImplementation>();
+ return builder;
+ }
+
+ ///
+ /// Add as an additional serializer factory, which can provide serializers for multiple types.
+ ///
+ /// The instance.
+ public static IHybridCacheBuilder AddSerializerFactory(this IHybridCacheBuilder builder, IHybridCacheSerializerFactory factory)
+ {
+ _ = Throw.IfNull(builder).Services.AddSingleton(factory);
+ return builder;
+ }
+
+ ///
+ /// Add a factory of type as an additional serializer factory, which can provide serializers for multiple types.
+ ///
+ /// The type of the serializer factory.
+ /// The instance.
+ public static IHybridCacheBuilder AddSerializerFactory<
+ [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors)] TImplementation>(this IHybridCacheBuilder builder)
+ where TImplementation : class, IHybridCacheSerializerFactory
+ {
+ _ = Throw.IfNull(builder).Services.AddSingleton();
+ return builder;
+ }
+}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/HybridCacheOptions.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/HybridCacheOptions.cs
new file mode 100644
index 00000000000..982ea55a6af
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/HybridCacheOptions.cs
@@ -0,0 +1,44 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+namespace Microsoft.Extensions.Caching.Hybrid;
+
+///
+/// Options for configuring the default implementation.
+///
+public class HybridCacheOptions
+{
+ private const int ShiftBytesToMibiBytes = 20;
+
+ ///
+ /// Gets or sets the default global options to be applied to operations; if options are
+ /// specified at the individual call level, the non-null values are merged (with the per-call
+ /// options being used in preference to the global options). If no value is specified for a given
+ /// option (globally or per-call), the implementation may choose a reasonable default.
+ ///
+ public HybridCacheEntryOptions? DefaultEntryOptions { get; set; }
+
+ ///
+ /// Gets or sets a value indicating whether compression for this instance is disabled.
+ ///
+ public bool DisableCompression { get; set; }
+
+ ///
+ /// Gets or sets the maximum size of cache items; attempts to store values over this size will be logged
+ /// and the value will not be stored in cache.
+ ///
+ /// The default value is 1 MiB.
+ public long MaximumPayloadBytes { get; set; } = 1 << ShiftBytesToMibiBytes; // 1MiB
+
+ ///
+ /// Gets or sets the maximum permitted length (in characters) of keys; attempts to use keys over this size will be logged.
+ ///
+ /// The default value is 1024 characters.
+ public int MaximumKeyLength { get; set; } = 1024; // characters
+
+ ///
+ /// Gets or sets a value indicating whether to use "tags" data as dimensions on metric reporting; if enabled, care should be used to ensure that
+ /// tags do not contain data that should not be visible in metrics systems.
+ ///
+ public bool ReportTagMetrics { get; set; }
+}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/HybridCacheServiceExtensions.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/HybridCacheServiceExtensions.cs
new file mode 100644
index 00000000000..d28dc4e47d5
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/HybridCacheServiceExtensions.cs
@@ -0,0 +1,44 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using Microsoft.Extensions.Caching.Hybrid;
+using Microsoft.Extensions.Caching.Hybrid.Internal;
+using Microsoft.Extensions.DependencyInjection.Extensions;
+using Microsoft.Shared.Diagnostics;
+
+namespace Microsoft.Extensions.DependencyInjection;
+
+///
+/// Configuration extension methods for .
+///
+public static class HybridCacheServiceExtensions
+{
+ ///
+ /// Adds support for multi-tier caching services.
+ ///
+ /// A builder instance that allows further configuration of the system.
+ public static IHybridCacheBuilder AddHybridCache(this IServiceCollection services, Action setupAction)
+ {
+ _ = Throw.IfNull(setupAction);
+ _ = AddHybridCache(services);
+ _ = services.Configure(setupAction);
+ return new HybridCacheBuilder(services);
+ }
+
+ ///
+ /// Adds support for multi-tier caching services.
+ ///
+ /// A builder instance that allows further configuration of the system.
+ public static IHybridCacheBuilder AddHybridCache(this IServiceCollection services)
+ {
+ _ = Throw.IfNull(services);
+ services.TryAddSingleton(TimeProvider.System);
+ _ = services.AddOptions().AddMemoryCache();
+ services.TryAddSingleton();
+ services.TryAddSingleton>(InbuiltTypeSerializer.Instance);
+ services.TryAddSingleton>(InbuiltTypeSerializer.Instance);
+ services.TryAddSingleton();
+ return new HybridCacheBuilder(services);
+ }
+}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/IHybridCacheBuilder.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/IHybridCacheBuilder.cs
new file mode 100644
index 00000000000..55c1f47ae3e
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/IHybridCacheBuilder.cs
@@ -0,0 +1,17 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using Microsoft.Extensions.DependencyInjection;
+
+namespace Microsoft.Extensions.Caching.Hybrid;
+
+///
+/// Helper API for configuring .
+///
+public interface IHybridCacheBuilder
+{
+ ///
+ /// Gets the services collection associated with this instance.
+ ///
+ IServiceCollection Services { get; }
+}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/BufferChunk.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/BufferChunk.cs
new file mode 100644
index 00000000000..0d7d54cfdd6
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/BufferChunk.cs
@@ -0,0 +1,94 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Buffers;
+using System.Diagnostics;
+using System.Runtime.CompilerServices;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+// Used to convey buffer status; like ArraySegment, but Offset is always
+// zero, and we use the most significant bit of the length (usually the sign flag,
+// but we do not need to support negative length) to track whether or not
+// to recycle this value.
+internal readonly struct BufferChunk
+{
+ private const int FlagReturnToPool = (1 << 31);
+
+ private readonly int _lengthAndPoolFlag;
+
+ public byte[]? Array { get; } // null for default
+
+ public int Length => _lengthAndPoolFlag & ~FlagReturnToPool;
+
+ public bool ReturnToPool => (_lengthAndPoolFlag & FlagReturnToPool) != 0;
+
+ public BufferChunk(byte[] array)
+ {
+ Debug.Assert(array is not null, "expected valid array input");
+ Array = array;
+ _lengthAndPoolFlag = array!.Length;
+
+ // assume not pooled, if exact-sized
+ // (we don't expect array.Length to be negative; we're really just saying
+ // "we expect the result of assigning array.Length to _lengthAndPoolFlag
+ // to give the expected Length *and* not have the MSB set; we're just
+ // checking that we haven't fat-fingered our MSB logic)
+ Debug.Assert(!ReturnToPool, "do not return right-sized arrays");
+ Debug.Assert(Length == array.Length, "array length not respected");
+ }
+
+ public BufferChunk(byte[] array, int length, bool returnToPool)
+ {
+ Debug.Assert(array is not null, "expected valid array input");
+ Debug.Assert(length >= 0, "expected valid length");
+ Array = array;
+ _lengthAndPoolFlag = length | (returnToPool ? FlagReturnToPool : 0);
+ Debug.Assert(ReturnToPool == returnToPool, "return-to-pool not respected");
+ Debug.Assert(Length == length, "length not respected");
+ }
+
+ public byte[] ToArray()
+ {
+ var length = Length;
+ if (length == 0)
+ {
+ return [];
+ }
+
+ var copy = new byte[length];
+ Buffer.BlockCopy(Array!, 0, copy, 0, length);
+ return copy;
+
+ // Note on nullability of Array; the usage here is that a non-null array
+ // is always provided during construction, so the only null scenario is for default(BufferChunk).
+ // Since the constructor explicitly accesses array.Length, any null array passed to the constructor
+ // will cause an exception, even in release (the Debug.Assert only covers debug) - although in
+ // reality we do not expect this to ever occur (internal type, usage checked, etc). In the case of
+ // default(BufferChunk), we know that Length will be zero, which means we will hit the [] case.
+ }
+
+ internal void RecycleIfAppropriate()
+ {
+ if (ReturnToPool)
+ {
+ ArrayPool.Shared.Return(Array!);
+ }
+
+ Unsafe.AsRef(in this) = default; // anti foot-shotgun double-return guard; not 100%, but worth doing
+ Debug.Assert(Array is null && !ReturnToPool, "expected clean slate after recycle");
+ }
+
+ // get the data as a ROS; for note on null-logic of Array!, see comment in ToArray
+ internal ReadOnlySequence AsSequence() => Length == 0 ? default : new ReadOnlySequence(Array!, 0, Length);
+
+ internal BufferChunk DoNotReturnToPool()
+ {
+ var copy = this;
+ Unsafe.AsRef(in copy._lengthAndPoolFlag) &= ~FlagReturnToPool;
+ Debug.Assert(copy.Length == Length, "same length expected");
+ Debug.Assert(!copy.ReturnToPool, "do not return to pool");
+ return copy;
+ }
+}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.CacheItem.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.CacheItem.cs
new file mode 100644
index 00000000000..1f8585d95d5
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.CacheItem.cs
@@ -0,0 +1,109 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Diagnostics;
+using System.Threading;
+using Microsoft.Extensions.Caching.Memory;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+internal partial class DefaultHybridCache
+{
+ internal abstract class CacheItem
+ {
+ private int _refCount = 1; // the number of pending operations against this cache item
+
+ public abstract bool DebugIsImmutable { get; }
+
+ // Note: the ref count is the number of callers anticipating this value at any given time. Initially,
+ // it is one for a simple "get the value" flow, but if another call joins with us, it'll be incremented.
+ // If either cancels, it will get decremented, with the entire flow being cancelled if it ever becomes
+ // zero.
+ // This counter also drives cache lifetime, with the cache itself incrementing the count by one. In the
+ // case of mutable data, cache eviction may reduce this to zero (in cooperation with any concurrent readers,
+ // who incr/decr around their fetch), allowing safe buffer recycling.
+
+ internal int RefCount => Volatile.Read(ref _refCount);
+
+ internal static readonly PostEvictionDelegate SharedOnEviction = static (key, value, reason, state) =>
+ {
+ if (value is CacheItem item)
+ {
+ _ = item.Release();
+ }
+ };
+
+ public virtual bool NeedsEvictionCallback => false; // do we need to call Release when evicted?
+
+ public abstract bool TryReserveBuffer(out BufferChunk buffer);
+
+ ///
+ /// Signal that the consumer is done with this item (ref-count decr).
+ ///
+ /// True if this is the final release.
+ public bool Release()
+ {
+ int newCount = Interlocked.Decrement(ref _refCount);
+ Debug.Assert(newCount >= 0, "over-release detected");
+ if (newCount == 0)
+ {
+ // perform per-item clean-up, i.e. buffer recycling (if defensive copies needed)
+ OnFinalRelease();
+ return true;
+ }
+
+ return false;
+ }
+
+ public bool TryReserve()
+ {
+ // This is basically interlocked increment, but with a check against:
+ // a) incrementing upwards from zero
+ // b) overflowing *back* to zero
+ int oldValue = Volatile.Read(ref _refCount);
+ do
+ {
+ if (oldValue is 0 or -1)
+ {
+ return false; // already burned, or about to roll around back to zero
+ }
+
+ var updated = Interlocked.CompareExchange(ref _refCount, oldValue + 1, oldValue);
+ if (updated == oldValue)
+ {
+ return true; // we exchanged
+ }
+
+ oldValue = updated; // we failed, but we have an updated state
+ }
+ while (true);
+ }
+
+ protected virtual void OnFinalRelease() // any required release semantics
+ {
+ }
+ }
+
+ internal abstract class CacheItem : CacheItem
+ {
+ // attempt to get a value that was *not* previously reserved
+ public abstract bool TryGetValue(out T value);
+
+ // get a value that *was* reserved, countermanding our reservation in the process
+ public T GetReservedValue()
+ {
+ if (!TryGetValue(out var value))
+ {
+ Throw();
+ }
+
+ _ = Release();
+ return value;
+
+ static void Throw() => throw new ObjectDisposedException("The cache item has been recycled before the value was obtained");
+ }
+
+ internal static CacheItem Create() => ImmutableTypeCache.IsImmutable ? new ImmutableCacheItem() : new MutableCacheItem();
+ }
+}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.Debug.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.Debug.cs
new file mode 100644
index 00000000000..a9901103555
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.Debug.cs
@@ -0,0 +1,81 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Diagnostics;
+using System.Diagnostics.CodeAnalysis;
+
+#if DEBUG
+using System.Threading;
+#endif
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+internal partial class DefaultHybridCache
+{
+ ///
+ /// Auxiliary API for testing purposes, allowing confirmation of the internal state independent of the public API.
+ ///
+ internal bool DebugTryGetCacheItem(string key, [NotNullWhen(true)] out CacheItem? value)
+ {
+ if (_localCache.TryGetValue(key, out var untyped) && untyped is CacheItem typed)
+ {
+ value = typed;
+ return true;
+ }
+
+ value = null;
+ return false;
+ }
+
+#if DEBUG // enable ref-counted buffers
+
+ private int _outstandingBufferCount;
+
+ internal int DebugOnlyGetOutstandingBuffers(bool flush = false)
+ => flush ? Interlocked.Exchange(ref _outstandingBufferCount, 0) : Volatile.Read(ref _outstandingBufferCount);
+
+ [Conditional("DEBUG")]
+ internal void DebugOnlyDecrementOutstandingBuffers()
+ {
+ _ = Interlocked.Decrement(ref _outstandingBufferCount);
+ }
+
+ [Conditional("DEBUG")]
+ internal void DebugOnlyIncrementOutstandingBuffers()
+ {
+ _ = Interlocked.Increment(ref _outstandingBufferCount);
+ }
+#endif
+
+ private partial class MutableCacheItem
+ {
+#if DEBUG
+ private DefaultHybridCache? _cache; // for buffer-tracking - only needed in DEBUG
+#endif
+
+ [Conditional("DEBUG")]
+ [SuppressMessage("Performance", "CA1822:Mark members as static", Justification = "Instance state used in debug")]
+ internal void DebugOnlyTrackBuffer(DefaultHybridCache cache)
+ {
+#if DEBUG
+ _cache = cache;
+ if (_buffer.ReturnToPool)
+ {
+ _cache?.DebugOnlyIncrementOutstandingBuffers();
+ }
+#endif
+ }
+
+ [Conditional("DEBUG")]
+ [SuppressMessage("Performance", "CA1822:Mark members as static", Justification = "Instance state used in debug")]
+ private void DebugOnlyDecrementOutstandingBuffers()
+ {
+#if DEBUG
+ if (_buffer.ReturnToPool)
+ {
+ _cache?.DebugOnlyDecrementOutstandingBuffers();
+ }
+#endif
+ }
+ }
+}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.ImmutableCacheItem.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.ImmutableCacheItem.cs
new file mode 100644
index 00000000000..2118fc39247
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.ImmutableCacheItem.cs
@@ -0,0 +1,47 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Threading;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+internal partial class DefaultHybridCache
+{
+ private sealed class ImmutableCacheItem : CacheItem // used to hold types that do not require defensive copies
+ {
+ private static ImmutableCacheItem? _sharedDefault;
+
+ private T _value = default!; // deferred until SetValue
+
+ public override bool DebugIsImmutable => true;
+
+ // get a shared instance that passes as "reserved"; doesn't need to be 100% singleton,
+ // but we don't want to break the reservation rules either; if we can't reserve: create new
+ public static ImmutableCacheItem GetReservedShared()
+ {
+ ImmutableCacheItem? obj = Volatile.Read(ref _sharedDefault);
+ if (obj is null || !obj.TryReserve())
+ {
+ obj = new();
+ _ = obj.TryReserve(); // this is reliable on a new instance
+ Volatile.Write(ref _sharedDefault, obj);
+ }
+
+ return obj;
+ }
+
+ public void SetValue(T value) => _value = value;
+
+ public override bool TryGetValue(out T value)
+ {
+ value = _value;
+ return true; // always available
+ }
+
+ public override bool TryReserveBuffer(out BufferChunk buffer)
+ {
+ buffer = default;
+ return false; // we don't have one to reserve!
+ }
+ }
+}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.L2.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.L2.cs
new file mode 100644
index 00000000000..5c08aecb9ef
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.L2.cs
@@ -0,0 +1,154 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Diagnostics;
+using System.Diagnostics.CodeAnalysis;
+using System.Runtime.CompilerServices;
+using System.Threading;
+using System.Threading.Tasks;
+using Microsoft.Extensions.Caching.Distributed;
+using Microsoft.Extensions.Caching.Memory;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+internal partial class DefaultHybridCache
+{
+ [SuppressMessage("Performance", "CA1849:Call async methods when in an async method", Justification = "Manual sync check")]
+ [SuppressMessage("Usage", "VSTHRD003:Avoid awaiting foreign Tasks", Justification = "Manual sync check")]
+ internal ValueTask GetFromL2Async(string key, CancellationToken token)
+ {
+ switch (GetFeatures(CacheFeatures.BackendCache | CacheFeatures.BackendBuffers))
+ {
+ case CacheFeatures.BackendCache: // legacy byte[]-based
+ var pendingLegacy = _backendCache!.GetAsync(key, token);
+#if NETCOREAPP2_0_OR_GREATER || NETSTANDARD2_1_OR_GREATER
+ if (!pendingLegacy.IsCompletedSuccessfully)
+#else
+ if (pendingLegacy.Status != TaskStatus.RanToCompletion)
+#endif
+ {
+ return new(AwaitedLegacyAsync(pendingLegacy, this));
+ }
+
+ return new(GetValidPayloadSegment(pendingLegacy.Result)); // already complete
+
+ case CacheFeatures.BackendCache | CacheFeatures.BackendBuffers: // IBufferWriter-based
+ var writer = RecyclableArrayBufferWriter.Create(MaximumPayloadBytes);
+ var cache = Unsafe.As(_backendCache!); // type-checked already
+ var pendingBuffers = cache.TryGetAsync(key, writer, token);
+ if (!pendingBuffers.IsCompletedSuccessfully)
+ {
+ return new(AwaitedBuffersAsync(pendingBuffers, writer));
+ }
+
+ BufferChunk result = pendingBuffers.GetAwaiter().GetResult()
+ ? new(writer.DetachCommitted(out var length), length, returnToPool: true)
+ : default;
+ writer.Dispose(); // it is not accidental that this isn't "using"; avoid recycling if not 100% sure what happened
+ return new(result);
+ }
+
+ return default;
+
+ static async Task AwaitedLegacyAsync(Task pending, DefaultHybridCache @this)
+ {
+ var bytes = await pending.ConfigureAwait(false);
+ return @this.GetValidPayloadSegment(bytes);
+ }
+
+ static async Task AwaitedBuffersAsync(ValueTask pending, RecyclableArrayBufferWriter writer)
+ {
+ BufferChunk result = await pending.ConfigureAwait(false)
+ ? new(writer.DetachCommitted(out var length), length, returnToPool: true)
+ : default;
+ writer.Dispose(); // it is not accidental that this isn't "using"; avoid recycling if not 100% sure what happened
+ return result;
+ }
+ }
+
+ internal ValueTask SetL2Async(string key, in BufferChunk buffer, HybridCacheEntryOptions? options, CancellationToken token)
+ {
+ Debug.Assert(buffer.Array is not null, "array should be non-null");
+ switch (GetFeatures(CacheFeatures.BackendCache | CacheFeatures.BackendBuffers))
+ {
+ case CacheFeatures.BackendCache: // legacy byte[]-based
+ var arr = buffer.Array!;
+ if (arr.Length != buffer.Length)
+ {
+ // we'll need a right-sized snapshot
+ arr = buffer.ToArray();
+ }
+
+ return new(_backendCache!.SetAsync(key, arr, GetOptions(options), token));
+ case CacheFeatures.BackendCache | CacheFeatures.BackendBuffers: // ReadOnlySequence-based
+ var cache = Unsafe.As(_backendCache!); // type-checked already
+ return cache.SetAsync(key, buffer.AsSequence(), GetOptions(options), token);
+ }
+
+ return default;
+ }
+
+ internal void SetL1(string key, CacheItem value, HybridCacheEntryOptions? options)
+ {
+ // incr ref-count for the cache itself; this *may* be released via the NeedsEvictionCallback path
+ if (value.TryReserve())
+ {
+ // based on CacheExtensions.Set, but with post-eviction recycling
+ using var cacheEntry = _localCache.CreateEntry(key);
+ cacheEntry.AbsoluteExpirationRelativeToNow = options?.LocalCacheExpiration ?? _defaultLocalCacheExpiration;
+ cacheEntry.Value = value;
+ if (value.NeedsEvictionCallback)
+ {
+ _ = cacheEntry.RegisterPostEvictionCallback(CacheItem.SharedOnEviction);
+ }
+ }
+ }
+
+ private BufferChunk GetValidPayloadSegment(byte[]? payload)
+ {
+ if (payload is not null)
+ {
+ if (payload.Length > MaximumPayloadBytes)
+ {
+ ThrowPayloadLengthExceeded(payload.Length);
+ }
+
+ return new(payload);
+ }
+
+ return default;
+ }
+
+ [DoesNotReturn]
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ private void ThrowPayloadLengthExceeded(int size) // splitting the exception bits out to a different method
+ {
+ // also add via logger when possible
+ throw new InvalidOperationException($"Maximum cache length ({MaximumPayloadBytes} bytes) exceeded");
+ }
+
+#if NET8_0_OR_GREATER
+ [SuppressMessage("Maintainability", "CA1508:Avoid dead conditional code", Justification = "False positive from unsafe accessor")]
+#endif
+ private DistributedCacheEntryOptions GetOptions(HybridCacheEntryOptions? options)
+ {
+ DistributedCacheEntryOptions? result = null;
+ if (options is not null && options.Expiration.HasValue && options.Expiration.GetValueOrDefault() != _defaultExpiration)
+ {
+ result = ToDistributedCacheEntryOptions(options);
+ }
+
+ return result ?? _defaultDistributedCacheExpiration;
+
+#if NET8_0_OR_GREATER
+ // internal method memoizes this allocation; since it is "init", it is immutable (outside reflection)
+ [UnsafeAccessor(UnsafeAccessorKind.Method, Name = nameof(ToDistributedCacheEntryOptions))]
+ extern static DistributedCacheEntryOptions? ToDistributedCacheEntryOptions(HybridCacheEntryOptions options);
+#else
+ // without that helper method, we'll just eat the alloc (down-level TFMs)
+ static DistributedCacheEntryOptions ToDistributedCacheEntryOptions(HybridCacheEntryOptions options)
+ => new() { AbsoluteExpirationRelativeToNow = options.Expiration };
+#endif
+ }
+}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.MutableCacheItem.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.MutableCacheItem.cs
new file mode 100644
index 00000000000..8ce93b79c4a
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.MutableCacheItem.cs
@@ -0,0 +1,73 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+internal partial class DefaultHybridCache
+{
+ private sealed partial class MutableCacheItem : CacheItem // used to hold types that require defensive copies
+ {
+ private IHybridCacheSerializer _serializer = null!; // deferred until SetValue
+ private BufferChunk _buffer;
+
+ public override bool NeedsEvictionCallback => _buffer.ReturnToPool;
+
+ public override bool DebugIsImmutable => false;
+
+ public void SetValue(ref BufferChunk buffer, IHybridCacheSerializer serializer)
+ {
+ _serializer = serializer;
+ _buffer = buffer;
+ buffer = default; // we're taking over the lifetime; the caller no longer has it!
+ }
+
+ public void SetValue(T value, IHybridCacheSerializer serializer, int maxLength)
+ {
+ _serializer = serializer;
+ var writer = RecyclableArrayBufferWriter.Create(maxLength);
+ serializer.Serialize(value, writer);
+
+ _buffer = new(writer.DetachCommitted(out var length), length, returnToPool: true);
+ writer.Dispose(); // no buffers left (we just detached them), but just in case of other logic
+ }
+
+ public override bool TryGetValue(out T value)
+ {
+ // only if we haven't already burned
+ if (!TryReserve())
+ {
+ value = default!;
+ return false;
+ }
+
+ try
+ {
+ value = _serializer.Deserialize(_buffer.AsSequence());
+ return true;
+ }
+ finally
+ {
+ _ = Release();
+ }
+ }
+
+ public override bool TryReserveBuffer(out BufferChunk buffer)
+ {
+ // only if we haven't already burned
+ if (TryReserve())
+ {
+ buffer = _buffer.DoNotReturnToPool(); // not up to them!
+ return true;
+ }
+
+ buffer = default;
+ return false;
+ }
+
+ protected override void OnFinalRelease()
+ {
+ DebugOnlyDecrementOutstandingBuffers();
+ _buffer.RecycleIfAppropriate();
+ }
+ }
+}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.Serialization.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.Serialization.cs
new file mode 100644
index 00000000000..523a95e279a
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.Serialization.cs
@@ -0,0 +1,54 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Collections.Concurrent;
+using System.Reflection;
+using System.Runtime.CompilerServices;
+using Microsoft.Extensions.DependencyInjection;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+internal partial class DefaultHybridCache
+{
+ // Per instance cache of typed serializers; each serializer is a
+ // IHybridCacheSerializer for the corresponding Type, but we can't
+ // know which here - and undesirable to add an artificial non-generic
+ // IHybridCacheSerializer base that serves no other purpose.
+ private readonly ConcurrentDictionary _serializers = new();
+
+ internal int MaximumPayloadBytes { get; }
+
+ internal IHybridCacheSerializer GetSerializer()
+ {
+ return _serializers.TryGetValue(typeof(T), out var serializer)
+ ? Unsafe.As>(serializer) : ResolveAndAddSerializer(this);
+
+ static IHybridCacheSerializer ResolveAndAddSerializer(DefaultHybridCache @this)
+ {
+ // It isn't critical that we get only one serializer instance during start-up; what matters
+ // is that we don't get a new serializer instance *every time*.
+ var serializer = @this._services.GetService>();
+ if (serializer is null)
+ {
+ foreach (var factory in @this._serializerFactories)
+ {
+ if (factory.TryCreateSerializer(out var current))
+ {
+ serializer = current;
+ break; // we've already reversed the factories, so: the first hit is what we want
+ }
+ }
+ }
+
+ if (serializer is null)
+ {
+ throw new InvalidOperationException($"No {nameof(IHybridCacheSerializer)} configured for type '{typeof(T).Name}'");
+ }
+
+ // store the result so we don't repeat this in future
+ @this._serializers[typeof(T)] = serializer;
+ return serializer;
+ }
+ }
+}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.Stampede.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.Stampede.cs
new file mode 100644
index 00000000000..ef5c570c670
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.Stampede.cs
@@ -0,0 +1,111 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Collections.Concurrent;
+using System.Diagnostics;
+using System.Diagnostics.CodeAnalysis;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+internal partial class DefaultHybridCache
+{
+ private readonly ConcurrentDictionary _currentOperations = new();
+
+ // returns true for a new session (in which case: we need to start the work), false for a pre-existing session
+ public bool GetOrCreateStampedeState(string key, HybridCacheEntryFlags flags, out StampedeState stampedeState, bool canBeCanceled)
+ {
+ var stampedeKey = new StampedeKey(key, flags);
+
+ // Double-checked locking to try to avoid unnecessary sessions in race conditions,
+ // while avoiding the lock completely whenever possible.
+ if (TryJoinExistingSession(this, stampedeKey, out var existing))
+ {
+ stampedeState = existing;
+ return false; // someone ELSE is running the work
+ }
+
+ // Most common scenario here, then, is that we're not fighting with anyone else
+ // go ahead and create a placeholder state object and *try* to add it.
+ stampedeState = new StampedeState(this, stampedeKey, canBeCanceled);
+ if (_currentOperations.TryAdd(stampedeKey, stampedeState))
+ {
+ // successfully added; indeed, no-one else was fighting: we're done
+ return true; // the CURRENT caller is responsible for making the work happen
+ }
+
+ // Hmmm, failed to add - there's concurrent activity on the same key; we're now
+ // in very rare race condition territory; go ahead and take a lock while we
+ // collect our thoughts.
+
+ // see notes in SyncLock.cs
+ lock (GetPartitionedSyncLock(in stampedeKey))
+ {
+ // check again while we hold the lock
+ if (TryJoinExistingSession(this, stampedeKey, out existing))
+ {
+ // we found an existing state we can join; do that
+ stampedeState.SetCanceled(); // to be thorough: mark our speculative one as doomed (no-one has seen it, though)
+ stampedeState = existing; // and replace with the one we found
+ return false; // someone ELSE is running the work
+
+ // Note that in this case we allocated a StampedeState that got dropped on
+ // the floor; in the grand scheme of things, that's OK; this is a rare outcome.
+ }
+
+ // Check whether the value was L1-cached by an outgoing operation (for *us* to check needs local-cache-read,
+ // and for *them* to have updated needs local-cache-write, but since the shared us/them key includes flags,
+ // we can skip this if *either* flag is set).
+ if ((flags & HybridCacheEntryFlags.DisableLocalCache) == 0 && _localCache.TryGetValue(key, out var untyped)
+ && untyped is CacheItem typed && typed.TryReserve())
+ {
+ stampedeState.SetResultDirect(typed);
+ return false; // the work has ALREADY been done
+ }
+
+ // Otherwise, either nothing existed - or the thing that already exists can't be joined
+ // in that case, go ahead and use the state that we invented a moment ago (outside of the lock).
+ _currentOperations[stampedeKey] = stampedeState;
+ return true; // the CURRENT caller is responsible for making the work happen
+ }
+
+ static bool TryJoinExistingSession(DefaultHybridCache @this, in StampedeKey stampedeKey,
+ [NotNullWhen(true)] out StampedeState? stampedeState)
+ {
+ if (@this._currentOperations.TryGetValue(stampedeKey, out var found))
+ {
+ if (found is not StampedeState tmp)
+ {
+ ThrowWrongType(stampedeKey.Key, found.Type, typeof(T));
+ }
+
+ if (tmp.TryAddCaller())
+ {
+ // we joined an existing session
+ stampedeState = tmp;
+ return true;
+ }
+ }
+
+ stampedeState = null;
+ return false;
+ }
+
+ [DoesNotReturn]
+ static void ThrowWrongType(string key, Type existingType, Type newType)
+ {
+ Debug.Assert(existingType != newType, "should be different types");
+ throw new InvalidOperationException(
+ $"All calls to {nameof(HybridCache)} with the same key should use the same data type; the same key is being used for '{existingType.FullName}' and '{newType.FullName}' data")
+ {
+ Data = { { "CacheKey", key } }
+ };
+ }
+ }
+
+ internal int DebugGetCallerCount(string key, HybridCacheEntryFlags? flags = null)
+ {
+ var stampedeKey = new StampedeKey(key, flags ?? _defaultFlags);
+ return _currentOperations.TryGetValue(stampedeKey, out var state) ? state.DebugCallerCount : 0;
+ }
+}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.StampedeKey.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.StampedeKey.cs
new file mode 100644
index 00000000000..bbb519b2992
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.StampedeKey.cs
@@ -0,0 +1,54 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Diagnostics.CodeAnalysis;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+internal partial class DefaultHybridCache
+{
+ [SuppressMessage("Style", "IDE0032:Use auto property", Justification = "Prefer explicit field in this case")]
+ internal readonly struct StampedeKey : IEquatable
+ {
+ private readonly string _key;
+ private readonly HybridCacheEntryFlags _flags;
+ private readonly int _hashCode; // we know we'll need it; compute it once only
+ public StampedeKey(string key, HybridCacheEntryFlags flags)
+ {
+ // We'll use both the key *and* the flags as a combined key; in reality, we *expect*
+ // the flags to be consistent between calls on the same operation, and it must be
+ // noted that the *cache items* only use the key (not the flags), but: it gets
+ // very hard to grok what the correct behaviour should be if combining two calls
+ // with different flags, since they could have mutually exclusive behaviours!
+
+ // As such, we'll treat conflicting calls entirely separately from a stampede
+ // perspective.
+ _key = key;
+ _flags = flags;
+#if NETCOREAPP2_1_OR_GREATER || NETSTANDARD2_1_OR_GREATER
+ _hashCode = System.HashCode.Combine(key, flags);
+#else
+ _hashCode = key.GetHashCode() ^ (int)flags;
+#endif
+ }
+
+ public string Key => _key;
+ public HybridCacheEntryFlags Flags => _flags;
+
+ // Allow direct access to the pre-computed hash-code, semantically emphasizing that
+ // this is a constant-time operation against a known value.
+ internal int HashCode => _hashCode;
+
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Blocker Code Smell", "S2178:Short-circuit logic should be used in boolean contexts",
+ Justification = "Non-short-circuiting intentional to remove unnecessary branch")]
+ public bool Equals(StampedeKey other) => _flags == other._flags & _key == other._key;
+
+ public override bool Equals([NotNullWhen(true)] object? obj)
+ => obj is StampedeKey other && Equals(other);
+
+ public override int GetHashCode() => _hashCode;
+
+ public override string ToString() => $"{_key} ({_flags})";
+ }
+}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.StampedeState.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.StampedeState.cs
new file mode 100644
index 00000000000..eba71774395
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.StampedeState.cs
@@ -0,0 +1,109 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Threading;
+
+#if !NETCOREAPP3_0_OR_GREATER
+using System.Runtime.CompilerServices;
+#endif
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+internal partial class DefaultHybridCache
+{
+ internal abstract class StampedeState
+#if NETCOREAPP3_0_OR_GREATER
+ : IThreadPoolWorkItem
+#endif
+ {
+ internal readonly CancellationToken SharedToken; // this might have a value even when _sharedCancellation is null
+
+ // Because multiple callers can enlist, we need to track when the *last* caller cancels
+ // (and keep going until then); that means we need to run with custom cancellation.
+ private readonly CancellationTokenSource? _sharedCancellation;
+
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Style", "IDE0032:Use auto property", Justification = "Keep usage explicit")]
+ private readonly DefaultHybridCache _cache;
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Style", "IDE0032:Use auto property", Justification = "Keep usage explicit")]
+ private readonly CacheItem _cacheItem;
+
+ // we expose the key as a by-ref readonly; this minimizes the stack work involved in passing the key around
+ // (both in terms of width and copy-semantics)
+ private readonly StampedeKey _key;
+ public ref readonly StampedeKey Key => ref _key;
+ protected CacheItem CacheItem => _cacheItem;
+
+ /// <summary>
+ /// Initializes a new instance of the <see cref="StampedeState"/> class, optionally with shared cancellation support.
+ /// </summary>
+ protected StampedeState(DefaultHybridCache cache, in StampedeKey key, CacheItem cacheItem, bool canBeCanceled)
+ {
+ _cache = cache;
+ _key = key;
+ _cacheItem = cacheItem;
+ if (canBeCanceled)
+ {
+ // If the first (or any) caller can't be cancelled, we'll never get to zero: no point tracking.
+ // (in reality, all callers usually use the same path, so cancellation is usually "all" or "none")
+ _sharedCancellation = new();
+ SharedToken = _sharedCancellation.Token;
+ }
+ else
+ {
+ SharedToken = CancellationToken.None;
+ }
+ }
+
+ /// <summary>
+ /// Initializes a new instance of the <see cref="StampedeState"/> class using a fixed cancellation token.
+ /// </summary>
+ protected StampedeState(DefaultHybridCache cache, in StampedeKey key, CacheItem cacheItem, CancellationToken token)
+ {
+ _cache = cache;
+ _key = key;
+ _cacheItem = cacheItem;
+ SharedToken = token;
+ }
+
+#if !NETCOREAPP3_0_OR_GREATER
+ protected static readonly WaitCallback SharedWaitCallback = static obj => Unsafe.As(obj).Execute();
+#endif
+
+ protected DefaultHybridCache Cache => _cache;
+
+ public abstract void Execute();
+
+ protected int MaximumPayloadBytes => _cache.MaximumPayloadBytes;
+
+ public override string ToString() => Key.ToString();
+
+ public abstract void SetCanceled();
+
+ public int DebugCallerCount => _cacheItem.RefCount;
+
+ public abstract Type Type { get; }
+
+ public void CancelCaller()
+ {
+ // note that TryAddCaller has protections to avoid getting back from zero
+ if (_cacheItem.Release())
+ {
+ // we're the last to leave; turn off the lights
+ _sharedCancellation?.Cancel();
+ SetCanceled();
+ }
+ }
+
+ public bool TryAddCaller() => _cacheItem.TryReserve();
+ }
+
+ private void RemoveStampedeState(in StampedeKey key)
+ {
+ // see notes in SyncLock.cs
+ lock (GetPartitionedSyncLock(in key))
+ {
+ _ = _currentOperations.TryRemove(key, out _);
+ }
+ }
+}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.StampedeStateT.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.StampedeStateT.cs
new file mode 100644
index 00000000000..842444c8666
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.StampedeStateT.cs
@@ -0,0 +1,304 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Diagnostics;
+using System.Diagnostics.CodeAnalysis;
+using System.Threading;
+using System.Threading.Tasks;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+internal partial class DefaultHybridCache
+{
+ internal sealed class StampedeState : StampedeState
+ {
+ [DoesNotReturn]
+ private static CacheItem ThrowUnexpectedCacheItem() => throw new InvalidOperationException("Unexpected cache item");
+
+ private readonly TaskCompletionSource>? _result;
+ private TState? _state;
+ private Func>? _underlying; // main data factory
+ private HybridCacheEntryOptions? _options;
+ private Task? _sharedUnwrap; // allows multiple non-cancellable callers to share a single task (when no defensive copy needed)
+
+ // ONLY set the result, without any other side-effects
+ internal void SetResultDirect(CacheItem value)
+ => _result?.TrySetResult(value);
+
+ public StampedeState(DefaultHybridCache cache, in StampedeKey key, bool canBeCanceled)
+ : base(cache, key, CacheItem.Create(), canBeCanceled)
+ {
+ _result = new(TaskCreationOptions.RunContinuationsAsynchronously);
+ }
+
+ public StampedeState(DefaultHybridCache cache, in StampedeKey key, CancellationToken token)
+ : base(cache, key, CacheItem.Create(), token)
+ {
+ // no TCS in this case - this is for SetValue only
+ }
+
+ public override Type Type => typeof(T);
+
+ public void QueueUserWorkItem(in TState state, Func> underlying, HybridCacheEntryOptions? options)
+ {
+ Debug.Assert(_underlying is null, "should not already have factory field");
+ Debug.Assert(underlying is not null, "factory argument should be meaningful");
+
+ // initialize the callback state
+ _state = state;
+ _underlying = underlying;
+ _options = options;
+
+#if NETCOREAPP3_0_OR_GREATER
+ ThreadPool.UnsafeQueueUserWorkItem(this, false);
+#else
+ ThreadPool.UnsafeQueueUserWorkItem(SharedWaitCallback, this);
+#endif
+ }
+
+ [SuppressMessage("Resilience", "EA0014:The async method doesn't support cancellation", Justification = "Cancellation is handled separately via SharedToken")]
+ public Task ExecuteDirectAsync(in TState state, Func> underlying, HybridCacheEntryOptions? options)
+ {
+ Debug.Assert(_underlying is null, "should not already have factory field");
+ Debug.Assert(underlying is not null, "factory argument should be meaningful");
+
+ // initialize the callback state
+ _state = state;
+ _underlying = underlying;
+ _options = options;
+
+ return BackgroundFetchAsync();
+ }
+
+ public override void Execute() => _ = BackgroundFetchAsync();
+
+ public override void SetCanceled() => _result?.TrySetCanceled(SharedToken);
+
+ [SuppressMessage("Usage", "VSTHRD003:Avoid awaiting foreign Tasks", Justification = "Custom task management")]
+ public ValueTask JoinAsync(CancellationToken token)
+ {
+ // If the underlying has already completed, and/or our local token can't cancel: we
+ // can simply wrap the shared task; otherwise, we need our own cancellation state.
+ return token.CanBeCanceled && !Task.IsCompleted ? WithCancellationAsync(this, token) : UnwrapReservedAsync();
+
+ static async ValueTask WithCancellationAsync(StampedeState stampede, CancellationToken token)
+ {
+ var cancelStub = new TaskCompletionSource(TaskCreationOptions.RunContinuationsAsynchronously);
+ using var reg = token.Register(static obj =>
+ {
+ _ = ((TaskCompletionSource)obj!).TrySetResult(true);
+ }, cancelStub);
+
+ CacheItem result;
+ try
+ {
+ var first = await System.Threading.Tasks.Task.WhenAny(stampede.Task, cancelStub.Task).ConfigureAwait(false);
+ if (ReferenceEquals(first, cancelStub.Task))
+ {
+ // we expect this to throw, because otherwise we wouldn't have gotten here
+ token.ThrowIfCancellationRequested(); // get an appropriate exception
+ }
+
+ Debug.Assert(ReferenceEquals(first, stampede.Task), "should not be cancelled");
+
+ // this has already completed, but we'll get the stack nicely
+ result = await stampede.Task.ConfigureAwait(false);
+ }
+ catch
+ {
+ stampede.CancelCaller();
+ throw;
+ }
+
+ // outside the catch, so we know we only decrement one way or the other
+ return result.GetReservedValue();
+ }
+ }
+
+ [SuppressMessage("Maintainability", "CA1508:Avoid dead conditional code", Justification = "Reliability")]
+ public Task> Task
+ {
+ get
+ {
+ Debug.Assert(_result is not null, "result should be assigned");
+ return _result is null ? InvalidAsync() : _result.Task;
+
+ static Task> InvalidAsync() => System.Threading.Tasks.Task.FromException>(
+ new InvalidOperationException("Task should not be accessed for non-shared instances"));
+ }
+ }
+
+ [SuppressMessage("Resilience", "EA0014:The async method doesn't support cancellation", Justification = "No cancellable operation")]
+ [SuppressMessage("Performance", "CA1849:Call async methods when in an async method", Justification = "Checked manual unwrap")]
+ [SuppressMessage("Usage", "VSTHRD003:Avoid awaiting foreign Tasks", Justification = "Checked manual unwrap")]
+ [SuppressMessage("Major Code Smell", "S1121:Assignments should not be made from within sub-expressions", Justification = "Unusual, but legit here")]
+ internal ValueTask UnwrapReservedAsync()
+ {
+ var task = Task;
+#if NETCOREAPP2_0_OR_GREATER || NETSTANDARD2_1_OR_GREATER
+ if (task.IsCompletedSuccessfully)
+#else
+ if (task.Status == TaskStatus.RanToCompletion)
+#endif
+ {
+ return new(task.Result.GetReservedValue());
+ }
+
+ // if the type is immutable, callers can share the final step too (this may leave dangling
+ // reservation counters, but that's OK)
+ var result = ImmutableTypeCache.IsImmutable ? (_sharedUnwrap ??= AwaitedAsync(Task)) : AwaitedAsync(Task);
+ return new(result);
+
+ static async Task AwaitedAsync(Task> task)
+ => (await task.ConfigureAwait(false)).GetReservedValue();
+ }
+
+ [SuppressMessage("Resilience", "EA0014:The async method doesn't support cancellation", Justification = "In this case the cancellation token is provided internally via SharedToken")]
+ [SuppressMessage("Design", "CA1031:Do not catch general exception types", Justification = "Exception is passed through to faulted task result")]
+ private async Task BackgroundFetchAsync()
+ {
+ try
+ {
+ // read from L2 if appropriate
+ if ((Key.Flags & HybridCacheEntryFlags.DisableDistributedCacheRead) == 0)
+ {
+ var result = await Cache.GetFromL2Async(Key.Key, SharedToken).ConfigureAwait(false);
+
+ if (result.Array is not null)
+ {
+ SetResultAndRecycleIfAppropriate(ref result);
+ return;
+ }
+ }
+
+ // nothing from L2; invoke the underlying data store
+ if ((Key.Flags & HybridCacheEntryFlags.DisableUnderlyingData) == 0)
+ {
+ var cacheItem = SetResult(await _underlying!(_state!, SharedToken).ConfigureAwait(false));
+
+ // note that at this point we've already released most or all of the waiting callers; everything
+ // else here is background
+
+ // write to L2 if appropriate
+ if ((Key.Flags & HybridCacheEntryFlags.DisableDistributedCacheWrite) == 0)
+ {
+ if (cacheItem.TryReserveBuffer(out var buffer))
+ {
+ // mutable: we've already serialized it for the shared cache item
+ await Cache.SetL2Async(Key.Key, in buffer, _options, SharedToken).ConfigureAwait(false);
+ _ = cacheItem.Release(); // because we reserved
+ }
+ else if (cacheItem.TryGetValue(out var value))
+ {
+ // immutable: we'll need to do the serialize ourselves
+ var writer = RecyclableArrayBufferWriter.Create(MaximumPayloadBytes); // note this lifetime spans the SetL2Async
+ Cache.GetSerializer().Serialize(value, writer);
+ buffer = new(writer.GetBuffer(out var length), length, returnToPool: false); // writer still owns the buffer
+ await Cache.SetL2Async(Key.Key, in buffer, _options, SharedToken).ConfigureAwait(false);
+ writer.Dispose(); // recycle on success
+ }
+ }
+ }
+ else
+ {
+ // can't read from data store; implies we shouldn't write
+ // back to anywhere else, either
+ SetDefaultResult();
+ }
+ }
+ catch (Exception ex)
+ {
+ SetException(ex);
+ }
+ }
+
+ private void SetException(Exception ex)
+ {
+ if (_result is not null)
+ {
+ Cache.RemoveStampedeState(in Key);
+ _ = _result.TrySetException(ex);
+ }
+ }
+
+ private void SetDefaultResult()
+ {
+ // note we don't store this dummy result in L1 or L2
+ if (_result is not null)
+ {
+ Cache.RemoveStampedeState(in Key);
+ _ = _result.TrySetResult(ImmutableCacheItem.GetReservedShared());
+ }
+ }
+
+ private void SetResultAndRecycleIfAppropriate(ref BufferChunk value)
+ {
+ // set a result from L2 cache
+ Debug.Assert(value.Array is not null, "expected buffer");
+
+ var serializer = Cache.GetSerializer();
+ CacheItem cacheItem;
+ switch (CacheItem)
+ {
+ case ImmutableCacheItem immutable:
+ // deserialize; and store object; buffer can be recycled now
+ immutable.SetValue(serializer.Deserialize(new(value.Array!, 0, value.Length)));
+ value.RecycleIfAppropriate();
+ cacheItem = immutable;
+ break;
+ case MutableCacheItem mutable:
+ // use the buffer directly as the backing in the cache-item; do *not* recycle now
+ mutable.SetValue(ref value, serializer);
+ mutable.DebugOnlyTrackBuffer(Cache);
+ cacheItem = mutable;
+ break;
+ default:
+ cacheItem = ThrowUnexpectedCacheItem();
+ break;
+ }
+
+ SetResult(cacheItem);
+ }
+
+ private CacheItem SetResult(T value)
+ {
+ // set a result from a value we calculated directly
+ CacheItem cacheItem;
+ switch (CacheItem)
+ {
+ case ImmutableCacheItem immutable:
+ // no serialize needed
+ immutable.SetValue(value);
+ cacheItem = immutable;
+ break;
+ case MutableCacheItem mutable:
+ // serialization happens here
+ mutable.SetValue(value, Cache.GetSerializer(), MaximumPayloadBytes);
+ mutable.DebugOnlyTrackBuffer(Cache);
+ cacheItem = mutable;
+ break;
+ default:
+ cacheItem = ThrowUnexpectedCacheItem();
+ break;
+ }
+
+ SetResult(cacheItem);
+ return cacheItem;
+ }
+
+ private void SetResult(CacheItem value)
+ {
+ if ((Key.Flags & HybridCacheEntryFlags.DisableLocalCacheWrite) == 0)
+ {
+ Cache.SetL1(Key.Key, value, _options); // we can do this without a TCS, for SetValue
+ }
+
+ if (_result is not null)
+ {
+ Cache.RemoveStampedeState(in Key);
+ _ = _result.TrySetResult(value);
+ }
+ }
+ }
+}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.SyncLock.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.SyncLock.cs
new file mode 100644
index 00000000000..4672818d056
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.SyncLock.cs
@@ -0,0 +1,44 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+internal partial class DefaultHybridCache
+{
+ // HybridCache's stampede protection requires some level of synchronization to avoid unnecessary runs
+ // of the underlying data fetch; this is *minimized* by the use of double-checked locking and
+ // interlocked join (adding a new request to an existing execution), but: that would leave a race
+ // condition where the *remove* step of the stampede would be in a race with the *add new* step; the
+ // *add new* step is inside a lock, but we need to *remove* step to share that lock, to avoid
+ // the race. We deal with that by taking the same lock during remove, but *that* means we're locking
+ // on all executions.
+ //
+ // To minimize lock contention, we will therefore use partitioning of the lock-token, by using the
+ // low 3 bits of the hash-code (which we calculate eagerly only once, so: already known). This gives
+ // us a fast way to split contention by 8, almost an order-of-magnitude, which is sufficient. We *could*
+ // use an array for this, but: for directness, let's inline it instead (avoiding bounds-checks,
+ // an extra layer of dereferencing, and the allocation; I will acknowledge these are minuscule, but:
+ // it costs us nothing to do)
+
+ private readonly object _syncLock0 = new();
+ private readonly object _syncLock1 = new();
+ private readonly object _syncLock2 = new();
+ private readonly object _syncLock3 = new();
+ private readonly object _syncLock4 = new();
+ private readonly object _syncLock5 = new();
+ private readonly object _syncLock6 = new();
+ private readonly object _syncLock7 = new();
+
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Major Code Smell", "S109:Magic numbers should not be used", Justification = "Trivial low 3 bits")]
+ internal object GetPartitionedSyncLock(in StampedeKey key) => (key.HashCode & 0b111) switch // generate 8 partitions using the low 3 bits
+ {
+ 0 => _syncLock0,
+ 1 => _syncLock1,
+ 2 => _syncLock2,
+ 3 => _syncLock3,
+ 4 => _syncLock4,
+ 5 => _syncLock5,
+ 6 => _syncLock6,
+ _ => _syncLock7,
+ };
+}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.cs
new file mode 100644
index 00000000000..c789e7c6652
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultHybridCache.cs
@@ -0,0 +1,170 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Runtime.CompilerServices;
+using System.Threading;
+using System.Threading.Tasks;
+using Microsoft.Extensions.Caching.Distributed;
+using Microsoft.Extensions.Caching.Memory;
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.Logging;
+using Microsoft.Extensions.Logging.Abstractions;
+using Microsoft.Extensions.Options;
+using Microsoft.Shared.Diagnostics;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+/// <summary>
+/// The inbuilt implementation of <see cref="HybridCache"/>, as registered via <see cref="HybridCacheServiceExtensions.AddHybridCache"/>.
+/// </summary>
+internal sealed partial class DefaultHybridCache : HybridCache
+{
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Style", "IDE0032:Use auto property", Justification = "Keep usage explicit")]
+ private readonly IDistributedCache? _backendCache;
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Style", "IDE0032:Use auto property", Justification = "Keep usage explicit")]
+ private readonly IMemoryCache _localCache;
+ private readonly IServiceProvider _services; // we can't resolve per-type serializers until we see each T
+ private readonly IHybridCacheSerializerFactory[] _serializerFactories;
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Style", "IDE0032:Use auto property", Justification = "Keep usage explicit")]
+ private readonly HybridCacheOptions _options;
+ private readonly ILogger _logger;
+ private readonly CacheFeatures _features; // used to avoid constant type-testing
+
+ private readonly HybridCacheEntryFlags _hardFlags; // *always* present (for example, because no L2)
+ private readonly HybridCacheEntryFlags _defaultFlags; // note this already includes hardFlags
+ private readonly TimeSpan _defaultExpiration;
+ private readonly TimeSpan _defaultLocalCacheExpiration;
+
+ private readonly DistributedCacheEntryOptions _defaultDistributedCacheExpiration;
+
+ [Flags]
+ internal enum CacheFeatures
+ {
+ None = 0,
+ BackendCache = 1 << 0,
+ BackendBuffers = 1 << 1,
+ }
+
+ internal CacheFeatures GetFeatures() => _features;
+
+ // used to restrict features in test suite
+ internal void DebugRemoveFeatures(CacheFeatures features) => Unsafe.AsRef(in _features) &= ~features;
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private CacheFeatures GetFeatures(CacheFeatures mask) => _features & mask;
+
+ public DefaultHybridCache(IOptions options, IServiceProvider services)
+ {
+ _services = Throw.IfNull(services);
+ _localCache = services.GetRequiredService();
+ _options = options.Value;
+ _logger = services.GetService()?.CreateLogger(typeof(HybridCache)) ?? NullLogger.Instance;
+
+ _backendCache = services.GetService(); // note optional
+
+ // ignore L2 if it is really just the same L1, wrapped
+ // (note not just an "is" test; if someone has a custom subclass, who knows what it does?)
+ if (_backendCache is not null
+ && _backendCache.GetType() == typeof(MemoryDistributedCache)
+ && _localCache.GetType() == typeof(MemoryCache))
+ {
+ _backendCache = null;
+ }
+
+ // perform type-tests on the backend once only
+ _features |= _backendCache switch
+ {
+ IBufferDistributedCache => CacheFeatures.BackendCache | CacheFeatures.BackendBuffers,
+ not null => CacheFeatures.BackendCache,
+ _ => CacheFeatures.None
+ };
+
+ // When resolving serializers via the factory API, we will want the *last* instance,
+ // i.e. "last added wins"; we can optimize by reversing the array ahead of time, and
+ // taking the first match
+ var factories = services.GetServices().ToArray();
+ Array.Reverse(factories);
+ _serializerFactories = factories;
+
+ MaximumPayloadBytes = checked((int)_options.MaximumPayloadBytes); // for now hard-limit to 2GiB
+
+ var defaultEntryOptions = _options.DefaultEntryOptions;
+
+ if (_backendCache is null)
+ {
+ _hardFlags |= HybridCacheEntryFlags.DisableDistributedCache;
+ }
+
+ _defaultFlags = (defaultEntryOptions?.Flags ?? HybridCacheEntryFlags.None) | _hardFlags;
+ _defaultExpiration = defaultEntryOptions?.Expiration ?? TimeSpan.FromMinutes(5);
+ _defaultLocalCacheExpiration = defaultEntryOptions?.LocalCacheExpiration ?? TimeSpan.FromMinutes(1);
+ _defaultDistributedCacheExpiration = new DistributedCacheEntryOptions { AbsoluteExpirationRelativeToNow = _defaultExpiration };
+ }
+
+ internal IDistributedCache? BackendCache => _backendCache;
+ internal IMemoryCache LocalCache => _localCache;
+
+ internal HybridCacheOptions Options => _options;
+
+ public override ValueTask GetOrCreateAsync(string key, TState state, Func> underlyingDataCallback,
+ HybridCacheEntryOptions? options = null, IEnumerable? tags = null, CancellationToken cancellationToken = default)
+ {
+ var canBeCanceled = cancellationToken.CanBeCanceled;
+ if (canBeCanceled)
+ {
+ cancellationToken.ThrowIfCancellationRequested();
+ }
+
+ var flags = GetEffectiveFlags(options);
+ if ((flags & HybridCacheEntryFlags.DisableLocalCacheRead) == 0 && _localCache.TryGetValue(key, out var untyped)
+ && untyped is CacheItem typed && typed.TryGetValue(out var value))
+ {
+ // short-circuit
+ return new(value);
+ }
+
+ if (GetOrCreateStampedeState(key, flags, out var stampede, canBeCanceled))
+ {
+ // new query; we're responsible for making it happen
+ if (canBeCanceled)
+ {
+ // *we* might cancel, but someone else might be depending on the result; start the
+ // work independently, then we'll join the outcome
+ stampede.QueueUserWorkItem(in state, underlyingDataCallback, options);
+ }
+ else
+ {
+ // we're going to run to completion; no need to get complicated
+ _ = stampede.ExecuteDirectAsync(in state, underlyingDataCallback, options); // this larger task includes L2 write etc
+ return stampede.UnwrapReservedAsync();
+ }
+ }
+
+ return stampede.JoinAsync(cancellationToken);
+ }
+
+ public override ValueTask RemoveAsync(string key, CancellationToken token = default)
+ {
+ _localCache.Remove(key);
+ return _backendCache is null ? default : new(_backendCache.RemoveAsync(key, token));
+ }
+
+ public override ValueTask RemoveByTagAsync(string tag, CancellationToken token = default)
+ => default; // tags not yet implemented
+
+ public override ValueTask SetAsync(string key, T value, HybridCacheEntryOptions? options = null, IEnumerable? tags = null, CancellationToken token = default)
+ {
+ // since we're forcing a write: disable L1+L2 read; we'll use a direct pass-thru of the value as the callback, to reuse all the code
+ // note also that stampede token is not shared with anyone else
+ var flags = GetEffectiveFlags(options) | (HybridCacheEntryFlags.DisableLocalCacheRead | HybridCacheEntryFlags.DisableDistributedCacheRead);
+ var state = new StampedeState(this, new StampedeKey(key, flags), token);
+ return new(state.ExecuteDirectAsync(value, static (state, _) => new(state), options)); // note this spans L2 write etc
+ }
+
+ [MethodImpl(MethodImplOptions.AggressiveInlining)]
+ private HybridCacheEntryFlags GetEffectiveFlags(HybridCacheEntryOptions? options)
+ => (options?.Flags | _hardFlags) ?? _defaultFlags;
+}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultJsonSerializerFactory.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultJsonSerializerFactory.cs
new file mode 100644
index 00000000000..63ce186e1ec
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/DefaultJsonSerializerFactory.cs
@@ -0,0 +1,42 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Buffers;
+using System.Diagnostics.CodeAnalysis;
+using System.Text.Json;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+internal sealed class DefaultJsonSerializerFactory : IHybridCacheSerializerFactory
+{
+ public bool TryCreateSerializer([NotNullWhen(true)] out IHybridCacheSerializer? serializer)
+ {
+ // no restriction
+ serializer = new DefaultJsonSerializer();
+ return true;
+ }
+
+ internal sealed class DefaultJsonSerializer : IHybridCacheSerializer
+ {
+ T IHybridCacheSerializer.Deserialize(ReadOnlySequence source)
+ {
+ var reader = new Utf8JsonReader(source);
+#pragma warning disable IDE0079 // unnecessary suppression: TFM-dependent
+#pragma warning disable IL2026, IL3050 // AOT bits
+ return JsonSerializer.Deserialize(ref reader)!;
+#pragma warning restore IL2026, IL3050
+#pragma warning restore IDE0079
+ }
+
+ void IHybridCacheSerializer.Serialize(T value, IBufferWriter target)
+ {
+ using var writer = new Utf8JsonWriter(target);
+#pragma warning disable IDE0079 // unnecessary suppression: TFM-dependent
+#pragma warning disable IL2026, IL3050 // AOT bits
+ JsonSerializer.Serialize(writer, value, JsonSerializerOptions.Default);
+#pragma warning restore IL2026, IL3050
+#pragma warning restore IDE0079
+ }
+ }
+
+}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/HybridCacheBuilder.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/HybridCacheBuilder.cs
new file mode 100644
index 00000000000..814bd4c84a8
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/HybridCacheBuilder.cs
@@ -0,0 +1,16 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using Microsoft.Extensions.DependencyInjection;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+internal sealed class HybridCacheBuilder : IHybridCacheBuilder
+{
+ public HybridCacheBuilder(IServiceCollection services)
+ {
+ Services = services;
+ }
+
+ public IServiceCollection Services { get; }
+}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/ImmutableTypeCache.T.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/ImmutableTypeCache.T.cs
new file mode 100644
index 00000000000..51566cd0e68
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/ImmutableTypeCache.T.cs
@@ -0,0 +1,15 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+///
+/// Simple memoize storage for whether the type is blittable, in particular to avoid repeated runtime tests
+/// in down-level TFMs where this is trickier to determine. The JIT is very effective at accessing this memoized value.
+///
+/// The type being processed.
+internal static class ImmutableTypeCache // lazy memoize; T doesn't change per cache instance
+{
+ // note for blittable types: a pure struct will be a full copy every time - nothing shared to mutate
+ public static readonly bool IsImmutable = (typeof(T).IsValueType && ImmutableTypeCache.IsBlittable()) || ImmutableTypeCache.IsTypeImmutable(typeof(T));
+}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/ImmutableTypeCache.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/ImmutableTypeCache.cs
new file mode 100644
index 00000000000..87b86e56cf1
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/ImmutableTypeCache.cs
@@ -0,0 +1,79 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.ComponentModel;
+using System.Reflection;
+
+#if NETCOREAPP2_0_OR_GREATER || NETSTANDARD2_1_OR_GREATER
+using System.Runtime.CompilerServices;
+#else
+using System.Runtime.InteropServices;
+using System.Runtime.Serialization;
+#endif
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+///
+/// Utility type for determining whether a type is blittable; the logic for this is very
+/// TFM dependent.
+///
+internal static class ImmutableTypeCache
+{
+ internal static bool IsBlittable() // minimize the generic portion (twinned with IsTypeImmutable)
+ {
+#if NETCOREAPP2_0_OR_GREATER || NETSTANDARD2_1_OR_GREATER
+ return !RuntimeHelpers.IsReferenceOrContainsReferences();
+#else
+ // down-level: only blittable types can be pinned
+ try
+ {
+ // get a typed, zeroed, non-null boxed instance of the appropriate type
+ // (can't use (object)default(T), as that would box to null for nullable types)
+ var obj = FormatterServices.GetUninitializedObject(Nullable.GetUnderlyingType(typeof(T)) ?? typeof(T));
+ GCHandle.Alloc(obj, GCHandleType.Pinned).Free();
+ return true;
+ }
+#pragma warning disable CA1031 // Do not catch general exception types: interpret any failure here as "nope"
+ catch
+ {
+ return false;
+ }
+#pragma warning restore CA1031
+
+#endif
+ }
+
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Blocker Code Smell", "S2178:Short-circuit logic should be used in boolean contexts",
+ Justification = "Non-short-circuiting intentional to remove unnecessary branch")]
+ internal static bool IsTypeImmutable(Type type)
+ {
+ // check for known types
+ if (type == typeof(string))
+ {
+ return true;
+ }
+
+ if (type.IsValueType)
+ {
+ // switch from Foo? to Foo if necessary
+ if (Nullable.GetUnderlyingType(type) is { } nullable)
+ {
+ type = nullable;
+ }
+ }
+
+ if (type.IsValueType || (type.IsClass & type.IsSealed))
+ {
+ // check for [ImmutableObject(true)]; note we're looking at this as a statement about
+ // the overall immutability; for example, a type could contain a private int[] field,
+ // where both the field and the array contents are mutable; but if the type is annotated:
+ // we're trusting that the API and use-case is such that the type is immutable
+ return type.GetCustomAttribute() is { Immutable: true };
+ }
+
+ // don't trust interfaces and non-sealed types; we might have any concrete
+ // type that has different behaviour
+ return false;
+ }
+}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/InbuiltTypeSerializer.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/InbuiltTypeSerializer.cs
new file mode 100644
index 00000000000..3ef26341433
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/InbuiltTypeSerializer.cs
@@ -0,0 +1,58 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Buffers;
+using System.Text;
+
+#if !NET5_0_OR_GREATER
+using System;
+using System.Diagnostics;
+using System.Runtime.InteropServices;
+#endif
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+internal sealed class InbuiltTypeSerializer : IHybridCacheSerializer, IHybridCacheSerializer
+{
+ public static InbuiltTypeSerializer Instance { get; } = new();
+
+ string IHybridCacheSerializer.Deserialize(ReadOnlySequence source)
+ {
+#if NET5_0_OR_GREATER
+ return Encoding.UTF8.GetString(source);
+#else
+ if (source.IsSingleSegment && MemoryMarshal.TryGetArray(source.First, out var segment))
+ {
+ // we can use the existing single chunk as-is
+ return Encoding.UTF8.GetString(segment.Array, segment.Offset, segment.Count);
+ }
+
+ var length = checked((int)source.Length);
+ var oversized = ArrayPool.Shared.Rent(length);
+ source.CopyTo(oversized);
+ var s = Encoding.UTF8.GetString(oversized, 0, length);
+ ArrayPool.Shared.Return(oversized);
+ return s;
+#endif
+ }
+
+ void IHybridCacheSerializer.Serialize(string value, IBufferWriter target)
+ {
+#if NET5_0_OR_GREATER
+ Encoding.UTF8.GetBytes(value, target);
+#else
+ var length = Encoding.UTF8.GetByteCount(value);
+ var oversized = ArrayPool.Shared.Rent(length);
+ var actual = Encoding.UTF8.GetBytes(value, 0, value.Length, oversized, 0);
+ Debug.Assert(actual == length, "encoding length mismatch");
+ target.Write(new(oversized, 0, length));
+ ArrayPool.Shared.Return(oversized);
+#endif
+ }
+
+ byte[] IHybridCacheSerializer.Deserialize(ReadOnlySequence source)
+ => source.ToArray();
+
+ void IHybridCacheSerializer.Serialize(byte[] value, IBufferWriter target)
+ => target.Write(value);
+}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/RecyclableArrayBufferWriter.cs b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/RecyclableArrayBufferWriter.cs
new file mode 100644
index 00000000000..2f2da2c7019
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/RecyclableArrayBufferWriter.cs
@@ -0,0 +1,202 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Buffers;
+using System.Diagnostics;
+using System.Threading;
+using Microsoft.Shared.Diagnostics;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Internal;
+
+// this is effectively a cut-down re-implementation of ArrayBufferWriter
+// from https://github.com/dotnet/runtime/blob/6cd9bf1937c3b4d2f7304a6c534aacde58a202b6/src/libraries/Common/src/System/Buffers/ArrayBufferWriter.cs
+// except it uses the array pool for allocations
+internal sealed class RecyclableArrayBufferWriter : IBufferWriter, IDisposable
+{
+ // Usage note: *normally* you might want to use "using" for this, and that is fine
+ // however, caution should be exercised in exception scenarios where we don't 100%
+ // know that the caller has stopped touching the buffer; in particular, this means
+ // scenarios involving a combination of external code and (for example) "async".
+ // In those cases, it may be preferable to manually dispose in the success case,
+ // and just drop the buffers in the failure case, i.e. instead of:
+ //
+ // using (writer)
+ // { DoStuff(); }
+ //
+ // simply:
+ //
+ // DoStuff();
+ // writer.Dispose();
+ //
+ // This does not represent a problem, and is consistent with many ArrayPool use-cases.
+
+ // Copy of Array.MaxLength.
+ // Used by projects targeting .NET Framework.
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Major Code Smell", "S125:Sections of code should not be commented out", Justification = "Usage example, please retain")]
+ private const int ArrayMaxLength = 0x7FFFFFC7;
+
+ private const int DefaultInitialBufferSize = 256;
+
+ private T[] _buffer;
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Style", "IDE0032:Use auto property", Justification = "It is preferable to keep this usage explicit")]
+ private int _index;
+ private int _maxLength;
+
+ public int CommittedBytes => _index;
+ public int FreeCapacity => _buffer.Length - _index;
+
+ private static RecyclableArrayBufferWriter? _spare;
+ public static RecyclableArrayBufferWriter Create(int maxLength)
+ {
+ var obj = Interlocked.Exchange(ref _spare, null) ?? new();
+ Debug.Assert(obj._index == 0, "index should be zero initially");
+ obj._maxLength = maxLength;
+ return obj;
+ }
+
+ private RecyclableArrayBufferWriter()
+ {
+ _buffer = [];
+ _index = 0;
+ _maxLength = int.MaxValue;
+ }
+
+ public void Dispose()
+ {
+ // attempt to reuse everything via "spare"; if that isn't possible,
+ // recycle the buffers instead
+ _index = 0;
+ if (Interlocked.CompareExchange(ref _spare, this, null) != null)
+ {
+ var tmp = _buffer;
+ _buffer = [];
+ if (tmp.Length != 0)
+ {
+ ArrayPool.Shared.Return(tmp);
+ }
+ }
+ }
+
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Major Code Smell", "S3928:Parameter names used into ArgumentException constructors should match an existing one ",
+ Justification = "False positive; parameter exists")]
+ public void Advance(int count)
+ {
+ _ = Throw.IfLessThan(count, 0);
+
+ if (_index > _buffer.Length - count)
+ {
+ ThrowCount();
+ }
+
+ if (_index + count > _maxLength)
+ {
+ ThrowQuota();
+ }
+
+ _index += count;
+
+ static void ThrowCount()
+ => throw new ArgumentOutOfRangeException(nameof(count));
+
+ static void ThrowQuota()
+ => throw new InvalidOperationException("Max length exceeded");
+ }
+
+ public void ResetInPlace()
+ {
+ // resets the writer *without* resetting the buffer.
+ // the existing memory should be considered "gone"
+ // (to claim the buffer instead, use DetachCommitted)
+ _index = 0;
+ }
+
+ public ReadOnlyMemory GetCommittedMemory() => new(_buffer, 0, _index); // could also directly expose a ReadOnlySpan if useful
+
+ public Memory GetMemory(int sizeHint = 0)
+ {
+ CheckAndResizeBuffer(sizeHint);
+ Debug.Assert(_buffer.Length > _index, "should have some space");
+ return _buffer.AsMemory(_index);
+ }
+
+ public Span GetSpan(int sizeHint = 0)
+ {
+ CheckAndResizeBuffer(sizeHint);
+ Debug.Assert(_buffer.Length > _index, "should have some space");
+ return _buffer.AsSpan(_index);
+ }
+
+ // create a standalone isolated copy of the buffer
+ public T[] ToArray() => _buffer.AsSpan(0, _index).ToArray();
+
+ ///
+ /// Disconnect the current buffer so that we can store it without it being recycled.
+ ///
+ internal T[] DetachCommitted(out int length)
+ {
+ var tmp = _index == 0 ? [] : _buffer;
+ length = _index;
+
+ _buffer = [];
+ _index = 0;
+
+ return tmp;
+ }
+
+ internal T[] GetBuffer(out int length)
+ {
+ length = _index;
+ return _index == 0 ? [] : _buffer;
+ }
+
+ private void CheckAndResizeBuffer(int sizeHint)
+ {
+ if (sizeHint <= 0)
+ {
+ sizeHint = 1;
+ }
+
+ if (sizeHint > FreeCapacity)
+ {
+ var currentLength = _buffer.Length;
+
+ // Attempt to grow by the larger of the sizeHint and double the current size.
+ var growBy = Math.Max(sizeHint, currentLength);
+
+ if (currentLength == 0)
+ {
+ growBy = Math.Max(growBy, DefaultInitialBufferSize);
+ }
+
+ var newSize = currentLength + growBy;
+
+ if ((uint)newSize > int.MaxValue)
+ {
+ // Attempt to grow to ArrayMaxLength.
+ var needed = (uint)(currentLength - FreeCapacity + sizeHint);
+ Debug.Assert(needed > currentLength, "should need to grow");
+
+ if (needed > ArrayMaxLength)
+ {
+ ThrowOutOfMemoryException();
+ }
+
+ newSize = ArrayMaxLength;
+ }
+
+ // resize the backing buffer
+ var oldArray = _buffer;
+ _buffer = ArrayPool.Shared.Rent(newSize);
+ oldArray.AsSpan(0, _index).CopyTo(_buffer);
+ if (oldArray.Length != 0)
+ {
+ ArrayPool.Shared.Return(oldArray);
+ }
+ }
+
+ Debug.Assert(FreeCapacity > 0 && FreeCapacity >= sizeHint, "should be space");
+
+ static void ThrowOutOfMemoryException() => throw new InvalidOperationException("Unable to grow buffer as requested");
+ }
+}
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/readme.md b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/readme.md
new file mode 100644
index 00000000000..8d6a7d87848
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Internal/readme.md
@@ -0,0 +1,27 @@
+# HybridCache internal design
+
+`HybridCache` encapsulates serialization, caching and stampede protection.
+
+The `DefaultHybridCache` implementation keeps a collection of `StampedeState` entries
+that represent the current in-flight operations (keyed by `StampedeKey`); if a duplicate
+operation occurs during the execution, the second operation will be joined with that
+same flow, rather than executing independently. When attempting to merge with an
+existing flow, interlocked counting is used: we can only join if we can successfully
+increment the value from a non-zero value (zero meaning all existing consumers have
+canceled, and the shared token is therefore canceled)
+
+The `StampedeState<>` performs back-end fetch operations, resulting not in a `T` (of the final
+value), but instead a `CacheItem`; this is the object that gets put into L1 cache,
+and can describe both mutable and immutable types; the significance here is that for
+mutable types, we need a defensive copy per-call to prevent callers impacting each other.
+
+`StampedeState<>` combines cancellation (so that operations proceed as long as *a* caller
+is still active); this covers all L2 access and serialization operations, releasing all pending
+shared callers for the same operation. Note that L2 storage can occur *after* callers
+have been released.
+
+To ensure correct buffer recycling, when dealing with cache entries that need defensive copies
+we use more ref-counting while reading the buffer, combined with an eviction callback which
+decrements that counter. This means that we recycle things when evicted, without impacting
+in-progress deserialize operations. To simplify tracking, `BufferChunk` acts like a `byte[]`+`int`
+(we don't need non-zero offset), but also tracking "should this be returned to the pool?".
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Microsoft.Extensions.Caching.Hybrid.csproj b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Microsoft.Extensions.Caching.Hybrid.csproj
new file mode 100644
index 00000000000..f460c4ee0cc
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Microsoft.Extensions.Caching.Hybrid.csproj
@@ -0,0 +1,39 @@
+
+
+
+ Multi-level caching implementation building on and extending IDistributedCache
+ $(NetCoreTargetFrameworks)$(ConditionalNet462);netstandard2.0;netstandard2.1
+ true
+ cache;distributedcache;hybrid
+ true
+ true
+ true
+ true
+ true
+ true
+ true
+ dev
+ EXTEXP0018
+ 75
+ 50
+ Fundamentals
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Microsoft.Extensions.Caching.Hybrid.json b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Microsoft.Extensions.Caching.Hybrid.json
new file mode 100644
index 00000000000..2c1a811b223
Binary files /dev/null and b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/Microsoft.Extensions.Caching.Hybrid.json differ
diff --git a/src/Libraries/Microsoft.Extensions.Caching.Hybrid/README.md b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/README.md
new file mode 100644
index 00000000000..02dc3e5bae5
--- /dev/null
+++ b/src/Libraries/Microsoft.Extensions.Caching.Hybrid/README.md
@@ -0,0 +1,81 @@
+# Microsoft.Extensions.Caching.Hybrid
+
+This package contains a concrete implementation of [the `HybridCache` API](https://learn.microsoft.com/dotnet/api/microsoft.extensions.caching.hybrid),
+simplifying and enhancing cache usage that might previously have been built on top of [`IDistributedCache`](https://learn.microsoft.com/dotnet/api/microsoft.extensions.caching.distributed.idistributedcache).
+
+Key features:
+
+- built on top of `IDistributedCache` - all existing cache backends (Redis, SQL Server, CosmosDB, etc) should work immediately
+- simple API (all the cache, serialization, etc. details are encapsulated)
+- cache-stampede protection (combining of concurrent requests for the same data)
+- performance enhancements such as inbuilt support for the newer [`IBufferDistributedCache`](https://learn.microsoft.com/dotnet/api/microsoft.extensions.caching.distributed.ibufferdistributedcache) API
+- fully configurable serialization
+
+Full `HybridCache` documentation is [here](https://learn.microsoft.com/aspnet/core/performance/caching/hybrid).
+
+## Full documentation
+
+See [learn.microsoft.com](https://learn.microsoft.com/aspnet/core/performance/caching/hybrid) for full discussion of `HybridCache`.
+
+## Install the package
+
+From the command-line:
+
+```console
+dotnet add package Microsoft.Extensions.Caching.Hybrid
+```
+
+Or directly in the C# project file:
+
+```xml
+
+
+
+```
+
+## Usage example
+
+The `HybridCache` service can be registered and configured via `IServiceCollection`, for example:
+
+```csharp
+builder.Services.AddHybridCache(/* optional configuration */);
+```
+
+Note that in many cases you may also wish to register a distributed cache backend, as
+[discussed here](https://learn.microsoft.com/aspnet/core/performance/caching/distributed); for example
+a Redis instance:
+
+```csharp
+builder.Services.AddStackExchangeRedisCache(options =>
+{
+ options.Configuration = builder.Configuration.GetConnectionString("MyRedisConStr");
+});
+```
+
+Once registered, the `HybridCache` instance can be obtained via dependency-injection, allowing the
+`GetOrCreateAsync` API to be used to obtain data:
+
+```csharp
+public class SomeService(HybridCache cache)
+{
+ private HybridCache _cache = cache;
+
+ public async Task GetSomeInfoAsync(string name, int id, CancellationToken token = default)
+ {
+ return await _cache.GetOrCreateAsync(
+ $"{name}-{id}", // Unique key to the cache entry
+ async cancel => await GetDataFromTheSourceAsync(name, id, cancel),
+ cancellationToken: token
+ );
+ }
+
+ private async Task GetDataFromTheSourceAsync(string name, int id, CancellationToken token)
+ {
+ // talk to the underlying data store here - could be SQL, gRPC, HTTP, etc
+ }
+}
+```
+
+Additional usage guidance - including expiration, custom serialization support, and alternate usage
+to reduce delegate allocation - is available
+on [learn.microsoft.com](https://learn.microsoft.com/aspnet/core/performance/caching/hybrid).
diff --git a/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/BasicConfig.json b/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/BasicConfig.json
new file mode 100644
index 00000000000..374114fb1db
--- /dev/null
+++ b/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/BasicConfig.json
@@ -0,0 +1,12 @@
+{
+ "no_entry_options": {
+ "MaximumKeyLength": 937
+ },
+ "with_entry_options": {
+ "MaximumKeyLength": 937,
+ "DefaultEntryOptions": {
+ "LocalCacheExpiration": "00:02:00",
+ "Flags": "DisableCompression,DisableLocalCacheRead"
+ }
+ }
+}
diff --git a/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/BufferReleaseTests.cs b/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/BufferReleaseTests.cs
new file mode 100644
index 00000000000..3318a86fd70
--- /dev/null
+++ b/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/BufferReleaseTests.cs
@@ -0,0 +1,235 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Buffers;
+using System.Runtime.CompilerServices;
+using Microsoft.Extensions.Caching.Distributed;
+using Microsoft.Extensions.Caching.Hybrid.Internal;
+using Microsoft.Extensions.Caching.Memory;
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.Options;
+using static Microsoft.Extensions.Caching.Hybrid.Internal.DefaultHybridCache;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Tests;
+
+public class BufferReleaseTests // note that buffer ref-counting is only enabled for DEBUG builds; can only verify general behaviour without that
+{
+ private static ServiceProvider GetDefaultCache(out DefaultHybridCache cache, Action? config = null)
+ {
+ var services = new ServiceCollection();
+ config?.Invoke(services);
+ services.AddHybridCache();
+ var provider = services.BuildServiceProvider();
+ cache = Assert.IsType(provider.GetRequiredService());
+ return provider;
+ }
+
+ [Fact]
+ public async Task BufferGetsReleased_NoL2()
+ {
+ using var provider = GetDefaultCache(out var cache);
+#if DEBUG
+ cache.DebugOnlyGetOutstandingBuffers(flush: true);
+#endif
+
+ var key = Me();
+#if DEBUG
+ Assert.Equal(0, cache.DebugOnlyGetOutstandingBuffers());
+#endif
+ var first = await cache.GetOrCreateAsync(key, _ => GetAsync());
+ Assert.NotNull(first);
+#if DEBUG
+ Assert.Equal(1, cache.DebugOnlyGetOutstandingBuffers());
+#endif
+ Assert.True(cache.DebugTryGetCacheItem(key, out var cacheItem));
+
+ // assert that we can reserve the buffer *now* (mostly to see that it behaves differently later)
+ Assert.True(cacheItem.NeedsEvictionCallback, "should be pooled memory");
+ Assert.True(cacheItem.TryReserveBuffer(out _));
+ cacheItem.Release(); // for the above reserve
+
+ var second = await cache.GetOrCreateAsync(key, _ => GetAsync(), _noUnderlying);
+ Assert.NotNull(second);
+ Assert.NotSame(first, second);
+
+ Assert.Equal(1, cacheItem.RefCount);
+ await cache.RemoveAsync(key);
+ var third = await cache.GetOrCreateAsync(key, _ => GetAsync(), _noUnderlying);
+ Assert.Null(third);
+
+ // give it a moment for the eviction callback to kick in
+ for (var i = 0; i < 10 && cacheItem.NeedsEvictionCallback; i++)
+ {
+ await Task.Delay(250);
+ }
+#if DEBUG
+ Assert.Equal(0, cache.DebugOnlyGetOutstandingBuffers());
+#endif
+
+ // assert that we can *no longer* reserve this buffer, because we've already recycled it
+ Assert.False(cacheItem.TryReserveBuffer(out _));
+ Assert.Equal(0, cacheItem.RefCount);
+ Assert.False(cacheItem.NeedsEvictionCallback, "should be recycled now");
+ static ValueTask GetAsync() => new(new Customer { Id = 42, Name = "Fred" });
+ }
+
+ private static readonly HybridCacheEntryOptions _noUnderlying = new() { Flags = HybridCacheEntryFlags.DisableUnderlyingData };
+
+ private class TestCache : MemoryDistributedCache, IBufferDistributedCache
+ {
+ public TestCache(IOptions options)
+ : base(options)
+ {
+ }
+
+ void IBufferDistributedCache.Set(string key, ReadOnlySequence value, DistributedCacheEntryOptions options)
+ => Set(key, value.ToArray(), options); // efficiency not important for this
+
+ ValueTask IBufferDistributedCache.SetAsync(string key, ReadOnlySequence value, DistributedCacheEntryOptions options, CancellationToken token)
+ => new(SetAsync(key, value.ToArray(), options, token)); // efficiency not important for this
+
+ bool IBufferDistributedCache.TryGet(string key, IBufferWriter destination)
+ => Write(destination, Get(key));
+
+ async ValueTask IBufferDistributedCache.TryGetAsync(string key, IBufferWriter destination, CancellationToken token)
+ => Write(destination, await GetAsync(key, token));
+
+ private static bool Write(IBufferWriter destination, byte[]? buffer)
+ {
+ if (buffer is null)
+ {
+ return false;
+ }
+
+ destination.Write(buffer);
+ return true;
+ }
+ }
+
+ [Fact]
+ public async Task BufferDoesNotNeedRelease_LegacyL2() // byte[] API; not pooled
+ {
+ using var provider = GetDefaultCache(out var cache,
+ services => services.AddSingleton());
+
+ cache.DebugRemoveFeatures(CacheFeatures.BackendBuffers);
+
+ // prep the backend with our data
+ var key = Me();
+ Assert.NotNull(cache.BackendCache);
+ var serializer = cache.GetSerializer();
+ using (var writer = RecyclableArrayBufferWriter.Create(int.MaxValue))
+ {
+ serializer.Serialize(await GetAsync(), writer);
+ cache.BackendCache.Set(key, writer.ToArray());
+ }
+#if DEBUG
+ cache.DebugOnlyGetOutstandingBuffers(flush: true);
+ Assert.Equal(0, cache.DebugOnlyGetOutstandingBuffers());
+#endif
+ var first = await cache.GetOrCreateAsync(key, _ => GetAsync(), _noUnderlying); // we expect this to come from L2, hence NoUnderlying
+ Assert.NotNull(first);
+#if DEBUG
+ Assert.Equal(0, cache.DebugOnlyGetOutstandingBuffers());
+#endif
+ Assert.True(cache.DebugTryGetCacheItem(key, out var cacheItem));
+
+ // assert that we can reserve the buffer *now* (mostly to see that it behaves differently later)
+ Assert.False(cacheItem.NeedsEvictionCallback, "should NOT be pooled memory");
+ Assert.True(cacheItem.TryReserveBuffer(out _));
+ cacheItem.Release(); // for the above reserve
+
+ var second = await cache.GetOrCreateAsync(key, _ => GetAsync(), _noUnderlying);
+ Assert.NotNull(second);
+ Assert.NotSame(first, second);
+
+ Assert.Equal(1, cacheItem.RefCount);
+ await cache.RemoveAsync(key);
+ var third = await cache.GetOrCreateAsync(key, _ => GetAsync(), _noUnderlying);
+ Assert.Null(third);
+ Assert.Null(await cache.BackendCache.GetAsync(key)); // should be gone from L2 too
+
+ // give it a moment for the eviction callback to kick in
+ for (var i = 0; i < 10 && cacheItem.NeedsEvictionCallback; i++)
+ {
+ await Task.Delay(250);
+ }
+#if DEBUG
+ Assert.Equal(0, cache.DebugOnlyGetOutstandingBuffers());
+#endif
+
+ // assert that we can *still* reserve this buffer; it was never pooled, so it is not recycled
+ Assert.True(cacheItem.TryReserveBuffer(out _)); // always readable
+ cacheItem.Release();
+ Assert.Equal(1, cacheItem.RefCount); // not decremented because there was no need to add the hook
+
+ Assert.False(cacheItem.NeedsEvictionCallback, "should still not need recycling");
+ static ValueTask GetAsync() => new(new Customer { Id = 42, Name = "Fred" });
+ }
+
+ [Fact]
+ public async Task BufferGetsReleased_BufferL2() // IBufferWriter API; pooled
+ {
+ using var provider = GetDefaultCache(out var cache,
+ services => services.AddSingleton());
+
+ // prep the backend with our data
+ var key = Me();
+ Assert.NotNull(cache.BackendCache);
+ var serializer = cache.GetSerializer();
+ using (var writer = RecyclableArrayBufferWriter.Create(int.MaxValue))
+ {
+ serializer.Serialize(await GetAsync(), writer);
+ cache.BackendCache.Set(key, writer.ToArray());
+ }
+#if DEBUG
+ cache.DebugOnlyGetOutstandingBuffers(flush: true);
+ Assert.Equal(0, cache.DebugOnlyGetOutstandingBuffers());
+#endif
+ var first = await cache.GetOrCreateAsync(key, _ => GetAsync(), _noUnderlying); // we expect this to come from L2, hence NoUnderlying
+ Assert.NotNull(first);
+#if DEBUG
+ Assert.Equal(1, cache.DebugOnlyGetOutstandingBuffers());
+#endif
+ Assert.True(cache.DebugTryGetCacheItem(key, out var cacheItem));
+
+ // assert that we can reserve the buffer *now* (mostly to see that it behaves differently later)
+ Assert.True(cacheItem.NeedsEvictionCallback, "should be pooled memory");
+ Assert.True(cacheItem.TryReserveBuffer(out _));
+ cacheItem.Release(); // for the above reserve
+
+ var second = await cache.GetOrCreateAsync(key, _ => GetAsync(), _noUnderlying);
+ Assert.NotNull(second);
+ Assert.NotSame(first, second);
+
+ Assert.Equal(1, cacheItem.RefCount);
+ await cache.RemoveAsync(key);
+ var third = await cache.GetOrCreateAsync(key, _ => GetAsync(), _noUnderlying);
+ Assert.Null(third);
+ Assert.Null(await cache.BackendCache.GetAsync(key)); // should be gone from L2 too
+
+ // give it a moment for the eviction callback to kick in
+ for (var i = 0; i < 10 && cacheItem.NeedsEvictionCallback; i++)
+ {
+ await Task.Delay(250);
+ }
+#if DEBUG
+ Assert.Equal(0, cache.DebugOnlyGetOutstandingBuffers());
+#endif
+
+ // assert that we can *no longer* reserve this buffer, because we've already recycled it
+ Assert.False(cacheItem.TryReserveBuffer(out _)); // released now
+ Assert.Equal(0, cacheItem.RefCount);
+
+ Assert.False(cacheItem.NeedsEvictionCallback, "should be recycled by now");
+ static ValueTask GetAsync() => new(new Customer { Id = 42, Name = "Fred" });
+ }
+
+ public class Customer
+ {
+ public int Id { get; set; }
+ public string Name { get; set; } = "";
+ }
+
+ private static string Me([CallerMemberName] string caller = "") => caller;
+}
diff --git a/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/DistributedCacheTests.cs b/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/DistributedCacheTests.cs
new file mode 100644
index 00000000000..4f3766990cc
--- /dev/null
+++ b/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/DistributedCacheTests.cs
@@ -0,0 +1,397 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Buffers;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+using Microsoft.Extensions.Caching.Distributed;
+using Microsoft.Extensions.Caching.Hybrid.Internal;
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.Internal;
+using Xunit.Abstractions;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Tests;
+
+///
+/// Validate over-arching expectations of DC implementations, in particular behaviour re IBufferDistributedCache added for HybridCache.
+///
+public abstract class DistributedCacheTests
+{
+ protected DistributedCacheTests(ITestOutputHelper log)
+ {
+ Log = log;
+ }
+
+ protected ITestOutputHelper Log { get; }
+ protected abstract ValueTask ConfigureAsync(IServiceCollection services);
+ protected abstract bool CustomClockSupported { get; }
+
+ protected FakeTime Clock { get; } = new();
+
+ protected sealed class FakeTime : TimeProvider, ISystemClock
+ {
+ private DateTimeOffset _now = DateTimeOffset.UtcNow;
+ public void Reset() => _now = DateTimeOffset.UtcNow;
+
+ DateTimeOffset ISystemClock.UtcNow => _now;
+
+ public override DateTimeOffset GetUtcNow() => _now;
+
+ public void Add(TimeSpan delta) => _now += delta;
+ }
+
+ private async ValueTask InitAsync()
+ {
+ Clock.Reset();
+ var services = new ServiceCollection();
+ services.AddSingleton(Clock);
+ services.AddSingleton(Clock);
+ await ConfigureAsync(services);
+ return services;
+ }
+
+ [Theory]
+ [InlineData(0)]
+ [InlineData(128)]
+ [InlineData(1024)]
+ [InlineData(16 * 1024)]
+ public async Task SimpleBufferRoundtrip(int size)
+ {
+ var cache = (await InitAsync()).BuildServiceProvider().GetService();
+ if (cache is null)
+ {
+ Log.WriteLine("Cache is not available");
+ return; // inconclusive
+ }
+
+ var key = $"{Me()}:{size}";
+ cache.Remove(key);
+ Assert.Null(cache.Get(key));
+
+ var expected = new byte[size];
+ new Random().NextBytes(expected);
+ cache.Set(key, expected, _fiveMinutes);
+
+ var actual = cache.Get(key);
+ Assert.NotNull(actual);
+ Assert.True(expected.SequenceEqual(actual));
+ Log.WriteLine("Data validated");
+
+ if (CustomClockSupported)
+ {
+ Clock.Add(TimeSpan.FromMinutes(4));
+ actual = cache.Get(key);
+ Assert.NotNull(actual);
+ Assert.True(expected.SequenceEqual(actual));
+
+ Clock.Add(TimeSpan.FromMinutes(2));
+ actual = cache.Get(key);
+ Assert.Null(actual);
+
+ Log.WriteLine("Expiration validated");
+ }
+ else
+ {
+ Log.WriteLine("Expiration not validated - TimeProvider not supported");
+ }
+ }
+
+ [Theory]
+ [InlineData(0)]
+ [InlineData(128)]
+ [InlineData(1024)]
+ [InlineData(16 * 1024)]
+ public async Task SimpleBufferRoundtripAsync(int size)
+ {
+ var cache = (await InitAsync()).BuildServiceProvider().GetService();
+ if (cache is null)
+ {
+ Log.WriteLine("Cache is not available");
+ return; // inconclusive
+ }
+
+ var key = $"{Me()}:{size}";
+ await cache.RemoveAsync(key);
+ Assert.Null(cache.Get(key));
+
+ var expected = new byte[size];
+ new Random().NextBytes(expected);
+ await cache.SetAsync(key, expected, _fiveMinutes);
+
+ var actual = await cache.GetAsync(key);
+ Assert.NotNull(actual);
+ Assert.True(expected.SequenceEqual(actual));
+ Log.WriteLine("Data validated");
+
+ if (CustomClockSupported)
+ {
+ Clock.Add(TimeSpan.FromMinutes(4));
+ actual = await cache.GetAsync(key);
+ Assert.NotNull(actual);
+ Assert.True(expected.SequenceEqual(actual));
+
+ Clock.Add(TimeSpan.FromMinutes(2));
+ actual = await cache.GetAsync(key);
+ Assert.Null(actual);
+
+ Log.WriteLine("Expiration validated");
+ }
+ else
+ {
+ Log.WriteLine("Expiration not validated - TimeProvider not supported");
+ }
+ }
+
+ public enum SequenceKind
+ {
+ FullArray,
+ PaddedArray,
+ CustomMemory,
+ MultiSegment,
+ }
+
+ [Theory]
+ [InlineData(0, SequenceKind.FullArray)]
+ [InlineData(128, SequenceKind.FullArray)]
+ [InlineData(1024, SequenceKind.FullArray)]
+ [InlineData(16 * 1024, SequenceKind.FullArray)]
+ [InlineData(0, SequenceKind.PaddedArray)]
+ [InlineData(128, SequenceKind.PaddedArray)]
+ [InlineData(1024, SequenceKind.PaddedArray)]
+ [InlineData(16 * 1024, SequenceKind.PaddedArray)]
+ [InlineData(0, SequenceKind.CustomMemory)]
+ [InlineData(128, SequenceKind.CustomMemory)]
+ [InlineData(1024, SequenceKind.CustomMemory)]
+ [InlineData(16 * 1024, SequenceKind.CustomMemory)]
+ [InlineData(0, SequenceKind.MultiSegment)]
+ [InlineData(128, SequenceKind.MultiSegment)]
+ [InlineData(1024, SequenceKind.MultiSegment)]
+ [InlineData(16 * 1024, SequenceKind.MultiSegment)]
+ public async Task ReadOnlySequenceBufferRoundtrip(int size, SequenceKind kind)
+ {
+ var cache = (await InitAsync()).BuildServiceProvider().GetService() as IBufferDistributedCache;
+ if (cache is null)
+ {
+ Log.WriteLine("Cache is not available or does not support IBufferDistributedCache");
+ return; // inconclusive
+ }
+
+ var key = $"{Me()}:{size}/{kind}";
+ cache.Remove(key);
+ Assert.Null(cache.Get(key));
+
+ var payload = Invent(size, kind);
+ ReadOnlyMemory expected = payload.ToArray(); // simplify for testing
+ Assert.Equal(size, expected.Length);
+ cache.Set(key, payload, _fiveMinutes);
+
+ var writer = RecyclableArrayBufferWriter.Create(int.MaxValue);
+ Assert.True(cache.TryGet(key, writer));
+ Assert.True(expected.Span.SequenceEqual(writer.GetCommittedMemory().Span));
+ writer.ResetInPlace();
+ Log.WriteLine("Data validated");
+
+ if (CustomClockSupported)
+ {
+ Clock.Add(TimeSpan.FromMinutes(4));
+ Assert.True(cache.TryGet(key, writer));
+ Assert.True(expected.Span.SequenceEqual(writer.GetCommittedMemory().Span));
+ writer.ResetInPlace();
+
+ Clock.Add(TimeSpan.FromMinutes(2));
+ Assert.False(cache.TryGet(key, writer));
+ Assert.Equal(0, writer.CommittedBytes);
+
+ Log.WriteLine("Expiration validated");
+ }
+ else
+ {
+ Log.WriteLine("Expiration not validated - TimeProvider not supported");
+ }
+
+ writer.Dispose(); // intentionally only recycle on success
+ }
+
+ [Theory]
+ [InlineData(0, SequenceKind.FullArray)]
+ [InlineData(128, SequenceKind.FullArray)]
+ [InlineData(1024, SequenceKind.FullArray)]
+ [InlineData(16 * 1024, SequenceKind.FullArray)]
+ [InlineData(0, SequenceKind.PaddedArray)]
+ [InlineData(128, SequenceKind.PaddedArray)]
+ [InlineData(1024, SequenceKind.PaddedArray)]
+ [InlineData(16 * 1024, SequenceKind.PaddedArray)]
+ [InlineData(0, SequenceKind.CustomMemory)]
+ [InlineData(128, SequenceKind.CustomMemory)]
+ [InlineData(1024, SequenceKind.CustomMemory)]
+ [InlineData(16 * 1024, SequenceKind.CustomMemory)]
+ [InlineData(0, SequenceKind.MultiSegment)]
+ [InlineData(128, SequenceKind.MultiSegment)]
+ [InlineData(1024, SequenceKind.MultiSegment)]
+ [InlineData(16 * 1024, SequenceKind.MultiSegment)]
+ public async Task ReadOnlySequenceBufferRoundtripAsync(int size, SequenceKind kind)
+ {
+ var cache = (await InitAsync()).BuildServiceProvider().GetService() as IBufferDistributedCache;
+ if (cache is null)
+ {
+ Log.WriteLine("Cache is not available or does not support IBufferDistributedCache");
+ return; // inconclusive
+ }
+
+ var key = $"{Me()}:{size}/{kind}";
+ await cache.RemoveAsync(key);
+ Assert.Null(await cache.GetAsync(key));
+
+ var payload = Invent(size, kind);
+ ReadOnlyMemory expected = payload.ToArray(); // simplify for testing
+ Assert.Equal(size, expected.Length);
+ await cache.SetAsync(key, payload, _fiveMinutes);
+
+ var writer = RecyclableArrayBufferWriter.Create(int.MaxValue);
+ Assert.True(await cache.TryGetAsync(key, writer));
+ Assert.True(expected.Span.SequenceEqual(writer.GetCommittedMemory().Span));
+ writer.ResetInPlace();
+ Log.WriteLine("Data validated");
+
+ if (CustomClockSupported)
+ {
+ Clock.Add(TimeSpan.FromMinutes(4));
+ Assert.True(await cache.TryGetAsync(key, writer));
+ Assert.True(expected.Span.SequenceEqual(writer.GetCommittedMemory().Span));
+ writer.ResetInPlace();
+
+ Clock.Add(TimeSpan.FromMinutes(2));
+ Assert.False(await cache.TryGetAsync(key, writer));
+ Assert.Equal(0, writer.CommittedBytes);
+
+ Log.WriteLine("Expiration validated");
+ }
+ else
+ {
+ Log.WriteLine("Expiration not validated - TimeProvider not supported");
+ }
+
+ writer.Dispose(); // intentionally only recycle on success
+ }
+
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Reliability", "CA2000:Dispose objects before losing scope", Justification = "Not relevant for this test - no-op")]
+ private static ReadOnlySequence Invent(int size, SequenceKind kind)
+ {
+ var rand = new Random();
+ ReadOnlySequence payload;
+ switch (kind)
+ {
+ case SequenceKind.FullArray:
+ var arr = new byte[size];
+ rand.NextBytes(arr);
+ payload = new(arr);
+ break;
+ case SequenceKind.PaddedArray:
+ arr = new byte[size + 10];
+ rand.NextBytes(arr);
+ payload = new(arr, 5, arr.Length - 10);
+ break;
+ case SequenceKind.CustomMemory:
+ var mem = new CustomMemory(size, rand).Memory;
+ payload = new(mem);
+ break;
+ case SequenceKind.MultiSegment:
+ if (size == 0)
+ {
+ payload = default;
+ break;
+ }
+
+ if (size < 10)
+ {
+ throw new ArgumentException("small segments not considered"); // a pain to construct
+ }
+
+ CustomSegment first = new(10, rand, null); // we'll take the last 3 of this 10
+ CustomSegment second = new(size - 7, rand, first); // we'll take all of this one
+ CustomSegment third = new(10, rand, second); // we'll take the first 4 of this 10
+ payload = new(first, 7, third, 4);
+ break;
+ default:
+ throw new ArgumentOutOfRangeException(nameof(kind));
+ }
+
+ // now validate what we expect of that payload
+ Assert.Equal(size, payload.Length);
+ switch (kind)
+ {
+ case SequenceKind.CustomMemory or SequenceKind.MultiSegment when size == 0:
+ Assert.True(payload.IsSingleSegment);
+ Assert.True(MemoryMarshal.TryGetArray(payload.First, out _));
+ break;
+ case SequenceKind.MultiSegment:
+ Assert.False(payload.IsSingleSegment);
+ break;
+ case SequenceKind.CustomMemory:
+ Assert.True(payload.IsSingleSegment);
+ Assert.False(MemoryMarshal.TryGetArray(payload.First, out _));
+ break;
+ case SequenceKind.FullArray:
+ Assert.True(payload.IsSingleSegment);
+ Assert.True(MemoryMarshal.TryGetArray(payload.First, out var segment));
+ Assert.Equal(0, segment.Offset);
+ Assert.NotNull(segment.Array);
+ Assert.Equal(size, segment.Count);
+ Assert.Equal(size, segment.Array.Length);
+ break;
+ case SequenceKind.PaddedArray:
+ Assert.True(payload.IsSingleSegment);
+ Assert.True(MemoryMarshal.TryGetArray(payload.First, out segment));
+ Assert.NotEqual(0, segment.Offset);
+ Assert.NotNull(segment.Array);
+ Assert.Equal(size, segment.Count);
+ Assert.NotEqual(size, segment.Array.Length);
+ break;
+ }
+
+ return payload;
+ }
+
+ private class CustomSegment : ReadOnlySequenceSegment
+ {
+ public CustomSegment(int size, Random? rand, CustomSegment? previous)
+ {
+ var arr = new byte[size + 10];
+ rand?.NextBytes(arr);
+ Memory = new(arr, 5, arr.Length - 10);
+ if (previous is not null)
+ {
+ RunningIndex = previous.RunningIndex + previous.Memory.Length;
+ previous.Next = this;
+ }
+ }
+ }
+
+ private class CustomMemory : MemoryManager
+ {
+ private readonly byte[] _data;
+ public CustomMemory(int size, Random? rand = null)
+ {
+ _data = new byte[size + 10];
+ rand?.NextBytes(_data);
+ }
+
+ public override Span GetSpan() => new(_data, 5, _data.Length - 10);
+ public override MemoryHandle Pin(int elementIndex = 0) => throw new NotSupportedException();
+ public override void Unpin() => throw new NotSupportedException();
+ protected override void Dispose(bool disposing)
+ {
+ }
+
+ protected override bool TryGetArray(out ArraySegment segment)
+ {
+ segment = default;
+ return false;
+ }
+ }
+
+ private static readonly DistributedCacheEntryOptions _fiveMinutes
+ = new() { AbsoluteExpirationRelativeToNow = TimeSpan.FromMinutes(5) };
+
+ protected static string Me([CallerMemberName] string caller = "") => caller;
+}
diff --git a/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/FunctionalTests.cs b/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/FunctionalTests.cs
new file mode 100644
index 00000000000..4cacdd59f6f
--- /dev/null
+++ b/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/FunctionalTests.cs
@@ -0,0 +1,82 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Runtime.CompilerServices;
+using Microsoft.Extensions.Caching.Hybrid.Internal;
+using Microsoft.Extensions.DependencyInjection;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Tests;
+public class FunctionalTests
+{
+ private static ServiceProvider GetDefaultCache(out DefaultHybridCache cache, Action? config = null)
+ {
+ var services = new ServiceCollection();
+ config?.Invoke(services);
+ services.AddHybridCache();
+ var provider = services.BuildServiceProvider();
+ cache = Assert.IsType(provider.GetRequiredService());
+ return provider;
+ }
+
+ [Fact]
+ public async Task RemoveSingleKey()
+ {
+ using var provider = GetDefaultCache(out var cache);
+ var key = Me();
+ Assert.Equal(42, await cache.GetOrCreateAsync(key, _ => new ValueTask(42)));
+
+ // now slightly different func to show delta; should use cached value initially
+ await cache.RemoveAsync("unrelated");
+ Assert.Equal(42, await cache.GetOrCreateAsync(key, _ => new ValueTask(96)));
+
+ // now remove and repeat - should get updated value
+ await cache.RemoveAsync(key);
+ Assert.Equal(96, await cache.GetOrCreateAsync(key, _ => new ValueTask(96)));
+ }
+
+ [Fact]
+ public async Task RemoveNoKeyViaArray()
+ {
+ using var provider = GetDefaultCache(out var cache);
+ var key = Me();
+ Assert.Equal(42, await cache.GetOrCreateAsync(key, _ => new ValueTask(42)));
+
+ // now slightly different func to show delta; should use same cached value
+ await cache.RemoveAsync([]);
+ Assert.Equal(42, await cache.GetOrCreateAsync(key, _ => new ValueTask(96)));
+ }
+
+ [Fact]
+ public async Task RemoveSingleKeyViaArray()
+ {
+ using var provider = GetDefaultCache(out var cache);
+ var key = Me();
+ Assert.Equal(42, await cache.GetOrCreateAsync(key, _ => new ValueTask(42)));
+
+ // now slightly different func to show delta; should use cached value initially
+ await cache.RemoveAsync(["unrelated"]);
+ Assert.Equal(42, await cache.GetOrCreateAsync(key, _ => new ValueTask(96)));
+
+ // now remove and repeat - should get updated value
+ await cache.RemoveAsync([key]);
+ Assert.Equal(96, await cache.GetOrCreateAsync(key, _ => new ValueTask(96)));
+ }
+
+ [Fact]
+ public async Task RemoveMultipleKeysViaArray()
+ {
+ using var provider = GetDefaultCache(out var cache);
+ var key = Me();
+ Assert.Equal(42, await cache.GetOrCreateAsync(key, _ => new ValueTask(42)));
+
+ // now slightly different func to show delta; should use cached value initially
+ Assert.Equal(42, await cache.GetOrCreateAsync(key, _ => new ValueTask(96)));
+
+ // now remove and repeat - should get updated value
+ await cache.RemoveAsync([key, "unrelated"]);
+ Assert.Equal(96, await cache.GetOrCreateAsync(key, _ => new ValueTask(96)));
+ }
+
+ private static string Me([CallerMemberName] string caller = "") => caller;
+
+}
diff --git a/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/L2Tests.cs b/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/L2Tests.cs
new file mode 100644
index 00000000000..bf1f7a35fee
--- /dev/null
+++ b/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/L2Tests.cs
@@ -0,0 +1,274 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.Buffers;
+using System.Runtime.CompilerServices;
+using Microsoft.Extensions.Caching.Distributed;
+using Microsoft.Extensions.Caching.Hybrid.Internal;
+using Microsoft.Extensions.Caching.Memory;
+using Microsoft.Extensions.DependencyInjection;
+using Microsoft.Extensions.Options;
+using Xunit.Abstractions;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Tests;
+public class L2Tests(ITestOutputHelper log)
+{
+ private static string CreateString(bool work = false)
+ {
+ Assert.True(work, "we didn't expect this to be invoked");
+ return Guid.NewGuid().ToString();
+ }
+
+ private static readonly HybridCacheEntryOptions _expiry = new() { Expiration = TimeSpan.FromMinutes(3.5) };
+
+ private static readonly HybridCacheEntryOptions _expiryNoL1 = new() { Flags = HybridCacheEntryFlags.DisableLocalCache, Expiration = TimeSpan.FromMinutes(3.5) };
+
+ private ITestOutputHelper Log => log;
+
+ private class Options(T value) : IOptions
+ where T : class
+ {
+ T IOptions.Value => value;
+ }
+
+ private ServiceProvider GetDefaultCache(bool buffers, out DefaultHybridCache cache)
+ {
+ var services = new ServiceCollection();
+ var localCacheOptions = new Options(new());
+ var localCache = new MemoryDistributedCache(localCacheOptions);
+ services.AddSingleton(buffers ? new BufferLoggingCache(Log, localCache) : new LoggingCache(Log, localCache));
+ services.AddHybridCache();
+ var provider = services.BuildServiceProvider();
+ cache = Assert.IsType(provider.GetRequiredService());
+ return provider;
+ }
+
+ [Theory]
+ [InlineData(true)]
+ [InlineData(false)]
+ public async Task AssertL2Operations_Immutable(bool buffers)
+ {
+ using var provider = GetDefaultCache(buffers, out var cache);
+ var backend = Assert.IsAssignableFrom(cache.BackendCache);
+ Log.WriteLine("Inventing key...");
+ var s = await cache.GetOrCreateAsync(Me(), ct => new ValueTask(CreateString(true)));
+ Assert.Equal(2, backend.OpCount); // GET, SET
+
+ Log.WriteLine("Reading with L1...");
+ for (var i = 0; i < 5; i++)
+ {
+ var x = await cache.GetOrCreateAsync(Me(), ct => new ValueTask(CreateString()));
+ Assert.Equal(s, x);
+ Assert.Same(s, x);
+ }
+
+ Assert.Equal(2, backend.OpCount); // shouldn't be hit
+
+ Log.WriteLine("Reading without L1...");
+ for (var i = 0; i < 5; i++)
+ {
+ var x = await cache.GetOrCreateAsync(Me(), ct => new ValueTask(CreateString()), _expiryNoL1);
+ Assert.Equal(s, x);
+ Assert.NotSame(s, x);
+ }
+
+ Assert.Equal(7, backend.OpCount); // should be read every time
+
+ Log.WriteLine("Setting value directly");
+ s = CreateString(true);
+ await cache.SetAsync(Me(), s);
+ for (var i = 0; i < 5; i++)
+ {
+ var x = await cache.GetOrCreateAsync(Me(), ct => new ValueTask(CreateString()));
+ Assert.Equal(s, x);
+ Assert.Same(s, x);
+ }
+
+ Assert.Equal(8, backend.OpCount); // SET
+
+ Log.WriteLine("Removing key...");
+ await cache.RemoveAsync(Me());
+ Assert.Equal(9, backend.OpCount); // DEL
+
+ Log.WriteLine("Fetching new...");
+ var t = await cache.GetOrCreateAsync(Me(), ct => new ValueTask(CreateString(true)));
+ Assert.NotEqual(s, t);
+ Assert.Equal(11, backend.OpCount); // GET, SET
+ }
+
+ public sealed class Foo
+ {
+ public string Value { get; set; } = "";
+ }
+
+ [Theory]
+ [InlineData(true)]
+ [InlineData(false)]
+ public async Task AssertL2Operations_Mutable(bool buffers)
+ {
+ using var provider = GetDefaultCache(buffers, out var cache);
+ var backend = Assert.IsAssignableFrom(cache.BackendCache);
+ Log.WriteLine("Inventing key...");
+ var s = await cache.GetOrCreateAsync(Me(), ct => new ValueTask(new Foo { Value = CreateString(true) }), _expiry);
+ Assert.Equal(2, backend.OpCount); // GET, SET
+
+ Log.WriteLine("Reading with L1...");
+ for (var i = 0; i < 5; i++)
+ {
+ var x = await cache.GetOrCreateAsync(Me(), ct => new ValueTask(new Foo { Value = CreateString() }), _expiry);
+ Assert.Equal(s.Value, x.Value);
+ Assert.NotSame(s, x);
+ }
+
+ Assert.Equal(2, backend.OpCount); // shouldn't be hit
+
+ Log.WriteLine("Reading without L1...");
+ for (var i = 0; i < 5; i++)
+ {
+ var x = await cache.GetOrCreateAsync(Me(), ct => new ValueTask(new Foo { Value = CreateString() }), _expiryNoL1);
+ Assert.Equal(s.Value, x.Value);
+ Assert.NotSame(s, x);
+ }
+
+ Assert.Equal(7, backend.OpCount); // should be read every time
+
+ Log.WriteLine("Setting value directly");
+ s = new Foo { Value = CreateString(true) };
+ await cache.SetAsync(Me(), s);
+ for (var i = 0; i < 5; i++)
+ {
+ var x = await cache.GetOrCreateAsync(Me(), ct => new ValueTask(new Foo { Value = CreateString() }), _expiry);
+ Assert.Equal(s.Value, x.Value);
+ Assert.NotSame(s, x);
+ }
+
+ Assert.Equal(8, backend.OpCount); // SET
+
+ Log.WriteLine("Removing key...");
+ await cache.RemoveAsync(Me());
+ Assert.Equal(9, backend.OpCount); // DEL
+
+ Log.WriteLine("Fetching new...");
+ var t = await cache.GetOrCreateAsync(Me(), ct => new ValueTask(new Foo { Value = CreateString(true) }), _expiry);
+ Assert.NotEqual(s.Value, t.Value);
+ Assert.Equal(11, backend.OpCount); // GET, SET
+ }
+
+ private class BufferLoggingCache : LoggingCache, IBufferDistributedCache
+ {
+ public BufferLoggingCache(ITestOutputHelper log, IDistributedCache tail)
+ : base(log, tail)
+ {
+ }
+
+ void IBufferDistributedCache.Set(string key, ReadOnlySequence value, DistributedCacheEntryOptions options)
+ {
+ Interlocked.Increment(ref ProtectedOpCount);
+ Log.WriteLine($"Set (ROS-byte): {key}");
+ Tail.Set(key, value.ToArray(), options);
+ }
+
+ ValueTask IBufferDistributedCache.SetAsync(string key, ReadOnlySequence value, DistributedCacheEntryOptions options, CancellationToken token)
+ {
+ Interlocked.Increment(ref ProtectedOpCount);
+ Log.WriteLine($"SetAsync (ROS-byte): {key}");
+ return new(Tail.SetAsync(key, value.ToArray(), options, token));
+ }
+
+ bool IBufferDistributedCache.TryGet(string key, IBufferWriter destination)
+ {
+ Interlocked.Increment(ref ProtectedOpCount);
+ Log.WriteLine($"TryGet: {key}");
+ var buffer = Tail.Get(key);
+ if (buffer is null)
+ {
+ return false;
+ }
+
+ destination.Write(buffer);
+ return true;
+ }
+
+ async ValueTask IBufferDistributedCache.TryGetAsync(string key, IBufferWriter destination, CancellationToken token)
+ {
+ Interlocked.Increment(ref ProtectedOpCount);
+ Log.WriteLine($"TryGetAsync: {key}");
+ var buffer = await Tail.GetAsync(key, token);
+ if (buffer is null)
+ {
+ return false;
+ }
+
+ destination.Write(buffer);
+ return true;
+ }
+ }
+
+ private class LoggingCache(ITestOutputHelper log, IDistributedCache tail) : IDistributedCache
+ {
+ protected ITestOutputHelper Log => log;
+ protected IDistributedCache Tail => tail;
+
+ protected int ProtectedOpCount;
+
+ public int OpCount => Volatile.Read(ref ProtectedOpCount);
+
+ byte[]? IDistributedCache.Get(string key)
+ {
+ Interlocked.Increment(ref ProtectedOpCount);
+ Log.WriteLine($"Get: {key}");
+ return Tail.Get(key);
+ }
+
+ Task IDistributedCache.GetAsync(string key, CancellationToken token)
+ {
+ Interlocked.Increment(ref ProtectedOpCount);
+ Log.WriteLine($"GetAsync: {key}");
+ return Tail.GetAsync(key, token);
+ }
+
+ void IDistributedCache.Refresh(string key)
+ {
+ Interlocked.Increment(ref ProtectedOpCount);
+ Log.WriteLine($"Refresh: {key}");
+ Tail.Refresh(key);
+ }
+
+ Task IDistributedCache.RefreshAsync(string key, CancellationToken token)
+ {
+ Interlocked.Increment(ref ProtectedOpCount);
+ Log.WriteLine($"RefreshAsync: {key}");
+ return Tail.RefreshAsync(key, token);
+ }
+
+ void IDistributedCache.Remove(string key)
+ {
+ Interlocked.Increment(ref ProtectedOpCount);
+ Log.WriteLine($"Remove: {key}");
+ Tail.Remove(key);
+ }
+
+ Task IDistributedCache.RemoveAsync(string key, CancellationToken token)
+ {
+ Interlocked.Increment(ref ProtectedOpCount);
+ Log.WriteLine($"RemoveAsync: {key}");
+ return Tail.RemoveAsync(key, token);
+ }
+
+ void IDistributedCache.Set(string key, byte[] value, DistributedCacheEntryOptions options)
+ {
+ Interlocked.Increment(ref ProtectedOpCount);
+ Log.WriteLine($"Set (byte[]): {key}");
+ Tail.Set(key, value, options);
+ }
+
+ Task IDistributedCache.SetAsync(string key, byte[] value, DistributedCacheEntryOptions options, CancellationToken token)
+ {
+ Interlocked.Increment(ref ProtectedOpCount);
+ Log.WriteLine($"SetAsync (byte[]): {key}");
+ return Tail.SetAsync(key, value, options, token);
+ }
+ }
+
+ private static string Me([CallerMemberName] string caller = "") => caller;
+}
diff --git a/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/Microsoft.Extensions.Caching.Hybrid.Tests.csproj b/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/Microsoft.Extensions.Caching.Hybrid.Tests.csproj
new file mode 100644
index 00000000000..ff23283e066
--- /dev/null
+++ b/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/Microsoft.Extensions.Caching.Hybrid.Tests.csproj
@@ -0,0 +1,29 @@
+
+
+
+ $(NetCoreTargetFrameworks)$(ConditionalNet462)
+ enable
+ enable
+ true
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ PreserveNewest
+
+
+
+
diff --git a/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/RedisFixture.cs b/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/RedisFixture.cs
new file mode 100644
index 00000000000..09b37e16466
--- /dev/null
+++ b/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/RedisFixture.cs
@@ -0,0 +1,30 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using StackExchange.Redis;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Tests;
+
+public sealed class RedisFixture : IDisposable
+{
+ private ConnectionMultiplexer? _muxer;
+ private Task? _sharedConnect;
+ public Task ConnectAsync() => _sharedConnect ??= DoConnectAsync();
+
+ public void Dispose() => _muxer?.Dispose();
+
+ [System.Diagnostics.CodeAnalysis.SuppressMessage("Design", "CA1031:Do not catch general exception types", Justification = "catch-all")]
+ private async Task DoConnectAsync()
+ {
+ try
+ {
+ _muxer = await ConnectionMultiplexer.ConnectAsync("127.0.0.1:6379");
+ await _muxer.GetDatabase().PingAsync();
+ return _muxer;
+ }
+ catch
+ {
+ return null;
+ }
+ }
+}
diff --git a/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/RedisTests.cs b/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/RedisTests.cs
new file mode 100644
index 00000000000..d482f566a16
--- /dev/null
+++ b/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/RedisTests.cs
@@ -0,0 +1,90 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using Microsoft.Extensions.Caching.Hybrid.Internal;
+using Microsoft.Extensions.Caching.StackExchangeRedis;
+using Microsoft.Extensions.DependencyInjection;
+using StackExchange.Redis;
+using Xunit.Abstractions;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Tests;
+
+public class RedisTests : DistributedCacheTests, IClassFixture
+{
+ private readonly RedisFixture _fixture;
+ public RedisTests(RedisFixture fixture, ITestOutputHelper log)
+ : base(log)
+ {
+ _fixture = fixture;
+ }
+
+ protected override bool CustomClockSupported => false;
+
+ protected override async ValueTask ConfigureAsync(IServiceCollection services)
+ {
+ var redis = await _fixture.ConnectAsync();
+ if (redis is null)
+ {
+ Log.WriteLine("Redis is not available");
+ return; // inconclusive
+ }
+
+ Log.WriteLine("Redis is available");
+ services.AddSingleton(redis);
+ services.AddStackExchangeRedisCache(options =>
+ {
+ options.ConnectionMultiplexerFactory = () => Task.FromResult(redis);
+ });
+ }
+
+ [Theory]
+ [InlineData(false)]
+ [InlineData(true)]
+ public async Task BasicUsage(bool useBuffers)
+ {
+ var services = new ServiceCollection();
+ await ConfigureAsync(services);
+ services.AddHybridCache();
+ var provider = services.BuildServiceProvider(); // not "using" - that will tear down our redis; use the fixture for that
+
+ var cache = Assert.IsType(provider.GetRequiredService());
+ if (cache.BackendCache is null)
+ {
+ Log.WriteLine("Backend cache not available; inconclusive");
+ return;
+ }
+
+ Assert.IsAssignableFrom(cache.BackendCache);
+
+ if (!useBuffers)
+ {
+ // force byte[] mode
+ cache.DebugRemoveFeatures(DefaultHybridCache.CacheFeatures.BackendBuffers);
+ }
+
+ Log.WriteLine($"features: {cache.GetFeatures()}");
+
+ var key = Me();
+ var redis = provider.GetRequiredService();
+ await redis.GetDatabase().KeyDeleteAsync(key); // start from known state
+ Assert.False(await redis.GetDatabase().KeyExistsAsync(key));
+
+ var count = 0;
+ for (var i = 0; i < 10; i++)
+ {
+ await cache.GetOrCreateAsync(key, _ =>
+ {
+ Interlocked.Increment(ref count);
+ return new(Guid.NewGuid());
+ });
+ }
+
+ Assert.Equal(1, count);
+
+ await Task.Delay(500); // the L2 write continues in the background; give it a chance
+
+ var ttl = await redis.GetDatabase().KeyTimeToLiveAsync(key);
+ Log.WriteLine($"ttl: {ttl}");
+ Assert.NotNull(ttl);
+ }
+}
diff --git a/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/SampleUsage.cs b/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/SampleUsage.cs
new file mode 100644
index 00000000000..300fd6e4188
--- /dev/null
+++ b/test/Libraries/Microsoft.Extensions.Caching.Hybrid.Tests/SampleUsage.cs
@@ -0,0 +1,199 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System.ComponentModel;
+using System.Text.Json;
+using Microsoft.Extensions.Caching.Distributed;
+using Microsoft.Extensions.DependencyInjection;
+
+namespace Microsoft.Extensions.Caching.Hybrid.Tests;
+
+public class SampleUsage
+{
+ [Fact]
+ public async Task DistributedCacheWorks()
+ {
+ var services = new ServiceCollection();
+ services.AddDistributedMemoryCache();
+ services.AddTransient();
+ using var provider = services.BuildServiceProvider();
+
+ var obj = provider.GetRequiredService();
+ string name = "abc";
+ int id = 42;
+ var x = await obj.GetSomeInformationAsync(name, id);
+ var y = await obj.GetSomeInformationAsync(name, id);
+ Assert.NotSame(x, y);
+ Assert.Equal(id, x.Id);
+ Assert.Equal(name, x.Name);
+ Assert.Equal(id, y.Id);
+ Assert.Equal(name, y.Name);
+ }
+
+ [Fact]
+ public async Task HybridCacheWorks()
+ {
+ var services = new ServiceCollection();
+ services.AddHybridCache();
+ services.AddTransient();
+ using var provider = services.BuildServiceProvider();
+
+ var obj = provider.GetRequiredService();
+ string name = "abc";
+ int id = 42;
+ var x = await obj.GetSomeInformationAsync(name, id);
+ var y = await obj.GetSomeInformationAsync(name, id);
+ Assert.NotSame(x, y);
+ Assert.Equal(id, x.Id);
+ Assert.Equal(name, x.Name);
+ Assert.Equal(id, y.Id);
+ Assert.Equal(name, y.Name);
+ }
+
+ [Fact]
+ public async Task HybridCacheNoCaptureWorks()
+ {
+ var services = new ServiceCollection();
+ services.AddHybridCache();
+ services.AddTransient