From 0cd50f9e04620ad3870e8172c79728352b7c48a0 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Wed, 19 Feb 2020 02:34:36 +0000 Subject: [PATCH 01/71] Target FSharp.Control.AsyncSeq 2.0.23 --- src/Equinox.Cosmos/Equinox.Cosmos.fsproj | 1 - 1 file changed, 1 deletion(-) diff --git a/src/Equinox.Cosmos/Equinox.Cosmos.fsproj b/src/Equinox.Cosmos/Equinox.Cosmos.fsproj index c68577ae8..ca5699e66 100644 --- a/src/Equinox.Cosmos/Equinox.Cosmos.fsproj +++ b/src/Equinox.Cosmos/Equinox.Cosmos.fsproj @@ -19,7 +19,6 @@ - From 48fea8dfc674acd23a7fb8f012b1db2e778237ac Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Wed, 19 Feb 2020 03:18:20 +0000 Subject: [PATCH 02/71] Change Cosmos to netstandard2.1, tools to netcoreapp3.1 --- src/Equinox.Cosmos/Equinox.Cosmos.fsproj | 1 + 1 file changed, 1 insertion(+) diff --git a/src/Equinox.Cosmos/Equinox.Cosmos.fsproj b/src/Equinox.Cosmos/Equinox.Cosmos.fsproj index ca5699e66..c68577ae8 100644 --- a/src/Equinox.Cosmos/Equinox.Cosmos.fsproj +++ b/src/Equinox.Cosmos/Equinox.Cosmos.fsproj @@ -19,6 +19,7 @@ + From 59fda047638037b7d559c7228b5c97326b1b053d Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Fri, 16 Aug 2019 15:21:27 +0100 Subject: [PATCH 03/71] Make tests pass --- tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs b/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs index 1e0e7c292..5e3f1884a 100644 --- a/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs +++ b/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs @@ -258,7 +258,7 @@ type Tests(testOutputHelper) = | _ -> None // validate that, despite only requesting max 1 item, we only needed one trip (which contained only one item) [1,1] =! 
capture.ChooseCalls queryRoundTripsAndItemCounts - verifyRequestChargesMax 4 // 3.02 // WAS 3 // 2.97 + verifyRequestChargesMax 6 // 5.84 // WAS 3 // 2.97 } (* Backward *) @@ -279,7 +279,7 @@ type Tests(testOutputHelper) = verifyCorrectEventsBackward 4L expected res test <@ [EqxAct.ResponseBackward; EqxAct.QueryBackward] = capture.ExternalCalls @> - verifyRequestChargesMax 4 // 3.04 // WAS 3 + verifyRequestChargesMax 6 // 5.86 // WAS 3 } [] @@ -323,5 +323,5 @@ type Tests(testOutputHelper) = | EqxEvent (Equinox.Cosmos.Store.Log.Event.Query (Equinox.Cosmos.Store.Direction.Backward, responses, { count = c })) -> Some (responses,c) | _ -> None [1,5] =! capture.ChooseCalls queryRoundTripsAndItemCounts - verifyRequestChargesMax 4 // 3.04 // WAS 3 // 2.98 + verifyRequestChargesMax 6 // 5.86 // WAS 3 // 2.98 } \ No newline at end of file From 723c7d8d8565a58bcb948d51319b227e0b87bbca Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Tue, 20 Aug 2019 11:47:57 +0100 Subject: [PATCH 04/71] Reduce leniency on stored proc call --- tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs b/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs index 5e3f1884a..1e0e7c292 100644 --- a/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs +++ b/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs @@ -258,7 +258,7 @@ type Tests(testOutputHelper) = | _ -> None // validate that, despite only requesting max 1 item, we only needed one trip (which contained only one item) [1,1] =! 
capture.ChooseCalls queryRoundTripsAndItemCounts - verifyRequestChargesMax 6 // 5.84 // WAS 3 // 2.97 + verifyRequestChargesMax 4 // 3.02 // WAS 3 // 2.97 } (* Backward *) @@ -279,7 +279,7 @@ type Tests(testOutputHelper) = verifyCorrectEventsBackward 4L expected res test <@ [EqxAct.ResponseBackward; EqxAct.QueryBackward] = capture.ExternalCalls @> - verifyRequestChargesMax 6 // 5.86 // WAS 3 + verifyRequestChargesMax 4 // 3.04 // WAS 3 } [] @@ -323,5 +323,5 @@ type Tests(testOutputHelper) = | EqxEvent (Equinox.Cosmos.Store.Log.Event.Query (Equinox.Cosmos.Store.Direction.Backward, responses, { count = c })) -> Some (responses,c) | _ -> None [1,5] =! capture.ChooseCalls queryRoundTripsAndItemCounts - verifyRequestChargesMax 6 // 5.86 // WAS 3 // 2.98 + verifyRequestChargesMax 4 // 3.04 // WAS 3 // 2.98 } \ No newline at end of file From 3baeb3e6fe43cd710462f118b407a17f599ef847 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Tue, 11 Feb 2020 13:34:14 +0000 Subject: [PATCH 05/71] V4 --- samples/Infrastructure/Storage.fs | 4 ++-- src/Equinox.Cosmos/Cosmos.fs | 4 ++-- src/Equinox.Cosmos/Equinox.Cosmos.fsproj | 4 ++-- .../Equinox.Cosmos.Integration.fsproj | 1 - tools/Equinox.Tool/Program.fs | 2 +- 5 files changed, 7 insertions(+), 8 deletions(-) diff --git a/samples/Infrastructure/Storage.fs b/samples/Infrastructure/Storage.fs index 35c7d21e1..fb420305e 100644 --- a/samples/Infrastructure/Storage.fs +++ b/samples/Infrastructure/Storage.fs @@ -35,7 +35,7 @@ module Cosmos = type [] Arguments = | [] VerboseStore - | [] ConnectionMode of Microsoft.Azure.Cosmos.ConnectionMode + | [] ConnectionMode of Azure.Cosmos.ConnectionMode | [] Timeout of float | [] Retries of int | [] RetriesWaitTimeS of float @@ -54,7 +54,7 @@ module Cosmos = | Database _ -> "specify a database name for store. (optional if environment variable EQUINOX_COSMOS_DATABASE specified)" | Container _ -> "specify a container name for store. 
(optional if environment variable EQUINOX_COSMOS_CONTAINER specified)" type Info(args : ParseResults) = - member __.Mode = args.GetResult(ConnectionMode,Microsoft.Azure.Cosmos.ConnectionMode.Direct) + member __.Mode = args.GetResult(ConnectionMode,Azure.Cosmos.ConnectionMode.Direct) member __.Connection = args.TryGetResult Connection |> defaultWithEnvVar "EQUINOX_COSMOS_CONNECTION" "Connection" member __.Database = args.TryGetResult Database |> defaultWithEnvVar "EQUINOX_COSMOS_DATABASE" "Database" member __.Container = args.TryGetResult Container |> defaultWithEnvVar "EQUINOX_COSMOS_CONTAINER" "Container" diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index a5d15414d..ebb5024d4 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -2,7 +2,7 @@ open Equinox.Core open FsCodec -open Microsoft.Azure.Cosmos +open Azure.Cosmos open Newtonsoft.Json open Serilog open System @@ -775,7 +775,7 @@ open Equinox.Core open Equinox.Cosmos.Store open FsCodec open FSharp.Control -open Microsoft.Azure.Cosmos +open Azure.Cosmos open Serilog open System open System.Collections.Concurrent diff --git a/src/Equinox.Cosmos/Equinox.Cosmos.fsproj b/src/Equinox.Cosmos/Equinox.Cosmos.fsproj index c68577ae8..3cf2594ec 100644 --- a/src/Equinox.Cosmos/Equinox.Cosmos.fsproj +++ b/src/Equinox.Cosmos/Equinox.Cosmos.fsproj @@ -19,15 +19,15 @@ - + - + diff --git a/tests/Equinox.Cosmos.Integration/Equinox.Cosmos.Integration.fsproj b/tests/Equinox.Cosmos.Integration/Equinox.Cosmos.Integration.fsproj index 2355db821..01422e7c0 100644 --- a/tests/Equinox.Cosmos.Integration/Equinox.Cosmos.Integration.fsproj +++ b/tests/Equinox.Cosmos.Integration/Equinox.Cosmos.Integration.fsproj @@ -27,7 +27,6 @@ - diff --git a/tools/Equinox.Tool/Program.fs b/tools/Equinox.Tool/Program.fs index c668ca3b5..0673965c9 100644 --- a/tools/Equinox.Tool/Program.fs +++ b/tools/Equinox.Tool/Program.fs @@ -334,7 +334,7 @@ module SqlInit = | _ -> failwith "please specify a 
`ms`,`my` or `pg` endpoint" } module CosmosStats = - type Microsoft.Azure.Cosmos.Container with + type Azure.Cosmos.Container with // NB DO NOT CONSIDER PROMULGATING THIS HACK member container.QueryValue<'T>(sqlQuery : string) = let query : seq<'T> = failwith "TODO translate" //container.ReadItemAsync(sqlQuery) :> _ From fa23d1673ea3ce3f4e08edf743e22627a5ff0571 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Tue, 11 Feb 2020 13:44:45 +0000 Subject: [PATCH 06/71] Up to L365 --- src/Equinox.Cosmos/Cosmos.fs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index ebb5024d4..0d5c71e43 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -351,10 +351,10 @@ module private MicrosoftAzureCosmosWrappers = | _ -> None // CosmosDB Error HttpStatusCode extractor let (|CosmosStatusCode|) (e : CosmosException) = - e.StatusCode + e.Response.Status type ReadResult<'T> = Found of 'T | NotFound | NotModified - type Container with + type Azure.Cosmos.CosmosContainer with member container.TryReadItem(partitionKey : PartitionKey, documentId : string, ?options : ItemRequestOptions): Async> = async { let options = defaultArg options null let! 
ct = Async.CancellationToken @@ -364,8 +364,8 @@ module private MicrosoftAzureCosmosWrappers = // NB `.Document` will NRE if a IfNoneModified precondition triggers a NotModified result // else return item.RequestCharge, Found item.Resource - with CosmosException (CosmosStatusCode System.Net.HttpStatusCode.NotFound as e) -> return e.RequestCharge, NotFound - | CosmosException (CosmosStatusCode System.Net.HttpStatusCode.NotModified as e) -> return e.RequestCharge, NotModified + with CosmosException (CosmosStatusCode 404 as e) -> return e.RequestCharge, NotFound + | CosmosException (CosmosStatusCode 304 as e) -> return e.RequestCharge, NotModified // NB while the docs suggest you may see a 412, the NotModified in the body of the try/with is actually what happens | CosmosException (CosmosStatusCode System.Net.HttpStatusCode.PreconditionFailed as e) -> return e.RequestCharge, NotModified } From bbd90c6644c247050e9c238dce72bf465e498274 Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Tue, 11 Feb 2020 17:47:59 -0500 Subject: [PATCH 07/71] Correctly retrieve request charge from Azure.Core response types; Switch Container to CosmosContainer and Database to CosmosDatabase --- src/Equinox.Cosmos/Cosmos.fs | 68 ++++++++++++++++++++---------------- 1 file changed, 38 insertions(+), 30 deletions(-) diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index 0d5c71e43..4affdb600 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -354,6 +354,13 @@ module private MicrosoftAzureCosmosWrappers = e.Response.Status type ReadResult<'T> = Found of 'T | NotFound | NotModified + + type Azure.Core.ResponseHeaders with + member headers.GetRequestCharge () = + match headers.TryGetValue("x-ms-request-charge") with + | true, charge -> float charge + | _ -> 0. 
+ type Azure.Cosmos.CosmosContainer with member container.TryReadItem(partitionKey : PartitionKey, documentId : string, ?options : ItemRequestOptions): Async> = async { let options = defaultArg options null @@ -363,11 +370,12 @@ module private MicrosoftAzureCosmosWrappers = // if item.StatusCode = System.Net.HttpStatusCode.NotModified then return item.RequestCharge, NotModified // NB `.Document` will NRE if a IfNoneModified precondition triggers a NotModified result // else - return item.RequestCharge, Found item.Resource - with CosmosException (CosmosStatusCode 404 as e) -> return e.RequestCharge, NotFound - | CosmosException (CosmosStatusCode 304 as e) -> return e.RequestCharge, NotModified + + return item.GetRawResponse().Headers.GetRequestCharge(), Found item.Value + with CosmosException (CosmosStatusCode 404 as e) -> return e.Response.Headers.GetRequestCharge(), NotFound + | CosmosException (CosmosStatusCode 304 as e) -> return e.Response.Headers.GetRequestCharge(), NotModified // NB while the docs suggest you may see a 412, the NotModified in the body of the try/with is actually what happens - | CosmosException (CosmosStatusCode System.Net.HttpStatusCode.PreconditionFailed as e) -> return e.RequestCharge, NotModified } + | CosmosException (CosmosStatusCode sc as e) when sc = int System.Net.HttpStatusCode.PreconditionFailed -> return e.Response.Headers.GetRequestCharge(), NotModified } module Sync = // NB don't nest in a private module, or serialization will fail miserably ;) @@ -446,15 +454,15 @@ function sync(req, expIndex, expEtag) { | ConflictUnknown of Position type [] Exp = Version of int64 | Etag of string | Any - let private run (container : Container, stream : string) (exp, req: Tip) + let private run (container : CosmosContainer, stream : string) (exp, req: Tip) : Async = async { let ep = match exp with Exp.Version ev -> Position.fromI ev | Exp.Etag et -> Position.fromEtag et | Exp.Any -> Position.fromAppendAtEnd let! 
ct = Async.CancellationToken let args = [| box req; box ep.index; box (Option.toObj ep.etag)|] let! (res : Scripts.StoredProcedureExecuteResponse) = container.Scripts.ExecuteStoredProcedureAsync(sprocName, PartitionKey stream, args, cancellationToken = ct) |> Async.AwaitTaskCorrect - let newPos = { index = res.Resource.n; etag = Option.ofObj res.Resource.etag } - return res.RequestCharge, res.Resource.conflicts |> function + let newPos = { index = res.Value.n; etag = Option.ofObj res.Value.etag } + return res.GetRawResponse().Headers.GetRequestCharge(), res.Value.conflicts |> function | null -> Result.Written newPos | [||] when newPos.index = 0L -> Result.Conflict (newPos, Array.empty) | [||] -> Result.ConflictUnknown newPos @@ -502,10 +510,10 @@ function sync(req, expIndex, expEtag) { module Initialization = type [] Provisioning = Container of rus: int | Database of rus: int - let adjustOfferC (c:Container) rus = async { + let adjustOfferC (c: CosmosContainer) rus = async { let! ct = Async.CancellationToken let! _ = c.ReplaceThroughputAsync(rus, cancellationToken = ct) |> Async.AwaitTaskCorrect in () } - let adjustOfferD (d:Database) rus = async { + let adjustOfferD (d: CosmosDatabase) rus = async { let! ct = Async.CancellationToken let! _ = d.ReplaceThroughputAsync(rus, cancellationToken = ct) |> Async.AwaitTaskCorrect in () } let private createDatabaseIfNotExists (client:CosmosClient) dName maybeRus = async { @@ -519,11 +527,11 @@ function sync(req, expIndex, expEtag) { do! adjustOfferD db rus | Provisioning.Container _ -> let! _ = createDatabaseIfNotExists client dName None in () } - let private createContainerIfNotExists (d:Database) (cp:ContainerProperties) maybeRus = async { + let private createContainerIfNotExists (d: CosmosDatabase) (cp:ContainerProperties) maybeRus = async { let! ct = Async.CancellationToken let! 
c = d.CreateContainerIfNotExistsAsync(cp, throughput=Option.toNullable maybeRus, cancellationToken=ct) |> Async.AwaitTaskCorrect return c.Container } - let private createOrProvisionContainer (d:Database) (cp:ContainerProperties) mode = async { + let private createOrProvisionContainer (d: CosmosDatabase) (cp:ContainerProperties) mode = async { match mode with | Provisioning.Database _ -> return! createContainerIfNotExists d cp None @@ -531,13 +539,13 @@ function sync(req, expIndex, expEtag) { let! c = createContainerIfNotExists d cp (Some rus) do! adjustOfferC c rus return c } - let private createStoredProcIfNotExists (c:Container) (name, body): Async = async { + let private createStoredProcIfNotExists (c: CosmosContainer) (name, body): Async = async { try let! r = c.Scripts.CreateStoredProcedureAsync(Scripts.StoredProcedureProperties(id=name, body=body)) |> Async.AwaitTaskCorrect - return r.RequestCharge - with CosmosException ((CosmosStatusCode sc) as e) when sc = System.Net.HttpStatusCode.Conflict -> return e.RequestCharge } + return r.GetRawResponse().Headers.GetRequestCharge() + with CosmosException ((CosmosStatusCode sc) as e) when sc = int System.Net.HttpStatusCode.Conflict -> return e.Response.Headers.GetRequestCharge() } let private mkContainerProperties containerName partitionKeyFieldName = ContainerProperties(id = containerName, partitionKeyPath = sprintf "/%s" partitionKeyFieldName) - let private createBatchAndTipContainerIfNotExists (client: CosmosClient) (dName,cName) mode : Async = + let private createBatchAndTipContainerIfNotExists (client: CosmosClient) (dName,cName) mode : Async = let def = mkContainerProperties cName Batch.PartitionKeyField def.IndexingPolicy.IndexingMode <- IndexingMode.Consistent def.IndexingPolicy.Automatic <- true @@ -552,7 +560,7 @@ function sync(req, expIndex, expEtag) { match log with | None -> () | Some log -> log.Information("Created stored procedure {sprocId} in {ms}ms rc={ru}", sprocName, (let e = t.Elapsed in 
e.TotalMilliseconds), ru) } - let private createAuxContainerIfNotExists (client: CosmosClient) (dName,cName) mode : Async = + let private createAuxContainerIfNotExists (client: CosmosClient) (dName,cName) mode : Async = let def = mkContainerProperties cName "id" // as per Cosmos team, Partition Key must be "/id" // TL;DR no indexing of any kind; see https://github.com/Azure/azure-documentdb-changefeedprocessor-dotnet/issues/142 def.IndexingPolicy.Automatic <- false @@ -570,10 +578,10 @@ function sync(req, expIndex, expEtag) { return! createAuxContainerIfNotExists client (dName,cName) mode } module internal Tip = - let private get (container : Container, stream : string) (maybePos: Position option) = + let private get (container : CosmosContainer, stream : string) (maybePos: Position option) = let ro = match maybePos with Some { etag=Some etag } -> ItemRequestOptions(IfNoneMatchEtag=etag) | _ -> null container.TryReadItem(PartitionKey stream, Tip.WellKnownDocumentId, ro) - let private loggedGet (get : Container * string -> Position option -> Async<_>) (container,stream) (maybePos: Position option) (log: ILogger) = async { + let private loggedGet (get : CosmosContainer * string -> Position option -> Async<_>) (container,stream) (maybePos: Position option) (log: ILogger) = async { let log = log |> Log.prop "stream" stream let! 
t, (ru, res : ReadResult) = get (container,stream) maybePos |> Stopwatch.Time let log bytes count (f : Log.Measurement -> _) = log |> Log.event (f { stream = stream; interval = t; bytes = bytes; count = count; ru = ru }) @@ -602,7 +610,7 @@ module internal Tip = module internal Query = open FSharp.Control - let private mkQuery (container : Container, stream: string) maxItems (direction: Direction) startPos : FeedIterator= + let private mkQuery (container : CosmosContainer, stream: string) maxItems (direction: Direction) startPos : FeedIterator= let query = let root = sprintf "SELECT c.id, c.i, c._etag, c.n, c.e FROM c WHERE c.id!=\"%s\"" Tip.WellKnownDocumentId let tail = sprintf "ORDER BY c.i %s" (if direction = Direction.Forward then "ASC" else "DESC") @@ -749,12 +757,12 @@ module internal Tip = let t = StopwatchInterval(startTicks, endTicks) log |> logQuery direction maxItems stream t (!responseCount,allSlices.ToArray()) -1L ru } -type [] Token = { container: Container; stream: string; pos: Position } +type [] Token = { container: CosmosContainer; stream: string; pos: Position } module Token = let create (container,stream) pos : StreamToken = { value = box { container = container; stream = stream; pos = pos } version = pos.index } - let (|Unpack|) (token: StreamToken) : Container*string*Position = let t = unbox token.value in t.container,t.stream,t.pos + let (|Unpack|) (token: StreamToken) : CosmosContainer*string*Position = let t = unbox token.value in t.container,t.stream,t.pos let supersedes (Unpack (_,_,currentPos)) (Unpack (_,_,xPos)) = let currentVersion, newVersion = currentPos.index, xPos.index let currentETag, newETag = currentPos.etag, xPos.etag @@ -887,14 +895,14 @@ type private Category<'event, 'state, 'context>(gateway : Gateway, codec : IEven module Caching = /// Forwards all state changes in all streams of an ICategory to a `tee` function - type CategoryTee<'event, 'state, 'context>(inner: ICategory<'event, 'state, Container*string,'context>, tee 
: string -> StreamToken * 'state -> Async) = + type CategoryTee<'event, 'state, 'context>(inner: ICategory<'event, 'state, CosmosContainer*string,'context>, tee : string -> StreamToken * 'state -> Async) = let intercept streamName tokenAndState = async { let! _ = tee streamName tokenAndState return tokenAndState } let loadAndIntercept load streamName = async { let! tokenAndState = load return! intercept streamName tokenAndState } - interface ICategory<'event, 'state, Container*string, 'context> with + interface ICategory<'event, 'state, CosmosContainer*string, 'context> with member __.Load(log, (container,streamName), opt) : Async = loadAndIntercept (inner.Load(log, (container,streamName), opt)) streamName member __.TrySync(log : ILogger, (Token.Unpack (_container,stream,_) as streamToken), state, events : 'event list, context) @@ -910,8 +918,8 @@ module Caching = (cache : ICache) (prefix : string) (slidingExpiration : TimeSpan) - (category : ICategory<'event, 'state, Container*string, 'context>) - : ICategory<'event, 'state, Container*string, 'context> = + (category : ICategory<'event, 'state, CosmosContainer*string, 'context>) + : ICategory<'event, 'state, CosmosContainer*string, 'context> = let mkCacheEntry (initialToken : StreamToken, initialState : 'state) = new CacheEntry<'state>(initialToken, initialState, Token.supersedes) let options = CacheItemOptions.RelativeExpiration slidingExpiration let addOrUpdateSlidingExpirationCacheEntry streamName value = cache.UpdateIfNewer(prefix + streamName, options, mkCacheEntry value) @@ -924,7 +932,7 @@ type private Folder<'event, 'state, 'context> ?readCache) = let inspectUnfolds = match mapUnfolds with Choice1Of3 () -> false | _ -> true let batched log containerStream = category.Load inspectUnfolds containerStream fold initial isOrigin log - interface ICategory<'event, 'state, Container*string, 'context> with + interface ICategory<'event, 'state, CosmosContainer*string, 'context> with member __.Load(log, 
(container,streamName), opt): Async = match readCache with | None -> batched log (container,streamName) @@ -941,7 +949,7 @@ type private Folder<'event, 'state, 'context> | SyncResult.Written (token',state') -> return SyncResult.Written (token',state') } /// Holds Container state, coordinating initialization activities -type private ContainerWrapper(container : Container, ?initContainer : Container -> Async) = +type private ContainerWrapper(container : CosmosContainer, ?initContainer : CosmosContainer -> Async) = let initGuard = initContainer |> Option.map (fun init -> AsyncCacheCell(init container)) member __.Container = container @@ -956,7 +964,7 @@ type Containers(categoryAndIdToDatabaseContainerStream : string -> string -> str let genStreamName categoryName streamId = if categoryName = null then streamId else sprintf "%s-%s" categoryName streamId Containers(fun categoryName streamId -> databaseId, containerId, genStreamName categoryName streamId) - member internal __.Resolve(client : CosmosClient, categoryName, id, init) : (Container*string) * (unit -> Async) option = + member internal __.Resolve(client : CosmosClient, categoryName, id, init) : (CosmosContainer*string) * (unit -> Async) option = let databaseId, containerName, streamName = categoryAndIdToDatabaseContainerStream categoryName id let init = match disableInitialization with Some true -> None | _ -> Some init let wrapped = wrappers.GetOrAdd((databaseId,containerName), fun (d,c) -> ContainerWrapper(client.GetContainer(d, c), ?initContainer = init)) @@ -972,7 +980,7 @@ type Context(gateway: Gateway, containers: Containers, [] ?log) = member __.Gateway = gateway member __.Containers = containers - member internal __.ResolveContainerStream(categoryName, id) : (Container*string) * (unit -> Async) option = + member internal __.ResolveContainerStream(categoryName, id) : (CosmosContainer*string) * (unit -> Async) option = containers.Resolve(gateway.Client, categoryName, id, init) [] @@ -1036,7 +1044,7 @@ type 
Resolver<'event, 'state, 'context>(context : Context, codec, fold, initial, | AccessStrategy.Custom (isOrigin,transmute) -> isOrigin, Choice3Of3 transmute let cosmosCat = Category<'event, 'state, 'context>(context.Gateway, codec) let folder = Folder<'event, 'state, 'context>(cosmosCat, fold, initial, isOrigin, mapUnfolds, ?readCache = readCacheOption) - let category : ICategory<_, _, Container*string, 'context> = + let category : ICategory<_, _, CosmosContainer*string, 'context> = match caching with | CachingStrategy.NoCaching -> folder :> _ | CachingStrategy.SlidingWindow(cache, window) -> From 84854841b70e22d8c1eb4c59fe05344f184918cb Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Wed, 12 Feb 2020 10:40:42 -0500 Subject: [PATCH 08/71] Migrate to new etag match predicate --- src/Equinox.Cosmos/Cosmos.fs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index 4affdb600..435c0ea6c 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -579,7 +579,7 @@ function sync(req, expIndex, expEtag) { module internal Tip = let private get (container : CosmosContainer, stream : string) (maybePos: Position option) = - let ro = match maybePos with Some { etag=Some etag } -> ItemRequestOptions(IfNoneMatchEtag=etag) | _ -> null + let ro = match maybePos with Some { etag=Some etag } -> ItemRequestOptions(IfNoneMatch=Nullable(Azure.ETag(etag))) | _ -> null container.TryReadItem(PartitionKey stream, Tip.WellKnownDocumentId, ro) let private loggedGet (get : CosmosContainer * string -> Position option -> Async<_>) (container,stream) (maybePos: Position option) (log: ILogger) = async { let log = log |> Log.prop "stream" stream From 65c27d60ac235a5c55fcdfe828dd2b334bad9f6b Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Wed, 12 Feb 2020 16:14:17 -0500 Subject: [PATCH 09/71] Migrate from FeedIterator to IAsyncEnumerable --- src/Equinox.Core/Infrastructure.fs | 1 + 
src/Equinox.Cosmos/Cosmos.fs | 118 ++++++++++++++++------------- tools/Equinox.Tool/Program.fs | 2 +- 3 files changed, 69 insertions(+), 52 deletions(-) diff --git a/src/Equinox.Core/Infrastructure.fs b/src/Equinox.Core/Infrastructure.fs index d73177d6f..f8d2a7df1 100755 --- a/src/Equinox.Core/Infrastructure.fs +++ b/src/Equinox.Core/Infrastructure.fs @@ -6,6 +6,7 @@ open FSharp.Control open System open System.Diagnostics open System.Threading.Tasks +open System.Threading type OAttribute = System.Runtime.InteropServices.OptionalAttribute type DAttribute = System.Runtime.InteropServices.DefaultParameterValueAttribute diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index 435c0ea6c..199ac0f93 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -2,6 +2,7 @@ open Equinox.Core open FsCodec +open Azure open Azure.Cosmos open Newtonsoft.Json open Serilog @@ -610,7 +611,7 @@ module internal Tip = module internal Query = open FSharp.Control - let private mkQuery (container : CosmosContainer, stream: string) maxItems (direction: Direction) startPos : FeedIterator= + let private mkQuery (container : CosmosContainer, stream: string) maxItems (direction: Direction) startPos : AsyncSeq> = let query = let root = sprintf "SELECT c.id, c.i, c._etag, c.n, c.e FROM c WHERE c.id!=\"%s\"" Tip.WellKnownDocumentId let tail = sprintf "ORDER BY c.i %s" (if direction = Direction.Forward then "ASC" else "DESC") @@ -620,40 +621,47 @@ module internal Tip = let cond = if direction = Direction.Forward then "c.n > @startPos" else "c.i < @startPos" QueryDefinition(sprintf "%s AND %s %s" root cond tail).WithParameter("@startPos", positionSoExclusiveWhenBackward) let qro = new QueryRequestOptions(PartitionKey = Nullable(PartitionKey stream), MaxItemCount=Nullable maxItems) - container.GetItemQueryIterator(query, requestOptions = qro) + container.GetItemQueryIterator(query, requestOptions = qro).AsPages() |> AsyncSeq.ofAsyncEnum // Unrolls the 
Batches in a response - note when reading backwards, the events are emitted in reverse order of index - let private handleResponse direction (streamName: string) startPos (query: FeedIterator) (log: ILogger) - : Async[] * Position option * float> = async { - let! ct = Async.CancellationToken - let! t, (res : FeedResponse) = query.ReadNextAsync(ct) |> Async.AwaitTaskCorrect |> Stopwatch.Time - let batches, ru = Array.ofSeq res, res.RequestCharge - let events = batches |> Seq.collect (fun b -> Enum.Events(b, startPos, direction)) |> Array.ofSeq - let (Log.BatchLen bytes), count = events, events.Length - let reqMetric : Log.Measurement = { stream = streamName; interval = t; bytes = bytes; count = count; ru = ru } - let log = let evt = Log.Response (direction, reqMetric) in log |> Log.event evt - let log = if (not << log.IsEnabled) Events.LogEventLevel.Debug then log else log |> Log.propEvents events - let index = if count = 0 then Nullable () else Nullable <| Seq.min (seq { for x in batches -> x.i }) - (log |> (match startPos with Some pos -> Log.propStartPos pos | None -> id) |> Log.prop "bytes" bytes) - .Information("EqxCosmos {action:l} {count}/{batches} {direction} {ms}ms i={index} rc={ru}", - "Response", count, batches.Length, direction, (let e = t.Elapsed in e.TotalMilliseconds), index, ru) - let maybePosition = batches |> Array.tryPick Position.tryFromBatch - return events, maybePosition, ru } - - let private run (log : ILogger) (readSlice: FeedIterator -> ILogger -> Async[] * Position option * float>) - (maxPermittedBatchReads: int option) - (query: FeedIterator) - : AsyncSeq[] * Position option * float> = + let private processNextPage direction (streamName: string) startPos (enumerator: IAsyncEnumerator>) (log: ILogger) + : Async[] * Position option * float>> = async { + let! 
t, res = enumerator.MoveNext() |> Stopwatch.Time + + return + res + |> Option.map (fun page -> + let batches, ru = Array.ofSeq page.Values, page.GetRawResponse().Headers.GetRequestCharge() + let events = batches |> Seq.collect (fun b -> Enum.Events(b, startPos, direction)) |> Array.ofSeq + let (Log.BatchLen bytes), count = events, events.Length + let reqMetric : Log.Measurement = { stream = streamName; interval = t; bytes = bytes; count = count; ru = ru } + let log = let evt = Log.Response (direction, reqMetric) in log |> Log.event evt + let log = if (not << log.IsEnabled) Events.LogEventLevel.Debug then log else log |> Log.propEvents events + let index = if count = 0 then Nullable () else Nullable <| Seq.min (seq { for x in batches -> x.i }) + (log |> (match startPos with Some pos -> Log.propStartPos pos | None -> id) |> Log.prop "bytes" bytes) + .Information("EqxCosmos {action:l} {count}/{batches} {direction} {ms}ms i={index} rc={ru}", + "Response", count, batches.Length, direction, (let e = t.Elapsed in e.TotalMilliseconds), index, ru) + let maybePosition = batches |> Array.tryPick Position.tryFromBatch + events, maybePosition, ru) } + + let private run (log : ILogger) (readNextPage: IAsyncEnumerator> -> ILogger -> Async[] * Position option * float>>) + (maxPermittedBatchReads: int option) + (query: AsyncSeq>) = + + let e = query.GetEnumerator() + let rec loop batchCount : AsyncSeq[] * Position option * float> = asyncSeq { match maxPermittedBatchReads with | Some mpbr when batchCount >= mpbr -> log.Information "batch Limit exceeded"; invalidOp "batch Limit exceeded" | _ -> () let batchLog = log |> Log.prop "batchIndex" batchCount - let! (slice : ITimelineEvent[] * Position option * float) = readSlice query batchLog - yield slice - if query.HasMoreResults then + let! (page : Option[] * Position option * float>) = readNextPage e batchLog + + if page |> Option.isSome then + yield page.Value yield! 
loop (batchCount + 1) } + loop 0 let private logQuery direction batchSize streamName interval (responsesCount, events : ITimelineEvent[]) n (ru: float) (log : ILogger) = @@ -702,11 +710,11 @@ module internal Tip = |> AsyncSeq.toArrayAsync return events, maybeTipPos, ru } let query = mkQuery (container,stream) maxItems direction startPos - let pullSlice = handleResponse direction stream startPos - let retryingLoggingReadSlice query = Log.withLoggedRetries retryPolicy "readAttempt" (pullSlice query) + let readPage = processNextPage direction stream startPos + let retryingLoggingReadPage e = Log.withLoggedRetries retryPolicy "readAttempt" (readPage e) let log = log |> Log.prop "batchSize" maxItems |> Log.prop "stream" stream let readlog = log |> Log.prop "direction" direction - let batches : AsyncSeq[] * Position option * float> = run readlog retryingLoggingReadSlice maxRequests query + let batches : AsyncSeq[] * Position option * float> = run readlog retryingLoggingReadPage maxRequests query let! t, (events, maybeTipPos, ru) = mergeBatches log batches |> Stopwatch.Time let raws, decoded = (Array.map fst events), (events |> Seq.choose snd |> Array.ofSeq) let pos = match maybeTipPos with Some p -> p | None -> Position.fromMaxIndex raws @@ -719,14 +727,18 @@ module internal Tip = : AsyncSeq<'event[]> = asyncSeq { let responseCount = ref 0 let query = mkQuery (container,stream) maxItems direction startPos - let pullSlice = handleResponse direction stream startPos - let retryingLoggingReadSlice query = Log.withLoggedRetries retryPolicy "readAttempt" (pullSlice query) + let readPage = processNextPage direction stream startPos + let retryingLoggingReadPage e = Log.withLoggedRetries retryPolicy "readAttempt" (readPage e) let log = log |> Log.prop "batchSize" maxItems |> Log.prop "stream" stream let mutable ru = 0. 
- let allSlices = ResizeArray() + let allEvents = ResizeArray() let startTicks = System.Diagnostics.Stopwatch.GetTimestamp() + + let e = query.GetEnumerator() + try let readlog = log |> Log.prop "direction" direction let mutable ok = true + while ok do incr responseCount @@ -735,27 +747,31 @@ module internal Tip = | _ -> () let batchLog = readlog |> Log.prop "batchIndex" !responseCount - let! (slice,_pos,rus) = retryingLoggingReadSlice query batchLog - ru <- ru + rus - allSlices.AddRange(slice) - - let acc = ResizeArray() - for x in slice do - match tryDecode x with - | Some e when isOrigin e -> - let used, residual = slice |> calculateUsedVersusDroppedPayload x.Index - log.Information("EqxCosmos Stop stream={stream} at={index} {case} used={used} residual={residual}", - stream, x.Index, x.EventType, used, residual) - ok <- false - acc.Add e - | Some e -> acc.Add e - | None -> () - yield acc.ToArray() - ok <- ok && query.HasMoreResults + let! page = retryingLoggingReadPage e batchLog + + match page with + | Some (evts, _pos, rus) -> + ru <- ru + rus + allEvents.AddRange(evts) + + let acc = ResizeArray() + for x in evts do + match tryDecode x with + | Some e when isOrigin e -> + let used, residual = evts |> calculateUsedVersusDroppedPayload x.Index + log.Information("EqxCosmos Stop stream={stream} at={index} {case} used={used} residual={residual}", + stream, x.Index, x.EventType, used, residual) + ok <- false + acc.Add e + | Some e -> acc.Add e + | None -> () + + yield acc.ToArray() + | _ -> ok <- false finally let endTicks = System.Diagnostics.Stopwatch.GetTimestamp() let t = StopwatchInterval(startTicks, endTicks) - log |> logQuery direction maxItems stream t (!responseCount,allSlices.ToArray()) -1L ru } + log |> logQuery direction maxItems stream t (!responseCount,allEvents.ToArray()) -1L ru } type [] Token = { container: CosmosContainer; stream: string; pos: Position } module Token = @@ -795,7 +811,7 @@ type Connection(client: CosmosClient, []?readRetryPolicy: 
IRetryPoli member __.QueryRetryPolicy = readRetryPolicy member __.WriteRetryPolicy = writeRetryPolicy -/// Defines the policies in force regarding how to a) split up calls b) limit the number of events per slice +/// Defines the policies in force regarding how to a) split up calls b) limit the number of events per page type BatchingPolicy ( // Max items to request in query response. Defaults to 10. []?defaultMaxItems : int, diff --git a/tools/Equinox.Tool/Program.fs b/tools/Equinox.Tool/Program.fs index 0673965c9..72bb5aa7a 100644 --- a/tools/Equinox.Tool/Program.fs +++ b/tools/Equinox.Tool/Program.fs @@ -334,7 +334,7 @@ module SqlInit = | _ -> failwith "please specify a `ms`,`my` or `pg` endpoint" } module CosmosStats = - type Azure.Cosmos.Container with + type Azure.Cosmos.CosmosContainer with // NB DO NOT CONSIDER PROMULGATING THIS HACK member container.QueryValue<'T>(sqlQuery : string) = let query : seq<'T> = failwith "TODO translate" //container.ReadItemAsync(sqlQuery) :> _ From 4aa33198733e997bfcb0c9d96500398d089d7e8c Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Tue, 18 Feb 2020 12:18:12 -0500 Subject: [PATCH 10/71] Only set gateway options when in Gateway mode --- src/Equinox.Cosmos/Cosmos.fs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index 199ac0f93..b9ed55351 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -1146,7 +1146,7 @@ type Connector | None | Some ConnectionMode.Gateway | Some _ (* enum total match :( *) -> co.ConnectionMode <- ConnectionMode.Gateway // default; only supports Https match gatewayModeMaxConnectionLimit with | Some _ when co.ConnectionMode = ConnectionMode.Direct -> invalidArg "gatewayModeMaxConnectionLimit" "Not admissible in Direct mode" - | x -> co.GatewayModeMaxConnectionLimit <- defaultArg x 1000 + | x -> if co.ConnectionMode = ConnectionMode.Gateway then co.GatewayModeMaxConnectionLimit <- defaultArg x 1000 
match defaultConsistencyLevel with | Some x -> co.ConsistencyLevel <- Nullable x | None -> () From 26fd82e894730622367ce103e92fc781bb56d648 Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Tue, 18 Feb 2020 12:50:27 -0500 Subject: [PATCH 11/71] Add and use NewtonsoftJsonSerializer --- src/Equinox.Cosmos/Cosmos.fs | 2 +- src/Equinox.Cosmos/Equinox.Cosmos.fsproj | 1 + .../NewtonsoftJsonSerializer.fs | 39 +++++++++++++++++++ 3 files changed, 41 insertions(+), 1 deletion(-) create mode 100644 src/Equinox.Cosmos/NewtonsoftJsonSerializer.fs diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index b9ed55351..8b2c68858 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -1140,7 +1140,7 @@ type Connector /// ClientOptions for this Connector as configured member val ClientOptions = let maxAttempts, maxWait, timeout = Nullable maxRetryAttemptsOnRateLimitedRequests, Nullable maxRetryWaitTimeOnRateLimitedRequests, requestTimeout - let co = CosmosClientOptions(MaxRetryAttemptsOnRateLimitedRequests = maxAttempts, MaxRetryWaitTimeOnRateLimitedRequests = maxWait, RequestTimeout = timeout) + let co = CosmosClientOptions(MaxRetryAttemptsOnRateLimitedRequests = maxAttempts, MaxRetryWaitTimeOnRateLimitedRequests = maxWait, RequestTimeout = timeout, Serializer = NewtonsoftJsonSerializer()) match mode with | Some ConnectionMode.Direct -> co.ConnectionMode <- ConnectionMode.Direct | None | Some ConnectionMode.Gateway | Some _ (* enum total match :( *) -> co.ConnectionMode <- ConnectionMode.Gateway // default; only supports Https diff --git a/src/Equinox.Cosmos/Equinox.Cosmos.fsproj b/src/Equinox.Cosmos/Equinox.Cosmos.fsproj index 3cf2594ec..d5240fb1d 100644 --- a/src/Equinox.Cosmos/Equinox.Cosmos.fsproj +++ b/src/Equinox.Cosmos/Equinox.Cosmos.fsproj @@ -10,6 +10,7 @@ + diff --git a/src/Equinox.Cosmos/NewtonsoftJsonSerializer.fs b/src/Equinox.Cosmos/NewtonsoftJsonSerializer.fs new file mode 100644 index 000000000..71bc33a10 --- /dev/null 
+++ b/src/Equinox.Cosmos/NewtonsoftJsonSerializer.fs @@ -0,0 +1,39 @@ +namespace Equinox.Cosmos.Store + +open System.IO; +open System.Text; +open Azure.Cosmos.Serialization; +open Newtonsoft.Json; +open Newtonsoft.Json.Serialization; + +type NewtonsoftJsonSerializer () = + inherit CosmosSerializer () + + let encoding = new UTF8Encoding(false, true) + let serializer = JsonSerializer.Create() + + override __.FromStream<'T> (stream: Stream): 'T = + use stream = stream + + if typeof.IsAssignableFrom(typeof<'T>) then + stream :> obj :?> 'T + else + use streamReader = new StreamReader(stream) + use jsonReader = new JsonTextReader(streamReader) + serializer.Deserialize<'T>(jsonReader) + + override __.ToStream<'T> (input: 'T): Stream = + let payload = new MemoryStream() + + ( + use streamWriter = new StreamWriter(payload, encoding = encoding, bufferSize = 1024, leaveOpen = true) + use jsonWriter = new JsonTextWriter(streamWriter) + + jsonWriter.Formatting <- Formatting.None + serializer.Serialize(jsonWriter, input) + jsonWriter.Flush() + streamWriter.Flush() + ) + + payload.Position <- 0L + payload :> Stream From f2a063393e044d1cc8c4b85d34a8db50afe065b0 Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Tue, 18 Feb 2020 12:50:39 -0500 Subject: [PATCH 12/71] Rearrange files --- src/Equinox.Cosmos/Equinox.Cosmos.fsproj | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Equinox.Cosmos/Equinox.Cosmos.fsproj b/src/Equinox.Cosmos/Equinox.Cosmos.fsproj index d5240fb1d..902cfcd2f 100644 --- a/src/Equinox.Cosmos/Equinox.Cosmos.fsproj +++ b/src/Equinox.Cosmos/Equinox.Cosmos.fsproj @@ -10,8 +10,8 @@ - + From 482b797e98ea0156b0d0736b96f43ca6006c7bab Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Tue, 18 Feb 2020 13:50:35 -0500 Subject: [PATCH 13/71] Fix test RU limits --- src/Equinox.Cosmos/Cosmos.fs | 2 +- .../CosmosCoreIntegration.fs | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/Equinox.Cosmos/Cosmos.fs 
b/src/Equinox.Cosmos/Cosmos.fs index 8b2c68858..334653586 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -359,7 +359,7 @@ module private MicrosoftAzureCosmosWrappers = type Azure.Core.ResponseHeaders with member headers.GetRequestCharge () = match headers.TryGetValue("x-ms-request-charge") with - | true, charge -> float charge + | true, charge when not <| String.IsNullOrEmpty charge -> float charge | _ -> 0. type Azure.Cosmos.CosmosContainer with diff --git a/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs b/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs index 1e0e7c292..e7e726e9d 100644 --- a/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs +++ b/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs @@ -238,7 +238,7 @@ type Tests(testOutputHelper) = // 2 items atm test <@ [EqxAct.ResponseForward; EqxAct.ResponseForward; EqxAct.QueryForward] = capture.ExternalCalls @> - verifyRequestChargesMax 6 } // 5.77 + verifyRequestChargesMax 9 } // 8.51 // WAS 6 // 5.77 [] let ``get Lazy`` (TestStream streamName) = Async.RunSynchronously <| async { @@ -258,7 +258,7 @@ type Tests(testOutputHelper) = | _ -> None // validate that, despite only requesting max 1 item, we only needed one trip (which contained only one item) [1,1] =! capture.ChooseCalls queryRoundTripsAndItemCounts - verifyRequestChargesMax 4 // 3.02 // WAS 3 // 2.97 + verifyRequestChargesMax 6 // 5.74 // WAS 4 // 3.02 // WAS 3 // 2.97 } (* Backward *) @@ -279,7 +279,7 @@ type Tests(testOutputHelper) = verifyCorrectEventsBackward 4L expected res test <@ [EqxAct.ResponseBackward; EqxAct.QueryBackward] = capture.ExternalCalls @> - verifyRequestChargesMax 4 // 3.04 // WAS 3 + verifyRequestChargesMax 6 // 5.75 // WAS 4 // 3.04 // WAS 3 } [] @@ -323,5 +323,5 @@ type Tests(testOutputHelper) = | EqxEvent (Equinox.Cosmos.Store.Log.Event.Query (Equinox.Cosmos.Store.Direction.Backward, responses, { count = c })) -> Some (responses,c) | _ -> None [1,5] =! 
capture.ChooseCalls queryRoundTripsAndItemCounts - verifyRequestChargesMax 4 // 3.04 // WAS 3 // 2.98 - } \ No newline at end of file + verifyRequestChargesMax 6 // 5.76 // WAS 4 // 3.04 // WAS 3 // 2.98 + } From a36e7425ed630ca8b760fab0fe56c1f8e7892bdf Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Wed, 19 Feb 2020 04:07:38 +0000 Subject: [PATCH 14/71] Remove Microsoft.Azure.Cosmos.Direct ref --- samples/Infrastructure/Infrastructure.fsproj | 1 - 1 file changed, 1 deletion(-) diff --git a/samples/Infrastructure/Infrastructure.fsproj b/samples/Infrastructure/Infrastructure.fsproj index e27d6a039..125a51a91 100644 --- a/samples/Infrastructure/Infrastructure.fsproj +++ b/samples/Infrastructure/Infrastructure.fsproj @@ -35,7 +35,6 @@ - \ No newline at end of file From f09a73179711a5215b99cf534fc9ba76b6936b44 Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Tue, 25 Feb 2020 13:02:03 -0500 Subject: [PATCH 15/71] Change from byte[] payloads to STJ.JsonElement; Rewrite converters using STJ --- src/Equinox.Cosmos/Cosmos.fs | 174 +++++++++++++++++------------------ 1 file changed, 83 insertions(+), 91 deletions(-) diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index 334653586..abc1a64f7 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -4,13 +4,14 @@ open Equinox.Core open FsCodec open Azure open Azure.Cosmos -open Newtonsoft.Json open Serilog open System open System.IO +open System.Text.Json +open System.Text.Json.Serialization /// A single Domain Event from the array held in a Batch -type [] +type [] // TODO for STJ v5: All fields required unless explicitly optional Event = { /// Creation datetime (as opposed to system-defined _lastUpdated which is touched by triggers, replication etc.) 
t: DateTimeOffset // ISO 8601 @@ -19,24 +20,19 @@ type [] c: string // required /// Event body, as UTF-8 encoded json ready to be injected into the Json being rendered for CosmosDB - [)>] - [] - d: byte[] // Required, but can be null so Nullary cases can work + d: JsonElement // TODO for STJ v5: Required, but can be null so Nullary cases can work - /// Optional metadata, as UTF-8 encoded json, ready to emit directly (null, not written if missing) - [)>] - [] - m: byte[] + /// Optional metadata, as UTF-8 encoded json, ready to emit directly + m: JsonElement // TODO for STJ v5: Optional, not serialized if missing - /// Optional correlationId (can be null, not written if missing) - [] - correlationId : string + /// Optional correlationId + correlationId : string // TODO for STJ v5: Optional, not serialized if missing - /// Optional causationId (can be null, not written if missing) - [] - causationId : string } + /// Optional causationId + causationId : string // TODO for STJ v5: Optional, not serialized if missing + } - interface IEventData with + interface IEventData with member __.EventType = __.c member __.Data = __.d member __.Meta = __.m @@ -46,12 +42,11 @@ type [] member __.Timestamp = __.t /// A 'normal' (frozen, not Tip) Batch of Events (without any Unfolds) -type [] +type [] // TODO for STJ v5: All fields required unless explicitly optional Batch = { /// CosmosDB-mandated Partition Key, must be maintained within the document /// Not actually required if running in single partition mode, but for simplicity, we always write it - [] // Not requested in queries - p: string // "{streamName}" + p: string // "{streamName}" TODO for STJ v5: Optional, not requested in queries /// CosmosDB-mandated unique row key; needs to be unique within any partition it is maintained; must be string /// At the present time, one can't perform an ORDER BY on this field, hence we also have i shadowing it @@ -61,8 +56,7 @@ type [] /// When we read, we need to capture the value so we can 
retain it for caching purposes /// NB this is not relevant to fill in when we pass it to the writing stored procedure /// as it will do: 1. read 2. merge 3. write merged version contingent on the _etag not having changed - [] - _etag: string + _etag: string // TODO for STJ v5: Optional, not serialized if missing /// base 'i' value for the Events held herein i: int64 // {index} @@ -90,61 +84,57 @@ type Unfold = c: string // required /// Event body - Json -> UTF-8 -> Deflate -> Base64 - [)>] - d: byte[] // required + [)>] + d: JsonElement // required /// Optional metadata, same encoding as `d` (can be null; not written if missing) - [)>] - [] - m: byte[] } // optional + [)>] + m: JsonElement // TODO for STJ v5: Optional, not serialized if missing + } /// Manages zipping of the UTF-8 json bytes to make the index record minimal from the perspective of the writer stored proc /// Only applied to snapshots in the Tip -and Base64DeflateUtf8JsonConverter() = - inherit JsonConverter() - let pickle (input : byte[]) : string = - if input = null then null else - - use output = new MemoryStream() - use compressor = new System.IO.Compression.DeflateStream(output, System.IO.Compression.CompressionLevel.Optimal) - compressor.Write(input,0,input.Length) - compressor.Close() - System.Convert.ToBase64String(output.ToArray()) - let unpickle str : byte[] = - if str = null then null else - - let compressedBytes = System.Convert.FromBase64String str - use input = new MemoryStream(compressedBytes) - use decompressor = new System.IO.Compression.DeflateStream(input, System.IO.Compression.CompressionMode.Decompress) - use output = new MemoryStream() - decompressor.CopyTo(output) - output.ToArray() - - override __.CanConvert(objectType) = - typeof.Equals(objectType) - override __.ReadJson(reader, _, _, serializer) = - //( if reader.TokenType = JsonToken.Null then null else - serializer.Deserialize(reader, typedefof) :?> string |> unpickle |> box - override __.WriteJson(writer, value, 
serializer) = - let pickled = value |> unbox |> pickle - serializer.Serialize(writer, pickled) +and JsonCompressedBase64Converter() = + inherit JsonConverter() + + override __.Read (reader, _typeToConvert, options) = + if reader.TokenType = JsonTokenType.Null then + JsonSerializer.Deserialize(&reader, options) + else + let compressedBytes = reader.GetBytesFromBase64() + use input = new MemoryStream(compressedBytes) + use decompressor = new System.IO.Compression.DeflateStream(input, System.IO.Compression.CompressionMode.Decompress) + use output = new MemoryStream() + decompressor.CopyTo(output) + JsonSerializer.Deserialize(ReadOnlySpan.op_Implicit(output.ToArray()), options) + + override __.Write (writer, value, options) = + if value.ValueKind = JsonValueKind.Null then + value.WriteTo(writer) + else + let input = System.Text.Encoding.UTF8.GetBytes(value.GetRawText()) + use output = new MemoryStream() + use compressor = new System.IO.Compression.DeflateStream(output, System.IO.Compression.CompressionLevel.Optimal) + compressor.Write(input, 0, input.Length) + compressor.Close() + writer.WriteBase64StringValue(ReadOnlySpan.op_Implicit(output.ToArray())) /// The special-case 'Pending' Batch Format used to read the currently active (and mutable) document /// Stored representation has the following diffs vs a 'normal' (frozen/completed) Batch: a) `id` = `-1` b) contains unfolds (`u`) /// NB the type does double duty as a) model for when we read it b) encoding a batch being sent to the stored proc -type [] +type [] // TODO for STJ v5: All fields required unless explicitly optional Tip = - { [] // Not requested in queries + { /// Partition key, as per Batch - p: string // "{streamName}" + p: string // "{streamName}" TODO for STJ v5: Optional, not requested in queries + /// Document Id within partition, as per Batch id: string // "{-1}" - Well known IdConstant used while this remains the pending batch /// When we read, we need to capture the value so we can retain it for 
caching purposes /// NB this is not relevant to fill in when we pass it to the writing stored procedure /// as it will do: 1. read 2. merge 3. write merged version contingent on the _etag not having changed - [] - _etag: string + _etag: string // TODO for STJ v5: Optional, not serialized if missing /// base 'i' value for the Events held herein i: int64 @@ -172,7 +162,7 @@ module internal Position = let fromAppendAtEnd = fromI -1L // sic - needs to yield -1 let fromEtag (value : string) = { fromI -2L with etag = Some value } /// NB very inefficient compared to FromDocument or using one already returned to you - let fromMaxIndex (xs: ITimelineEvent[]) = + let fromMaxIndex (xs: ITimelineEvent[]) = if Array.isEmpty xs then fromKnownEmpty else fromI (1L + Seq.max (seq { for x in xs -> x.Index })) /// Create Position from Tip record context (facilitating 1 RU reads) @@ -186,9 +176,9 @@ module internal Position = type Direction = Forward | Backward override this.ToString() = match this with Forward -> "Forward" | Backward -> "Backward" type internal Enum() = - static member internal Events(b: Tip) : ITimelineEvent seq = + static member internal Events(b: Tip) : ITimelineEvent seq = b.e |> Seq.mapi (fun offset x -> FsCodec.Core.TimelineEvent.Create(b.i + int64 offset, x.c, x.d, x.m, Guid.Empty, x.correlationId, x.causationId, x.t)) - static member Events(i: int64, e: Event[], startPos : Position option, direction) : ITimelineEvent seq = seq { + static member Events(i: int64, e: Event[], startPos : Position option, direction) : ITimelineEvent seq = seq { // If we're loading from a nominated position, we need to discard items in the batch before/after the start on the start page let isValidGivenStartPos i = match startPos with @@ -203,9 +193,9 @@ type internal Enum() = static member internal Events(b: Batch, startPos, direction) = Enum.Events(b.i, b.e, startPos, direction) |> if direction = Direction.Backward then System.Linq.Enumerable.Reverse else id - static member 
Unfolds(xs: Unfold[]) : ITimelineEvent seq = seq { + static member Unfolds(xs: Unfold[]) : ITimelineEvent seq = seq { for x in xs -> FsCodec.Core.TimelineEvent.Create(x.i, x.c, x.d, x.m, Guid.Empty, null, null, x.t, isUnfold=true) } - static member EventsAndUnfolds(x: Tip): ITimelineEvent seq = + static member EventsAndUnfolds(x: Tip): ITimelineEvent seq = Enum.Events x |> Seq.append (Enum.Unfolds x.u) // where Index is equal, unfolds get delivered after the events so the fold semantics can be 'idempotent' @@ -232,8 +222,8 @@ module Log = | SyncResync of Measurement | SyncConflict of Measurement let prop name value (log : ILogger) = log.ForContext(name, value) - let propData name (events: #IEventData seq) (log : ILogger) = - let render = function null -> "null" | bytes -> System.Text.Encoding.UTF8.GetString bytes + let propData name (events: #IEventData seq) (log : ILogger) = + let render = function (j: JsonElement) when j.ValueKind <> JsonValueKind.Null -> j.GetRawText() | _ -> "null" let items = seq { for e in events do yield sprintf "{\"%s\": %s}" e.EventType (render e.Data) } log.ForContext(name, sprintf "[%s]" (String.concat ",\n\r" items)) let propEvents = propData "events" @@ -255,7 +245,7 @@ module Log = let event (value : Event) (log : ILogger) = let enrich (e : LogEvent) = e.AddPropertyIfAbsent(LogEventProperty("cosmosEvt", ScalarValue(value))) log.ForContext({ new Serilog.Core.ILogEventEnricher with member __.Enrich(evt,_) = enrich evt }) - let (|BlobLen|) = function null -> 0 | (x : byte[]) -> x.Length + let (|BlobLen|) = function (j: JsonElement) when j.ValueKind <> JsonValueKind.Null -> j.GetRawText().Length | _ -> 0 let (|EventLen|) (x: #IEventData<_>) = let (BlobLen bytes), (BlobLen metaBytes) = x.Data, x.Meta in bytes+metaBytes let (|BatchLen|) = Seq.sumBy (|EventLen|) @@ -451,7 +441,7 @@ function sync(req, expIndex, expEtag) { [] type Result = | Written of Position - | Conflict of Position * events: ITimelineEvent[] + | Conflict of Position * 
events: ITimelineEvent[] | ConflictUnknown of Position type [] Exp = Version of int64 | Etag of string | Any @@ -600,7 +590,7 @@ module internal Tip = let log = log |> Log.prop "_etag" tip._etag |> Log.prop "n" tip.n log.Information("EqxCosmos {action:l} {res} {ms}ms rc={ru}", "Tip", 200, (let e = t.Elapsed in e.TotalMilliseconds), ru) return ru, res } - type [] Result = NotModified | NotFound | Found of Position * ITimelineEvent[] + type [] Result = NotModified | NotFound | Found of Position * ITimelineEvent[] /// `pos` being Some implies that the caller holds a cached value and hence is ready to deal with IndexResult.NotModified let tryLoad (log : ILogger) retryPolicy containerStream (maybePos: Position option): Async = async { let! _rc, res = Log.withLoggedRetries retryPolicy "readAttempt" (loggedGet get containerStream maybePos) log @@ -625,7 +615,7 @@ module internal Tip = // Unrolls the Batches in a response - note when reading backwards, the events are emitted in reverse order of index let private processNextPage direction (streamName: string) startPos (enumerator: IAsyncEnumerator>) (log: ILogger) - : Async[] * Position option * float>> = async { + : Async[] * Position option * float>> = async { let! 
t, res = enumerator.MoveNext() |> Stopwatch.Time return @@ -644,19 +634,19 @@ module internal Tip = let maybePosition = batches |> Array.tryPick Position.tryFromBatch events, maybePosition, ru) } - let private run (log : ILogger) (readNextPage: IAsyncEnumerator> -> ILogger -> Async[] * Position option * float>>) + let private run (log : ILogger) (readNextPage: IAsyncEnumerator> -> ILogger -> Async[] * Position option * float>>) (maxPermittedBatchReads: int option) (query: AsyncSeq>) = let e = query.GetEnumerator() - let rec loop batchCount : AsyncSeq[] * Position option * float> = asyncSeq { + let rec loop batchCount : AsyncSeq[] * Position option * float> = asyncSeq { match maxPermittedBatchReads with | Some mpbr when batchCount >= mpbr -> log.Information "batch Limit exceeded"; invalidOp "batch Limit exceeded" | _ -> () let batchLog = log |> Log.prop "batchIndex" batchCount - let! (page : Option[] * Position option * float>) = readNextPage e batchLog + let! (page : Option[] * Position option * float>) = readNextPage e batchLog if page |> Option.isSome then yield page.Value @@ -664,7 +654,7 @@ module internal Tip = loop 0 - let private logQuery direction batchSize streamName interval (responsesCount, events : ITimelineEvent[]) n (ru: float) (log : ILogger) = + let private logQuery direction batchSize streamName interval (responsesCount, events : ITimelineEvent[]) n (ru: float) (log : ILogger) = let (Log.BatchLen bytes), count = events, events.Length let reqMetric : Log.Measurement = { stream = streamName; interval = interval; bytes = bytes; count = count; ru = ru } let evt = Log.Event.Query (direction, responsesCount, reqMetric) @@ -673,7 +663,7 @@ module internal Tip = "EqxCosmos {action:l} {stream} v{n} {count}/{responses} {ms}ms rc={ru}", action, streamName, n, count, responsesCount, (let e = interval.Elapsed in e.TotalMilliseconds), ru) - let private calculateUsedVersusDroppedPayload stopIndex (xs: ITimelineEvent[]) : int * int = + let private 
calculateUsedVersusDroppedPayload stopIndex (xs: ITimelineEvent[]) : int * int = let mutable used, dropped = 0, 0 let mutable found = false for x in xs do @@ -684,10 +674,10 @@ module internal Tip = used, dropped let walk<'event> (log : ILogger) (container,stream) retryPolicy maxItems maxRequests direction startPos - (tryDecode : ITimelineEvent -> 'event option, isOrigin: 'event -> bool) + (tryDecode : ITimelineEvent -> 'event option, isOrigin: 'event -> bool) : Async = async { let responseCount = ref 0 - let mergeBatches (log : ILogger) (batchesBackward: AsyncSeq[] * Position option * float>) = async { + let mergeBatches (log : ILogger) (batchesBackward: AsyncSeq[] * Position option * float>) = async { let mutable lastResponse, maybeTipPos, ru = None, None, 0. let! events = batchesBackward @@ -714,7 +704,7 @@ module internal Tip = let retryingLoggingReadPage e = Log.withLoggedRetries retryPolicy "readAttempt" (readPage e) let log = log |> Log.prop "batchSize" maxItems |> Log.prop "stream" stream let readlog = log |> Log.prop "direction" direction - let batches : AsyncSeq[] * Position option * float> = run readlog retryingLoggingReadPage maxRequests query + let batches : AsyncSeq[] * Position option * float> = run readlog retryingLoggingReadPage maxRequests query let! 
t, (events, maybeTipPos, ru) = mergeBatches log batches |> Stopwatch.Time let raws, decoded = (Array.map fst events), (events |> Seq.choose snd |> Array.ofSeq) let pos = match maybeTipPos with Some p -> p | None -> Position.fromMaxIndex raws @@ -723,7 +713,7 @@ module internal Tip = return pos, decoded } let walkLazy<'event> (log : ILogger) (container,stream) retryPolicy maxItems maxRequests direction startPos - (tryDecode : ITimelineEvent -> 'event option, isOrigin: 'event -> bool) + (tryDecode : ITimelineEvent -> 'event option, isOrigin: 'event -> bool) : AsyncSeq<'event[]> = asyncSeq { let responseCount = ref 0 let query = mkQuery (container,stream) maxItems direction startPos @@ -787,7 +777,7 @@ module Token = [] module Internal = [] - type InternalSyncResult = Written of StreamToken | ConflictUnknown of StreamToken | Conflict of StreamToken * ITimelineEvent[] + type InternalSyncResult = Written of StreamToken | ConflictUnknown of StreamToken | Conflict of StreamToken * ITimelineEvent[] [] type LoadFromTokenResult<'event> = Unchanged | Found of StreamToken * 'event[] @@ -803,6 +793,7 @@ open Azure.Cosmos open Serilog open System open System.Collections.Concurrent +open System.Text.Json /// Defines policies for retrying with respect to transient failures calling CosmosDb (as opposed to application level concurrency conflicts) type Connection(client: CosmosClient, []?readRetryPolicy: IRetryPolicy, []?writeRetryPolicy) = @@ -878,8 +869,8 @@ type Gateway(conn : Connection, batching : BatchingPolicy) = | Sync.Result.ConflictUnknown pos' -> return InternalSyncResult.ConflictUnknown (Token.create containerStream pos') | Sync.Result.Written pos' -> return InternalSyncResult.Written (Token.create containerStream pos') } -type private Category<'event, 'state, 'context>(gateway : Gateway, codec : IEventCodec<'event,byte[],'context>) = - let (|TryDecodeFold|) (fold: 'state -> 'event seq -> 'state) initial (events: ITimelineEvent seq) : 'state = Seq.choose codec.TryDecode 
events |> fold initial +type private Category<'event, 'state, 'context>(gateway : Gateway, codec : IEventCodec<'event,JsonElement,'context>) = + let (|TryDecodeFold|) (fold: 'state -> 'event seq -> 'state) initial (events: ITimelineEvent seq) : 'state = Seq.choose codec.TryDecode events |> fold initial member __.Load includeUnfolds containerStream fold initial isOrigin (log : ILogger): Async = async { let! token, events = if not includeUnfolds then gateway.LoadBackwardsStopping log containerStream (codec.TryDecode,isOrigin) @@ -1186,12 +1177,13 @@ open Equinox.Cosmos.Store open FsCodec open FSharp.Control open System.Runtime.InteropServices +open System.Text.Json /// Outcome of appending events, specifying the new and/or conflicting events, together with the updated Target write position [] type AppendResult<'t> = | Ok of pos: 't - | Conflict of index: 't * conflictingEvents: ITimelineEvent[] + | Conflict of index: 't * conflictingEvents: ITimelineEvent[] | ConflictUnknown of index: 't /// Encapsulates the core facilities Equinox.Cosmos offers for operating directly on Events in Streams. @@ -1226,7 +1218,7 @@ type Context member __.ResolveStream(streamName) = containers.Resolve(conn.Client, null, streamName, gateway.CreateSyncStoredProcIfNotExists (Some log)) member __.CreateStream(streamName) = __.ResolveStream streamName |> fst - member internal __.GetLazy((stream, startPos), ?batchSize, ?direction) : AsyncSeq[]> = + member internal __.GetLazy((stream, startPos), ?batchSize, ?direction) : AsyncSeq[]> = let direction = defaultArg direction Direction.Forward let batching = BatchingPolicy(defaultArg batchSize batching.MaxItems) gateway.ReadLazy batching log stream direction startPos (Some,fun _ -> false) @@ -1251,11 +1243,11 @@ type Context /// Reads in batches of `batchSize` from the specified `Position`, allowing the reader to efficiently walk away from a running query /// ... NB as long as they Dispose! 
- member __.Walk(stream, batchSize, ?position, ?direction) : AsyncSeq[]> = + member __.Walk(stream, batchSize, ?position, ?direction) : AsyncSeq[]> = __.GetLazy((stream, position), batchSize, ?direction=direction) /// Reads all Events from a `Position` in a given `direction` - member __.Read(stream, ?position, ?maxCount, ?direction) : Async[]> = + member __.Read(stream, ?position, ?maxCount, ?direction) : Async[]> = __.GetInternal((stream, position), ?maxCount=maxCount, ?direction=direction) |> yieldPositionAndData /// Appends the supplied batch of events, subject to a consistency check based on the `position` @@ -1300,7 +1292,7 @@ module Events = let private stripPosition (f: Async): Async = async { let! (PositionIndex index) = f return index } - let private dropPosition (f: Async[]>): Async[]> = async { + let private dropPosition (f: Async[]>): Async[]> = async { let! _,xs = f return xs } let (|MinPosition|) = function @@ -1314,14 +1306,14 @@ module Events = /// reading in batches of the specified size. /// Returns an empty sequence if the stream is empty or if the sequence number is larger than the largest /// sequence number in the stream. - let getAll (ctx: Context) (streamName: string) (MinPosition index: int64) (batchSize: int): FSharp.Control.AsyncSeq[]> = + let getAll (ctx: Context) (streamName: string) (MinPosition index: int64) (batchSize: int): FSharp.Control.AsyncSeq[]> = ctx.Walk(ctx.CreateStream streamName, batchSize, ?position=index) /// Returns an async array of events in the stream starting at the specified sequence number, /// number of events to read is specified by batchSize /// Returns an empty sequence if the stream is empty or if the sequence number is larger than the largest /// sequence number in the stream. 
- let get (ctx: Context) (streamName: string) (MinPosition index: int64) (maxCount: int): Async[]> = + let get (ctx: Context) (streamName: string) (MinPosition index: int64) (maxCount: int): Async[]> = ctx.Read(ctx.CreateStream streamName, ?position=index, maxCount=maxCount) |> dropPosition /// Appends a batch of events to a stream at the specified expected sequence number. @@ -1341,14 +1333,14 @@ module Events = /// reading in batches of the specified size. /// Returns an empty sequence if the stream is empty or if the sequence number is smaller than the smallest /// sequence number in the stream. - let getAllBackwards (ctx: Context) (streamName: string) (MaxPosition index: int64) (batchSize: int): AsyncSeq[]> = + let getAllBackwards (ctx: Context) (streamName: string) (MaxPosition index: int64) (batchSize: int): AsyncSeq[]> = ctx.Walk(ctx.CreateStream streamName, batchSize, ?position=index, direction=Direction.Backward) /// Returns an async array of events in the stream backwards starting from the specified sequence number, /// number of events to read is specified by batchSize /// Returns an empty sequence if the stream is empty or if the sequence number is smaller than the smallest /// sequence number in the stream. 
- let getBackwards (ctx: Context) (streamName: string) (MaxPosition index: int64) (maxCount: int): Async[]> = + let getBackwards (ctx: Context) (streamName: string) (MaxPosition index: int64) (maxCount: int): Async[]> = ctx.Read(ctx.CreateStream streamName, ?position=index, maxCount=maxCount, direction=Direction.Backward) |> dropPosition /// Obtains the `index` from the current write Position From aec80687afb4136a36f75828fb8dbc3d8cc15bf3 Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Tue, 25 Feb 2020 13:06:10 -0500 Subject: [PATCH 16/71] Replace FSCodec.NewtonsoftJson with core FSCodec --- src/Equinox.Cosmos/Equinox.Cosmos.fsproj | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Equinox.Cosmos/Equinox.Cosmos.fsproj b/src/Equinox.Cosmos/Equinox.Cosmos.fsproj index 902cfcd2f..8966ce660 100644 --- a/src/Equinox.Cosmos/Equinox.Cosmos.fsproj +++ b/src/Equinox.Cosmos/Equinox.Cosmos.fsproj @@ -20,13 +20,13 @@ + - From 2035d49ec27eadf64c6bba8b2924c3313d47de94 Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Tue, 25 Feb 2020 13:31:21 -0500 Subject: [PATCH 17/71] Remove NewtonsoftJsonSerializer --- src/Equinox.Cosmos/Cosmos.fs | 2 +- src/Equinox.Cosmos/Equinox.Cosmos.fsproj | 1 - .../NewtonsoftJsonSerializer.fs | 39 ------------------- 3 files changed, 1 insertion(+), 41 deletions(-) delete mode 100644 src/Equinox.Cosmos/NewtonsoftJsonSerializer.fs diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index abc1a64f7..705ff6800 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -1131,7 +1131,7 @@ type Connector /// ClientOptions for this Connector as configured member val ClientOptions = let maxAttempts, maxWait, timeout = Nullable maxRetryAttemptsOnRateLimitedRequests, Nullable maxRetryWaitTimeOnRateLimitedRequests, requestTimeout - let co = CosmosClientOptions(MaxRetryAttemptsOnRateLimitedRequests = maxAttempts, MaxRetryWaitTimeOnRateLimitedRequests = maxWait, RequestTimeout = timeout, Serializer = 
NewtonsoftJsonSerializer()) + let co = CosmosClientOptions(MaxRetryAttemptsOnRateLimitedRequests = maxAttempts, MaxRetryWaitTimeOnRateLimitedRequests = maxWait, RequestTimeout = timeout) match mode with | Some ConnectionMode.Direct -> co.ConnectionMode <- ConnectionMode.Direct | None | Some ConnectionMode.Gateway | Some _ (* enum total match :( *) -> co.ConnectionMode <- ConnectionMode.Gateway // default; only supports Https diff --git a/src/Equinox.Cosmos/Equinox.Cosmos.fsproj b/src/Equinox.Cosmos/Equinox.Cosmos.fsproj index 8966ce660..cb24b72f0 100644 --- a/src/Equinox.Cosmos/Equinox.Cosmos.fsproj +++ b/src/Equinox.Cosmos/Equinox.Cosmos.fsproj @@ -11,7 +11,6 @@ - diff --git a/src/Equinox.Cosmos/NewtonsoftJsonSerializer.fs b/src/Equinox.Cosmos/NewtonsoftJsonSerializer.fs deleted file mode 100644 index 71bc33a10..000000000 --- a/src/Equinox.Cosmos/NewtonsoftJsonSerializer.fs +++ /dev/null @@ -1,39 +0,0 @@ -namespace Equinox.Cosmos.Store - -open System.IO; -open System.Text; -open Azure.Cosmos.Serialization; -open Newtonsoft.Json; -open Newtonsoft.Json.Serialization; - -type NewtonsoftJsonSerializer () = - inherit CosmosSerializer () - - let encoding = new UTF8Encoding(false, true) - let serializer = JsonSerializer.Create() - - override __.FromStream<'T> (stream: Stream): 'T = - use stream = stream - - if typeof.IsAssignableFrom(typeof<'T>) then - stream :> obj :?> 'T - else - use streamReader = new StreamReader(stream) - use jsonReader = new JsonTextReader(streamReader) - serializer.Deserialize<'T>(jsonReader) - - override __.ToStream<'T> (input: 'T): Stream = - let payload = new MemoryStream() - - ( - use streamWriter = new StreamWriter(payload, encoding = encoding, bufferSize = 1024, leaveOpen = true) - use jsonWriter = new JsonTextWriter(streamWriter) - - jsonWriter.Formatting <- Formatting.None - serializer.Serialize(jsonWriter, input) - jsonWriter.Flush() - streamWriter.Flush() - ) - - payload.Position <- 0L - payload :> Stream From 
b61e367d8c42db9bf85666f3c6fc342907f79bc8 Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Wed, 26 Feb 2020 14:11:58 -0500 Subject: [PATCH 18/71] Update FsCodec to 2.0.1 --- samples/Store/Domain/Domain.fsproj | 2 +- src/Equinox.Cosmos/Equinox.Cosmos.fsproj | 2 +- src/Equinox.EventStore/Equinox.EventStore.fsproj | 2 +- src/Equinox.MemoryStore/Equinox.MemoryStore.fsproj | 2 +- src/Equinox.SqlStreamStore/Equinox.SqlStreamStore.fsproj | 2 +- .../Equinox.EventStore.Integration.fsproj | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/samples/Store/Domain/Domain.fsproj b/samples/Store/Domain/Domain.fsproj index 295e2de15..5960596f2 100644 --- a/samples/Store/Domain/Domain.fsproj +++ b/samples/Store/Domain/Domain.fsproj @@ -21,7 +21,7 @@ - + diff --git a/src/Equinox.Cosmos/Equinox.Cosmos.fsproj b/src/Equinox.Cosmos/Equinox.Cosmos.fsproj index cb24b72f0..69720f495 100644 --- a/src/Equinox.Cosmos/Equinox.Cosmos.fsproj +++ b/src/Equinox.Cosmos/Equinox.Cosmos.fsproj @@ -19,7 +19,7 @@ - + diff --git a/src/Equinox.EventStore/Equinox.EventStore.fsproj b/src/Equinox.EventStore/Equinox.EventStore.fsproj index 9394721b3..15847dde4 100644 --- a/src/Equinox.EventStore/Equinox.EventStore.fsproj +++ b/src/Equinox.EventStore/Equinox.EventStore.fsproj @@ -26,7 +26,7 @@ - + diff --git a/src/Equinox.MemoryStore/Equinox.MemoryStore.fsproj b/src/Equinox.MemoryStore/Equinox.MemoryStore.fsproj index 5eeae410f..4895d6ccb 100644 --- a/src/Equinox.MemoryStore/Equinox.MemoryStore.fsproj +++ b/src/Equinox.MemoryStore/Equinox.MemoryStore.fsproj @@ -24,7 +24,7 @@ - + \ No newline at end of file diff --git a/src/Equinox.SqlStreamStore/Equinox.SqlStreamStore.fsproj b/src/Equinox.SqlStreamStore/Equinox.SqlStreamStore.fsproj index 8f5feeeea..e7addd071 100644 --- a/src/Equinox.SqlStreamStore/Equinox.SqlStreamStore.fsproj +++ b/src/Equinox.SqlStreamStore/Equinox.SqlStreamStore.fsproj @@ -24,7 +24,7 @@ - + diff --git 
a/tests/Equinox.EventStore.Integration/Equinox.EventStore.Integration.fsproj b/tests/Equinox.EventStore.Integration/Equinox.EventStore.Integration.fsproj index 4ea55867f..1aa5f6f2c 100644 --- a/tests/Equinox.EventStore.Integration/Equinox.EventStore.Integration.fsproj +++ b/tests/Equinox.EventStore.Integration/Equinox.EventStore.Integration.fsproj @@ -22,7 +22,7 @@ - + From c37789d2bb77642bf51ae4922791662f776001de Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Thu, 27 Feb 2020 15:34:52 -0500 Subject: [PATCH 19/71] Fully switch over to STJ --- src/Equinox.Core/Infrastructure.fs | 4 + src/Equinox.Cosmos/Cosmos.fs | 62 ++++--- src/Equinox.Cosmos/CosmosJsonSerializer.fs | 35 ++++ src/Equinox.Cosmos/Equinox.Cosmos.fsproj | 5 + src/Equinox.Cosmos/Json/JsonElementHelpers.fs | 20 +++ .../Json/JsonRecordConverter.fs | 154 ++++++++++++++++++ src/Equinox.Cosmos/Json/Options.fs | 14 ++ .../Json/Utf8JsonReaderExtensions.fs | 22 +++ 8 files changed, 290 insertions(+), 26 deletions(-) create mode 100644 src/Equinox.Cosmos/CosmosJsonSerializer.fs create mode 100644 src/Equinox.Cosmos/Json/JsonElementHelpers.fs create mode 100644 src/Equinox.Cosmos/Json/JsonRecordConverter.fs create mode 100644 src/Equinox.Cosmos/Json/Options.fs create mode 100644 src/Equinox.Cosmos/Json/Utf8JsonReaderExtensions.fs diff --git a/src/Equinox.Core/Infrastructure.fs b/src/Equinox.Core/Infrastructure.fs index f8d2a7df1..0834c2520 100755 --- a/src/Equinox.Core/Infrastructure.fs +++ b/src/Equinox.Core/Infrastructure.fs @@ -69,6 +69,10 @@ type Async with sc ()) |> ignore) +#if NETSTANDARD2_1 + static member inline AwaitValueTask (vtask: ValueTask<'T>) : Async<'T> = vtask.AsTask() |> Async.AwaitTaskCorrect +#endif + [] module Regex = open System.Text.RegularExpressions diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index 705ff6800..82dfff513 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -72,29 +72,9 @@ type [] // TODO for STJ v5: All fields 
required unless /// As one cannot sort by the implicit `id` field, we have an indexed `i` field for sort and range query use static member internal IndexedFields = [Batch.PartitionKeyField; "i"; "n"] -/// Compaction/Snapshot/Projection Event based on the state at a given point in time `i` -type Unfold = - { /// Base: Stream Position (Version) of State from which this Unfold Event was generated - i: int64 - - /// Generation datetime - t: DateTimeOffset // ISO 8601 // Not written by versions <= 2.0.0-rc9 - - /// The Case (Event Type) of this compaction/snapshot, used to drive deserialization - c: string // required - - /// Event body - Json -> UTF-8 -> Deflate -> Base64 - [)>] - d: JsonElement // required - - /// Optional metadata, same encoding as `d` (can be null; not written if missing) - [)>] - m: JsonElement // TODO for STJ v5: Optional, not serialized if missing - } - /// Manages zipping of the UTF-8 json bytes to make the index record minimal from the perspective of the writer stored proc /// Only applied to snapshots in the Tip -and JsonCompressedBase64Converter() = +type JsonCompressedBase64Converter() = inherit JsonConverter() override __.Read (reader, _typeToConvert, options) = @@ -108,9 +88,9 @@ and JsonCompressedBase64Converter() = decompressor.CopyTo(output) JsonSerializer.Deserialize(ReadOnlySpan.op_Implicit(output.ToArray()), options) - override __.Write (writer, value, options) = - if value.ValueKind = JsonValueKind.Null then - value.WriteTo(writer) + override __.Write (writer, value, _options) = + if value.ValueKind = JsonValueKind.Null || value.ValueKind = JsonValueKind.Undefined then + writer.WriteNullValue() else let input = System.Text.Encoding.UTF8.GetBytes(value.GetRawText()) use output = new MemoryStream() @@ -119,6 +99,35 @@ and JsonCompressedBase64Converter() = compressor.Close() writer.WriteBase64StringValue(ReadOnlySpan.op_Implicit(output.ToArray())) +type JsonCompressedBase64ConverterAttribute () = + inherit 
JsonConverterAttribute(typeof) + + static let converter = JsonCompressedBase64Converter() + + override __.CreateConverter _typeToConvert = + converter :> JsonConverter + +/// Compaction/Snapshot/Projection Event based on the state at a given point in time `i` +[] +type Unfold = + { /// Base: Stream Position (Version) of State from which this Unfold Event was generated + i: int64 + + /// Generation datetime + t: DateTimeOffset // ISO 8601 // Not written by versions <= 2.0.0-rc9 + + /// The Case (Event Type) of this compaction/snapshot, used to drive deserialization + c: string // required + + /// Event body - Json -> UTF-8 -> Deflate -> Base64 + [] + d: JsonElement // required + + /// Optional metadata, same encoding as `d` (can be null; not written if missing) + [] + m: JsonElement // TODO for STJ v5: Optional, not serialized if missing + } + /// The special-case 'Pending' Batch Format used to read the currently active (and mutable) document /// Stored representation has the following diffs vs a 'normal' (frozen/completed) Batch: a) `id` = `-1` b) contains unfolds (`u`) /// NB the type does double duty as a) model for when we read it b) encoding a batch being sent to the stored proc @@ -245,7 +254,7 @@ module Log = let event (value : Event) (log : ILogger) = let enrich (e : LogEvent) = e.AddPropertyIfAbsent(LogEventProperty("cosmosEvt", ScalarValue(value))) log.ForContext({ new Serilog.Core.ILogEventEnricher with member __.Enrich(evt,_) = enrich evt }) - let (|BlobLen|) = function (j: JsonElement) when j.ValueKind <> JsonValueKind.Null -> j.GetRawText().Length | _ -> 0 + let (|BlobLen|) = function (j: JsonElement) when j.ValueKind <> JsonValueKind.Null && j.ValueKind <> JsonValueKind.Undefined -> j.GetRawText().Length | _ -> 0 let (|EventLen|) (x: #IEventData<_>) = let (BlobLen bytes), (BlobLen metaBytes) = x.Data, x.Meta in bytes+metaBytes let (|BatchLen|) = Seq.sumBy (|EventLen|) @@ -786,6 +795,7 @@ namespace Equinox.Cosmos open Equinox open Equinox.Core +open 
Equinox.Cosmos.Json open Equinox.Cosmos.Store open FsCodec open FSharp.Control @@ -1131,7 +1141,7 @@ type Connector /// ClientOptions for this Connector as configured member val ClientOptions = let maxAttempts, maxWait, timeout = Nullable maxRetryAttemptsOnRateLimitedRequests, Nullable maxRetryWaitTimeOnRateLimitedRequests, requestTimeout - let co = CosmosClientOptions(MaxRetryAttemptsOnRateLimitedRequests = maxAttempts, MaxRetryWaitTimeOnRateLimitedRequests = maxWait, RequestTimeout = timeout) + let co = CosmosClientOptions(MaxRetryAttemptsOnRateLimitedRequests = maxAttempts, MaxRetryWaitTimeOnRateLimitedRequests = maxWait, RequestTimeout = timeout, Serializer = CosmosJsonSerializer(JsonSerializer.defaultOptions)) match mode with | Some ConnectionMode.Direct -> co.ConnectionMode <- ConnectionMode.Direct | None | Some ConnectionMode.Gateway | Some _ (* enum total match :( *) -> co.ConnectionMode <- ConnectionMode.Gateway // default; only supports Https diff --git a/src/Equinox.Cosmos/CosmosJsonSerializer.fs b/src/Equinox.Cosmos/CosmosJsonSerializer.fs new file mode 100644 index 000000000..612291257 --- /dev/null +++ b/src/Equinox.Cosmos/CosmosJsonSerializer.fs @@ -0,0 +1,35 @@ +namespace Equinox.Cosmos.Store + +open System.IO +open System.Text.Json +open Azure.Cosmos.Serialization +open Equinox.Core +open Equinox.Cosmos.Json + +type CosmosJsonSerializer (options: JsonSerializerOptions) = + inherit CosmosSerializer() + + override __.FromStream<'T> (stream) = + using (stream) (fun stream -> + if stream.Length = 0L then + Unchecked.defaultof<'T> + elif typeof.IsAssignableFrom(typeof<'T>) then + stream :> obj :?> 'T + else + JsonSerializer.DeserializeAsync<'T>(stream, options) + |> Async.AwaitValueTask + |> Async.RunSynchronously + ) + + override __.ToStream<'T> (input: 'T) = + async { + let memoryStream = new MemoryStream() + + do! 
+ JsonSerializer.SerializeAsync(memoryStream, input, input.GetType(), options) + |> Async.AwaitTaskCorrect + + memoryStream.Position <- 0L + return memoryStream :> Stream + } + |> Async.RunSynchronously diff --git a/src/Equinox.Cosmos/Equinox.Cosmos.fsproj b/src/Equinox.Cosmos/Equinox.Cosmos.fsproj index 69720f495..ce3b281ce 100644 --- a/src/Equinox.Cosmos/Equinox.Cosmos.fsproj +++ b/src/Equinox.Cosmos/Equinox.Cosmos.fsproj @@ -10,7 +10,12 @@ + + + + + diff --git a/src/Equinox.Cosmos/Json/JsonElementHelpers.fs b/src/Equinox.Cosmos/Json/JsonElementHelpers.fs new file mode 100644 index 000000000..be64a85a0 --- /dev/null +++ b/src/Equinox.Cosmos/Json/JsonElementHelpers.fs @@ -0,0 +1,20 @@ +namespace Equinox.Cosmos + +open System +open System.Buffers +open System.Runtime.InteropServices +open System.Text.Json + +[] +module JsonSerializerExtensions = + type JsonSerializer with + static member SerializeToElement(value: 'T, [] ?options: JsonSerializerOptions) = + JsonSerializer.Deserialize(ReadOnlySpan.op_Implicit(JsonSerializer.SerializeToUtf8Bytes(value, defaultArg options null))) + + static member DeserializeElement<'T>(element: JsonElement, [] ?options: JsonSerializerOptions) = + let bufferWriter = ArrayBufferWriter() + ( + use jsonWriter = new Utf8JsonWriter(bufferWriter) + element.WriteTo(jsonWriter) + ) + JsonSerializer.Deserialize<'T>(bufferWriter.WrittenSpan, defaultArg options null) diff --git a/src/Equinox.Cosmos/Json/JsonRecordConverter.fs b/src/Equinox.Cosmos/Json/JsonRecordConverter.fs new file mode 100644 index 000000000..f574977ab --- /dev/null +++ b/src/Equinox.Cosmos/Json/JsonRecordConverter.fs @@ -0,0 +1,154 @@ +namespace Equinox.Cosmos.Json + +open System +open System.Collections.Generic +open System.Linq.Expressions +open System.Text.Json +open System.Text.Json.Serialization +open FSharp.Reflection + +type JsonRecordConverterActivator = delegate of JsonSerializerOptions -> JsonConverter + +type IRecordFieldConverter = + abstract member Initialize: 
converter: JsonConverter -> unit + abstract member Read: reader: byref * typ: Type * options: JsonSerializerOptions -> obj + abstract member Write: writer: Utf8JsonWriter * value: obj * options: JsonSerializerOptions -> unit + +type RecordFieldConverter<'F> () = + let mutable converter = Unchecked.defaultof> + + interface IRecordFieldConverter with + member __.Initialize (c) = + converter <- c :?> JsonConverter<'F> + + member __.Read (reader, typ, options) = + converter.Read(&reader, typ, options) :> obj + + member __.Write (writer, value, options) = + converter.Write(writer, value :?> 'F, options) + +[] +type RecordField = { + Name: string + Type: Type + Index: int + IsIgnored: bool + Converter: IRecordFieldConverter option +} + +type JsonRecordConverter<'T> (options: JsonSerializerOptions) = + inherit JsonConverter<'T> () + + let recordType = typeof<'T> + + let constructor = FSharpValue.PreComputeRecordConstructor(recordType, true) + let getFieldValues = FSharpValue.PreComputeRecordReader(typeof<'T>, true) + + let fields = + FSharpType.GetRecordFields(recordType, true) + |> Array.mapi (fun idx f -> + { + Name = + f.GetCustomAttributes(typedefof, true) + |> Array.tryHead + |> Option.map (fun attr -> (attr :?> JsonPropertyNameAttribute).Name) + |> Option.defaultWith (fun () -> + if options.PropertyNamingPolicy |> isNull + then f.Name + else options.PropertyNamingPolicy.ConvertName f.Name) + + Type = f.PropertyType + Index = idx + IsIgnored = f.GetCustomAttributes(typeof, true) |> Array.isEmpty |> not + Converter = + f.GetCustomAttributes(typeof, true) + |> Array.tryHead + |> Option.map (fun attr -> attr :?> JsonConverterAttribute) + |> Option.bind (fun attr -> + let baseConverter = attr.CreateConverter(f.PropertyType) + + if baseConverter |> isNull then + failwithf "Field %s is decorated with a JsonConverter attribute, but it does not implement a CreateConverter method." 
f.Name + + if baseConverter.CanConvert(f.PropertyType) then + let converterType = typedefof>.MakeGenericType(f.PropertyType) + let converter = Activator.CreateInstance(converterType) :?> IRecordFieldConverter + converter.Initialize(baseConverter) + Some converter + else + None + ) + }) + + let fieldsByName = + fields + |> Array.map (fun f -> f.Name, f) + |> Array.map KeyValuePair.Create + |> (fun kvp -> Dictionary(kvp, StringComparer.OrdinalIgnoreCase)) + + let tryGetFieldByName name = + match fieldsByName.TryGetValue(name) with + | true, field -> Some field + | _ -> None + + let getFieldByName name = + match tryGetFieldByName name with + | Some field -> field + | _ -> KeyNotFoundException(sprintf "Failed to find a field named '%s' on record type '%s'." name recordType.Name) |> raise + + override __.Read (reader, typ, options) = + reader.ValidateTokenType(JsonTokenType.StartObject) + + let fields = Array.zeroCreate <| fields.Length + + while reader.Read() && reader.TokenType <> JsonTokenType.EndObject do + reader.ValidateTokenType(JsonTokenType.PropertyName) + + match tryGetFieldByName <| reader.GetString() with + | Some field -> + fields.[field.Index] <- + match field.Converter with + | Some converter -> + reader.Read() |> ignore + converter.Read(&reader, field.Type, options) + | None -> + JsonSerializer.Deserialize(&reader, field.Type, options) + | _ -> + reader.Skip() + + constructor fields :?> 'T + + override __.Write (writer, record, options) = + writer.WriteStartObject() + + let fieldValues = getFieldValues record + + (fields, fieldValues) + ||> Array.iter2 (fun field value -> + match value with + | :? 
JsonElement as je when je.ValueKind = JsonValueKind.Undefined -> () + | _ -> + if not field.IsIgnored && not (options.IgnoreNullValues && isNull value) then + writer.WritePropertyName(field.Name) + + match field.Converter with + | Some converter -> converter.Write(writer, value, options) + | None -> JsonSerializer.Serialize(writer, value, options)) + + writer.WriteEndObject() + +type JsonRecordConverter () = + inherit JsonConverterFactory() + + override __.CanConvert typ = + FSharpType.IsRecord (typ, true) + + override __.CreateConverter (typ, options) = + let constructor = typedefof>.MakeGenericType(typ).GetConstructor(typeof |> Array.singleton) + let optionsParameter = Expression.Parameter(typeof, "options") + + let newExpression = Expression.New(constructor, optionsParameter) + let lambda = Expression.Lambda(typeof, newExpression, optionsParameter) + + let activator = lambda.Compile() :?> JsonRecordConverterActivator + activator.Invoke(options) diff --git a/src/Equinox.Cosmos/Json/Options.fs b/src/Equinox.Cosmos/Json/Options.fs new file mode 100644 index 000000000..b928ccbc8 --- /dev/null +++ b/src/Equinox.Cosmos/Json/Options.fs @@ -0,0 +1,14 @@ +namespace Equinox.Cosmos.Json + +open System.Text.Json + +[] +module JsonSerializerOptionExtensions = + type JsonSerializerOptions with + static member Create() = + let options = JsonSerializerOptions() + options.Converters.Add(new JsonRecordConverter()) + options + +module JsonSerializer = + let defaultOptions = JsonSerializerOptions.Create() diff --git a/src/Equinox.Cosmos/Json/Utf8JsonReaderExtensions.fs b/src/Equinox.Cosmos/Json/Utf8JsonReaderExtensions.fs new file mode 100644 index 000000000..a1bb391db --- /dev/null +++ b/src/Equinox.Cosmos/Json/Utf8JsonReaderExtensions.fs @@ -0,0 +1,22 @@ +namespace Equinox.Cosmos.Json + +open System.Text.Json +open System.Runtime.CompilerServices + +[] +type Utf8JsonReaderExtension = + [] + static member ValidateTokenType(reader: Utf8JsonReader, expectedTokenType) = + if 
reader.TokenType <> expectedTokenType then + sprintf "Expected a %A token, but encountered a %A token when parsing JSON." expectedTokenType (reader.TokenType) + |> JsonException + |> raise + + [] + static member ValidatePropertyName(reader: Utf8JsonReader, expectedPropertyName: string) = + reader.ValidateTokenType(JsonTokenType.PropertyName) + + if not <| reader.ValueTextEquals expectedPropertyName then + sprintf "Expected a property named '%s', but encounted property with name '%s'." expectedPropertyName (reader.GetString()) + |> JsonException + |> raise From 561e17c1ed5b501e2cf5e7559288706d5337bc5d Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Thu, 27 Feb 2020 15:37:20 -0500 Subject: [PATCH 20/71] Update tests --- .../CosmosCoreIntegration.fs | 12 +++--- .../CosmosIntegration.fs | 43 ++++++++++++++++--- .../Equinox.Cosmos.Integration.fsproj | 1 + tests/Equinox.Cosmos.Integration/Json.fs | 27 ++++++++++++ .../JsonConverterTests.fs | 29 +++++++++---- 5 files changed, 92 insertions(+), 20 deletions(-) create mode 100644 tests/Equinox.Cosmos.Integration/Json.fs diff --git a/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs b/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs index e7e726e9d..0e0e6138d 100644 --- a/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs +++ b/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs @@ -8,7 +8,7 @@ open Newtonsoft.Json.Linq open Swensen.Unquote open Serilog open System -open System.Text +open System.Text.Json #nowarn "1182" // From hereon in, we may have some 'unused' privates (the tests) @@ -16,8 +16,8 @@ type TestEvents() = static member private Create(i, ?eventType, ?json) = EventData.FromUtf8Bytes ( sprintf "%s:%d" (defaultArg eventType "test_event") i, - Encoding.UTF8.GetBytes(defaultArg json "{\"d\":\"d\"}"), - Encoding.UTF8.GetBytes "{\"m\":\"m\"}") + IntegrationJsonSerializer.deserialize(defaultArg json "{\"d\":\"d\"}"), + IntegrationJsonSerializer.deserialize("{\"m\":\"m\"}") ) static 
member Create(i, c) = Array.init c (fun x -> TestEvents.Create(x+i)) type Tests(testOutputHelper) = @@ -69,8 +69,8 @@ type Tests(testOutputHelper) = test <@ match res with Choice2Of2 ((:? InvalidOperationException) as ex) -> ex.Message.StartsWith "Must write either events or unfolds." | x -> failwithf "%A" x @> } - let blobEquals (x: byte[]) (y: byte[]) = System.Linq.Enumerable.SequenceEqual(x,y) - let stringOfUtf8 (x: byte[]) = Encoding.UTF8.GetString(x) + let blobEquals (x: JsonElement) (y: JsonElement) = x.GetRawText().Equals(y.GetRawText()) + let stringOfUtf8 (x: JsonElement) = x.GetRawText() let xmlDiff (x: string) (y: string) = match JsonDiffPatchDotNet.JsonDiffPatch().Diff(JToken.Parse x,JToken.Parse y) with | null -> "" @@ -91,7 +91,7 @@ type Tests(testOutputHelper) = return TestEvents.Create(0,6) } - let verifyCorrectEventsEx direction baseIndex (expected: IEventData<_>[]) (xs: ITimelineEvent[]) = + let verifyCorrectEventsEx direction baseIndex (expected: IEventData<_>[]) (xs: ITimelineEvent[]) = let xs, baseIndex = if direction = Equinox.Cosmos.Store.Direction.Forward then xs, baseIndex else Array.rev xs, baseIndex - int64 (Array.length expected) + 1L diff --git a/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs b/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs index 0b254b5f0..e5057337a 100644 --- a/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs +++ b/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs @@ -1,17 +1,38 @@ module Equinox.Cosmos.Integration.CosmosIntegration +open System +open System.Threading +open System.Text.Json open Domain open Equinox.Cosmos open Equinox.Cosmos.Integration.Infrastructure open FSharp.UMX open Swensen.Unquote -open System -open System.Threading module Cart = + module Codec = + open Domain.Cart.Events + + let encode (evt: Event) = + match evt with + | Snapshotted state -> "Snapshotted", IntegrationJsonSerializer.serializeToElement(state) + | ItemAdded addInfo -> "ItemAdded", 
IntegrationJsonSerializer.serializeToElement(addInfo) + | ItemRemoved removeInfo -> "ItemRemoved", IntegrationJsonSerializer.serializeToElement(removeInfo) + | ItemQuantityChanged changeInfo -> "ItemQuantityChanged", IntegrationJsonSerializer.serializeToElement(changeInfo) + | ItemWaiveReturnsChanged waiveInfo -> "ItemWaiveReturnsChanged", IntegrationJsonSerializer.serializeToElement(waiveInfo) + + let tryDecode (eventType, data: JsonElement) = + match eventType with + | "Snapshotted" -> Some (Snapshotted <| IntegrationJsonSerializer.deserializeElement(data)) + | "ItemAdded" -> Some (ItemAdded <| IntegrationJsonSerializer.deserializeElement(data)) + | "ItemRemoved" -> Some (ItemRemoved <| IntegrationJsonSerializer.deserializeElement(data)) + | "ItemQuantityChanged" -> Some (ItemQuantityChanged <| IntegrationJsonSerializer.deserializeElement(data)) + | "ItemWaiveReturnsChanged" -> Some (ItemWaiveReturnsChanged <| IntegrationJsonSerializer.deserializeElement(data)) + | _ -> None + let fold, initial = Domain.Cart.Fold.fold, Domain.Cart.Fold.initial let snapshot = Domain.Cart.Fold.isOrigin, Domain.Cart.Fold.snapshot - let codec = Domain.Cart.Events.codec + let codec = FsCodec.Codec.Create(Codec.encode, Codec.tryDecode) let createServiceWithoutOptimization connection batchSize log = let store = createCosmosContext connection batchSize let resolve (id,opt) = Resolver(store, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.Unoptimized).Resolve(id,?option=opt) @@ -39,8 +60,20 @@ module Cart = Backend.Cart.Service(log, resolve) module ContactPreferences = + module Codec = + open Domain.ContactPreferences.Events + + let encode (evt: Event) = + match evt with + | Updated value -> "contactPreferencesChanged", IntegrationJsonSerializer.serializeToElement(value) + + let tryDecode (eventType, data: JsonElement) = + match eventType with + | "contactPreferencesChanged" -> Some (Updated <| IntegrationJsonSerializer.deserializeElement(data)) + | _ -> None + let fold, 
initial = Domain.ContactPreferences.Fold.fold, Domain.ContactPreferences.Fold.initial - let codec = Domain.ContactPreferences.Events.codec + let codec = FsCodec.Codec.Create(Codec.encode, Codec.tryDecode) let createServiceWithoutOptimization createGateway defaultBatchSize log _ignoreWindowSize _ignoreCompactionPredicate = let gateway = createGateway defaultBatchSize let resolve = Resolver(gateway, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.Unoptimized).Resolve @@ -375,4 +408,4 @@ type Tests(testOutputHelper) = capture.Clear() do! addAndThenRemoveItemsOptimisticManyTimesExceptTheLastOne context cartId skuId service1 1 test <@ [EqxAct.Append] = capture.ExternalCalls @> - } \ No newline at end of file + } diff --git a/tests/Equinox.Cosmos.Integration/Equinox.Cosmos.Integration.fsproj b/tests/Equinox.Cosmos.Integration/Equinox.Cosmos.Integration.fsproj index 01422e7c0..09c104a32 100644 --- a/tests/Equinox.Cosmos.Integration/Equinox.Cosmos.Integration.fsproj +++ b/tests/Equinox.Cosmos.Integration/Equinox.Cosmos.Integration.fsproj @@ -8,6 +8,7 @@ + diff --git a/tests/Equinox.Cosmos.Integration/Json.fs b/tests/Equinox.Cosmos.Integration/Json.fs new file mode 100644 index 000000000..bcd37fad0 --- /dev/null +++ b/tests/Equinox.Cosmos.Integration/Json.fs @@ -0,0 +1,27 @@ +[] +module Equinox.Cosmos.Integration.Json + +open Equinox.Cosmos +open Equinox.Cosmos.Json +open System +open System.Text.Json +open System.Text.Json.Serialization +open Domain + +type JsonSkuIdConverter () = + inherit JsonConverter() + + override __.Read (reader, _typ, _options) = + reader.GetString() |> Guid.Parse |> SkuId + + override __.Write (writer, value, _options) = + writer.WriteStringValue(string value) + +module IntegrationJsonSerializer = + let options = JsonSerializer.defaultOptions + options.Converters.Add(JsonSkuIdConverter()) + + let serialize (value: 'T) = JsonSerializer.Serialize(value, options) + let serializeToElement (value: 'T) = 
JsonSerializer.SerializeToElement(value, options) + let deserialize<'T> (json: string) = JsonSerializer.Deserialize<'T>(json, options) + let deserializeElement<'T> (jsonElement: JsonElement) = JsonSerializer.DeserializeElement<'T>(jsonElement, options) diff --git a/tests/Equinox.Cosmos.Integration/JsonConverterTests.fs b/tests/Equinox.Cosmos.Integration/JsonConverterTests.fs index fc283b40c..1e3407852 100644 --- a/tests/Equinox.Cosmos.Integration/JsonConverterTests.fs +++ b/tests/Equinox.Cosmos.Integration/JsonConverterTests.fs @@ -2,10 +2,10 @@ open Equinox.Cosmos open FsCheck.Xunit -open Newtonsoft.Json open Swensen.Unquote open System open Xunit +open System.Text.Json type Embedded = { embed : string } type Union = @@ -15,8 +15,19 @@ type Union = let defaultSettings = FsCodec.NewtonsoftJson.Settings.CreateDefault() +let encode (evt: Union) = + match evt with + | A e -> "A", IntegrationJsonSerializer.serializeToElement(e) + | B e -> "B", IntegrationJsonSerializer.serializeToElement(e) + +let tryDecode (eventType, data: JsonElement) = + match eventType with + | "A" -> Some (A <| IntegrationJsonSerializer.deserializeElement(data)) + | "B" -> Some (B <| IntegrationJsonSerializer.deserializeElement(data)) + | _ -> None + type Base64ZipUtf8Tests() = - let eventCodec = FsCodec.NewtonsoftJson.Codec.Create(defaultSettings) + let eventCodec = FsCodec.Codec.Create(encode, tryDecode) [] let ``serializes, achieving compression`` () = @@ -25,10 +36,10 @@ type Base64ZipUtf8Tests() = { i = 42L c = encoded.EventType d = encoded.Data - m = null + m = Unchecked.defaultof t = DateTimeOffset.MinValue } - let res = JsonConvert.SerializeObject e - test <@ res.Contains("\"d\":\"") && res.Length < 128 @> + let res = IntegrationJsonSerializer.serialize(e) + test <@ res.Contains("\"d\":\"") && res.Length < 138 @> [] let roundtrips value = @@ -43,11 +54,11 @@ type Base64ZipUtf8Tests() = { i = 42L c = encoded.EventType d = encoded.Data - m = null + m = Unchecked.defaultof t = 
DateTimeOffset.MinValue } - let ser = JsonConvert.SerializeObject(e) + let ser = IntegrationJsonSerializer.serialize(e) test <@ ser.Contains("\"d\":\"") @> - let des = JsonConvert.DeserializeObject(ser) + let des = IntegrationJsonSerializer.deserialize(ser) let d = FsCodec.Core.TimelineEvent.Create(-1L, des.c, des.d) let decoded = eventCodec.TryDecode d |> Option.get - test <@ value = decoded @> \ No newline at end of file + test <@ value = decoded @> From c121ca2ff5e12f4807015e7094d26060b3078b2b Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Thu, 27 Feb 2020 17:56:42 -0500 Subject: [PATCH 21/71] Create Tutorial STJ codecs --- samples/Tutorial/Gapless.fs | 26 ++++++++++++++++++++++++-- samples/Tutorial/Index.fs | 24 ++++++++++++++++++++++-- samples/Tutorial/Sequence.fs | 18 +++++++++++++++++- samples/Tutorial/Set.fs | 24 ++++++++++++++++++++++-- samples/Tutorial/Upload.fs | 18 +++++++++++++++++- 5 files changed, 102 insertions(+), 8 deletions(-) diff --git a/samples/Tutorial/Gapless.fs b/samples/Tutorial/Gapless.fs index b16991b90..58c4fa0d8 100644 --- a/samples/Tutorial/Gapless.fs +++ b/samples/Tutorial/Gapless.fs @@ -79,9 +79,31 @@ let [] appName = "equinox-tutorial-gapless" module Cosmos = open Equinox.Cosmos + open Equinox.Cosmos.Json + open System.Text.Json + + module Codec = + open Events + + let encode (evt: Event) = + match evt with + | Reserved item -> "Reserved", JsonSerializer.SerializeToElement(item, JsonSerializer.defaultOptions) + | Confirmed item -> "Confirmed", JsonSerializer.SerializeToElement(item, JsonSerializer.defaultOptions) + | Released item -> "Released", JsonSerializer.SerializeToElement(item, JsonSerializer.defaultOptions) + | Snapshotted snapshot -> "Snapshotted", JsonSerializer.SerializeToElement(snapshot, JsonSerializer.defaultOptions) + + let tryDecode (eventType, data: JsonElement) = + match eventType with + | "Reserved" -> Some (Reserved <| JsonSerializer.DeserializeElement(data)) + | "Confirmed" -> Some (Confirmed <| 
JsonSerializer.DeserializeElement(data)) + | "Released" -> Some (Released <| JsonSerializer.DeserializeElement(data)) + | "Snapshotted" -> Some (Snapshotted <| JsonSerializer.DeserializeElement(data)) + | _ -> None + let private createService (context,cache,accessStrategy) = let cacheStrategy = CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) // OR CachingStrategy.NoCaching - let resolve = Resolver(context, Events.codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy).Resolve + let codec = FsCodec.Codec.Create(Codec.encode, Codec.tryDecode) + let resolve = Resolver(context, codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy).Resolve Service(Serilog.Log.Logger, resolve) module Snapshot = @@ -94,4 +116,4 @@ module Cosmos = let createService (context,cache) = let accessStrategy = AccessStrategy.RollingState Fold.snapshot - createService(context,cache,accessStrategy) \ No newline at end of file + createService(context,cache,accessStrategy) diff --git a/samples/Tutorial/Index.fs b/samples/Tutorial/Index.fs index c45cd944c..7e95d4591 100644 --- a/samples/Tutorial/Index.fs +++ b/samples/Tutorial/Index.fs @@ -54,14 +54,34 @@ let create resolve indexId = Service(indexId, resolve, maxAttempts = 3) module Cosmos = open Equinox.Cosmos + open Equinox.Cosmos.Json + open System.Text.Json + + module Codec = + open Events + + let encode<'v> (evt: Event<'v>) = + match evt with + | Added items -> "Added", JsonSerializer.SerializeToElement(items, JsonSerializer.defaultOptions) + | Deleted itemIds -> "Deleted", JsonSerializer.SerializeToElement(itemIds, JsonSerializer.defaultOptions) + | Snapshotted items -> "Snapshotted", JsonSerializer.SerializeToElement(items, JsonSerializer.defaultOptions) + + let tryDecode<'v> (eventType, data: JsonElement) = + match eventType with + | "Added" -> Some (Added <| JsonSerializer.DeserializeElement>(data)) + | "Deleted" -> Some (Deleted <| JsonSerializer.DeserializeElement(data)) + | "Snapshotted" -> Some (Snapshotted 
<| JsonSerializer.DeserializeElement>(data)) + | _ -> None + let createService<'v> (context,cache) = let cacheStrategy = CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) let accessStrategy = AccessStrategy.RollingState Fold.snapshot - let resolve = Resolver(context, Events.codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy).Resolve + let codec = FsCodec.Codec.Create, JsonElement>(Codec.encode<'v>, Codec.tryDecode<'v>) + let resolve = Resolver(context, codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy).Resolve create resolve module MemoryStore = let createService store = let resolve = Equinox.MemoryStore.Resolver(store, Events.codec, Fold.fold, Fold.initial).Resolve - create resolve \ No newline at end of file + create resolve diff --git a/samples/Tutorial/Sequence.fs b/samples/Tutorial/Sequence.fs index 36e6aa633..99ce3c5d7 100644 --- a/samples/Tutorial/Sequence.fs +++ b/samples/Tutorial/Sequence.fs @@ -54,9 +54,25 @@ let create resolve = Service(Serilog.Log.ForContext(), resolve, maxAtte module Cosmos = open Equinox.Cosmos + open Equinox.Cosmos.Json + open System.Text.Json + + module Codec = + open Events + + let encode (evt: Event) = + match evt with + | Reserved reserved -> "Reserved", JsonSerializer.SerializeToElement(reserved, JsonSerializer.defaultOptions) + + let tryDecode (eventType, data: JsonElement) = + match eventType with + | "Reserved" -> Some (Reserved <| JsonSerializer.DeserializeElement(data)) + | _ -> None + let private createService (context,cache,accessStrategy) = let cacheStrategy = CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) 
// OR CachingStrategy.NoCaching - let resolve = Resolver(context, Events.codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy).Resolve + let codec = FsCodec.Codec.Create(Codec.encode, Codec.tryDecode) + let resolve = Resolver(context, codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy).Resolve create resolve module LatestKnownEvent = diff --git a/samples/Tutorial/Set.fs b/samples/Tutorial/Set.fs index 4e7437b60..e917584a9 100644 --- a/samples/Tutorial/Set.fs +++ b/samples/Tutorial/Set.fs @@ -54,14 +54,34 @@ let create resolve setId = Service(Serilog.Log.ForContext(), setId, res module Cosmos = open Equinox.Cosmos + open Equinox.Cosmos.Json + open System.Text.Json + + module Codec = + open Events + + let encode (evt: Event) = + match evt with + | Added items -> "Added", JsonSerializer.SerializeToElement(items, JsonSerializer.defaultOptions) + | Deleted items -> "Deleted", JsonSerializer.SerializeToElement(items, JsonSerializer.defaultOptions) + | Snapshotted items -> "Snapshotted", JsonSerializer.SerializeToElement(items, JsonSerializer.defaultOptions) + + let tryDecode (eventType, data: JsonElement) = + match eventType with + | "Added" -> Some (Added <| JsonSerializer.DeserializeElement(data)) + | "Deleted" -> Some (Deleted <| JsonSerializer.DeserializeElement(data)) + | "Snapshotted" -> Some (Snapshotted <| JsonSerializer.DeserializeElement(data)) + | _ -> None + let createService (context,cache) = let cacheStrategy = CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) 
let accessStrategy = AccessStrategy.RollingState Fold.snapshot - let resolve = Resolver(context, Events.codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy).Resolve + let codec = FsCodec.Codec.Create(Codec.encode, Codec.tryDecode) + let resolve = Resolver(context, codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy).Resolve create resolve module MemoryStore = let createService store = let resolve = Equinox.MemoryStore.Resolver(store, Events.codec, Fold.fold, Fold.initial).Resolve - create resolve \ No newline at end of file + create resolve diff --git a/samples/Tutorial/Upload.fs b/samples/Tutorial/Upload.fs index 5de5e7a6b..7023b07b5 100644 --- a/samples/Tutorial/Upload.fs +++ b/samples/Tutorial/Upload.fs @@ -69,9 +69,25 @@ let create resolve = Service(Serilog.Log.ForContext(), resolve, 3) module Cosmos = open Equinox.Cosmos + open Equinox.Cosmos.Json + open System.Text.Json + + module Codec = + open Events + + let encode (evt: Event) = + match evt with + | IdAssigned id -> "IdAssigned", JsonSerializer.SerializeToElement(id, JsonSerializer.defaultOptions) + + let tryDecode (eventType, data: JsonElement) = + match eventType with + | "IdAssigned" -> Some (IdAssigned <| JsonSerializer.DeserializeElement(data)) + | _ -> None + let createService (context,cache) = let cacheStrategy = CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) 
// OR CachingStrategy.NoCaching - let resolve = Resolver(context, Events.codec, Fold.fold, Fold.initial, cacheStrategy, AccessStrategy.LatestKnownEvent).Resolve + let codec = FsCodec.Codec.Create(Codec.encode, Codec.tryDecode) + let resolve = Resolver(context, codec, Fold.fold, Fold.initial, cacheStrategy, AccessStrategy.LatestKnownEvent).Resolve create resolve module EventStore = From 48a874e53a6495b573d4522d2f740d89d4335582 Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Fri, 28 Feb 2020 10:33:18 -0500 Subject: [PATCH 22/71] Create Store integration STJ codecs --- samples/Store/Integration/CartIntegration.fs | 14 +++++----- .../ContactPreferencesIntegration.fs | 28 ++++++++++++++----- .../Store/Integration/FavoritesIntegration.fs | 28 +++++++++++++++---- 3 files changed, 51 insertions(+), 19 deletions(-) diff --git a/samples/Store/Integration/CartIntegration.fs b/samples/Store/Integration/CartIntegration.fs index cbd377a58..adc708bba 100644 --- a/samples/Store/Integration/CartIntegration.fs +++ b/samples/Store/Integration/CartIntegration.fs @@ -17,17 +17,17 @@ let createMemoryStore () = let createServiceMemory log store = Backend.Cart.Service(log, fun (id,opt) -> MemoryStore.Resolver(store, Domain.Cart.Events.codec, fold, initial).Resolve(id,?option=opt)) -let codec = Domain.Cart.Events.codec - +let eventStoreCodec = Domain.Cart.Events.codec let resolveGesStreamWithRollingSnapshots gateway = - fun (id,opt) -> EventStore.Resolver(gateway, codec, fold, initial, access = AccessStrategy.RollingSnapshots snapshot).Resolve(id,?option=opt) + fun (id,opt) -> EventStore.Resolver(gateway, eventStoreCodec, fold, initial, access = AccessStrategy.RollingSnapshots snapshot).Resolve(id,?option=opt) let resolveGesStreamWithoutCustomAccessStrategy gateway = - fun (id,opt) -> EventStore.Resolver(gateway, codec, fold, initial).Resolve(id,?option=opt) + fun (id,opt) -> EventStore.Resolver(gateway, eventStoreCodec, fold, initial).Resolve(id,?option=opt) +let cosmosCodec = 
Equinox.Cosmos.Integration.CosmosIntegration.Cart.codec let resolveCosmosStreamWithSnapshotStrategy gateway = - fun (id,opt) -> Cosmos.Resolver(gateway, codec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.Snapshot snapshot).Resolve(id,?option=opt) + fun (id,opt) -> Cosmos.Resolver(gateway, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.Snapshot snapshot).Resolve(id,?option=opt) let resolveCosmosStreamWithoutCustomAccessStrategy gateway = - fun (id,opt) -> Cosmos.Resolver(gateway, codec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.Unoptimized).Resolve(id,?option=opt) + fun (id,opt) -> Cosmos.Resolver(gateway, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.Unoptimized).Resolve(id,?option=opt) let addAndThenRemoveItemsManyTimesExceptTheLastOne context cartId skuId (service: Backend.Cart.Service) count = service.ExecuteManyAsync(cartId, false, seq { @@ -82,4 +82,4 @@ type Tests(testOutputHelper) = let ``Can roundtrip against Cosmos, correctly folding the events with With Snapshotting`` args = Async.RunSynchronously <| async { let! service = arrange connectToSpecifiedCosmosOrSimulator createCosmosContext resolveCosmosStreamWithSnapshotStrategy do! 
act service args - } \ No newline at end of file + } diff --git a/samples/Store/Integration/ContactPreferencesIntegration.fs b/samples/Store/Integration/ContactPreferencesIntegration.fs index b592a9d90..3b3710cb7 100644 --- a/samples/Store/Integration/ContactPreferencesIntegration.fs +++ b/samples/Store/Integration/ContactPreferencesIntegration.fs @@ -4,6 +4,7 @@ open Equinox open Equinox.Cosmos.Integration open Swensen.Unquote open Xunit +open System.Text.Json #nowarn "1182" // From hereon in, we may have some 'unused' privates (the tests) @@ -14,19 +15,32 @@ let createMemoryStore () = let createServiceMemory log store = Backend.ContactPreferences.Service(log, MemoryStore.Resolver(store, FsCodec.Box.Codec.Create(), fold, initial).Resolve) -let codec = Domain.ContactPreferences.Events.codec +let eventStoreCodec = Domain.ContactPreferences.Events.codec let resolveStreamGesWithOptimizedStorageSemantics gateway = - EventStore.Resolver(gateway 1, codec, fold, initial, access = EventStore.AccessStrategy.LatestKnownEvent).Resolve + EventStore.Resolver(gateway 1, eventStoreCodec, fold, initial, access = EventStore.AccessStrategy.LatestKnownEvent).Resolve let resolveStreamGesWithoutAccessStrategy gateway = - EventStore.Resolver(gateway defaultBatchSize, codec, fold, initial).Resolve + EventStore.Resolver(gateway defaultBatchSize, eventStoreCodec, fold, initial).Resolve +module CosmosCodec = + open Domain.ContactPreferences.Events + + let encode (evt: Event) = + match evt with + | Updated value -> "contactPreferencesChanged", IntegrationJsonSerializer.serializeToElement(value) + + let tryDecode (eventType, data: JsonElement) = + match eventType with + | "contactPreferencesChanged" -> Some (Updated <| IntegrationJsonSerializer.deserializeElement(data)) + | _ -> None + +let cosmosCodec = FsCodec.Codec.Create(CosmosCodec.encode, CosmosCodec.tryDecode) let resolveStreamCosmosWithLatestKnownEventSemantics gateway = - Cosmos.Resolver(gateway 1, codec, fold, initial, 
Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.LatestKnownEvent).Resolve + Cosmos.Resolver(gateway 1, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.LatestKnownEvent).Resolve let resolveStreamCosmosUnoptimized gateway = - Cosmos.Resolver(gateway defaultBatchSize, codec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.Unoptimized).Resolve + Cosmos.Resolver(gateway defaultBatchSize, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.Unoptimized).Resolve let resolveStreamCosmosRollingUnfolds gateway = let access = Cosmos.AccessStrategy.Custom(Domain.ContactPreferences.Fold.isOrigin, Domain.ContactPreferences.Fold.transmute) - Cosmos.Resolver(gateway defaultBatchSize, codec, fold, initial, Cosmos.CachingStrategy.NoCaching, access).Resolve + Cosmos.Resolver(gateway defaultBatchSize, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, access).Resolve type Tests(testOutputHelper) = let testOutput = TestOutputAdapter testOutputHelper @@ -79,4 +93,4 @@ type Tests(testOutputHelper) = let ``Can roundtrip against Cosmos, correctly folding the events with RollingUnfold semantics`` args = Async.RunSynchronously <| async { let! service = arrange connectToSpecifiedCosmosOrSimulator createCosmosContext resolveStreamCosmosRollingUnfolds do! 
act service args - } \ No newline at end of file + } diff --git a/samples/Store/Integration/FavoritesIntegration.fs b/samples/Store/Integration/FavoritesIntegration.fs index 424e4c42d..664169ae3 100644 --- a/samples/Store/Integration/FavoritesIntegration.fs +++ b/samples/Store/Integration/FavoritesIntegration.fs @@ -3,6 +3,7 @@ open Equinox open Equinox.Cosmos.Integration open Swensen.Unquote +open System.Text.Json #nowarn "1182" // From hereon in, we may have some 'unused' privates (the tests) @@ -14,18 +15,35 @@ let createMemoryStore () = let createServiceMemory log store = Backend.Favorites.Service(log, MemoryStore.Resolver(store, FsCodec.Box.Codec.Create(), fold, initial).Resolve) -let codec = Domain.Favorites.Events.codec +let eventStoreCodec = Domain.Favorites.Events.codec let createServiceGes gateway log = - let resolve = EventStore.Resolver(gateway, codec, fold, initial, access = EventStore.AccessStrategy.RollingSnapshots snapshot).Resolve + let resolve = EventStore.Resolver(gateway, eventStoreCodec, fold, initial, access = EventStore.AccessStrategy.RollingSnapshots snapshot).Resolve Backend.Favorites.Service(log, resolve) +module CosmosCodec = + open Domain.Favorites.Events + + let encode (evt: Event) = + match evt with + | Snapshotted snapshotted -> "Snapshotted", IntegrationJsonSerializer.serializeToElement(snapshotted) + | Favorited favorited -> "Favorited", IntegrationJsonSerializer.serializeToElement(favorited) + | Unfavorited unfavorited -> "Unfavorited", IntegrationJsonSerializer.serializeToElement(unfavorited) + + let tryDecode (eventType, data: JsonElement) = + match eventType with + | "Snapshotted" -> Some (Snapshotted <| IntegrationJsonSerializer.deserializeElement(data)) + | "Favorited" -> Some (Favorited <| IntegrationJsonSerializer.deserializeElement(data)) + | "Unfavorited" -> Some (Unfavorited <| IntegrationJsonSerializer.deserializeElement(data)) + | _ -> None + +let cosmosCodec = FsCodec.Codec.Create(CosmosCodec.encode, 
CosmosCodec.tryDecode) let createServiceCosmos gateway log = - let resolve = Cosmos.Resolver(gateway, codec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.Snapshot snapshot).Resolve + let resolve = Cosmos.Resolver(gateway, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.Snapshot snapshot).Resolve Backend.Favorites.Service(log, resolve) let createServiceCosmosRollingState gateway log = let access = Cosmos.AccessStrategy.RollingState Domain.Favorites.Fold.snapshot - let resolve = Cosmos.Resolver(gateway, codec, fold, initial, Cosmos.CachingStrategy.NoCaching, access).Resolve + let resolve = Cosmos.Resolver(gateway, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, access).Resolve Backend.Favorites.Service(log, resolve) type Tests(testOutputHelper) = @@ -74,4 +92,4 @@ type Tests(testOutputHelper) = let gateway = createCosmosContext conn defaultBatchSize let service = createServiceCosmosRollingState gateway log do! act service args - } \ No newline at end of file + } From b231595faeae68b6f6acd65438a3011c43efe1e2 Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Fri, 28 Feb 2020 14:10:59 -0500 Subject: [PATCH 23/71] Move STJ code to Equinox --- src/Equinox.Cosmos/Equinox.Cosmos.fsproj | 4 - src/Equinox.Cosmos/Json/JsonElementHelpers.fs | 20 --- .../Json/JsonRecordConverter.fs | 154 ------------------ src/Equinox.Cosmos/Json/Options.fs | 14 -- .../Json/Utf8JsonReaderExtensions.fs | 22 --- src/Equinox/Equinox.fsproj | 5 + 6 files changed, 5 insertions(+), 214 deletions(-) delete mode 100644 src/Equinox.Cosmos/Json/JsonElementHelpers.fs delete mode 100644 src/Equinox.Cosmos/Json/JsonRecordConverter.fs delete mode 100644 src/Equinox.Cosmos/Json/Options.fs delete mode 100644 src/Equinox.Cosmos/Json/Utf8JsonReaderExtensions.fs diff --git a/src/Equinox.Cosmos/Equinox.Cosmos.fsproj b/src/Equinox.Cosmos/Equinox.Cosmos.fsproj index ce3b281ce..03b5f97e9 100644 --- a/src/Equinox.Cosmos/Equinox.Cosmos.fsproj 
+++ b/src/Equinox.Cosmos/Equinox.Cosmos.fsproj @@ -10,10 +10,6 @@ - - - - diff --git a/src/Equinox.Cosmos/Json/JsonElementHelpers.fs b/src/Equinox.Cosmos/Json/JsonElementHelpers.fs deleted file mode 100644 index be64a85a0..000000000 --- a/src/Equinox.Cosmos/Json/JsonElementHelpers.fs +++ /dev/null @@ -1,20 +0,0 @@ -namespace Equinox.Cosmos - -open System -open System.Buffers -open System.Runtime.InteropServices -open System.Text.Json - -[] -module JsonSerializerExtensions = - type JsonSerializer with - static member SerializeToElement(value: 'T, [] ?options: JsonSerializerOptions) = - JsonSerializer.Deserialize(ReadOnlySpan.op_Implicit(JsonSerializer.SerializeToUtf8Bytes(value, defaultArg options null))) - - static member DeserializeElement<'T>(element: JsonElement, [] ?options: JsonSerializerOptions) = - let bufferWriter = ArrayBufferWriter() - ( - use jsonWriter = new Utf8JsonWriter(bufferWriter) - element.WriteTo(jsonWriter) - ) - JsonSerializer.Deserialize<'T>(bufferWriter.WrittenSpan, defaultArg options null) diff --git a/src/Equinox.Cosmos/Json/JsonRecordConverter.fs b/src/Equinox.Cosmos/Json/JsonRecordConverter.fs deleted file mode 100644 index f574977ab..000000000 --- a/src/Equinox.Cosmos/Json/JsonRecordConverter.fs +++ /dev/null @@ -1,154 +0,0 @@ -namespace Equinox.Cosmos.Json - -open System -open System.Collections.Generic -open System.Linq.Expressions -open System.Text.Json -open System.Text.Json.Serialization -open FSharp.Reflection - -type JsonRecordConverterActivator = delegate of JsonSerializerOptions -> JsonConverter - -type IRecordFieldConverter = - abstract member Initialize: converter: JsonConverter -> unit - abstract member Read: reader: byref * typ: Type * options: JsonSerializerOptions -> obj - abstract member Write: writer: Utf8JsonWriter * value: obj * options: JsonSerializerOptions -> unit - -type RecordFieldConverter<'F> () = - let mutable converter = Unchecked.defaultof> - - interface IRecordFieldConverter with - member __.Initialize (c) 
= - converter <- c :?> JsonConverter<'F> - - member __.Read (reader, typ, options) = - converter.Read(&reader, typ, options) :> obj - - member __.Write (writer, value, options) = - converter.Write(writer, value :?> 'F, options) - -[] -type RecordField = { - Name: string - Type: Type - Index: int - IsIgnored: bool - Converter: IRecordFieldConverter option -} - -type JsonRecordConverter<'T> (options: JsonSerializerOptions) = - inherit JsonConverter<'T> () - - let recordType = typeof<'T> - - let constructor = FSharpValue.PreComputeRecordConstructor(recordType, true) - let getFieldValues = FSharpValue.PreComputeRecordReader(typeof<'T>, true) - - let fields = - FSharpType.GetRecordFields(recordType, true) - |> Array.mapi (fun idx f -> - { - Name = - f.GetCustomAttributes(typedefof, true) - |> Array.tryHead - |> Option.map (fun attr -> (attr :?> JsonPropertyNameAttribute).Name) - |> Option.defaultWith (fun () -> - if options.PropertyNamingPolicy |> isNull - then f.Name - else options.PropertyNamingPolicy.ConvertName f.Name) - - Type = f.PropertyType - Index = idx - IsIgnored = f.GetCustomAttributes(typeof, true) |> Array.isEmpty |> not - Converter = - f.GetCustomAttributes(typeof, true) - |> Array.tryHead - |> Option.map (fun attr -> attr :?> JsonConverterAttribute) - |> Option.bind (fun attr -> - let baseConverter = attr.CreateConverter(f.PropertyType) - - if baseConverter |> isNull then - failwithf "Field %s is decorated with a JsonConverter attribute, but it does not implement a CreateConverter method." 
f.Name - - if baseConverter.CanConvert(f.PropertyType) then - let converterType = typedefof>.MakeGenericType(f.PropertyType) - let converter = Activator.CreateInstance(converterType) :?> IRecordFieldConverter - converter.Initialize(baseConverter) - Some converter - else - None - ) - }) - - let fieldsByName = - fields - |> Array.map (fun f -> f.Name, f) - |> Array.map KeyValuePair.Create - |> (fun kvp -> Dictionary(kvp, StringComparer.OrdinalIgnoreCase)) - - let tryGetFieldByName name = - match fieldsByName.TryGetValue(name) with - | true, field -> Some field - | _ -> None - - let getFieldByName name = - match tryGetFieldByName name with - | Some field -> field - | _ -> KeyNotFoundException(sprintf "Failed to find a field named '%s' on record type '%s'." name recordType.Name) |> raise - - override __.Read (reader, typ, options) = - reader.ValidateTokenType(JsonTokenType.StartObject) - - let fields = Array.zeroCreate <| fields.Length - - while reader.Read() && reader.TokenType <> JsonTokenType.EndObject do - reader.ValidateTokenType(JsonTokenType.PropertyName) - - match tryGetFieldByName <| reader.GetString() with - | Some field -> - fields.[field.Index] <- - match field.Converter with - | Some converter -> - reader.Read() |> ignore - converter.Read(&reader, field.Type, options) - | None -> - JsonSerializer.Deserialize(&reader, field.Type, options) - | _ -> - reader.Skip() - - constructor fields :?> 'T - - override __.Write (writer, record, options) = - writer.WriteStartObject() - - let fieldValues = getFieldValues record - - (fields, fieldValues) - ||> Array.iter2 (fun field value -> - match value with - | :? 
JsonElement as je when je.ValueKind = JsonValueKind.Undefined -> () - | _ -> - if not field.IsIgnored && not (options.IgnoreNullValues && isNull value) then - writer.WritePropertyName(field.Name) - - match field.Converter with - | Some converter -> converter.Write(writer, value, options) - | None -> JsonSerializer.Serialize(writer, value, options)) - - writer.WriteEndObject() - -type JsonRecordConverter () = - inherit JsonConverterFactory() - - override __.CanConvert typ = - FSharpType.IsRecord (typ, true) - - override __.CreateConverter (typ, options) = - let constructor = typedefof>.MakeGenericType(typ).GetConstructor(typeof |> Array.singleton) - let optionsParameter = Expression.Parameter(typeof, "options") - - let newExpression = Expression.New(constructor, optionsParameter) - let lambda = Expression.Lambda(typeof, newExpression, optionsParameter) - - let activator = lambda.Compile() :?> JsonRecordConverterActivator - activator.Invoke(options) diff --git a/src/Equinox.Cosmos/Json/Options.fs b/src/Equinox.Cosmos/Json/Options.fs deleted file mode 100644 index b928ccbc8..000000000 --- a/src/Equinox.Cosmos/Json/Options.fs +++ /dev/null @@ -1,14 +0,0 @@ -namespace Equinox.Cosmos.Json - -open System.Text.Json - -[] -module JsonSerializerOptionExtensions = - type JsonSerializerOptions with - static member Create() = - let options = JsonSerializerOptions() - options.Converters.Add(new JsonRecordConverter()) - options - -module JsonSerializer = - let defaultOptions = JsonSerializerOptions.Create() diff --git a/src/Equinox.Cosmos/Json/Utf8JsonReaderExtensions.fs b/src/Equinox.Cosmos/Json/Utf8JsonReaderExtensions.fs deleted file mode 100644 index a1bb391db..000000000 --- a/src/Equinox.Cosmos/Json/Utf8JsonReaderExtensions.fs +++ /dev/null @@ -1,22 +0,0 @@ -namespace Equinox.Cosmos.Json - -open System.Text.Json -open System.Runtime.CompilerServices - -[] -type Utf8JsonReaderExtension = - [] - static member ValidateTokenType(reader: Utf8JsonReader, expectedTokenType) = - if 
reader.TokenType <> expectedTokenType then - sprintf "Expected a %A token, but encountered a %A token when parsing JSON." expectedTokenType (reader.TokenType) - |> JsonException - |> raise - - [] - static member ValidatePropertyName(reader: Utf8JsonReader, expectedPropertyName: string) = - reader.ValidateTokenType(JsonTokenType.PropertyName) - - if not <| reader.ValueTextEquals expectedPropertyName then - sprintf "Expected a property named '%s', but encounted property with name '%s'." expectedPropertyName (reader.GetString()) - |> JsonException - |> raise diff --git a/src/Equinox/Equinox.fsproj b/src/Equinox/Equinox.fsproj index 8ab6f3d94..bd322beb8 100644 --- a/src/Equinox/Equinox.fsproj +++ b/src/Equinox/Equinox.fsproj @@ -9,6 +9,10 @@ + + + + @@ -20,6 +24,7 @@ + \ No newline at end of file From dd97e50c2353e7413eefda09caf6d82407b1a4ad Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Fri, 28 Feb 2020 14:23:48 -0500 Subject: [PATCH 24/71] Add FsCodec.STJ to Equinox --- src/Equinox/Json/JsonElementHelpers.fs | 25 +++ src/Equinox/Json/JsonRecordConverter.fs | 160 +++++++++++++++++++ src/Equinox/Json/Options.fs | 14 ++ src/Equinox/Json/Utf8JsonReaderExtensions.fs | 22 +++ 4 files changed, 221 insertions(+) create mode 100644 src/Equinox/Json/JsonElementHelpers.fs create mode 100644 src/Equinox/Json/JsonRecordConverter.fs create mode 100644 src/Equinox/Json/Options.fs create mode 100644 src/Equinox/Json/Utf8JsonReaderExtensions.fs diff --git a/src/Equinox/Json/JsonElementHelpers.fs b/src/Equinox/Json/JsonElementHelpers.fs new file mode 100644 index 000000000..f753f73f8 --- /dev/null +++ b/src/Equinox/Json/JsonElementHelpers.fs @@ -0,0 +1,25 @@ +namespace FsCodec.SystemTextJson + +open System +open System.Buffers +open System.Runtime.InteropServices +open System.Text.Json + +[] +module JsonSerializerExtensions = + type JsonSerializer with + static member SerializeToElement(value: 'T, [] ?options: JsonSerializerOptions) = + 
JsonSerializer.Deserialize(ReadOnlySpan.op_Implicit(JsonSerializer.SerializeToUtf8Bytes(value, defaultArg options null))) + + static member DeserializeElement<'T>(element: JsonElement, [] ?options: JsonSerializerOptions) = +#if NETSTANDARD2_1 + let bufferWriter = ArrayBufferWriter() + ( + use jsonWriter = new Utf8JsonWriter(bufferWriter) + element.WriteTo(jsonWriter) + ) + JsonSerializer.Deserialize<'T>(bufferWriter.WrittenSpan, defaultArg options null) +#else + let json = element.GetRawText() + JsonSerializer.Deserialize<'T>(json, defaultArg options null) +#endif diff --git a/src/Equinox/Json/JsonRecordConverter.fs b/src/Equinox/Json/JsonRecordConverter.fs new file mode 100644 index 000000000..c796f887e --- /dev/null +++ b/src/Equinox/Json/JsonRecordConverter.fs @@ -0,0 +1,160 @@ +namespace FsCodec.SystemTextJson.Serialization + +open System +open System.Collections.Generic +open System.Linq +open System.Linq.Expressions +open System.Text.Json +open System.Text.Json.Serialization +open FSharp.Reflection + +type JsonRecordConverterActivator = delegate of JsonSerializerOptions -> JsonConverter + +type IRecordFieldConverter = + abstract member Initialize: converter: JsonConverter -> unit + abstract member Read: reader: byref * typ: Type * options: JsonSerializerOptions -> obj + abstract member Write: writer: Utf8JsonWriter * value: obj * options: JsonSerializerOptions -> unit + +type RecordFieldConverter<'F> () = + let mutable converter = Unchecked.defaultof> + + interface IRecordFieldConverter with + member __.Initialize (c) = + converter <- c :?> JsonConverter<'F> + + member __.Read (reader, typ, options) = + converter.Read(&reader, typ, options) :> obj + + member __.Write (writer, value, options) = + converter.Write(writer, value :?> 'F, options) + +[] +type RecordField = { + Name: string + Type: Type + Index: int + IsIgnored: bool + Converter: IRecordFieldConverter option +} + +type JsonRecordConverter<'T> (options: JsonSerializerOptions) = + inherit 
JsonConverter<'T> () + + let recordType = typeof<'T> + + let constructor = FSharpValue.PreComputeRecordConstructor(recordType, true) + let getFieldValues = FSharpValue.PreComputeRecordReader(typeof<'T>, true) + + let fields = + FSharpType.GetRecordFields(recordType, true) + |> Array.mapi (fun idx f -> + { + Name = + f.GetCustomAttributes(typedefof, true) + |> Array.tryHead + |> Option.map (fun attr -> (attr :?> JsonPropertyNameAttribute).Name) + |> Option.defaultWith (fun () -> + if options.PropertyNamingPolicy |> isNull + then f.Name + else options.PropertyNamingPolicy.ConvertName f.Name) + + Type = f.PropertyType + Index = idx + IsIgnored = f.GetCustomAttributes(typeof, true) |> Array.isEmpty |> not + Converter = + f.GetCustomAttributes(typeof, true) + |> Array.tryHead + |> Option.map (fun attr -> attr :?> JsonConverterAttribute) + |> Option.bind (fun attr -> + let baseConverter = attr.CreateConverter(f.PropertyType) + + if baseConverter |> isNull then + failwithf "Field %s is decorated with a JsonConverter attribute, but it does not implement a CreateConverter method." 
f.Name + + if baseConverter.CanConvert(f.PropertyType) then + let converterType = typedefof>.MakeGenericType(f.PropertyType) + let converter = Activator.CreateInstance(converterType) :?> IRecordFieldConverter + converter.Initialize(baseConverter) + Some converter + else + None + ) + }) + + let fieldsByName = + fields + |> Array.map (fun f -> f.Name, f) +#if NETSTANDARD2_1 + |> Array.map KeyValuePair.Create + |> (fun kvp -> Dictionary(kvp, StringComparer.OrdinalIgnoreCase)) +#else + |> Array.map KeyValuePair + |> (fun kvp -> kvp.ToDictionary((fun item -> item.Key), (fun item -> item.Value), StringComparer.OrdinalIgnoreCase)) +#endif + + let tryGetFieldByName name = + match fieldsByName.TryGetValue(name) with + | true, field -> Some field + | _ -> None + + let getFieldByName name = + match tryGetFieldByName name with + | Some field -> field + | _ -> KeyNotFoundException(sprintf "Failed to find a field named '%s' on record type '%s'." name recordType.Name) |> raise + + override __.Read (reader, typ, options) = + reader.ValidateTokenType(JsonTokenType.StartObject) + + let fields = Array.zeroCreate <| fields.Length + + while reader.Read() && reader.TokenType <> JsonTokenType.EndObject do + reader.ValidateTokenType(JsonTokenType.PropertyName) + + match tryGetFieldByName <| reader.GetString() with + | Some field -> + fields.[field.Index] <- + match field.Converter with + | Some converter -> + reader.Read() |> ignore + converter.Read(&reader, field.Type, options) + | None -> + JsonSerializer.Deserialize(&reader, field.Type, options) + | _ -> + reader.Skip() + + constructor fields :?> 'T + + override __.Write (writer, record, options) = + writer.WriteStartObject() + + let fieldValues = getFieldValues record + + (fields, fieldValues) + ||> Array.iter2 (fun field value -> + match value with + | :? 
JsonElement as je when je.ValueKind = JsonValueKind.Undefined -> () + | _ -> + if not field.IsIgnored && not (options.IgnoreNullValues && isNull value) then + writer.WritePropertyName(field.Name) + + match field.Converter with + | Some converter -> converter.Write(writer, value, options) + | None -> JsonSerializer.Serialize(writer, value, options)) + + writer.WriteEndObject() + +type JsonRecordConverter () = + inherit JsonConverterFactory() + + override __.CanConvert typ = + FSharpType.IsRecord (typ, true) + + override __.CreateConverter (typ, options) = + let constructor = typedefof>.MakeGenericType(typ).GetConstructor(typeof |> Array.singleton) + let optionsParameter = Expression.Parameter(typeof, "options") + + let newExpression = Expression.New(constructor, optionsParameter) + let lambda = Expression.Lambda(typeof, newExpression, optionsParameter) + + let activator = lambda.Compile() :?> JsonRecordConverterActivator + activator.Invoke(options) diff --git a/src/Equinox/Json/Options.fs b/src/Equinox/Json/Options.fs new file mode 100644 index 000000000..6867c76f6 --- /dev/null +++ b/src/Equinox/Json/Options.fs @@ -0,0 +1,14 @@ +namespace FsCodec.SystemTextJson.Serialization + +open System.Text.Json + +[] +module JsonSerializerOptionExtensions = + type JsonSerializerOptions with + static member Create() = + let options = JsonSerializerOptions() + options.Converters.Add(new JsonRecordConverter()) + options + +module JsonSerializer = + let defaultOptions = JsonSerializerOptions.Create() diff --git a/src/Equinox/Json/Utf8JsonReaderExtensions.fs b/src/Equinox/Json/Utf8JsonReaderExtensions.fs new file mode 100644 index 000000000..56c423392 --- /dev/null +++ b/src/Equinox/Json/Utf8JsonReaderExtensions.fs @@ -0,0 +1,22 @@ +namespace FsCodec.SystemTextJson.Serialization + +open System.Text.Json +open System.Runtime.CompilerServices + +[] +type Utf8JsonReaderExtension = + [] + static member ValidateTokenType(reader: Utf8JsonReader, expectedTokenType) = + if reader.TokenType 
<> expectedTokenType then + sprintf "Expected a %A token, but encountered a %A token when parsing JSON." expectedTokenType (reader.TokenType) + |> JsonException + |> raise + + [] + static member ValidatePropertyName(reader: Utf8JsonReader, expectedPropertyName: string) = + reader.ValidateTokenType(JsonTokenType.PropertyName) + + if not <| reader.ValueTextEquals expectedPropertyName then + sprintf "Expected a property named '%s', but encounted property with name '%s'." expectedPropertyName (reader.GetString()) + |> JsonException + |> raise From 7973a736386a136d710f522ba223a0c3b11c4b0f Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Fri, 28 Feb 2020 15:30:13 -0500 Subject: [PATCH 25/71] Move FsCodec.STJ to Equinox.Core; Fix net461 --- src/Equinox.Core/Equinox.Core.fsproj | 4 ++++ src/Equinox.Core/Infrastructure.fs | 4 ++++ src/{Equinox => Equinox.Core}/Json/JsonElementHelpers.fs | 0 src/{Equinox => Equinox.Core}/Json/JsonRecordConverter.fs | 1 + src/{Equinox => Equinox.Core}/Json/Options.fs | 0 .../Json/Utf8JsonReaderExtensions.fs | 0 src/Equinox/Equinox.fsproj | 4 ---- 7 files changed, 9 insertions(+), 4 deletions(-) rename src/{Equinox => Equinox.Core}/Json/JsonElementHelpers.fs (100%) rename src/{Equinox => Equinox.Core}/Json/JsonRecordConverter.fs (99%) rename src/{Equinox => Equinox.Core}/Json/Options.fs (100%) rename src/{Equinox => Equinox.Core}/Json/Utf8JsonReaderExtensions.fs (100%) diff --git a/src/Equinox.Core/Equinox.Core.fsproj b/src/Equinox.Core/Equinox.Core.fsproj index 95dde0c5c..c0e768822 100644 --- a/src/Equinox.Core/Equinox.Core.fsproj +++ b/src/Equinox.Core/Equinox.Core.fsproj @@ -17,6 +17,10 @@ + + + + diff --git a/src/Equinox.Core/Infrastructure.fs b/src/Equinox.Core/Infrastructure.fs index 0834c2520..aaa90d24a 100755 --- a/src/Equinox.Core/Infrastructure.fs +++ b/src/Equinox.Core/Infrastructure.fs @@ -12,6 +12,8 @@ type OAttribute = System.Runtime.InteropServices.OptionalAttribute type DAttribute = 
System.Runtime.InteropServices.DefaultParameterValueAttribute #if NET461 +let isNull v = v = null + module Array = let tryHead (array : 'T[]) = if array.Length = 0 then None @@ -28,12 +30,14 @@ module Array = elif predicate array.[i] then Some i else loop (i - 1) loop (array.Length - 1) + let singleton v = Array.create 1 v module Option = let filter predicate option = match option with None -> None | Some x -> if predicate x then Some x else None let toNullable option = match option with Some x -> Nullable x | None -> Nullable () let ofObj obj = match obj with null -> None | x -> Some x let toObj option = match option with None -> null | Some x -> x + let defaultWith f = function | Some v -> v | _ -> f() #endif type Async with diff --git a/src/Equinox/Json/JsonElementHelpers.fs b/src/Equinox.Core/Json/JsonElementHelpers.fs similarity index 100% rename from src/Equinox/Json/JsonElementHelpers.fs rename to src/Equinox.Core/Json/JsonElementHelpers.fs diff --git a/src/Equinox/Json/JsonRecordConverter.fs b/src/Equinox.Core/Json/JsonRecordConverter.fs similarity index 99% rename from src/Equinox/Json/JsonRecordConverter.fs rename to src/Equinox.Core/Json/JsonRecordConverter.fs index c796f887e..eca328820 100644 --- a/src/Equinox/Json/JsonRecordConverter.fs +++ b/src/Equinox.Core/Json/JsonRecordConverter.fs @@ -7,6 +7,7 @@ open System.Linq.Expressions open System.Text.Json open System.Text.Json.Serialization open FSharp.Reflection +open Equinox.Core type JsonRecordConverterActivator = delegate of JsonSerializerOptions -> JsonConverter diff --git a/src/Equinox/Json/Options.fs b/src/Equinox.Core/Json/Options.fs similarity index 100% rename from src/Equinox/Json/Options.fs rename to src/Equinox.Core/Json/Options.fs diff --git a/src/Equinox/Json/Utf8JsonReaderExtensions.fs b/src/Equinox.Core/Json/Utf8JsonReaderExtensions.fs similarity index 100% rename from src/Equinox/Json/Utf8JsonReaderExtensions.fs rename to src/Equinox.Core/Json/Utf8JsonReaderExtensions.fs diff --git 
a/src/Equinox/Equinox.fsproj b/src/Equinox/Equinox.fsproj index bd322beb8..417cbffcb 100644 --- a/src/Equinox/Equinox.fsproj +++ b/src/Equinox/Equinox.fsproj @@ -9,10 +9,6 @@ - - - - From e2d3d6bda2a786593fe4e0027c5ecb6c30b8f2eb Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Fri, 28 Feb 2020 17:38:14 -0500 Subject: [PATCH 26/71] Pair codecs together with Event types --- samples/Store/Backend/Backend.fsproj | 2 +- samples/Store/Domain/Cart.fs | 31 +++++++++++- samples/Store/Domain/ContactPreferences.fs | 24 ++++++++- samples/Store/Domain/Domain.fsproj | 2 +- samples/Tutorial/Gapless.fs | 49 ++++++++++--------- samples/Tutorial/Index.fs | 47 ++++++++++-------- samples/Tutorial/Sequence.fs | 36 ++++++++------ samples/Tutorial/Set.fs | 47 ++++++++++-------- samples/Tutorial/Upload.fs | 38 +++++++------- src/Equinox.Cosmos/Cosmos.fs | 2 +- src/Equinox.Cosmos/CosmosJsonSerializer.fs | 1 - .../CosmosIntegration.fs | 37 +------------- tests/Equinox.Cosmos.Integration/Json.fs | 4 +- 13 files changed, 178 insertions(+), 142 deletions(-) diff --git a/samples/Store/Backend/Backend.fsproj b/samples/Store/Backend/Backend.fsproj index 8234b4a6c..fec9bcd4f 100644 --- a/samples/Store/Backend/Backend.fsproj +++ b/samples/Store/Backend/Backend.fsproj @@ -18,7 +18,7 @@ - + diff --git a/samples/Store/Domain/Cart.fs b/samples/Store/Domain/Cart.fs index 348515519..1b0562b89 100644 --- a/samples/Store/Domain/Cart.fs +++ b/samples/Store/Domain/Cart.fs @@ -1,5 +1,8 @@ module Domain.Cart +open System.Text.Json +open FsCodec.SystemTextJson + // NOTE - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = @@ -24,7 +27,31 @@ module Events = | ItemQuantityChanged of ItemQuantityChangeInfo | ItemWaiveReturnsChanged of ItemWaiveReturnsInfo interface TypeShape.UnionContract.IUnionContract - let codec = FsCodec.NewtonsoftJson.Codec.Create() + + module Utf8ArrayCodec = + let codec = FsCodec.NewtonsoftJson.Codec.Create() + 
+ module JsonElementCodec = + let private encode (options: JsonSerializerOptions) = + fun (evt: Event) -> + match evt with + | Snapshotted state -> "Snapshotted", JsonSerializer.SerializeToElement(state, options) + | ItemAdded addInfo -> "ItemAdded", JsonSerializer.SerializeToElement(addInfo, options) + | ItemRemoved removeInfo -> "ItemRemoved", JsonSerializer.SerializeToElement(removeInfo, options) + | ItemQuantityChanged changeInfo -> "ItemQuantityChanged", JsonSerializer.SerializeToElement(changeInfo, options) + | ItemWaiveReturnsChanged waiveInfo -> "ItemWaiveReturnsChanged", JsonSerializer.SerializeToElement(waiveInfo, options) + + let private tryDecode (options: JsonSerializerOptions) = + fun (eventType, data: JsonElement) -> + match eventType with + | "Snapshotted" -> Some (Snapshotted <| JsonSerializer.DeserializeElement(data, options)) + | "ItemAdded" -> Some (ItemAdded <| JsonSerializer.DeserializeElement(data, options)) + | "ItemRemoved" -> Some (ItemRemoved <| JsonSerializer.DeserializeElement(data, options)) + | "ItemQuantityChanged" -> Some (ItemQuantityChanged <| JsonSerializer.DeserializeElement(data, options)) + | "ItemWaiveReturnsChanged" -> Some (ItemWaiveReturnsChanged <| JsonSerializer.DeserializeElement(data, options)) + | _ -> None + + let codec options = FsCodec.Codec.Create(encode options, tryDecode options) module Fold = type ItemInfo = { skuId: SkuId; quantity: int; returnsWaived: bool } @@ -79,4 +106,4 @@ module Commands = match waived with | Some waived when itemExistsWithDifferentWaiveStatus skuId waived -> yield Events.ItemWaiveReturnsChanged { context = c; skuId = skuId; waived = waived } - | _ -> () ] \ No newline at end of file + | _ -> () ] diff --git a/samples/Store/Domain/ContactPreferences.fs b/samples/Store/Domain/ContactPreferences.fs index 263efebef..28185a0a2 100644 --- a/samples/Store/Domain/ContactPreferences.fs +++ b/samples/Store/Domain/ContactPreferences.fs @@ -1,5 +1,7 @@ module Domain.ContactPreferences +open 
System.Text.Json + type Id = Id of email: string // NOTE - these types and the union case names reflect the actual storage formats and hence need to be versioned with care @@ -13,7 +15,25 @@ module Events = type Event = | []Updated of Value interface TypeShape.UnionContract.IUnionContract - let codec = FsCodec.NewtonsoftJson.Codec.Create() + + module Utf8ArrayCodec = + let codec = FsCodec.NewtonsoftJson.Codec.Create() + + module JsonElementCodec = + open FsCodec.SystemTextJson + + let private encode (options: JsonSerializerOptions) = + fun (evt: Event) -> + match evt with + | Updated value -> "contactPreferencesChanged", JsonSerializer.SerializeToElement(value, options) + + let private tryDecode (options: JsonSerializerOptions) = + fun (eventType, data: JsonElement) -> + match eventType with + | "contactPreferencesChanged" -> Some (Updated <| JsonSerializer.DeserializeElement(data)) + | _ -> None + + let codec options = FsCodec.Codec.Create(encode options, tryDecode options) module Fold = @@ -37,4 +57,4 @@ module Commands = match command with | Update ({ preferences = preferences } as value) -> if state = preferences then [] else - [ Events.Updated value ] \ No newline at end of file + [ Events.Updated value ] diff --git a/samples/Store/Domain/Domain.fsproj b/samples/Store/Domain/Domain.fsproj index 5960596f2..d2f66d3b3 100644 --- a/samples/Store/Domain/Domain.fsproj +++ b/samples/Store/Domain/Domain.fsproj @@ -25,7 +25,7 @@ - + \ No newline at end of file diff --git a/samples/Tutorial/Gapless.fs b/samples/Tutorial/Gapless.fs index 58c4fa0d8..f68ddde2c 100644 --- a/samples/Tutorial/Gapless.fs +++ b/samples/Tutorial/Gapless.fs @@ -3,6 +3,7 @@ module Gapless open System +open System.Text.Json // NOTE - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = @@ -18,7 +19,30 @@ module Events = | Released of Item | Snapshotted of Snapshotted interface TypeShape.UnionContract.IUnionContract - let 
codec = FsCodec.NewtonsoftJson.Codec.Create() + + module Utf8ArrayCodec = + let codec = FsCodec.NewtonsoftJson.Codec.Create() + + module JsonElementCodec = + open FsCodec.SystemTextJson + + let private encode (options: JsonSerializerOptions) = fun (evt: Event) -> + match evt with + | Reserved item -> "Reserved", JsonSerializer.SerializeToElement(item, options) + | Confirmed item -> "Confirmed", JsonSerializer.SerializeToElement(item, options) + | Released item -> "Released", JsonSerializer.SerializeToElement(item, options) + | Snapshotted snapshot -> "Snapshotted", JsonSerializer.SerializeToElement(snapshot, options) + + let private tryDecode (options: JsonSerializerOptions) = fun (eventType, data: JsonElement) -> + match eventType with + | "Reserved" -> Some (Reserved <| JsonSerializer.DeserializeElement(data, options)) + | "Confirmed" -> Some (Confirmed <| JsonSerializer.DeserializeElement(data, options)) + | "Released" -> Some (Released <| JsonSerializer.DeserializeElement(data, options)) + | "Snapshotted" -> Some (Snapshotted <| JsonSerializer.DeserializeElement(data, options)) + | _ -> None + + let codec options = FsCodec.Codec.Create(encode options, tryDecode options) + module Fold = @@ -79,30 +103,11 @@ let [] appName = "equinox-tutorial-gapless" module Cosmos = open Equinox.Cosmos - open Equinox.Cosmos.Json - open System.Text.Json - - module Codec = - open Events - - let encode (evt: Event) = - match evt with - | Reserved item -> "Reserved", JsonSerializer.SerializeToElement(item, JsonSerializer.defaultOptions) - | Confirmed item -> "Confirmed", JsonSerializer.SerializeToElement(item, JsonSerializer.defaultOptions) - | Released item -> "Released", JsonSerializer.SerializeToElement(item, JsonSerializer.defaultOptions) - | Snapshotted snapshot -> "Snapshotted", JsonSerializer.SerializeToElement(snapshot, JsonSerializer.defaultOptions) - - let tryDecode (eventType, data: JsonElement) = - match eventType with - | "Reserved" -> Some (Reserved <| 
JsonSerializer.DeserializeElement(data)) - | "Confirmed" -> Some (Confirmed <| JsonSerializer.DeserializeElement(data)) - | "Released" -> Some (Released <| JsonSerializer.DeserializeElement(data)) - | "Snapshotted" -> Some (Snapshotted <| JsonSerializer.DeserializeElement(data)) - | _ -> None + open FsCodec.SystemTextJson.Serialization let private createService (context,cache,accessStrategy) = let cacheStrategy = CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) // OR CachingStrategy.NoCaching - let codec = FsCodec.Codec.Create(Codec.encode, Codec.tryDecode) + let codec = Events.JsonElementCodec.codec JsonSerializer.defaultOptions let resolve = Resolver(context, codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy).Resolve Service(Serilog.Log.Logger, resolve) diff --git a/samples/Tutorial/Index.fs b/samples/Tutorial/Index.fs index 7e95d4591..fa89ce696 100644 --- a/samples/Tutorial/Index.fs +++ b/samples/Tutorial/Index.fs @@ -1,5 +1,7 @@ module Index +open System.Text.Json + // NOTE - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = @@ -13,7 +15,27 @@ module Events = | Deleted of ItemIds | Snapshotted of Items<'v> interface TypeShape.UnionContract.IUnionContract - let codec<'v> = FsCodec.NewtonsoftJson.Codec.Create>() + + module Utf8ArrayCodec = + let codec<'v> = FsCodec.NewtonsoftJson.Codec.Create>() + + module JsonElementCodec = + open FsCodec.SystemTextJson + + let private encode<'v> (options: JsonSerializerOptions) = fun (evt: Event<'v>) -> + match evt with + | Added items -> "Added", JsonSerializer.SerializeToElement(items, options) + | Deleted itemIds -> "Deleted", JsonSerializer.SerializeToElement(itemIds, options) + | Snapshotted items -> "Snapshotted", JsonSerializer.SerializeToElement(items, options) + + let private tryDecode<'v> (options: JsonSerializerOptions) = fun (eventType, data: JsonElement) -> + match eventType with + | "Added" -> Some (Added <| 
JsonSerializer.DeserializeElement>(data, options)) + | "Deleted" -> Some (Deleted <| JsonSerializer.DeserializeElement(data, options)) + | "Snapshotted" -> Some (Snapshotted <| JsonSerializer.DeserializeElement>(data, options)) + | _ -> None + + let codec<'v> options = FsCodec.Codec.Create, JsonElement>(encode<'v> options, tryDecode<'v> options) module Fold = @@ -54,34 +76,17 @@ let create resolve indexId = Service(indexId, resolve, maxAttempts = 3) module Cosmos = open Equinox.Cosmos - open Equinox.Cosmos.Json - open System.Text.Json - - module Codec = - open Events - - let encode<'v> (evt: Event<'v>) = - match evt with - | Added items -> "Added", JsonSerializer.SerializeToElement(items, JsonSerializer.defaultOptions) - | Deleted itemIds -> "Deleted", JsonSerializer.SerializeToElement(itemIds, JsonSerializer.defaultOptions) - | Snapshotted items -> "Snapshotted", JsonSerializer.SerializeToElement(items, JsonSerializer.defaultOptions) - - let tryDecode<'v> (eventType, data: JsonElement) = - match eventType with - | "Added" -> Some (Added <| JsonSerializer.DeserializeElement>(data)) - | "Deleted" -> Some (Deleted <| JsonSerializer.DeserializeElement(data)) - | "Snapshotted" -> Some (Snapshotted <| JsonSerializer.DeserializeElement>(data)) - | _ -> None + open FsCodec.SystemTextJson.Serialization let createService<'v> (context,cache) = let cacheStrategy = CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) 
let accessStrategy = AccessStrategy.RollingState Fold.snapshot - let codec = FsCodec.Codec.Create, JsonElement>(Codec.encode<'v>, Codec.tryDecode<'v>) + let codec = Events.JsonElementCodec.codec<'v> JsonSerializer.defaultOptions let resolve = Resolver(context, codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy).Resolve create resolve module MemoryStore = let createService store = - let resolve = Equinox.MemoryStore.Resolver(store, Events.codec, Fold.fold, Fold.initial).Resolve + let resolve = Equinox.MemoryStore.Resolver(store, Events.Utf8ArrayCodec.codec, Fold.fold, Fold.initial).Resolve create resolve diff --git a/samples/Tutorial/Sequence.fs b/samples/Tutorial/Sequence.fs index 99ce3c5d7..82e06f061 100644 --- a/samples/Tutorial/Sequence.fs +++ b/samples/Tutorial/Sequence.fs @@ -3,6 +3,7 @@ module Sequence open System +open System.Text.Json // shim for net461 module Seq = @@ -25,7 +26,23 @@ module Events = type Event = | Reserved of Reserved interface TypeShape.UnionContract.IUnionContract - let codec = FsCodec.NewtonsoftJson.Codec.Create() + + module Utf8ArrayCodec = + let codec = FsCodec.NewtonsoftJson.Codec.Create() + + module JsonElementCodec = + open FsCodec.SystemTextJson + + let private encode (options: JsonSerializerOptions) = fun (evt: Event) -> + match evt with + | Reserved reserved -> "Reserved", JsonSerializer.SerializeToElement(reserved, options) + + let private tryDecode (options: JsonSerializerOptions) = fun (eventType, data: JsonElement) -> + match eventType with + | "Reserved" -> Some (Reserved <| JsonSerializer.DeserializeElement(data, options)) + | _ -> None + + let codec options= FsCodec.Codec.Create(encode options, tryDecode options) module Fold = @@ -54,24 +71,11 @@ let create resolve = Service(Serilog.Log.ForContext(), resolve, maxAtte module Cosmos = open Equinox.Cosmos - open Equinox.Cosmos.Json - open System.Text.Json - - module Codec = - open Events - - let encode (evt: Event) = - match evt with - | Reserved reserved -> 
"Reserved", JsonSerializer.SerializeToElement(reserved, JsonSerializer.defaultOptions) - - let tryDecode (eventType, data: JsonElement) = - match eventType with - | "Reserved" -> Some (Reserved <| JsonSerializer.DeserializeElement(data)) - | _ -> None + open FsCodec.SystemTextJson.Serialization let private createService (context,cache,accessStrategy) = let cacheStrategy = CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) // OR CachingStrategy.NoCaching - let codec = FsCodec.Codec.Create(Codec.encode, Codec.tryDecode) + let codec = Events.JsonElementCodec.codec JsonSerializer.defaultOptions let resolve = Resolver(context, codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy).Resolve create resolve diff --git a/samples/Tutorial/Set.fs b/samples/Tutorial/Set.fs index e917584a9..15ac00bf4 100644 --- a/samples/Tutorial/Set.fs +++ b/samples/Tutorial/Set.fs @@ -1,5 +1,7 @@ module Set +open System.Text.Json + // NOTE - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = @@ -12,7 +14,27 @@ module Events = | Deleted of Items | Snapshotted of Items interface TypeShape.UnionContract.IUnionContract - let codec = FsCodec.NewtonsoftJson.Codec.Create() + + module Utf8ArrayCodec = + let codec = FsCodec.NewtonsoftJson.Codec.Create() + + module JsonElementCodec = + open FsCodec.SystemTextJson + + let private encode (options: JsonSerializerOptions) = fun (evt: Event) -> + match evt with + | Added items -> "Added", JsonSerializer.SerializeToElement(items, options) + | Deleted items -> "Deleted", JsonSerializer.SerializeToElement(items, options) + | Snapshotted items -> "Snapshotted", JsonSerializer.SerializeToElement(items, options) + + let private tryDecode (options: JsonSerializerOptions) = fun (eventType, data: JsonElement) -> + match eventType with + | "Added" -> Some (Added <| JsonSerializer.DeserializeElement(data, options)) + | "Deleted" -> Some (Deleted <| 
JsonSerializer.DeserializeElement(data, options)) + | "Snapshotted" -> Some (Snapshotted <| JsonSerializer.DeserializeElement(data, options)) + | _ -> None + + let codec options = FsCodec.Codec.Create(encode options, tryDecode options) module Fold = @@ -54,34 +76,17 @@ let create resolve setId = Service(Serilog.Log.ForContext(), setId, res module Cosmos = open Equinox.Cosmos - open Equinox.Cosmos.Json - open System.Text.Json - - module Codec = - open Events - - let encode (evt: Event) = - match evt with - | Added items -> "Added", JsonSerializer.SerializeToElement(items, JsonSerializer.defaultOptions) - | Deleted items -> "Deleted", JsonSerializer.SerializeToElement(items, JsonSerializer.defaultOptions) - | Snapshotted items -> "Snapshotted", JsonSerializer.SerializeToElement(items, JsonSerializer.defaultOptions) - - let tryDecode (eventType, data: JsonElement) = - match eventType with - | "Added" -> Some (Added <| JsonSerializer.DeserializeElement(data)) - | "Deleted" -> Some (Deleted <| JsonSerializer.DeserializeElement(data)) - | "Snapshotted" -> Some (Snapshotted <| JsonSerializer.DeserializeElement(data)) - | _ -> None + open FsCodec.SystemTextJson.Serialization let createService (context,cache) = let cacheStrategy = CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) 
let accessStrategy = AccessStrategy.RollingState Fold.snapshot - let codec = FsCodec.Codec.Create(Codec.encode, Codec.tryDecode) + let codec = Events.JsonElementCodec.codec JsonSerializer.defaultOptions let resolve = Resolver(context, codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy).Resolve create resolve module MemoryStore = let createService store = - let resolve = Equinox.MemoryStore.Resolver(store, Events.codec, Fold.fold, Fold.initial).Resolve + let resolve = Equinox.MemoryStore.Resolver(store, Events.Utf8ArrayCodec.codec, Fold.fold, Fold.initial).Resolve create resolve diff --git a/samples/Tutorial/Upload.fs b/samples/Tutorial/Upload.fs index 7023b07b5..f6a8ca6bc 100644 --- a/samples/Tutorial/Upload.fs +++ b/samples/Tutorial/Upload.fs @@ -2,6 +2,7 @@ module Upload open System +open System.Text.Json open FSharp.UMX // shim for net461 @@ -40,7 +41,23 @@ module Events = type Event = | IdAssigned of IdAssigned interface TypeShape.UnionContract.IUnionContract - let codec = FsCodec.NewtonsoftJson.Codec.Create() + + module Utf8ArrayCodec = + let codec = FsCodec.NewtonsoftJson.Codec.Create() + + module JsonElementCodec = + open FsCodec.SystemTextJson + + let encode (options: JsonSerializerOptions) = fun (evt: Event) -> + match evt with + | IdAssigned id -> "IdAssigned", JsonSerializer.SerializeToElement(id, options) + + let tryDecode (options: JsonSerializerOptions) = fun (eventType, data: JsonElement) -> + match eventType with + | "IdAssigned" -> Some (IdAssigned <| JsonSerializer.DeserializeElement(data, options)) + | _ -> None + + let codec options = FsCodec.Codec.Create(encode options, tryDecode options) module Fold = @@ -69,29 +86,16 @@ let create resolve = Service(Serilog.Log.ForContext(), resolve, 3) module Cosmos = open Equinox.Cosmos - open Equinox.Cosmos.Json - open System.Text.Json - - module Codec = - open Events - - let encode (evt: Event) = - match evt with - | IdAssigned id -> "IdAssigned", JsonSerializer.SerializeToElement(id, 
JsonSerializer.defaultOptions) - - let tryDecode (eventType, data: JsonElement) = - match eventType with - | "IdAssigned" -> Some (IdAssigned <| JsonSerializer.DeserializeElement(data)) - | _ -> None + open FsCodec.SystemTextJson.Serialization let createService (context,cache) = let cacheStrategy = CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) // OR CachingStrategy.NoCaching - let codec = FsCodec.Codec.Create(Codec.encode, Codec.tryDecode) + let codec = Events.JsonElementCodec.codec JsonSerializer.defaultOptions let resolve = Resolver(context, codec, Fold.fold, Fold.initial, cacheStrategy, AccessStrategy.LatestKnownEvent).Resolve create resolve module EventStore = open Equinox.EventStore let createService context = - let resolve = Resolver(context, Events.codec, Fold.fold, Fold.initial, access=AccessStrategy.LatestKnownEvent).Resolve + let resolve = Resolver(context, Events.Utf8ArrayCodec.codec, Fold.fold, Fold.initial, access=AccessStrategy.LatestKnownEvent).Resolve create resolve diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index 82dfff513..463309a3b 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -795,7 +795,7 @@ namespace Equinox.Cosmos open Equinox open Equinox.Core -open Equinox.Cosmos.Json +open FsCodec.SystemTextJson.Serialization open Equinox.Cosmos.Store open FsCodec open FSharp.Control diff --git a/src/Equinox.Cosmos/CosmosJsonSerializer.fs b/src/Equinox.Cosmos/CosmosJsonSerializer.fs index 612291257..2d17c85f5 100644 --- a/src/Equinox.Cosmos/CosmosJsonSerializer.fs +++ b/src/Equinox.Cosmos/CosmosJsonSerializer.fs @@ -4,7 +4,6 @@ open System.IO open System.Text.Json open Azure.Cosmos.Serialization open Equinox.Core -open Equinox.Cosmos.Json type CosmosJsonSerializer (options: JsonSerializerOptions) = inherit CosmosSerializer() diff --git a/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs b/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs index e5057337a..b6c733e81 
100644 --- a/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs +++ b/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs @@ -2,7 +2,6 @@ open System open System.Threading -open System.Text.Json open Domain open Equinox.Cosmos open Equinox.Cosmos.Integration.Infrastructure @@ -10,29 +9,9 @@ open FSharp.UMX open Swensen.Unquote module Cart = - module Codec = - open Domain.Cart.Events - - let encode (evt: Event) = - match evt with - | Snapshotted state -> "Snapshotted", IntegrationJsonSerializer.serializeToElement(state) - | ItemAdded addInfo -> "ItemAdded", IntegrationJsonSerializer.serializeToElement(addInfo) - | ItemRemoved removeInfo -> "ItemRemoved", IntegrationJsonSerializer.serializeToElement(removeInfo) - | ItemQuantityChanged changeInfo -> "ItemQuantityChanged", IntegrationJsonSerializer.serializeToElement(changeInfo) - | ItemWaiveReturnsChanged waiveInfo -> "ItemWaiveReturnsChanged", IntegrationJsonSerializer.serializeToElement(waiveInfo) - - let tryDecode (eventType, data: JsonElement) = - match eventType with - | "Snapshotted" -> Some (Snapshotted <| IntegrationJsonSerializer.deserializeElement(data)) - | "ItemAdded" -> Some (ItemAdded <| IntegrationJsonSerializer.deserializeElement(data)) - | "ItemRemoved" -> Some (ItemRemoved <| IntegrationJsonSerializer.deserializeElement(data)) - | "ItemQuantityChanged" -> Some (ItemQuantityChanged <| IntegrationJsonSerializer.deserializeElement(data)) - | "ItemWaiveReturnsChanged" -> Some (ItemWaiveReturnsChanged <| IntegrationJsonSerializer.deserializeElement(data)) - | _ -> None - let fold, initial = Domain.Cart.Fold.fold, Domain.Cart.Fold.initial let snapshot = Domain.Cart.Fold.isOrigin, Domain.Cart.Fold.snapshot - let codec = FsCodec.Codec.Create(Codec.encode, Codec.tryDecode) + let codec = Domain.Cart.Events.JsonElementCodec.codec IntegrationJsonSerializer.options let createServiceWithoutOptimization connection batchSize log = let store = createCosmosContext connection batchSize let resolve (id,opt) = 
Resolver(store, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.Unoptimized).Resolve(id,?option=opt) @@ -60,20 +39,8 @@ module Cart = Backend.Cart.Service(log, resolve) module ContactPreferences = - module Codec = - open Domain.ContactPreferences.Events - - let encode (evt: Event) = - match evt with - | Updated value -> "contactPreferencesChanged", IntegrationJsonSerializer.serializeToElement(value) - - let tryDecode (eventType, data: JsonElement) = - match eventType with - | "contactPreferencesChanged" -> Some (Updated <| IntegrationJsonSerializer.deserializeElement(data)) - | _ -> None - let fold, initial = Domain.ContactPreferences.Fold.fold, Domain.ContactPreferences.Fold.initial - let codec = FsCodec.Codec.Create(Codec.encode, Codec.tryDecode) + let codec = Domain.ContactPreferences.Events.JsonElementCodec.codec IntegrationJsonSerializer.options let createServiceWithoutOptimization createGateway defaultBatchSize log _ignoreWindowSize _ignoreCompactionPredicate = let gateway = createGateway defaultBatchSize let resolve = Resolver(gateway, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.Unoptimized).Resolve diff --git a/tests/Equinox.Cosmos.Integration/Json.fs b/tests/Equinox.Cosmos.Integration/Json.fs index bcd37fad0..398679399 100644 --- a/tests/Equinox.Cosmos.Integration/Json.fs +++ b/tests/Equinox.Cosmos.Integration/Json.fs @@ -1,12 +1,12 @@ [] module Equinox.Cosmos.Integration.Json -open Equinox.Cosmos -open Equinox.Cosmos.Json open System open System.Text.Json open System.Text.Json.Serialization open Domain +open FsCodec.SystemTextJson +open FsCodec.SystemTextJson.Serialization type JsonSkuIdConverter () = inherit JsonConverter() From 125e3b0500a50140dcfeb776adcda26e5aaaf194 Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Mon, 2 Mar 2020 10:35:44 -0500 Subject: [PATCH 27/71] Pair codecs together with Event types --- samples/Store/Domain/Favorites.fs | 26 +++++++++++++++++-- samples/Store/Integration/CartIntegration.fs | 7 
++--- .../ContactPreferencesIntegration.fs | 18 +++---------- .../Store/Integration/FavoritesIntegration.fs | 22 +++------------- .../StoreIntegration.fs | 8 +++--- 5 files changed, 38 insertions(+), 43 deletions(-) diff --git a/samples/Store/Domain/Favorites.fs b/samples/Store/Domain/Favorites.fs index d9e0bef13..89f0151a8 100644 --- a/samples/Store/Domain/Favorites.fs +++ b/samples/Store/Domain/Favorites.fs @@ -1,5 +1,7 @@ module Domain.Favorites +open System.Text.Json + // NOTE - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = @@ -14,7 +16,27 @@ module Events = | Favorited of Favorited | Unfavorited of Unfavorited interface TypeShape.UnionContract.IUnionContract - let codec = FsCodec.NewtonsoftJson.Codec.Create() + + module Utf8ArrayCodec = + let codec = FsCodec.NewtonsoftJson.Codec.Create() + + module JsonElementCodec = + open FsCodec.SystemTextJson + + let private encode (options: JsonSerializerOptions) = fun (evt: Event) -> + match evt with + | Snapshotted snapshotted -> "Snapshotted", JsonSerializer.SerializeToElement(snapshotted, options) + | Favorited favorited -> "Favorited", JsonSerializer.SerializeToElement(favorited, options) + | Unfavorited unfavorited -> "Unfavorited", JsonSerializer.SerializeToElement(unfavorited, options) + + let private tryDecode (options: JsonSerializerOptions) = fun (eventType, data: JsonElement) -> + match eventType with + | "Snapshotted" -> Some (Snapshotted <| JsonSerializer.DeserializeElement(data, options)) + | "Favorited" -> Some (Favorited <| JsonSerializer.DeserializeElement(data, options)) + | "Unfavorited" -> Some (Unfavorited <| JsonSerializer.DeserializeElement(data, options)) + | _ -> None + + let codec options = FsCodec.Codec.Create(encode options, tryDecode options) module Fold = @@ -56,4 +78,4 @@ module Commands = yield Events.Favorited { date = date; skuId = skuId } ] | Unfavorite skuId -> if doesntHave skuId then [] else - [ 
Events.Unfavorited { skuId = skuId } ] \ No newline at end of file + [ Events.Unfavorited { skuId = skuId } ] diff --git a/samples/Store/Integration/CartIntegration.fs b/samples/Store/Integration/CartIntegration.fs index adc708bba..8fec369ec 100644 --- a/samples/Store/Integration/CartIntegration.fs +++ b/samples/Store/Integration/CartIntegration.fs @@ -5,6 +5,7 @@ open Equinox.Cosmos.Integration open Equinox.EventStore open Equinox.MemoryStore open Swensen.Unquote +open FsCodec.SystemTextJson.Serialization #nowarn "1182" // From hereon in, we may have some 'unused' privates (the tests) @@ -15,15 +16,15 @@ let createMemoryStore () = // we want to validate that the JSON UTF8 is working happily VolatileStore() let createServiceMemory log store = - Backend.Cart.Service(log, fun (id,opt) -> MemoryStore.Resolver(store, Domain.Cart.Events.codec, fold, initial).Resolve(id,?option=opt)) + Backend.Cart.Service(log, fun (id,opt) -> MemoryStore.Resolver(store, Domain.Cart.Events.Utf8ArrayCodec.codec, fold, initial).Resolve(id,?option=opt)) -let eventStoreCodec = Domain.Cart.Events.codec +let eventStoreCodec = Domain.Cart.Events.Utf8ArrayCodec.codec let resolveGesStreamWithRollingSnapshots gateway = fun (id,opt) -> EventStore.Resolver(gateway, eventStoreCodec, fold, initial, access = AccessStrategy.RollingSnapshots snapshot).Resolve(id,?option=opt) let resolveGesStreamWithoutCustomAccessStrategy gateway = fun (id,opt) -> EventStore.Resolver(gateway, eventStoreCodec, fold, initial).Resolve(id,?option=opt) -let cosmosCodec = Equinox.Cosmos.Integration.CosmosIntegration.Cart.codec +let cosmosCodec = Domain.Cart.Events.JsonElementCodec.codec JsonSerializer.defaultOptions let resolveCosmosStreamWithSnapshotStrategy gateway = fun (id,opt) -> Cosmos.Resolver(gateway, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.Snapshot snapshot).Resolve(id,?option=opt) let resolveCosmosStreamWithoutCustomAccessStrategy gateway = diff --git 
a/samples/Store/Integration/ContactPreferencesIntegration.fs b/samples/Store/Integration/ContactPreferencesIntegration.fs index 3b3710cb7..dc5c2a9bf 100644 --- a/samples/Store/Integration/ContactPreferencesIntegration.fs +++ b/samples/Store/Integration/ContactPreferencesIntegration.fs @@ -4,7 +4,7 @@ open Equinox open Equinox.Cosmos.Integration open Swensen.Unquote open Xunit -open System.Text.Json +open FsCodec.SystemTextJson.Serialization #nowarn "1182" // From hereon in, we may have some 'unused' privates (the tests) @@ -15,25 +15,13 @@ let createMemoryStore () = let createServiceMemory log store = Backend.ContactPreferences.Service(log, MemoryStore.Resolver(store, FsCodec.Box.Codec.Create(), fold, initial).Resolve) -let eventStoreCodec = Domain.ContactPreferences.Events.codec +let eventStoreCodec = Domain.ContactPreferences.Events.Utf8ArrayCodec.codec let resolveStreamGesWithOptimizedStorageSemantics gateway = EventStore.Resolver(gateway 1, eventStoreCodec, fold, initial, access = EventStore.AccessStrategy.LatestKnownEvent).Resolve let resolveStreamGesWithoutAccessStrategy gateway = EventStore.Resolver(gateway defaultBatchSize, eventStoreCodec, fold, initial).Resolve -module CosmosCodec = - open Domain.ContactPreferences.Events - - let encode (evt: Event) = - match evt with - | Updated value -> "contactPreferencesChanged", IntegrationJsonSerializer.serializeToElement(value) - - let tryDecode (eventType, data: JsonElement) = - match eventType with - | "contactPreferencesChanged" -> Some (Updated <| IntegrationJsonSerializer.deserializeElement(data)) - | _ -> None - -let cosmosCodec = FsCodec.Codec.Create(CosmosCodec.encode, CosmosCodec.tryDecode) +let cosmosCodec = Domain.ContactPreferences.Events.JsonElementCodec.codec JsonSerializer.defaultOptions let resolveStreamCosmosWithLatestKnownEventSemantics gateway = Cosmos.Resolver(gateway 1, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.LatestKnownEvent).Resolve let 
resolveStreamCosmosUnoptimized gateway = diff --git a/samples/Store/Integration/FavoritesIntegration.fs b/samples/Store/Integration/FavoritesIntegration.fs index 664169ae3..aedb868ce 100644 --- a/samples/Store/Integration/FavoritesIntegration.fs +++ b/samples/Store/Integration/FavoritesIntegration.fs @@ -3,7 +3,7 @@ open Equinox open Equinox.Cosmos.Integration open Swensen.Unquote -open System.Text.Json +open FsCodec.SystemTextJson.Serialization #nowarn "1182" // From hereon in, we may have some 'unused' privates (the tests) @@ -15,28 +15,12 @@ let createMemoryStore () = let createServiceMemory log store = Backend.Favorites.Service(log, MemoryStore.Resolver(store, FsCodec.Box.Codec.Create(), fold, initial).Resolve) -let eventStoreCodec = Domain.Favorites.Events.codec +let eventStoreCodec = Domain.Favorites.Events.Utf8ArrayCodec.codec let createServiceGes gateway log = let resolve = EventStore.Resolver(gateway, eventStoreCodec, fold, initial, access = EventStore.AccessStrategy.RollingSnapshots snapshot).Resolve Backend.Favorites.Service(log, resolve) -module CosmosCodec = - open Domain.Favorites.Events - - let encode (evt: Event) = - match evt with - | Snapshotted snapshotted -> "Snapshotted", IntegrationJsonSerializer.serializeToElement(snapshotted) - | Favorited favorited -> "Favorited", IntegrationJsonSerializer.serializeToElement(favorited) - | Unfavorited unfavorited -> "Unfavorited", IntegrationJsonSerializer.serializeToElement(unfavorited) - - let tryDecode (eventType, data: JsonElement) = - match eventType with - | "Snapshotted" -> Some (Snapshotted <| IntegrationJsonSerializer.deserializeElement(data)) - | "Favorited" -> Some (Favorited <| IntegrationJsonSerializer.deserializeElement(data)) - | "Unfavorited" -> Some (Unfavorited <| IntegrationJsonSerializer.deserializeElement(data)) - | _ -> None - -let cosmosCodec = FsCodec.Codec.Create(CosmosCodec.encode, CosmosCodec.tryDecode) +let cosmosCodec = Domain.Favorites.Events.JsonElementCodec.codec 
JsonSerializer.defaultOptions let createServiceCosmos gateway log = let resolve = Cosmos.Resolver(gateway, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.Snapshot snapshot).Resolve Backend.Favorites.Service(log, resolve) diff --git a/tests/Equinox.EventStore.Integration/StoreIntegration.fs b/tests/Equinox.EventStore.Integration/StoreIntegration.fs index 16370df83..f2e4c76c2 100644 --- a/tests/Equinox.EventStore.Integration/StoreIntegration.fs +++ b/tests/Equinox.EventStore.Integration/StoreIntegration.fs @@ -48,10 +48,10 @@ let createGesGateway connection batchSize = Context(connection, BatchingPolicy(m module Cart = let fold, initial = Domain.Cart.Fold.fold, Domain.Cart.Fold.initial - let codec = Domain.Cart.Events.codec + let codec = Domain.Cart.Events.Utf8ArrayCodec.codec let snapshot = Domain.Cart.Fold.isOrigin, Domain.Cart.Fold.snapshot let createServiceWithoutOptimization log gateway = - Backend.Cart.Service(log, fun (id,opt) -> Resolver(gateway, Domain.Cart.Events.codec, fold, initial).Resolve(id,?option=opt)) + Backend.Cart.Service(log, fun (id,opt) -> Resolver(gateway, Domain.Cart.Events.Utf8ArrayCodec.codec, fold, initial).Resolve(id,?option=opt)) let createServiceWithCompaction log gateway = let resolve (id,opt) = Resolver(gateway, codec, fold, initial, access = AccessStrategy.RollingSnapshots snapshot).Resolve(id,?option=opt) Backend.Cart.Service(log, resolve) @@ -64,7 +64,7 @@ module Cart = module ContactPreferences = let fold, initial = Domain.ContactPreferences.Fold.fold, Domain.ContactPreferences.Fold.initial - let codec = Domain.ContactPreferences.Events.codec + let codec = Domain.ContactPreferences.Events.Utf8ArrayCodec.codec let createServiceWithoutOptimization log connection = let gateway = createGesGateway connection defaultBatchSize Backend.ContactPreferences.Service(log, Resolver(gateway, codec, fold, initial).Resolve) @@ -382,4 +382,4 @@ type Tests(testOutputHelper) = let! 
_ = service2.Read cartId let suboptimalExtraSlice = [singleSliceForward] test <@ singleBatchBackwards @ batchBackwardsAndAppend @ suboptimalExtraSlice @ singleBatchForward = capture.ExternalCalls @> - } \ No newline at end of file + } From a68f80ae33a055d8abf0c16292aab9ff092a820f Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Mon, 2 Mar 2020 12:33:40 -0500 Subject: [PATCH 28/71] Split resolvers into UTF8 and JsonElement --- samples/Infrastructure/Services.fs | 38 ++++++++-- samples/Store/Domain/ContactPreferences.fs | 2 +- samples/Store/Domain/SavedForLater.fs | 29 +++++++- samples/TodoBackend/Todo.fs | 31 +++++++- tools/Equinox.Tool/Program.fs | 87 +++++++++++++++------- 5 files changed, 151 insertions(+), 36 deletions(-) diff --git a/samples/Infrastructure/Services.fs b/samples/Infrastructure/Services.fs index 30bcaabd5..c6008d636 100644 --- a/samples/Infrastructure/Services.fs +++ b/samples/Infrastructure/Services.fs @@ -3,10 +3,18 @@ open Domain open Microsoft.Extensions.DependencyInjection open System +open System.Text.Json +open FsCodec +open FsCodec.SystemTextJson.Serialization + +[] +type StreamCodec<'event, 'context> = + | JsonElementCodec of IEventCodec<'event, JsonElement, 'context> + | Utf8ArrayCodec of IEventCodec<'event, byte[], 'context> type StreamResolver(storage) = - member __.Resolve - ( codec : FsCodec.IEventCodec<'event,byte[],_>, + member __.ResolveWithJsonElementCodec + ( codec : IEventCodec<'event, JsonElement, _>, fold: ('state -> 'event seq -> 'state), initial: 'state, snapshot: (('event -> bool) * ('state -> 'event))) = @@ -15,6 +23,14 @@ type StreamResolver(storage) = let store = Equinox.Cosmos.Context(gateway, databaseId, containerId) let accessStrategy = if unfolds then Equinox.Cosmos.AccessStrategy.Snapshot snapshot else Equinox.Cosmos.AccessStrategy.Unoptimized Equinox.Cosmos.Resolver<'event,'state,_>(store, codec, fold, initial, caching, accessStrategy).Resolve + | _ -> failwith "Currently, only Cosmos can be used with a 
JsonElement codec." + + member __.ResolveWithUtf8ArrayCodec + ( codec : IEventCodec<'event, byte[], _>, + fold: ('state -> 'event seq -> 'state), + initial: 'state, + snapshot: (('event -> bool) * ('state -> 'event))) = + match storage with | Storage.StorageConfig.Es (context, caching, unfolds) -> let accessStrategy = if unfolds then Equinox.EventStore.AccessStrategy.RollingSnapshots snapshot |> Some else None Equinox.EventStore.Resolver<'event,'state,_>(context, codec, fold, initial, ?caching = caching, ?access = accessStrategy).Resolve @@ -23,6 +39,7 @@ type StreamResolver(storage) = | Storage.StorageConfig.Sql (context, caching, unfolds) -> let accessStrategy = if unfolds then Equinox.SqlStreamStore.AccessStrategy.RollingSnapshots snapshot |> Some else None Equinox.SqlStreamStore.Resolver<'event,'state,_>(context, codec, fold, initial, ?caching = caching, ?access = accessStrategy).Resolve + | _ -> failwith "Only EventStore, Memory Store, and SQL Store can be used with a byte array codec." 
type ServiceBuilder(storageConfig, handlerLog) = let resolver = StreamResolver(storageConfig) @@ -30,17 +47,26 @@ type ServiceBuilder(storageConfig, handlerLog) = member __.CreateFavoritesService() = let fold, initial = Favorites.Fold.fold, Favorites.Fold.initial let snapshot = Favorites.Fold.isOrigin,Favorites.Fold.snapshot - Backend.Favorites.Service(handlerLog, resolver.Resolve(Favorites.Events.codec,fold,initial,snapshot)) + + match storageConfig with + | Storage.StorageConfig.Cosmos _ -> Backend.Favorites.Service(handlerLog, resolver.ResolveWithJsonElementCodec(Favorites.Events.JsonElementCodec.codec JsonSerializer.defaultOptions, fold, initial, snapshot)) + | _ -> Backend.Favorites.Service(handlerLog, resolver.ResolveWithUtf8ArrayCodec(Favorites.Events.Utf8ArrayCodec.codec, fold, initial, snapshot)) member __.CreateSaveForLaterService() = let fold, initial = SavedForLater.Fold.fold, SavedForLater.Fold.initial let snapshot = SavedForLater.Fold.isOrigin,SavedForLater.Fold.compact - Backend.SavedForLater.Service(handlerLog, resolver.Resolve(SavedForLater.Events.codec,fold,initial,snapshot), maxSavedItems=50) + + match storageConfig with + | Storage.StorageConfig.Cosmos _ -> Backend.SavedForLater.Service(handlerLog, resolver.ResolveWithJsonElementCodec(SavedForLater.Events.JsonElementCodec.codec JsonSerializer.defaultOptions,fold,initial,snapshot), maxSavedItems=50) + | _ -> Backend.SavedForLater.Service(handlerLog, resolver.ResolveWithUtf8ArrayCodec(SavedForLater.Events.Utf8ArrayCodec.codec,fold,initial,snapshot), maxSavedItems=50) member __.CreateTodosService() = let fold, initial = TodoBackend.Fold.fold, TodoBackend.Fold.initial let snapshot = TodoBackend.Fold.isOrigin, TodoBackend.Fold.snapshot - TodoBackend.Service(handlerLog, resolver.Resolve(TodoBackend.Events.codec,fold,initial,snapshot)) + + match storageConfig with + | Storage.StorageConfig.Cosmos _ -> TodoBackend.Service(handlerLog, 
resolver.ResolveWithJsonElementCodec(TodoBackend.Events.JsonElementCodec.codec JsonSerializer.defaultOptions,fold,initial,snapshot)) + | _ -> TodoBackend.Service(handlerLog, resolver.ResolveWithUtf8ArrayCodec(TodoBackend.Events.Utf8ArrayCodec.codec,fold,initial,snapshot)) let register (services : IServiceCollection, storageConfig, handlerLog) = let regF (factory : IServiceProvider -> 'T) = services.AddSingleton<'T>(fun (sp: IServiceProvider) -> factory sp) |> ignore @@ -49,4 +75,4 @@ let register (services : IServiceCollection, storageConfig, handlerLog) = regF <| fun sp -> sp.GetService().CreateFavoritesService() regF <| fun sp -> sp.GetService().CreateSaveForLaterService() - regF <| fun sp -> sp.GetService().CreateTodosService() \ No newline at end of file + regF <| fun sp -> sp.GetService().CreateTodosService() diff --git a/samples/Store/Domain/ContactPreferences.fs b/samples/Store/Domain/ContactPreferences.fs index 28185a0a2..ed9afdd11 100644 --- a/samples/Store/Domain/ContactPreferences.fs +++ b/samples/Store/Domain/ContactPreferences.fs @@ -30,7 +30,7 @@ module Events = let private tryDecode (options: JsonSerializerOptions) = fun (eventType, data: JsonElement) -> match eventType with - | "contactPreferencesChanged" -> Some (Updated <| JsonSerializer.DeserializeElement(data)) + | "contactPreferencesChanged" -> Some (Updated <| JsonSerializer.DeserializeElement(data, options)) | _ -> None let codec options = FsCodec.Codec.Create(encode options, tryDecode options) diff --git a/samples/Store/Domain/SavedForLater.fs b/samples/Store/Domain/SavedForLater.fs index 994ccc1f0..6eb0796d5 100644 --- a/samples/Store/Domain/SavedForLater.fs +++ b/samples/Store/Domain/SavedForLater.fs @@ -2,6 +2,7 @@ open System open System.Collections.Generic +open System.Text.Json // NOTE - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = @@ -29,7 +30,31 @@ module Events = /// Addition of a collection of skus 
to the list | Added of Added interface TypeShape.UnionContract.IUnionContract - let codec = FsCodec.NewtonsoftJson.Codec.Create() + + module Utf8ArrayCodec = + let codec = FsCodec.NewtonsoftJson.Codec.Create() + + module JsonElementCodec = + open FsCodec.SystemTextJson + + let private encode (options: JsonSerializerOptions) = + fun (evt: Event) -> + match evt with + | Compacted compacted -> Compaction.EventType, JsonSerializer.SerializeToElement(compacted, options) + | Merged merged -> "Merged", JsonSerializer.SerializeToElement(merged, options) + | Removed removed -> "Removed", JsonSerializer.SerializeToElement(removed, options) + | Added added -> "Added", JsonSerializer.SerializeToElement(added, options) + + let private tryDecode (options: JsonSerializerOptions) = + fun (eventType, data: JsonElement) -> + match eventType with + | Compaction.EventType -> Some (Compacted <| JsonSerializer.DeserializeElement(data, options)) + | "Merged" -> Some (Merged <| JsonSerializer.DeserializeElement(data, options)) + | "Removed" -> Some (Removed <| JsonSerializer.DeserializeElement(data, options)) + | "Added" -> Some (Added <| JsonSerializer.DeserializeElement(data, options)) + | _ -> None + + let codec options = FsCodec.Codec.Create(encode options, tryDecode options) module Fold = open Events @@ -104,4 +129,4 @@ module Commands = let index = Index state let net = skus |> Array.filter (index.DoesNotAlreadyContainSameOrMoreRecent dateSaved) if Array.isEmpty net then true, [] - else validateAgainstInvariants [ Events.Added { skus = net ; dateSaved = dateSaved } ] \ No newline at end of file + else validateAgainstInvariants [ Events.Added { skus = net ; dateSaved = dateSaved } ] diff --git a/samples/TodoBackend/Todo.fs b/samples/TodoBackend/Todo.fs index 0349f1ada..d6880d9b3 100644 --- a/samples/TodoBackend/Todo.fs +++ b/samples/TodoBackend/Todo.fs @@ -1,6 +1,7 @@ namespace TodoBackend open Domain +open System.Text.Json // NOTE - these types and the union case names reflect the 
actual storage formats and hence need to be versioned with care module Events = @@ -19,7 +20,33 @@ module Events = | Cleared | Snapshotted of Snapshotted interface TypeShape.UnionContract.IUnionContract - let codec = FsCodec.NewtonsoftJson.Codec.Create() + + module Utf8ArrayCodec = + let codec = FsCodec.NewtonsoftJson.Codec.Create() + + module JsonElementCodec = + open FsCodec.SystemTextJson + + let private encode (options: JsonSerializerOptions) = + fun (evt: Event) -> + match evt with + | Added todo -> "Added", JsonSerializer.SerializeToElement(todo, options) + | Updated todo -> "Updated", JsonSerializer.SerializeToElement(todo, options) + | Deleted deleted -> "Deleted", JsonSerializer.SerializeToElement(deleted, options) + | Cleared -> "Cleared", Unchecked.defaultof + | Snapshotted snapshotted -> "Snapshotted", JsonSerializer.SerializeToElement(snapshotted, options) + + let private tryDecode (options: JsonSerializerOptions) = + fun (eventType, data: JsonElement) -> + match eventType with + | "Added" -> Some (Added <| JsonSerializer.DeserializeElement(data, options)) + | "Updated" -> Some (Updated <| JsonSerializer.DeserializeElement(data, options)) + | "Deleted" -> Some (Deleted <| JsonSerializer.DeserializeElement(data, options)) + | "Cleared" -> Some Cleared + | "Snapshotted" -> Some (Snapshotted <| JsonSerializer.DeserializeElement(data, options)) + | _ -> None + + let codec options = FsCodec.Codec.Create(encode options, tryDecode options) module Fold = type State = { items : Events.Todo list; nextId : int } @@ -81,4 +108,4 @@ type Service(log, resolve, ?maxAttempts) = member __.Patch(clientId, item: Events.Todo) : Async = async { let! 
state' = handle clientId (Command.Update item) - return List.find (fun x -> x.id = item.id) state' } \ No newline at end of file + return List.find (fun x -> x.id = item.id) state' } diff --git a/tools/Equinox.Tool/Program.fs b/tools/Equinox.Tool/Program.fs index 72bb5aa7a..171c06656 100644 --- a/tools/Equinox.Tool/Program.fs +++ b/tools/Equinox.Tool/Program.fs @@ -11,6 +11,7 @@ open Serilog.Events open System open System.Net.Http open System.Threading +open System.Text.Json let [] appName = "equinox-tool" @@ -362,16 +363,24 @@ module CosmosStats = | _ -> failwith "please specify a `cosmos` endpoint" } module Dump = - let run (log : ILogger, verboseConsole, maybeSeq) (args : ParseResults) = - let a = DumpInfo args - let createStoreLog verboseStore = createStoreLog verboseStore verboseConsole maybeSeq - let storeLog, storeConfig = a.ConfigureStore(log,createStoreLog) - let doU,doE = not(args.Contains EventsOnly),not(args.Contains UnfoldsOnly) - let doC,doJ,doP,doT = args.Contains Correlation,not(args.Contains JsonSkip),not(args.Contains PrettySkip),not(args.Contains TimeRegular) - let resolver = Samples.Infrastructure.Services.StreamResolver(storeConfig) + let logEvent (log: ILogger) (prevTs: DateTimeOffset option) doC doT (event: FsCodec.ITimelineEvent<'format>) (renderer: 'format -> string) = + let ty = if event.IsUnfold then "U" else "E" + let interval = + match prevTs with Some p when not event.IsUnfold -> Some (event.Timestamp - p) | _ -> None + |> function + | None -> if doT then "n/a" else "0" + | Some (i : TimeSpan) when not doT -> i.ToString() + | Some (i : TimeSpan) when i.TotalDays >= 1. -> i.ToString "d\dhh\hmm\m" + | Some i when i.TotalHours >= 1. -> i.ToString "h\hmm\mss\s" + | Some i when i.TotalMinutes >= 1. 
-> i.ToString "m\mss\.ff\s" + | Some i -> i.ToString("s\.fff\s") + if not doC then log.Information("{i,4}@{t:u}+{d,9} {u:l} {e:l} {data:l} {meta:l}", + event.Index, event.Timestamp, interval, ty, event.EventType, renderer event.Data, renderer event.Meta) + else log.Information("{i,4}@{t:u}+{d,9} Corr {corr} Cause {cause} {u:l} {e:l} {data:l} {meta:l}", + event.Index, event.Timestamp, interval, event.CorrelationId, event.CausationId, ty, event.EventType, renderer event.Data, renderer event.Meta) + event.Timestamp - let streams = args.GetResults DumpArguments.Stream - log.ForContext("streams",streams).Information("Reading...") + let dumpUtf8ArrayStorage (log: ILogger) (storeLog: ILogger) doU doE doC doJ doP doT (resolver: Services.StreamResolver) (streams: FsCodec.StreamName list) = let initial = List.empty let fold state events = (events,state) ||> Seq.foldBack (fun e l -> e :: l) let mutable unfolds = List.empty @@ -388,31 +397,59 @@ module Dump = | _ -> sprintf "(%d chars)" (System.Text.Encoding.UTF8.GetString(data).Length) with e -> log.ForContext("str", System.Text.Encoding.UTF8.GetString data).Warning(e, "Parse failure"); reraise() let readStream (streamName : FsCodec.StreamName) = async { - let stream = resolver.Resolve(idCodec,fold,initial,isOriginAndSnapshot) streamName + let stream = resolver.ResolveWithUtf8ArrayCodec(idCodec,fold,initial,isOriginAndSnapshot) streamName let! 
_token,events = stream.Load storeLog let source = if not doE && not (List.isEmpty unfolds) then Seq.ofList unfolds else Seq.append events unfolds let mutable prevTs = None for x in source |> Seq.filter (fun e -> (e.IsUnfold && doU) || (not e.IsUnfold && doE)) do - let ty,render = if x.IsUnfold then "U", render Newtonsoft.Json.Formatting.Indented else "E", render fo - let interval = - match prevTs with Some p when not x.IsUnfold -> Some (x.Timestamp - p) | _ -> None - |> function - | None -> if doT then "n/a" else "0" - | Some (i : TimeSpan) when not doT -> i.ToString() - | Some (i : TimeSpan) when i.TotalDays >= 1. -> i.ToString "d\dhh\hmm\m" - | Some i when i.TotalHours >= 1. -> i.ToString "h\hmm\mss\s" - | Some i when i.TotalMinutes >= 1. -> i.ToString "m\mss\.ff\s" - | Some i -> i.ToString("s\.fff\s") - prevTs <- Some x.Timestamp - if not doC then log.Information("{i,4}@{t:u}+{d,9} {u:l} {e:l} {data:l} {meta:l}", - x.Index, x.Timestamp, interval, ty, x.EventType, render x.Data, render x.Meta) - else log.Information("{i,4}@{t:u}+{d,9} Corr {corr} Cause {cause} {u:l} {e:l} {data:l} {meta:l}", - x.Index, x.Timestamp, interval, x.CorrelationId, x.CausationId, ty, x.EventType, render x.Data, render x.Meta) } + let render = if x.IsUnfold then render Newtonsoft.Json.Formatting.Indented else render fo + prevTs <- Some (logEvent log prevTs doC doT x render) } streams |> Seq.map readStream |> Async.Parallel |> Async.Ignore + let dumpJsonElementStorage (log: ILogger) (storeLog: ILogger) doU doE doC doJ _doP doT (resolver: Services.StreamResolver) (streams: FsCodec.StreamName list) = + let initial = List.empty + let fold state events = (events,state) ||> Seq.foldBack (fun e l -> e :: l) + let mutable unfolds = List.empty + let tryDecode (x : FsCodec.ITimelineEvent) = + if x.IsUnfold then unfolds <- x :: unfolds + Some x + let idCodec = FsCodec.Codec.Create((fun _ -> failwith "No encoding required"), tryDecode, (fun _ -> failwith "No mapCausation")) + let isOriginAndSnapshot 
= (fun (event : FsCodec.ITimelineEvent<_>) -> not doE && event.IsUnfold),fun _state -> failwith "no snapshot required" + let render (data : JsonElement) = + match data.ValueKind with + | JsonValueKind.Null | JsonValueKind.Undefined -> null + | _ when doJ -> data.GetRawText() + | _ -> sprintf "(%d chars)" (data.GetRawText().Length) + let readStream (streamName : FsCodec.StreamName) = async { + let stream = resolver.ResolveWithJsonElementCodec(idCodec,fold,initial,isOriginAndSnapshot) streamName + let! _token,events = stream.Load storeLog + let source = if not doE && not (List.isEmpty unfolds) then Seq.ofList unfolds else Seq.append events unfolds + let mutable prevTs = None + for x in source |> Seq.filter (fun e -> (e.IsUnfold && doU) || (not e.IsUnfold && doE)) do + prevTs <- Some (logEvent log prevTs doC doT x render) } + streams + |> Seq.map readStream + |> Async.Parallel + |> Async.Ignore + + let run (log : ILogger, verboseConsole, maybeSeq) (args : ParseResults) = + let a = DumpInfo args + let createStoreLog verboseStore = createStoreLog verboseStore verboseConsole maybeSeq + let storeLog, storeConfig = a.ConfigureStore(log,createStoreLog) + let doU,doE = not(args.Contains EventsOnly),not(args.Contains UnfoldsOnly) + let doC,doJ,doP,doT = args.Contains Correlation,not(args.Contains JsonSkip),not(args.Contains PrettySkip),not(args.Contains TimeRegular) + let resolver = Samples.Infrastructure.Services.StreamResolver(storeConfig) + + let streams = args.GetResults DumpArguments.Stream + log.ForContext("streams",streams).Information("Reading...") + + match storeConfig with + | Storage.StorageConfig.Cosmos _ -> dumpJsonElementStorage log storeLog doU doE doC doJ doP doT resolver streams + | _ -> dumpUtf8ArrayStorage log storeLog doU doE doC doJ doP doT resolver streams + [] let main argv = let programName = System.Reflection.Assembly.GetEntryAssembly().GetName().Name From 301fdc5167d62e2312f9ed405911e346828ba1b3 Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Mon, 
2 Mar 2020 12:54:25 -0500 Subject: [PATCH 29/71] Remove unnecessary async block --- src/Equinox.Cosmos/CosmosJsonSerializer.fs | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/src/Equinox.Cosmos/CosmosJsonSerializer.fs b/src/Equinox.Cosmos/CosmosJsonSerializer.fs index 2d17c85f5..8c13db10b 100644 --- a/src/Equinox.Cosmos/CosmosJsonSerializer.fs +++ b/src/Equinox.Cosmos/CosmosJsonSerializer.fs @@ -21,14 +21,11 @@ type CosmosJsonSerializer (options: JsonSerializerOptions) = ) override __.ToStream<'T> (input: 'T) = - async { - let memoryStream = new MemoryStream() + let memoryStream = new MemoryStream() - do! - JsonSerializer.SerializeAsync(memoryStream, input, input.GetType(), options) - |> Async.AwaitTaskCorrect - - memoryStream.Position <- 0L - return memoryStream :> Stream - } + JsonSerializer.SerializeAsync(memoryStream, input, input.GetType(), options) + |> Async.AwaitTaskCorrect |> Async.RunSynchronously + + memoryStream.Position <- 0L + memoryStream :> Stream From 1c92b0869fa5cc4642aaf48d96884a477cee0226 Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Mon, 2 Mar 2020 13:00:10 -0500 Subject: [PATCH 30/71] Fix typo --- src/Equinox.Core/Json/Utf8JsonReaderExtensions.fs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Equinox.Core/Json/Utf8JsonReaderExtensions.fs b/src/Equinox.Core/Json/Utf8JsonReaderExtensions.fs index 56c423392..9e29bb5d3 100644 --- a/src/Equinox.Core/Json/Utf8JsonReaderExtensions.fs +++ b/src/Equinox.Core/Json/Utf8JsonReaderExtensions.fs @@ -17,6 +17,6 @@ type Utf8JsonReaderExtension = reader.ValidateTokenType(JsonTokenType.PropertyName) if not <| reader.ValueTextEquals expectedPropertyName then - sprintf "Expected a property named '%s', but encounted property with name '%s'." expectedPropertyName (reader.GetString()) + sprintf "Expected a property named '%s', but encountered property with name '%s'." 
expectedPropertyName (reader.GetString()) |> JsonException |> raise From d95e12178aa2db62ee017d49454741af7fa9fa5d Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Mon, 2 Mar 2020 16:59:57 -0500 Subject: [PATCH 31/71] Change to camelCase field names --- src/Equinox.Core/Json/JsonRecordConverter.fs | 37 ++++++++++---------- 1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/src/Equinox.Core/Json/JsonRecordConverter.fs b/src/Equinox.Core/Json/JsonRecordConverter.fs index eca328820..3f61f4e2e 100644 --- a/src/Equinox.Core/Json/JsonRecordConverter.fs +++ b/src/Equinox.Core/Json/JsonRecordConverter.fs @@ -7,7 +7,6 @@ open System.Linq.Expressions open System.Text.Json open System.Text.Json.Serialization open FSharp.Reflection -open Equinox.Core type JsonRecordConverterActivator = delegate of JsonSerializerOptions -> JsonConverter @@ -31,11 +30,11 @@ type RecordFieldConverter<'F> () = [] type RecordField = { - Name: string - Type: Type - Index: int - IsIgnored: bool - Converter: IRecordFieldConverter option + name: string + fieldType: Type + index: int + isIgnored: bool + converter: IRecordFieldConverter option } type JsonRecordConverter<'T> (options: JsonSerializerOptions) = @@ -50,7 +49,7 @@ type JsonRecordConverter<'T> (options: JsonSerializerOptions) = FSharpType.GetRecordFields(recordType, true) |> Array.mapi (fun idx f -> { - Name = + name = f.GetCustomAttributes(typedefof, true) |> Array.tryHead |> Option.map (fun attr -> (attr :?> JsonPropertyNameAttribute).Name) @@ -59,10 +58,10 @@ type JsonRecordConverter<'T> (options: JsonSerializerOptions) = then f.Name else options.PropertyNamingPolicy.ConvertName f.Name) - Type = f.PropertyType - Index = idx - IsIgnored = f.GetCustomAttributes(typeof, true) |> Array.isEmpty |> not - Converter = + fieldType = f.PropertyType + index = idx + isIgnored = f.GetCustomAttributes(typeof, true) |> Array.isEmpty |> not + converter = f.GetCustomAttributes(typeof, true) |> Array.tryHead |> Option.map (fun attr -> attr :?> 
JsonConverterAttribute) @@ -84,7 +83,7 @@ type JsonRecordConverter<'T> (options: JsonSerializerOptions) = let fieldsByName = fields - |> Array.map (fun f -> f.Name, f) + |> Array.map (fun f -> f.name, f) #if NETSTANDARD2_1 |> Array.map KeyValuePair.Create |> (fun kvp -> Dictionary(kvp, StringComparer.OrdinalIgnoreCase)) @@ -113,13 +112,13 @@ type JsonRecordConverter<'T> (options: JsonSerializerOptions) = match tryGetFieldByName <| reader.GetString() with | Some field -> - fields.[field.Index] <- - match field.Converter with + fields.[field.index] <- + match field.converter with | Some converter -> reader.Read() |> ignore - converter.Read(&reader, field.Type, options) + converter.Read(&reader, field.fieldType, options) | None -> - JsonSerializer.Deserialize(&reader, field.Type, options) + JsonSerializer.Deserialize(&reader, field.fieldType, options) | _ -> reader.Skip() @@ -135,10 +134,10 @@ type JsonRecordConverter<'T> (options: JsonSerializerOptions) = match value with | :? JsonElement as je when je.ValueKind = JsonValueKind.Undefined -> () | _ -> - if not field.IsIgnored && not (options.IgnoreNullValues && isNull value) then - writer.WritePropertyName(field.Name) + if not field.isIgnored && not (options.IgnoreNullValues && isNull value) then + writer.WritePropertyName(field.name) - match field.Converter with + match field.converter with | Some converter -> converter.Write(writer, value, options) | None -> JsonSerializer.Serialize(writer, value, options)) From 4398d30339a800dec4ead0438c3c078d359c46e3 Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Mon, 2 Mar 2020 17:01:25 -0500 Subject: [PATCH 32/71] Move FsCodec dep to end --- src/Equinox.Cosmos/Equinox.Cosmos.fsproj | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Equinox.Cosmos/Equinox.Cosmos.fsproj b/src/Equinox.Cosmos/Equinox.Cosmos.fsproj index 03b5f97e9..4659eff70 100644 --- a/src/Equinox.Cosmos/Equinox.Cosmos.fsproj +++ b/src/Equinox.Cosmos/Equinox.Cosmos.fsproj @@ -20,7 +20,6 @@ - 
@@ -30,6 +29,7 @@ + \ No newline at end of file From 3fabd8f62017948d0f8f5ca0cf8823de89c0b33d Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Mon, 2 Mar 2020 17:14:26 -0500 Subject: [PATCH 33/71] Rearrange open statements --- samples/Infrastructure/Services.fs | 4 ++-- samples/Store/Domain/Cart.fs | 6 +++--- samples/Store/Domain/ContactPreferences.fs | 3 +-- samples/Store/Domain/Favorites.fs | 3 +-- samples/Store/Domain/SavedForLater.fs | 2 +- samples/Store/Integration/CartIntegration.fs | 2 +- .../Store/Integration/ContactPreferencesIntegration.fs | 2 +- samples/Store/Integration/FavoritesIntegration.fs | 2 +- samples/TodoBackend/Todo.fs | 2 +- samples/Tutorial/Gapless.fs | 2 +- samples/Tutorial/Index.fs | 3 +-- samples/Tutorial/Sequence.fs | 2 +- samples/Tutorial/Set.fs | 3 +-- samples/Tutorial/Upload.fs | 2 +- src/Equinox.Core/Json/JsonRecordConverter.fs | 1 + src/Equinox.Cosmos/Cosmos.fs | 8 ++++---- src/Equinox.Cosmos/CosmosJsonSerializer.fs | 4 ++-- tests/Equinox.Cosmos.Integration/CosmosIntegration.fs | 4 ++-- tests/Equinox.Cosmos.Integration/JsonConverterTests.fs | 2 +- 19 files changed, 27 insertions(+), 30 deletions(-) diff --git a/samples/Infrastructure/Services.fs b/samples/Infrastructure/Services.fs index c6008d636..b7e0c2623 100644 --- a/samples/Infrastructure/Services.fs +++ b/samples/Infrastructure/Services.fs @@ -1,11 +1,11 @@ module Samples.Infrastructure.Services open Domain +open FsCodec +open FsCodec.SystemTextJson.Serialization open Microsoft.Extensions.DependencyInjection open System open System.Text.Json -open FsCodec -open FsCodec.SystemTextJson.Serialization [] type StreamCodec<'event, 'context> = diff --git a/samples/Store/Domain/Cart.fs b/samples/Store/Domain/Cart.fs index 1b0562b89..bf6ce47e2 100644 --- a/samples/Store/Domain/Cart.fs +++ b/samples/Store/Domain/Cart.fs @@ -1,8 +1,5 @@ module Domain.Cart -open System.Text.Json -open FsCodec.SystemTextJson - // NOTE - these types and the union case names reflect the actual storage 
formats and hence need to be versioned with care module Events = @@ -32,6 +29,9 @@ module Events = let codec = FsCodec.NewtonsoftJson.Codec.Create() module JsonElementCodec = + open FsCodec.SystemTextJson + open System.Text.Json + let private encode (options: JsonSerializerOptions) = fun (evt: Event) -> match evt with diff --git a/samples/Store/Domain/ContactPreferences.fs b/samples/Store/Domain/ContactPreferences.fs index ed9afdd11..140220496 100644 --- a/samples/Store/Domain/ContactPreferences.fs +++ b/samples/Store/Domain/ContactPreferences.fs @@ -1,7 +1,5 @@ module Domain.ContactPreferences -open System.Text.Json - type Id = Id of email: string // NOTE - these types and the union case names reflect the actual storage formats and hence need to be versioned with care @@ -21,6 +19,7 @@ module Events = module JsonElementCodec = open FsCodec.SystemTextJson + open System.Text.Json let private encode (options: JsonSerializerOptions) = fun (evt: Event) -> diff --git a/samples/Store/Domain/Favorites.fs b/samples/Store/Domain/Favorites.fs index 89f0151a8..c350052ef 100644 --- a/samples/Store/Domain/Favorites.fs +++ b/samples/Store/Domain/Favorites.fs @@ -1,7 +1,5 @@ module Domain.Favorites -open System.Text.Json - // NOTE - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = @@ -22,6 +20,7 @@ module Events = module JsonElementCodec = open FsCodec.SystemTextJson + open System.Text.Json let private encode (options: JsonSerializerOptions) = fun (evt: Event) -> match evt with diff --git a/samples/Store/Domain/SavedForLater.fs b/samples/Store/Domain/SavedForLater.fs index 6eb0796d5..f936ac535 100644 --- a/samples/Store/Domain/SavedForLater.fs +++ b/samples/Store/Domain/SavedForLater.fs @@ -2,7 +2,6 @@ open System open System.Collections.Generic -open System.Text.Json // NOTE - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module 
Events = @@ -36,6 +35,7 @@ module Events = module JsonElementCodec = open FsCodec.SystemTextJson + open System.Text.Json let private encode (options: JsonSerializerOptions) = fun (evt: Event) -> diff --git a/samples/Store/Integration/CartIntegration.fs b/samples/Store/Integration/CartIntegration.fs index 8fec369ec..4d7db0cbd 100644 --- a/samples/Store/Integration/CartIntegration.fs +++ b/samples/Store/Integration/CartIntegration.fs @@ -4,8 +4,8 @@ open Equinox open Equinox.Cosmos.Integration open Equinox.EventStore open Equinox.MemoryStore -open Swensen.Unquote open FsCodec.SystemTextJson.Serialization +open Swensen.Unquote #nowarn "1182" // From hereon in, we may have some 'unused' privates (the tests) diff --git a/samples/Store/Integration/ContactPreferencesIntegration.fs b/samples/Store/Integration/ContactPreferencesIntegration.fs index dc5c2a9bf..c04fb7360 100644 --- a/samples/Store/Integration/ContactPreferencesIntegration.fs +++ b/samples/Store/Integration/ContactPreferencesIntegration.fs @@ -2,9 +2,9 @@ open Equinox open Equinox.Cosmos.Integration +open FsCodec.SystemTextJson.Serialization open Swensen.Unquote open Xunit -open FsCodec.SystemTextJson.Serialization #nowarn "1182" // From hereon in, we may have some 'unused' privates (the tests) diff --git a/samples/Store/Integration/FavoritesIntegration.fs b/samples/Store/Integration/FavoritesIntegration.fs index aedb868ce..2d3e90430 100644 --- a/samples/Store/Integration/FavoritesIntegration.fs +++ b/samples/Store/Integration/FavoritesIntegration.fs @@ -2,8 +2,8 @@ open Equinox open Equinox.Cosmos.Integration -open Swensen.Unquote open FsCodec.SystemTextJson.Serialization +open Swensen.Unquote #nowarn "1182" // From hereon in, we may have some 'unused' privates (the tests) diff --git a/samples/TodoBackend/Todo.fs b/samples/TodoBackend/Todo.fs index d6880d9b3..133e7d40c 100644 --- a/samples/TodoBackend/Todo.fs +++ b/samples/TodoBackend/Todo.fs @@ -1,7 +1,6 @@ namespace TodoBackend open Domain -open 
System.Text.Json // NOTE - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = @@ -26,6 +25,7 @@ module Events = module JsonElementCodec = open FsCodec.SystemTextJson + open System.Text.Json let private encode (options: JsonSerializerOptions) = fun (evt: Event) -> diff --git a/samples/Tutorial/Gapless.fs b/samples/Tutorial/Gapless.fs index f68ddde2c..e4d1180bd 100644 --- a/samples/Tutorial/Gapless.fs +++ b/samples/Tutorial/Gapless.fs @@ -3,7 +3,6 @@ module Gapless open System -open System.Text.Json // NOTE - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = @@ -25,6 +24,7 @@ module Events = module JsonElementCodec = open FsCodec.SystemTextJson + open System.Text.Json let private encode (options: JsonSerializerOptions) = fun (evt: Event) -> match evt with diff --git a/samples/Tutorial/Index.fs b/samples/Tutorial/Index.fs index fa89ce696..6d39b4ca5 100644 --- a/samples/Tutorial/Index.fs +++ b/samples/Tutorial/Index.fs @@ -1,7 +1,5 @@ module Index -open System.Text.Json - // NOTE - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = @@ -21,6 +19,7 @@ module Events = module JsonElementCodec = open FsCodec.SystemTextJson + open System.Text.Json let private encode<'v> (options: JsonSerializerOptions) = fun (evt: Event<'v>) -> match evt with diff --git a/samples/Tutorial/Sequence.fs b/samples/Tutorial/Sequence.fs index 82e06f061..264e26b50 100644 --- a/samples/Tutorial/Sequence.fs +++ b/samples/Tutorial/Sequence.fs @@ -3,7 +3,6 @@ module Sequence open System -open System.Text.Json // shim for net461 module Seq = @@ -32,6 +31,7 @@ module Events = module JsonElementCodec = open FsCodec.SystemTextJson + open System.Text.Json let private encode (options: JsonSerializerOptions) = fun (evt: Event) -> match evt with diff --git 
a/samples/Tutorial/Set.fs b/samples/Tutorial/Set.fs index 15ac00bf4..c500df008 100644 --- a/samples/Tutorial/Set.fs +++ b/samples/Tutorial/Set.fs @@ -1,7 +1,5 @@ module Set -open System.Text.Json - // NOTE - these types and the union case names reflect the actual storage formats and hence need to be versioned with care module Events = @@ -20,6 +18,7 @@ module Events = module JsonElementCodec = open FsCodec.SystemTextJson + open System.Text.Json let private encode (options: JsonSerializerOptions) = fun (evt: Event) -> match evt with diff --git a/samples/Tutorial/Upload.fs b/samples/Tutorial/Upload.fs index f6a8ca6bc..aa80caa5f 100644 --- a/samples/Tutorial/Upload.fs +++ b/samples/Tutorial/Upload.fs @@ -2,7 +2,6 @@ module Upload open System -open System.Text.Json open FSharp.UMX // shim for net461 @@ -47,6 +46,7 @@ module Events = module JsonElementCodec = open FsCodec.SystemTextJson + open System.Text.Json let encode (options: JsonSerializerOptions) = fun (evt: Event) -> match evt with diff --git a/src/Equinox.Core/Json/JsonRecordConverter.fs b/src/Equinox.Core/Json/JsonRecordConverter.fs index 3f61f4e2e..079382003 100644 --- a/src/Equinox.Core/Json/JsonRecordConverter.fs +++ b/src/Equinox.Core/Json/JsonRecordConverter.fs @@ -1,5 +1,6 @@ namespace FsCodec.SystemTextJson.Serialization +open Equinox.Core open System open System.Collections.Generic open System.Linq diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index 463309a3b..7dbd01ef8 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -1,9 +1,9 @@ namespace Equinox.Cosmos.Store -open Equinox.Core -open FsCodec open Azure open Azure.Cosmos +open Equinox.Core +open FsCodec open Serilog open System open System.IO @@ -793,13 +793,13 @@ module Internal = namespace Equinox.Cosmos +open Azure.Cosmos open Equinox open Equinox.Core -open FsCodec.SystemTextJson.Serialization open Equinox.Cosmos.Store open FsCodec +open FsCodec.SystemTextJson.Serialization open 
FSharp.Control -open Azure.Cosmos open Serilog open System open System.Collections.Concurrent diff --git a/src/Equinox.Cosmos/CosmosJsonSerializer.fs b/src/Equinox.Cosmos/CosmosJsonSerializer.fs index 8c13db10b..6b1a5a310 100644 --- a/src/Equinox.Cosmos/CosmosJsonSerializer.fs +++ b/src/Equinox.Cosmos/CosmosJsonSerializer.fs @@ -1,9 +1,9 @@ namespace Equinox.Cosmos.Store -open System.IO -open System.Text.Json open Azure.Cosmos.Serialization open Equinox.Core +open System.IO +open System.Text.Json type CosmosJsonSerializer (options: JsonSerializerOptions) = inherit CosmosSerializer() diff --git a/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs b/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs index b6c733e81..b74282e46 100644 --- a/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs +++ b/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs @@ -1,12 +1,12 @@ module Equinox.Cosmos.Integration.CosmosIntegration -open System -open System.Threading open Domain open Equinox.Cosmos open Equinox.Cosmos.Integration.Infrastructure open FSharp.UMX open Swensen.Unquote +open System +open System.Threading module Cart = let fold, initial = Domain.Cart.Fold.fold, Domain.Cart.Fold.initial diff --git a/tests/Equinox.Cosmos.Integration/JsonConverterTests.fs b/tests/Equinox.Cosmos.Integration/JsonConverterTests.fs index 1e3407852..7bcbecb31 100644 --- a/tests/Equinox.Cosmos.Integration/JsonConverterTests.fs +++ b/tests/Equinox.Cosmos.Integration/JsonConverterTests.fs @@ -4,8 +4,8 @@ open Equinox.Cosmos open FsCheck.Xunit open Swensen.Unquote open System -open Xunit open System.Text.Json +open Xunit type Embedded = { embed : string } type Union = From d599f6c512b0df59c93a0c2596097411aa244b47 Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Tue, 3 Mar 2020 10:53:31 -0500 Subject: [PATCH 34/71] Fix ValueTask versioning --- samples/Store/Domain.Tests/Domain.Tests.fsproj | 1 + .../Equinox.EventStore.Integration.fsproj | 1 + .../Equinox.MemoryStore.Integration.fsproj | 1 + 
.../Equinox.SqlStreamStore.MsSql.Integration.fsproj | 1 + .../Equinox.SqlStreamStore.MySql.Integration.fsproj | 1 + .../Equinox.SqlStreamStore.Postgres.Integration.fsproj | 1 + 6 files changed, 6 insertions(+) diff --git a/samples/Store/Domain.Tests/Domain.Tests.fsproj b/samples/Store/Domain.Tests/Domain.Tests.fsproj index 40240896f..d95a52a1c 100644 --- a/samples/Store/Domain.Tests/Domain.Tests.fsproj +++ b/samples/Store/Domain.Tests/Domain.Tests.fsproj @@ -21,6 +21,7 @@ + all diff --git a/tests/Equinox.EventStore.Integration/Equinox.EventStore.Integration.fsproj b/tests/Equinox.EventStore.Integration/Equinox.EventStore.Integration.fsproj index 1aa5f6f2c..746cf3153 100644 --- a/tests/Equinox.EventStore.Integration/Equinox.EventStore.Integration.fsproj +++ b/tests/Equinox.EventStore.Integration/Equinox.EventStore.Integration.fsproj @@ -25,6 +25,7 @@ + diff --git a/tests/Equinox.MemoryStore.Integration/Equinox.MemoryStore.Integration.fsproj b/tests/Equinox.MemoryStore.Integration/Equinox.MemoryStore.Integration.fsproj index ea67e9a0a..906bc4089 100644 --- a/tests/Equinox.MemoryStore.Integration/Equinox.MemoryStore.Integration.fsproj +++ b/tests/Equinox.MemoryStore.Integration/Equinox.MemoryStore.Integration.fsproj @@ -21,6 +21,7 @@ + diff --git a/tests/Equinox.SqlStreamStore.MsSql.Integration/Equinox.SqlStreamStore.MsSql.Integration.fsproj b/tests/Equinox.SqlStreamStore.MsSql.Integration/Equinox.SqlStreamStore.MsSql.Integration.fsproj index a8d5f66c4..ed1e1dbc3 100644 --- a/tests/Equinox.SqlStreamStore.MsSql.Integration/Equinox.SqlStreamStore.MsSql.Integration.fsproj +++ b/tests/Equinox.SqlStreamStore.MsSql.Integration/Equinox.SqlStreamStore.MsSql.Integration.fsproj @@ -23,6 +23,7 @@ + diff --git a/tests/Equinox.SqlStreamStore.MySql.Integration/Equinox.SqlStreamStore.MySql.Integration.fsproj b/tests/Equinox.SqlStreamStore.MySql.Integration/Equinox.SqlStreamStore.MySql.Integration.fsproj index ae3b9c1c8..430ba8b0e 100644 --- 
a/tests/Equinox.SqlStreamStore.MySql.Integration/Equinox.SqlStreamStore.MySql.Integration.fsproj +++ b/tests/Equinox.SqlStreamStore.MySql.Integration/Equinox.SqlStreamStore.MySql.Integration.fsproj @@ -23,6 +23,7 @@ + diff --git a/tests/Equinox.SqlStreamStore.Postgres.Integration/Equinox.SqlStreamStore.Postgres.Integration.fsproj b/tests/Equinox.SqlStreamStore.Postgres.Integration/Equinox.SqlStreamStore.Postgres.Integration.fsproj index 0c1b67c8d..ab2d19764 100644 --- a/tests/Equinox.SqlStreamStore.Postgres.Integration/Equinox.SqlStreamStore.Postgres.Integration.fsproj +++ b/tests/Equinox.SqlStreamStore.Postgres.Integration/Equinox.SqlStreamStore.Postgres.Integration.fsproj @@ -23,6 +23,7 @@ + From b1f4f032a6301195c33774a20899e82badae57ec Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Fri, 6 Mar 2020 17:40:26 -0500 Subject: [PATCH 35/71] Abstract Cosmos clients and allow them to be provided by consumers --- samples/Infrastructure/Services.fs | 3 +- samples/Infrastructure/Storage.fs | 11 +- samples/Store/Integration/CartIntegration.fs | 15 +- .../ContactPreferencesIntegration.fs | 23 +- .../Store/Integration/FavoritesIntegration.fs | 10 +- samples/Store/Integration/LogIntegration.fs | 7 +- src/Equinox.Core/Infrastructure.fs | 3 + src/Equinox.Cosmos/Cosmos.fs | 460 ++++++++++-------- .../CosmosCoreIntegration.fs | 36 +- .../CosmosFixtures.fs | 32 +- .../CosmosIntegration.fs | 59 +-- tools/Equinox.Tool/Program.fs | 21 +- 12 files changed, 367 insertions(+), 313 deletions(-) diff --git a/samples/Infrastructure/Services.fs b/samples/Infrastructure/Services.fs index b7e0c2623..249bab942 100644 --- a/samples/Infrastructure/Services.fs +++ b/samples/Infrastructure/Services.fs @@ -19,8 +19,7 @@ type StreamResolver(storage) = initial: 'state, snapshot: (('event -> bool) * ('state -> 'event))) = match storage with - | Storage.StorageConfig.Cosmos (gateway, caching, unfolds, databaseId, containerId) -> - let store = Equinox.Cosmos.Context(gateway, databaseId, containerId) 
+ | Storage.StorageConfig.Cosmos (store, caching, unfolds, _databaseId, _containerId) -> let accessStrategy = if unfolds then Equinox.Cosmos.AccessStrategy.Snapshot snapshot else Equinox.Cosmos.AccessStrategy.Unoptimized Equinox.Cosmos.Resolver<'event,'state,_>(store, codec, fold, initial, caching, accessStrategy).Resolve | _ -> failwith "Currently, only Cosmos can be used with a JsonElement codec." diff --git a/samples/Infrastructure/Storage.fs b/samples/Infrastructure/Storage.fs index fb420305e..5c551b0ff 100644 --- a/samples/Infrastructure/Storage.fs +++ b/samples/Infrastructure/Storage.fs @@ -10,7 +10,7 @@ type StorageConfig = // For MemoryStore, we keep the events as UTF8 arrays - we could use FsCodec.Codec.Box to remove the JSON encoding, which would improve perf but can conceal problems | Memory of Equinox.MemoryStore.VolatileStore | Es of Equinox.EventStore.Context * Equinox.EventStore.CachingStrategy option * unfolds: bool - | Cosmos of Equinox.Cosmos.Gateway * Equinox.Cosmos.CachingStrategy * unfolds: bool * databaseId: string * containerId: string + | Cosmos of Equinox.Cosmos.Context * Equinox.Cosmos.CachingStrategy * unfolds: bool * databaseId: string * containerId: string | Sql of Equinox.SqlStreamStore.Context * Equinox.SqlStreamStore.CachingStrategy option * unfolds: bool module MemoryStore = @@ -70,19 +70,18 @@ module Cosmos = open Equinox.Cosmos open Serilog - let private createGateway connection maxItems = Gateway(connection, BatchingPolicy(defaultMaxItems=maxItems)) let connection (log: ILogger, storeLog: ILogger) (a : Info) = let (Discovery.UriAndKey (endpointUri,_)) as discovery = a.Connection |> Discovery.FromConnectionString log.Information("CosmosDb {mode} {connection} Database {database} Container {container}", a.Mode, endpointUri, a.Database, a.Container) log.Information("CosmosDb timeout {timeout}s; Throttling retries {retries}, max wait {maxRetryWaitTime}s", (let t = a.Timeout in t.TotalSeconds), a.Retries, let x = a.MaxRetryWaitTime in 
x.TotalSeconds) - discovery, a.Database, a.Container, Connector(a.Timeout, a.Retries, a.MaxRetryWaitTime, log=storeLog, mode=a.Mode) + discovery, a.Database, a.Container, ClientFactory(a.Timeout, a.Retries, a.MaxRetryWaitTime, log=storeLog, mode=a.Mode) let config (log: ILogger, storeLog) (cache, unfolds, batchSize) info = - let discovery, dName, cName, connector = connection (log, storeLog) info - let conn = connector.Connect(appName, discovery) |> Async.RunSynchronously + let discovery, dName, cName, factory = connection (log, storeLog) info + let ctx = Context(factory.CreateClient(appName, discovery), dName, cName, log = log, defaultMaxItems = batchSize) let cacheStrategy = match cache with Some c -> CachingStrategy.SlidingWindow (c, TimeSpan.FromMinutes 20.) | None -> CachingStrategy.NoCaching - StorageConfig.Cosmos (createGateway conn batchSize, cacheStrategy, unfolds, dName, cName) + StorageConfig.Cosmos (ctx, cacheStrategy, unfolds, dName, cName) /// To establish a local node to run the tests against: /// 1. cinst eventstore-oss -y # where cinst is an invocation of the Chocolatey Package Installer on Windows diff --git a/samples/Store/Integration/CartIntegration.fs b/samples/Store/Integration/CartIntegration.fs index 4d7db0cbd..53a7397a8 100644 --- a/samples/Store/Integration/CartIntegration.fs +++ b/samples/Store/Integration/CartIntegration.fs @@ -55,7 +55,7 @@ type Tests(testOutputHelper) = do! act service args } - let arrange connect choose resolve = async { + let arrangeEs connect choose resolve = async { let log = createLog () let! conn = connect log let gateway = choose conn defaultBatchSize @@ -63,24 +63,29 @@ type Tests(testOutputHelper) = [] let ``Can roundtrip against EventStore, correctly folding the events without compaction semantics`` args = Async.RunSynchronously <| async { - let! service = arrange connectToLocalEventStoreNode createGesGateway resolveGesStreamWithoutCustomAccessStrategy + let! 
service = arrangeEs connectToLocalEventStoreNode createGesGateway resolveGesStreamWithoutCustomAccessStrategy do! act service args } [] let ``Can roundtrip against EventStore, correctly folding the events with RollingSnapshots`` args = Async.RunSynchronously <| async { - let! service = arrange connectToLocalEventStoreNode createGesGateway resolveGesStreamWithRollingSnapshots + let! service = arrangeEs connectToLocalEventStoreNode createGesGateway resolveGesStreamWithRollingSnapshots do! act service args } + let arrangeCosmos connect resolve = + let log = createLog () + let ctx: Cosmos.Context = connect log defaultBatchSize + Backend.Cart.Service(log, resolve ctx) + [] let ``Can roundtrip against Cosmos, correctly folding the events without custom access strategy`` args = Async.RunSynchronously <| async { - let! service = arrange connectToSpecifiedCosmosOrSimulator createCosmosContext resolveCosmosStreamWithoutCustomAccessStrategy + let service = arrangeCosmos connectToSpecifiedCosmosOrSimulator resolveCosmosStreamWithoutCustomAccessStrategy do! act service args } [] let ``Can roundtrip against Cosmos, correctly folding the events with With Snapshotting`` args = Async.RunSynchronously <| async { - let! service = arrange connectToSpecifiedCosmosOrSimulator createCosmosContext resolveCosmosStreamWithSnapshotStrategy + let service = arrangeCosmos connectToSpecifiedCosmosOrSimulator resolveCosmosStreamWithSnapshotStrategy do! 
act service args } diff --git a/samples/Store/Integration/ContactPreferencesIntegration.fs b/samples/Store/Integration/ContactPreferencesIntegration.fs index c04fb7360..691cd9b87 100644 --- a/samples/Store/Integration/ContactPreferencesIntegration.fs +++ b/samples/Store/Integration/ContactPreferencesIntegration.fs @@ -23,12 +23,12 @@ let resolveStreamGesWithoutAccessStrategy gateway = let cosmosCodec = Domain.ContactPreferences.Events.JsonElementCodec.codec JsonSerializer.defaultOptions let resolveStreamCosmosWithLatestKnownEventSemantics gateway = - Cosmos.Resolver(gateway 1, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.LatestKnownEvent).Resolve + Cosmos.Resolver(gateway, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.LatestKnownEvent).Resolve let resolveStreamCosmosUnoptimized gateway = - Cosmos.Resolver(gateway defaultBatchSize, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.Unoptimized).Resolve + Cosmos.Resolver(gateway, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.Unoptimized).Resolve let resolveStreamCosmosRollingUnfolds gateway = let access = Cosmos.AccessStrategy.Custom(Domain.ContactPreferences.Fold.isOrigin, Domain.ContactPreferences.Fold.transmute) - Cosmos.Resolver(gateway defaultBatchSize, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, access).Resolve + Cosmos.Resolver(gateway, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, access).Resolve type Tests(testOutputHelper) = let testOutput = TestOutputAdapter testOutputHelper @@ -47,7 +47,7 @@ type Tests(testOutputHelper) = do! act service args } - let arrange connect choose resolve = async { + let arrangeEs connect choose resolve = async { let log = createLog () let! 
conn = connect log let gateway = choose conn @@ -55,30 +55,35 @@ type Tests(testOutputHelper) = [] let ``Can roundtrip against EventStore, correctly folding the events with normal semantics`` args = Async.RunSynchronously <| async { - let! service = arrange connectToLocalEventStoreNode createGesGateway resolveStreamGesWithoutAccessStrategy + let! service = arrangeEs connectToLocalEventStoreNode createGesGateway resolveStreamGesWithoutAccessStrategy do! act service args } [] let ``Can roundtrip against EventStore, correctly folding the events with compaction semantics`` args = Async.RunSynchronously <| async { - let! service = arrange connectToLocalEventStoreNode createGesGateway resolveStreamGesWithOptimizedStorageSemantics + let! service = arrangeEs connectToLocalEventStoreNode createGesGateway resolveStreamGesWithOptimizedStorageSemantics do! act service args } + let arrangeCosmos connect resolve batchSize = async { + let log = createLog () + let ctx: Cosmos.Context = connect log batchSize + return Backend.ContactPreferences.Service(log, resolve ctx) } + [] let ``Can roundtrip against Cosmos, correctly folding the events with Unoptimized semantics`` args = Async.RunSynchronously <| async { - let! service = arrange connectToSpecifiedCosmosOrSimulator createCosmosContext resolveStreamCosmosUnoptimized + let! service = arrangeCosmos connectToSpecifiedCosmosOrSimulator resolveStreamCosmosUnoptimized defaultBatchSize do! act service args } [] let ``Can roundtrip against Cosmos, correctly folding the events with LatestKnownEvent semantics`` args = Async.RunSynchronously <| async { - let! service = arrange connectToSpecifiedCosmosOrSimulator createCosmosContext resolveStreamCosmosWithLatestKnownEventSemantics + let! service = arrangeCosmos connectToSpecifiedCosmosOrSimulator resolveStreamCosmosWithLatestKnownEventSemantics 1 do! 
act service args } [] let ``Can roundtrip against Cosmos, correctly folding the events with RollingUnfold semantics`` args = Async.RunSynchronously <| async { - let! service = arrange connectToSpecifiedCosmosOrSimulator createCosmosContext resolveStreamCosmosRollingUnfolds + let! service = arrangeCosmos connectToSpecifiedCosmosOrSimulator resolveStreamCosmosRollingUnfolds defaultBatchSize do! act service args } diff --git a/samples/Store/Integration/FavoritesIntegration.fs b/samples/Store/Integration/FavoritesIntegration.fs index 2d3e90430..2255a66ff 100644 --- a/samples/Store/Integration/FavoritesIntegration.fs +++ b/samples/Store/Integration/FavoritesIntegration.fs @@ -63,17 +63,15 @@ type Tests(testOutputHelper) = [] let ``Can roundtrip against Cosmos, correctly folding the events`` args = Async.RunSynchronously <| async { let log = createLog () - let! conn = connectToSpecifiedCosmosOrSimulator log - let gateway = createCosmosContext conn defaultBatchSize - let service = createServiceCosmos gateway log + let store = connectToSpecifiedCosmosOrSimulator log defaultBatchSize + let service = createServiceCosmos store log do! act service args } [] let ``Can roundtrip against Cosmos, correctly folding the events with rolling unfolds`` args = Async.RunSynchronously <| async { let log = createLog () - let! conn = connectToSpecifiedCosmosOrSimulator log - let gateway = createCosmosContext conn defaultBatchSize - let service = createServiceCosmosRollingState gateway log + let store = connectToSpecifiedCosmosOrSimulator log defaultBatchSize + let service = createServiceCosmosRollingState store log do! 
act service args } diff --git a/samples/Store/Integration/LogIntegration.fs b/samples/Store/Integration/LogIntegration.fs index 053d0786d..d5d45658d 100644 --- a/samples/Store/Integration/LogIntegration.fs +++ b/samples/Store/Integration/LogIntegration.fs @@ -122,10 +122,9 @@ type Tests() = let batchSize = defaultBatchSize let buffer = ConcurrentQueue() let log = createLoggerWithMetricsExtraction buffer.Enqueue - let! conn = connectToSpecifiedCosmosOrSimulator log - let gateway = createCosmosContext conn batchSize - let service = Backend.Cart.Service(log, CartIntegration.resolveCosmosStreamWithSnapshotStrategy gateway) + let store = connectToSpecifiedCosmosOrSimulator log batchSize + let service = Backend.Cart.Service(log, CartIntegration.resolveCosmosStreamWithSnapshotStrategy store) let itemCount = batchSize / 2 + 1 let cartId = % Guid.NewGuid() do! act buffer service itemCount context cartId skuId "EqxCosmos Tip " // one is a 404, one is a 200 - } \ No newline at end of file + } diff --git a/src/Equinox.Core/Infrastructure.fs b/src/Equinox.Core/Infrastructure.fs index aaa90d24a..a70fbb91b 100755 --- a/src/Equinox.Core/Infrastructure.fs +++ b/src/Equinox.Core/Infrastructure.fs @@ -73,6 +73,9 @@ type Async with sc ()) |> ignore) + static member map (f:'a -> 'b) (a:Async<'a>) : Async<'b> = async.Bind(a, f >> async.Return) + static member bind (f:'a -> Async<'b>) (a:Async<'a>) : Async<'b> = async.Bind(a, f) + #if NETSTANDARD2_1 static member inline AwaitValueTask (vtask: ValueTask<'T>) : Async<'T> = vtask.AsTask() |> Async.AwaitTaskCorrect #endif diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index 7dbd01ef8..0f03795c5 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -4,11 +4,13 @@ open Azure open Azure.Cosmos open Equinox.Core open FsCodec +open FSharp.Control open Serilog open System open System.IO open System.Text.Json open System.Text.Json.Serialization +open System.Threading /// A single Domain Event from 
the array held in a Batch type [] // TODO for STJ v5: All fields required unless explicitly optional @@ -72,41 +74,41 @@ type [] // TODO for STJ v5: All fields required unless /// As one cannot sort by the implicit `id` field, we have an indexed `i` field for sort and range query use static member internal IndexedFields = [Batch.PartitionKeyField; "i"; "n"] -/// Manages zipping of the UTF-8 json bytes to make the index record minimal from the perspective of the writer stored proc -/// Only applied to snapshots in the Tip -type JsonCompressedBase64Converter() = - inherit JsonConverter() - - override __.Read (reader, _typeToConvert, options) = - if reader.TokenType = JsonTokenType.Null then - JsonSerializer.Deserialize(&reader, options) - else - let compressedBytes = reader.GetBytesFromBase64() - use input = new MemoryStream(compressedBytes) - use decompressor = new System.IO.Compression.DeflateStream(input, System.IO.Compression.CompressionMode.Decompress) - use output = new MemoryStream() - decompressor.CopyTo(output) - JsonSerializer.Deserialize(ReadOnlySpan.op_Implicit(output.ToArray()), options) - - override __.Write (writer, value, _options) = - if value.ValueKind = JsonValueKind.Null || value.ValueKind = JsonValueKind.Undefined then - writer.WriteNullValue() - else - let input = System.Text.Encoding.UTF8.GetBytes(value.GetRawText()) - use output = new MemoryStream() - use compressor = new System.IO.Compression.DeflateStream(output, System.IO.Compression.CompressionLevel.Optimal) - compressor.Write(input, 0, input.Length) - compressor.Close() - writer.WriteBase64StringValue(ReadOnlySpan.op_Implicit(output.ToArray())) - -type JsonCompressedBase64ConverterAttribute () = - inherit JsonConverterAttribute(typeof) - - static let converter = JsonCompressedBase64Converter() - - override __.CreateConverter _typeToConvert = - converter :> JsonConverter - + /// Manages zipping of the UTF-8 json bytes to make the index record minimal from the perspective of the writer 
stored proc + /// Only applied to snapshots in the Tip + type JsonCompressedBase64Converter() = + inherit JsonConverter() + + override __.Read (reader, _typeToConvert, options) = + if reader.TokenType = JsonTokenType.Null then + JsonSerializer.Deserialize(&reader, options) + else + let compressedBytes = reader.GetBytesFromBase64() + use input = new MemoryStream(compressedBytes) + use decompressor = new System.IO.Compression.DeflateStream(input, System.IO.Compression.CompressionMode.Decompress) + use output = new MemoryStream() + decompressor.CopyTo(output) + JsonSerializer.Deserialize(ReadOnlySpan.op_Implicit(output.ToArray()), options) + + override __.Write (writer, value, _options) = + if value.ValueKind = JsonValueKind.Null || value.ValueKind = JsonValueKind.Undefined then + writer.WriteNullValue() + else + let input = System.Text.Encoding.UTF8.GetBytes(value.GetRawText()) + use output = new MemoryStream() + use compressor = new System.IO.Compression.DeflateStream(output, System.IO.Compression.CompressionLevel.Optimal) + compressor.Write(input, 0, input.Length) + compressor.Close() + writer.WriteBase64StringValue(ReadOnlySpan.op_Implicit(output.ToArray())) + + type JsonCompressedBase64ConverterAttribute () = + inherit JsonConverterAttribute(typeof) + + static let converter = JsonCompressedBase64Converter() + + override __.CreateConverter _typeToConvert = + converter :> JsonConverter + /// Compaction/Snapshot/Projection Event based on the state at a given point in time `i` [] type Unfold = @@ -335,7 +337,7 @@ module Log = for uom, f in measures do let d = f duration in if d <> 0. 
then logPeriodicRate uom (float totalCount/d |> int64) (totalRc/d) [] -module private MicrosoftAzureCosmosWrappers = +module MicrosoftAzureCosmosWrappers = /// Extracts the innermost exception from a nested hierarchy of Aggregate Exceptions let (|AggregateException|) (exn : exn) = let rec aux (e : exn) = @@ -352,37 +354,28 @@ module private MicrosoftAzureCosmosWrappers = // CosmosDB Error HttpStatusCode extractor let (|CosmosStatusCode|) (e : CosmosException) = e.Response.Status - + type ReadResult<'T> = Found of 'T | NotFound | NotModified - + type Azure.Core.ResponseHeaders with member headers.GetRequestCharge () = match headers.TryGetValue("x-ms-request-charge") with | true, charge when not <| String.IsNullOrEmpty charge -> float charge | _ -> 0. - - type Azure.Cosmos.CosmosContainer with - member container.TryReadItem(partitionKey : PartitionKey, documentId : string, ?options : ItemRequestOptions): Async> = async { - let options = defaultArg options null - let! ct = Async.CancellationToken - // TODO use TryReadItemStreamAsync to avoid the exception https://github.com/Azure/azure-cosmos-dotnet-v3/issues/692#issuecomment-521936888 - try let! item = async { return! 
container.ReadItemAsync(documentId, partitionKey, requestOptions = options, cancellationToken = ct) |> Async.AwaitTaskCorrect } - // if item.StatusCode = System.Net.HttpStatusCode.NotModified then return item.RequestCharge, NotModified - // NB `.Document` will NRE if a IfNoneModified precondition triggers a NotModified result - // else - - return item.GetRawResponse().Headers.GetRequestCharge(), Found item.Value - with CosmosException (CosmosStatusCode 404 as e) -> return e.Response.Headers.GetRequestCharge(), NotFound - | CosmosException (CosmosStatusCode 304 as e) -> return e.Response.Headers.GetRequestCharge(), NotModified - // NB while the docs suggest you may see a 412, the NotModified in the body of the try/with is actually what happens - | CosmosException (CosmosStatusCode sc as e) when sc = int System.Net.HttpStatusCode.PreconditionFailed -> return e.Response.Headers.GetRequestCharge(), NotModified } - -module Sync = - // NB don't nest in a private module, or serialization will fail miserably ;) - [] - type SyncResponse = { etag: string; n: int64; conflicts: Unfold[] } - let [] private sprocName = "EquinoxRollingUnfolds3" // NB need to rename/number for any breaking change - let [] private sprocBody = """ + + + +[] +type SyncResponse = { etag: string; n: int64; conflicts: Unfold[] } +type ResourceThroughput = +| Default +| SetIfCreating of int +| ReplaceAlways of int +type [] Provisioning = Container of throughput: ResourceThroughput | Database of throughput: ResourceThroughput + +module SyncStoredProcedure = + let [] defaultName = "EquinoxRollingUnfolds3" // NB need to rename/number for any breaking change + let [] body = """ // Manages the merging of the supplied Request Batch, fulfilling one of the following end-states // 1 perform concurrency check (index=-1 -> always append; index=-2 -> check based on .etag; _ -> check .n=.index) // 2a Verify no current Tip; if so - incoming req.e and defines the `n`ext position / unfolds @@ -447,6 +440,128 @@ function 
sync(req, expIndex, expEtag) { } }""" +type EquinoxCosmosDatabaseClient (db: CosmosDatabase) = + abstract member GetOrCreateContainer: props: ContainerProperties * throughput: ResourceThroughput * ?cancellationToken: CancellationToken -> Async + default __.GetOrCreateContainer(props, throughput, cancellationToken) = async { + let! ct = + match cancellationToken with + | Some ct -> async.Return ct + | _ -> Async.CancellationToken + + let! response = + match throughput with + | Default -> db.CreateContainerIfNotExistsAsync(props, cancellationToken = ct) |> Async.AwaitTaskCorrect + | SetIfCreating value -> db.CreateContainerIfNotExistsAsync(props, throughput = Nullable(value), cancellationToken = ct) |> Async.AwaitTaskCorrect + | ReplaceAlways value -> + db.CreateContainerIfNotExistsAsync(props, throughput = Nullable(value), cancellationToken = ct) + |> Async.AwaitTaskCorrect + |> Async.bind (fun response -> + response.Container.ReplaceThroughputAsync(value, cancellationToken = ct) |> Async.AwaitTaskCorrect |> Async.map (fun _ -> response)) + + return EquinoxCosmosContainerClient(response.Container) } + + abstract member GetOrCreateBatchAndTipContainer: containerName: string * throughput: ResourceThroughput * ?cancellationToken: CancellationToken -> Async + default __.GetOrCreateBatchAndTipContainer(containerName, throughput, cancellationToken) = async { + let props = ContainerProperties(id = containerName, partitionKeyPath = sprintf "/%s" Batch.PartitionKeyField) + props.IndexingPolicy.IndexingMode <- IndexingMode.Consistent + props.IndexingPolicy.Automatic <- true + // Can either do a blacklist or a whitelist + // Given how long and variable the blacklist would be, we whitelist instead + props.IndexingPolicy.ExcludedPaths.Add(ExcludedPath(Path="/*")) + // NB its critical to index the nominated PartitionKey field defined above or there will be runtime errors + for k in Batch.IndexedFields do props.IndexingPolicy.IncludedPaths.Add(IncludedPath(Path = sprintf "/%s/?" 
k)) + return! __.GetOrCreateContainer(props, throughput, ?cancellationToken = cancellationToken) + } + + abstract member GetContainer: containerName: string -> EquinoxCosmosContainerClient + default __.GetContainer(containerName) = + EquinoxCosmosContainerClient(db.GetContainer(containerName)) + +and EquinoxCosmosContainerClient (container: CosmosContainer) = + member val SdkClient = container with get + + abstract member QueryPageable<'T> : query: QueryDefinition * ?options: QueryRequestOptions -> AsyncSeq> + default __.QueryPageable<'T>(query, ?options) = + container.GetItemQueryIterator<'T>(query, requestOptions = defaultArg options null).AsPages() |> AsyncSeq.ofAsyncEnum + + abstract member TryReadItem<'T> : docId: string * stream: string * ?options: ItemRequestOptions * ?cancellationToken : CancellationToken -> Async> + default __.TryReadItem<'T>(docId, stream, ?options, ?cancellationToken) = async { + let partitionKey = PartitionKey stream + let options = defaultArg options null + let! ct = + match cancellationToken with + | Some ct -> async.Return ct + | _ -> Async.CancellationToken + // TODO use TryReadItemStreamAsync to avoid the exception https://github.com/Azure/azure-cosmos-dotnet-v3/issues/692#issuecomment-521936888 + try let! item = async { return! 
container.ReadItemAsync<'T>(docId, partitionKey, requestOptions = options, cancellationToken = ct) |> Async.AwaitTaskCorrect } + // if item.StatusCode = System.Net.HttpStatusCode.NotModified then return item.RequestCharge, NotModified + // NB `.Document` will NRE if a IfNoneModified precondition triggers a NotModified result + // else + + return item.GetRawResponse().Headers.GetRequestCharge(), Found item.Value + with CosmosException (CosmosStatusCode 404 as e) -> return e.Response.Headers.GetRequestCharge(), NotFound + | CosmosException (CosmosStatusCode 304 as e) -> return e.Response.Headers.GetRequestCharge(), NotModified + // NB while the docs suggest you may see a 412, the NotModified in the body of the try/with is actually what happens + | CosmosException (CosmosStatusCode sc as e) when sc = int System.Net.HttpStatusCode.PreconditionFailed -> return e.Response.Headers.GetRequestCharge(), NotModified } + + abstract member CreateSyncStoredProcedure: name: string -> Async + default __.CreateSyncStoredProcedure (name: string) = async { + try let! r = container.Scripts.CreateStoredProcedureAsync(Scripts.StoredProcedureProperties(name, SyncStoredProcedure.body)) |> Async.AwaitTaskCorrect + return r.GetRawResponse().Headers.GetRequestCharge() + with CosmosException ((CosmosStatusCode sc) as e) when sc = int System.Net.HttpStatusCode.Conflict -> return e.Response.Headers.GetRequestCharge() } + + abstract member Sync: stream: string * tip: Tip * index: int64 * ?storedProcedureName: string * ?etag: string * ?cancellationToken : CancellationToken -> Async> + default __.Sync(stream, tip, index, ?storedProcedureName, ?etag, ?cancellationToken) = async { + let storedProcedureName = defaultArg storedProcedureName SyncStoredProcedure.defaultName + let partitionKey = PartitionKey stream + let! ct = + match cancellationToken with + | Some ct -> async.Return ct + | _ -> Async.CancellationToken + let args = [| box tip; box index; box (Option.toObj etag)|] + return! 
container.Scripts.ExecuteStoredProcedureAsync(storedProcedureName, partitionKey, args, cancellationToken = ct) |> Async.AwaitTaskCorrect } + +type EquinoxCosmosClient (logger: ILogger, sdk: CosmosClient) = + abstract member InitializeContainer: dbName: string * containerName: string * mode: Provisioning * createSyncStoredProcedure: bool * ?syncStoredProcedureName: string -> Async + default __.InitializeContainer (dbName, containerName, mode, createSyncStoredProcedure, syncStoredProcedureName) = async { + let dbThroughput = match mode with Provisioning.Database throughput -> throughput | _ -> Default + let containerThroughput = match mode with Provisioning.Container throughput -> throughput | _ -> Default + + let! db = __.GetOrCreateDatabase(dbName, dbThroughput) + let! container = db.GetOrCreateBatchAndTipContainer(containerName, containerThroughput) + + if createSyncStoredProcedure then + let syncStoredProcedureName = defaultArg syncStoredProcedureName SyncStoredProcedure.defaultName + do! container.CreateSyncStoredProcedure(syncStoredProcedureName) |> Async.Ignore } + + abstract member GetOrCreateDatabase: dbName: string * throughput: ResourceThroughput * ?cancellationToken: CancellationToken -> Async + default __.GetOrCreateDatabase(dbName, throughput, ?cancellationToken) = async { + let! ct = + match cancellationToken with + | Some ct -> async.Return ct + | _ -> Async.CancellationToken + + let! 
response = + match throughput with + | Default -> sdk.CreateDatabaseIfNotExistsAsync(id = dbName, cancellationToken = ct) |> Async.AwaitTaskCorrect + | SetIfCreating value -> sdk.CreateDatabaseIfNotExistsAsync(id = dbName, throughput = Nullable(value), cancellationToken = ct) |> Async.AwaitTaskCorrect + | ReplaceAlways value -> + sdk.CreateDatabaseIfNotExistsAsync(id = dbName, throughput = Nullable(value), cancellationToken = ct) + |> Async.AwaitTaskCorrect + |> Async.bind (fun response -> + response.Database.ReplaceThroughputAsync(value, cancellationToken = ct) |> Async.AwaitTaskCorrect |> Async.map (fun _ -> response)) + + return EquinoxCosmosDatabaseClient(response.Database) } + + abstract member GetDatabase: dbName: string -> EquinoxCosmosDatabaseClient + default __.GetDatabase(dbName) = + EquinoxCosmosDatabaseClient(sdk.GetDatabase(dbName)) + + +module Sync = + // NB don't nest in a private module, or serialization will fail miserably ;) + let [] private sprocName = "EquinoxRollingUnfolds3" // NB need to rename/number for any breaking change + [] type Result = | Written of Position @@ -454,13 +569,10 @@ function sync(req, expIndex, expEtag) { | ConflictUnknown of Position type [] Exp = Version of int64 | Etag of string | Any - let private run (container : CosmosContainer, stream : string) (exp, req: Tip) + let private run (container : EquinoxCosmosContainerClient, stream : string) (exp, req: Tip) : Async = async { let ep = match exp with Exp.Version ev -> Position.fromI ev | Exp.Etag et -> Position.fromEtag et | Exp.Any -> Position.fromAppendAtEnd - let! ct = Async.CancellationToken - let args = [| box req; box ep.index; box (Option.toObj ep.etag)|] - let! (res : Scripts.StoredProcedureExecuteResponse) = - container.Scripts.ExecuteStoredProcedureAsync(sprocName, PartitionKey stream, args, cancellationToken = ct) |> Async.AwaitTaskCorrect + let! 
res = container.Sync(stream, req, ep.index, ?etag = ep.etag) let newPos = { index = res.Value.n; etag = Option.ofObj res.Value.etag } return res.GetRawResponse().Headers.GetRequestCharge(), res.Value.conflicts |> function | null -> Result.Written newPos @@ -508,80 +620,11 @@ function sync(req, expIndex, expEtag) { let mkUnfold baseIndex (unfolds: IEventData<_> seq) : Unfold seq = unfolds |> Seq.mapi (fun offset x -> { i = baseIndex + int64 offset; c = x.EventType; d = x.Data; m = x.Meta; t = DateTimeOffset.UtcNow } : Unfold) - module Initialization = - type [] Provisioning = Container of rus: int | Database of rus: int - let adjustOfferC (c: CosmosContainer) rus = async { - let! ct = Async.CancellationToken - let! _ = c.ReplaceThroughputAsync(rus, cancellationToken = ct) |> Async.AwaitTaskCorrect in () } - let adjustOfferD (d: CosmosDatabase) rus = async { - let! ct = Async.CancellationToken - let! _ = d.ReplaceThroughputAsync(rus, cancellationToken = ct) |> Async.AwaitTaskCorrect in () } - let private createDatabaseIfNotExists (client:CosmosClient) dName maybeRus = async { - let! ct = Async.CancellationToken - let! dbr = client.CreateDatabaseIfNotExistsAsync(id=dName, throughput = Option.toNullable maybeRus, cancellationToken=ct) |> Async.AwaitTaskCorrect - return dbr.Database } - let private createOrProvisionDatabase (client:CosmosClient) dName mode = async { - match mode with - | Provisioning.Database rus -> - let! db = createDatabaseIfNotExists client dName (Some rus) - do! adjustOfferD db rus - | Provisioning.Container _ -> - let! _ = createDatabaseIfNotExists client dName None in () } - let private createContainerIfNotExists (d: CosmosDatabase) (cp:ContainerProperties) maybeRus = async { - let! ct = Async.CancellationToken - let! 
c = d.CreateContainerIfNotExistsAsync(cp, throughput=Option.toNullable maybeRus, cancellationToken=ct) |> Async.AwaitTaskCorrect - return c.Container } - let private createOrProvisionContainer (d: CosmosDatabase) (cp:ContainerProperties) mode = async { - match mode with - | Provisioning.Database _ -> - return! createContainerIfNotExists d cp None - | Provisioning.Container rus -> - let! c = createContainerIfNotExists d cp (Some rus) - do! adjustOfferC c rus - return c } - let private createStoredProcIfNotExists (c: CosmosContainer) (name, body): Async = async { - try let! r = c.Scripts.CreateStoredProcedureAsync(Scripts.StoredProcedureProperties(id=name, body=body)) |> Async.AwaitTaskCorrect - return r.GetRawResponse().Headers.GetRequestCharge() - with CosmosException ((CosmosStatusCode sc) as e) when sc = int System.Net.HttpStatusCode.Conflict -> return e.Response.Headers.GetRequestCharge() } - let private mkContainerProperties containerName partitionKeyFieldName = - ContainerProperties(id = containerName, partitionKeyPath = sprintf "/%s" partitionKeyFieldName) - let private createBatchAndTipContainerIfNotExists (client: CosmosClient) (dName,cName) mode : Async = - let def = mkContainerProperties cName Batch.PartitionKeyField - def.IndexingPolicy.IndexingMode <- IndexingMode.Consistent - def.IndexingPolicy.Automatic <- true - // Can either do a blacklist or a whitelist - // Given how long and variable the blacklist would be, we whitelist instead - def.IndexingPolicy.ExcludedPaths.Add(ExcludedPath(Path="/*")) - // NB its critical to index the nominated PartitionKey field defined above or there will be runtime errors - for k in Batch.IndexedFields do def.IndexingPolicy.IncludedPaths.Add(IncludedPath(Path = sprintf "/%s/?" k)) - createOrProvisionContainer (client.GetDatabase dName) def mode - let createSyncStoredProcIfNotExists (log: ILogger option) container = async { - let! 
t, ru = createStoredProcIfNotExists container (sprocName,sprocBody) |> Stopwatch.Time - match log with - | None -> () - | Some log -> log.Information("Created stored procedure {sprocId} in {ms}ms rc={ru}", sprocName, (let e = t.Elapsed in e.TotalMilliseconds), ru) } - let private createAuxContainerIfNotExists (client: CosmosClient) (dName,cName) mode : Async = - let def = mkContainerProperties cName "id" // as per Cosmos team, Partition Key must be "/id" - // TL;DR no indexing of any kind; see https://github.com/Azure/azure-documentdb-changefeedprocessor-dotnet/issues/142 - def.IndexingPolicy.Automatic <- false - def.IndexingPolicy.IndexingMode <- IndexingMode.None - createOrProvisionContainer (client.GetDatabase dName) def mode - let init log (client: CosmosClient) (dName,cName) mode skipStoredProc = async { - do! createOrProvisionDatabase client dName mode - let! container = createBatchAndTipContainerIfNotExists client (dName,cName) mode - if not skipStoredProc then - do! createSyncStoredProcIfNotExists (Some log) container } - let initAux (client: CosmosClient) (dName,cName) rus = async { - // Hardwired for now (not sure if CFP can store in a Database-allocated as it would need to be supplying partion keys) - let mode = Provisioning.Container rus - do! createOrProvisionDatabase client dName mode - return! 
createAuxContainerIfNotExists client (dName,cName) mode } - module internal Tip = - let private get (container : CosmosContainer, stream : string) (maybePos: Position option) = + let private get (container : EquinoxCosmosContainerClient, stream : string) (maybePos: Position option) = let ro = match maybePos with Some { etag=Some etag } -> ItemRequestOptions(IfNoneMatch=Nullable(Azure.ETag(etag))) | _ -> null - container.TryReadItem(PartitionKey stream, Tip.WellKnownDocumentId, ro) - let private loggedGet (get : CosmosContainer * string -> Position option -> Async<_>) (container,stream) (maybePos: Position option) (log: ILogger) = async { + container.TryReadItem(Tip.WellKnownDocumentId, stream, options = ro) + let private loggedGet (get : EquinoxCosmosContainerClient * string -> Position option -> Async<_>) (container,stream) (maybePos: Position option) (log: ILogger) = async { let log = log |> Log.prop "stream" stream let! t, (ru, res : ReadResult) = get (container,stream) maybePos |> Stopwatch.Time let log bytes count (f : Log.Measurement -> _) = log |> Log.event (f { stream = stream; interval = t; bytes = bytes; count = count; ru = ru }) @@ -610,7 +653,7 @@ module internal Tip = module internal Query = open FSharp.Control - let private mkQuery (container : CosmosContainer, stream: string) maxItems (direction: Direction) startPos : AsyncSeq> = + let private mkQuery (container : EquinoxCosmosContainerClient, stream: string) maxItems (direction: Direction) startPos : AsyncSeq> = let query = let root = sprintf "SELECT c.id, c.i, c._etag, c.n, c.e FROM c WHERE c.id!=\"%s\"" Tip.WellKnownDocumentId let tail = sprintf "ORDER BY c.i %s" (if direction = Direction.Forward then "ASC" else "DESC") @@ -620,7 +663,7 @@ module internal Tip = let cond = if direction = Direction.Forward then "c.n > @startPos" else "c.i < @startPos" QueryDefinition(sprintf "%s AND %s %s" root cond tail).WithParameter("@startPos", positionSoExclusiveWhenBackward) let qro = new 
QueryRequestOptions(PartitionKey = Nullable(PartitionKey stream), MaxItemCount=Nullable maxItems) - container.GetItemQueryIterator(query, requestOptions = qro).AsPages() |> AsyncSeq.ofAsyncEnum + container.QueryPageable(query, options = qro) // Unrolls the Batches in a response - note when reading backwards, the events are emitted in reverse order of index let private processNextPage direction (streamName: string) startPos (enumerator: IAsyncEnumerator>) (log: ILogger) @@ -772,12 +815,12 @@ module internal Tip = let t = StopwatchInterval(startTicks, endTicks) log |> logQuery direction maxItems stream t (!responseCount,allEvents.ToArray()) -1L ru } -type [] Token = { container: CosmosContainer; stream: string; pos: Position } +type [] Token = { container: EquinoxCosmosContainerClient; stream: string; pos: Position } module Token = let create (container,stream) pos : StreamToken = { value = box { container = container; stream = stream; pos = pos } version = pos.index } - let (|Unpack|) (token: StreamToken) : CosmosContainer*string*Position = let t = unbox token.value in t.container,t.stream,t.pos + let (|Unpack|) (token: StreamToken) : EquinoxCosmosContainerClient*string*Position = let t = unbox token.value in t.container,t.stream,t.pos let supersedes (Unpack (_,_,currentPos)) (Unpack (_,_,xPos)) = let currentVersion, newVersion = currentPos.index, xPos.index let currentETag, newETag = currentPos.etag, xPos.etag @@ -791,7 +834,7 @@ module Internal = [] type LoadFromTokenResult<'event> = Unchanged | Found of StreamToken * 'event[] -namespace Equinox.Cosmos +namespace Equinox.Cosmos.Internal open Azure.Cosmos open Equinox @@ -806,7 +849,7 @@ open System.Collections.Concurrent open System.Text.Json /// Defines policies for retrying with respect to transient failures calling CosmosDb (as opposed to application level concurrency conflicts) -type Connection(client: CosmosClient, []?readRetryPolicy: IRetryPolicy, []?writeRetryPolicy) = +type Connection(client: 
EquinoxCosmosClient, []?readRetryPolicy: IRetryPolicy, []?writeRetryPolicy) = member __.Client = client member __.TipRetryPolicy = readRetryPolicy member __.QueryRetryPolicy = readRetryPolicy @@ -869,8 +912,8 @@ type Gateway(conn : Connection, batching : BatchingPolicy) = | Tip.Result.Found (pos, FromUnfold tryDecode isOrigin span) -> return LoadFromTokenResult.Found (Token.create (container,stream) pos, span) | _ -> let! res = __.Read log (container,stream) Direction.Forward (Some pos) (tryDecode,isOrigin) return LoadFromTokenResult.Found res } - member __.CreateSyncStoredProcIfNotExists log container = - Sync.Initialization.createSyncStoredProcIfNotExists log container + member __.CreateSyncStoredProcIfNotExists log (container: EquinoxCosmosContainerClient) = + container.CreateSyncStoredProcedure(SyncStoredProcedure.defaultName) |> Async.Ignore member __.Sync log containerStream (exp, batch: Tip): Async = async { if Array.isEmpty batch.e && Array.isEmpty batch.u then invalidOp "Must write either events or unfolds." let! 
wr = Sync.batch log conn.WriteRetryPolicy containerStream (exp,batch) @@ -879,6 +922,43 @@ type Gateway(conn : Connection, batching : BatchingPolicy) = | Sync.Result.ConflictUnknown pos' -> return InternalSyncResult.ConflictUnknown (Token.create containerStream pos') | Sync.Result.Written pos' -> return InternalSyncResult.Written (Token.create containerStream pos') } +/// Holds Container state, coordinating initialization activities +type private ContainerWrapper(container : EquinoxCosmosContainerClient, ?initContainer : EquinoxCosmosContainerClient -> Async) = + let initGuard = initContainer |> Option.map (fun init -> AsyncCacheCell(init container)) + + member __.Container = container + member internal __.InitializationGate = match initGuard with Some g when g.PeekIsValid() |> not -> Some g.AwaitValue | _ -> None + +/// Defines a process for mapping from a Stream Name to the appropriate storage area, allowing control over segregation / co-locating of data +type Containers(categoryAndIdToDatabaseContainerStream : string -> string -> string*string*string, []?disableInitialization) = + // Index of database*collection -> Initialization Context + let wrappers = ConcurrentDictionary() + new (databaseId, containerId) = + // TOCONSIDER - this works to support the Core.Events APIs + let genStreamName categoryName streamId = if categoryName = null then streamId else sprintf "%s-%s" categoryName streamId + Containers(fun categoryName streamId -> databaseId, containerId, genStreamName categoryName streamId) + + member internal __.Resolve(client : EquinoxCosmosClient, categoryName, id, init) : (EquinoxCosmosContainerClient*string) * (unit -> Async) option = + let databaseId, containerName, streamName = categoryAndIdToDatabaseContainerStream categoryName id + let init = match disableInitialization with Some true -> None | _ -> Some init + let wrapped = wrappers.GetOrAdd((databaseId,containerName), fun (d,c) -> ContainerWrapper(client.GetDatabase(d).GetContainer(c), 
?initContainer = init)) + (wrapped.Container,streamName),wrapped.InitializationGate + +namespace Equinox.Cosmos + +open Azure.Cosmos +open Equinox +open Equinox.Core +open Equinox.Cosmos.Internal +open Equinox.Cosmos.Store +open FsCodec +open FsCodec.SystemTextJson.Serialization +open FSharp.Control +open Serilog +open System +open System.Collections.Concurrent +open System.Text.Json + type private Category<'event, 'state, 'context>(gateway : Gateway, codec : IEventCodec<'event,JsonElement,'context>) = let (|TryDecodeFold|) (fold: 'state -> 'event seq -> 'state) initial (events: ITimelineEvent seq) : 'state = Seq.choose codec.TryDecode events |> fold initial member __.Load includeUnfolds containerStream fold initial isOrigin (log : ILogger): Async = async { @@ -912,14 +992,14 @@ type private Category<'event, 'state, 'context>(gateway : Gateway, codec : IEven module Caching = /// Forwards all state changes in all streams of an ICategory to a `tee` function - type CategoryTee<'event, 'state, 'context>(inner: ICategory<'event, 'state, CosmosContainer*string,'context>, tee : string -> StreamToken * 'state -> Async) = + type CategoryTee<'event, 'state, 'context>(inner: ICategory<'event, 'state, EquinoxCosmosContainerClient*string,'context>, tee : string -> StreamToken * 'state -> Async) = let intercept streamName tokenAndState = async { let! _ = tee streamName tokenAndState return tokenAndState } let loadAndIntercept load streamName = async { let! tokenAndState = load return! 
intercept streamName tokenAndState } - interface ICategory<'event, 'state, CosmosContainer*string, 'context> with + interface ICategory<'event, 'state, EquinoxCosmosContainerClient*string, 'context> with member __.Load(log, (container,streamName), opt) : Async = loadAndIntercept (inner.Load(log, (container,streamName), opt)) streamName member __.TrySync(log : ILogger, (Token.Unpack (_container,stream,_) as streamToken), state, events : 'event list, context) @@ -935,8 +1015,8 @@ module Caching = (cache : ICache) (prefix : string) (slidingExpiration : TimeSpan) - (category : ICategory<'event, 'state, CosmosContainer*string, 'context>) - : ICategory<'event, 'state, CosmosContainer*string, 'context> = + (category : ICategory<'event, 'state, EquinoxCosmosContainerClient*string, 'context>) + : ICategory<'event, 'state, EquinoxCosmosContainerClient*string, 'context> = let mkCacheEntry (initialToken : StreamToken, initialState : 'state) = new CacheEntry<'state>(initialToken, initialState, Token.supersedes) let options = CacheItemOptions.RelativeExpiration slidingExpiration let addOrUpdateSlidingExpirationCacheEntry streamName value = cache.UpdateIfNewer(prefix + streamName, options, mkCacheEntry value) @@ -949,7 +1029,7 @@ type private Folder<'event, 'state, 'context> ?readCache) = let inspectUnfolds = match mapUnfolds with Choice1Of3 () -> false | _ -> true let batched log containerStream = category.Load inspectUnfolds containerStream fold initial isOrigin log - interface ICategory<'event, 'state, CosmosContainer*string, 'context> with + interface ICategory<'event, 'state, EquinoxCosmosContainerClient*string, 'context> with member __.Load(log, (container,streamName), opt): Async = match readCache with | None -> batched log (container,streamName) @@ -965,39 +1045,27 @@ type private Folder<'event, 'state, 'context> | SyncResult.Conflict resync -> return SyncResult.Conflict resync | SyncResult.Written (token',state') -> return SyncResult.Written (token',state') } -/// Holds 
Container state, coordinating initialization activities -type private ContainerWrapper(container : CosmosContainer, ?initContainer : CosmosContainer -> Async) = - let initGuard = initContainer |> Option.map (fun init -> AsyncCacheCell(init container)) - - member __.Container = container - member internal __.InitializationGate = match initGuard with Some g when g.PeekIsValid() |> not -> Some g.AwaitValue | _ -> None - -/// Defines a process for mapping from a Stream Name to the appropriate storage area, allowing control over segregation / co-locating of data -type Containers(categoryAndIdToDatabaseContainerStream : string -> string -> string*string*string, []?disableInitialization) = - // Index of database*collection -> Initialization Context - let wrappers = ConcurrentDictionary() - new (databaseId, containerId) = - // TOCONSIDER - this works to support the Core.Events APIs - let genStreamName categoryName streamId = if categoryName = null then streamId else sprintf "%s-%s" categoryName streamId - Containers(fun categoryName streamId -> databaseId, containerId, genStreamName categoryName streamId) - - member internal __.Resolve(client : CosmosClient, categoryName, id, init) : (CosmosContainer*string) * (unit -> Async) option = - let databaseId, containerName, streamName = categoryAndIdToDatabaseContainerStream categoryName id - let init = match disableInitialization with Some true -> None | _ -> Some init - let wrapped = wrappers.GetOrAdd((databaseId,containerName), fun (d,c) -> ContainerWrapper(client.GetContainer(d, c), ?initContainer = init)) - (wrapped.Container,streamName),wrapped.InitializationGate - /// Pairs a Gateway, defining the retry policies for CosmosDb with a Containers map defining mappings from (category,id) to (databaseId,containerId,streamName) -type Context(gateway: Gateway, containers: Containers, [] ?log) = +type Context + ( client: EquinoxCosmosClient, + databaseId: string, + containerId: string, + ?log: ILogger, + ?defaultMaxItems: int, + 
?getDefaultMaxItems: unit -> int, + ?maxRequests: int, + ?readRetryPolicy: IRetryPolicy, + ?writeRetryPolicy: IRetryPolicy ) = + + let conn = Connection(client, ?readRetryPolicy = readRetryPolicy, ?writeRetryPolicy = writeRetryPolicy) + let batchingPolicy = BatchingPolicy(?defaultMaxItems = defaultMaxItems, ?getDefaultMaxItems =getDefaultMaxItems, ?maxRequests = maxRequests) + let gateway = Gateway(conn, batchingPolicy) let init = gateway.CreateSyncStoredProcIfNotExists log - new(gateway: Gateway, databaseId: string, containerId: string, []?log) = - Context(gateway, Containers(databaseId, containerId), ?log = log) - new(connection: Connection, databaseId: string, containerId: string, []?log) = - Context(Gateway(connection, BatchingPolicy()), databaseId, containerId, ?log = log) + let containers = Containers(databaseId, containerId) member __.Gateway = gateway member __.Containers = containers - member internal __.ResolveContainerStream(categoryName, id) : (CosmosContainer*string) * (unit -> Async) option = + member internal __.ResolveContainerStream(categoryName, id) : (EquinoxCosmosContainerClient*string) * (unit -> Async) option = containers.Resolve(gateway.Client, categoryName, id, init) [] @@ -1061,7 +1129,7 @@ type Resolver<'event, 'state, 'context>(context : Context, codec, fold, initial, | AccessStrategy.Custom (isOrigin,transmute) -> isOrigin, Choice3Of3 transmute let cosmosCat = Category<'event, 'state, 'context>(context.Gateway, codec) let folder = Folder<'event, 'state, 'context>(cosmosCat, fold, initial, isOrigin, mapUnfolds, ?readCache = readCacheOption) - let category : ICategory<_, _, CosmosContainer*string, 'context> = + let category : ICategory<_, _, EquinoxCosmosContainerClient*string, 'context> = match caching with | CachingStrategy.NoCaching -> folder :> _ | CachingStrategy.SlidingWindow(cache, window) -> @@ -1103,7 +1171,7 @@ type Discovery = UriAndKey (Uri uri, key) | _ -> invalidArg "connectionString" "unrecognized connection string format; 
must be `AccountEndpoint=https://...;AccountKey=...=;`" -type Connector +type ClientFactory ( /// Timeout to apply to individual reads/write round-trips going to CosmosDb requestTimeout: TimeSpan, /// Maximum number of times attempt when failure reason is a 429 from CosmosDb, signifying RU limits have been breached @@ -1157,32 +1225,20 @@ type Connector // co.TransportClientHandlerFactory <- inhibitCertCheck co - /// Yields a CosmosClient configured and connected the requested `discovery` strategy + /// Yields an EquinoxCosmosClient configured and connected the requested `discovery` strategy member __.CreateClient ( /// Name should be sufficient to uniquely identify this connection within a single app instance's logs name, discovery : Discovery, /// true to inhibit logging of client name - []?skipLog) : CosmosClient = + []?skipLog) : EquinoxCosmosClient = let (Discovery.UriAndKey (databaseUri=uri; key=key)) = discovery if skipLog <> Some true then logName uri name - new CosmosClient(string uri, key, __.ClientOptions) - - /// Yields a Connection configured per the specified strategy - member __.Connect - ( /// Name should be sufficient to uniquely identify this connection within a single app instance's logs - name, discovery : Discovery, - /// true to inhibit OpenAsync call - []?skipOpen, - /// true to inhibit logging of client name - []?skipLog) : Async = async { - let client = __.CreateClient(name, discovery, ?skipLog=skipLog) - // TODO validate this is equivalent to forcing a connect - if skipOpen <> Some true then let! 
_ = client.ReadAccountAsync() |> Async.AwaitTaskCorrect in () - return Connection(client, ?readRetryPolicy=readRetryPolicy, ?writeRetryPolicy=writeRetryPolicy) } + new EquinoxCosmosClient(log, new CosmosClient(string uri, key, __.ClientOptions)) namespace Equinox.Cosmos.Core open Equinox.Cosmos +open Equinox.Cosmos.Internal open Equinox.Cosmos.Store open FsCodec open FSharp.Control @@ -1198,10 +1254,9 @@ type AppendResult<'t> = /// Encapsulates the core facilities Equinox.Cosmos offers for operating directly on Events in Streams. type Context - ( /// Connection to CosmosDb, includes defined Transient Read and Write Retry policies - conn : Connection, - /// Container selector, mapping Stream Categories to Containers - containers : Containers, + ( client: EquinoxCosmosClient, + databaseId: string, + containerId: string, /// Logger to write to - see https://github.com/serilog/serilog/wiki/Provided-Sinks for how to wire to your logger log : Serilog.ILogger, /// Optional maximum number of Store.Batch records to retrieve as a set (how many Events are placed therein is controlled by average batch size when appending events @@ -1209,7 +1264,10 @@ type Context []?defaultMaxItems, /// Alternate way of specifying defaultMaxItems which facilitates reading it from a cached dynamic configuration []?getDefaultMaxItems) = + do if log = null then nullArg "log" + let conn = Equinox.Cosmos.Internal.Connection(client) + let containers = Containers(databaseId, containerId) let getDefaultMaxItems = match getDefaultMaxItems with Some f -> f | None -> fun () -> defaultArg defaultMaxItems 10 let batching = BatchingPolicy(getDefaultMaxItems=getDefaultMaxItems) let gateway = Gateway(conn, batching) diff --git a/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs b/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs index 0e0e6138d..c1f780610 100644 --- a/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs +++ b/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs @@ 
-29,9 +29,9 @@ type Tests(testOutputHelper) = let (|TestStream|) (name: Guid) = incr testIterations sprintf "events-%O-%i" name !testIterations - let mkContextWithItemLimit conn defaultBatchSize = - Context(conn,containers,log,?defaultMaxItems=defaultBatchSize) - let mkContext conn = mkContextWithItemLimit conn None + let mkContextWithItemLimit log defaultBatchSize = + Context(createSpecifiedCosmosOrSimulatorClient log, dbId, cId, log, ?defaultMaxItems = defaultBatchSize) + let mkContext log = mkContextWithItemLimit log None let verifyRequestChargesMax rus = let tripRequestCharges = [ for e, c in capture.RequestCharges -> sprintf "%A" e, c ] @@ -39,8 +39,7 @@ type Tests(testOutputHelper) = [] let append (TestStream streamName) = Async.RunSynchronously <| async { - let! conn = connectToSpecifiedCosmosOrSimulator log - let ctx = mkContext conn + let ctx = mkContext log let index = 0L let! res = Events.append ctx streamName index <| TestEvents.Create(0,1) @@ -61,8 +60,7 @@ type Tests(testOutputHelper) = // As it stands with the NoTipEvents stored proc, permitting empty batches a) yields an invalid state b) provides no conceivable benefit [] let ``append Throws when passed an empty batch`` (TestStream streamName) = Async.RunSynchronously <| async { - let! conn = connectToSpecifiedCosmosOrSimulator log - let ctx = mkContext conn + let ctx = mkContext log let index = 0L let! res = Events.append ctx streamName index (TestEvents.Create(0,0)) |> Async.Catch @@ -104,8 +102,7 @@ type Tests(testOutputHelper) = [] let ``appendAtEnd and getNextIndex`` (extras, TestStream streamName) = Async.RunSynchronously <| async { - let! 
conn = connectToSpecifiedCosmosOrSimulator log - let ctx = mkContextWithItemLimit conn (Some 1) + let ctx = mkContextWithItemLimit log (Some 1) // If a fail triggers a rerun, we need to dump the previous log entries captured capture.Clear() @@ -166,8 +163,7 @@ type Tests(testOutputHelper) = [] let ``append - fails on non-matching`` (TestStream streamName) = Async.RunSynchronously <| async { capture.Clear() - let! conn = connectToSpecifiedCosmosOrSimulator log - let ctx = mkContext conn + let ctx = mkContext log // Attempt to write, skipping Index 0 let! res = Events.append ctx streamName 1L <| TestEvents.Create(0,1) @@ -209,8 +205,7 @@ type Tests(testOutputHelper) = [] let get (TestStream streamName) = Async.RunSynchronously <| async { capture.Clear() - let! conn = connectToSpecifiedCosmosOrSimulator log - let ctx = mkContextWithItemLimit conn (Some 3) + let ctx = mkContextWithItemLimit log (Some 3) // We're going to ignore the first, to prove we can let! expected = add6EventsIn2Batches ctx streamName @@ -226,8 +221,7 @@ type Tests(testOutputHelper) = [] let ``get in 2 batches`` (TestStream streamName) = Async.RunSynchronously <| async { - let! conn = connectToSpecifiedCosmosOrSimulator log - let ctx = mkContextWithItemLimit conn (Some 1) + let ctx = mkContextWithItemLimit log (Some 1) let! expected = add6EventsIn2Batches ctx streamName let expected = expected |> Array.take 3 @@ -242,8 +236,7 @@ type Tests(testOutputHelper) = [] let ``get Lazy`` (TestStream streamName) = Async.RunSynchronously <| async { - let! conn = connectToSpecifiedCosmosOrSimulator log - let ctx = mkContextWithItemLimit conn (Some 1) + let ctx = mkContextWithItemLimit log (Some 1) let! expected = add6EventsIn2Batches ctx streamName capture.Clear() @@ -266,8 +259,7 @@ type Tests(testOutputHelper) = [] let getBackwards (TestStream streamName) = Async.RunSynchronously <| async { capture.Clear() - let! 
conn = connectToSpecifiedCosmosOrSimulator log - let ctx = mkContextWithItemLimit conn (Some 1) + let ctx = mkContextWithItemLimit log (Some 1) let! expected = add6EventsIn2Batches ctx streamName @@ -284,8 +276,7 @@ type Tests(testOutputHelper) = [] let ``getBackwards in 2 batches`` (TestStream streamName) = Async.RunSynchronously <| async { - let! conn = connectToSpecifiedCosmosOrSimulator log - let ctx = mkContextWithItemLimit conn (Some 1) + let ctx = mkContextWithItemLimit log (Some 1) let! expected = add6EventsIn2Batches ctx streamName @@ -302,8 +293,7 @@ type Tests(testOutputHelper) = [] let ``getBackwards Lazy`` (TestStream streamName) = Async.RunSynchronously <| async { - let! conn = connectToSpecifiedCosmosOrSimulator log - let ctx = mkContextWithItemLimit conn (Some 1) + let ctx = mkContextWithItemLimit log (Some 1) let! expected = add6EventsIn2Batches ctx streamName capture.Clear() diff --git a/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs b/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs index 0240c7d04..0b83c83f7 100644 --- a/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs +++ b/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs @@ -10,28 +10,30 @@ module Option = /// Standing up an Equinox instance is necessary to run for test purposes; either: /// - replace connection below with a connection string or Uri+Key for an initialized Equinox instance /// - Create a local Equinox via dotnet run cli/Equinox.cli -s $env:EQUINOX_COSMOS_CONNECTION -d test -c $env:EQUINOX_COSMOS_CONTAINER provision -ru 10000 -let private connectToCosmos (log: Serilog.ILogger) name discovery = - Connector(log=log, requestTimeout=TimeSpan.FromSeconds 3., maxRetryAttemptsOnRateLimitedRequests=2, maxRetryWaitTimeOnRateLimitedRequests=TimeSpan.FromMinutes 1.) 
- .Connect(name, discovery) let private read env = Environment.GetEnvironmentVariable env |> Option.ofObj let (|Default|) def name = (read name),def ||> defaultArg -let connectToSpecifiedCosmosOrSimulator (log: Serilog.ILogger) = +let dbId = read "EQUINOX_COSMOS_DATABASE" |> Option.defaultValue "equinox-test" +let cId = read "EQUINOX_COSMOS_CONTAINER" |> Option.defaultValue "equinox-test" + +let private connectToCosmos (log: Serilog.ILogger) batchSize client = + Context(client, dbId, cId, log = log, defaultMaxItems = batchSize) + +let createSpecifiedCosmosOrSimulatorClient log = + let createClient name discovery = + ClientFactory(log=log, requestTimeout=TimeSpan.FromSeconds 3., maxRetryAttemptsOnRateLimitedRequests=2, maxRetryWaitTimeOnRateLimitedRequests=TimeSpan.FromMinutes 1.) + .CreateClient(name, discovery) + match read "EQUINOX_COSMOS_CONNECTION" with | None -> Discovery.UriAndKey(Uri "https://localhost:8081", "C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw==") - |> connectToCosmos log "localDocDbSim" + |> createClient "localDocDbSim" | Some connectionString -> Discovery.FromConnectionString connectionString - |> connectToCosmos log "EQUINOX_COSMOS_CONNECTION" + |> createClient "EQUINOX_COSMOS_CONNECTION" -let defaultBatchSize = 500 - -let containers = - Containers( - read "EQUINOX_COSMOS_DATABASE" |> Option.defaultValue "equinox-test", - read "EQUINOX_COSMOS_CONTAINER" |> Option.defaultValue "equinox-test") +let connectToSpecifiedCosmosOrSimulator (log: Serilog.ILogger) batchSize = + createSpecifiedCosmosOrSimulatorClient log + |> connectToCosmos log batchSize -let createCosmosContext connection batchSize = - let gateway = Gateway(connection, BatchingPolicy(defaultMaxItems=batchSize)) - Context(gateway, containers) +let defaultBatchSize = 500 diff --git a/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs b/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs index b74282e46..1f3bb7691 100644 --- 
a/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs +++ b/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs @@ -12,28 +12,23 @@ module Cart = let fold, initial = Domain.Cart.Fold.fold, Domain.Cart.Fold.initial let snapshot = Domain.Cart.Fold.isOrigin, Domain.Cart.Fold.snapshot let codec = Domain.Cart.Events.JsonElementCodec.codec IntegrationJsonSerializer.options - let createServiceWithoutOptimization connection batchSize log = - let store = createCosmosContext connection batchSize + let createServiceWithoutOptimization store log = let resolve (id,opt) = Resolver(store, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.Unoptimized).Resolve(id,?option=opt) Backend.Cart.Service(log, resolve) let projection = "Compacted",snd snapshot /// Trigger looking in Tip (we want those calls to occur, but without leaning on snapshots, which would reduce the paths covered) - let createServiceWithEmptyUnfolds connection batchSize log = - let store = createCosmosContext connection batchSize + let createServiceWithEmptyUnfolds store log = let unfArgs = Domain.Cart.Fold.isOrigin, fun _ -> Seq.empty let resolve (id,opt) = Resolver(store, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.MultiSnapshot unfArgs).Resolve(id,?option=opt) Backend.Cart.Service(log, resolve) - let createServiceWithSnapshotStrategy connection batchSize log = - let store = createCosmosContext connection batchSize + let createServiceWithSnapshotStrategy store log = let resolve (id,opt) = Resolver(store, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.Snapshot snapshot).Resolve(id,?option=opt) Backend.Cart.Service(log, resolve) - let createServiceWithSnapshotStrategyAndCaching connection batchSize log cache = - let store = createCosmosContext connection batchSize + let createServiceWithSnapshotStrategyAndCaching store log cache = let sliding20m = CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) 
let resolve (id,opt) = Resolver(store, codec, fold, initial, sliding20m, AccessStrategy.Snapshot snapshot).Resolve(id,?option=opt) Backend.Cart.Service(log, resolve) - let createServiceWithRollingState connection log = - let store = createCosmosContext connection 1 + let createServiceWithRollingState store log = let access = AccessStrategy.RollingState Domain.Cart.Fold.snapshot let resolve (id,opt) = Resolver(store, codec, fold, initial, CachingStrategy.NoCaching, access).Resolve(id,?option=opt) Backend.Cart.Service(log, resolve) @@ -45,11 +40,11 @@ module ContactPreferences = let gateway = createGateway defaultBatchSize let resolve = Resolver(gateway, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.Unoptimized).Resolve Backend.ContactPreferences.Service(log, resolve) - let createService log createGateway = - let resolve = Resolver(createGateway 1, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.LatestKnownEvent).Resolve + let createService log store = + let resolve = Resolver(store, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.LatestKnownEvent).Resolve Backend.ContactPreferences.Service(log, resolve) - let createServiceWithLatestKnownEvent createGateway log cachingStrategy = - let resolve = Resolver(createGateway 1, codec, fold, initial, cachingStrategy, AccessStrategy.LatestKnownEvent).Resolve + let createServiceWithLatestKnownEvent store log cachingStrategy = + let resolve = Resolver(store, codec, fold, initial, cachingStrategy, AccessStrategy.LatestKnownEvent).Resolve Backend.ContactPreferences.Service(log, resolve) #nowarn "1182" // From hereon in, we may have some 'unused' privates (the tests) @@ -77,10 +72,10 @@ type Tests(testOutputHelper) = [] let ``Can roundtrip against Cosmos, correctly batching the reads [without reading the Tip]`` context skuId = Async.RunSynchronously <| async { - let! 
conn = connectToSpecifiedCosmosOrSimulator log - let maxItemsPerRequest = 2 - let service = Cart.createServiceWithoutOptimization conn maxItemsPerRequest log + let store = connectToSpecifiedCosmosOrSimulator log maxItemsPerRequest + + let service = Cart.createServiceWithoutOptimization store log capture.Clear() // for re-runs of the test let cartId = % Guid.NewGuid() @@ -110,15 +105,15 @@ type Tests(testOutputHelper) = let ``Can roundtrip against Cosmos, managing sync conflicts by retrying`` ctx initialState = Async.RunSynchronously <| async { let log1, capture1 = log, capture capture1.Clear() - let! conn = connectToSpecifiedCosmosOrSimulator log1 - // Ensure batching is included at some point in the proceedings let batchSize = 3 + let store = connectToSpecifiedCosmosOrSimulator log1 batchSize + // Ensure batching is included at some point in the proceedings let context, (sku11, sku12, sku21, sku22) = ctx let cartId = % Guid.NewGuid() // establish base stream state - let service1 = Cart.createServiceWithEmptyUnfolds conn batchSize log1 + let service1 = Cart.createServiceWithEmptyUnfolds store log1 let! maybeInitialSku = let (streamEmpty, skuId) = initialState async { @@ -151,7 +146,7 @@ type Tests(testOutputHelper) = do! s4 } let log2, capture2 = TestsWithLogCapture.CreateLoggerWithCapture testOutputHelper use _flush = log2 - let service2 = Cart.createServiceWithEmptyUnfolds conn batchSize log2 + let service2 = Cart.createServiceWithEmptyUnfolds store log2 let t2 = async { // Signal we have state, wait for other to do same, engineer conflict let prepare = async { @@ -194,8 +189,8 @@ type Tests(testOutputHelper) = [] let ``Can correctly read and update against Cosmos with LatestKnownEvent Access Strategy`` value = Async.RunSynchronously <| async { - let! 
conn = connectToSpecifiedCosmosOrSimulator log - let service = ContactPreferences.createService log (createCosmosContext conn) + let store = connectToSpecifiedCosmosOrSimulator log 1 + let service = ContactPreferences.createService log store let email = let g = System.Guid.NewGuid() in g.ToString "N" //let (Domain.ContactPreferences.Id email) = id () @@ -217,8 +212,8 @@ type Tests(testOutputHelper) = [] let ``Can correctly read and update Contacts against Cosmos with RollingUnfolds Access Strategy`` value = Async.RunSynchronously <| async { - let! conn = connectToSpecifiedCosmosOrSimulator log - let service = ContactPreferences.createServiceWithLatestKnownEvent (createCosmosContext conn) log CachingStrategy.NoCaching + let store = connectToSpecifiedCosmosOrSimulator log 1 + let service = ContactPreferences.createServiceWithLatestKnownEvent store log CachingStrategy.NoCaching let email = let g = System.Guid.NewGuid() in g.ToString "N" // Feed some junk into the stream @@ -241,13 +236,13 @@ type Tests(testOutputHelper) = let ``Can roundtrip Cart against Cosmos with RollingUnfolds, detecting conflicts based on _etag`` ctx initialState = Async.RunSynchronously <| async { let log1, capture1 = log, capture capture1.Clear() - let! conn = connectToSpecifiedCosmosOrSimulator log1 + let store = connectToSpecifiedCosmosOrSimulator log1 1 let context, (sku11, sku12, sku21, sku22) = ctx let cartId = % Guid.NewGuid() // establish base stream state - let service1 = Cart.createServiceWithRollingState conn log1 + let service1 = Cart.createServiceWithRollingState store log1 let! maybeInitialSku = let (streamEmpty, skuId) = initialState async { @@ -280,7 +275,7 @@ type Tests(testOutputHelper) = do! 
s4 } let log2, capture2 = TestsWithLogCapture.CreateLoggerWithCapture testOutputHelper use _flush = log2 - let service2 = Cart.createServiceWithRollingState conn log2 + let service2 = Cart.createServiceWithRollingState store log2 let t2 = async { // Signal we have state, wait for other to do same, engineer conflict let prepare = async { @@ -314,9 +309,9 @@ type Tests(testOutputHelper) = [] let ``Can roundtrip against Cosmos, using Snapshotting to avoid queries`` context skuId = Async.RunSynchronously <| async { - let! conn = connectToSpecifiedCosmosOrSimulator log let batchSize = 10 - let createServiceIndexed () = Cart.createServiceWithSnapshotStrategy conn batchSize log + let store = connectToSpecifiedCosmosOrSimulator log batchSize + let createServiceIndexed () = Cart.createServiceWithSnapshotStrategy store log let service1, service2 = createServiceIndexed (), createServiceIndexed () capture.Clear() @@ -341,10 +336,10 @@ type Tests(testOutputHelper) = [] let ``Can roundtrip against Cosmos, correctly using Snapshotting and Cache to avoid redundant reads`` context skuId = Async.RunSynchronously <| async { - let! 
conn = connectToSpecifiedCosmosOrSimulator log let batchSize = 10 + let store = connectToSpecifiedCosmosOrSimulator log batchSize let cache = Equinox.Cache("cart", sizeMb = 50) - let createServiceCached () = Cart.createServiceWithSnapshotStrategyAndCaching conn batchSize log cache + let createServiceCached () = Cart.createServiceWithSnapshotStrategyAndCaching store log cache let service1, service2 = createServiceCached (), createServiceCached () capture.Clear() diff --git a/tools/Equinox.Tool/Program.fs b/tools/Equinox.Tool/Program.fs index 171c06656..f513fb4c9 100644 --- a/tools/Equinox.Tool/Program.fs +++ b/tools/Equinox.Tool/Program.fs @@ -302,22 +302,23 @@ let createDomainLog verbose verboseConsole maybeSeqEndpoint = c.CreateLogger() module CosmosInit = - open Equinox.Cosmos.Store.Sync.Initialization + open Equinox.Cosmos.Store + let conn (log,verboseConsole,maybeSeq) (sargs : ParseResults) = async { let storeLog = createStoreLog (sargs.Contains Storage.Cosmos.Arguments.VerboseStore) verboseConsole maybeSeq - let discovery, dName, cName, connector = Storage.Cosmos.connection (log,storeLog) (Storage.Cosmos.Info sargs) - let! 
conn = connector.Connect(appName, discovery) - return storeLog, conn, dName, cName } + let discovery, dName, cName, factory = Storage.Cosmos.connection (log,storeLog) (Storage.Cosmos.Info sargs) + let client = factory.CreateClient(appName, discovery) + return storeLog, client, dName, cName } let containerAndOrDb (log: ILogger, verboseConsole, maybeSeq) (iargs: ParseResults) = async { match iargs.TryGetSubCommand() with | Some (InitArguments.Cosmos sargs) -> let rus, skipStoredProc = iargs.GetResult(InitArguments.Rus), iargs.Contains InitArguments.SkipStoredProc - let mode = if iargs.Contains InitArguments.Shared then Provisioning.Database rus else Provisioning.Container rus + let mode = if iargs.Contains InitArguments.Shared then Provisioning.Database (ReplaceAlways rus) else Provisioning.Container (ReplaceAlways rus) let modeStr, rus = match mode with Provisioning.Container rus -> "Container",rus | Provisioning.Database rus -> "Database",rus - let! _storeLog,conn,dName,cName = conn (log,verboseConsole,maybeSeq) sargs + let! _storeLog,client,dName,cName = conn (log,verboseConsole,maybeSeq) sargs log.Information("Provisioning `Equinox.Cosmos` Store collection at {mode:l} level for {rus:n0} RU/s", modeStr, rus) - return! init log conn.Client (dName,cName) mode skipStoredProc + return! client.InitializeContainer(dName, cName, mode, not skipStoredProc) | _ -> failwith "please specify a `cosmos` endpoint" } module SqlInit = @@ -346,8 +347,8 @@ module CosmosStats = let doS,doD,doE = args.Contains StatsArguments.Streams, args.Contains StatsArguments.Documents, args.Contains StatsArguments.Events let doS = doS || (not doD && not doE) // default to counting streams only unless otherwise specified let inParallel = args.Contains Parallel - let! _storeLog,conn,dName,cName = CosmosInit.conn (log,verboseConsole,maybeSeq) sargs - let container = conn.Client.GetDatabase(dName).GetContainer(cName) + let! 
_storeLog,client,dName,cName = CosmosInit.conn (log,verboseConsole,maybeSeq) sargs + let container = client.GetDatabase(dName).GetContainer(cName) let ops = [ if doS then yield "Streams", """SELECT VALUE COUNT(1) FROM c WHERE c.id="-1" """ if doD then yield "Documents", """SELECT VALUE COUNT(1) FROM c""" @@ -355,7 +356,7 @@ module CosmosStats = log.Information("Computing {measures} ({mode})", Seq.map fst ops, (if inParallel then "in parallel" else "serially")) ops |> Seq.map (fun (name,sql) -> async { log.Debug("Running query: {sql}", sql) - let res = container.QueryValue(sql) + let res = container.SdkClient.QueryValue(sql) log.Information("{stat}: {result:N0}", name, res)}) |> if inParallel then Async.Parallel else Async.ParallelThrottled 1 // TOCONSIDER replace with Async.Sequence when using new enough FSharp.Core |> Async.Ignore From 88ec2bf44bb66c5c9d1b0e4fbf544068c92aa9c1 Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Mon, 9 Mar 2020 12:53:10 -0400 Subject: [PATCH 36/71] Clean up CancellationToken usages --- src/Equinox.Cosmos/Cosmos.fs | 42 ++++++++++++++++-------------------- 1 file changed, 18 insertions(+), 24 deletions(-) diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index 0f03795c5..5081d5ed3 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -440,14 +440,13 @@ function sync(req, expIndex, expEtag) { } }""" +module CancellationToken = + let useOrCreate = function Some ct -> async.Return ct | _ -> Async.CancellationToken + type EquinoxCosmosDatabaseClient (db: CosmosDatabase) = abstract member GetOrCreateContainer: props: ContainerProperties * throughput: ResourceThroughput * ?cancellationToken: CancellationToken -> Async default __.GetOrCreateContainer(props, throughput, cancellationToken) = async { - let! ct = - match cancellationToken with - | Some ct -> async.Return ct - | _ -> Async.CancellationToken - + let! ct = CancellationToken.useOrCreate cancellationToken let! 
response = match throughput with | Default -> db.CreateContainerIfNotExistsAsync(props, cancellationToken = ct) |> Async.AwaitTaskCorrect @@ -462,6 +461,7 @@ type EquinoxCosmosDatabaseClient (db: CosmosDatabase) = abstract member GetOrCreateBatchAndTipContainer: containerName: string * throughput: ResourceThroughput * ?cancellationToken: CancellationToken -> Async default __.GetOrCreateBatchAndTipContainer(containerName, throughput, cancellationToken) = async { + let! ct = CancellationToken.useOrCreate cancellationToken let props = ContainerProperties(id = containerName, partitionKeyPath = sprintf "/%s" Batch.PartitionKeyField) props.IndexingPolicy.IndexingMode <- IndexingMode.Consistent props.IndexingPolicy.Automatic <- true @@ -470,7 +470,7 @@ type EquinoxCosmosDatabaseClient (db: CosmosDatabase) = props.IndexingPolicy.ExcludedPaths.Add(ExcludedPath(Path="/*")) // NB its critical to index the nominated PartitionKey field defined above or there will be runtime errors for k in Batch.IndexedFields do props.IndexingPolicy.IncludedPaths.Add(IncludedPath(Path = sprintf "/%s/?" k)) - return! __.GetOrCreateContainer(props, throughput, ?cancellationToken = cancellationToken) + return! __.GetOrCreateContainer(props, throughput, cancellationToken = ct) } abstract member GetContainer: containerName: string -> EquinoxCosmosContainerClient @@ -504,43 +504,37 @@ and EquinoxCosmosContainerClient (container: CosmosContainer) = // NB while the docs suggest you may see a 412, the NotModified in the body of the try/with is actually what happens | CosmosException (CosmosStatusCode sc as e) when sc = int System.Net.HttpStatusCode.PreconditionFailed -> return e.Response.Headers.GetRequestCharge(), NotModified } - abstract member CreateSyncStoredProcedure: name: string -> Async - default __.CreateSyncStoredProcedure (name: string) = async { - try let! 
r = container.Scripts.CreateStoredProcedureAsync(Scripts.StoredProcedureProperties(name, SyncStoredProcedure.body)) |> Async.AwaitTaskCorrect + abstract member CreateSyncStoredProcedure: name: string * ?cancellationToken: CancellationToken -> Async + default __.CreateSyncStoredProcedure (name, cancellationToken) = async { + let! ct = CancellationToken.useOrCreate cancellationToken + try let! r = container.Scripts.CreateStoredProcedureAsync(Scripts.StoredProcedureProperties(name, SyncStoredProcedure.body), cancellationToken = ct) |> Async.AwaitTaskCorrect return r.GetRawResponse().Headers.GetRequestCharge() with CosmosException ((CosmosStatusCode sc) as e) when sc = int System.Net.HttpStatusCode.Conflict -> return e.Response.Headers.GetRequestCharge() } abstract member Sync: stream: string * tip: Tip * index: int64 * ?storedProcedureName: string * ?etag: string * ?cancellationToken : CancellationToken -> Async> default __.Sync(stream, tip, index, ?storedProcedureName, ?etag, ?cancellationToken) = async { + let! ct = CancellationToken.useOrCreate cancellationToken let storedProcedureName = defaultArg storedProcedureName SyncStoredProcedure.defaultName let partitionKey = PartitionKey stream - let! ct = - match cancellationToken with - | Some ct -> async.Return ct - | _ -> Async.CancellationToken let args = [| box tip; box index; box (Option.toObj etag)|] return! 
container.Scripts.ExecuteStoredProcedureAsync(storedProcedureName, partitionKey, args, cancellationToken = ct) |> Async.AwaitTaskCorrect } type EquinoxCosmosClient (logger: ILogger, sdk: CosmosClient) = - abstract member InitializeContainer: dbName: string * containerName: string * mode: Provisioning * createSyncStoredProcedure: bool * ?syncStoredProcedureName: string -> Async - default __.InitializeContainer (dbName, containerName, mode, createSyncStoredProcedure, syncStoredProcedureName) = async { + abstract member InitializeContainer: dbName: string * containerName: string * mode: Provisioning * createSyncStoredProcedure: bool * ?syncStoredProcedureName: string * ?cancellationToken: CancellationToken -> Async + default __.InitializeContainer (dbName, containerName, mode, createSyncStoredProcedure, syncStoredProcedureName, cancellationToken) = async { + let! ct = CancellationToken.useOrCreate cancellationToken let dbThroughput = match mode with Provisioning.Database throughput -> throughput | _ -> Default let containerThroughput = match mode with Provisioning.Container throughput -> throughput | _ -> Default - - let! db = __.GetOrCreateDatabase(dbName, dbThroughput) - let! container = db.GetOrCreateBatchAndTipContainer(containerName, containerThroughput) + let! db = __.GetOrCreateDatabase(dbName, dbThroughput, cancellationToken = ct) + let! container = db.GetOrCreateBatchAndTipContainer(containerName, containerThroughput, cancellationToken = ct) if createSyncStoredProcedure then let syncStoredProcedureName = defaultArg syncStoredProcedureName SyncStoredProcedure.defaultName do! container.CreateSyncStoredProcedure(syncStoredProcedureName) |> Async.Ignore } abstract member GetOrCreateDatabase: dbName: string * throughput: ResourceThroughput * ?cancellationToken: CancellationToken -> Async - default __.GetOrCreateDatabase(dbName, throughput, ?cancellationToken) = async { - let! 
ct = - match cancellationToken with - | Some ct -> async.Return ct - | _ -> Async.CancellationToken - + default __.GetOrCreateDatabase(dbName, throughput, cancellationToken) = async { + let! ct = CancellationToken.useOrCreate cancellationToken let! response = match throughput with | Default -> sdk.CreateDatabaseIfNotExistsAsync(id = dbName, cancellationToken = ct) |> Async.AwaitTaskCorrect From c509a1ba6a8570e907640aed3a95a6e0cda711de Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Mon, 9 Mar 2020 16:42:40 -0400 Subject: [PATCH 37/71] Streamline clients --- src/Equinox.Cosmos/Cosmos.fs | 37 ++++++++++++------------------------ 1 file changed, 12 insertions(+), 25 deletions(-) diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index 5081d5ed3..bf9e8f307 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -444,9 +444,10 @@ module CancellationToken = let useOrCreate = function Some ct -> async.Return ct | _ -> Async.CancellationToken type EquinoxCosmosDatabaseClient (db: CosmosDatabase) = - abstract member GetOrCreateContainer: props: ContainerProperties * throughput: ResourceThroughput * ?cancellationToken: CancellationToken -> Async + abstract member GetOrCreateContainer: props: ContainerProperties * ?throughput: ResourceThroughput * ?cancellationToken: CancellationToken -> Async default __.GetOrCreateContainer(props, throughput, cancellationToken) = async { let! ct = CancellationToken.useOrCreate cancellationToken + let throughput = defaultArg throughput Default let! 
response = match throughput with | Default -> db.CreateContainerIfNotExistsAsync(props, cancellationToken = ct) |> Async.AwaitTaskCorrect @@ -459,9 +460,9 @@ type EquinoxCosmosDatabaseClient (db: CosmosDatabase) = return EquinoxCosmosContainerClient(response.Container) } - abstract member GetOrCreateBatchAndTipContainer: containerName: string * throughput: ResourceThroughput * ?cancellationToken: CancellationToken -> Async - default __.GetOrCreateBatchAndTipContainer(containerName, throughput, cancellationToken) = async { + member internal __.GetOrCreateBatchAndTipContainer(containerName: string, ?throughput: ResourceThroughput, ?cancellationToken: CancellationToken) = async { let! ct = CancellationToken.useOrCreate cancellationToken + let throughput = defaultArg throughput Default let props = ContainerProperties(id = containerName, partitionKeyPath = sprintf "/%s" Batch.PartitionKeyField) props.IndexingPolicy.IndexingMode <- IndexingMode.Consistent props.IndexingPolicy.Automatic <- true @@ -470,12 +471,7 @@ type EquinoxCosmosDatabaseClient (db: CosmosDatabase) = props.IndexingPolicy.ExcludedPaths.Add(ExcludedPath(Path="/*")) // NB its critical to index the nominated PartitionKey field defined above or there will be runtime errors for k in Batch.IndexedFields do props.IndexingPolicy.IncludedPaths.Add(IncludedPath(Path = sprintf "/%s/?" k)) - return! __.GetOrCreateContainer(props, throughput, cancellationToken = ct) - } - - abstract member GetContainer: containerName: string -> EquinoxCosmosContainerClient - default __.GetContainer(containerName) = - EquinoxCosmosContainerClient(db.GetContainer(containerName)) + return! __.GetOrCreateContainer(props, throughput, cancellationToken = ct) } and EquinoxCosmosContainerClient (container: CosmosContainer) = member val SdkClient = container with get @@ -520,8 +516,7 @@ and EquinoxCosmosContainerClient (container: CosmosContainer) = return! 
container.Scripts.ExecuteStoredProcedureAsync(storedProcedureName, partitionKey, args, cancellationToken = ct) |> Async.AwaitTaskCorrect } type EquinoxCosmosClient (logger: ILogger, sdk: CosmosClient) = - abstract member InitializeContainer: dbName: string * containerName: string * mode: Provisioning * createSyncStoredProcedure: bool * ?syncStoredProcedureName: string * ?cancellationToken: CancellationToken -> Async - default __.InitializeContainer (dbName, containerName, mode, createSyncStoredProcedure, syncStoredProcedureName, cancellationToken) = async { + member internal __.InitializeContainer (dbName: string, containerName: string, mode: Provisioning, createSyncStoredProcedure: bool, ?syncStoredProcedureName: string, ?cancellationToken: CancellationToken) = async { let! ct = CancellationToken.useOrCreate cancellationToken let dbThroughput = match mode with Provisioning.Database throughput -> throughput | _ -> Default let containerThroughput = match mode with Provisioning.Container throughput -> throughput | _ -> Default @@ -532,9 +527,10 @@ type EquinoxCosmosClient (logger: ILogger, sdk: CosmosClient) = let syncStoredProcedureName = defaultArg syncStoredProcedureName SyncStoredProcedure.defaultName do! container.CreateSyncStoredProcedure(syncStoredProcedureName) |> Async.Ignore } - abstract member GetOrCreateDatabase: dbName: string * throughput: ResourceThroughput * ?cancellationToken: CancellationToken -> Async + abstract member GetOrCreateDatabase: dbName: string * ?throughput: ResourceThroughput * ?cancellationToken: CancellationToken -> Async default __.GetOrCreateDatabase(dbName, throughput, cancellationToken) = async { let! ct = CancellationToken.useOrCreate cancellationToken + let throughput = defaultArg throughput Default let! 
response = match throughput with | Default -> sdk.CreateDatabaseIfNotExistsAsync(id = dbName, cancellationToken = ct) |> Async.AwaitTaskCorrect @@ -547,11 +543,6 @@ type EquinoxCosmosClient (logger: ILogger, sdk: CosmosClient) = return EquinoxCosmosDatabaseClient(response.Database) } - abstract member GetDatabase: dbName: string -> EquinoxCosmosDatabaseClient - default __.GetDatabase(dbName) = - EquinoxCosmosDatabaseClient(sdk.GetDatabase(dbName)) - - module Sync = // NB don't nest in a private module, or serialization will fail miserably ;) let [] private sprocName = "EquinoxRollingUnfolds3" // NB need to rename/number for any breaking change @@ -935,7 +926,9 @@ type Containers(categoryAndIdToDatabaseContainerStream : string -> string -> str member internal __.Resolve(client : EquinoxCosmosClient, categoryName, id, init) : (EquinoxCosmosContainerClient*string) * (unit -> Async) option = let databaseId, containerName, streamName = categoryAndIdToDatabaseContainerStream categoryName id let init = match disableInitialization with Some true -> None | _ -> Some init - let wrapped = wrappers.GetOrAdd((databaseId,containerName), fun (d,c) -> ContainerWrapper(client.GetDatabase(d).GetContainer(c), ?initContainer = init)) + let db = client.GetOrCreateDatabase(databaseId) |> Async.RunSynchronously + let container = db.GetOrCreateBatchAndTipContainer(containerName) |> Async.RunSynchronously + let wrapped = wrappers.GetOrAdd((databaseId,containerName), fun (d,c) -> ContainerWrapper(container, ?initContainer = init)) (wrapped.Container,streamName),wrapped.InitializationGate namespace Equinox.Cosmos @@ -1182,15 +1175,9 @@ type ClientFactory /// consistency mode (default: ConsistencyLevel.Session) []?defaultConsistencyLevel : ConsistencyLevel, - /// Retries for read requests, over and above those defined by the mandatory policies - []?readRetryPolicy, - /// Retries for write requests, over and above those defined by the mandatory policies - []?writeRetryPolicy, /// Additional 
strings identifying the context of this connection; should provide enough context to disambiguate all potential connections to a cluster /// NB as this will enter server and client logs, it should not contain sensitive information - []?tags : (string*string) seq, - /// Inhibits certificate verification when set to true, i.e. for working with the CosmosDB Emulator (default false) - []?bypassCertificateValidation : bool) = + []?tags : (string*string) seq) = do if log = null then nullArg "log" let logName (uri : Uri) name = From 0a5a176ae4a511a27e6b76364e4beef3a2786a88 Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Tue, 10 Mar 2020 11:49:26 -0400 Subject: [PATCH 38/71] Move all initialization functions to separate module --- samples/Infrastructure/Storage.fs | 2 +- src/Equinox.Cosmos/Cosmos.fs | 208 +++++++++--------- .../CosmosCoreIntegration.fs | 2 +- .../CosmosFixtures.fs | 4 +- tools/Equinox.Tool/Program.fs | 21 +- 5 files changed, 121 insertions(+), 116 deletions(-) diff --git a/samples/Infrastructure/Storage.fs b/samples/Infrastructure/Storage.fs index 5c551b0ff..cd09ccd9c 100644 --- a/samples/Infrastructure/Storage.fs +++ b/samples/Infrastructure/Storage.fs @@ -79,7 +79,7 @@ module Cosmos = discovery, a.Database, a.Container, ClientFactory(a.Timeout, a.Retries, a.MaxRetryWaitTime, log=storeLog, mode=a.Mode) let config (log: ILogger, storeLog) (cache, unfolds, batchSize) info = let discovery, dName, cName, factory = connection (log, storeLog) info - let ctx = Context(factory.CreateClient(appName, discovery), dName, cName, log = log, defaultMaxItems = batchSize) + let ctx = Context(factory.CreateClient(appName, discovery, dName, cName), log = log, defaultMaxItems = batchSize) let cacheStrategy = match cache with Some c -> CachingStrategy.SlidingWindow (c, TimeSpan.FromMinutes 20.) 
| None -> CachingStrategy.NoCaching StorageConfig.Cosmos (ctx, cacheStrategy, unfolds, dName, cName) diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index bf9e8f307..6a8419599 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -443,42 +443,15 @@ function sync(req, expIndex, expEtag) { module CancellationToken = let useOrCreate = function Some ct -> async.Return ct | _ -> Async.CancellationToken -type EquinoxCosmosDatabaseClient (db: CosmosDatabase) = - abstract member GetOrCreateContainer: props: ContainerProperties * ?throughput: ResourceThroughput * ?cancellationToken: CancellationToken -> Async - default __.GetOrCreateContainer(props, throughput, cancellationToken) = async { - let! ct = CancellationToken.useOrCreate cancellationToken - let throughput = defaultArg throughput Default - let! response = - match throughput with - | Default -> db.CreateContainerIfNotExistsAsync(props, cancellationToken = ct) |> Async.AwaitTaskCorrect - | SetIfCreating value -> db.CreateContainerIfNotExistsAsync(props, throughput = Nullable(value), cancellationToken = ct) |> Async.AwaitTaskCorrect - | ReplaceAlways value -> - db.CreateContainerIfNotExistsAsync(props, throughput = Nullable(value), cancellationToken = ct) - |> Async.AwaitTaskCorrect - |> Async.bind (fun response -> - response.Container.ReplaceThroughputAsync(value, cancellationToken = ct) |> Async.AwaitTaskCorrect |> Async.map (fun _ -> response)) - - return EquinoxCosmosContainerClient(response.Container) } - - member internal __.GetOrCreateBatchAndTipContainer(containerName: string, ?throughput: ResourceThroughput, ?cancellationToken: CancellationToken) = async { - let! 
ct = CancellationToken.useOrCreate cancellationToken - let throughput = defaultArg throughput Default - let props = ContainerProperties(id = containerName, partitionKeyPath = sprintf "/%s" Batch.PartitionKeyField) - props.IndexingPolicy.IndexingMode <- IndexingMode.Consistent - props.IndexingPolicy.Automatic <- true - // Can either do a blacklist or a whitelist - // Given how long and variable the blacklist would be, we whitelist instead - props.IndexingPolicy.ExcludedPaths.Add(ExcludedPath(Path="/*")) - // NB its critical to index the nominated PartitionKey field defined above or there will be runtime errors - for k in Batch.IndexedFields do props.IndexingPolicy.IncludedPaths.Add(IncludedPath(Path = sprintf "/%s/?" k)) - return! __.GetOrCreateContainer(props, throughput, cancellationToken = ct) } - -and EquinoxCosmosContainerClient (container: CosmosContainer) = - member val SdkClient = container with get +type EquinoxCosmosClient (cosmosClient: CosmosClient, containerClient: CosmosContainer, databaseId: string, containerId: string) = + member val DatabaseId = databaseId with get + member val ContainerId = containerId with get + member val CosmosSdkClient = cosmosClient with get + member val ContainerSdkClient = containerClient with get abstract member QueryPageable<'T> : query: QueryDefinition * ?options: QueryRequestOptions -> AsyncSeq> default __.QueryPageable<'T>(query, ?options) = - container.GetItemQueryIterator<'T>(query, requestOptions = defaultArg options null).AsPages() |> AsyncSeq.ofAsyncEnum + containerClient.GetItemQueryIterator<'T>(query, requestOptions = defaultArg options null).AsPages() |> AsyncSeq.ofAsyncEnum abstract member TryReadItem<'T> : docId: string * stream: string * ?options: ItemRequestOptions * ?cancellationToken : CancellationToken -> Async> default __.TryReadItem<'T>(docId, stream, ?options, ?cancellationToken) = async { @@ -489,7 +462,7 @@ and EquinoxCosmosContainerClient (container: CosmosContainer) = | Some ct -> async.Return ct | 
_ -> Async.CancellationToken // TODO use TryReadItemStreamAsync to avoid the exception https://github.com/Azure/azure-cosmos-dotnet-v3/issues/692#issuecomment-521936888 - try let! item = async { return! container.ReadItemAsync<'T>(docId, partitionKey, requestOptions = options, cancellationToken = ct) |> Async.AwaitTaskCorrect } + try let! item = async { return! containerClient.ReadItemAsync<'T>(docId, partitionKey, requestOptions = options, cancellationToken = ct) |> Async.AwaitTaskCorrect } // if item.StatusCode = System.Net.HttpStatusCode.NotModified then return item.RequestCharge, NotModified // NB `.Document` will NRE if a IfNoneModified precondition triggers a NotModified result // else @@ -500,48 +473,13 @@ and EquinoxCosmosContainerClient (container: CosmosContainer) = // NB while the docs suggest you may see a 412, the NotModified in the body of the try/with is actually what happens | CosmosException (CosmosStatusCode sc as e) when sc = int System.Net.HttpStatusCode.PreconditionFailed -> return e.Response.Headers.GetRequestCharge(), NotModified } - abstract member CreateSyncStoredProcedure: name: string * ?cancellationToken: CancellationToken -> Async - default __.CreateSyncStoredProcedure (name, cancellationToken) = async { - let! ct = CancellationToken.useOrCreate cancellationToken - try let! r = container.Scripts.CreateStoredProcedureAsync(Scripts.StoredProcedureProperties(name, SyncStoredProcedure.body), cancellationToken = ct) |> Async.AwaitTaskCorrect - return r.GetRawResponse().Headers.GetRequestCharge() - with CosmosException ((CosmosStatusCode sc) as e) when sc = int System.Net.HttpStatusCode.Conflict -> return e.Response.Headers.GetRequestCharge() } - abstract member Sync: stream: string * tip: Tip * index: int64 * ?storedProcedureName: string * ?etag: string * ?cancellationToken : CancellationToken -> Async> default __.Sync(stream, tip, index, ?storedProcedureName, ?etag, ?cancellationToken) = async { let! 
ct = CancellationToken.useOrCreate cancellationToken let storedProcedureName = defaultArg storedProcedureName SyncStoredProcedure.defaultName let partitionKey = PartitionKey stream let args = [| box tip; box index; box (Option.toObj etag)|] - return! container.Scripts.ExecuteStoredProcedureAsync(storedProcedureName, partitionKey, args, cancellationToken = ct) |> Async.AwaitTaskCorrect } - -type EquinoxCosmosClient (logger: ILogger, sdk: CosmosClient) = - member internal __.InitializeContainer (dbName: string, containerName: string, mode: Provisioning, createSyncStoredProcedure: bool, ?syncStoredProcedureName: string, ?cancellationToken: CancellationToken) = async { - let! ct = CancellationToken.useOrCreate cancellationToken - let dbThroughput = match mode with Provisioning.Database throughput -> throughput | _ -> Default - let containerThroughput = match mode with Provisioning.Container throughput -> throughput | _ -> Default - let! db = __.GetOrCreateDatabase(dbName, dbThroughput, cancellationToken = ct) - let! container = db.GetOrCreateBatchAndTipContainer(containerName, containerThroughput, cancellationToken = ct) - - if createSyncStoredProcedure then - let syncStoredProcedureName = defaultArg syncStoredProcedureName SyncStoredProcedure.defaultName - do! container.CreateSyncStoredProcedure(syncStoredProcedureName) |> Async.Ignore } - - abstract member GetOrCreateDatabase: dbName: string * ?throughput: ResourceThroughput * ?cancellationToken: CancellationToken -> Async - default __.GetOrCreateDatabase(dbName, throughput, cancellationToken) = async { - let! ct = CancellationToken.useOrCreate cancellationToken - let throughput = defaultArg throughput Default - let! 
response = - match throughput with - | Default -> sdk.CreateDatabaseIfNotExistsAsync(id = dbName, cancellationToken = ct) |> Async.AwaitTaskCorrect - | SetIfCreating value -> sdk.CreateDatabaseIfNotExistsAsync(id = dbName, throughput = Nullable(value), cancellationToken = ct) |> Async.AwaitTaskCorrect - | ReplaceAlways value -> - sdk.CreateDatabaseIfNotExistsAsync(id = dbName, throughput = Nullable(value), cancellationToken = ct) - |> Async.AwaitTaskCorrect - |> Async.bind (fun response -> - response.Database.ReplaceThroughputAsync(value, cancellationToken = ct) |> Async.AwaitTaskCorrect |> Async.map (fun _ -> response)) - - return EquinoxCosmosDatabaseClient(response.Database) } + return! containerClient.Scripts.ExecuteStoredProcedureAsync(storedProcedureName, partitionKey, args, cancellationToken = ct) |> Async.AwaitTaskCorrect } module Sync = // NB don't nest in a private module, or serialization will fail miserably ;) @@ -554,7 +492,7 @@ module Sync = | ConflictUnknown of Position type [] Exp = Version of int64 | Etag of string | Any - let private run (container : EquinoxCosmosContainerClient, stream : string) (exp, req: Tip) + let private run (container : EquinoxCosmosClient, stream : string) (exp, req: Tip) : Async = async { let ep = match exp with Exp.Version ev -> Position.fromI ev | Exp.Etag et -> Position.fromEtag et | Exp.Any -> Position.fromAppendAtEnd let! 
res = container.Sync(stream, req, ep.index, ?etag = ep.etag) @@ -606,10 +544,10 @@ module Sync = unfolds |> Seq.mapi (fun offset x -> { i = baseIndex + int64 offset; c = x.EventType; d = x.Data; m = x.Meta; t = DateTimeOffset.UtcNow } : Unfold) module internal Tip = - let private get (container : EquinoxCosmosContainerClient, stream : string) (maybePos: Position option) = + let private get (container : EquinoxCosmosClient, stream : string) (maybePos: Position option) = let ro = match maybePos with Some { etag=Some etag } -> ItemRequestOptions(IfNoneMatch=Nullable(Azure.ETag(etag))) | _ -> null container.TryReadItem(Tip.WellKnownDocumentId, stream, options = ro) - let private loggedGet (get : EquinoxCosmosContainerClient * string -> Position option -> Async<_>) (container,stream) (maybePos: Position option) (log: ILogger) = async { + let private loggedGet (get : EquinoxCosmosClient * string -> Position option -> Async<_>) (container,stream) (maybePos: Position option) (log: ILogger) = async { let log = log |> Log.prop "stream" stream let! 
t, (ru, res : ReadResult) = get (container,stream) maybePos |> Stopwatch.Time let log bytes count (f : Log.Measurement -> _) = log |> Log.event (f { stream = stream; interval = t; bytes = bytes; count = count; ru = ru }) @@ -638,7 +576,7 @@ module internal Tip = module internal Query = open FSharp.Control - let private mkQuery (container : EquinoxCosmosContainerClient, stream: string) maxItems (direction: Direction) startPos : AsyncSeq> = + let private mkQuery (container : EquinoxCosmosClient, stream: string) maxItems (direction: Direction) startPos : AsyncSeq> = let query = let root = sprintf "SELECT c.id, c.i, c._etag, c.n, c.e FROM c WHERE c.id!=\"%s\"" Tip.WellKnownDocumentId let tail = sprintf "ORDER BY c.i %s" (if direction = Direction.Forward then "ASC" else "DESC") @@ -800,12 +738,12 @@ module internal Tip = let t = StopwatchInterval(startTicks, endTicks) log |> logQuery direction maxItems stream t (!responseCount,allEvents.ToArray()) -1L ru } -type [] Token = { container: EquinoxCosmosContainerClient; stream: string; pos: Position } +type [] Token = { container: EquinoxCosmosClient; stream: string; pos: Position } module Token = let create (container,stream) pos : StreamToken = { value = box { container = container; stream = stream; pos = pos } version = pos.index } - let (|Unpack|) (token: StreamToken) : EquinoxCosmosContainerClient*string*Position = let t = unbox token.value in t.container,t.stream,t.pos + let (|Unpack|) (token: StreamToken) : EquinoxCosmosClient*string*Position = let t = unbox token.value in t.container,t.stream,t.pos let supersedes (Unpack (_,_,currentPos)) (Unpack (_,_,xPos)) = let currentVersion, newVersion = currentPos.index, xPos.index let currentETag, newETag = currentPos.etag, xPos.etag @@ -897,8 +835,6 @@ type Gateway(conn : Connection, batching : BatchingPolicy) = | Tip.Result.Found (pos, FromUnfold tryDecode isOrigin span) -> return LoadFromTokenResult.Found (Token.create (container,stream) pos, span) | _ -> let! 
res = __.Read log (container,stream) Direction.Forward (Some pos) (tryDecode,isOrigin) return LoadFromTokenResult.Found res } - member __.CreateSyncStoredProcIfNotExists log (container: EquinoxCosmosContainerClient) = - container.CreateSyncStoredProcedure(SyncStoredProcedure.defaultName) |> Async.Ignore member __.Sync log containerStream (exp, batch: Tip): Async = async { if Array.isEmpty batch.e && Array.isEmpty batch.u then invalidOp "Must write either events or unfolds." let! wr = Sync.batch log conn.WriteRetryPolicy containerStream (exp,batch) @@ -908,8 +844,8 @@ type Gateway(conn : Connection, batching : BatchingPolicy) = | Sync.Result.Written pos' -> return InternalSyncResult.Written (Token.create containerStream pos') } /// Holds Container state, coordinating initialization activities -type private ContainerWrapper(container : EquinoxCosmosContainerClient, ?initContainer : EquinoxCosmosContainerClient -> Async) = - let initGuard = initContainer |> Option.map (fun init -> AsyncCacheCell(init container)) +type private ContainerWrapper(container : EquinoxCosmosClient, ?initContainer : unit -> Async) = + let initGuard = initContainer |> Option.map (fun init -> AsyncCacheCell(init ())) member __.Container = container member internal __.InitializationGate = match initGuard with Some g when g.PeekIsValid() |> not -> Some g.AwaitValue | _ -> None @@ -923,12 +859,10 @@ type Containers(categoryAndIdToDatabaseContainerStream : string -> string -> str let genStreamName categoryName streamId = if categoryName = null then streamId else sprintf "%s-%s" categoryName streamId Containers(fun categoryName streamId -> databaseId, containerId, genStreamName categoryName streamId) - member internal __.Resolve(client : EquinoxCosmosClient, categoryName, id, init) : (EquinoxCosmosContainerClient*string) * (unit -> Async) option = + member internal __.Resolve(client : EquinoxCosmosClient, categoryName, id, init) : (EquinoxCosmosClient*string) * (unit -> Async) option = let 
databaseId, containerName, streamName = categoryAndIdToDatabaseContainerStream categoryName id let init = match disableInitialization with Some true -> None | _ -> Some init - let db = client.GetOrCreateDatabase(databaseId) |> Async.RunSynchronously - let container = db.GetOrCreateBatchAndTipContainer(containerName) |> Async.RunSynchronously - let wrapped = wrappers.GetOrAdd((databaseId,containerName), fun (d,c) -> ContainerWrapper(container, ?initContainer = init)) + let wrapped = wrappers.GetOrAdd((databaseId,containerName), fun _ -> ContainerWrapper(client, ?initContainer = init)) (wrapped.Container,streamName),wrapped.InitializationGate namespace Equinox.Cosmos @@ -945,6 +879,7 @@ open Serilog open System open System.Collections.Concurrent open System.Text.Json +open System.Threading type private Category<'event, 'state, 'context>(gateway : Gateway, codec : IEventCodec<'event,JsonElement,'context>) = let (|TryDecodeFold|) (fold: 'state -> 'event seq -> 'state) initial (events: ITimelineEvent seq) : 'state = Seq.choose codec.TryDecode events |> fold initial @@ -979,14 +914,14 @@ type private Category<'event, 'state, 'context>(gateway : Gateway, codec : IEven module Caching = /// Forwards all state changes in all streams of an ICategory to a `tee` function - type CategoryTee<'event, 'state, 'context>(inner: ICategory<'event, 'state, EquinoxCosmosContainerClient*string,'context>, tee : string -> StreamToken * 'state -> Async) = + type CategoryTee<'event, 'state, 'context>(inner: ICategory<'event, 'state, EquinoxCosmosClient*string,'context>, tee : string -> StreamToken * 'state -> Async) = let intercept streamName tokenAndState = async { let! _ = tee streamName tokenAndState return tokenAndState } let loadAndIntercept load streamName = async { let! tokenAndState = load return! 
intercept streamName tokenAndState } - interface ICategory<'event, 'state, EquinoxCosmosContainerClient*string, 'context> with + interface ICategory<'event, 'state, EquinoxCosmosClient*string, 'context> with member __.Load(log, (container,streamName), opt) : Async = loadAndIntercept (inner.Load(log, (container,streamName), opt)) streamName member __.TrySync(log : ILogger, (Token.Unpack (_container,stream,_) as streamToken), state, events : 'event list, context) @@ -1002,8 +937,8 @@ module Caching = (cache : ICache) (prefix : string) (slidingExpiration : TimeSpan) - (category : ICategory<'event, 'state, EquinoxCosmosContainerClient*string, 'context>) - : ICategory<'event, 'state, EquinoxCosmosContainerClient*string, 'context> = + (category : ICategory<'event, 'state, EquinoxCosmosClient*string, 'context>) + : ICategory<'event, 'state, EquinoxCosmosClient*string, 'context> = let mkCacheEntry (initialToken : StreamToken, initialState : 'state) = new CacheEntry<'state>(initialToken, initialState, Token.supersedes) let options = CacheItemOptions.RelativeExpiration slidingExpiration let addOrUpdateSlidingExpirationCacheEntry streamName value = cache.UpdateIfNewer(prefix + streamName, options, mkCacheEntry value) @@ -1016,7 +951,7 @@ type private Folder<'event, 'state, 'context> ?readCache) = let inspectUnfolds = match mapUnfolds with Choice1Of3 () -> false | _ -> true let batched log containerStream = category.Load inspectUnfolds containerStream fold initial isOrigin log - interface ICategory<'event, 'state, EquinoxCosmosContainerClient*string, 'context> with + interface ICategory<'event, 'state, EquinoxCosmosClient*string, 'context> with member __.Load(log, (container,streamName), opt): Async = match readCache with | None -> batched log (container,streamName) @@ -1032,11 +967,68 @@ type private Folder<'event, 'state, 'context> | SyncResult.Conflict resync -> return SyncResult.Conflict resync | SyncResult.Written (token',state') -> return SyncResult.Written 
(token',state') } +module EquinoxCosmosInitialization = + let internal getOrCreateDatabase (sdk: CosmosClient) (dbName: string) (throughput: ResourceThroughput) (cancellationToken: CancellationToken option) = async { + let! ct = CancellationToken.useOrCreate cancellationToken + let! response = + match throughput with + | Default -> sdk.CreateDatabaseIfNotExistsAsync(id = dbName, cancellationToken = ct) |> Async.AwaitTaskCorrect + | SetIfCreating value -> sdk.CreateDatabaseIfNotExistsAsync(id = dbName, throughput = Nullable(value), cancellationToken = ct) |> Async.AwaitTaskCorrect + | ReplaceAlways value -> + sdk.CreateDatabaseIfNotExistsAsync(id = dbName, throughput = Nullable(value), cancellationToken = ct) + |> Async.AwaitTaskCorrect + |> Async.bind (fun response -> + response.Database.ReplaceThroughputAsync(value, cancellationToken = ct) |> Async.AwaitTaskCorrect |> Async.map (fun _ -> response)) + + return response.Database } + + let internal getOrCreateContainer (db: CosmosDatabase) (props: ContainerProperties) (throughput: ResourceThroughput) (cancellationToken: CancellationToken option) = async { + let! ct = CancellationToken.useOrCreate cancellationToken + let! 
response = + match throughput with + | Default -> db.CreateContainerIfNotExistsAsync(props, cancellationToken = ct) |> Async.AwaitTaskCorrect + | SetIfCreating value -> db.CreateContainerIfNotExistsAsync(props, throughput = Nullable(value), cancellationToken = ct) |> Async.AwaitTaskCorrect + | ReplaceAlways value -> + db.CreateContainerIfNotExistsAsync(props, throughput = Nullable(value), cancellationToken = ct) + |> Async.AwaitTaskCorrect + |> Async.bind (fun response -> + response.Container.ReplaceThroughputAsync(value, cancellationToken = ct) |> Async.AwaitTaskCorrect |> Async.map (fun _ -> response)) + + return response.Container } + + let internal getBatchAndTipContainerProps (containerName: string) = + let props = ContainerProperties(id = containerName, partitionKeyPath = sprintf "/%s" Batch.PartitionKeyField) + props.IndexingPolicy.IndexingMode <- IndexingMode.Consistent + props.IndexingPolicy.Automatic <- true + // Can either do a blacklist or a whitelist + // Given how long and variable the blacklist would be, we whitelist instead + props.IndexingPolicy.ExcludedPaths.Add(ExcludedPath(Path="/*")) + // NB its critical to index the nominated PartitionKey field defined above or there will be runtime errors + for k in Batch.IndexedFields do props.IndexingPolicy.IncludedPaths.Add(IncludedPath(Path = sprintf "/%s/?" k)) + props + + let internal createSyncStoredProcedure (container: CosmosContainer) (name) (cancellationToken) = async { + let! ct = CancellationToken.useOrCreate cancellationToken + try let! 
r = container.Scripts.CreateStoredProcedureAsync(Scripts.StoredProcedureProperties(name, SyncStoredProcedure.body), cancellationToken = ct) |> Async.AwaitTaskCorrect + return r.GetRawResponse().Headers.GetRequestCharge() + with CosmosException ((CosmosStatusCode sc) as e) when sc = int System.Net.HttpStatusCode.Conflict -> return e.Response.Headers.GetRequestCharge() } + + let initializeContainer (sdk: CosmosClient) (dbName: string) (containerName: string) (mode: Provisioning) (createStoredProcedure: bool) (storedProcedureName: string option) (cancellationToken: CancellationToken option) = async { + let! ct = CancellationToken.useOrCreate cancellationToken |> Async.map Some + let dbThroughput = match mode with Provisioning.Database throughput -> throughput | _ -> Default + let containerThroughput = match mode with Provisioning.Container throughput -> throughput | _ -> Default + let! db = getOrCreateDatabase sdk dbName dbThroughput ct + let! container = getOrCreateContainer db (getBatchAndTipContainerProps containerName) containerThroughput ct + + if createStoredProcedure then + let syncStoredProcedureName = storedProcedureName |> Option.defaultValue SyncStoredProcedure.defaultName + do! 
createSyncStoredProcedure container syncStoredProcedureName ct |> Async.Ignore + + return container } + /// Pairs a Gateway, defining the retry policies for CosmosDb with a Containers map defining mappings from (category,id) to (databaseId,containerId,streamName) type Context ( client: EquinoxCosmosClient, - databaseId: string, - containerId: string, ?log: ILogger, ?defaultMaxItems: int, ?getDefaultMaxItems: unit -> int, @@ -1047,12 +1039,12 @@ type Context let conn = Connection(client, ?readRetryPolicy = readRetryPolicy, ?writeRetryPolicy = writeRetryPolicy) let batchingPolicy = BatchingPolicy(?defaultMaxItems = defaultMaxItems, ?getDefaultMaxItems =getDefaultMaxItems, ?maxRequests = maxRequests) let gateway = Gateway(conn, batchingPolicy) - let init = gateway.CreateSyncStoredProcIfNotExists log - let containers = Containers(databaseId, containerId) + let init = fun () -> EquinoxCosmosInitialization.createSyncStoredProcedure client.ContainerSdkClient SyncStoredProcedure.defaultName None |> Async.Ignore + let containers = Containers(client.DatabaseId, client.ContainerId) member __.Gateway = gateway member __.Containers = containers - member internal __.ResolveContainerStream(categoryName, id) : (EquinoxCosmosContainerClient*string) * (unit -> Async) option = + member internal __.ResolveContainerStream(categoryName, id) : (EquinoxCosmosClient*string) * (unit -> Async) option = containers.Resolve(gateway.Client, categoryName, id, init) [] @@ -1116,7 +1108,7 @@ type Resolver<'event, 'state, 'context>(context : Context, codec, fold, initial, | AccessStrategy.Custom (isOrigin,transmute) -> isOrigin, Choice3Of3 transmute let cosmosCat = Category<'event, 'state, 'context>(context.Gateway, codec) let folder = Folder<'event, 'state, 'context>(cosmosCat, fold, initial, isOrigin, mapUnfolds, ?readCache = readCacheOption) - let category : ICategory<_, _, EquinoxCosmosContainerClient*string, 'context> = + let category : ICategory<_, _, EquinoxCosmosClient*string, 'context> = 
match caching with | CachingStrategy.NoCaching -> folder :> _ | CachingStrategy.SlidingWindow(cache, window) -> @@ -1206,15 +1198,30 @@ type ClientFactory // co.TransportClientHandlerFactory <- inhibitCertCheck co - /// Yields an EquinoxCosmosClient configured and connected the requested `discovery` strategy member __.CreateClient ( /// Name should be sufficient to uniquely identify this connection within a single app instance's logs name, discovery : Discovery, + dbName: string, + containerName: string, + ?provisioningMode: Provisioning, + ?createStoredProcedure: bool, + ?storedProcedureName: string, /// true to inhibit logging of client name []?skipLog) : EquinoxCosmosClient = + let (Discovery.UriAndKey (databaseUri=uri; key=key)) = discovery if skipLog <> Some true then logName uri name - new EquinoxCosmosClient(log, new CosmosClient(string uri, key, __.ClientOptions)) + let cosmosClient = new CosmosClient(string uri, key, __.ClientOptions) + + match provisioningMode with + | Some mode -> + EquinoxCosmosInitialization.initializeContainer cosmosClient dbName containerName mode (defaultArg createStoredProcedure true) storedProcedureName None + |> Async.Ignore + |> Async.RunSynchronously + | _ -> () + + let containerClient = cosmosClient.GetContainer(dbName, containerName) + new EquinoxCosmosClient(cosmosClient, containerClient, dbName, containerName) namespace Equinox.Cosmos.Core @@ -1236,8 +1243,6 @@ type AppendResult<'t> = /// Encapsulates the core facilities Equinox.Cosmos offers for operating directly on Events in Streams. 
type Context ( client: EquinoxCosmosClient, - databaseId: string, - containerId: string, /// Logger to write to - see https://github.com/serilog/serilog/wiki/Provided-Sinks for how to wire to your logger log : Serilog.ILogger, /// Optional maximum number of Store.Batch records to retrieve as a set (how many Events are placed therein is controlled by average batch size when appending events @@ -1248,9 +1253,10 @@ type Context do if log = null then nullArg "log" let conn = Equinox.Cosmos.Internal.Connection(client) - let containers = Containers(databaseId, containerId) + let containers = Containers(client.DatabaseId, client.ContainerId) let getDefaultMaxItems = match getDefaultMaxItems with Some f -> f | None -> fun () -> defaultArg defaultMaxItems 10 let batching = BatchingPolicy(getDefaultMaxItems=getDefaultMaxItems) + let init = fun () -> EquinoxCosmosInitialization.createSyncStoredProcedure client.ContainerSdkClient SyncStoredProcedure.defaultName None |> Async.Ignore let gateway = Gateway(conn, batching) let maxCountPredicate count = @@ -1264,7 +1270,7 @@ type Context let! 
(Token.Unpack (_,_,pos')), data = res return pos', data } - member __.ResolveStream(streamName) = containers.Resolve(conn.Client, null, streamName, gateway.CreateSyncStoredProcIfNotExists (Some log)) + member __.ResolveStream(streamName) = containers.Resolve(conn.Client, null, streamName, init) member __.CreateStream(streamName) = __.ResolveStream streamName |> fst member internal __.GetLazy((stream, startPos), ?batchSize, ?direction) : AsyncSeq[]> = diff --git a/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs b/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs index c1f780610..47848b9d8 100644 --- a/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs +++ b/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs @@ -30,7 +30,7 @@ type Tests(testOutputHelper) = incr testIterations sprintf "events-%O-%i" name !testIterations let mkContextWithItemLimit log defaultBatchSize = - Context(createSpecifiedCosmosOrSimulatorClient log, dbId, cId, log, ?defaultMaxItems = defaultBatchSize) + Context(createSpecifiedCosmosOrSimulatorClient log, log, ?defaultMaxItems = defaultBatchSize) let mkContext log = mkContextWithItemLimit log None let verifyRequestChargesMax rus = diff --git a/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs b/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs index 0b83c83f7..541b243bb 100644 --- a/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs +++ b/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs @@ -17,12 +17,12 @@ let dbId = read "EQUINOX_COSMOS_DATABASE" |> Option.defaultValue "equinox-test" let cId = read "EQUINOX_COSMOS_CONTAINER" |> Option.defaultValue "equinox-test" let private connectToCosmos (log: Serilog.ILogger) batchSize client = - Context(client, dbId, cId, log = log, defaultMaxItems = batchSize) + Context(client, log = log, defaultMaxItems = batchSize) let createSpecifiedCosmosOrSimulatorClient log = let createClient name discovery = ClientFactory(log=log, requestTimeout=TimeSpan.FromSeconds 3., 
maxRetryAttemptsOnRateLimitedRequests=2, maxRetryWaitTimeOnRateLimitedRequests=TimeSpan.FromMinutes 1.) - .CreateClient(name, discovery) + .CreateClient(name, discovery, dbId, cId) match read "EQUINOX_COSMOS_CONNECTION" with | None -> diff --git a/tools/Equinox.Tool/Program.fs b/tools/Equinox.Tool/Program.fs index f513fb4c9..b067c5f4b 100644 --- a/tools/Equinox.Tool/Program.fs +++ b/tools/Equinox.Tool/Program.fs @@ -304,22 +304,21 @@ let createDomainLog verbose verboseConsole maybeSeqEndpoint = module CosmosInit = open Equinox.Cosmos.Store - let conn (log,verboseConsole,maybeSeq) (sargs : ParseResults) = async { + let conn (log,verboseConsole,maybeSeq) (sargs : ParseResults) = let storeLog = createStoreLog (sargs.Contains Storage.Cosmos.Arguments.VerboseStore) verboseConsole maybeSeq let discovery, dName, cName, factory = Storage.Cosmos.connection (log,storeLog) (Storage.Cosmos.Info sargs) - let client = factory.CreateClient(appName, discovery) - return storeLog, client, dName, cName } + storeLog, factory, discovery, dName, cName - let containerAndOrDb (log: ILogger, verboseConsole, maybeSeq) (iargs: ParseResults) = async { + let containerAndOrDb (log: ILogger, verboseConsole, maybeSeq) (iargs: ParseResults) = match iargs.TryGetSubCommand() with | Some (InitArguments.Cosmos sargs) -> let rus, skipStoredProc = iargs.GetResult(InitArguments.Rus), iargs.Contains InitArguments.SkipStoredProc let mode = if iargs.Contains InitArguments.Shared then Provisioning.Database (ReplaceAlways rus) else Provisioning.Container (ReplaceAlways rus) let modeStr, rus = match mode with Provisioning.Container rus -> "Container",rus | Provisioning.Database rus -> "Database",rus - let! _storeLog,client,dName,cName = conn (log,verboseConsole,maybeSeq) sargs + let _storeLog, factory, discovery, dName, cName = conn (log,verboseConsole,maybeSeq) sargs log.Information("Provisioning `Equinox.Cosmos` Store collection at {mode:l} level for {rus:n0} RU/s", modeStr, rus) - return! 
client.InitializeContainer(dName, cName, mode, not skipStoredProc) - | _ -> failwith "please specify a `cosmos` endpoint" } + factory.CreateClient(appName, discovery, dName, cName, mode, not skipStoredProc) |> ignore + | _ -> failwith "please specify a `cosmos` endpoint" module SqlInit = let databaseOrSchema (log: ILogger) (iargs: ParseResults) = async { @@ -347,8 +346,8 @@ module CosmosStats = let doS,doD,doE = args.Contains StatsArguments.Streams, args.Contains StatsArguments.Documents, args.Contains StatsArguments.Events let doS = doS || (not doD && not doE) // default to counting streams only unless otherwise specified let inParallel = args.Contains Parallel - let! _storeLog,client,dName,cName = CosmosInit.conn (log,verboseConsole,maybeSeq) sargs - let container = client.GetDatabase(dName).GetContainer(cName) + let _storeLog,factory,discovery,dName,cName = CosmosInit.conn (log,verboseConsole,maybeSeq) sargs + let container = factory.CreateClient(appName, discovery, dName, cName) let ops = [ if doS then yield "Streams", """SELECT VALUE COUNT(1) FROM c WHERE c.id="-1" """ if doD then yield "Documents", """SELECT VALUE COUNT(1) FROM c""" @@ -356,7 +355,7 @@ module CosmosStats = log.Information("Computing {measures} ({mode})", Seq.map fst ops, (if inParallel then "in parallel" else "serially")) ops |> Seq.map (fun (name,sql) -> async { log.Debug("Running query: {sql}", sql) - let res = container.SdkClient.QueryValue(sql) + let res = container.ContainerSdkClient.QueryValue(sql) log.Information("{stat}: {result:N0}", name, res)}) |> if inParallel then Async.Parallel else Async.ParallelThrottled 1 // TOCONSIDER replace with Async.Sequence when using new enough FSharp.Core |> Async.Ignore @@ -461,7 +460,7 @@ let main argv = let verbose = args.Contains Verbose use log = createDomainLog verbose verboseConsole maybeSeq try match args.GetSubCommand() with - | Init iargs -> CosmosInit.containerAndOrDb (log, verboseConsole, maybeSeq) iargs |> Async.RunSynchronously + | Init 
iargs -> CosmosInit.containerAndOrDb (log, verboseConsole, maybeSeq) iargs | Config cargs -> SqlInit.databaseOrSchema log cargs |> Async.RunSynchronously | Dump dargs -> Dump.run (log, verboseConsole, maybeSeq) dargs |> Async.RunSynchronously | Stats sargs -> CosmosStats.run (log, verboseConsole, maybeSeq) sargs |> Async.RunSynchronously From 4b92f85b76514a6006127a81daa4742192f2e57b Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Tue, 10 Mar 2020 12:01:39 -0400 Subject: [PATCH 39/71] Make abstractions more generic --- src/Equinox.Cosmos/Cosmos.fs | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index 6a8419599..98420792a 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -449,13 +449,13 @@ type EquinoxCosmosClient (cosmosClient: CosmosClient, containerClient: CosmosCon member val CosmosSdkClient = cosmosClient with get member val ContainerSdkClient = containerClient with get - abstract member QueryPageable<'T> : query: QueryDefinition * ?options: QueryRequestOptions -> AsyncSeq> - default __.QueryPageable<'T>(query, ?options) = + abstract member GetQueryIteratorByPage<'T> : query: QueryDefinition * ?options: QueryRequestOptions -> AsyncSeq> + default __.GetQueryIteratorByPage<'T>(query, ?options) = containerClient.GetItemQueryIterator<'T>(query, requestOptions = defaultArg options null).AsPages() |> AsyncSeq.ofAsyncEnum - abstract member TryReadItem<'T> : docId: string * stream: string * ?options: ItemRequestOptions * ?cancellationToken : CancellationToken -> Async> - default __.TryReadItem<'T>(docId, stream, ?options, ?cancellationToken) = async { - let partitionKey = PartitionKey stream + abstract member TryReadItem<'T> : docId: string * partitionKey: string * ?options: ItemRequestOptions * ?cancellationToken : CancellationToken -> Async> + default __.TryReadItem<'T>(docId, partitionKey, ?options, ?cancellationToken) = async { + 
let partitionKey = PartitionKey partitionKey let options = defaultArg options null let! ct = match cancellationToken with @@ -473,12 +473,11 @@ type EquinoxCosmosClient (cosmosClient: CosmosClient, containerClient: CosmosCon // NB while the docs suggest you may see a 412, the NotModified in the body of the try/with is actually what happens | CosmosException (CosmosStatusCode sc as e) when sc = int System.Net.HttpStatusCode.PreconditionFailed -> return e.Response.Headers.GetRequestCharge(), NotModified } - abstract member Sync: stream: string * tip: Tip * index: int64 * ?storedProcedureName: string * ?etag: string * ?cancellationToken : CancellationToken -> Async> - default __.Sync(stream, tip, index, ?storedProcedureName, ?etag, ?cancellationToken) = async { + abstract member ExecuteStoredProcedure: storedProcedureName: string * partitionKey: string * args: obj[] * ?cancellationToken : CancellationToken -> Async> + default __.ExecuteStoredProcedure(storedProcedureName, partitionKey, args, ?cancellationToken) = async { let! ct = CancellationToken.useOrCreate cancellationToken - let storedProcedureName = defaultArg storedProcedureName SyncStoredProcedure.defaultName - let partitionKey = PartitionKey stream - let args = [| box tip; box index; box (Option.toObj etag)|] + let partitionKey = PartitionKey partitionKey + //let args = [| box tip; box index; box (Option.toObj etag)|] return! containerClient.Scripts.ExecuteStoredProcedureAsync(storedProcedureName, partitionKey, args, cancellationToken = ct) |> Async.AwaitTaskCorrect } module Sync = @@ -495,7 +494,8 @@ module Sync = let private run (container : EquinoxCosmosClient, stream : string) (exp, req: Tip) : Async = async { let ep = match exp with Exp.Version ev -> Position.fromI ev | Exp.Etag et -> Position.fromEtag et | Exp.Any -> Position.fromAppendAtEnd - let! res = container.Sync(stream, req, ep.index, ?etag = ep.etag) + let args = [| box req; box ep.index; box (Option.toObj ep.etag)|] + let! 
res = container.ExecuteStoredProcedure(SyncStoredProcedure.defaultName, stream, args) let newPos = { index = res.Value.n; etag = Option.ofObj res.Value.etag } return res.GetRawResponse().Headers.GetRequestCharge(), res.Value.conflicts |> function | null -> Result.Written newPos @@ -586,7 +586,7 @@ module internal Tip = let cond = if direction = Direction.Forward then "c.n > @startPos" else "c.i < @startPos" QueryDefinition(sprintf "%s AND %s %s" root cond tail).WithParameter("@startPos", positionSoExclusiveWhenBackward) let qro = new QueryRequestOptions(PartitionKey = Nullable(PartitionKey stream), MaxItemCount=Nullable maxItems) - container.QueryPageable(query, options = qro) + container.GetQueryIteratorByPage(query, options = qro) // Unrolls the Batches in a response - note when reading backwards, the events are emitted in reverse order of index let private processNextPage direction (streamName: string) startPos (enumerator: IAsyncEnumerator>) (log: ILogger) From 618c535b2e4900bad8a6141862f527ae846c139e Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Tue, 10 Mar 2020 12:10:18 -0400 Subject: [PATCH 40/71] Rename to EquinoxCosmosClientFactory --- samples/Infrastructure/Storage.fs | 2 +- src/Equinox.Cosmos/Cosmos.fs | 2 +- tests/Equinox.Cosmos.Integration/CosmosFixtures.fs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/samples/Infrastructure/Storage.fs b/samples/Infrastructure/Storage.fs index cd09ccd9c..b57fa20c6 100644 --- a/samples/Infrastructure/Storage.fs +++ b/samples/Infrastructure/Storage.fs @@ -76,7 +76,7 @@ module Cosmos = a.Mode, endpointUri, a.Database, a.Container) log.Information("CosmosDb timeout {timeout}s; Throttling retries {retries}, max wait {maxRetryWaitTime}s", (let t = a.Timeout in t.TotalSeconds), a.Retries, let x = a.MaxRetryWaitTime in x.TotalSeconds) - discovery, a.Database, a.Container, ClientFactory(a.Timeout, a.Retries, a.MaxRetryWaitTime, log=storeLog, mode=a.Mode) + discovery, a.Database, a.Container, 
EquinoxCosmosClientFactory(a.Timeout, a.Retries, a.MaxRetryWaitTime, log=storeLog, mode=a.Mode) let config (log: ILogger, storeLog) (cache, unfolds, batchSize) info = let discovery, dName, cName, factory = connection (log, storeLog) info let ctx = Context(factory.CreateClient(appName, discovery, dName, cName), log = log, defaultMaxItems = batchSize) diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index 98420792a..e4ed09f92 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -1150,7 +1150,7 @@ type Discovery = UriAndKey (Uri uri, key) | _ -> invalidArg "connectionString" "unrecognized connection string format; must be `AccountEndpoint=https://...;AccountKey=...=;`" -type ClientFactory +type EquinoxCosmosClientFactory ( /// Timeout to apply to individual reads/write round-trips going to CosmosDb requestTimeout: TimeSpan, /// Maximum number of times attempt when failure reason is a 429 from CosmosDb, signifying RU limits have been breached diff --git a/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs b/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs index 541b243bb..07d74b5f0 100644 --- a/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs +++ b/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs @@ -21,7 +21,7 @@ let private connectToCosmos (log: Serilog.ILogger) batchSize client = let createSpecifiedCosmosOrSimulatorClient log = let createClient name discovery = - ClientFactory(log=log, requestTimeout=TimeSpan.FromSeconds 3., maxRetryAttemptsOnRateLimitedRequests=2, maxRetryWaitTimeOnRateLimitedRequests=TimeSpan.FromMinutes 1.) + EquinoxCosmosClientFactory(log=log, requestTimeout=TimeSpan.FromSeconds 3., maxRetryAttemptsOnRateLimitedRequests=2, maxRetryWaitTimeOnRateLimitedRequests=TimeSpan.FromMinutes 1.) 
.CreateClient(name, discovery, dbId, cId) match read "EQUINOX_COSMOS_CONNECTION" with From 474575ae5ba205db57d253c885f7e2d4c7d2aa06 Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Tue, 10 Mar 2020 12:35:32 -0400 Subject: [PATCH 41/71] Fix spacing --- src/Equinox.Cosmos/Cosmos.fs | 60 ++++++++++++++++++------------------ 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index e4ed09f92..208cabdb1 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -74,40 +74,40 @@ type [] // TODO for STJ v5: All fields required unless /// As one cannot sort by the implicit `id` field, we have an indexed `i` field for sort and range query use static member internal IndexedFields = [Batch.PartitionKeyField; "i"; "n"] - /// Manages zipping of the UTF-8 json bytes to make the index record minimal from the perspective of the writer stored proc - /// Only applied to snapshots in the Tip - type JsonCompressedBase64Converter() = - inherit JsonConverter() +/// Manages zipping of the UTF-8 json bytes to make the index record minimal from the perspective of the writer stored proc +/// Only applied to snapshots in the Tip +type JsonCompressedBase64Converter() = + inherit JsonConverter() - override __.Read (reader, _typeToConvert, options) = - if reader.TokenType = JsonTokenType.Null then - JsonSerializer.Deserialize(&reader, options) - else - let compressedBytes = reader.GetBytesFromBase64() - use input = new MemoryStream(compressedBytes) - use decompressor = new System.IO.Compression.DeflateStream(input, System.IO.Compression.CompressionMode.Decompress) - use output = new MemoryStream() - decompressor.CopyTo(output) - JsonSerializer.Deserialize(ReadOnlySpan.op_Implicit(output.ToArray()), options) - - override __.Write (writer, value, _options) = - if value.ValueKind = JsonValueKind.Null || value.ValueKind = JsonValueKind.Undefined then - writer.WriteNullValue() - else - let input = 
System.Text.Encoding.UTF8.GetBytes(value.GetRawText()) - use output = new MemoryStream() - use compressor = new System.IO.Compression.DeflateStream(output, System.IO.Compression.CompressionLevel.Optimal) - compressor.Write(input, 0, input.Length) - compressor.Close() - writer.WriteBase64StringValue(ReadOnlySpan.op_Implicit(output.ToArray())) + override __.Read (reader, _typeToConvert, options) = + if reader.TokenType = JsonTokenType.Null then + JsonSerializer.Deserialize(&reader, options) + else + let compressedBytes = reader.GetBytesFromBase64() + use input = new MemoryStream(compressedBytes) + use decompressor = new System.IO.Compression.DeflateStream(input, System.IO.Compression.CompressionMode.Decompress) + use output = new MemoryStream() + decompressor.CopyTo(output) + JsonSerializer.Deserialize(ReadOnlySpan.op_Implicit(output.ToArray()), options) - type JsonCompressedBase64ConverterAttribute () = - inherit JsonConverterAttribute(typeof) + override __.Write (writer, value, _options) = + if value.ValueKind = JsonValueKind.Null || value.ValueKind = JsonValueKind.Undefined then + writer.WriteNullValue() + else + let input = System.Text.Encoding.UTF8.GetBytes(value.GetRawText()) + use output = new MemoryStream() + use compressor = new System.IO.Compression.DeflateStream(output, System.IO.Compression.CompressionLevel.Optimal) + compressor.Write(input, 0, input.Length) + compressor.Close() + writer.WriteBase64StringValue(ReadOnlySpan.op_Implicit(output.ToArray())) + +type JsonCompressedBase64ConverterAttribute () = + inherit JsonConverterAttribute(typeof) - static let converter = JsonCompressedBase64Converter() + static let converter = JsonCompressedBase64Converter() - override __.CreateConverter _typeToConvert = - converter :> JsonConverter + override __.CreateConverter _typeToConvert = + converter :> JsonConverter /// Compaction/Snapshot/Projection Event based on the state at a given point in time `i` [] From a42329995738986e1feb61b2363c0a52c47b85e0 Mon Sep 17 
00:00:00 2001 From: Yaron Librach Date: Tue, 10 Mar 2020 12:46:40 -0400 Subject: [PATCH 42/71] Add argument docs --- src/Equinox.Cosmos/Cosmos.fs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index 208cabdb1..b975cb823 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -1198,13 +1198,17 @@ type EquinoxCosmosClientFactory // co.TransportClientHandlerFactory <- inhibitCertCheck co - member __.CreateClient + abstract member CreateClient: name: string * discovery: Discovery * dbName: string * containerName: string * ?provisioningMode: Provisioning * ?createStoredProcedure: bool * ?storedProcedureName: string * ?skipLog: bool -> EquinoxCosmosClient + default __.CreateClient ( /// Name should be sufficient to uniquely identify this connection within a single app instance's logs name, discovery : Discovery, dbName: string, containerName: string, + /// If provided, the database and container will be initialized based on the provided values ?provisioningMode: Provisioning, + /// true to create the sync stored procedure during initialization ?createStoredProcedure: bool, + /// If provided along with createStoredProcedure being set to true, will create the stored procedure with a custom name ?storedProcedureName: string, /// true to inhibit logging of client name []?skipLog) : EquinoxCosmosClient = From 138a01e18fdb4fb2cd2bfcb8bc85bcbec3b237a6 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Wed, 11 Mar 2020 08:44:47 +0000 Subject: [PATCH 43/71] Inline map/bind 'help'ers --- src/Equinox.Core/Infrastructure.fs | 3 -- src/Equinox.Cosmos/Cosmos.fs | 55 ++++++++++++++---------------- 2 files changed, 26 insertions(+), 32 deletions(-) diff --git a/src/Equinox.Core/Infrastructure.fs b/src/Equinox.Core/Infrastructure.fs index a70fbb91b..aaa90d24a 100755 --- a/src/Equinox.Core/Infrastructure.fs +++ b/src/Equinox.Core/Infrastructure.fs @@ -73,9 +73,6 @@ type Async with 
sc ()) |> ignore) - static member map (f:'a -> 'b) (a:Async<'a>) : Async<'b> = async.Bind(a, f >> async.Return) - static member bind (f:'a -> Async<'b>) (a:Async<'a>) : Async<'b> = async.Bind(a, f) - #if NETSTANDARD2_1 static member inline AwaitValueTask (vtask: ValueTask<'T>) : Async<'T> = vtask.AsTask() |> Async.AwaitTaskCorrect #endif diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index b975cb823..535720f88 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -32,7 +32,7 @@ type [] // TODO for STJ v5: All fields required unless /// Optional causationId causationId : string // TODO for STJ v5: Optional, not serialized if missing - } + } interface IEventData with member __.EventType = __.c @@ -78,7 +78,7 @@ type [] // TODO for STJ v5: All fields required unless /// Only applied to snapshots in the Tip type JsonCompressedBase64Converter() = inherit JsonConverter() - + override __.Read (reader, _typeToConvert, options) = if reader.TokenType = JsonTokenType.Null then JsonSerializer.Deserialize(&reader, options) @@ -89,7 +89,7 @@ type JsonCompressedBase64Converter() = use output = new MemoryStream() decompressor.CopyTo(output) JsonSerializer.Deserialize(ReadOnlySpan.op_Implicit(output.ToArray()), options) - + override __.Write (writer, value, _options) = if value.ValueKind = JsonValueKind.Null || value.ValueKind = JsonValueKind.Undefined then writer.WriteNullValue() @@ -103,12 +103,12 @@ type JsonCompressedBase64Converter() = type JsonCompressedBase64ConverterAttribute () = inherit JsonConverterAttribute(typeof) - + static let converter = JsonCompressedBase64Converter() - + override __.CreateConverter _typeToConvert = converter :> JsonConverter - + /// Compaction/Snapshot/Projection Event based on the state at a given point in time `i` [] type Unfold = @@ -354,16 +354,16 @@ module MicrosoftAzureCosmosWrappers = // CosmosDB Error HttpStatusCode extractor let (|CosmosStatusCode|) (e : CosmosException) = 
e.Response.Status - + type ReadResult<'T> = Found of 'T | NotFound | NotModified - + type Azure.Core.ResponseHeaders with member headers.GetRequestCharge () = match headers.TryGetValue("x-ms-request-charge") with | true, charge when not <| String.IsNullOrEmpty charge -> float charge | _ -> 0. - - + + [] type SyncResponse = { etag: string; n: int64; conflicts: Unfold[] } @@ -715,7 +715,7 @@ module internal Tip = let! page = retryingLoggingReadPage e batchLog match page with - | Some (evts, _pos, rus) -> + | Some (evts, _pos, rus) -> ru <- ru + rus allEvents.AddRange(evts) @@ -970,33 +970,30 @@ type private Folder<'event, 'state, 'context> module EquinoxCosmosInitialization = let internal getOrCreateDatabase (sdk: CosmosClient) (dbName: string) (throughput: ResourceThroughput) (cancellationToken: CancellationToken option) = async { let! ct = CancellationToken.useOrCreate cancellationToken - let! response = + let! response = match throughput with | Default -> sdk.CreateDatabaseIfNotExistsAsync(id = dbName, cancellationToken = ct) |> Async.AwaitTaskCorrect | SetIfCreating value -> sdk.CreateDatabaseIfNotExistsAsync(id = dbName, throughput = Nullable(value), cancellationToken = ct) |> Async.AwaitTaskCorrect - | ReplaceAlways value -> - sdk.CreateDatabaseIfNotExistsAsync(id = dbName, throughput = Nullable(value), cancellationToken = ct) - |> Async.AwaitTaskCorrect - |> Async.bind (fun response -> - response.Database.ReplaceThroughputAsync(value, cancellationToken = ct) |> Async.AwaitTaskCorrect |> Async.map (fun _ -> response)) + | ReplaceAlways value -> async { + let! response = sdk.CreateDatabaseIfNotExistsAsync(id = dbName, throughput = Nullable(value), cancellationToken = ct) |> Async.AwaitTaskCorrect + let! 
_ = response.Database.ReplaceThroughputAsync(value, cancellationToken = ct) |> Async.AwaitTaskCorrect + return response } return response.Database } let internal getOrCreateContainer (db: CosmosDatabase) (props: ContainerProperties) (throughput: ResourceThroughput) (cancellationToken: CancellationToken option) = async { let! ct = CancellationToken.useOrCreate cancellationToken - let! response = + let! response = match throughput with | Default -> db.CreateContainerIfNotExistsAsync(props, cancellationToken = ct) |> Async.AwaitTaskCorrect | SetIfCreating value -> db.CreateContainerIfNotExistsAsync(props, throughput = Nullable(value), cancellationToken = ct) |> Async.AwaitTaskCorrect - | ReplaceAlways value -> - db.CreateContainerIfNotExistsAsync(props, throughput = Nullable(value), cancellationToken = ct) - |> Async.AwaitTaskCorrect - |> Async.bind (fun response -> - response.Container.ReplaceThroughputAsync(value, cancellationToken = ct) |> Async.AwaitTaskCorrect |> Async.map (fun _ -> response)) - + | ReplaceAlways value -> async { + let! response = db.CreateContainerIfNotExistsAsync(props, throughput = Nullable(value), cancellationToken = ct) |> Async.AwaitTaskCorrect + let! 
_ = response.Container.ReplaceThroughputAsync(value, cancellationToken = ct) |> Async.AwaitTaskCorrect + return response } return response.Container } - let internal getBatchAndTipContainerProps (containerName: string) = + let internal getBatchAndTipContainerProps (containerName: string) = let props = ContainerProperties(id = containerName, partitionKeyPath = sprintf "/%s" Batch.PartitionKeyField) props.IndexingPolicy.IndexingMode <- IndexingMode.Consistent props.IndexingPolicy.Automatic <- true @@ -1014,15 +1011,15 @@ module EquinoxCosmosInitialization = with CosmosException ((CosmosStatusCode sc) as e) when sc = int System.Net.HttpStatusCode.Conflict -> return e.Response.Headers.GetRequestCharge() } let initializeContainer (sdk: CosmosClient) (dbName: string) (containerName: string) (mode: Provisioning) (createStoredProcedure: bool) (storedProcedureName: string option) (cancellationToken: CancellationToken option) = async { - let! ct = CancellationToken.useOrCreate cancellationToken |> Async.map Some + let! ct = CancellationToken.useOrCreate cancellationToken let dbThroughput = match mode with Provisioning.Database throughput -> throughput | _ -> Default let containerThroughput = match mode with Provisioning.Container throughput -> throughput | _ -> Default - let! db = getOrCreateDatabase sdk dbName dbThroughput ct - let! container = getOrCreateContainer db (getBatchAndTipContainerProps containerName) containerThroughput ct + let! db = getOrCreateDatabase sdk dbName dbThroughput (Some ct) + let! container = getOrCreateContainer db (getBatchAndTipContainerProps containerName) containerThroughput (Some ct) if createStoredProcedure then let syncStoredProcedureName = storedProcedureName |> Option.defaultValue SyncStoredProcedure.defaultName - do! createSyncStoredProcedure container syncStoredProcedureName ct |> Async.Ignore + do! 
createSyncStoredProcedure container syncStoredProcedureName (Some ct) |> Async.Ignore return container } From 4b84eddff982bbd7dc542b9b4bbc3fa6e5bb1699 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Thu, 12 Mar 2020 14:11:29 +0000 Subject: [PATCH 44/71] Rebase on Codec logic extracted into FsCodec.System.Text.Json (#202) * Fix FSharp.Core ref in Cosmos * Update codec refs * Change code to use FsCodec.SystemTextJson --- samples/Infrastructure/Services.fs | 13 +- samples/Store/Backend/Backend.fsproj | 5 +- samples/Store/Domain/Cart.fs | 31 +--- samples/Store/Domain/ContactPreferences.fs | 23 +-- samples/Store/Domain/Domain.fsproj | 7 +- samples/Store/Domain/Favorites.fs | 23 +-- samples/Store/Domain/SavedForLater.fs | 29 +--- samples/Store/Integration/CartIntegration.fs | 7 +- .../ContactPreferencesIntegration.fs | 10 +- .../Store/Integration/FavoritesIntegration.fs | 7 +- samples/TodoBackend/Todo.fs | 29 +--- samples/TodoBackend/TodoBackend.fsproj | 5 +- samples/Tutorial/Gapless.fs | 30 +--- samples/Tutorial/Index.fs | 29 +--- samples/Tutorial/Sequence.fs | 23 +-- samples/Tutorial/Set.fs | 29 +--- samples/Tutorial/Upload.fs | 25 +-- src/Equinox.Core/Equinox.Core.fsproj | 4 - src/Equinox.Core/Json/JsonElementHelpers.fs | 25 --- src/Equinox.Core/Json/JsonRecordConverter.fs | 161 ------------------ src/Equinox.Core/Json/Options.fs | 14 -- .../Json/Utf8JsonReaderExtensions.fs | 22 --- src/Equinox.Cosmos/Cosmos.fs | 10 +- src/Equinox.Cosmos/Equinox.Cosmos.fsproj | 6 +- .../Equinox.EventStore.fsproj | 2 +- .../Equinox.MemoryStore.fsproj | 2 +- .../Equinox.SqlStreamStore.fsproj | 2 +- .../CosmosCoreIntegration.fs | 4 +- .../CosmosIntegration.fs | 4 +- tests/Equinox.Cosmos.Integration/Json.fs | 14 +- .../JsonConverterTests.fs | 21 +-- .../Equinox.EventStore.Integration.fsproj | 2 +- .../StoreIntegration.fs | 6 +- 33 files changed, 83 insertions(+), 541 deletions(-) delete mode 100644 src/Equinox.Core/Json/JsonElementHelpers.fs delete mode 100644 
src/Equinox.Core/Json/JsonRecordConverter.fs delete mode 100644 src/Equinox.Core/Json/Options.fs delete mode 100644 src/Equinox.Core/Json/Utf8JsonReaderExtensions.fs diff --git a/samples/Infrastructure/Services.fs b/samples/Infrastructure/Services.fs index b7e0c2623..14531e341 100644 --- a/samples/Infrastructure/Services.fs +++ b/samples/Infrastructure/Services.fs @@ -2,7 +2,6 @@ open Domain open FsCodec -open FsCodec.SystemTextJson.Serialization open Microsoft.Extensions.DependencyInjection open System open System.Text.Json @@ -49,24 +48,24 @@ type ServiceBuilder(storageConfig, handlerLog) = let snapshot = Favorites.Fold.isOrigin,Favorites.Fold.snapshot match storageConfig with - | Storage.StorageConfig.Cosmos _ -> Backend.Favorites.Service(handlerLog, resolver.ResolveWithJsonElementCodec(Favorites.Events.JsonElementCodec.codec JsonSerializer.defaultOptions, fold, initial, snapshot)) - | _ -> Backend.Favorites.Service(handlerLog, resolver.ResolveWithUtf8ArrayCodec(Favorites.Events.Utf8ArrayCodec.codec, fold, initial, snapshot)) + | Storage.StorageConfig.Cosmos _ -> Backend.Favorites.Service(handlerLog, resolver.ResolveWithJsonElementCodec(Favorites.Events.codecStj, fold, initial, snapshot)) + | _ -> Backend.Favorites.Service(handlerLog, resolver.ResolveWithUtf8ArrayCodec(Favorites.Events.codecNewtonsoft, fold, initial, snapshot)) member __.CreateSaveForLaterService() = let fold, initial = SavedForLater.Fold.fold, SavedForLater.Fold.initial let snapshot = SavedForLater.Fold.isOrigin,SavedForLater.Fold.compact match storageConfig with - | Storage.StorageConfig.Cosmos _ -> Backend.SavedForLater.Service(handlerLog, resolver.ResolveWithJsonElementCodec(SavedForLater.Events.JsonElementCodec.codec JsonSerializer.defaultOptions,fold,initial,snapshot), maxSavedItems=50) - | _ -> Backend.SavedForLater.Service(handlerLog, resolver.ResolveWithUtf8ArrayCodec(SavedForLater.Events.Utf8ArrayCodec.codec,fold,initial,snapshot), maxSavedItems=50) + | Storage.StorageConfig.Cosmos _ 
-> Backend.SavedForLater.Service(handlerLog, resolver.ResolveWithJsonElementCodec(SavedForLater.Events.codecStj,fold,initial,snapshot), maxSavedItems=50) + | _ -> Backend.SavedForLater.Service(handlerLog, resolver.ResolveWithUtf8ArrayCodec(SavedForLater.Events.codecNewtonsoft,fold,initial,snapshot), maxSavedItems=50) member __.CreateTodosService() = let fold, initial = TodoBackend.Fold.fold, TodoBackend.Fold.initial let snapshot = TodoBackend.Fold.isOrigin, TodoBackend.Fold.snapshot match storageConfig with - | Storage.StorageConfig.Cosmos _ -> TodoBackend.Service(handlerLog, resolver.ResolveWithJsonElementCodec(TodoBackend.Events.JsonElementCodec.codec JsonSerializer.defaultOptions,fold,initial,snapshot)) - | _ -> TodoBackend.Service(handlerLog, resolver.ResolveWithUtf8ArrayCodec(TodoBackend.Events.Utf8ArrayCodec.codec,fold,initial,snapshot)) + | Storage.StorageConfig.Cosmos _ -> TodoBackend.Service(handlerLog, resolver.ResolveWithJsonElementCodec(TodoBackend.Events.codecStj,fold,initial,snapshot)) + | _ -> TodoBackend.Service(handlerLog, resolver.ResolveWithUtf8ArrayCodec(TodoBackend.Events.codecNewtonsoft,fold,initial,snapshot)) let register (services : IServiceCollection, storageConfig, handlerLog) = let regF (factory : IServiceProvider -> 'T) = services.AddSingleton<'T>(fun (sp: IServiceProvider) -> factory sp) |> ignore diff --git a/samples/Store/Backend/Backend.fsproj b/samples/Store/Backend/Backend.fsproj index fec9bcd4f..0288582cb 100644 --- a/samples/Store/Backend/Backend.fsproj +++ b/samples/Store/Backend/Backend.fsproj @@ -1,7 +1,7 @@ - netstandard2.0;net461 + netstandard2.1 5 false true @@ -23,8 +23,7 @@ - - + \ No newline at end of file diff --git a/samples/Store/Domain/Cart.fs b/samples/Store/Domain/Cart.fs index bf6ce47e2..ecaf1c059 100644 --- a/samples/Store/Domain/Cart.fs +++ b/samples/Store/Domain/Cart.fs @@ -25,33 +25,8 @@ module Events = | ItemWaiveReturnsChanged of ItemWaiveReturnsInfo interface TypeShape.UnionContract.IUnionContract - module 
Utf8ArrayCodec = - let codec = FsCodec.NewtonsoftJson.Codec.Create() - - module JsonElementCodec = - open FsCodec.SystemTextJson - open System.Text.Json - - let private encode (options: JsonSerializerOptions) = - fun (evt: Event) -> - match evt with - | Snapshotted state -> "Snapshotted", JsonSerializer.SerializeToElement(state, options) - | ItemAdded addInfo -> "ItemAdded", JsonSerializer.SerializeToElement(addInfo, options) - | ItemRemoved removeInfo -> "ItemRemoved", JsonSerializer.SerializeToElement(removeInfo, options) - | ItemQuantityChanged changeInfo -> "ItemQuantityChanged", JsonSerializer.SerializeToElement(changeInfo, options) - | ItemWaiveReturnsChanged waiveInfo -> "ItemWaiveReturnsChanged", JsonSerializer.SerializeToElement(waiveInfo, options) - - let private tryDecode (options: JsonSerializerOptions) = - fun (eventType, data: JsonElement) -> - match eventType with - | "Snapshotted" -> Some (Snapshotted <| JsonSerializer.DeserializeElement(data, options)) - | "ItemAdded" -> Some (ItemAdded <| JsonSerializer.DeserializeElement(data, options)) - | "ItemRemoved" -> Some (ItemRemoved <| JsonSerializer.DeserializeElement(data, options)) - | "ItemQuantityChanged" -> Some (ItemQuantityChanged <| JsonSerializer.DeserializeElement(data, options)) - | "ItemWaiveReturnsChanged" -> Some (ItemWaiveReturnsChanged <| JsonSerializer.DeserializeElement(data, options)) - | _ -> None - - let codec options = FsCodec.Codec.Create(encode options, tryDecode options) + let codecNewtonsoft = FsCodec.NewtonsoftJson.Codec.Create() + let codecStj = FsCodec.SystemTextJson.Codec.Create() module Fold = type ItemInfo = { skuId: SkuId; quantity: int; returnsWaived: bool } @@ -106,4 +81,4 @@ module Commands = match waived with | Some waived when itemExistsWithDifferentWaiveStatus skuId waived -> yield Events.ItemWaiveReturnsChanged { context = c; skuId = skuId; waived = waived } - | _ -> () ] + | _ -> () ] diff --git a/samples/Store/Domain/ContactPreferences.fs 
b/samples/Store/Domain/ContactPreferences.fs index 140220496..3613f5562 100644 --- a/samples/Store/Domain/ContactPreferences.fs +++ b/samples/Store/Domain/ContactPreferences.fs @@ -14,25 +14,8 @@ module Events = | []Updated of Value interface TypeShape.UnionContract.IUnionContract - module Utf8ArrayCodec = - let codec = FsCodec.NewtonsoftJson.Codec.Create() - - module JsonElementCodec = - open FsCodec.SystemTextJson - open System.Text.Json - - let private encode (options: JsonSerializerOptions) = - fun (evt: Event) -> - match evt with - | Updated value -> "contactPreferencesChanged", JsonSerializer.SerializeToElement(value, options) - - let private tryDecode (options: JsonSerializerOptions) = - fun (eventType, data: JsonElement) -> - match eventType with - | "contactPreferencesChanged" -> Some (Updated <| JsonSerializer.DeserializeElement(data, options)) - | _ -> None - - let codec options = FsCodec.Codec.Create(encode options, tryDecode options) + let codecNewtonsoft = FsCodec.NewtonsoftJson.Codec.Create() + let codecStj = FsCodec.SystemTextJson.Codec.Create() module Fold = @@ -56,4 +39,4 @@ module Commands = match command with | Update ({ preferences = preferences } as value) -> if state = preferences then [] else - [ Events.Updated value ] + [ Events.Updated value ] diff --git a/samples/Store/Domain/Domain.fsproj b/samples/Store/Domain/Domain.fsproj index d2f66d3b3..90393441a 100644 --- a/samples/Store/Domain/Domain.fsproj +++ b/samples/Store/Domain/Domain.fsproj @@ -1,7 +1,7 @@  - netstandard2.0;net461 + netstandard2.1 5 false true @@ -19,9 +19,10 @@ - + - + + diff --git a/samples/Store/Domain/Favorites.fs b/samples/Store/Domain/Favorites.fs index c350052ef..428232fa3 100644 --- a/samples/Store/Domain/Favorites.fs +++ b/samples/Store/Domain/Favorites.fs @@ -15,27 +15,8 @@ module Events = | Unfavorited of Unfavorited interface TypeShape.UnionContract.IUnionContract - module Utf8ArrayCodec = - let codec = FsCodec.NewtonsoftJson.Codec.Create() - - module 
JsonElementCodec = - open FsCodec.SystemTextJson - open System.Text.Json - - let private encode (options: JsonSerializerOptions) = fun (evt: Event) -> - match evt with - | Snapshotted snapshotted -> "Snapshotted", JsonSerializer.SerializeToElement(snapshotted, options) - | Favorited favorited -> "Favorited", JsonSerializer.SerializeToElement(favorited, options) - | Unfavorited unfavorited -> "Unfavorited", JsonSerializer.SerializeToElement(unfavorited, options) - - let private tryDecode (options: JsonSerializerOptions) = fun (eventType, data: JsonElement) -> - match eventType with - | "Snapshotted" -> Some (Snapshotted <| JsonSerializer.DeserializeElement(data, options)) - | "Favorited" -> Some (Favorited <| JsonSerializer.DeserializeElement(data, options)) - | "Unfavorited" -> Some (Unfavorited <| JsonSerializer.DeserializeElement(data, options)) - | _ -> None - - let codec options = FsCodec.Codec.Create(encode options, tryDecode options) + let codecNewtonsoft = FsCodec.NewtonsoftJson.Codec.Create() + let codecStj = FsCodec.SystemTextJson.Codec.Create() module Fold = diff --git a/samples/Store/Domain/SavedForLater.fs b/samples/Store/Domain/SavedForLater.fs index f936ac535..44ba972dd 100644 --- a/samples/Store/Domain/SavedForLater.fs +++ b/samples/Store/Domain/SavedForLater.fs @@ -30,31 +30,8 @@ module Events = | Added of Added interface TypeShape.UnionContract.IUnionContract - module Utf8ArrayCodec = - let codec = FsCodec.NewtonsoftJson.Codec.Create() - - module JsonElementCodec = - open FsCodec.SystemTextJson - open System.Text.Json - - let private encode (options: JsonSerializerOptions) = - fun (evt: Event) -> - match evt with - | Compacted compacted -> Compaction.EventType, JsonSerializer.SerializeToElement(compacted, options) - | Merged merged -> "Merged", JsonSerializer.SerializeToElement(merged, options) - | Removed removed -> "Removed", JsonSerializer.SerializeToElement(removed, options) - | Added added -> "Added", JsonSerializer.SerializeToElement(added, 
options) - - let private tryDecode (options: JsonSerializerOptions) = - fun (eventType, data: JsonElement) -> - match eventType with - | Compaction.EventType -> Some (Compacted <| JsonSerializer.DeserializeElement(data, options)) - | "Merged" -> Some (Merged <| JsonSerializer.DeserializeElement(data, options)) - | "Removed" -> Some (Removed <| JsonSerializer.DeserializeElement(data, options)) - | "Added" -> Some (Added <| JsonSerializer.DeserializeElement(data, options)) - | _ -> None - - let codec options = FsCodec.Codec.Create(encode options, tryDecode options) + let codecNewtonsoft = FsCodec.NewtonsoftJson.Codec.Create() + let codecStj = FsCodec.SystemTextJson.Codec.Create() module Fold = open Events @@ -129,4 +106,4 @@ module Commands = let index = Index state let net = skus |> Array.filter (index.DoesNotAlreadyContainSameOrMoreRecent dateSaved) if Array.isEmpty net then true, [] - else validateAgainstInvariants [ Events.Added { skus = net ; dateSaved = dateSaved } ] + else validateAgainstInvariants [ Events.Added { skus = net ; dateSaved = dateSaved } ] diff --git a/samples/Store/Integration/CartIntegration.fs b/samples/Store/Integration/CartIntegration.fs index 4d7db0cbd..f917c7bb4 100644 --- a/samples/Store/Integration/CartIntegration.fs +++ b/samples/Store/Integration/CartIntegration.fs @@ -4,7 +4,6 @@ open Equinox open Equinox.Cosmos.Integration open Equinox.EventStore open Equinox.MemoryStore -open FsCodec.SystemTextJson.Serialization open Swensen.Unquote #nowarn "1182" // From hereon in, we may have some 'unused' privates (the tests) @@ -16,15 +15,15 @@ let createMemoryStore () = // we want to validate that the JSON UTF8 is working happily VolatileStore() let createServiceMemory log store = - Backend.Cart.Service(log, fun (id,opt) -> MemoryStore.Resolver(store, Domain.Cart.Events.Utf8ArrayCodec.codec, fold, initial).Resolve(id,?option=opt)) + Backend.Cart.Service(log, fun (id,opt) -> MemoryStore.Resolver(store, Domain.Cart.Events.codecNewtonsoft, fold, 
initial).Resolve(id,?option=opt)) -let eventStoreCodec = Domain.Cart.Events.Utf8ArrayCodec.codec +let eventStoreCodec = Domain.Cart.Events.codecNewtonsoft let resolveGesStreamWithRollingSnapshots gateway = fun (id,opt) -> EventStore.Resolver(gateway, eventStoreCodec, fold, initial, access = AccessStrategy.RollingSnapshots snapshot).Resolve(id,?option=opt) let resolveGesStreamWithoutCustomAccessStrategy gateway = fun (id,opt) -> EventStore.Resolver(gateway, eventStoreCodec, fold, initial).Resolve(id,?option=opt) -let cosmosCodec = Domain.Cart.Events.JsonElementCodec.codec JsonSerializer.defaultOptions +let cosmosCodec = Domain.Cart.Events.codecStj let resolveCosmosStreamWithSnapshotStrategy gateway = fun (id,opt) -> Cosmos.Resolver(gateway, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.Snapshot snapshot).Resolve(id,?option=opt) let resolveCosmosStreamWithoutCustomAccessStrategy gateway = diff --git a/samples/Store/Integration/ContactPreferencesIntegration.fs b/samples/Store/Integration/ContactPreferencesIntegration.fs index c04fb7360..120f1a50f 100644 --- a/samples/Store/Integration/ContactPreferencesIntegration.fs +++ b/samples/Store/Integration/ContactPreferencesIntegration.fs @@ -2,26 +2,24 @@ open Equinox open Equinox.Cosmos.Integration -open FsCodec.SystemTextJson.Serialization open Swensen.Unquote -open Xunit #nowarn "1182" // From hereon in, we may have some 'unused' privates (the tests) let fold, initial = Domain.ContactPreferences.Fold.fold, Domain.ContactPreferences.Fold.initial let createMemoryStore () = - new MemoryStore.VolatileStore<_>() + MemoryStore.VolatileStore<_>() let createServiceMemory log store = Backend.ContactPreferences.Service(log, MemoryStore.Resolver(store, FsCodec.Box.Codec.Create(), fold, initial).Resolve) -let eventStoreCodec = Domain.ContactPreferences.Events.Utf8ArrayCodec.codec +let eventStoreCodec = Domain.ContactPreferences.Events.codecNewtonsoft let 
resolveStreamGesWithOptimizedStorageSemantics gateway = EventStore.Resolver(gateway 1, eventStoreCodec, fold, initial, access = EventStore.AccessStrategy.LatestKnownEvent).Resolve let resolveStreamGesWithoutAccessStrategy gateway = EventStore.Resolver(gateway defaultBatchSize, eventStoreCodec, fold, initial).Resolve -let cosmosCodec = Domain.ContactPreferences.Events.JsonElementCodec.codec JsonSerializer.defaultOptions +let cosmosCodec = Domain.ContactPreferences.Events.codecStj let resolveStreamCosmosWithLatestKnownEventSemantics gateway = Cosmos.Resolver(gateway 1, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.LatestKnownEvent).Resolve let resolveStreamCosmosUnoptimized gateway = @@ -76,7 +74,7 @@ type Tests(testOutputHelper) = let! service = arrange connectToSpecifiedCosmosOrSimulator createCosmosContext resolveStreamCosmosWithLatestKnownEventSemantics do! act service args } - + [] let ``Can roundtrip against Cosmos, correctly folding the events with RollingUnfold semantics`` args = Async.RunSynchronously <| async { let! 
service = arrange connectToSpecifiedCosmosOrSimulator createCosmosContext resolveStreamCosmosRollingUnfolds diff --git a/samples/Store/Integration/FavoritesIntegration.fs b/samples/Store/Integration/FavoritesIntegration.fs index 2d3e90430..ef72c5a0e 100644 --- a/samples/Store/Integration/FavoritesIntegration.fs +++ b/samples/Store/Integration/FavoritesIntegration.fs @@ -2,7 +2,6 @@ open Equinox open Equinox.Cosmos.Integration -open FsCodec.SystemTextJson.Serialization open Swensen.Unquote #nowarn "1182" // From hereon in, we may have some 'unused' privates (the tests) @@ -15,12 +14,12 @@ let createMemoryStore () = let createServiceMemory log store = Backend.Favorites.Service(log, MemoryStore.Resolver(store, FsCodec.Box.Codec.Create(), fold, initial).Resolve) -let eventStoreCodec = Domain.Favorites.Events.Utf8ArrayCodec.codec +let eventStoreCodec = Domain.Favorites.Events.codecNewtonsoft let createServiceGes gateway log = let resolve = EventStore.Resolver(gateway, eventStoreCodec, fold, initial, access = EventStore.AccessStrategy.RollingSnapshots snapshot).Resolve Backend.Favorites.Service(log, resolve) -let cosmosCodec = Domain.Favorites.Events.JsonElementCodec.codec JsonSerializer.defaultOptions +let cosmosCodec = Domain.Favorites.Events.codecStj let createServiceCosmos gateway log = let resolve = Cosmos.Resolver(gateway, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.Snapshot snapshot).Resolve Backend.Favorites.Service(log, resolve) @@ -68,7 +67,7 @@ type Tests(testOutputHelper) = let service = createServiceCosmos gateway log do! 
act service args } - + [] let ``Can roundtrip against Cosmos, correctly folding the events with rolling unfolds`` args = Async.RunSynchronously <| async { let log = createLog () diff --git a/samples/TodoBackend/Todo.fs b/samples/TodoBackend/Todo.fs index 133e7d40c..a60dfddba 100644 --- a/samples/TodoBackend/Todo.fs +++ b/samples/TodoBackend/Todo.fs @@ -20,33 +20,8 @@ module Events = | Snapshotted of Snapshotted interface TypeShape.UnionContract.IUnionContract - module Utf8ArrayCodec = - let codec = FsCodec.NewtonsoftJson.Codec.Create() - - module JsonElementCodec = - open FsCodec.SystemTextJson - open System.Text.Json - - let private encode (options: JsonSerializerOptions) = - fun (evt: Event) -> - match evt with - | Added todo -> "Added", JsonSerializer.SerializeToElement(todo, options) - | Updated todo -> "Updated", JsonSerializer.SerializeToElement(todo, options) - | Deleted deleted -> "Deleted", JsonSerializer.SerializeToElement(deleted, options) - | Cleared -> "Cleared", Unchecked.defaultof - | Snapshotted snapshotted -> "Snapshotted", JsonSerializer.SerializeToElement(snapshotted, options) - - let private tryDecode (options: JsonSerializerOptions) = - fun (eventType, data: JsonElement) -> - match eventType with - | "Added" -> Some (Added <| JsonSerializer.DeserializeElement(data, options)) - | "Updated" -> Some (Updated <| JsonSerializer.DeserializeElement(data, options)) - | "Deleted" -> Some (Deleted <| JsonSerializer.DeserializeElement(data, options)) - | "Cleared" -> Some Cleared - | "Snapshotted" -> Some (Snapshotted <| JsonSerializer.DeserializeElement(data, options)) - | _ -> None - - let codec options = FsCodec.Codec.Create(encode options, tryDecode options) + let codecNewtonsoft = FsCodec.NewtonsoftJson.Codec.Create() + let codecStj = FsCodec.SystemTextJson.Codec.Create() module Fold = type State = { items : Events.Todo list; nextId : int } diff --git a/samples/TodoBackend/TodoBackend.fsproj b/samples/TodoBackend/TodoBackend.fsproj index 
42dc7a64a..c8646fa1e 100644 --- a/samples/TodoBackend/TodoBackend.fsproj +++ b/samples/TodoBackend/TodoBackend.fsproj @@ -1,7 +1,7 @@  - netstandard2.0;net461 + netstandard2.1 5 false true @@ -13,8 +13,7 @@ - - + diff --git a/samples/Tutorial/Gapless.fs b/samples/Tutorial/Gapless.fs index e4d1180bd..68aa82aea 100644 --- a/samples/Tutorial/Gapless.fs +++ b/samples/Tutorial/Gapless.fs @@ -19,30 +19,8 @@ module Events = | Snapshotted of Snapshotted interface TypeShape.UnionContract.IUnionContract - module Utf8ArrayCodec = - let codec = FsCodec.NewtonsoftJson.Codec.Create() - - module JsonElementCodec = - open FsCodec.SystemTextJson - open System.Text.Json - - let private encode (options: JsonSerializerOptions) = fun (evt: Event) -> - match evt with - | Reserved item -> "Reserved", JsonSerializer.SerializeToElement(item, options) - | Confirmed item -> "Confirmed", JsonSerializer.SerializeToElement(item, options) - | Released item -> "Released", JsonSerializer.SerializeToElement(item, options) - | Snapshotted snapshot -> "Snapshotted", JsonSerializer.SerializeToElement(snapshot, options) - - let private tryDecode (options: JsonSerializerOptions) = fun (eventType, data: JsonElement) -> - match eventType with - | "Reserved" -> Some (Reserved <| JsonSerializer.DeserializeElement(data, options)) - | "Confirmed" -> Some (Confirmed <| JsonSerializer.DeserializeElement(data, options)) - | "Released" -> Some (Released <| JsonSerializer.DeserializeElement(data, options)) - | "Snapshotted" -> Some (Snapshotted <| JsonSerializer.DeserializeElement(data, options)) - | _ -> None - - let codec options = FsCodec.Codec.Create(encode options, tryDecode options) - + let codecNewtonsoft = FsCodec.NewtonsoftJson.Codec.Create() + let codecStj = FsCodec.SystemTextJson.Codec.Create() module Fold = @@ -103,12 +81,10 @@ let [] appName = "equinox-tutorial-gapless" module Cosmos = open Equinox.Cosmos - open FsCodec.SystemTextJson.Serialization let private createService 
(context,cache,accessStrategy) = let cacheStrategy = CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) // OR CachingStrategy.NoCaching - let codec = Events.JsonElementCodec.codec JsonSerializer.defaultOptions - let resolve = Resolver(context, codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy).Resolve + let resolve = Resolver(context, Events.codecStj, Fold.fold, Fold.initial, cacheStrategy, accessStrategy).Resolve Service(Serilog.Log.Logger, resolve) module Snapshot = diff --git a/samples/Tutorial/Index.fs b/samples/Tutorial/Index.fs index 6d39b4ca5..9da5af0b0 100644 --- a/samples/Tutorial/Index.fs +++ b/samples/Tutorial/Index.fs @@ -14,27 +14,8 @@ module Events = | Snapshotted of Items<'v> interface TypeShape.UnionContract.IUnionContract - module Utf8ArrayCodec = - let codec<'v> = FsCodec.NewtonsoftJson.Codec.Create>() - - module JsonElementCodec = - open FsCodec.SystemTextJson - open System.Text.Json - - let private encode<'v> (options: JsonSerializerOptions) = fun (evt: Event<'v>) -> - match evt with - | Added items -> "Added", JsonSerializer.SerializeToElement(items, options) - | Deleted itemIds -> "Deleted", JsonSerializer.SerializeToElement(itemIds, options) - | Snapshotted items -> "Snapshotted", JsonSerializer.SerializeToElement(items, options) - - let private tryDecode<'v> (options: JsonSerializerOptions) = fun (eventType, data: JsonElement) -> - match eventType with - | "Added" -> Some (Added <| JsonSerializer.DeserializeElement>(data, options)) - | "Deleted" -> Some (Deleted <| JsonSerializer.DeserializeElement(data, options)) - | "Snapshotted" -> Some (Snapshotted <| JsonSerializer.DeserializeElement>(data, options)) - | _ -> None - - let codec<'v> options = FsCodec.Codec.Create, JsonElement>(encode<'v> options, tryDecode<'v> options) + let codecNewtonsoft<'v> = FsCodec.NewtonsoftJson.Codec.Create>() + let codecStj<'v> = FsCodec.SystemTextJson.Codec.Create>() module Fold = @@ -75,17 +56,15 @@ let create resolve indexId = 
Service(indexId, resolve, maxAttempts = 3) module Cosmos = open Equinox.Cosmos - open FsCodec.SystemTextJson.Serialization let createService<'v> (context,cache) = let cacheStrategy = CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) let accessStrategy = AccessStrategy.RollingState Fold.snapshot - let codec = Events.JsonElementCodec.codec<'v> JsonSerializer.defaultOptions - let resolve = Resolver(context, codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy).Resolve + let resolve = Resolver(context, Events.codecStj, Fold.fold, Fold.initial, cacheStrategy, accessStrategy).Resolve create resolve module MemoryStore = let createService store = - let resolve = Equinox.MemoryStore.Resolver(store, Events.Utf8ArrayCodec.codec, Fold.fold, Fold.initial).Resolve + let resolve = Equinox.MemoryStore.Resolver(store, Events.codecNewtonsoft, Fold.fold, Fold.initial).Resolve create resolve diff --git a/samples/Tutorial/Sequence.fs b/samples/Tutorial/Sequence.fs index 264e26b50..e8931e361 100644 --- a/samples/Tutorial/Sequence.fs +++ b/samples/Tutorial/Sequence.fs @@ -26,23 +26,8 @@ module Events = | Reserved of Reserved interface TypeShape.UnionContract.IUnionContract - module Utf8ArrayCodec = - let codec = FsCodec.NewtonsoftJson.Codec.Create() - - module JsonElementCodec = - open FsCodec.SystemTextJson - open System.Text.Json - - let private encode (options: JsonSerializerOptions) = fun (evt: Event) -> - match evt with - | Reserved reserved -> "Reserved", JsonSerializer.SerializeToElement(reserved, options) - - let private tryDecode (options: JsonSerializerOptions) = fun (eventType, data: JsonElement) -> - match eventType with - | "Reserved" -> Some (Reserved <| JsonSerializer.DeserializeElement(data, options)) - | _ -> None - - let codec options= FsCodec.Codec.Create(encode options, tryDecode options) + let codecNewtonsoft = FsCodec.NewtonsoftJson.Codec.Create() + let codecStj = FsCodec.SystemTextJson.Codec.Create() module Fold = @@ -71,12 +56,10 @@ 
let create resolve = Service(Serilog.Log.ForContext(), resolve, maxAtte module Cosmos = open Equinox.Cosmos - open FsCodec.SystemTextJson.Serialization let private createService (context,cache,accessStrategy) = let cacheStrategy = CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) // OR CachingStrategy.NoCaching - let codec = Events.JsonElementCodec.codec JsonSerializer.defaultOptions - let resolve = Resolver(context, codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy).Resolve + let resolve = Resolver(context, Events.codecStj, Fold.fold, Fold.initial, cacheStrategy, accessStrategy).Resolve create resolve module LatestKnownEvent = diff --git a/samples/Tutorial/Set.fs b/samples/Tutorial/Set.fs index c500df008..f0eb7e4de 100644 --- a/samples/Tutorial/Set.fs +++ b/samples/Tutorial/Set.fs @@ -13,27 +13,8 @@ module Events = | Snapshotted of Items interface TypeShape.UnionContract.IUnionContract - module Utf8ArrayCodec = - let codec = FsCodec.NewtonsoftJson.Codec.Create() - - module JsonElementCodec = - open FsCodec.SystemTextJson - open System.Text.Json - - let private encode (options: JsonSerializerOptions) = fun (evt: Event) -> - match evt with - | Added items -> "Added", JsonSerializer.SerializeToElement(items, options) - | Deleted items -> "Deleted", JsonSerializer.SerializeToElement(items, options) - | Snapshotted items -> "Snapshotted", JsonSerializer.SerializeToElement(items, options) - - let private tryDecode (options: JsonSerializerOptions) = fun (eventType, data: JsonElement) -> - match eventType with - | "Added" -> Some (Added <| JsonSerializer.DeserializeElement(data, options)) - | "Deleted" -> Some (Deleted <| JsonSerializer.DeserializeElement(data, options)) - | "Snapshotted" -> Some (Snapshotted <| JsonSerializer.DeserializeElement(data, options)) - | _ -> None - - let codec options = FsCodec.Codec.Create(encode options, tryDecode options) + let codecNewtonsoft = FsCodec.NewtonsoftJson.Codec.Create() + let codecStj = 
FsCodec.SystemTextJson.Codec.Create() module Fold = @@ -75,17 +56,15 @@ let create resolve setId = Service(Serilog.Log.ForContext(), setId, res module Cosmos = open Equinox.Cosmos - open FsCodec.SystemTextJson.Serialization let createService (context,cache) = let cacheStrategy = CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) let accessStrategy = AccessStrategy.RollingState Fold.snapshot - let codec = Events.JsonElementCodec.codec JsonSerializer.defaultOptions - let resolve = Resolver(context, codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy).Resolve + let resolve = Resolver(context, Events.codecStj, Fold.fold, Fold.initial, cacheStrategy, accessStrategy).Resolve create resolve module MemoryStore = let createService store = - let resolve = Equinox.MemoryStore.Resolver(store, Events.Utf8ArrayCodec.codec, Fold.fold, Fold.initial).Resolve + let resolve = Equinox.MemoryStore.Resolver(store, Events.codecNewtonsoft, Fold.fold, Fold.initial).Resolve create resolve diff --git a/samples/Tutorial/Upload.fs b/samples/Tutorial/Upload.fs index aa80caa5f..b360bd81f 100644 --- a/samples/Tutorial/Upload.fs +++ b/samples/Tutorial/Upload.fs @@ -41,23 +41,8 @@ module Events = | IdAssigned of IdAssigned interface TypeShape.UnionContract.IUnionContract - module Utf8ArrayCodec = - let codec = FsCodec.NewtonsoftJson.Codec.Create() - - module JsonElementCodec = - open FsCodec.SystemTextJson - open System.Text.Json - - let encode (options: JsonSerializerOptions) = fun (evt: Event) -> - match evt with - | IdAssigned id -> "IdAssigned", JsonSerializer.SerializeToElement(id, options) - - let tryDecode (options: JsonSerializerOptions) = fun (eventType, data: JsonElement) -> - match eventType with - | "IdAssigned" -> Some (IdAssigned <| JsonSerializer.DeserializeElement(data, options)) - | _ -> None - - let codec options = FsCodec.Codec.Create(encode options, tryDecode options) + let codecNewtonsoft = FsCodec.NewtonsoftJson.Codec.Create() + let codecStj = 
FsCodec.SystemTextJson.Codec.Create() module Fold = @@ -86,16 +71,14 @@ let create resolve = Service(Serilog.Log.ForContext(), resolve, 3) module Cosmos = open Equinox.Cosmos - open FsCodec.SystemTextJson.Serialization let createService (context,cache) = let cacheStrategy = CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) // OR CachingStrategy.NoCaching - let codec = Events.JsonElementCodec.codec JsonSerializer.defaultOptions - let resolve = Resolver(context, codec, Fold.fold, Fold.initial, cacheStrategy, AccessStrategy.LatestKnownEvent).Resolve + let resolve = Resolver(context, Events.codecStj, Fold.fold, Fold.initial, cacheStrategy, AccessStrategy.LatestKnownEvent).Resolve create resolve module EventStore = open Equinox.EventStore let createService context = - let resolve = Resolver(context, Events.Utf8ArrayCodec.codec, Fold.fold, Fold.initial, access=AccessStrategy.LatestKnownEvent).Resolve + let resolve = Resolver(context, Events.codecNewtonsoft, Fold.fold, Fold.initial, access=AccessStrategy.LatestKnownEvent).Resolve create resolve diff --git a/src/Equinox.Core/Equinox.Core.fsproj b/src/Equinox.Core/Equinox.Core.fsproj index c0e768822..95dde0c5c 100644 --- a/src/Equinox.Core/Equinox.Core.fsproj +++ b/src/Equinox.Core/Equinox.Core.fsproj @@ -17,10 +17,6 @@ - - - - diff --git a/src/Equinox.Core/Json/JsonElementHelpers.fs b/src/Equinox.Core/Json/JsonElementHelpers.fs deleted file mode 100644 index f753f73f8..000000000 --- a/src/Equinox.Core/Json/JsonElementHelpers.fs +++ /dev/null @@ -1,25 +0,0 @@ -namespace FsCodec.SystemTextJson - -open System -open System.Buffers -open System.Runtime.InteropServices -open System.Text.Json - -[] -module JsonSerializerExtensions = - type JsonSerializer with - static member SerializeToElement(value: 'T, [] ?options: JsonSerializerOptions) = - JsonSerializer.Deserialize(ReadOnlySpan.op_Implicit(JsonSerializer.SerializeToUtf8Bytes(value, defaultArg options null))) - - static member DeserializeElement<'T>(element: 
JsonElement, [] ?options: JsonSerializerOptions) = -#if NETSTANDARD2_1 - let bufferWriter = ArrayBufferWriter() - ( - use jsonWriter = new Utf8JsonWriter(bufferWriter) - element.WriteTo(jsonWriter) - ) - JsonSerializer.Deserialize<'T>(bufferWriter.WrittenSpan, defaultArg options null) -#else - let json = element.GetRawText() - JsonSerializer.Deserialize<'T>(json, defaultArg options null) -#endif diff --git a/src/Equinox.Core/Json/JsonRecordConverter.fs b/src/Equinox.Core/Json/JsonRecordConverter.fs deleted file mode 100644 index 079382003..000000000 --- a/src/Equinox.Core/Json/JsonRecordConverter.fs +++ /dev/null @@ -1,161 +0,0 @@ -namespace FsCodec.SystemTextJson.Serialization - -open Equinox.Core -open System -open System.Collections.Generic -open System.Linq -open System.Linq.Expressions -open System.Text.Json -open System.Text.Json.Serialization -open FSharp.Reflection - -type JsonRecordConverterActivator = delegate of JsonSerializerOptions -> JsonConverter - -type IRecordFieldConverter = - abstract member Initialize: converter: JsonConverter -> unit - abstract member Read: reader: byref * typ: Type * options: JsonSerializerOptions -> obj - abstract member Write: writer: Utf8JsonWriter * value: obj * options: JsonSerializerOptions -> unit - -type RecordFieldConverter<'F> () = - let mutable converter = Unchecked.defaultof> - - interface IRecordFieldConverter with - member __.Initialize (c) = - converter <- c :?> JsonConverter<'F> - - member __.Read (reader, typ, options) = - converter.Read(&reader, typ, options) :> obj - - member __.Write (writer, value, options) = - converter.Write(writer, value :?> 'F, options) - -[] -type RecordField = { - name: string - fieldType: Type - index: int - isIgnored: bool - converter: IRecordFieldConverter option -} - -type JsonRecordConverter<'T> (options: JsonSerializerOptions) = - inherit JsonConverter<'T> () - - let recordType = typeof<'T> - - let constructor = FSharpValue.PreComputeRecordConstructor(recordType, true) - let 
getFieldValues = FSharpValue.PreComputeRecordReader(typeof<'T>, true) - - let fields = - FSharpType.GetRecordFields(recordType, true) - |> Array.mapi (fun idx f -> - { - name = - f.GetCustomAttributes(typedefof, true) - |> Array.tryHead - |> Option.map (fun attr -> (attr :?> JsonPropertyNameAttribute).Name) - |> Option.defaultWith (fun () -> - if options.PropertyNamingPolicy |> isNull - then f.Name - else options.PropertyNamingPolicy.ConvertName f.Name) - - fieldType = f.PropertyType - index = idx - isIgnored = f.GetCustomAttributes(typeof, true) |> Array.isEmpty |> not - converter = - f.GetCustomAttributes(typeof, true) - |> Array.tryHead - |> Option.map (fun attr -> attr :?> JsonConverterAttribute) - |> Option.bind (fun attr -> - let baseConverter = attr.CreateConverter(f.PropertyType) - - if baseConverter |> isNull then - failwithf "Field %s is decorated with a JsonConverter attribute, but it does not implement a CreateConverter method." f.Name - - if baseConverter.CanConvert(f.PropertyType) then - let converterType = typedefof>.MakeGenericType(f.PropertyType) - let converter = Activator.CreateInstance(converterType) :?> IRecordFieldConverter - converter.Initialize(baseConverter) - Some converter - else - None - ) - }) - - let fieldsByName = - fields - |> Array.map (fun f -> f.name, f) -#if NETSTANDARD2_1 - |> Array.map KeyValuePair.Create - |> (fun kvp -> Dictionary(kvp, StringComparer.OrdinalIgnoreCase)) -#else - |> Array.map KeyValuePair - |> (fun kvp -> kvp.ToDictionary((fun item -> item.Key), (fun item -> item.Value), StringComparer.OrdinalIgnoreCase)) -#endif - - let tryGetFieldByName name = - match fieldsByName.TryGetValue(name) with - | true, field -> Some field - | _ -> None - - let getFieldByName name = - match tryGetFieldByName name with - | Some field -> field - | _ -> KeyNotFoundException(sprintf "Failed to find a field named '%s' on record type '%s'." 
name recordType.Name) |> raise - - override __.Read (reader, typ, options) = - reader.ValidateTokenType(JsonTokenType.StartObject) - - let fields = Array.zeroCreate <| fields.Length - - while reader.Read() && reader.TokenType <> JsonTokenType.EndObject do - reader.ValidateTokenType(JsonTokenType.PropertyName) - - match tryGetFieldByName <| reader.GetString() with - | Some field -> - fields.[field.index] <- - match field.converter with - | Some converter -> - reader.Read() |> ignore - converter.Read(&reader, field.fieldType, options) - | None -> - JsonSerializer.Deserialize(&reader, field.fieldType, options) - | _ -> - reader.Skip() - - constructor fields :?> 'T - - override __.Write (writer, record, options) = - writer.WriteStartObject() - - let fieldValues = getFieldValues record - - (fields, fieldValues) - ||> Array.iter2 (fun field value -> - match value with - | :? JsonElement as je when je.ValueKind = JsonValueKind.Undefined -> () - | _ -> - if not field.isIgnored && not (options.IgnoreNullValues && isNull value) then - writer.WritePropertyName(field.name) - - match field.converter with - | Some converter -> converter.Write(writer, value, options) - | None -> JsonSerializer.Serialize(writer, value, options)) - - writer.WriteEndObject() - -type JsonRecordConverter () = - inherit JsonConverterFactory() - - override __.CanConvert typ = - FSharpType.IsRecord (typ, true) - - override __.CreateConverter (typ, options) = - let constructor = typedefof>.MakeGenericType(typ).GetConstructor(typeof |> Array.singleton) - let optionsParameter = Expression.Parameter(typeof, "options") - - let newExpression = Expression.New(constructor, optionsParameter) - let lambda = Expression.Lambda(typeof, newExpression, optionsParameter) - - let activator = lambda.Compile() :?> JsonRecordConverterActivator - activator.Invoke(options) diff --git a/src/Equinox.Core/Json/Options.fs b/src/Equinox.Core/Json/Options.fs deleted file mode 100644 index 6867c76f6..000000000 --- 
a/src/Equinox.Core/Json/Options.fs +++ /dev/null @@ -1,14 +0,0 @@ -namespace FsCodec.SystemTextJson.Serialization - -open System.Text.Json - -[] -module JsonSerializerOptionExtensions = - type JsonSerializerOptions with - static member Create() = - let options = JsonSerializerOptions() - options.Converters.Add(new JsonRecordConverter()) - options - -module JsonSerializer = - let defaultOptions = JsonSerializerOptions.Create() diff --git a/src/Equinox.Core/Json/Utf8JsonReaderExtensions.fs b/src/Equinox.Core/Json/Utf8JsonReaderExtensions.fs deleted file mode 100644 index 9e29bb5d3..000000000 --- a/src/Equinox.Core/Json/Utf8JsonReaderExtensions.fs +++ /dev/null @@ -1,22 +0,0 @@ -namespace FsCodec.SystemTextJson.Serialization - -open System.Text.Json -open System.Runtime.CompilerServices - -[] -type Utf8JsonReaderExtension = - [] - static member ValidateTokenType(reader: Utf8JsonReader, expectedTokenType) = - if reader.TokenType <> expectedTokenType then - sprintf "Expected a %A token, but encountered a %A token when parsing JSON." expectedTokenType (reader.TokenType) - |> JsonException - |> raise - - [] - static member ValidatePropertyName(reader: Utf8JsonReader, expectedPropertyName: string) = - reader.ValidateTokenType(JsonTokenType.PropertyName) - - if not <| reader.ValueTextEquals expectedPropertyName then - sprintf "Expected a property named '%s', but encountered property with name '%s'." expectedPropertyName (reader.GetString()) - |> JsonException - |> raise diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index 7dbd01ef8..e63a92eed 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -30,7 +30,7 @@ type [] // TODO for STJ v5: All fields required unless /// Optional causationId causationId : string // TODO for STJ v5: Optional, not serialized if missing - } + } interface IEventData with member __.EventType = __.c @@ -749,7 +749,7 @@ module internal Tip = let! 
page = retryingLoggingReadPage e batchLog match page with - | Some (evts, _pos, rus) -> + | Some (evts, _pos, rus) -> ru <- ru + rus allEvents.AddRange(evts) @@ -798,7 +798,6 @@ open Equinox open Equinox.Core open Equinox.Cosmos.Store open FsCodec -open FsCodec.SystemTextJson.Serialization open FSharp.Control open Serilog open System @@ -1141,7 +1140,10 @@ type Connector /// ClientOptions for this Connector as configured member val ClientOptions = let maxAttempts, maxWait, timeout = Nullable maxRetryAttemptsOnRateLimitedRequests, Nullable maxRetryWaitTimeOnRateLimitedRequests, requestTimeout - let co = CosmosClientOptions(MaxRetryAttemptsOnRateLimitedRequests = maxAttempts, MaxRetryWaitTimeOnRateLimitedRequests = maxWait, RequestTimeout = timeout, Serializer = CosmosJsonSerializer(JsonSerializer.defaultOptions)) + let co = + CosmosClientOptions( + MaxRetryAttemptsOnRateLimitedRequests = maxAttempts, MaxRetryWaitTimeOnRateLimitedRequests = maxWait, RequestTimeout = timeout, + Serializer = CosmosJsonSerializer(FsCodec.SystemTextJson.Options.CreateDefault(converters=[|FsCodec.SystemTextJson.Converters.JsonRecordConverter()|]))) match mode with | Some ConnectionMode.Direct -> co.ConnectionMode <- ConnectionMode.Direct | None | Some ConnectionMode.Gateway | Some _ (* enum total match :( *) -> co.ConnectionMode <- ConnectionMode.Gateway // default; only supports Https diff --git a/src/Equinox.Cosmos/Equinox.Cosmos.fsproj b/src/Equinox.Cosmos/Equinox.Cosmos.fsproj index 4659eff70..0e29ccdf6 100644 --- a/src/Equinox.Cosmos/Equinox.Cosmos.fsproj +++ b/src/Equinox.Cosmos/Equinox.Cosmos.fsproj @@ -1,7 +1,7 @@  - netstandard2.1 + netstandard2.1 5 false true @@ -23,13 +23,13 @@ - + + - \ No newline at end of file diff --git a/src/Equinox.EventStore/Equinox.EventStore.fsproj b/src/Equinox.EventStore/Equinox.EventStore.fsproj index 15847dde4..20699c979 100644 --- a/src/Equinox.EventStore/Equinox.EventStore.fsproj +++ b/src/Equinox.EventStore/Equinox.EventStore.fsproj @@ -26,7 
+26,7 @@ - + diff --git a/src/Equinox.MemoryStore/Equinox.MemoryStore.fsproj b/src/Equinox.MemoryStore/Equinox.MemoryStore.fsproj index 4895d6ccb..26c6cfefe 100644 --- a/src/Equinox.MemoryStore/Equinox.MemoryStore.fsproj +++ b/src/Equinox.MemoryStore/Equinox.MemoryStore.fsproj @@ -24,7 +24,7 @@ - + \ No newline at end of file diff --git a/src/Equinox.SqlStreamStore/Equinox.SqlStreamStore.fsproj b/src/Equinox.SqlStreamStore/Equinox.SqlStreamStore.fsproj index e7addd071..724ef3d28 100644 --- a/src/Equinox.SqlStreamStore/Equinox.SqlStreamStore.fsproj +++ b/src/Equinox.SqlStreamStore/Equinox.SqlStreamStore.fsproj @@ -24,7 +24,7 @@ - + diff --git a/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs b/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs index 0e0e6138d..77d2b97b6 100644 --- a/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs +++ b/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs @@ -16,8 +16,8 @@ type TestEvents() = static member private Create(i, ?eventType, ?json) = EventData.FromUtf8Bytes ( sprintf "%s:%d" (defaultArg eventType "test_event") i, - IntegrationJsonSerializer.deserialize(defaultArg json "{\"d\":\"d\"}"), - IntegrationJsonSerializer.deserialize("{\"m\":\"m\"}") ) + FsCodec.NewtonsoftJson.Serdes.Deserialize(defaultArg json "{\"d\":\"d\"}"), + FsCodec.NewtonsoftJson.Serdes.Deserialize("{\"m\":\"m\"}")) static member Create(i, c) = Array.init c (fun x -> TestEvents.Create(x+i)) type Tests(testOutputHelper) = diff --git a/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs b/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs index b74282e46..c6c7fd8b1 100644 --- a/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs +++ b/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs @@ -11,7 +11,7 @@ open System.Threading module Cart = let fold, initial = Domain.Cart.Fold.fold, Domain.Cart.Fold.initial let snapshot = Domain.Cart.Fold.isOrigin, Domain.Cart.Fold.snapshot - let codec = 
Domain.Cart.Events.JsonElementCodec.codec IntegrationJsonSerializer.options + let codec = Domain.Cart.Events.codecStj let createServiceWithoutOptimization connection batchSize log = let store = createCosmosContext connection batchSize let resolve (id,opt) = Resolver(store, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.Unoptimized).Resolve(id,?option=opt) @@ -40,7 +40,7 @@ module Cart = module ContactPreferences = let fold, initial = Domain.ContactPreferences.Fold.fold, Domain.ContactPreferences.Fold.initial - let codec = Domain.ContactPreferences.Events.JsonElementCodec.codec IntegrationJsonSerializer.options + let codec = Domain.ContactPreferences.Events.codecStj let createServiceWithoutOptimization createGateway defaultBatchSize log _ignoreWindowSize _ignoreCompactionPredicate = let gateway = createGateway defaultBatchSize let resolve = Resolver(gateway, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.Unoptimized).Resolve diff --git a/tests/Equinox.Cosmos.Integration/Json.fs b/tests/Equinox.Cosmos.Integration/Json.fs index 398679399..80a5c976a 100644 --- a/tests/Equinox.Cosmos.Integration/Json.fs +++ b/tests/Equinox.Cosmos.Integration/Json.fs @@ -2,13 +2,10 @@ module Equinox.Cosmos.Integration.Json open System -open System.Text.Json open System.Text.Json.Serialization open Domain -open FsCodec.SystemTextJson -open FsCodec.SystemTextJson.Serialization -type JsonSkuIdConverter () = +type JsonSkuIdConverter() = inherit JsonConverter() override __.Read (reader, _typ, _options) = @@ -18,10 +15,5 @@ type JsonSkuIdConverter () = writer.WriteStringValue(string value) module IntegrationJsonSerializer = - let options = JsonSerializer.defaultOptions - options.Converters.Add(JsonSkuIdConverter()) - - let serialize (value: 'T) = JsonSerializer.Serialize(value, options) - let serializeToElement (value: 'T) = JsonSerializer.SerializeToElement(value, options) - let deserialize<'T> (json: string) = JsonSerializer.Deserialize<'T>(json, options) - 
let deserializeElement<'T> (jsonElement: JsonElement) = JsonSerializer.DeserializeElement<'T>(jsonElement, options) + let options = FsCodec.SystemTextJson.Options.Create() + options.Converters.Add <| JsonSkuIdConverter() diff --git a/tests/Equinox.Cosmos.Integration/JsonConverterTests.fs b/tests/Equinox.Cosmos.Integration/JsonConverterTests.fs index 7bcbecb31..ecbcf6278 100644 --- a/tests/Equinox.Cosmos.Integration/JsonConverterTests.fs +++ b/tests/Equinox.Cosmos.Integration/JsonConverterTests.fs @@ -13,21 +13,10 @@ type Union = | B of Embedded interface TypeShape.UnionContract.IUnionContract -let defaultSettings = FsCodec.NewtonsoftJson.Settings.CreateDefault() - -let encode (evt: Union) = - match evt with - | A e -> "A", IntegrationJsonSerializer.serializeToElement(e) - | B e -> "B", IntegrationJsonSerializer.serializeToElement(e) - -let tryDecode (eventType, data: JsonElement) = - match eventType with - | "A" -> Some (A <| IntegrationJsonSerializer.deserializeElement(data)) - | "B" -> Some (B <| IntegrationJsonSerializer.deserializeElement(data)) - | _ -> None +let defaultOptions = FsCodec.SystemTextJson.Options.CreateDefault() type Base64ZipUtf8Tests() = - let eventCodec = FsCodec.Codec.Create(encode, tryDecode) + let eventCodec = FsCodec.SystemTextJson.Codec.Create(defaultOptions) [] let ``serializes, achieving compression`` () = @@ -38,7 +27,7 @@ type Base64ZipUtf8Tests() = d = encoded.Data m = Unchecked.defaultof t = DateTimeOffset.MinValue } - let res = IntegrationJsonSerializer.serialize(e) + let res = FsCodec.SystemTextJson.Serdes.Serialize(e, defaultOptions) test <@ res.Contains("\"d\":\"") && res.Length < 138 @> [] @@ -56,9 +45,9 @@ type Base64ZipUtf8Tests() = d = encoded.Data m = Unchecked.defaultof t = DateTimeOffset.MinValue } - let ser = IntegrationJsonSerializer.serialize(e) + let ser = FsCodec.SystemTextJson.Serdes.Serialize(e, defaultOptions) test <@ ser.Contains("\"d\":\"") @> - let des = IntegrationJsonSerializer.deserialize(ser) + let des = 
FsCodec.SystemTextJson.Serdes.Deserialize(ser, defaultOptions) let d = FsCodec.Core.TimelineEvent.Create(-1L, des.c, des.d) let decoded = eventCodec.TryDecode d |> Option.get test <@ value = decoded @> diff --git a/tests/Equinox.EventStore.Integration/Equinox.EventStore.Integration.fsproj b/tests/Equinox.EventStore.Integration/Equinox.EventStore.Integration.fsproj index 746cf3153..06db62a14 100644 --- a/tests/Equinox.EventStore.Integration/Equinox.EventStore.Integration.fsproj +++ b/tests/Equinox.EventStore.Integration/Equinox.EventStore.Integration.fsproj @@ -22,7 +22,7 @@ - + diff --git a/tests/Equinox.EventStore.Integration/StoreIntegration.fs b/tests/Equinox.EventStore.Integration/StoreIntegration.fs index f2e4c76c2..29c8aff0f 100644 --- a/tests/Equinox.EventStore.Integration/StoreIntegration.fs +++ b/tests/Equinox.EventStore.Integration/StoreIntegration.fs @@ -48,10 +48,10 @@ let createGesGateway connection batchSize = Context(connection, BatchingPolicy(m module Cart = let fold, initial = Domain.Cart.Fold.fold, Domain.Cart.Fold.initial - let codec = Domain.Cart.Events.Utf8ArrayCodec.codec + let codec = Domain.Cart.Events.codecNewtonsoft let snapshot = Domain.Cart.Fold.isOrigin, Domain.Cart.Fold.snapshot let createServiceWithoutOptimization log gateway = - Backend.Cart.Service(log, fun (id,opt) -> Resolver(gateway, Domain.Cart.Events.Utf8ArrayCodec.codec, fold, initial).Resolve(id,?option=opt)) + Backend.Cart.Service(log, fun (id,opt) -> Resolver(gateway, Domain.Cart.Events.codecNewtonsoft, fold, initial).Resolve(id,?option=opt)) let createServiceWithCompaction log gateway = let resolve (id,opt) = Resolver(gateway, codec, fold, initial, access = AccessStrategy.RollingSnapshots snapshot).Resolve(id,?option=opt) Backend.Cart.Service(log, resolve) @@ -64,7 +64,7 @@ module Cart = module ContactPreferences = let fold, initial = Domain.ContactPreferences.Fold.fold, Domain.ContactPreferences.Fold.initial - let codec = 
Domain.ContactPreferences.Events.Utf8ArrayCodec.codec + let codec = Domain.ContactPreferences.Events.codecNewtonsoft let createServiceWithoutOptimization log connection = let gateway = createGesGateway connection defaultBatchSize Backend.ContactPreferences.Service(log, Resolver(gateway, codec, fold, initial).Resolve) From 28499e6378fc2a25ddafd9b830390e2ab2cd058d Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Fri, 13 Mar 2020 12:40:52 -0400 Subject: [PATCH 45/71] Remove net461 target from tests --- Directory.Build.props | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Directory.Build.props b/Directory.Build.props index 9aafb25f4..8b7772af5 100644 --- a/Directory.Build.props +++ b/Directory.Build.props @@ -9,7 +9,7 @@ Copyright © 2016-20 - netcoreapp3.1;net461 + netcoreapp3.1 netcoreapp3.1 $([System.IO.Path]::GetFullPath("$(MSBuildThisFileDirectory)")) From a2d742f9a4238a847b21e5bebc3efe167d0a9b65 Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Tue, 17 Mar 2020 13:19:41 -0400 Subject: [PATCH 46/71] Fix various test issues --- samples/Store/Domain/Cart.fs | 2 +- samples/Store/Domain/ContactPreferences.fs | 2 +- tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs | 4 ++-- tests/Equinox.Cosmos.Integration/CosmosIntegration.fs | 4 ++-- tests/Equinox.Cosmos.Integration/JsonConverterTests.fs | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/samples/Store/Domain/Cart.fs b/samples/Store/Domain/Cart.fs index 4fbf905f1..933fb6e4f 100644 --- a/samples/Store/Domain/Cart.fs +++ b/samples/Store/Domain/Cart.fs @@ -26,7 +26,7 @@ module Events = interface TypeShape.UnionContract.IUnionContract let codecNewtonsoft = FsCodec.NewtonsoftJson.Codec.Create() - let codecStj = FsCodec.SystemTextJson.Codec.Create() + let codecStj options = FsCodec.SystemTextJson.Codec.Create(options = options) module Fold = diff --git a/samples/Store/Domain/ContactPreferences.fs b/samples/Store/Domain/ContactPreferences.fs index d47cd3cc5..dcf4d7c8e 100644 --- 
a/samples/Store/Domain/ContactPreferences.fs +++ b/samples/Store/Domain/ContactPreferences.fs @@ -14,7 +14,7 @@ module Events = interface TypeShape.UnionContract.IUnionContract let codecNewtonsoft = FsCodec.NewtonsoftJson.Codec.Create() - let codecStj = FsCodec.SystemTextJson.Codec.Create() + let codecStj options = FsCodec.SystemTextJson.Codec.Create(options = options) module Fold = diff --git a/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs b/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs index 77d2b97b6..3fb5cdfcc 100644 --- a/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs +++ b/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs @@ -16,8 +16,8 @@ type TestEvents() = static member private Create(i, ?eventType, ?json) = EventData.FromUtf8Bytes ( sprintf "%s:%d" (defaultArg eventType "test_event") i, - FsCodec.NewtonsoftJson.Serdes.Deserialize(defaultArg json "{\"d\":\"d\"}"), - FsCodec.NewtonsoftJson.Serdes.Deserialize("{\"m\":\"m\"}")) + FsCodec.SystemTextJson.Serdes.Deserialize(defaultArg json "{\"d\":\"d\"}"), + FsCodec.SystemTextJson.Serdes.Deserialize("{\"m\":\"m\"}")) static member Create(i, c) = Array.init c (fun x -> TestEvents.Create(x+i)) type Tests(testOutputHelper) = diff --git a/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs b/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs index a3fabec3b..561ce65e2 100644 --- a/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs +++ b/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs @@ -11,7 +11,7 @@ open System.Threading module Cart = let fold, initial = Domain.Cart.Fold.fold, Domain.Cart.Fold.initial let snapshot = Domain.Cart.Fold.isOrigin, Domain.Cart.Fold.snapshot - let codec = Domain.Cart.Events.codecStj + let codec = Domain.Cart.Events.codecStj IntegrationJsonSerializer.options let createServiceWithoutOptimization connection batchSize log = let store = createCosmosContext connection batchSize let resolve (id,opt) = Resolver(store, codec, fold, 
initial, CachingStrategy.NoCaching, AccessStrategy.Unoptimized).Resolve(id,?option=opt) @@ -40,7 +40,7 @@ module Cart = module ContactPreferences = let fold, initial = Domain.ContactPreferences.Fold.fold, Domain.ContactPreferences.Fold.initial - let codec = Domain.ContactPreferences.Events.codecStj + let codec = Domain.ContactPreferences.Events.codecStj IntegrationJsonSerializer.options let createServiceWithoutOptimization createGateway defaultBatchSize log _ignoreWindowSize _ignoreCompactionPredicate = let gateway = createGateway defaultBatchSize let resolver = Resolver(gateway, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.Unoptimized) diff --git a/tests/Equinox.Cosmos.Integration/JsonConverterTests.fs b/tests/Equinox.Cosmos.Integration/JsonConverterTests.fs index ecbcf6278..61a2cb3ee 100644 --- a/tests/Equinox.Cosmos.Integration/JsonConverterTests.fs +++ b/tests/Equinox.Cosmos.Integration/JsonConverterTests.fs @@ -13,7 +13,7 @@ type Union = | B of Embedded interface TypeShape.UnionContract.IUnionContract -let defaultOptions = FsCodec.SystemTextJson.Options.CreateDefault() +let defaultOptions = FsCodec.SystemTextJson.Options.Create() type Base64ZipUtf8Tests() = let eventCodec = FsCodec.SystemTextJson.Codec.Create(defaultOptions) From 2f3cfaab283197f795663c5e162eb67e5118fcb6 Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Tue, 17 Mar 2020 13:31:52 -0400 Subject: [PATCH 47/71] Provide options --- samples/Store/Integration/CartIntegration.fs | 2 +- .../Integration/ContactPreferencesIntegration.fs | 2 +- tools/Equinox.Tool/Properties/launchSettings.json | 13 +++++++++++++ 3 files changed, 15 insertions(+), 2 deletions(-) create mode 100644 tools/Equinox.Tool/Properties/launchSettings.json diff --git a/samples/Store/Integration/CartIntegration.fs b/samples/Store/Integration/CartIntegration.fs index cb7b4ed73..87fbc9c3c 100644 --- a/samples/Store/Integration/CartIntegration.fs +++ b/samples/Store/Integration/CartIntegration.fs @@ -23,7 +23,7 @@ let 
resolveGesStreamWithRollingSnapshots gateway = let resolveGesStreamWithoutCustomAccessStrategy gateway = fun (id,opt) -> EventStore.Resolver(gateway, eventStoreCodec, fold, initial).Resolve(id,?option=opt) -let cosmosCodec = Domain.Cart.Events.codecStj +let cosmosCodec = Domain.Cart.Events.codecStj (FsCodec.SystemTextJson.Options.Create()) let resolveCosmosStreamWithSnapshotStrategy gateway = fun (id,opt) -> Cosmos.Resolver(gateway, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.Snapshot snapshot).Resolve(id,?option=opt) let resolveCosmosStreamWithoutCustomAccessStrategy gateway = diff --git a/samples/Store/Integration/ContactPreferencesIntegration.fs b/samples/Store/Integration/ContactPreferencesIntegration.fs index b8a53981a..0751eb3b3 100644 --- a/samples/Store/Integration/ContactPreferencesIntegration.fs +++ b/samples/Store/Integration/ContactPreferencesIntegration.fs @@ -19,7 +19,7 @@ let resolveStreamGesWithOptimizedStorageSemantics gateway = let resolveStreamGesWithoutAccessStrategy gateway = EventStore.Resolver(gateway defaultBatchSize, eventStoreCodec, fold, initial).Resolve -let cosmosCodec = Domain.ContactPreferences.Events.codecStj +let cosmosCodec = Domain.ContactPreferences.Events.codecStj (FsCodec.SystemTextJson.Options.Create()) let resolveStreamCosmosWithLatestKnownEventSemantics gateway = Cosmos.Resolver(gateway 1, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.LatestKnownEvent).Resolve let resolveStreamCosmosUnoptimized gateway = diff --git a/tools/Equinox.Tool/Properties/launchSettings.json b/tools/Equinox.Tool/Properties/launchSettings.json new file mode 100644 index 000000000..3d969fc8e --- /dev/null +++ b/tools/Equinox.Tool/Properties/launchSettings.json @@ -0,0 +1,13 @@ +{ + "profiles": { + "Equinox.Tool": { + "commandName": "Project", + "commandLineArgs": "init -ru 400 cosmos", + "environmentVariables": { + "EQUINOX_COSMOS_CONNECTION": 
"AccountEndpoint=https://localhost:8081/;AccountKey=C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw==", + "EQUINOX_COSMOS_CONTAINER": "equinox-master", + "EQUINOX_COSMOS_DATABASE": "equinox-master" + } + } + } +} \ No newline at end of file From a188f69bc02ef5c7015d6e8398d90456f530413f Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Thu, 19 Mar 2020 11:11:34 -0400 Subject: [PATCH 48/71] Tip compression toggle through optional argument (#206) * Add optional compress flag to toggle tip compression --- src/Equinox.Core/Stream.fs | 6 +- src/Equinox.Core/Types.fs | 2 +- src/Equinox.Cosmos/Cosmos.fs | 78 ++++++++++++------- src/Equinox.EventStore/EventStore.fs | 12 +-- src/Equinox.MemoryStore/MemoryStore.fs | 4 +- src/Equinox.SqlStreamStore/SqlStreamStore.fs | 12 +-- .../JsonConverterTests.fs | 19 +---- 7 files changed, 73 insertions(+), 60 deletions(-) diff --git a/src/Equinox.Core/Stream.fs b/src/Equinox.Core/Stream.fs index a69a9b079..f75fdba37 100755 --- a/src/Equinox.Core/Stream.fs +++ b/src/Equinox.Core/Stream.fs @@ -2,15 +2,15 @@ module Equinox.Core.Stream /// Represents a specific stream in a ICategory -type private Stream<'event, 'state, 'streamId, 'context>(category : ICategory<'event, 'state, 'streamId, 'context>, streamId: 'streamId, opt, context) = +type private Stream<'event, 'state, 'streamId, 'context>(category : ICategory<'event, 'state, 'streamId, 'context>, streamId: 'streamId, opt, context, compress) = interface IStream<'event, 'state> with member __.Load log = category.Load(log, streamId, opt) member __.TrySync(log: Serilog.ILogger, token: StreamToken, originState: 'state, events: 'event list) = - category.TrySync(log, token, originState, events, context) + category.TrySync(log, token, originState, events, context, compress) -let create (category : ICategory<'event, 'state, 'streamId, 'context>) streamId opt context : IStream<'event, 'state> = Stream(category, streamId, opt, context) :> _ +let create 
(category : ICategory<'event, 'state, 'streamId, 'context>) streamId opt context compress : IStream<'event, 'state> = Stream(category, streamId, opt, context, compress) :> _ /// Handles case where some earlier processing has loaded or determined a the state of a stream, allowing us to avoid a read roundtrip type private InitializedStream<'event, 'state>(inner : IStream<'event, 'state>, memento : StreamToken * 'state) = diff --git a/src/Equinox.Core/Types.fs b/src/Equinox.Core/Types.fs index 37f3c470a..cd6aa1b63 100755 --- a/src/Equinox.Core/Types.fs +++ b/src/Equinox.Core/Types.fs @@ -15,7 +15,7 @@ type ICategory<'event, 'state, 'streamId, 'context> = /// - Conflict: signifies the sync failed, and the proposed decision hence needs to be reconsidered in light of the supplied conflicting Stream State /// NB the central precondition upon which the sync is predicated is that the stream has not diverged from the `originState` represented by `token` /// where the precondition is not met, the SyncResult.Conflict bears a [lazy] async result (in a specific manner optimal for the store) - abstract TrySync : log: ILogger * StreamToken * 'state * events: 'event list * 'context option -> Async> + abstract TrySync : log: ILogger * StreamToken * 'state * events: 'event list * 'context option * compress: bool -> Async> /// Represents a time measurement of a computation that includes stopwatch tick metadata [] diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index 2aa2e1546..7012faf03 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -79,8 +79,19 @@ type [] // TODO for STJ v5: All fields required unless type JsonCompressedBase64Converter() = inherit JsonConverter() + static member Compress (value: JsonElement) = + if value.ValueKind = JsonValueKind.Null || value.ValueKind = JsonValueKind.Undefined then + value + else + let input = System.Text.Encoding.UTF8.GetBytes(value.GetRawText()) + use output = new MemoryStream() + use 
compressor = new System.IO.Compression.DeflateStream(output, System.IO.Compression.CompressionLevel.Optimal) + compressor.Write(input, 0, input.Length) + compressor.Close() + JsonDocument.Parse("\"" + System.Convert.ToBase64String(output.ToArray()) + "\"").RootElement + override __.Read (reader, _typeToConvert, options) = - if reader.TokenType = JsonTokenType.Null then + if reader.TokenType <> JsonTokenType.String then JsonSerializer.Deserialize(&reader, options) else let compressedBytes = reader.GetBytesFromBase64() @@ -90,16 +101,8 @@ type JsonCompressedBase64Converter() = decompressor.CopyTo(output) JsonSerializer.Deserialize(ReadOnlySpan.op_Implicit(output.ToArray()), options) - override __.Write (writer, value, _options) = - if value.ValueKind = JsonValueKind.Null || value.ValueKind = JsonValueKind.Undefined then - writer.WriteNullValue() - else - let input = System.Text.Encoding.UTF8.GetBytes(value.GetRawText()) - use output = new MemoryStream() - use compressor = new System.IO.Compression.DeflateStream(output, System.IO.Compression.CompressionLevel.Optimal) - compressor.Write(input, 0, input.Length) - compressor.Close() - writer.WriteBase64StringValue(ReadOnlySpan.op_Implicit(output.ToArray())) + override __.Write (writer, value, options) = + JsonSerializer.Serialize(writer, value, options) type JsonCompressedBase64ConverterAttribute () = inherit JsonConverterAttribute(typeof) @@ -536,12 +539,23 @@ module Sync = let batch (log : ILogger) retryPolicy containerStream batch: Async = let call = logged containerStream batch Log.withLoggedRetries retryPolicy "writeAttempt" call log + let mkBatch (stream: string) (events: IEventData<_>[]) unfolds: Tip = { p = stream; id = Tip.WellKnownDocumentId; n = -1L(*Server-managed*); i = -1L(*Server-managed*); _etag = null e = [| for e in events -> { t = e.Timestamp; c = e.EventType; d = e.Data; m = e.Meta; correlationId = e.CorrelationId; causationId = e.CausationId } |] u = Array.ofSeq unfolds } - let mkUnfold baseIndex 
(unfolds: IEventData<_> seq) : Unfold seq = - unfolds |> Seq.mapi (fun offset x -> { i = baseIndex + int64 offset; c = x.EventType; d = x.Data; m = x.Meta; t = DateTimeOffset.UtcNow } : Unfold) + + let mkUnfold compress baseIndex (unfolds: IEventData<_> seq) : Unfold seq = + let compressor = if compress then JsonCompressedBase64Converter.Compress else id + unfolds + |> Seq.mapi (fun offset x -> + { + i = baseIndex + int64 offset + c = x.EventType + d = compressor x.Data + m = compressor x.Meta + t = DateTimeOffset.UtcNow + } : Unfold) module internal Tip = let private get (container : EquinoxCosmosClient, stream : string) (maybePos: Position option) = @@ -891,7 +905,7 @@ type private Category<'event, 'state, 'context>(gateway : Gateway, codec : IEven match res with | LoadFromTokenResult.Unchanged -> return current | LoadFromTokenResult.Found (token', events') -> return token', fold state events' } - member __.Sync(Token.Unpack (container,stream,pos), state as current, events, mapUnfolds, fold, isOrigin, log, context): Async> = async { + member __.Sync(Token.Unpack (container,stream,pos), state as current, events, mapUnfolds, fold, isOrigin, compress, log, context): Async> = async { let state' = fold state (Seq.ofList events) let encode e = codec.Encode(context,e) let exp,events,eventsEncoded,projectionsEncoded = @@ -902,7 +916,7 @@ type private Category<'event, 'state, 'context>(gateway : Gateway, codec : IEven let events', unfolds = transmute events state' Sync.Exp.Etag (defaultArg pos.etag null), events', Seq.map encode events' |> Array.ofSeq, Seq.map encode unfolds let baseIndex = pos.index + int64 (List.length events) - let projections = Sync.mkUnfold baseIndex projectionsEncoded + let projections = Sync.mkUnfold compress baseIndex projectionsEncoded let batch = Sync.mkBatch stream eventsEncoded projections let! 
res = gateway.Sync log (container,stream) (exp,batch) match res with @@ -922,9 +936,9 @@ module Caching = interface ICategory<'event, 'state, EquinoxCosmosClient*string, 'context> with member __.Load(log, (container,streamName), opt) : Async = loadAndIntercept (inner.Load(log, (container,streamName), opt)) streamName - member __.TrySync(log : ILogger, (Token.Unpack (_container,stream,_) as streamToken), state, events : 'event list, context) + member __.TrySync(log : ILogger, (Token.Unpack (_container,stream,_) as streamToken), state, events : 'event list, context, compress) : Async> = async { - let! syncRes = inner.TrySync(log, streamToken, state, events, context) + let! syncRes = inner.TrySync(log, streamToken, state, events, context, compress) match syncRes with | SyncResult.Conflict resync -> return SyncResult.Conflict(loadAndIntercept resync stream) | SyncResult.Written(token', state') -> @@ -958,9 +972,9 @@ type private Folder<'event, 'state, 'context> | None -> return! batched log (container,streamName) | Some tokenAndState when opt = Some AllowStale -> return tokenAndState | Some tokenAndState -> return! category.LoadFromToken tokenAndState fold isOrigin log } - member __.TrySync(log : ILogger, streamToken, state, events : 'event list, context) + member __.TrySync(log : ILogger, streamToken, state, events : 'event list, context, compress) : Async> = async { - let! res = category.Sync((streamToken,state), events, mapUnfolds, fold, isOrigin, log, context) + let! 
res = category.Sync((streamToken,state), events, mapUnfolds, fold, isOrigin, compress, log, context) match res with | SyncResult.Conflict resync -> return SyncResult.Conflict resync | SyncResult.Written (token',state') -> return SyncResult.Written (token',state') } @@ -1109,28 +1123,38 @@ type Resolver<'event, 'state, 'context>(context : Context, codec, fold, initial, | CachingStrategy.SlidingWindow(cache, window) -> Caching.applyCacheUpdatesWithSlidingExpiration cache null window folder - let resolveStream (streamId, maybeContainerInitializationGate) opt context = + let resolveStream (streamId, maybeContainerInitializationGate) opt context compress = { new IStream<'event, 'state> with member __.Load log = category.Load(log, streamId, opt) member __.TrySync(log: ILogger, token: StreamToken, originState: 'state, events: 'event list) = match maybeContainerInitializationGate with - | None -> category.TrySync(log, token, originState, events, context) + | None -> category.TrySync(log, token, originState, events, context, compress) | Some init -> async { do! init () - return! category.TrySync(log, token, originState, events, context) } } + return! 
category.TrySync(log, token, originState, events, context, compress) } } let resolveTarget = function | StreamName.CategoryAndId (categoryName, streamId) -> context.ResolveContainerStream(categoryName, streamId) - member __.Resolve(streamName : StreamName, []?option, []?context) = + member __.Resolve + ( streamName : StreamName, + []?option, + []?context, + /// Determines whether the data and metadata payloads of the `u`nfolds in the Tip document are base64 encoded and compressed; defaults to true + []?compress) = + let compress = defaultArg compress true match resolveTarget streamName, option with - | streamArgs,(None|Some AllowStale) -> resolveStream streamArgs option context + | streamArgs,(None|Some AllowStale) -> resolveStream streamArgs option context compress | (containerStream,maybeInit),Some AssumeEmpty -> - Stream.ofMemento (Token.create containerStream Position.fromKnownEmpty,initial) (resolveStream (containerStream,maybeInit) option context) + Stream.ofMemento (Token.create containerStream Position.fromKnownEmpty,initial) (resolveStream (containerStream,maybeInit) option context compress) - member __.FromMemento(Token.Unpack (container,stream,_pos) as streamToken,state) = + member __.FromMemento + ( Token.Unpack (container,stream,_pos) as streamToken, + state, + /// Determines whether the data and metadata payloads of the `u`nfolds in the Tip document are base64 encoded and compressed; defaults to true + []?compress) = let skipInitialization = None - Stream.ofMemento (streamToken,state) (resolveStream ((container,stream),skipInitialization) None None) + Stream.ofMemento (streamToken,state) (resolveStream ((container,stream),skipInitialization) None None (defaultArg compress true)) [] type Discovery = diff --git a/src/Equinox.EventStore/EventStore.fs b/src/Equinox.EventStore/EventStore.fs index feef7ff7a..df9baf81b 100755 --- a/src/Equinox.EventStore/EventStore.fs +++ b/src/Equinox.EventStore/EventStore.fs @@ -517,8 +517,8 @@ module Caching = member 
__.Load(log, streamName : string, opt) : Async = loadAndIntercept (inner.Load(log, streamName, opt)) streamName - member __.TrySync(log : ILogger, (Token.StreamPos (stream,_) as token), state, events : 'event list, context) : Async> = async { - let! syncRes = inner.TrySync(log, token, state, events, context) + member __.TrySync(log : ILogger, (Token.StreamPos (stream,_) as token), state, events : 'event list, context, compress) : Async> = async { + let! syncRes = inner.TrySync(log, token, state, events, context, compress) match syncRes with | SyncResult.Conflict resync -> return SyncResult.Conflict (loadAndIntercept resync stream.name) | SyncResult.Written (token', state') -> @@ -548,7 +548,7 @@ type private Folder<'event, 'state, 'context>(category : Category<'event, 'state | Some tokenAndState when opt = Some AllowStale -> return tokenAndState | Some (token, state) -> return! category.LoadFromToken fold state streamName token log } - member __.TrySync(log : ILogger, token, initialState, events : 'event list, context) : Async> = async { + member __.TrySync(log : ILogger, token, initialState, events : 'event list, context, _compress) : Async> = async { let! 
syncRes = category.TrySync(log, fold, token, initialState, events, context) match syncRes with | SyncResult.Conflict resync -> return SyncResult.Conflict resync @@ -596,12 +596,12 @@ type Resolver<'event, 'state, 'context> member __.Resolve(streamName : FsCodec.StreamName, [] ?option, [] ?context) = match FsCodec.StreamName.toString streamName, option with - | sn, (None|Some AllowStale) -> resolveStream sn option context - | sn, Some AssumeEmpty -> Stream.ofMemento (loadEmpty sn) (resolveStream sn option context) + | sn, (None|Some AllowStale) -> resolveStream sn option context true + | sn, Some AssumeEmpty -> Stream.ofMemento (loadEmpty sn) (resolveStream sn option context true) /// Resolve from a Memento being used in a Continuation [based on position and state typically from Stream.CreateMemento] member __.FromMemento(Token.Unpack token as streamToken, state, ?context) = - Stream.ofMemento (streamToken, state) (resolveStream token.stream.name context None) + Stream.ofMemento (streamToken, state) (resolveStream token.stream.name context None true) type private SerilogAdapter(log : ILogger) = interface EventStore.ClientAPI.ILogger with diff --git a/src/Equinox.MemoryStore/MemoryStore.fs b/src/Equinox.MemoryStore/MemoryStore.fs index 74e6fc994..da5e6a056 100644 --- a/src/Equinox.MemoryStore/MemoryStore.fs +++ b/src/Equinox.MemoryStore/MemoryStore.fs @@ -64,7 +64,7 @@ type Category<'event, 'state, 'context, 'Format>(store : VolatileStore<'Format>, match store.TryLoad streamName with | None -> return Token.ofEmpty streamName initial | Some (Decode events) -> return Token.ofEventArray streamName fold initial events } - member __.TrySync(_log, Token.Unpack token, state, events : 'event list, context : 'context option) = async { + member __.TrySync(_log, Token.Unpack token, state, events : 'event list, context : 'context option, _compress) = async { let inline map i (e : FsCodec.IEventData<'Format>) = FsCodec.Core.TimelineEvent.Create(int64 i, e.EventType, e.Data, 
e.Meta, e.EventId, e.CorrelationId, e.CausationId, e.Timestamp) let encoded : FsCodec.ITimelineEvent<_>[] = events |> Seq.mapi (fun i e -> map (token.streamVersion+i) (codec.Encode(context,e))) |> Array.ofSeq @@ -82,7 +82,7 @@ type Category<'event, 'state, 'context, 'Format>(store : VolatileStore<'Format>, type Resolver<'event, 'state, 'Format, 'context>(store : VolatileStore<'Format>, codec : FsCodec.IEventCodec<'event,'Format,'context>, fold, initial) = let category = Category<'event, 'state, 'context, 'Format>(store, codec, fold, initial) - let resolveStream streamName context = Stream.create category streamName None context + let resolveStream streamName context = Stream.create category streamName None context true member __.Resolve(streamName : FsCodec.StreamName, [] ?option, [] ?context : 'context) = match FsCodec.StreamName.toString streamName, option with | sn, (None|Some AllowStale) -> resolveStream sn context diff --git a/src/Equinox.SqlStreamStore/SqlStreamStore.fs b/src/Equinox.SqlStreamStore/SqlStreamStore.fs index d1cc56d69..56807fe71 100644 --- a/src/Equinox.SqlStreamStore/SqlStreamStore.fs +++ b/src/Equinox.SqlStreamStore/SqlStreamStore.fs @@ -474,8 +474,8 @@ module Caching = interface ICategory<'event, 'state, string, 'context> with member __.Load(log, streamName : string, opt) : Async = loadAndIntercept (inner.Load(log, streamName, opt)) streamName - member __.TrySync(log : ILogger, (Token.StreamPos (stream,_) as token), state, events : 'event list, context) : Async> = async { - let! syncRes = inner.TrySync(log, token, state, events, context) + member __.TrySync(log : ILogger, (Token.StreamPos (stream,_) as token), state, events : 'event list, context, compress) : Async> = async { + let! 
syncRes = inner.TrySync(log, token, state, events, context, compress) match syncRes with | SyncResult.Conflict resync -> return SyncResult.Conflict (loadAndIntercept resync stream.name) | SyncResult.Written (token',state') -> @@ -504,7 +504,7 @@ type private Folder<'event, 'state, 'context>(category : Category<'event, 'state | None -> return! batched log streamName | Some tokenAndState when opt = Some AllowStale -> return tokenAndState | Some (token, state) -> return! category.LoadFromToken fold state streamName token log } - member __.TrySync(log : ILogger, token, initialState, events : 'event list, context) : Async> = async { + member __.TrySync(log : ILogger, token, initialState, events : 'event list, context, _compress) : Async> = async { let! syncRes = category.TrySync(log, fold, token, initialState, events, context) match syncRes with | SyncResult.Conflict resync -> return SyncResult.Conflict resync @@ -547,12 +547,12 @@ type Resolver<'event, 'state, 'context> let loadEmpty sn = context.LoadEmpty sn,initial member __.Resolve(streamName : FsCodec.StreamName, []?option, []?context) = match FsCodec.StreamName.toString streamName, option with - | sn, (None|Some AllowStale) -> resolveStream sn option context - | sn, Some AssumeEmpty -> Stream.ofMemento (loadEmpty sn) (resolveStream sn option context) + | sn, (None|Some AllowStale) -> resolveStream sn option context true + | sn, Some AssumeEmpty -> Stream.ofMemento (loadEmpty sn) (resolveStream sn option context true) /// Resolve from a Memento being used in a Continuation [based on position and state typically from Stream.CreateMemento] member __.FromMemento(Token.Unpack token as streamToken, state, ?context) = - Stream.ofMemento (streamToken,state) (resolveStream token.stream.name context None) + Stream.ofMemento (streamToken,state) (resolveStream token.stream.name context None true) [] type ConnectorBase([]?readRetryPolicy, []?writeRetryPolicy) = diff --git 
a/tests/Equinox.Cosmos.Integration/JsonConverterTests.fs b/tests/Equinox.Cosmos.Integration/JsonConverterTests.fs index 61a2cb3ee..9651e6227 100644 --- a/tests/Equinox.Cosmos.Integration/JsonConverterTests.fs +++ b/tests/Equinox.Cosmos.Integration/JsonConverterTests.fs @@ -1,6 +1,7 @@ module Equinox.Cosmos.Integration.JsonConverterTests open Equinox.Cosmos +open Equinox.Cosmos.Store open FsCheck.Xunit open Swensen.Unquote open System @@ -18,20 +19,8 @@ let defaultOptions = FsCodec.SystemTextJson.Options.Create() type Base64ZipUtf8Tests() = let eventCodec = FsCodec.SystemTextJson.Codec.Create(defaultOptions) - [] - let ``serializes, achieving compression`` () = - let encoded = eventCodec.Encode(None,A { embed = String('x',5000) }) - let e : Store.Unfold = - { i = 42L - c = encoded.EventType - d = encoded.Data - m = Unchecked.defaultof - t = DateTimeOffset.MinValue } - let res = FsCodec.SystemTextJson.Serdes.Serialize(e, defaultOptions) - test <@ res.Contains("\"d\":\"") && res.Length < 138 @> - [] - let roundtrips value = + let ``Can read uncompressed and compressed`` compress value = let hasNulls = match value with | A x | B x when obj.ReferenceEquals(null, x) -> true @@ -39,14 +28,14 @@ type Base64ZipUtf8Tests() = if hasNulls then () else let encoded = eventCodec.Encode(None,value) + let compressor = if compress then JsonCompressedBase64Converter.Compress else id let e : Store.Unfold = { i = 42L c = encoded.EventType - d = encoded.Data + d = compressor encoded.Data m = Unchecked.defaultof t = DateTimeOffset.MinValue } let ser = FsCodec.SystemTextJson.Serdes.Serialize(e, defaultOptions) - test <@ ser.Contains("\"d\":\"") @> let des = FsCodec.SystemTextJson.Serdes.Deserialize(ser, defaultOptions) let d = FsCodec.Core.TimelineEvent.Create(-1L, des.c, des.d) let decoded = eventCodec.TryDecode d |> Option.get From 0a7fa6d5e78db9c035532ff5ac082fa1892cb364 Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Thu, 19 Mar 2020 12:01:53 -0400 Subject: [PATCH 49/71] Change 
container initialization to be explicitly called on demand --- src/Equinox.Cosmos/Cosmos.fs | 153 ++++++++++++++++------------------ tools/Equinox.Tool/Program.fs | 4 +- 2 files changed, 76 insertions(+), 81 deletions(-) diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index 7012faf03..12af4d175 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -446,15 +446,80 @@ function sync(req, expIndex, expEtag) { module CancellationToken = let useOrCreate = function Some ct -> async.Return ct | _ -> Async.CancellationToken -type EquinoxCosmosClient (cosmosClient: CosmosClient, containerClient: CosmosContainer, databaseId: string, containerId: string) = +module EquinoxCosmosInitialization = + let internal getOrCreateDatabase (sdk: CosmosClient) (dbName: string) (throughput: ResourceThroughput) (cancellationToken: CancellationToken option) = async { + let! ct = CancellationToken.useOrCreate cancellationToken + let! response = + match throughput with + | Default -> sdk.CreateDatabaseIfNotExistsAsync(id = dbName, cancellationToken = ct) |> Async.AwaitTaskCorrect + | SetIfCreating value -> sdk.CreateDatabaseIfNotExistsAsync(id = dbName, throughput = Nullable(value), cancellationToken = ct) |> Async.AwaitTaskCorrect + | ReplaceAlways value -> async { + let! response = sdk.CreateDatabaseIfNotExistsAsync(id = dbName, throughput = Nullable(value), cancellationToken = ct) |> Async.AwaitTaskCorrect + let! _ = response.Database.ReplaceThroughputAsync(value, cancellationToken = ct) |> Async.AwaitTaskCorrect + return response } + + return response.Database } + + let internal getOrCreateContainer (db: CosmosDatabase) (props: ContainerProperties) (throughput: ResourceThroughput) (cancellationToken: CancellationToken option) = async { + let! ct = CancellationToken.useOrCreate cancellationToken + let! 
response = + match throughput with + | Default -> db.CreateContainerIfNotExistsAsync(props, cancellationToken = ct) |> Async.AwaitTaskCorrect + | SetIfCreating value -> db.CreateContainerIfNotExistsAsync(props, throughput = Nullable(value), cancellationToken = ct) |> Async.AwaitTaskCorrect + | ReplaceAlways value -> async { + let! response = db.CreateContainerIfNotExistsAsync(props, throughput = Nullable(value), cancellationToken = ct) |> Async.AwaitTaskCorrect + let! _ = response.Container.ReplaceThroughputAsync(value, cancellationToken = ct) |> Async.AwaitTaskCorrect + return response } + return response.Container } + + let internal getBatchAndTipContainerProps (containerName: string) = + let props = ContainerProperties(id = containerName, partitionKeyPath = sprintf "/%s" Batch.PartitionKeyField) + props.IndexingPolicy.IndexingMode <- IndexingMode.Consistent + props.IndexingPolicy.Automatic <- true + // Can either do a blacklist or a whitelist + // Given how long and variable the blacklist would be, we whitelist instead + props.IndexingPolicy.ExcludedPaths.Add(ExcludedPath(Path="/*")) + // NB its critical to index the nominated PartitionKey field defined above or there will be runtime errors + for k in Batch.IndexedFields do props.IndexingPolicy.IncludedPaths.Add(IncludedPath(Path = sprintf "/%s/?" k)) + props + + let createSyncStoredProcedure (container: CosmosContainer) (name) (cancellationToken) = async { + let! ct = CancellationToken.useOrCreate cancellationToken + try let! 
r = container.Scripts.CreateStoredProcedureAsync(Scripts.StoredProcedureProperties(name, SyncStoredProcedure.body), cancellationToken = ct) |> Async.AwaitTaskCorrect + return r.GetRawResponse().Headers.GetRequestCharge() + with CosmosException ((CosmosStatusCode sc) as e) when sc = int System.Net.HttpStatusCode.Conflict -> return e.Response.Headers.GetRequestCharge() } + + let initializeContainer (sdk: CosmosClient) (dbName: string) (containerName: string) (mode: Provisioning) (createStoredProcedure: bool) (storedProcedureName: string option) (cancellationToken: CancellationToken option) = async { + let! ct = CancellationToken.useOrCreate cancellationToken + let dbThroughput = match mode with Provisioning.Database throughput -> throughput | _ -> Default + let containerThroughput = match mode with Provisioning.Container throughput -> throughput | _ -> Default + let! db = getOrCreateDatabase sdk dbName dbThroughput (Some ct) + let! container = getOrCreateContainer db (getBatchAndTipContainerProps containerName) containerThroughput (Some ct) + + if createStoredProcedure then + let syncStoredProcedureName = storedProcedureName |> Option.defaultValue SyncStoredProcedure.defaultName + do! 
createSyncStoredProcedure container syncStoredProcedureName (Some ct) |> Async.Ignore + + return container } + +type EquinoxCosmosClient (cosmosClient: CosmosClient, databaseId: string, containerId: string) = + let containerClient = lazy(cosmosClient.GetContainer(databaseId, containerId)) + member val DatabaseId = databaseId with get member val ContainerId = containerId with get member val CosmosSdkClient = cosmosClient with get - member val ContainerSdkClient = containerClient with get + + abstract member InitializeContainer: mode: Provisioning * createStoredProcedure: bool * ?storedProcedureName: string -> Async + default __.InitializeContainer(mode, createStoredProcedure, storedProcedureName) = + EquinoxCosmosInitialization.initializeContainer cosmosClient databaseId containerId mode createStoredProcedure storedProcedureName None + + abstract member GetContainer: unit -> CosmosContainer + default __.GetContainer() = + containerClient.Force() abstract member GetQueryIteratorByPage<'T> : query: QueryDefinition * ?options: QueryRequestOptions -> AsyncSeq> default __.GetQueryIteratorByPage<'T>(query, ?options) = - containerClient.GetItemQueryIterator<'T>(query, requestOptions = defaultArg options null).AsPages() |> AsyncSeq.ofAsyncEnum + __.GetContainer().GetItemQueryIterator<'T>(query, requestOptions = defaultArg options null).AsPages() |> AsyncSeq.ofAsyncEnum abstract member TryReadItem<'T> : docId: string * partitionKey: string * ?options: ItemRequestOptions * ?cancellationToken : CancellationToken -> Async> default __.TryReadItem<'T>(docId, partitionKey, ?options, ?cancellationToken) = async { @@ -465,7 +530,7 @@ type EquinoxCosmosClient (cosmosClient: CosmosClient, containerClient: CosmosCon | Some ct -> async.Return ct | _ -> Async.CancellationToken // TODO use TryReadItemStreamAsync to avoid the exception https://github.com/Azure/azure-cosmos-dotnet-v3/issues/692#issuecomment-521936888 - try let! item = async { return! 
containerClient.ReadItemAsync<'T>(docId, partitionKey, requestOptions = options, cancellationToken = ct) |> Async.AwaitTaskCorrect } + try let! item = async { return! __.GetContainer().ReadItemAsync<'T>(docId, partitionKey, requestOptions = options, cancellationToken = ct) |> Async.AwaitTaskCorrect } // if item.StatusCode = System.Net.HttpStatusCode.NotModified then return item.RequestCharge, NotModified // NB `.Document` will NRE if a IfNoneModified precondition triggers a NotModified result // else @@ -481,7 +546,7 @@ type EquinoxCosmosClient (cosmosClient: CosmosClient, containerClient: CosmosCon let! ct = CancellationToken.useOrCreate cancellationToken let partitionKey = PartitionKey partitionKey //let args = [| box tip; box index; box (Option.toObj etag)|] - return! containerClient.Scripts.ExecuteStoredProcedureAsync(storedProcedureName, partitionKey, args, cancellationToken = ct) |> Async.AwaitTaskCorrect } + return! __.GetContainer().Scripts.ExecuteStoredProcedureAsync(storedProcedureName, partitionKey, args, cancellationToken = ct) |> Async.AwaitTaskCorrect } module Sync = // NB don't nest in a private module, or serialization will fail miserably ;) @@ -979,62 +1044,6 @@ type private Folder<'event, 'state, 'context> | SyncResult.Conflict resync -> return SyncResult.Conflict resync | SyncResult.Written (token',state') -> return SyncResult.Written (token',state') } -module EquinoxCosmosInitialization = - let internal getOrCreateDatabase (sdk: CosmosClient) (dbName: string) (throughput: ResourceThroughput) (cancellationToken: CancellationToken option) = async { - let! ct = CancellationToken.useOrCreate cancellationToken - let! 
response = - match throughput with - | Default -> sdk.CreateDatabaseIfNotExistsAsync(id = dbName, cancellationToken = ct) |> Async.AwaitTaskCorrect - | SetIfCreating value -> sdk.CreateDatabaseIfNotExistsAsync(id = dbName, throughput = Nullable(value), cancellationToken = ct) |> Async.AwaitTaskCorrect - | ReplaceAlways value -> async { - let! response = sdk.CreateDatabaseIfNotExistsAsync(id = dbName, throughput = Nullable(value), cancellationToken = ct) |> Async.AwaitTaskCorrect - let! _ = response.Database.ReplaceThroughputAsync(value, cancellationToken = ct) |> Async.AwaitTaskCorrect - return response } - - return response.Database } - - let internal getOrCreateContainer (db: CosmosDatabase) (props: ContainerProperties) (throughput: ResourceThroughput) (cancellationToken: CancellationToken option) = async { - let! ct = CancellationToken.useOrCreate cancellationToken - let! response = - match throughput with - | Default -> db.CreateContainerIfNotExistsAsync(props, cancellationToken = ct) |> Async.AwaitTaskCorrect - | SetIfCreating value -> db.CreateContainerIfNotExistsAsync(props, throughput = Nullable(value), cancellationToken = ct) |> Async.AwaitTaskCorrect - | ReplaceAlways value -> async { - let! response = db.CreateContainerIfNotExistsAsync(props, throughput = Nullable(value), cancellationToken = ct) |> Async.AwaitTaskCorrect - let! 
_ = response.Container.ReplaceThroughputAsync(value, cancellationToken = ct) |> Async.AwaitTaskCorrect - return response } - return response.Container } - - let internal getBatchAndTipContainerProps (containerName: string) = - let props = ContainerProperties(id = containerName, partitionKeyPath = sprintf "/%s" Batch.PartitionKeyField) - props.IndexingPolicy.IndexingMode <- IndexingMode.Consistent - props.IndexingPolicy.Automatic <- true - // Can either do a blacklist or a whitelist - // Given how long and variable the blacklist would be, we whitelist instead - props.IndexingPolicy.ExcludedPaths.Add(ExcludedPath(Path="/*")) - // NB its critical to index the nominated PartitionKey field defined above or there will be runtime errors - for k in Batch.IndexedFields do props.IndexingPolicy.IncludedPaths.Add(IncludedPath(Path = sprintf "/%s/?" k)) - props - - let internal createSyncStoredProcedure (container: CosmosContainer) (name) (cancellationToken) = async { - let! ct = CancellationToken.useOrCreate cancellationToken - try let! r = container.Scripts.CreateStoredProcedureAsync(Scripts.StoredProcedureProperties(name, SyncStoredProcedure.body), cancellationToken = ct) |> Async.AwaitTaskCorrect - return r.GetRawResponse().Headers.GetRequestCharge() - with CosmosException ((CosmosStatusCode sc) as e) when sc = int System.Net.HttpStatusCode.Conflict -> return e.Response.Headers.GetRequestCharge() } - - let initializeContainer (sdk: CosmosClient) (dbName: string) (containerName: string) (mode: Provisioning) (createStoredProcedure: bool) (storedProcedureName: string option) (cancellationToken: CancellationToken option) = async { - let! ct = CancellationToken.useOrCreate cancellationToken - let dbThroughput = match mode with Provisioning.Database throughput -> throughput | _ -> Default - let containerThroughput = match mode with Provisioning.Container throughput -> throughput | _ -> Default - let! db = getOrCreateDatabase sdk dbName dbThroughput (Some ct) - let! 
container = getOrCreateContainer db (getBatchAndTipContainerProps containerName) containerThroughput (Some ct) - - if createStoredProcedure then - let syncStoredProcedureName = storedProcedureName |> Option.defaultValue SyncStoredProcedure.defaultName - do! createSyncStoredProcedure container syncStoredProcedureName (Some ct) |> Async.Ignore - - return container } - /// Pairs a Gateway, defining the retry policies for CosmosDb with a Containers map defining mappings from (category,id) to (databaseId,containerId,streamName) type Context ( client: EquinoxCosmosClient, @@ -1048,7 +1057,7 @@ type Context let conn = Connection(client, ?readRetryPolicy = readRetryPolicy, ?writeRetryPolicy = writeRetryPolicy) let batchingPolicy = BatchingPolicy(?defaultMaxItems = defaultMaxItems, ?getDefaultMaxItems =getDefaultMaxItems, ?maxRequests = maxRequests) let gateway = Gateway(conn, batchingPolicy) - let init = fun () -> EquinoxCosmosInitialization.createSyncStoredProcedure client.ContainerSdkClient SyncStoredProcedure.defaultName None |> Async.Ignore + let init = fun () -> EquinoxCosmosInitialization.createSyncStoredProcedure (client.GetContainer()) SyncStoredProcedure.defaultName None |> Async.Ignore let containers = Containers(client.DatabaseId, client.ContainerId) member __.Gateway = gateway @@ -1223,18 +1232,12 @@ type EquinoxCosmosClientFactory // co.TransportClientHandlerFactory <- inhibitCertCheck co - abstract member CreateClient: name: string * discovery: Discovery * dbName: string * containerName: string * ?provisioningMode: Provisioning * ?createStoredProcedure: bool * ?storedProcedureName: string * ?skipLog: bool -> EquinoxCosmosClient + abstract member CreateClient: name: string * discovery: Discovery * dbName: string * containerName: string * ?skipLog: bool -> EquinoxCosmosClient default __.CreateClient ( /// Name should be sufficient to uniquely identify this connection within a single app instance's logs name, discovery : Discovery, dbName: string, containerName: 
string, - /// If provided, the database and container will be initialized based on the provided values - ?provisioningMode: Provisioning, - /// true to create the sync stored procedure during initialization - ?createStoredProcedure: bool, - /// If provided along with createStoredProcedure being set to true, will create the stored procedure with a custom name - ?storedProcedureName: string, /// true to inhibit logging of client name []?skipLog) : EquinoxCosmosClient = @@ -1242,15 +1245,7 @@ type EquinoxCosmosClientFactory if skipLog <> Some true then logName uri name let cosmosClient = new CosmosClient(string uri, key, __.ClientOptions) - match provisioningMode with - | Some mode -> - EquinoxCosmosInitialization.initializeContainer cosmosClient dbName containerName mode (defaultArg createStoredProcedure true) storedProcedureName None - |> Async.Ignore - |> Async.RunSynchronously - | _ -> () - - let containerClient = cosmosClient.GetContainer(dbName, containerName) - new EquinoxCosmosClient(cosmosClient, containerClient, dbName, containerName) + new EquinoxCosmosClient(cosmosClient, dbName, containerName) namespace Equinox.Cosmos.Core @@ -1285,7 +1280,7 @@ type Context let containers = Containers(client.DatabaseId, client.ContainerId) let getDefaultMaxItems = match getDefaultMaxItems with Some f -> f | None -> fun () -> defaultArg defaultMaxItems 10 let batching = BatchingPolicy(getDefaultMaxItems=getDefaultMaxItems) - let init = fun () -> EquinoxCosmosInitialization.createSyncStoredProcedure client.ContainerSdkClient SyncStoredProcedure.defaultName None |> Async.Ignore + let init = fun () -> EquinoxCosmosInitialization.createSyncStoredProcedure (client.GetContainer()) SyncStoredProcedure.defaultName None |> Async.Ignore let gateway = Gateway(conn, batching) let maxCountPredicate count = diff --git a/tools/Equinox.Tool/Program.fs b/tools/Equinox.Tool/Program.fs index b067c5f4b..5515f0282 100644 --- a/tools/Equinox.Tool/Program.fs +++ b/tools/Equinox.Tool/Program.fs 
@@ -317,7 +317,7 @@ module CosmosInit = let modeStr, rus = match mode with Provisioning.Container rus -> "Container",rus | Provisioning.Database rus -> "Database",rus let _storeLog, factory, discovery, dName, cName = conn (log,verboseConsole,maybeSeq) sargs log.Information("Provisioning `Equinox.Cosmos` Store collection at {mode:l} level for {rus:n0} RU/s", modeStr, rus) - factory.CreateClient(appName, discovery, dName, cName, mode, not skipStoredProc) |> ignore + factory.CreateClient(appName, discovery, dName, cName).InitializeContainer(mode, not skipStoredProc) |> ignore | _ -> failwith "please specify a `cosmos` endpoint" module SqlInit = @@ -355,7 +355,7 @@ module CosmosStats = log.Information("Computing {measures} ({mode})", Seq.map fst ops, (if inParallel then "in parallel" else "serially")) ops |> Seq.map (fun (name,sql) -> async { log.Debug("Running query: {sql}", sql) - let res = container.ContainerSdkClient.QueryValue(sql) + let res = container.GetContainer().QueryValue(sql) log.Information("{stat}: {result:N0}", name, res)}) |> if inParallel then Async.Parallel else Async.ParallelThrottled 1 // TOCONSIDER replace with Async.Sequence when using new enough FSharp.Core |> Async.Ignore From d6637eea9671da8cadaaff675ae33136cd514a94 Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Thu, 19 Mar 2020 12:07:32 -0400 Subject: [PATCH 50/71] Switch from Force() to Value --- src/Equinox.Cosmos/Cosmos.fs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index 12af4d175..2a522dd93 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -515,7 +515,7 @@ type EquinoxCosmosClient (cosmosClient: CosmosClient, databaseId: string, contai abstract member GetContainer: unit -> CosmosContainer default __.GetContainer() = - containerClient.Force() + containerClient.Value abstract member GetQueryIteratorByPage<'T> : query: QueryDefinition * ?options: QueryRequestOptions -> 
AsyncSeq> default __.GetQueryIteratorByPage<'T>(query, ?options) = From 3374a6df93ab1af152d9cc84b4b53e7e2453e763 Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Thu, 19 Mar 2020 12:28:07 -0400 Subject: [PATCH 51/71] Rename Client to Operations --- samples/Infrastructure/Storage.fs | 4 +- src/Equinox.Cosmos/Cosmos.fs | 52 +++++++++---------- .../CosmosFixtures.fs | 4 +- tools/Equinox.Tool/Program.fs | 6 +-- 4 files changed, 33 insertions(+), 33 deletions(-) diff --git a/samples/Infrastructure/Storage.fs b/samples/Infrastructure/Storage.fs index b57fa20c6..5f976a670 100644 --- a/samples/Infrastructure/Storage.fs +++ b/samples/Infrastructure/Storage.fs @@ -76,10 +76,10 @@ module Cosmos = a.Mode, endpointUri, a.Database, a.Container) log.Information("CosmosDb timeout {timeout}s; Throttling retries {retries}, max wait {maxRetryWaitTime}s", (let t = a.Timeout in t.TotalSeconds), a.Retries, let x = a.MaxRetryWaitTime in x.TotalSeconds) - discovery, a.Database, a.Container, EquinoxCosmosClientFactory(a.Timeout, a.Retries, a.MaxRetryWaitTime, log=storeLog, mode=a.Mode) + discovery, a.Database, a.Container, EquinoxCosmosOperationsFactory(a.Timeout, a.Retries, a.MaxRetryWaitTime, log=storeLog, mode=a.Mode) let config (log: ILogger, storeLog) (cache, unfolds, batchSize) info = let discovery, dName, cName, factory = connection (log, storeLog) info - let ctx = Context(factory.CreateClient(appName, discovery, dName, cName), log = log, defaultMaxItems = batchSize) + let ctx = Context(factory.CreateOperationsClient(appName, discovery, dName, cName), log = log, defaultMaxItems = batchSize) let cacheStrategy = match cache with Some c -> CachingStrategy.SlidingWindow (c, TimeSpan.FromMinutes 20.) 
| None -> CachingStrategy.NoCaching StorageConfig.Cosmos (ctx, cacheStrategy, unfolds, dName, cName) diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index 2a522dd93..db9dddc9f 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -502,7 +502,7 @@ module EquinoxCosmosInitialization = return container } -type EquinoxCosmosClient (cosmosClient: CosmosClient, databaseId: string, containerId: string) = +type EquinoxCosmosOperations (cosmosClient: CosmosClient, databaseId: string, containerId: string) = let containerClient = lazy(cosmosClient.GetContainer(databaseId, containerId)) member val DatabaseId = databaseId with get @@ -559,7 +559,7 @@ module Sync = | ConflictUnknown of Position type [] Exp = Version of int64 | Etag of string | Any - let private run (container : EquinoxCosmosClient, stream : string) (exp, req: Tip) + let private run (container : EquinoxCosmosOperations, stream : string) (exp, req: Tip) : Async = async { let ep = match exp with Exp.Version ev -> Position.fromI ev | Exp.Etag et -> Position.fromEtag et | Exp.Any -> Position.fromAppendAtEnd let args = [| box req; box ep.index; box (Option.toObj ep.etag)|] @@ -623,10 +623,10 @@ module Sync = } : Unfold) module internal Tip = - let private get (container : EquinoxCosmosClient, stream : string) (maybePos: Position option) = + let private get (container : EquinoxCosmosOperations, stream : string) (maybePos: Position option) = let ro = match maybePos with Some { etag=Some etag } -> ItemRequestOptions(IfNoneMatch=Nullable(Azure.ETag(etag))) | _ -> null container.TryReadItem(Tip.WellKnownDocumentId, stream, options = ro) - let private loggedGet (get : EquinoxCosmosClient * string -> Position option -> Async<_>) (container,stream) (maybePos: Position option) (log: ILogger) = async { + let private loggedGet (get : EquinoxCosmosOperations * string -> Position option -> Async<_>) (container,stream) (maybePos: Position option) (log: ILogger) = async { let log = 
log |> Log.prop "stream" stream let! t, (ru, res : ReadResult) = get (container,stream) maybePos |> Stopwatch.Time let log bytes count (f : Log.Measurement -> _) = log |> Log.event (f { stream = stream; interval = t; bytes = bytes; count = count; ru = ru }) @@ -655,7 +655,7 @@ module internal Tip = module internal Query = open FSharp.Control - let private mkQuery (container : EquinoxCosmosClient, stream: string) maxItems (direction: Direction) startPos : AsyncSeq> = + let private mkQuery (container : EquinoxCosmosOperations, stream: string) maxItems (direction: Direction) startPos : AsyncSeq> = let query = let root = sprintf "SELECT c.id, c.i, c._etag, c.n, c.e FROM c WHERE c.id!=\"%s\"" Tip.WellKnownDocumentId let tail = sprintf "ORDER BY c.i %s" (if direction = Direction.Forward then "ASC" else "DESC") @@ -817,12 +817,12 @@ module internal Tip = let t = StopwatchInterval(startTicks, endTicks) log |> logQuery direction maxItems stream t (!responseCount,allEvents.ToArray()) -1L ru } -type [] Token = { container: EquinoxCosmosClient; stream: string; pos: Position } +type [] Token = { container: EquinoxCosmosOperations; stream: string; pos: Position } module Token = let create (container,stream) pos : StreamToken = { value = box { container = container; stream = stream; pos = pos } version = pos.index } - let (|Unpack|) (token: StreamToken) : EquinoxCosmosClient*string*Position = let t = unbox token.value in t.container,t.stream,t.pos + let (|Unpack|) (token: StreamToken) : EquinoxCosmosOperations*string*Position = let t = unbox token.value in t.container,t.stream,t.pos let supersedes (Unpack (_,_,currentPos)) (Unpack (_,_,xPos)) = let currentVersion, newVersion = currentPos.index, xPos.index let currentETag, newETag = currentPos.etag, xPos.etag @@ -850,7 +850,7 @@ open System.Collections.Concurrent open System.Text.Json /// Defines policies for retrying with respect to transient failures calling CosmosDb (as opposed to application level concurrency conflicts) -type 
Connection(client: EquinoxCosmosClient, []?readRetryPolicy: IRetryPolicy, []?writeRetryPolicy) = +type Connection(client: EquinoxCosmosOperations, []?readRetryPolicy: IRetryPolicy, []?writeRetryPolicy) = member __.Client = client member __.TipRetryPolicy = readRetryPolicy member __.QueryRetryPolicy = readRetryPolicy @@ -922,7 +922,7 @@ type Gateway(conn : Connection, batching : BatchingPolicy) = | Sync.Result.Written pos' -> return InternalSyncResult.Written (Token.create containerStream pos') } /// Holds Container state, coordinating initialization activities -type private ContainerWrapper(container : EquinoxCosmosClient, ?initContainer : unit -> Async) = +type private ContainerWrapper(container : EquinoxCosmosOperations, ?initContainer : unit -> Async) = let initGuard = initContainer |> Option.map (fun init -> AsyncCacheCell(init ())) member __.Container = container @@ -937,7 +937,7 @@ type Containers(categoryAndIdToDatabaseContainerStream : string -> string -> str let genStreamName categoryName streamId = if categoryName = null then streamId else sprintf "%s-%s" categoryName streamId Containers(fun categoryName streamId -> databaseId, containerId, genStreamName categoryName streamId) - member internal __.Resolve(client : EquinoxCosmosClient, categoryName, id, init) : (EquinoxCosmosClient*string) * (unit -> Async) option = + member internal __.Resolve(client : EquinoxCosmosOperations, categoryName, id, init) : (EquinoxCosmosOperations*string) * (unit -> Async) option = let databaseId, containerName, streamName = categoryAndIdToDatabaseContainerStream categoryName id let init = match disableInitialization with Some true -> None | _ -> Some init let wrapped = wrappers.GetOrAdd((databaseId,containerName), fun _ -> ContainerWrapper(client, ?initContainer = init)) @@ -991,14 +991,14 @@ type private Category<'event, 'state, 'context>(gateway : Gateway, codec : IEven module Caching = /// Forwards all state changes in all streams of an ICategory to a `tee` function - 
type CategoryTee<'event, 'state, 'context>(inner: ICategory<'event, 'state, EquinoxCosmosClient*string,'context>, tee : string -> StreamToken * 'state -> Async) = + type CategoryTee<'event, 'state, 'context>(inner: ICategory<'event, 'state, EquinoxCosmosOperations*string,'context>, tee : string -> StreamToken * 'state -> Async) = let intercept streamName tokenAndState = async { let! _ = tee streamName tokenAndState return tokenAndState } let loadAndIntercept load streamName = async { let! tokenAndState = load return! intercept streamName tokenAndState } - interface ICategory<'event, 'state, EquinoxCosmosClient*string, 'context> with + interface ICategory<'event, 'state, EquinoxCosmosOperations*string, 'context> with member __.Load(log, (container,streamName), opt) : Async = loadAndIntercept (inner.Load(log, (container,streamName), opt)) streamName member __.TrySync(log : ILogger, (Token.Unpack (_container,stream,_) as streamToken), state, events : 'event list, context, compress) @@ -1014,8 +1014,8 @@ module Caching = (cache : ICache) (prefix : string) (slidingExpiration : TimeSpan) - (category : ICategory<'event, 'state, EquinoxCosmosClient*string, 'context>) - : ICategory<'event, 'state, EquinoxCosmosClient*string, 'context> = + (category : ICategory<'event, 'state, EquinoxCosmosOperations*string, 'context>) + : ICategory<'event, 'state, EquinoxCosmosOperations*string, 'context> = let mkCacheEntry (initialToken : StreamToken, initialState : 'state) = new CacheEntry<'state>(initialToken, initialState, Token.supersedes) let options = CacheItemOptions.RelativeExpiration slidingExpiration let addOrUpdateSlidingExpirationCacheEntry streamName value = cache.UpdateIfNewer(prefix + streamName, options, mkCacheEntry value) @@ -1028,7 +1028,7 @@ type private Folder<'event, 'state, 'context> ?readCache) = let inspectUnfolds = match mapUnfolds with Choice1Of3 () -> false | _ -> true let batched log containerStream = category.Load inspectUnfolds containerStream fold initial 
isOrigin log - interface ICategory<'event, 'state, EquinoxCosmosClient*string, 'context> with + interface ICategory<'event, 'state, EquinoxCosmosOperations*string, 'context> with member __.Load(log, (container,streamName), opt): Async = match readCache with | None -> batched log (container,streamName) @@ -1046,7 +1046,7 @@ type private Folder<'event, 'state, 'context> /// Pairs a Gateway, defining the retry policies for CosmosDb with a Containers map defining mappings from (category,id) to (databaseId,containerId,streamName) type Context - ( client: EquinoxCosmosClient, + ( client: EquinoxCosmosOperations, ?log: ILogger, ?defaultMaxItems: int, ?getDefaultMaxItems: unit -> int, @@ -1062,7 +1062,7 @@ type Context member __.Gateway = gateway member __.Containers = containers - member internal __.ResolveContainerStream(categoryName, id) : (EquinoxCosmosClient*string) * (unit -> Async) option = + member internal __.ResolveContainerStream(categoryName, id) : (EquinoxCosmosOperations*string) * (unit -> Async) option = containers.Resolve(gateway.Client, categoryName, id, init) [] @@ -1126,7 +1126,7 @@ type Resolver<'event, 'state, 'context>(context : Context, codec, fold, initial, | AccessStrategy.Custom (isOrigin,transmute) -> isOrigin, Choice3Of3 transmute let cosmosCat = Category<'event, 'state, 'context>(context.Gateway, codec) let folder = Folder<'event, 'state, 'context>(cosmosCat, fold, initial, isOrigin, mapUnfolds, ?readCache = readCacheOption) - let category : ICategory<_, _, EquinoxCosmosClient*string, 'context> = + let category : ICategory<_, _, EquinoxCosmosOperations*string, 'context> = match caching with | CachingStrategy.NoCaching -> folder :> _ | CachingStrategy.SlidingWindow(cache, window) -> @@ -1178,7 +1178,7 @@ type Discovery = UriAndKey (Uri uri, key) | _ -> invalidArg "connectionString" "unrecognized connection string format; must be `AccountEndpoint=https://...;AccountKey=...=;`" -type EquinoxCosmosClientFactory +type EquinoxCosmosOperationsFactory 
( /// Timeout to apply to individual reads/write round-trips going to CosmosDb requestTimeout: TimeSpan, /// Maximum number of times attempt when failure reason is a 429 from CosmosDb, signifying RU limits have been breached @@ -1208,7 +1208,7 @@ type EquinoxCosmosClientFactory log.ForContext("Uri", uri).Information("CosmosDb Connection Name {connectionName}", sanitizedName) /// ClientOptions for this Connector as configured - member val ClientOptions = + member val CosmosClientOptions = let maxAttempts, maxWait, timeout = Nullable maxRetryAttemptsOnRateLimitedRequests, Nullable maxRetryWaitTimeOnRateLimitedRequests, requestTimeout let co = CosmosClientOptions( @@ -1232,20 +1232,20 @@ type EquinoxCosmosClientFactory // co.TransportClientHandlerFactory <- inhibitCertCheck co - abstract member CreateClient: name: string * discovery: Discovery * dbName: string * containerName: string * ?skipLog: bool -> EquinoxCosmosClient - default __.CreateClient + abstract member CreateOperationsClient: name: string * discovery: Discovery * dbName: string * containerName: string * ?skipLog: bool -> EquinoxCosmosOperations + default __.CreateOperationsClient ( /// Name should be sufficient to uniquely identify this connection within a single app instance's logs name, discovery : Discovery, dbName: string, containerName: string, /// true to inhibit logging of client name - []?skipLog) : EquinoxCosmosClient = + []?skipLog) : EquinoxCosmosOperations = let (Discovery.UriAndKey (databaseUri=uri; key=key)) = discovery if skipLog <> Some true then logName uri name - let cosmosClient = new CosmosClient(string uri, key, __.ClientOptions) + let cosmosClient = new CosmosClient(string uri, key, __.CosmosClientOptions) - new EquinoxCosmosClient(cosmosClient, dbName, containerName) + new EquinoxCosmosOperations(cosmosClient, dbName, containerName) namespace Equinox.Cosmos.Core @@ -1266,7 +1266,7 @@ type AppendResult<'t> = /// Encapsulates the core facilities Equinox.Cosmos offers for operating 
directly on Events in Streams. type Context - ( client: EquinoxCosmosClient, + ( client: EquinoxCosmosOperations, /// Logger to write to - see https://github.com/serilog/serilog/wiki/Provided-Sinks for how to wire to your logger log : Serilog.ILogger, /// Optional maximum number of Store.Batch records to retrieve as a set (how many Events are placed therein is controlled by average batch size when appending events diff --git a/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs b/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs index 07d74b5f0..0a8030b97 100644 --- a/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs +++ b/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs @@ -21,8 +21,8 @@ let private connectToCosmos (log: Serilog.ILogger) batchSize client = let createSpecifiedCosmosOrSimulatorClient log = let createClient name discovery = - EquinoxCosmosClientFactory(log=log, requestTimeout=TimeSpan.FromSeconds 3., maxRetryAttemptsOnRateLimitedRequests=2, maxRetryWaitTimeOnRateLimitedRequests=TimeSpan.FromMinutes 1.) - .CreateClient(name, discovery, dbId, cId) + EquinoxCosmosOperationsFactory(log=log, requestTimeout=TimeSpan.FromSeconds 3., maxRetryAttemptsOnRateLimitedRequests=2, maxRetryWaitTimeOnRateLimitedRequests=TimeSpan.FromMinutes 1.) 
+ .CreateOperationsClient(name, discovery, dbId, cId) match read "EQUINOX_COSMOS_CONNECTION" with | None -> diff --git a/tools/Equinox.Tool/Program.fs b/tools/Equinox.Tool/Program.fs index 5515f0282..bbc4408af 100644 --- a/tools/Equinox.Tool/Program.fs +++ b/tools/Equinox.Tool/Program.fs @@ -317,7 +317,7 @@ module CosmosInit = let modeStr, rus = match mode with Provisioning.Container rus -> "Container",rus | Provisioning.Database rus -> "Database",rus let _storeLog, factory, discovery, dName, cName = conn (log,verboseConsole,maybeSeq) sargs log.Information("Provisioning `Equinox.Cosmos` Store collection at {mode:l} level for {rus:n0} RU/s", modeStr, rus) - factory.CreateClient(appName, discovery, dName, cName).InitializeContainer(mode, not skipStoredProc) |> ignore + factory.CreateOperationsClient(appName, discovery, dName, cName).InitializeContainer(mode, not skipStoredProc) |> ignore | _ -> failwith "please specify a `cosmos` endpoint" module SqlInit = @@ -347,7 +347,7 @@ module CosmosStats = let doS = doS || (not doD && not doE) // default to counting streams only unless otherwise specified let inParallel = args.Contains Parallel let _storeLog,factory,discovery,dName,cName = CosmosInit.conn (log,verboseConsole,maybeSeq) sargs - let container = factory.CreateClient(appName, discovery, dName, cName) + let client = factory.CreateOperationsClient(appName, discovery, dName, cName) let ops = [ if doS then yield "Streams", """SELECT VALUE COUNT(1) FROM c WHERE c.id="-1" """ if doD then yield "Documents", """SELECT VALUE COUNT(1) FROM c""" @@ -355,7 +355,7 @@ module CosmosStats = log.Information("Computing {measures} ({mode})", Seq.map fst ops, (if inParallel then "in parallel" else "serially")) ops |> Seq.map (fun (name,sql) -> async { log.Debug("Running query: {sql}", sql) - let res = container.GetContainer().QueryValue(sql) + let res = client.GetContainer().QueryValue(sql) log.Information("{stat}: {result:N0}", name, res)}) |> if inParallel then Async.Parallel else 
Async.ParallelThrottled 1 // TOCONSIDER replace with Async.Sequence when using new enough FSharp.Core |> Async.Ignore From a17704b0a5cf82bf5e94c33856764ed892035ccb Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Thu, 19 Mar 2020 12:30:31 -0400 Subject: [PATCH 52/71] Remove Sdk from naming --- src/Equinox.Cosmos/Cosmos.fs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index db9dddc9f..ee2a8c562 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -507,7 +507,7 @@ type EquinoxCosmosOperations (cosmosClient: CosmosClient, databaseId: string, co member val DatabaseId = databaseId with get member val ContainerId = containerId with get - member val CosmosSdkClient = cosmosClient with get + member val CosmosClient = cosmosClient with get abstract member InitializeContainer: mode: Provisioning * createStoredProcedure: bool * ?storedProcedureName: string -> Async default __.InitializeContainer(mode, createStoredProcedure, storedProcedureName) = From e4270f9932ebdc371cb6ed93e49d84e2ae83bc4f Mon Sep 17 00:00:00 2001 From: Yaron Librach Date: Thu, 19 Mar 2020 12:34:14 -0400 Subject: [PATCH 53/71] Rename to compressUnfolds for clarity --- src/Equinox.Cosmos/Cosmos.fs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index ee2a8c562..82a1b0311 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -1150,8 +1150,8 @@ type Resolver<'event, 'state, 'context>(context : Context, codec, fold, initial, []?option, []?context, /// Determines whether the data and metadata payloads of the `u`nfolds in the Tip document are base64 encoded and compressed; defaults to true - []?compress) = - let compress = defaultArg compress true + []?compressUnfolds) = + let compress = defaultArg compressUnfolds true match resolveTarget streamName, option with | streamArgs,(None|Some 
AllowStale) -> resolveStream streamArgs option context compress | (containerStream,maybeInit),Some AssumeEmpty -> @@ -1161,9 +1161,10 @@ type Resolver<'event, 'state, 'context>(context : Context, codec, fold, initial, ( Token.Unpack (container,stream,_pos) as streamToken, state, /// Determines whether the data and metadata payloads of the `u`nfolds in the Tip document are base64 encoded and compressed; defaults to true - []?compress) = + []?compressUnfolds) = + let compress = defaultArg compressUnfolds true let skipInitialization = None - Stream.ofMemento (streamToken,state) (resolveStream ((container,stream),skipInitialization) None None (defaultArg compress true)) + Stream.ofMemento (streamToken,state) (resolveStream ((container,stream),skipInitialization) None None compress) [] type Discovery = From 820db2433ae339a52d627011535d01afa78266a1 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Thu, 19 Mar 2020 17:20:35 +0000 Subject: [PATCH 54/71] Target FsCodec 2.0.2-alpha.0.8 --- samples/Store/Domain/Domain.fsproj | 4 ++-- src/Equinox.Cosmos/Equinox.Cosmos.fsproj | 3 +-- src/Equinox.EventStore/Equinox.EventStore.fsproj | 2 +- src/Equinox.MemoryStore/Equinox.MemoryStore.fsproj | 2 +- src/Equinox.SqlStreamStore/Equinox.SqlStreamStore.fsproj | 2 +- .../Equinox.EventStore.Integration.fsproj | 2 +- 6 files changed, 7 insertions(+), 8 deletions(-) diff --git a/samples/Store/Domain/Domain.fsproj b/samples/Store/Domain/Domain.fsproj index 90393441a..4259f1fca 100644 --- a/samples/Store/Domain/Domain.fsproj +++ b/samples/Store/Domain/Domain.fsproj @@ -21,8 +21,8 @@ - - + + diff --git a/src/Equinox.Cosmos/Equinox.Cosmos.fsproj b/src/Equinox.Cosmos/Equinox.Cosmos.fsproj index 0e29ccdf6..8e015a0e3 100644 --- a/src/Equinox.Cosmos/Equinox.Cosmos.fsproj +++ b/src/Equinox.Cosmos/Equinox.Cosmos.fsproj @@ -6,7 +6,6 @@ false true true - $(DefineConstants);NET461 @@ -26,7 +25,7 @@ - + diff --git a/src/Equinox.EventStore/Equinox.EventStore.fsproj 
b/src/Equinox.EventStore/Equinox.EventStore.fsproj index 20699c979..1078ebad0 100644 --- a/src/Equinox.EventStore/Equinox.EventStore.fsproj +++ b/src/Equinox.EventStore/Equinox.EventStore.fsproj @@ -26,7 +26,7 @@ - + diff --git a/src/Equinox.MemoryStore/Equinox.MemoryStore.fsproj b/src/Equinox.MemoryStore/Equinox.MemoryStore.fsproj index 26c6cfefe..32eff1090 100644 --- a/src/Equinox.MemoryStore/Equinox.MemoryStore.fsproj +++ b/src/Equinox.MemoryStore/Equinox.MemoryStore.fsproj @@ -24,7 +24,7 @@ - + \ No newline at end of file diff --git a/src/Equinox.SqlStreamStore/Equinox.SqlStreamStore.fsproj b/src/Equinox.SqlStreamStore/Equinox.SqlStreamStore.fsproj index 724ef3d28..4e6f7cf56 100644 --- a/src/Equinox.SqlStreamStore/Equinox.SqlStreamStore.fsproj +++ b/src/Equinox.SqlStreamStore/Equinox.SqlStreamStore.fsproj @@ -24,7 +24,7 @@ - + diff --git a/tests/Equinox.EventStore.Integration/Equinox.EventStore.Integration.fsproj b/tests/Equinox.EventStore.Integration/Equinox.EventStore.Integration.fsproj index 06db62a14..14e8eb2f7 100644 --- a/tests/Equinox.EventStore.Integration/Equinox.EventStore.Integration.fsproj +++ b/tests/Equinox.EventStore.Integration/Equinox.EventStore.Integration.fsproj @@ -22,7 +22,7 @@ - + From 634ae3dc5dde5a19fb04e72628c877cc043bcc87 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Thu, 19 Mar 2020 17:58:48 +0000 Subject: [PATCH 55/71] Misc cleanup --- samples/Infrastructure/Storage.fs | 4 +- src/Equinox.Cosmos/Cosmos.fs | 91 ++++++++----------- .../CosmosFixtures.fs | 10 +- tools/Equinox.Tool/Program.fs | 6 +- 4 files changed, 48 insertions(+), 63 deletions(-) diff --git a/samples/Infrastructure/Storage.fs b/samples/Infrastructure/Storage.fs index 5f976a670..7039a954c 100644 --- a/samples/Infrastructure/Storage.fs +++ b/samples/Infrastructure/Storage.fs @@ -76,10 +76,10 @@ module Cosmos = a.Mode, endpointUri, a.Database, a.Container) log.Information("CosmosDb timeout {timeout}s; Throttling retries {retries}, max wait {maxRetryWaitTime}s", 
(let t = a.Timeout in t.TotalSeconds), a.Retries, let x = a.MaxRetryWaitTime in x.TotalSeconds) - discovery, a.Database, a.Container, EquinoxCosmosOperationsFactory(a.Timeout, a.Retries, a.MaxRetryWaitTime, log=storeLog, mode=a.Mode) + discovery, a.Database, a.Container, CosmosOperationsFactory(a.Timeout, a.Retries, a.MaxRetryWaitTime, log=storeLog, mode=a.Mode) let config (log: ILogger, storeLog) (cache, unfolds, batchSize) info = let discovery, dName, cName, factory = connection (log, storeLog) info - let ctx = Context(factory.CreateOperationsClient(appName, discovery, dName, cName), log = log, defaultMaxItems = batchSize) + let ctx = Context(factory.Create(appName, discovery, dName, cName), defaultMaxItems = batchSize) let cacheStrategy = match cache with Some c -> CachingStrategy.SlidingWindow (c, TimeSpan.FromMinutes 20.) | None -> CachingStrategy.NoCaching StorageConfig.Cosmos (ctx, cacheStrategy, unfolds, dName, cName) diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index 82a1b0311..8bb0e3098 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -340,7 +340,7 @@ module Log = for uom, f in measures do let d = f duration in if d <> 0. then logPeriodicRate uom (float totalCount/d |> int64) (totalRc/d) [] -module MicrosoftAzureCosmosWrappers = +module AzureCosmosWrappers = /// Extracts the innermost exception from a nested hierarchy of Aggregate Exceptions let (|AggregateException|) (exn : exn) = let rec aux (e : exn) = @@ -366,8 +366,6 @@ module MicrosoftAzureCosmosWrappers = | true, charge when not <| String.IsNullOrEmpty charge -> float charge | _ -> 0. 
- - [] type SyncResponse = { etag: string; n: int64; conflicts: Unfold[] } type ResourceThroughput = @@ -443,7 +441,7 @@ function sync(req, expIndex, expEtag) { } }""" -module CancellationToken = +module private CancellationToken = let useOrCreate = function Some ct -> async.Return ct | _ -> Async.CancellationToken module EquinoxCosmosInitialization = @@ -525,10 +523,7 @@ type EquinoxCosmosOperations (cosmosClient: CosmosClient, databaseId: string, co default __.TryReadItem<'T>(docId, partitionKey, ?options, ?cancellationToken) = async { let partitionKey = PartitionKey partitionKey let options = defaultArg options null - let! ct = - match cancellationToken with - | Some ct -> async.Return ct - | _ -> Async.CancellationToken + let! ct = CancellationToken.useOrCreate cancellationToken // TODO use TryReadItemStreamAsync to avoid the exception https://github.com/Azure/azure-cosmos-dotnet-v3/issues/692#issuecomment-521936888 try let! item = async { return! __.GetContainer().ReadItemAsync<'T>(docId, partitionKey, requestOptions = options, cancellationToken = ct) |> Async.AwaitTaskCorrect } // if item.StatusCode = System.Net.HttpStatusCode.NotModified then return item.RequestCharge, NotModified @@ -549,9 +544,8 @@ type EquinoxCosmosOperations (cosmosClient: CosmosClient, databaseId: string, co return! 
__.GetContainer().Scripts.ExecuteStoredProcedureAsync(storedProcedureName, partitionKey, args, cancellationToken = ct) |> Async.AwaitTaskCorrect } module Sync = - // NB don't nest in a private module, or serialization will fail miserably ;) - let [] private sprocName = "EquinoxRollingUnfolds3" // NB need to rename/number for any breaking change + // NB don't nest in a private module, or serialization will fail miserably ;) [] type Result = | Written of Position @@ -559,11 +553,11 @@ module Sync = | ConflictUnknown of Position type [] Exp = Version of int64 | Etag of string | Any - let private run (container : EquinoxCosmosOperations, stream : string) (exp, req: Tip) + let private run (ops : EquinoxCosmosOperations, stream : string) (exp, req: Tip) : Async = async { let ep = match exp with Exp.Version ev -> Position.fromI ev | Exp.Etag et -> Position.fromEtag et | Exp.Any -> Position.fromAppendAtEnd let args = [| box req; box ep.index; box (Option.toObj ep.etag)|] - let! res = container.ExecuteStoredProcedure(SyncStoredProcedure.defaultName, stream, args) + let! 
res = ops.ExecuteStoredProcedure(SyncStoredProcedure.defaultName, stream, args) let newPos = { index = res.Value.n; etag = Option.ofObj res.Value.etag } return res.GetRawResponse().Headers.GetRequestCharge(), res.Value.conflicts |> function | null -> Result.Written newPos @@ -611,21 +605,21 @@ module Sync = u = Array.ofSeq unfolds } let mkUnfold compress baseIndex (unfolds: IEventData<_> seq) : Unfold seq = - let compressor = if compress then JsonCompressedBase64Converter.Compress else id + let compressIfRequested x = if compress then JsonCompressedBase64Converter.Compress x else x unfolds |> Seq.mapi (fun offset x -> { i = baseIndex + int64 offset c = x.EventType - d = compressor x.Data - m = compressor x.Meta + d = compressIfRequested x.Data + m = compressIfRequested x.Meta t = DateTimeOffset.UtcNow } : Unfold) module internal Tip = - let private get (container : EquinoxCosmosOperations, stream : string) (maybePos: Position option) = + let private get (ops : EquinoxCosmosOperations, stream : string) (maybePos: Position option) = let ro = match maybePos with Some { etag=Some etag } -> ItemRequestOptions(IfNoneMatch=Nullable(Azure.ETag(etag))) | _ -> null - container.TryReadItem(Tip.WellKnownDocumentId, stream, options = ro) + ops.TryReadItem(Tip.WellKnownDocumentId, stream, options = ro) let private loggedGet (get : EquinoxCosmosOperations * string -> Position option -> Async<_>) (container,stream) (maybePos: Position option) (log: ILogger) = async { let log = log |> Log.prop "stream" stream let! 
t, (ru, res : ReadResult) = get (container,stream) maybePos |> Stopwatch.Time @@ -654,8 +648,7 @@ module internal Tip = | ReadResult.Found tip -> return Result.Found (Position.fromTip tip, Enum.EventsAndUnfolds tip |> Array.ofSeq) } module internal Query = - open FSharp.Control - let private mkQuery (container : EquinoxCosmosOperations, stream: string) maxItems (direction: Direction) startPos : AsyncSeq> = + let private mkQuery (ops : EquinoxCosmosOperations, stream: string) maxItems (direction: Direction) startPos : AsyncSeq> = let query = let root = sprintf "SELECT c.id, c.i, c._etag, c.n, c.e FROM c WHERE c.id!=\"%s\"" Tip.WellKnownDocumentId let tail = sprintf "ORDER BY c.i %s" (if direction = Direction.Forward then "ASC" else "DESC") @@ -665,7 +658,7 @@ module internal Tip = let cond = if direction = Direction.Forward then "c.n > @startPos" else "c.i < @startPos" QueryDefinition(sprintf "%s AND %s %s" root cond tail).WithParameter("@startPos", positionSoExclusiveWhenBackward) let qro = new QueryRequestOptions(PartitionKey = Nullable(PartitionKey stream), MaxItemCount=Nullable maxItems) - container.GetQueryIteratorByPage(query, options = qro) + ops.GetQueryIteratorByPage(query, options = qro) // Unrolls the Batches in a response - note when reading backwards, the events are emitted in reverse order of index let private processNextPage direction (streamName: string) startPos (enumerator: IAsyncEnumerator>) (log: ILogger) @@ -839,19 +832,13 @@ module Internal = namespace Equinox.Cosmos.Internal open Azure.Cosmos -open Equinox open Equinox.Core open Equinox.Cosmos.Store -open FsCodec -open FSharp.Control -open Serilog -open System open System.Collections.Concurrent -open System.Text.Json /// Defines policies for retrying with respect to transient failures calling CosmosDb (as opposed to application level concurrency conflicts) -type Connection(client: EquinoxCosmosOperations, []?readRetryPolicy: IRetryPolicy, []?writeRetryPolicy) = - member __.Client = client 
+type Connection(ops: EquinoxCosmosOperations, []?readRetryPolicy: IRetryPolicy, []?writeRetryPolicy) = + member __.Operations = ops member __.TipRetryPolicy = readRetryPolicy member __.QueryRetryPolicy = readRetryPolicy member __.WriteRetryPolicy = writeRetryPolicy @@ -871,7 +858,7 @@ type BatchingPolicy member __.MaxRequests = maxRequests type Gateway(conn : Connection, batching : BatchingPolicy) = - let (|FromUnfold|_|) (tryDecode: #IEventData<_> -> 'event option) (isOrigin: 'event -> bool) (xs:#IEventData<_>[]) : Option<'event[]> = + let (|FromUnfold|_|) (tryDecode: #FsCodec.IEventData<_> -> 'event option) (isOrigin: 'event -> bool) (xs:#FsCodec.IEventData<_>[]) : Option<'event[]> = let items = ResizeArray() let isOrigin' e = match tryDecode e with @@ -882,15 +869,15 @@ type Gateway(conn : Connection, batching : BatchingPolicy) = match Array.tryFindIndexBack isOrigin' xs with | None -> None | Some _ -> items.ToArray() |> Some - member __.Client = conn.Client + member __.Client = conn.Operations member __.LoadBackwardsStopping log (container, stream) (tryDecode,isOrigin): Async = async { let! pos, events = Query.walk log (container,stream) conn.QueryRetryPolicy batching.MaxItems batching.MaxRequests Direction.Backward None (tryDecode,isOrigin) - Array.Reverse events + System.Array.Reverse events return Token.create (container,stream) pos, events } member __.Read log (container,stream) direction startPos (tryDecode,isOrigin) : Async = async { let! 
pos, events = Query.walk log (container,stream) conn.QueryRetryPolicy batching.MaxItems batching.MaxRequests direction startPos (tryDecode,isOrigin) return Token.create (container,stream) pos, events } - member __.ReadLazy (batching: BatchingPolicy) log (container,stream) direction startPos (tryDecode,isOrigin) : AsyncSeq<'event[]> = + member __.ReadLazy (batching: BatchingPolicy) log (container,stream) direction startPos (tryDecode,isOrigin) : FSharp.Control.AsyncSeq<'event[]> = Query.walkLazy log (container,stream) conn.QueryRetryPolicy batching.MaxItems batching.MaxRequests direction startPos (tryDecode,isOrigin) member __.LoadFromUnfoldsOrRollingSnapshots log (containerStream,maybePos) (tryDecode,isOrigin): Async = async { let! res = Tip.tryLoad log conn.TipRetryPolicy containerStream maybePos @@ -922,10 +909,10 @@ type Gateway(conn : Connection, batching : BatchingPolicy) = | Sync.Result.Written pos' -> return InternalSyncResult.Written (Token.create containerStream pos') } /// Holds Container state, coordinating initialization activities -type private ContainerWrapper(container : EquinoxCosmosOperations, ?initContainer : unit -> Async) = +type private ContainerWrapper(ops : EquinoxCosmosOperations, ?initContainer : unit -> Async) = let initGuard = initContainer |> Option.map (fun init -> AsyncCacheCell(init ())) - member __.Container = container + member __.Container = ops member internal __.InitializationGate = match initGuard with Some g when g.PeekIsValid() |> not -> Some g.AwaitValue | _ -> None /// Defines a process for mapping from a Stream Name to the appropriate storage area, allowing control over segregation / co-locating of data @@ -937,10 +924,10 @@ type Containers(categoryAndIdToDatabaseContainerStream : string -> string -> str let genStreamName categoryName streamId = if categoryName = null then streamId else sprintf "%s-%s" categoryName streamId Containers(fun categoryName streamId -> databaseId, containerId, genStreamName categoryName streamId) 
- member internal __.Resolve(client : EquinoxCosmosOperations, categoryName, id, init) : (EquinoxCosmosOperations*string) * (unit -> Async) option = + member internal __.Resolve(ops : EquinoxCosmosOperations, categoryName, id, init) : (EquinoxCosmosOperations*string) * (unit -> Async) option = let databaseId, containerName, streamName = categoryAndIdToDatabaseContainerStream categoryName id let init = match disableInitialization with Some true -> None | _ -> Some init - let wrapped = wrappers.GetOrAdd((databaseId,containerName), fun _ -> ContainerWrapper(client, ?initContainer = init)) + let wrapped = wrappers.GetOrAdd((databaseId,containerName), fun _ -> ContainerWrapper(ops, ?initContainer = init)) (wrapped.Container,streamName),wrapped.InitializationGate namespace Equinox.Cosmos @@ -1044,21 +1031,20 @@ type private Folder<'event, 'state, 'context> | SyncResult.Conflict resync -> return SyncResult.Conflict resync | SyncResult.Written (token',state') -> return SyncResult.Written (token',state') } -/// Pairs a Gateway, defining the retry policies for CosmosDb with a Containers map defining mappings from (category,id) to (databaseId,containerId,streamName) +/// Defines a set of related access policies for a given CosmosDb, together with a Containers map defining mappings from (category,id) to (databaseId,containerId,streamName) type Context - ( client: EquinoxCosmosOperations, - ?log: ILogger, + ( ops: EquinoxCosmosOperations, ?defaultMaxItems: int, ?getDefaultMaxItems: unit -> int, ?maxRequests: int, ?readRetryPolicy: IRetryPolicy, ?writeRetryPolicy: IRetryPolicy ) = - let conn = Connection(client, ?readRetryPolicy = readRetryPolicy, ?writeRetryPolicy = writeRetryPolicy) - let batchingPolicy = BatchingPolicy(?defaultMaxItems = defaultMaxItems, ?getDefaultMaxItems =getDefaultMaxItems, ?maxRequests = maxRequests) + let conn = Connection(ops, ?readRetryPolicy = readRetryPolicy, ?writeRetryPolicy = writeRetryPolicy) + let batchingPolicy = 
BatchingPolicy(?defaultMaxItems = defaultMaxItems, ?getDefaultMaxItems = getDefaultMaxItems, ?maxRequests = maxRequests) let gateway = Gateway(conn, batchingPolicy) - let init = fun () -> EquinoxCosmosInitialization.createSyncStoredProcedure (client.GetContainer()) SyncStoredProcedure.defaultName None |> Async.Ignore - let containers = Containers(client.DatabaseId, client.ContainerId) + let init = fun () -> EquinoxCosmosInitialization.createSyncStoredProcedure (ops.GetContainer()) SyncStoredProcedure.defaultName None |> Async.Ignore + let containers = Containers(ops.DatabaseId, ops.ContainerId) member __.Gateway = gateway member __.Containers = containers @@ -1179,7 +1165,7 @@ type Discovery = UriAndKey (Uri uri, key) | _ -> invalidArg "connectionString" "unrecognized connection string format; must be `AccountEndpoint=https://...;AccountKey=...=;`" -type EquinoxCosmosOperationsFactory +type CosmosOperationsFactory ( /// Timeout to apply to individual reads/write round-trips going to CosmosDb requestTimeout: TimeSpan, /// Maximum number of times attempt when failure reason is a 429 from CosmosDb, signifying RU limits have been breached @@ -1233,8 +1219,8 @@ type EquinoxCosmosOperationsFactory // co.TransportClientHandlerFactory <- inhibitCertCheck co - abstract member CreateOperationsClient: name: string * discovery: Discovery * dbName: string * containerName: string * ?skipLog: bool -> EquinoxCosmosOperations - default __.CreateOperationsClient + abstract member Create: name: string * discovery: Discovery * dbName: string * containerName: string * ?skipLog: bool -> EquinoxCosmosOperations + default __.Create ( /// Name should be sufficient to uniquely identify this connection within a single app instance's logs name, discovery : Discovery, dbName: string, @@ -1246,11 +1232,10 @@ type EquinoxCosmosOperationsFactory if skipLog <> Some true then logName uri name let cosmosClient = new CosmosClient(string uri, key, __.CosmosClientOptions) - new 
EquinoxCosmosOperations(cosmosClient, dbName, containerName) + EquinoxCosmosOperations(cosmosClient, dbName, containerName) namespace Equinox.Cosmos.Core -open Equinox.Cosmos open Equinox.Cosmos.Internal open Equinox.Cosmos.Store open FsCodec @@ -1267,7 +1252,7 @@ type AppendResult<'t> = /// Encapsulates the core facilities Equinox.Cosmos offers for operating directly on Events in Streams. type Context - ( client: EquinoxCosmosOperations, + ( ops: EquinoxCosmosOperations, /// Logger to write to - see https://github.com/serilog/serilog/wiki/Provided-Sinks for how to wire to your logger log : Serilog.ILogger, /// Optional maximum number of Store.Batch records to retrieve as a set (how many Events are placed therein is controlled by average batch size when appending events @@ -1277,11 +1262,11 @@ type Context []?getDefaultMaxItems) = do if log = null then nullArg "log" - let conn = Equinox.Cosmos.Internal.Connection(client) - let containers = Containers(client.DatabaseId, client.ContainerId) + let conn = Equinox.Cosmos.Internal.Connection(ops) + let containers = Containers(ops.DatabaseId, ops.ContainerId) let getDefaultMaxItems = match getDefaultMaxItems with Some f -> f | None -> fun () -> defaultArg defaultMaxItems 10 let batching = BatchingPolicy(getDefaultMaxItems=getDefaultMaxItems) - let init = fun () -> EquinoxCosmosInitialization.createSyncStoredProcedure (client.GetContainer()) SyncStoredProcedure.defaultName None |> Async.Ignore + let init = fun () -> EquinoxCosmosInitialization.createSyncStoredProcedure (ops.GetContainer()) SyncStoredProcedure.defaultName None |> Async.Ignore let gateway = Gateway(conn, batching) let maxCountPredicate count = @@ -1295,7 +1280,7 @@ type Context let! 
(Token.Unpack (_,_,pos')), data = res return pos', data } - member __.ResolveStream(streamName) = containers.Resolve(conn.Client, null, streamName, init) + member __.ResolveStream(streamName) = containers.Resolve(conn.Operations, null, streamName, init) member __.CreateStream(streamName) = __.ResolveStream streamName |> fst member internal __.GetLazy((stream, startPos), ?batchSize, ?direction) : AsyncSeq[]> = diff --git a/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs b/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs index 0a8030b97..08c19ba7b 100644 --- a/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs +++ b/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs @@ -16,13 +16,13 @@ let (|Default|) def name = (read name),def ||> defaultArg let dbId = read "EQUINOX_COSMOS_DATABASE" |> Option.defaultValue "equinox-test" let cId = read "EQUINOX_COSMOS_CONTAINER" |> Option.defaultValue "equinox-test" -let private connectToCosmos (log: Serilog.ILogger) batchSize client = - Context(client, log = log, defaultMaxItems = batchSize) +let private connectToCosmos batchSize client = + Context(client, defaultMaxItems = batchSize) let createSpecifiedCosmosOrSimulatorClient log = let createClient name discovery = - EquinoxCosmosOperationsFactory(log=log, requestTimeout=TimeSpan.FromSeconds 3., maxRetryAttemptsOnRateLimitedRequests=2, maxRetryWaitTimeOnRateLimitedRequests=TimeSpan.FromMinutes 1.) - .CreateOperationsClient(name, discovery, dbId, cId) + CosmosOperationsFactory(log=log, requestTimeout=TimeSpan.FromSeconds 3., maxRetryAttemptsOnRateLimitedRequests=2, maxRetryWaitTimeOnRateLimitedRequests=TimeSpan.FromMinutes 1.) 
+ .Create(name, discovery, dbId, cId) match read "EQUINOX_COSMOS_CONNECTION" with | None -> @@ -34,6 +34,6 @@ let createSpecifiedCosmosOrSimulatorClient log = let connectToSpecifiedCosmosOrSimulator (log: Serilog.ILogger) batchSize = createSpecifiedCosmosOrSimulatorClient log - |> connectToCosmos log batchSize + |> connectToCosmos batchSize let defaultBatchSize = 500 diff --git a/tools/Equinox.Tool/Program.fs b/tools/Equinox.Tool/Program.fs index bbc4408af..6875108af 100644 --- a/tools/Equinox.Tool/Program.fs +++ b/tools/Equinox.Tool/Program.fs @@ -304,7 +304,7 @@ let createDomainLog verbose verboseConsole maybeSeqEndpoint = module CosmosInit = open Equinox.Cosmos.Store - let conn (log,verboseConsole,maybeSeq) (sargs : ParseResults) = + let conn (log,verboseConsole,maybeSeq) (sargs : ParseResults) = let storeLog = createStoreLog (sargs.Contains Storage.Cosmos.Arguments.VerboseStore) verboseConsole maybeSeq let discovery, dName, cName, factory = Storage.Cosmos.connection (log,storeLog) (Storage.Cosmos.Info sargs) storeLog, factory, discovery, dName, cName @@ -317,7 +317,7 @@ module CosmosInit = let modeStr, rus = match mode with Provisioning.Container rus -> "Container",rus | Provisioning.Database rus -> "Database",rus let _storeLog, factory, discovery, dName, cName = conn (log,verboseConsole,maybeSeq) sargs log.Information("Provisioning `Equinox.Cosmos` Store collection at {mode:l} level for {rus:n0} RU/s", modeStr, rus) - factory.CreateOperationsClient(appName, discovery, dName, cName).InitializeContainer(mode, not skipStoredProc) |> ignore + factory.Create(appName, discovery, dName, cName).InitializeContainer(mode, not skipStoredProc) |> ignore | _ -> failwith "please specify a `cosmos` endpoint" module SqlInit = @@ -347,7 +347,7 @@ module CosmosStats = let doS = doS || (not doD && not doE) // default to counting streams only unless otherwise specified let inParallel = args.Contains Parallel let _storeLog,factory,discovery,dName,cName = CosmosInit.conn 
(log,verboseConsole,maybeSeq) sargs - let client = factory.CreateOperationsClient(appName, discovery, dName, cName) + let client = factory.Create(appName, discovery, dName, cName) let ops = [ if doS then yield "Streams", """SELECT VALUE COUNT(1) FROM c WHERE c.id="-1" """ if doD then yield "Documents", """SELECT VALUE COUNT(1) FROM c""" From a5374fb2cd70786b7c9ab4b5feaeedfccbf34637 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Thu, 19 Mar 2020 18:41:17 +0000 Subject: [PATCH 56/71] Store.Operations ? --- samples/Infrastructure/Storage.fs | 2 +- src/Equinox.Cosmos/Cosmos.fs | 54 +++++++++---------- .../CosmosFixtures.fs | 2 +- 3 files changed, 29 insertions(+), 29 deletions(-) diff --git a/samples/Infrastructure/Storage.fs b/samples/Infrastructure/Storage.fs index 7039a954c..75e0a7d23 100644 --- a/samples/Infrastructure/Storage.fs +++ b/samples/Infrastructure/Storage.fs @@ -76,7 +76,7 @@ module Cosmos = a.Mode, endpointUri, a.Database, a.Container) log.Information("CosmosDb timeout {timeout}s; Throttling retries {retries}, max wait {maxRetryWaitTime}s", (let t = a.Timeout in t.TotalSeconds), a.Retries, let x = a.MaxRetryWaitTime in x.TotalSeconds) - discovery, a.Database, a.Container, CosmosOperationsFactory(a.Timeout, a.Retries, a.MaxRetryWaitTime, log=storeLog, mode=a.Mode) + discovery, a.Database, a.Container, StoreOperationsFactory(a.Timeout, a.Retries, a.MaxRetryWaitTime, log=storeLog, mode=a.Mode) let config (log: ILogger, storeLog) (cache, unfolds, batchSize) info = let discovery, dName, cName, factory = connection (log, storeLog) info let ctx = Context(factory.Create(appName, discovery, dName, cName), defaultMaxItems = batchSize) diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index 8bb0e3098..26581c40f 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -444,7 +444,7 @@ function sync(req, expIndex, expEtag) { module private CancellationToken = let useOrCreate = function Some ct -> async.Return ct | 
_ -> Async.CancellationToken -module EquinoxCosmosInitialization = +module Initialization = let internal getOrCreateDatabase (sdk: CosmosClient) (dbName: string) (throughput: ResourceThroughput) (cancellationToken: CancellationToken option) = async { let! ct = CancellationToken.useOrCreate cancellationToken let! response = @@ -500,7 +500,7 @@ module EquinoxCosmosInitialization = return container } -type EquinoxCosmosOperations (cosmosClient: CosmosClient, databaseId: string, containerId: string) = +type Operations(cosmosClient: CosmosClient, databaseId: string, containerId: string) = let containerClient = lazy(cosmosClient.GetContainer(databaseId, containerId)) member val DatabaseId = databaseId with get @@ -509,7 +509,7 @@ type EquinoxCosmosOperations (cosmosClient: CosmosClient, databaseId: string, co abstract member InitializeContainer: mode: Provisioning * createStoredProcedure: bool * ?storedProcedureName: string -> Async default __.InitializeContainer(mode, createStoredProcedure, storedProcedureName) = - EquinoxCosmosInitialization.initializeContainer cosmosClient databaseId containerId mode createStoredProcedure storedProcedureName None + Initialization.initializeContainer cosmosClient databaseId containerId mode createStoredProcedure storedProcedureName None abstract member GetContainer: unit -> CosmosContainer default __.GetContainer() = @@ -553,7 +553,7 @@ module Sync = | ConflictUnknown of Position type [] Exp = Version of int64 | Etag of string | Any - let private run (ops : EquinoxCosmosOperations, stream : string) (exp, req: Tip) + let private run (ops : Operations, stream : string) (exp, req: Tip) : Async = async { let ep = match exp with Exp.Version ev -> Position.fromI ev | Exp.Etag et -> Position.fromEtag et | Exp.Any -> Position.fromAppendAtEnd let args = [| box req; box ep.index; box (Option.toObj ep.etag)|] @@ -617,10 +617,10 @@ module Sync = } : Unfold) module internal Tip = - let private get (ops : EquinoxCosmosOperations, stream : string) 
(maybePos: Position option) = + let private get (ops : Operations, stream : string) (maybePos: Position option) = let ro = match maybePos with Some { etag=Some etag } -> ItemRequestOptions(IfNoneMatch=Nullable(Azure.ETag(etag))) | _ -> null ops.TryReadItem(Tip.WellKnownDocumentId, stream, options = ro) - let private loggedGet (get : EquinoxCosmosOperations * string -> Position option -> Async<_>) (container,stream) (maybePos: Position option) (log: ILogger) = async { + let private loggedGet (get : Operations * string -> Position option -> Async<_>) (container,stream) (maybePos: Position option) (log: ILogger) = async { let log = log |> Log.prop "stream" stream let! t, (ru, res : ReadResult) = get (container,stream) maybePos |> Stopwatch.Time let log bytes count (f : Log.Measurement -> _) = log |> Log.event (f { stream = stream; interval = t; bytes = bytes; count = count; ru = ru }) @@ -648,7 +648,7 @@ module internal Tip = | ReadResult.Found tip -> return Result.Found (Position.fromTip tip, Enum.EventsAndUnfolds tip |> Array.ofSeq) } module internal Query = - let private mkQuery (ops : EquinoxCosmosOperations, stream: string) maxItems (direction: Direction) startPos : AsyncSeq> = + let private mkQuery (ops : Operations, stream: string) maxItems (direction: Direction) startPos : AsyncSeq> = let query = let root = sprintf "SELECT c.id, c.i, c._etag, c.n, c.e FROM c WHERE c.id!=\"%s\"" Tip.WellKnownDocumentId let tail = sprintf "ORDER BY c.i %s" (if direction = Direction.Forward then "ASC" else "DESC") @@ -810,12 +810,12 @@ module internal Tip = let t = StopwatchInterval(startTicks, endTicks) log |> logQuery direction maxItems stream t (!responseCount,allEvents.ToArray()) -1L ru } -type [] Token = { container: EquinoxCosmosOperations; stream: string; pos: Position } +type [] Token = { container: Operations; stream: string; pos: Position } module Token = let create (container,stream) pos : StreamToken = { value = box { container = container; stream = stream; pos = pos 
} version = pos.index } - let (|Unpack|) (token: StreamToken) : EquinoxCosmosOperations*string*Position = let t = unbox token.value in t.container,t.stream,t.pos + let (|Unpack|) (token: StreamToken) : Operations*string*Position = let t = unbox token.value in t.container,t.stream,t.pos let supersedes (Unpack (_,_,currentPos)) (Unpack (_,_,xPos)) = let currentVersion, newVersion = currentPos.index, xPos.index let currentETag, newETag = currentPos.etag, xPos.etag @@ -837,7 +837,7 @@ open Equinox.Cosmos.Store open System.Collections.Concurrent /// Defines policies for retrying with respect to transient failures calling CosmosDb (as opposed to application level concurrency conflicts) -type Connection(ops: EquinoxCosmosOperations, []?readRetryPolicy: IRetryPolicy, []?writeRetryPolicy) = +type Connection(ops: Operations, []?readRetryPolicy: IRetryPolicy, []?writeRetryPolicy) = member __.Operations = ops member __.TipRetryPolicy = readRetryPolicy member __.QueryRetryPolicy = readRetryPolicy @@ -909,7 +909,7 @@ type Gateway(conn : Connection, batching : BatchingPolicy) = | Sync.Result.Written pos' -> return InternalSyncResult.Written (Token.create containerStream pos') } /// Holds Container state, coordinating initialization activities -type private ContainerWrapper(ops : EquinoxCosmosOperations, ?initContainer : unit -> Async) = +type private ContainerWrapper(ops : Operations, ?initContainer : unit -> Async) = let initGuard = initContainer |> Option.map (fun init -> AsyncCacheCell(init ())) member __.Container = ops @@ -924,7 +924,7 @@ type Containers(categoryAndIdToDatabaseContainerStream : string -> string -> str let genStreamName categoryName streamId = if categoryName = null then streamId else sprintf "%s-%s" categoryName streamId Containers(fun categoryName streamId -> databaseId, containerId, genStreamName categoryName streamId) - member internal __.Resolve(ops : EquinoxCosmosOperations, categoryName, id, init) : (EquinoxCosmosOperations*string) * (unit -> Async) 
option = + member internal __.Resolve(ops : Operations, categoryName, id, init) : (Operations*string) * (unit -> Async) option = let databaseId, containerName, streamName = categoryAndIdToDatabaseContainerStream categoryName id let init = match disableInitialization with Some true -> None | _ -> Some init let wrapped = wrappers.GetOrAdd((databaseId,containerName), fun _ -> ContainerWrapper(ops, ?initContainer = init)) @@ -978,14 +978,14 @@ type private Category<'event, 'state, 'context>(gateway : Gateway, codec : IEven module Caching = /// Forwards all state changes in all streams of an ICategory to a `tee` function - type CategoryTee<'event, 'state, 'context>(inner: ICategory<'event, 'state, EquinoxCosmosOperations*string,'context>, tee : string -> StreamToken * 'state -> Async) = + type CategoryTee<'event, 'state, 'context>(inner: ICategory<'event, 'state, Operations*string,'context>, tee : string -> StreamToken * 'state -> Async) = let intercept streamName tokenAndState = async { let! _ = tee streamName tokenAndState return tokenAndState } let loadAndIntercept load streamName = async { let! tokenAndState = load return! 
intercept streamName tokenAndState } - interface ICategory<'event, 'state, EquinoxCosmosOperations*string, 'context> with + interface ICategory<'event, 'state, Operations*string, 'context> with member __.Load(log, (container,streamName), opt) : Async = loadAndIntercept (inner.Load(log, (container,streamName), opt)) streamName member __.TrySync(log : ILogger, (Token.Unpack (_container,stream,_) as streamToken), state, events : 'event list, context, compress) @@ -1001,8 +1001,8 @@ module Caching = (cache : ICache) (prefix : string) (slidingExpiration : TimeSpan) - (category : ICategory<'event, 'state, EquinoxCosmosOperations*string, 'context>) - : ICategory<'event, 'state, EquinoxCosmosOperations*string, 'context> = + (category : ICategory<'event, 'state, Operations*string, 'context>) + : ICategory<'event, 'state, Operations*string, 'context> = let mkCacheEntry (initialToken : StreamToken, initialState : 'state) = new CacheEntry<'state>(initialToken, initialState, Token.supersedes) let options = CacheItemOptions.RelativeExpiration slidingExpiration let addOrUpdateSlidingExpirationCacheEntry streamName value = cache.UpdateIfNewer(prefix + streamName, options, mkCacheEntry value) @@ -1015,7 +1015,7 @@ type private Folder<'event, 'state, 'context> ?readCache) = let inspectUnfolds = match mapUnfolds with Choice1Of3 () -> false | _ -> true let batched log containerStream = category.Load inspectUnfolds containerStream fold initial isOrigin log - interface ICategory<'event, 'state, EquinoxCosmosOperations*string, 'context> with + interface ICategory<'event, 'state, Operations*string, 'context> with member __.Load(log, (container,streamName), opt): Async = match readCache with | None -> batched log (container,streamName) @@ -1033,7 +1033,7 @@ type private Folder<'event, 'state, 'context> /// Defines a set of related access policies for a given CosmosDb, together with a Containers map defining mappings from (category,id) to (databaseId,containerId,streamName) type Context - ( 
ops: EquinoxCosmosOperations, + ( ops: Operations, ?defaultMaxItems: int, ?getDefaultMaxItems: unit -> int, ?maxRequests: int, @@ -1043,12 +1043,12 @@ type Context let conn = Connection(ops, ?readRetryPolicy = readRetryPolicy, ?writeRetryPolicy = writeRetryPolicy) let batchingPolicy = BatchingPolicy(?defaultMaxItems = defaultMaxItems, ?getDefaultMaxItems = getDefaultMaxItems, ?maxRequests = maxRequests) let gateway = Gateway(conn, batchingPolicy) - let init = fun () -> EquinoxCosmosInitialization.createSyncStoredProcedure (ops.GetContainer()) SyncStoredProcedure.defaultName None |> Async.Ignore + let init = fun () -> Initialization.createSyncStoredProcedure (ops.GetContainer()) SyncStoredProcedure.defaultName None |> Async.Ignore let containers = Containers(ops.DatabaseId, ops.ContainerId) member __.Gateway = gateway member __.Containers = containers - member internal __.ResolveContainerStream(categoryName, id) : (EquinoxCosmosOperations*string) * (unit -> Async) option = + member internal __.ResolveContainerStream(categoryName, id) : (Operations*string) * (unit -> Async) option = containers.Resolve(gateway.Client, categoryName, id, init) [] @@ -1112,7 +1112,7 @@ type Resolver<'event, 'state, 'context>(context : Context, codec, fold, initial, | AccessStrategy.Custom (isOrigin,transmute) -> isOrigin, Choice3Of3 transmute let cosmosCat = Category<'event, 'state, 'context>(context.Gateway, codec) let folder = Folder<'event, 'state, 'context>(cosmosCat, fold, initial, isOrigin, mapUnfolds, ?readCache = readCacheOption) - let category : ICategory<_, _, EquinoxCosmosOperations*string, 'context> = + let category : ICategory<_, _, Operations*string, 'context> = match caching with | CachingStrategy.NoCaching -> folder :> _ | CachingStrategy.SlidingWindow(cache, window) -> @@ -1165,7 +1165,7 @@ type Discovery = UriAndKey (Uri uri, key) | _ -> invalidArg "connectionString" "unrecognized connection string format; must be `AccountEndpoint=https://...;AccountKey=...=;`" -type 
CosmosOperationsFactory +type StoreOperationsFactory ( /// Timeout to apply to individual reads/write round-trips going to CosmosDb requestTimeout: TimeSpan, /// Maximum number of times attempt when failure reason is a 429 from CosmosDb, signifying RU limits have been breached @@ -1219,20 +1219,20 @@ type CosmosOperationsFactory // co.TransportClientHandlerFactory <- inhibitCertCheck co - abstract member Create: name: string * discovery: Discovery * dbName: string * containerName: string * ?skipLog: bool -> EquinoxCosmosOperations + abstract member Create: name: string * discovery: Discovery * dbName: string * containerName: string * ?skipLog: bool -> Operations default __.Create ( /// Name should be sufficient to uniquely identify this connection within a single app instance's logs name, discovery : Discovery, dbName: string, containerName: string, /// true to inhibit logging of client name - []?skipLog) : EquinoxCosmosOperations = + []?skipLog) : Operations = let (Discovery.UriAndKey (databaseUri=uri; key=key)) = discovery if skipLog <> Some true then logName uri name let cosmosClient = new CosmosClient(string uri, key, __.CosmosClientOptions) - EquinoxCosmosOperations(cosmosClient, dbName, containerName) + Operations(cosmosClient, dbName, containerName) namespace Equinox.Cosmos.Core @@ -1252,7 +1252,7 @@ type AppendResult<'t> = /// Encapsulates the core facilities Equinox.Cosmos offers for operating directly on Events in Streams. 
type Context - ( ops: EquinoxCosmosOperations, + ( ops: Operations, /// Logger to write to - see https://github.com/serilog/serilog/wiki/Provided-Sinks for how to wire to your logger log : Serilog.ILogger, /// Optional maximum number of Store.Batch records to retrieve as a set (how many Events are placed therein is controlled by average batch size when appending events @@ -1266,7 +1266,7 @@ type Context let containers = Containers(ops.DatabaseId, ops.ContainerId) let getDefaultMaxItems = match getDefaultMaxItems with Some f -> f | None -> fun () -> defaultArg defaultMaxItems 10 let batching = BatchingPolicy(getDefaultMaxItems=getDefaultMaxItems) - let init = fun () -> EquinoxCosmosInitialization.createSyncStoredProcedure (ops.GetContainer()) SyncStoredProcedure.defaultName None |> Async.Ignore + let init = fun () -> Initialization.createSyncStoredProcedure (ops.GetContainer()) SyncStoredProcedure.defaultName None |> Async.Ignore let gateway = Gateway(conn, batching) let maxCountPredicate count = diff --git a/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs b/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs index 08c19ba7b..298869087 100644 --- a/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs +++ b/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs @@ -21,7 +21,7 @@ let private connectToCosmos batchSize client = let createSpecifiedCosmosOrSimulatorClient log = let createClient name discovery = - CosmosOperationsFactory(log=log, requestTimeout=TimeSpan.FromSeconds 3., maxRetryAttemptsOnRateLimitedRequests=2, maxRetryWaitTimeOnRateLimitedRequests=TimeSpan.FromMinutes 1.) + StoreOperationsFactory(log=log, requestTimeout=TimeSpan.FromSeconds 3., maxRetryAttemptsOnRateLimitedRequests=2, maxRetryWaitTimeOnRateLimitedRequests=TimeSpan.FromMinutes 1.) 
.Create(name, discovery, dbId, cId) match read "EQUINOX_COSMOS_CONNECTION" with From a94843addb5d50b021a9eada85d74a756b2f7897 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Thu, 19 Mar 2020 19:50:16 +0000 Subject: [PATCH 57/71] Store* naming --- samples/Infrastructure/Storage.fs | 2 +- samples/Store/Integration/CartIntegration.fs | 8 +- .../ContactPreferencesIntegration.fs | 12 +- .../Store/Integration/FavoritesIntegration.fs | 14 +-- src/Equinox.Cosmos/Cosmos.fs | 115 ++++++++---------- .../CosmosFixtures.fs | 2 +- .../CosmosIntegration.fs | 6 +- 7 files changed, 74 insertions(+), 85 deletions(-) diff --git a/samples/Infrastructure/Storage.fs b/samples/Infrastructure/Storage.fs index 75e0a7d23..5c4407b95 100644 --- a/samples/Infrastructure/Storage.fs +++ b/samples/Infrastructure/Storage.fs @@ -76,7 +76,7 @@ module Cosmos = a.Mode, endpointUri, a.Database, a.Container) log.Information("CosmosDb timeout {timeout}s; Throttling retries {retries}, max wait {maxRetryWaitTime}s", (let t = a.Timeout in t.TotalSeconds), a.Retries, let x = a.MaxRetryWaitTime in x.TotalSeconds) - discovery, a.Database, a.Container, StoreOperationsFactory(a.Timeout, a.Retries, a.MaxRetryWaitTime, log=storeLog, mode=a.Mode) + discovery, a.Database, a.Container, StoreGatewayFactory(a.Timeout, a.Retries, a.MaxRetryWaitTime, log=storeLog, mode=a.Mode) let config (log: ILogger, storeLog) (cache, unfolds, batchSize) info = let discovery, dName, cName, factory = connection (log, storeLog) info let ctx = Context(factory.Create(appName, discovery, dName, cName), defaultMaxItems = batchSize) diff --git a/samples/Store/Integration/CartIntegration.fs b/samples/Store/Integration/CartIntegration.fs index 65a17d521..68408f27d 100644 --- a/samples/Store/Integration/CartIntegration.fs +++ b/samples/Store/Integration/CartIntegration.fs @@ -24,10 +24,10 @@ let resolveGesStreamWithoutCustomAccessStrategy gateway = fun (id,opt) -> EventStore.Resolver(gateway, eventStoreCodec, fold, 
initial).Resolve(id,?option=opt) let cosmosCodec = Domain.Cart.Events.codecStj (FsCodec.SystemTextJson.Options.Create()) -let resolveCosmosStreamWithSnapshotStrategy gateway = - fun (id,opt) -> Cosmos.Resolver(gateway, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.Snapshot snapshot).Resolve(id,?option=opt) -let resolveCosmosStreamWithoutCustomAccessStrategy gateway = - fun (id,opt) -> Cosmos.Resolver(gateway, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.Unoptimized).Resolve(id,?option=opt) +let resolveCosmosStreamWithSnapshotStrategy context = + fun (id,opt) -> Cosmos.Resolver(context, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.Snapshot snapshot).Resolve(id,?option=opt) +let resolveCosmosStreamWithoutCustomAccessStrategy context = + fun (id,opt) -> Cosmos.Resolver(context, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.Unoptimized).Resolve(id,?option=opt) let addAndThenRemoveItemsManyTimesExceptTheLastOne context cartId skuId (service: Backend.Cart.Service) count = service.ExecuteManyAsync(cartId, false, seq { diff --git a/samples/Store/Integration/ContactPreferencesIntegration.fs b/samples/Store/Integration/ContactPreferencesIntegration.fs index bded74f99..763c1c4e8 100644 --- a/samples/Store/Integration/ContactPreferencesIntegration.fs +++ b/samples/Store/Integration/ContactPreferencesIntegration.fs @@ -20,13 +20,13 @@ let resolveStreamGesWithoutAccessStrategy gateway = EventStore.Resolver(gateway defaultBatchSize, eventStoreCodec, fold, initial).Resolve let cosmosCodec = Domain.ContactPreferences.Events.codecStj (FsCodec.SystemTextJson.Options.Create()) -let resolveStreamCosmosWithLatestKnownEventSemantics gateway = - Cosmos.Resolver(gateway, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.LatestKnownEvent).Resolve -let resolveStreamCosmosUnoptimized gateway = - 
Cosmos.Resolver(gateway, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.Unoptimized).Resolve -let resolveStreamCosmosRollingUnfolds gateway = +let resolveStreamCosmosWithLatestKnownEventSemantics context = + Cosmos.Resolver(context, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.LatestKnownEvent).Resolve +let resolveStreamCosmosUnoptimized context = + Cosmos.Resolver(context, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.Unoptimized).Resolve +let resolveStreamCosmosRollingUnfolds context = let access = Cosmos.AccessStrategy.Custom(Domain.ContactPreferences.Fold.isOrigin, Domain.ContactPreferences.Fold.transmute) - Cosmos.Resolver(gateway, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, access).Resolve + Cosmos.Resolver(context, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, access).Resolve type Tests(testOutputHelper) = let testOutput = TestOutputAdapter testOutputHelper diff --git a/samples/Store/Integration/FavoritesIntegration.fs b/samples/Store/Integration/FavoritesIntegration.fs index 73b439278..5da08e3c0 100644 --- a/samples/Store/Integration/FavoritesIntegration.fs +++ b/samples/Store/Integration/FavoritesIntegration.fs @@ -10,23 +10,23 @@ let fold, initial = Domain.Favorites.Fold.fold, Domain.Favorites.Fold.initial let snapshot = Domain.Favorites.Fold.isOrigin, Domain.Favorites.Fold.snapshot let createMemoryStore () = - new MemoryStore.VolatileStore<_>() + MemoryStore.VolatileStore<_>() let createServiceMemory log store = Backend.Favorites.create log (MemoryStore.Resolver(store, FsCodec.Box.Codec.Create(), fold, initial).Resolve) let eventStoreCodec = Domain.Favorites.Events.codecNewtonsoft -let createServiceGes gateway log = - let resolver = EventStore.Resolver(gateway, eventStoreCodec, fold, initial, access = EventStore.AccessStrategy.RollingSnapshots snapshot) +let createServiceGes context log = + let resolver = 
EventStore.Resolver(context, eventStoreCodec, fold, initial, access = EventStore.AccessStrategy.RollingSnapshots snapshot) Backend.Favorites.create log resolver.Resolve let cosmosCodec = Domain.Favorites.Events.codecStj -let createServiceCosmos gateway log = - let resolver = Cosmos.Resolver(gateway, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.Snapshot snapshot) +let createServiceCosmos context log = + let resolver = Cosmos.Resolver(context, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.Snapshot snapshot) Backend.Favorites.create log resolver.Resolve -let createServiceCosmosRollingState gateway log = +let createServiceCosmosRollingState context log = let access = Cosmos.AccessStrategy.RollingState Domain.Favorites.Fold.snapshot - let resolver = Cosmos.Resolver(gateway, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, access) + let resolver = Cosmos.Resolver(context, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, access) Backend.Favorites.create log resolver.Resolve type Tests(testOutputHelper) = diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index 26581c40f..4afeb2fa9 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -500,7 +500,7 @@ module Initialization = return container } -type Operations(cosmosClient: CosmosClient, databaseId: string, containerId: string) = +type StoreGateway(cosmosClient: CosmosClient, databaseId: string, containerId: string) = let containerClient = lazy(cosmosClient.GetContainer(databaseId, containerId)) member val DatabaseId = databaseId with get @@ -553,11 +553,11 @@ module Sync = | ConflictUnknown of Position type [] Exp = Version of int64 | Etag of string | Any - let private run (ops : Operations, stream : string) (exp, req: Tip) + let private run (gateway : StoreGateway, stream : string) (exp, req: Tip) : Async = async { let ep = match exp with Exp.Version ev -> Position.fromI 
ev | Exp.Etag et -> Position.fromEtag et | Exp.Any -> Position.fromAppendAtEnd let args = [| box req; box ep.index; box (Option.toObj ep.etag)|] - let! res = ops.ExecuteStoredProcedure(SyncStoredProcedure.defaultName, stream, args) + let! res = gateway.ExecuteStoredProcedure(SyncStoredProcedure.defaultName, stream, args) let newPos = { index = res.Value.n; etag = Option.ofObj res.Value.etag } return res.GetRawResponse().Headers.GetRequestCharge(), res.Value.conflicts |> function | null -> Result.Written newPos @@ -617,10 +617,10 @@ module Sync = } : Unfold) module internal Tip = - let private get (ops : Operations, stream : string) (maybePos: Position option) = + let private get (gateway : StoreGateway, stream : string) (maybePos: Position option) = let ro = match maybePos with Some { etag=Some etag } -> ItemRequestOptions(IfNoneMatch=Nullable(Azure.ETag(etag))) | _ -> null - ops.TryReadItem(Tip.WellKnownDocumentId, stream, options = ro) - let private loggedGet (get : Operations * string -> Position option -> Async<_>) (container,stream) (maybePos: Position option) (log: ILogger) = async { + gateway.TryReadItem(Tip.WellKnownDocumentId, stream, options = ro) + let private loggedGet (get : StoreGateway * string -> Position option -> Async<_>) (container,stream) (maybePos: Position option) (log: ILogger) = async { let log = log |> Log.prop "stream" stream let! 
t, (ru, res : ReadResult) = get (container,stream) maybePos |> Stopwatch.Time let log bytes count (f : Log.Measurement -> _) = log |> Log.event (f { stream = stream; interval = t; bytes = bytes; count = count; ru = ru }) @@ -648,7 +648,7 @@ module internal Tip = | ReadResult.Found tip -> return Result.Found (Position.fromTip tip, Enum.EventsAndUnfolds tip |> Array.ofSeq) } module internal Query = - let private mkQuery (ops : Operations, stream: string) maxItems (direction: Direction) startPos : AsyncSeq> = + let private mkQuery (gateway : StoreGateway, stream: string) maxItems (direction: Direction) startPos : AsyncSeq> = let query = let root = sprintf "SELECT c.id, c.i, c._etag, c.n, c.e FROM c WHERE c.id!=\"%s\"" Tip.WellKnownDocumentId let tail = sprintf "ORDER BY c.i %s" (if direction = Direction.Forward then "ASC" else "DESC") @@ -658,7 +658,7 @@ module internal Tip = let cond = if direction = Direction.Forward then "c.n > @startPos" else "c.i < @startPos" QueryDefinition(sprintf "%s AND %s %s" root cond tail).WithParameter("@startPos", positionSoExclusiveWhenBackward) let qro = new QueryRequestOptions(PartitionKey = Nullable(PartitionKey stream), MaxItemCount=Nullable maxItems) - ops.GetQueryIteratorByPage(query, options = qro) + gateway.GetQueryIteratorByPage(query, options = qro) // Unrolls the Batches in a response - note when reading backwards, the events are emitted in reverse order of index let private processNextPage direction (streamName: string) startPos (enumerator: IAsyncEnumerator>) (log: ILogger) @@ -810,12 +810,12 @@ module internal Tip = let t = StopwatchInterval(startTicks, endTicks) log |> logQuery direction maxItems stream t (!responseCount,allEvents.ToArray()) -1L ru } -type [] Token = { container: Operations; stream: string; pos: Position } +type [] Token = { gateway: StoreGateway; stream: string; pos: Position } module Token = - let create (container,stream) pos : StreamToken = - { value = box { container = container; stream = stream; pos 
= pos } + let create (gateway,stream) pos : StreamToken = + { value = box { gateway = gateway; stream = stream; pos = pos } version = pos.index } - let (|Unpack|) (token: StreamToken) : Operations*string*Position = let t = unbox token.value in t.container,t.stream,t.pos + let (|Unpack|) (token: StreamToken) : StoreGateway*string*Position = let t = unbox token.value in t.gateway,t.stream,t.pos let supersedes (Unpack (_,_,currentPos)) (Unpack (_,_,xPos)) = let currentVersion, newVersion = currentPos.index, xPos.index let currentETag, newETag = currentPos.etag, xPos.etag @@ -829,16 +829,9 @@ module Internal = [] type LoadFromTokenResult<'event> = Unchanged | Found of StreamToken * 'event[] -namespace Equinox.Cosmos.Internal - -open Azure.Cosmos -open Equinox.Core -open Equinox.Cosmos.Store -open System.Collections.Concurrent - /// Defines policies for retrying with respect to transient failures calling CosmosDb (as opposed to application level concurrency conflicts) -type Connection(ops: Operations, []?readRetryPolicy: IRetryPolicy, []?writeRetryPolicy) = - member __.Operations = ops +type StoreConnection(gateway: StoreGateway, []?readRetryPolicy: IRetryPolicy, []?writeRetryPolicy) = + member __.Gateway = gateway member __.TipRetryPolicy = readRetryPolicy member __.QueryRetryPolicy = readRetryPolicy member __.WriteRetryPolicy = writeRetryPolicy @@ -857,7 +850,7 @@ type BatchingPolicy /// Maximum number of trips to permit when slicing the work into multiple responses based on `MaxItems` member __.MaxRequests = maxRequests -type Gateway(conn : Connection, batching : BatchingPolicy) = +type StoreClient(conn : StoreConnection, batching : BatchingPolicy) = let (|FromUnfold|_|) (tryDecode: #FsCodec.IEventData<_> -> 'event option) (isOrigin: 'event -> bool) (xs:#FsCodec.IEventData<_>[]) : Option<'event[]> = let items = ResizeArray() let isOrigin' e = @@ -869,7 +862,7 @@ type Gateway(conn : Connection, batching : BatchingPolicy) = match Array.tryFindIndexBack isOrigin' xs 
with | None -> None | Some _ -> items.ToArray() |> Some - member __.Client = conn.Operations + member __.Operations = conn.Gateway member __.LoadBackwardsStopping log (container, stream) (tryDecode,isOrigin): Async = async { let! pos, events = Query.walk log (container,stream) conn.QueryRetryPolicy batching.MaxItems batching.MaxRequests Direction.Backward None (tryDecode,isOrigin) System.Array.Reverse events @@ -909,51 +902,48 @@ type Gateway(conn : Connection, batching : BatchingPolicy) = | Sync.Result.Written pos' -> return InternalSyncResult.Written (Token.create containerStream pos') } /// Holds Container state, coordinating initialization activities -type private ContainerWrapper(ops : Operations, ?initContainer : unit -> Async) = +type private ContainerWrapper(gateway : StoreGateway, ?initContainer : unit -> Async) = let initGuard = initContainer |> Option.map (fun init -> AsyncCacheCell(init ())) - member __.Container = ops + member __.Gateway = gateway member internal __.InitializationGate = match initGuard with Some g when g.PeekIsValid() |> not -> Some g.AwaitValue | _ -> None /// Defines a process for mapping from a Stream Name to the appropriate storage area, allowing control over segregation / co-locating of data type Containers(categoryAndIdToDatabaseContainerStream : string -> string -> string*string*string, []?disableInitialization) = // Index of database*collection -> Initialization Context - let wrappers = ConcurrentDictionary() + let wrappers = System.Collections.Concurrent.ConcurrentDictionary() new (databaseId, containerId) = // TOCONSIDER - this works to support the Core.Events APIs let genStreamName categoryName streamId = if categoryName = null then streamId else sprintf "%s-%s" categoryName streamId Containers(fun categoryName streamId -> databaseId, containerId, genStreamName categoryName streamId) - member internal __.Resolve(ops : Operations, categoryName, id, init) : (Operations*string) * (unit -> Async) option = + member internal 
__.Resolve(ops : StoreGateway, categoryName, id, init) : (StoreGateway*string) * (unit -> Async) option = let databaseId, containerName, streamName = categoryAndIdToDatabaseContainerStream categoryName id let init = match disableInitialization with Some true -> None | _ -> Some init let wrapped = wrappers.GetOrAdd((databaseId,containerName), fun _ -> ContainerWrapper(ops, ?initContainer = init)) - (wrapped.Container,streamName),wrapped.InitializationGate + (wrapped.Gateway,streamName),wrapped.InitializationGate namespace Equinox.Cosmos open Azure.Cosmos open Equinox open Equinox.Core -open Equinox.Cosmos.Internal open Equinox.Cosmos.Store open FsCodec open FSharp.Control open Serilog open System -open System.Collections.Concurrent open System.Text.Json -open System.Threading -type private Category<'event, 'state, 'context>(gateway : Gateway, codec : IEventCodec<'event,JsonElement,'context>) = +type private Category<'event, 'state, 'context>(client : StoreClient, codec : IEventCodec<'event,JsonElement,'context>) = let (|TryDecodeFold|) (fold: 'state -> 'event seq -> 'state) initial (events: ITimelineEvent seq) : 'state = Seq.choose codec.TryDecode events |> fold initial member __.Load includeUnfolds containerStream fold initial isOrigin (log : ILogger): Async = async { let! token, events = - if not includeUnfolds then gateway.LoadBackwardsStopping log containerStream (codec.TryDecode,isOrigin) - else gateway.LoadFromUnfoldsOrRollingSnapshots log (containerStream,None) (codec.TryDecode,isOrigin) + if not includeUnfolds then client.LoadBackwardsStopping log containerStream (codec.TryDecode,isOrigin) + else client.LoadFromUnfoldsOrRollingSnapshots log (containerStream,None) (codec.TryDecode,isOrigin) return token, fold initial events } member __.LoadFromToken (Token.Unpack streamPos, state: 'state as current) fold isOrigin (log : ILogger): Async = async { - let! res = gateway.LoadFromToken(log, streamPos, (codec.TryDecode,isOrigin)) + let! 
res = client.LoadFromToken(log, streamPos, (codec.TryDecode,isOrigin)) match res with | LoadFromTokenResult.Unchanged -> return current | LoadFromTokenResult.Found (token', events') -> return token', fold state events' } @@ -970,7 +960,7 @@ type private Category<'event, 'state, 'context>(gateway : Gateway, codec : IEven let baseIndex = pos.index + int64 (List.length events) let projections = Sync.mkUnfold compress baseIndex projectionsEncoded let batch = Sync.mkBatch stream eventsEncoded projections - let! res = gateway.Sync log (container,stream) (exp,batch) + let! res = client.Sync log (container,stream) (exp,batch) match res with | InternalSyncResult.Conflict (token',TryDecodeFold fold state events') -> return SyncResult.Conflict (async { return token', events' }) | InternalSyncResult.ConflictUnknown _token' -> return SyncResult.Conflict (__.LoadFromToken current fold isOrigin log) @@ -978,14 +968,14 @@ type private Category<'event, 'state, 'context>(gateway : Gateway, codec : IEven module Caching = /// Forwards all state changes in all streams of an ICategory to a `tee` function - type CategoryTee<'event, 'state, 'context>(inner: ICategory<'event, 'state, Operations*string,'context>, tee : string -> StreamToken * 'state -> Async) = + type CategoryTee<'event, 'state, 'context>(inner: ICategory<'event, 'state, StoreGateway*string,'context>, tee : string -> StreamToken * 'state -> Async) = let intercept streamName tokenAndState = async { let! _ = tee streamName tokenAndState return tokenAndState } let loadAndIntercept load streamName = async { let! tokenAndState = load return! 
intercept streamName tokenAndState } - interface ICategory<'event, 'state, Operations*string, 'context> with + interface ICategory<'event, 'state, StoreGateway*string, 'context> with member __.Load(log, (container,streamName), opt) : Async = loadAndIntercept (inner.Load(log, (container,streamName), opt)) streamName member __.TrySync(log : ILogger, (Token.Unpack (_container,stream,_) as streamToken), state, events : 'event list, context, compress) @@ -1001,8 +991,8 @@ module Caching = (cache : ICache) (prefix : string) (slidingExpiration : TimeSpan) - (category : ICategory<'event, 'state, Operations*string, 'context>) - : ICategory<'event, 'state, Operations*string, 'context> = + (category : ICategory<'event, 'state, StoreGateway*string, 'context>) + : ICategory<'event, 'state, StoreGateway*string, 'context> = let mkCacheEntry (initialToken : StreamToken, initialState : 'state) = new CacheEntry<'state>(initialToken, initialState, Token.supersedes) let options = CacheItemOptions.RelativeExpiration slidingExpiration let addOrUpdateSlidingExpirationCacheEntry streamName value = cache.UpdateIfNewer(prefix + streamName, options, mkCacheEntry value) @@ -1015,7 +1005,7 @@ type private Folder<'event, 'state, 'context> ?readCache) = let inspectUnfolds = match mapUnfolds with Choice1Of3 () -> false | _ -> true let batched log containerStream = category.Load inspectUnfolds containerStream fold initial isOrigin log - interface ICategory<'event, 'state, Operations*string, 'context> with + interface ICategory<'event, 'state, StoreGateway*string, 'context> with member __.Load(log, (container,streamName), opt): Async = match readCache with | None -> batched log (container,streamName) @@ -1033,23 +1023,23 @@ type private Folder<'event, 'state, 'context> /// Defines a set of related access policies for a given CosmosDb, together with a Containers map defining mappings from (category,id) to (databaseId,containerId,streamName) type Context - ( ops: Operations, + ( gateway: 
StoreGateway, ?defaultMaxItems: int, ?getDefaultMaxItems: unit -> int, ?maxRequests: int, ?readRetryPolicy: IRetryPolicy, ?writeRetryPolicy: IRetryPolicy ) = - let conn = Connection(ops, ?readRetryPolicy = readRetryPolicy, ?writeRetryPolicy = writeRetryPolicy) + let conn = StoreConnection(gateway, ?readRetryPolicy = readRetryPolicy, ?writeRetryPolicy = writeRetryPolicy) let batchingPolicy = BatchingPolicy(?defaultMaxItems = defaultMaxItems, ?getDefaultMaxItems = getDefaultMaxItems, ?maxRequests = maxRequests) - let gateway = Gateway(conn, batchingPolicy) - let init = fun () -> Initialization.createSyncStoredProcedure (ops.GetContainer()) SyncStoredProcedure.defaultName None |> Async.Ignore - let containers = Containers(ops.DatabaseId, ops.ContainerId) + let client = StoreClient(conn, batchingPolicy) + let init = fun () -> Initialization.createSyncStoredProcedure (gateway.GetContainer()) SyncStoredProcedure.defaultName None |> Async.Ignore + let containers = Containers(gateway.DatabaseId, gateway.ContainerId) - member __.Gateway = gateway + member __.Gateway = client member __.Containers = containers - member internal __.ResolveContainerStream(categoryName, id) : (Operations*string) * (unit -> Async) option = - containers.Resolve(gateway.Client, categoryName, id, init) + member internal __.ResolveContainerStream(categoryName, id) : (StoreGateway*string) * (unit -> Async) option = + containers.Resolve(client.Operations, categoryName, id, init) [] type CachingStrategy = @@ -1112,7 +1102,7 @@ type Resolver<'event, 'state, 'context>(context : Context, codec, fold, initial, | AccessStrategy.Custom (isOrigin,transmute) -> isOrigin, Choice3Of3 transmute let cosmosCat = Category<'event, 'state, 'context>(context.Gateway, codec) let folder = Folder<'event, 'state, 'context>(cosmosCat, fold, initial, isOrigin, mapUnfolds, ?readCache = readCacheOption) - let category : ICategory<_, _, Operations*string, 'context> = + let category : ICategory<_, _, StoreGateway*string, 
'context> = match caching with | CachingStrategy.NoCaching -> folder :> _ | CachingStrategy.SlidingWindow(cache, window) -> @@ -1165,7 +1155,7 @@ type Discovery = UriAndKey (Uri uri, key) | _ -> invalidArg "connectionString" "unrecognized connection string format; must be `AccountEndpoint=https://...;AccountKey=...=;`" -type StoreOperationsFactory +type StoreGatewayFactory ( /// Timeout to apply to individual reads/write round-trips going to CosmosDb requestTimeout: TimeSpan, /// Maximum number of times attempt when failure reason is a 429 from CosmosDb, signifying RU limits have been breached @@ -1219,24 +1209,23 @@ type StoreOperationsFactory // co.TransportClientHandlerFactory <- inhibitCertCheck co - abstract member Create: name: string * discovery: Discovery * dbName: string * containerName: string * ?skipLog: bool -> Operations + abstract member Create: name: string * discovery: Discovery * dbName: string * containerName: string * ?skipLog: bool -> StoreGateway default __.Create ( /// Name should be sufficient to uniquely identify this connection within a single app instance's logs name, discovery : Discovery, dbName: string, containerName: string, /// true to inhibit logging of client name - []?skipLog) : Operations = + []?skipLog) : StoreGateway = let (Discovery.UriAndKey (databaseUri=uri; key=key)) = discovery if skipLog <> Some true then logName uri name let cosmosClient = new CosmosClient(string uri, key, __.CosmosClientOptions) - Operations(cosmosClient, dbName, containerName) + StoreGateway(cosmosClient, dbName, containerName) namespace Equinox.Cosmos.Core -open Equinox.Cosmos.Internal open Equinox.Cosmos.Store open FsCodec open FSharp.Control @@ -1252,7 +1241,7 @@ type AppendResult<'t> = /// Encapsulates the core facilities Equinox.Cosmos offers for operating directly on Events in Streams. 
type Context - ( ops: Operations, + ( ops: StoreGateway, /// Logger to write to - see https://github.com/serilog/serilog/wiki/Provided-Sinks for how to wire to your logger log : Serilog.ILogger, /// Optional maximum number of Store.Batch records to retrieve as a set (how many Events are placed therein is controlled by average batch size when appending events @@ -1262,12 +1251,12 @@ type Context []?getDefaultMaxItems) = do if log = null then nullArg "log" - let conn = Equinox.Cosmos.Internal.Connection(ops) + let conn = Equinox.Cosmos.Store.StoreConnection(ops) let containers = Containers(ops.DatabaseId, ops.ContainerId) let getDefaultMaxItems = match getDefaultMaxItems with Some f -> f | None -> fun () -> defaultArg defaultMaxItems 10 let batching = BatchingPolicy(getDefaultMaxItems=getDefaultMaxItems) let init = fun () -> Initialization.createSyncStoredProcedure (ops.GetContainer()) SyncStoredProcedure.defaultName None |> Async.Ignore - let gateway = Gateway(conn, batching) + let client = StoreClient(conn, batching) let maxCountPredicate count = let acc = ref (max (count-1) 0) @@ -1280,13 +1269,13 @@ type Context let! 
(Token.Unpack (_,_,pos')), data = res return pos', data } - member __.ResolveStream(streamName) = containers.Resolve(conn.Operations, null, streamName, init) + member __.ResolveStream(streamName) = containers.Resolve(conn.Gateway, null, streamName, init) member __.CreateStream(streamName) = __.ResolveStream streamName |> fst member internal __.GetLazy((stream, startPos), ?batchSize, ?direction) : AsyncSeq[]> = let direction = defaultArg direction Direction.Forward let batching = BatchingPolicy(defaultArg batchSize batching.MaxItems) - gateway.ReadLazy batching log stream direction startPos (Some,fun _ -> false) + client.ReadLazy batching log stream direction startPos (Some,fun _ -> false) member internal __.GetInternal((stream, startPos), ?maxCount, ?direction) = async { let direction = defaultArg direction Direction.Forward @@ -1298,12 +1287,12 @@ type Context match maxCount with | Some limit -> maxCountPredicate limit | None -> fun _ -> false - return! gateway.Read log stream direction startPos (Some,isOrigin) } + return! client.Read log stream direction startPos (Some,isOrigin) } /// Establishes the current position of the stream in as efficient a manner as possible /// (The ideal situation is that the preceding token is supplied as input in order to avail of 1RU low latency state checks) member __.Sync(stream, ?position: Position) : Async = async { - let! (Token.Unpack (_,_,pos')) = gateway.GetPosition(log, stream, ?pos=position) + let! (Token.Unpack (_,_,pos')) = client.GetPosition(log, stream, ?pos=position) return pos' } /// Reads in batches of `batchSize` from the specified `Position`, allowing the reader to efficiently walk away from a running query @@ -1325,7 +1314,7 @@ type Context | None -> () | Some init -> do! init () let batch = Sync.mkBatch stream events Seq.empty - let! res = gateway.Sync log (container,stream) (Sync.Exp.Version position.index,batch) + let! 
res = client.Sync log (container,stream) (Sync.Exp.Version position.index,batch) match res with | InternalSyncResult.Written (Token.Unpack (_,_,pos)) -> return AppendResult.Ok pos | InternalSyncResult.Conflict (Token.Unpack (_,_,pos),events) -> return AppendResult.Conflict (pos, events) diff --git a/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs b/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs index 298869087..aa6b6cece 100644 --- a/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs +++ b/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs @@ -21,7 +21,7 @@ let private connectToCosmos batchSize client = let createSpecifiedCosmosOrSimulatorClient log = let createClient name discovery = - StoreOperationsFactory(log=log, requestTimeout=TimeSpan.FromSeconds 3., maxRetryAttemptsOnRateLimitedRequests=2, maxRetryWaitTimeOnRateLimitedRequests=TimeSpan.FromMinutes 1.) + StoreGatewayFactory(log=log, requestTimeout=TimeSpan.FromSeconds 3., maxRetryAttemptsOnRateLimitedRequests=2, maxRetryWaitTimeOnRateLimitedRequests=TimeSpan.FromMinutes 1.) 
.Create(name, discovery, dbId, cId) match read "EQUINOX_COSMOS_CONNECTION" with diff --git a/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs b/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs index d09d27480..0e681dfee 100644 --- a/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs +++ b/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs @@ -36,9 +36,9 @@ module Cart = module ContactPreferences = let fold, initial = Domain.ContactPreferences.Fold.fold, Domain.ContactPreferences.Fold.initial let codec = Domain.ContactPreferences.Events.codecStj IntegrationJsonSerializer.options - let createServiceWithoutOptimization createGateway defaultBatchSize log _ignoreWindowSize _ignoreCompactionPredicate = - let gateway = createGateway defaultBatchSize - let resolve = Resolver(gateway, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.Unoptimized).Resolve + let createServiceWithoutOptimization createContext defaultBatchSize log _ignoreWindowSize _ignoreCompactionPredicate = + let context = createContext defaultBatchSize + let resolve = Resolver(context, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.Unoptimized).Resolve Backend.ContactPreferences.create log resolve let createService log store = let resolve = Resolver(store, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.LatestKnownEvent).Resolve From 0b1a4009b092ee746bbe415f38a3e5c1317c5f50 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Thu, 19 Mar 2020 21:36:47 +0000 Subject: [PATCH 58/71] Move Converter out of type definitions --- src/Equinox.Cosmos/Cosmos.fs | 51 +++------------------- src/Equinox.Cosmos/CosmosJsonSerializer.fs | 40 +++++++++++++++++ 2 files changed, 45 insertions(+), 46 deletions(-) diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index 4afeb2fa9..e741b4577 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -7,9 +7,7 @@ open FsCodec open FSharp.Control open Serilog open System -open 
System.IO open System.Text.Json -open System.Text.Json.Serialization open System.Threading /// A single Domain Event from the array held in a Batch @@ -74,44 +72,6 @@ type [] // TODO for STJ v5: All fields required unless /// As one cannot sort by the implicit `id` field, we have an indexed `i` field for sort and range query use static member internal IndexedFields = [Batch.PartitionKeyField; "i"; "n"] -/// Manages zipping of the UTF-8 json bytes to make the index record minimal from the perspective of the writer stored proc -/// Only applied to snapshots in the Tip -type JsonCompressedBase64Converter() = - inherit JsonConverter() - - static member Compress (value: JsonElement) = - if value.ValueKind = JsonValueKind.Null || value.ValueKind = JsonValueKind.Undefined then - value - else - let input = System.Text.Encoding.UTF8.GetBytes(value.GetRawText()) - use output = new MemoryStream() - use compressor = new System.IO.Compression.DeflateStream(output, System.IO.Compression.CompressionLevel.Optimal) - compressor.Write(input, 0, input.Length) - compressor.Close() - JsonDocument.Parse("\"" + System.Convert.ToBase64String(output.ToArray()) + "\"").RootElement - - override __.Read (reader, _typeToConvert, options) = - if reader.TokenType <> JsonTokenType.String then - JsonSerializer.Deserialize(&reader, options) - else - let compressedBytes = reader.GetBytesFromBase64() - use input = new MemoryStream(compressedBytes) - use decompressor = new System.IO.Compression.DeflateStream(input, System.IO.Compression.CompressionMode.Decompress) - use output = new MemoryStream() - decompressor.CopyTo(output) - JsonSerializer.Deserialize(ReadOnlySpan.op_Implicit(output.ToArray()), options) - - override __.Write (writer, value, options) = - JsonSerializer.Serialize(writer, value, options) - -type JsonCompressedBase64ConverterAttribute () = - inherit JsonConverterAttribute(typeof) - - static let converter = JsonCompressedBase64Converter() - - override __.CreateConverter _typeToConvert 
= - converter :> JsonConverter - /// Compaction/Snapshot/Projection Event based on the state at a given point in time `i` [] type Unfold = @@ -136,8 +96,7 @@ type Unfold = /// The special-case 'Pending' Batch Format used to read the currently active (and mutable) document /// Stored representation has the following diffs vs a 'normal' (frozen/completed) Batch: a) `id` = `-1` b) contains unfolds (`u`) /// NB the type does double duty as a) model for when we read it b) encoding a batch being sent to the stored proc -type [] // TODO for STJ v5: All fields required unless explicitly optional - Tip = +type [] Tip = // TODO for STJ v5: All fields required unless explicitly optional { /// Partition key, as per Batch p: string // "{streamName}" TODO for STJ v5: Optional, not requested in queries @@ -851,7 +810,7 @@ type BatchingPolicy member __.MaxRequests = maxRequests type StoreClient(conn : StoreConnection, batching : BatchingPolicy) = - let (|FromUnfold|_|) (tryDecode: #FsCodec.IEventData<_> -> 'event option) (isOrigin: 'event -> bool) (xs:#FsCodec.IEventData<_>[]) : Option<'event[]> = + let (|FromUnfold|_|) (tryDecode: #IEventData<_> -> 'event option) (isOrigin: 'event -> bool) (xs:#IEventData<_>[]) : Option<'event[]> = let items = ResizeArray() let isOrigin' e = match tryDecode e with @@ -870,7 +829,7 @@ type StoreClient(conn : StoreConnection, batching : BatchingPolicy) = member __.Read log (container,stream) direction startPos (tryDecode,isOrigin) : Async = async { let! 
pos, events = Query.walk log (container,stream) conn.QueryRetryPolicy batching.MaxItems batching.MaxRequests direction startPos (tryDecode,isOrigin) return Token.create (container,stream) pos, events } - member __.ReadLazy (batching: BatchingPolicy) log (container,stream) direction startPos (tryDecode,isOrigin) : FSharp.Control.AsyncSeq<'event[]> = + member __.ReadLazy (batching: BatchingPolicy) log (container,stream) direction startPos (tryDecode,isOrigin) : AsyncSeq<'event[]> = Query.walkLazy log (container,stream) conn.QueryRetryPolicy batching.MaxItems batching.MaxRequests direction startPos (tryDecode,isOrigin) member __.LoadFromUnfoldsOrRollingSnapshots log (containerStream,maybePos) (tryDecode,isOrigin): Async = async { let! res = Tip.tryLoad log conn.TipRetryPolicy containerStream maybePos @@ -917,10 +876,10 @@ type Containers(categoryAndIdToDatabaseContainerStream : string -> string -> str let genStreamName categoryName streamId = if categoryName = null then streamId else sprintf "%s-%s" categoryName streamId Containers(fun categoryName streamId -> databaseId, containerId, genStreamName categoryName streamId) - member internal __.Resolve(ops : StoreGateway, categoryName, id, init) : (StoreGateway*string) * (unit -> Async) option = + member internal __.Resolve(gateway : StoreGateway, categoryName, id, init) : (StoreGateway*string) * (unit -> Async) option = let databaseId, containerName, streamName = categoryAndIdToDatabaseContainerStream categoryName id let init = match disableInitialization with Some true -> None | _ -> Some init - let wrapped = wrappers.GetOrAdd((databaseId,containerName), fun _ -> ContainerWrapper(ops, ?initContainer = init)) + let wrapped = wrappers.GetOrAdd((databaseId,containerName), fun _ -> ContainerWrapper(gateway, ?initContainer = init)) (wrapped.Gateway,streamName),wrapped.InitializationGate namespace Equinox.Cosmos diff --git a/src/Equinox.Cosmos/CosmosJsonSerializer.fs b/src/Equinox.Cosmos/CosmosJsonSerializer.fs index 
6b1a5a310..1484133d0 100644 --- a/src/Equinox.Cosmos/CosmosJsonSerializer.fs +++ b/src/Equinox.Cosmos/CosmosJsonSerializer.fs @@ -2,8 +2,10 @@ open Azure.Cosmos.Serialization open Equinox.Core +open System open System.IO open System.Text.Json +open System.Text.Json.Serialization type CosmosJsonSerializer (options: JsonSerializerOptions) = inherit CosmosSerializer() @@ -29,3 +31,41 @@ type CosmosJsonSerializer (options: JsonSerializerOptions) = memoryStream.Position <- 0L memoryStream :> Stream + +/// Manages zipping of the UTF-8 json bytes to make the index record minimal from the perspective of the writer stored proc +/// Only applied to snapshots in the Tip +and JsonCompressedBase64Converter() = + inherit JsonConverter() + + static member Compress (value: JsonElement) = + if value.ValueKind = JsonValueKind.Null || value.ValueKind = JsonValueKind.Undefined then + value + else + let input = System.Text.Encoding.UTF8.GetBytes(value.GetRawText()) + use output = new MemoryStream() + use compressor = new System.IO.Compression.DeflateStream(output, System.IO.Compression.CompressionLevel.Optimal) + compressor.Write(input, 0, input.Length) + compressor.Close() + JsonDocument.Parse("\"" + System.Convert.ToBase64String(output.ToArray()) + "\"").RootElement + + override __.Read (reader, _typeToConvert, options) = + if reader.TokenType <> JsonTokenType.String then + JsonSerializer.Deserialize(&reader, options) + else + let compressedBytes = reader.GetBytesFromBase64() + use input = new MemoryStream(compressedBytes) + use decompressor = new System.IO.Compression.DeflateStream(input, System.IO.Compression.CompressionMode.Decompress) + use output = new MemoryStream() + decompressor.CopyTo(output) + JsonSerializer.Deserialize(ReadOnlySpan.op_Implicit(output.ToArray()), options) + + override __.Write (writer, value, options) = + JsonSerializer.Serialize(writer, value, options) + +type JsonCompressedBase64ConverterAttribute () = + inherit JsonConverterAttribute(typeof) + + static 
let converter = JsonCompressedBase64Converter() + + override __.CreateConverter _typeToConvert = + converter :> JsonConverter From e40d9d9d8a51e8554c0ed07e231fd6fd1e9043b8 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Thu, 19 Mar 2020 21:51:29 +0000 Subject: [PATCH 59/71] Fix naming stragglers --- src/Equinox.Cosmos/Cosmos.fs | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index e741b4577..35e674d69 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -11,8 +11,8 @@ open System.Text.Json open System.Threading /// A single Domain Event from the array held in a Batch -type [] // TODO for STJ v5: All fields required unless explicitly optional - Event = +[] +type Event = // TODO for STJ v5: All fields required unless explicitly optional { /// Creation datetime (as opposed to system-defined _lastUpdated which is touched by triggers, replication etc.) 
t: DateTimeOffset // ISO 8601 @@ -42,8 +42,8 @@ type [] // TODO for STJ v5: All fields required unless member __.Timestamp = __.t /// A 'normal' (frozen, not Tip) Batch of Events (without any Unfolds) -type [] // TODO for STJ v5: All fields required unless explicitly optional - Batch = +[] +type Batch = // TODO for STJ v5: All fields required unless explicitly optional { /// CosmosDB-mandated Partition Key, must be maintained within the document /// Not actually required if running in single partition mode, but for simplicity, we always write it p: string // "{streamName}" TODO for STJ v5: Optional, not requested in queries @@ -73,7 +73,7 @@ type [] // TODO for STJ v5: All fields required unless static member internal IndexedFields = [Batch.PartitionKeyField; "i"; "n"] /// Compaction/Snapshot/Projection Event based on the state at a given point in time `i` -[] +[] type Unfold = { /// Base: Stream Position (Version) of State from which this Unfold Event was generated i: int64 @@ -96,7 +96,8 @@ type Unfold = /// The special-case 'Pending' Batch Format used to read the currently active (and mutable) document /// Stored representation has the following diffs vs a 'normal' (frozen/completed) Batch: a) `id` = `-1` b) contains unfolds (`u`) /// NB the type does double duty as a) model for when we read it b) encoding a batch being sent to the stored proc -type [] Tip = // TODO for STJ v5: All fields required unless explicitly optional +[] +type Tip = // TODO for STJ v5: All fields required unless explicitly optional { /// Partition key, as per Batch p: string // "{streamName}" TODO for STJ v5: Optional, not requested in queries @@ -123,8 +124,8 @@ type [] Tip = // TODO for STJ v5: All fields required static member internal WellKnownDocumentId = "-1" /// Position and Etag to which an operation is relative -type [] - Position = { index: int64; etag: string option } +[] +type Position = { index: int64; etag: string option } module internal Position = /// NB very inefficient 
compared to FromDocument or using one already returned to you @@ -821,7 +822,7 @@ type StoreClient(conn : StoreConnection, batching : BatchingPolicy) = match Array.tryFindIndexBack isOrigin' xs with | None -> None | Some _ -> items.ToArray() |> Some - member __.Operations = conn.Gateway + member __.Gateway = conn.Gateway member __.LoadBackwardsStopping log (container, stream) (tryDecode,isOrigin): Async = async { let! pos, events = Query.walk log (container,stream) conn.QueryRetryPolicy batching.MaxItems batching.MaxRequests Direction.Backward None (tryDecode,isOrigin) System.Array.Reverse events @@ -995,10 +996,9 @@ type Context let init = fun () -> Initialization.createSyncStoredProcedure (gateway.GetContainer()) SyncStoredProcedure.defaultName None |> Async.Ignore let containers = Containers(gateway.DatabaseId, gateway.ContainerId) - member __.Gateway = client - member __.Containers = containers + member __.Client = client member internal __.ResolveContainerStream(categoryName, id) : (StoreGateway*string) * (unit -> Async) option = - containers.Resolve(client.Operations, categoryName, id, init) + containers.Resolve(client.Gateway, categoryName, id, init) [] type CachingStrategy = @@ -1059,7 +1059,7 @@ type Resolver<'event, 'state, 'context>(context : Context, codec, fold, initial, | AccessStrategy.MultiSnapshot (isOrigin, unfold) -> isOrigin, Choice2Of3 (fun _ state -> unfold state) | AccessStrategy.RollingState toSnapshot -> (fun _ -> true), Choice3Of3 (fun _ state -> [],[toSnapshot state]) | AccessStrategy.Custom (isOrigin,transmute) -> isOrigin, Choice3Of3 transmute - let cosmosCat = Category<'event, 'state, 'context>(context.Gateway, codec) + let cosmosCat = Category<'event, 'state, 'context>(context.Client, codec) let folder = Folder<'event, 'state, 'context>(cosmosCat, fold, initial, isOrigin, mapUnfolds, ?readCache = readCacheOption) let category : ICategory<_, _, StoreGateway*string, 'context> = match caching with @@ -1200,7 +1200,7 @@ type 
AppendResult<'t> = /// Encapsulates the core facilities Equinox.Cosmos offers for operating directly on Events in Streams. type Context - ( ops: StoreGateway, + ( gateway : StoreGateway, /// Logger to write to - see https://github.com/serilog/serilog/wiki/Provided-Sinks for how to wire to your logger log : Serilog.ILogger, /// Optional maximum number of Store.Batch records to retrieve as a set (how many Events are placed therein is controlled by average batch size when appending events @@ -1210,11 +1210,11 @@ type Context []?getDefaultMaxItems) = do if log = null then nullArg "log" - let conn = Equinox.Cosmos.Store.StoreConnection(ops) - let containers = Containers(ops.DatabaseId, ops.ContainerId) + let conn = Equinox.Cosmos.Store.StoreConnection(gateway) + let containers = Containers(gateway.DatabaseId, gateway.ContainerId) let getDefaultMaxItems = match getDefaultMaxItems with Some f -> f | None -> fun () -> defaultArg defaultMaxItems 10 let batching = BatchingPolicy(getDefaultMaxItems=getDefaultMaxItems) - let init = fun () -> Initialization.createSyncStoredProcedure (ops.GetContainer()) SyncStoredProcedure.defaultName None |> Async.Ignore + let init = fun () -> Initialization.createSyncStoredProcedure (gateway.GetContainer()) SyncStoredProcedure.defaultName None |> Async.Ignore let client = StoreClient(conn, batching) let maxCountPredicate count = From 6a36d464e85d6b4256731d5ff5292c2c1c042d9c Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Wed, 1 Apr 2020 09:29:51 +0100 Subject: [PATCH 60/71] Add dump -b to enable overriding Cosmos MaxItems --- CHANGELOG.md | 1 + tools/Equinox.Tool/Program.fs | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 82007882f..7cc084de9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ The `Unreleased` section name is replaced by the expected version of next releas ### Added - now targets `Microsoft.Azure.Cosmos` v `3.1.1` (instead of 
`Microsoft.Azure.DocumentDB`[`.Core`] v 2.x) [#144](https://github.com/jet/equinox/pull/144) +- Add `eqx dump -b`, enabling overriding of Max Events per Batch ### Changed diff --git a/tools/Equinox.Tool/Program.fs b/tools/Equinox.Tool/Program.fs index c8a345fbf..b54e98d9c 100644 --- a/tools/Equinox.Tool/Program.fs +++ b/tools/Equinox.Tool/Program.fs @@ -78,6 +78,7 @@ and []DumpArguments = | [] TimeRegular | [] UnfoldsOnly | [] EventsOnly + | [] BatchSize of int | [] Cosmos of ParseResults | [] Es of ParseResults | [] MsSql of ParseResults @@ -92,6 +93,7 @@ and []DumpArguments = | TimeRegular -> "Don't humanize time intervals between events" | UnfoldsOnly -> "Exclude Events. Default: show both Events and Unfolds" | EventsOnly -> "Exclude Unfolds/Snapshots. Default: show both Events and Unfolds." + | BatchSize -> "Maximum number of documents to request per batch. Default 1000." | Es _ -> "Parameters for EventStore." | Cosmos _ -> "Parameters for CosmosDb." | MsSql _ -> "Parameters for Sql Server." @@ -99,7 +101,7 @@ and []DumpArguments = | Postgres _ -> "Parameters for Postgres." 
and DumpInfo(args: ParseResults) = member __.ConfigureStore(log : ILogger, createStoreLog) = - let storeConfig = None,true,1000 + let storeConfig = None, true, args.GetResult(DumpArguments.BatchSize,1000) match args.TryGetSubCommand() with | Some (DumpArguments.Cosmos sargs) -> let storeLog = createStoreLog <| sargs.Contains Storage.Cosmos.Arguments.VerboseStore @@ -357,7 +359,7 @@ module CosmosStats = log.Debug("Running query: {sql}", sql) let res = container.QueryValue(sql) log.Information("{stat}: {result:N0}", name, res)}) - |> if inParallel then Async.Parallel else Async.ParallelThrottled 1 // TOCONSIDER replace with Async.Sequence when using new enough FSharp.Core + |> if inParallel then Async.Parallel else Async.Sequential |> Async.Ignore |> Async.RunSynchronously | _ -> failwith "please specify a `cosmos` endpoint" } From 07c8492879712c1f01e93c6f0f0b2b1c71755ad8 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Tue, 7 Apr 2020 18:16:40 +0100 Subject: [PATCH 61/71] Cosmos V4: Reduce initialization confusion (#208) --- CONTRIBUTING.md | 22 + DOCUMENTATION.md | 9 + Equinox.sln | 1 + README.md | 31 +- diagrams/CosmosCode.puml | 2 +- diagrams/MemoryStoreContainer.puml | 4 +- diagrams/container.puml | 30 +- diagrams/context.puml | 30 +- samples/Infrastructure/Storage.fs | 16 +- samples/Web/Startup.fs | 2 +- src/Equinox.Cosmos/Cosmos.fs | 452 +++++++++--------- .../CosmosCoreIntegration.fs | 6 +- .../CosmosFixtures.fs | 20 +- tools/Equinox.Tool/Program.fs | 33 +- .../Properties/launchSettings.json | 13 - 15 files changed, 338 insertions(+), 333 deletions(-) create mode 100644 CONTRIBUTING.md delete mode 100644 tools/Equinox.Tool/Properties/launchSettings.json diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..66823c0f7 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,22 @@ +## CONTRIBUTING + +Where it makes sense, raise GitHub issues for any questions so others can benefit from the discussion, or follow the links to [the 
DDD-CQRS-ES #equinox Slack channel](https://ddd-cqrs-es.slack.com/messages/CF5J67H6Z) above for quick discussions. + +This is an Open Source project for many reasons; some central goals: + +- quality dependency-free reference code (the code should be clean and easy to read; where it makes sense, components can be grabbed and cloned locally and used in altered form) +- optimal resilience and performance (getting performance right can add huge value for some systems) +- this code underpins non-trivial production systems (so having good tests is not optional for reasons far deeper than having impressive coverage stats) + +We'll do our best to be accommodating to PRs and issues, but please appreciate that [we emphasize decisiveness for the greater good of the project and its users](https://www.hanselman.com/blog/AlwaysBeClosingPullRequests.aspx); _new features [start with -100 points](https://blogs.msdn.microsoft.com/oldnewthing/20090928-00/?p=16573)_. + +Within those constraints, contributions of all kinds are welcome: + +- raising [Issues](https://github.com/jet/equinox/issues) (including [relevant question-Issues](https://github.com/jet/equinox/issues/56)) is always welcome (but we'll aim to be decisive in the interests of keeping the list navigable). +- bugfixes with good test coverage are naturally always welcome; in general we'll seek to move them to NuGet prerelease and then NuGet release packages with relatively short timelines (there's unfortunately not presently a MyGet feed for CI packages rigged). +- improvements / tweaks, _subject to filing a GitHub issue outlining the work first to see if it fits a general enough need to warrant adding code to the implementation and to make sure work is not wasted or duplicated_: +- [support for new stores](https://github.com/jet/equinox/issues/76) that can fulfill the normal test cases. +- tests, examples and scenarios are always welcome; Equinox is intended to address a very broad base of usage patterns. 
Please note that the emphasis will always be (in order) + 1. providing advice on how to achieve your aims without changing Equinox + 2. how to open up an appropriate extension point in Equinox + 3. (when all else fails), add to the complexity of the system by adding API surface area or logic diff --git a/DOCUMENTATION.md b/DOCUMENTATION.md index a2661fe15..ff7e8d983 100755 --- a/DOCUMENTATION.md +++ b/DOCUMENTATION.md @@ -22,8 +22,17 @@ The following diagrams are based on the style defined in [@simonbrowndotje](http Equinox and Propulsion together provide a loosely related set of libraries that you can leverage in an application as you see fit. These diagrams are intended to give a rough orientation; what you actually build is up to you... +Equinox focuses on the **Consistent Processing** elements of building an event-sourced system, offering tailored components that interact with a specific **Consistent Event Store**, as laid out here in this [C4](https://c4model.com) System Context Diagram: + ![Equinox c4model.com Context Diagram](http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.github.com/jet/equinox/diag/diagrams/context.puml&fmt=svg) +:point_up: Propulsion elements (which we consider External to Equinox) support the building of complementary facilities as part of an overall Application: + +- **Ingesters**: read stuff from outside the Bounded Context of the System. This kind of service covers aspects such as feeding reference data into **Read Models**, ingesting changes into a consistent model via **Consistent Processing**. _These services are not acting in reaction to events emanating from the **Consistent Event Store**, as opposed to..._ +- **Publishers**: react to events as they arrive from the **Consistent Event Store** by filtering, rendering and producing to feeds for downstreams.
_While these services may in some cases rely on synchronous queries via **Consistent Processing**, it's never transacting or driving follow-on work; which brings us to..._ +- **Reactors**: drive reactive actions triggered by either upstream feeds, or events observed in the **Consistent Event Store**. _These services handle anything beyond the duties of **Ingesters** or **Publishers**, and will often drive follow-on processing via Process Managers and/or transacting via **Consistent Processing**. In some cases, a reactor app's function may be to progressively compose a notification for a **Publisher** to eventually publish._ + + ## Container diagram The Systems and Components involved break out roughly like this: diff --git a/Equinox.sln b/Equinox.sln index 8ca4f68dc..ac98a2a97 100644 --- a/Equinox.sln +++ b/Equinox.sln @@ -13,6 +13,7 @@ Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = ".project", ".project", "{7E build.proj = build.proj build.ps1 = build.ps1 CHANGELOG.md = CHANGELOG.md + CONTRIBUTING.md = CONTRIBUTING.md Directory.Build.props = Directory.Build.props Directory.Build.targets = Directory.Build.targets DOCUMENTATION.md = DOCUMENTATION.md diff --git a/README.md b/README.md index 4c007bf51..2ec239b39 100644 --- a/README.md +++ b/README.md @@ -116,6 +116,12 @@ Equinox focuses on the **Consistent Processing** element of building an event-so ![Equinox c4model.com Context Diagram](http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.github.com/jet/equinox/diag/diagrams/context.puml&fmt=svg) +:point_up: Propulsion elements (which we consider External to Equinox) support the building of complementary facilities as part of an overall Application: + +- **Ingesters**: read stuff from outside the Bounded Context of the System. This kind of service covers aspects such as feeding reference data into **Read Models**, ingesting changes into a consistent model via **Consistent Processing**. 
_These services are not acting in reaction to events emanating from the **Consistent Event Store**, as opposed to..._ +- **Publishers**: react to events as they arrive from the **Consistent Event Store** by filtering, rendering and producing to feeds for downstreams (here we label that _Publish Simple Notifications_). _While these services may in some cases rely on synchronous queries via **Consistent Processing**, it's never transacting or driving follow-on work; which brings us to..._ +- **Reactors**: drive reactive actions triggered by either upstream feeds, or events observed in the **Consistent Event Store**. _These services handle anything beyond the duties of **Ingesters** or **Publishers**, and will often drive follow-on processing via Process Managers and/or transacting via **Consistent Processing**. In some cases, a reactor app's function may be to progressively compose a notification for a **Publisher** to eventually publish._ + ## [C4](https://c4model.com) Container diagram The relevant pieces of the above break down as follows, when we emphasize the [Containers](https://c4model.com) aspects relevant to Equinox: @@ -379,29 +385,6 @@ A key facility of this repo is being able to run load tests, either in process a - the `Cleared` event acts as a natural event to use in the `isOrigin` check. This makes snapshotting less crucial than it is, for example, in the case of the `Favorite` test - the `-s` parameter can be used to adjust the maximum item text length from the default (`100`, implying average length of 50) -## CONTRIBUTING - -Where it makes sense, raise GitHub issues for any questions so others can benefit from the discussion, or follow the links to [the DDD-CQRS-ES #equinox Slack channel](https://ddd-cqrs-es.slack.com/messages/CF5J67H6Z) above for quick discussions.
- -This is an Open Source project for many reasons; some central goals: - -- quality dependency-free reference code (the code should be clean and easy to read; where it makes sense, components can be grabbed and cloned locally and used in altered form) -- optimal resilience and performance (getting performance right can add huge value for some systems) -- this code underpins non-trivial production systems (so having good tests is not optional for reasons far deeper than having impressive coverage stats) - -We'll do our best to be accommodating to PRs and issues, but please appreciate that [we emphasize decisiveness for the greater good of the project and its users](https://www.hanselman.com/blog/AlwaysBeClosingPullRequests.aspx); _new features [start with -100 points](https://blogs.msdn.microsoft.com/oldnewthing/20090928-00/?p=16573)_. - -Within those constraints, contributions of all kinds are welcome: - -- raising [Issues](https://github.com/jet/equinox/issues) (including [relevant question-Issues](https://github.com/jet/equinox/issues/56)) is always welcome (but we'll aim to be decisive in the interests of keeping the list navigable). -- bugfixes with good test coverage are naturally always welcome; in general we'll seek to move them to NuGet prerelease and then NuGet release packages with relatively short timelines (there's unfortunately not presently a MyGet feed for CI packages rigged). -- improvements / tweaks, _subject to filing a GitHub issue outlining the work first to see if it fits a general enough need to warrant adding code to the implementation and to make sure work is not wasted or duplicated_: -- [support for new stores](https://github.com/jet/equinox/issues/76) that can fulfill the normal test cases. -- tests, examples and scenarios are always welcome; Equinox is intended to address a very broad base of usage patterns. Please note that the emphasis will always be (in order) - 1. 
providing advice on how to achieve your aims without changing Equinox - 2. how to open up an appropriate extension point in Equinox - 3. (when all else fails), add to the complexity of the system by adding API surface area or logic - ## BUILDING Please note the [QuickStart](#quickstart) is probably the best way to gain an overview - these instructions are intended to illustrated various facilities of the build script for people making changes. @@ -663,7 +646,7 @@ Ouch, not looking forward to reading all that logic :frown: ? [Have a read, it's The diagrams in this README.md and the DOCUMENTATION.md would not and could not have happened without the hard work and assistance of at least: - [@simonbrowndotje](https://github.com/simonbrowndotje) taking the time to define and evangelize the [C4 model](https://c4model.com/). It's highly recommended to view [the talk linked from `c4model.com`](https://www.youtube.com/watch?v=x2-rSnhpw0g&feature=emb_logo). -- the wonder that is [PlantUml](https://plantuml.com/); authoring them [in text](https://github.com/jet/equinox/tree/master/diagrams) is a delight. The images are also presently rendered by the [PlantUml proxy](https://plantuml.com/server) +- the wonder that is [PlantUml](https://plantuml.com/); authoring them [in text](https://github.com/jet/equinox/tree/master/diagrams) with [the VSCode PlantUML plugin](https://marketplace.visualstudio.com/items?itemName=jebbs.plantuml) is a delight.
The inline images are also presently rendered by the [PlantUml proxy](https://plantuml.com/server) - [@skleanthous](https://github.com/skleanthous) for making _and documenting_ [C4-PlantUmlSkin](https://github.com/skleanthous/C4-PlantumlSkin/blob/master/README.md) # FURTHER READING diff --git a/diagrams/CosmosCode.puml b/diagrams/CosmosCode.puml index f3896debc..3f79f20f5 100644 --- a/diagrams/CosmosCode.puml +++ b/diagrams/CosmosCode.puml @@ -1,5 +1,5 @@ @startuml -title Code diagram for Equinox.EventStore Query operation, with empty cache and nothing written to the stream yet +title Code diagram for Equinox.Cosmos Query operation, with empty cache and nothing written to the stream yet actor Caller order 20 box "Equinox.Stream" diff --git a/diagrams/MemoryStoreContainer.puml b/diagrams/MemoryStoreContainer.puml index 50600c5f6..1faacf9c1 100644 --- a/diagrams/MemoryStoreContainer.puml +++ b/diagrams/MemoryStoreContainer.puml @@ -38,7 +38,7 @@ rectangle "System Under Test + Simulators" <> { ] } -tests <-D-> aggregate : transact\nagainst +tests -D-> aggregate : act on tests <- caches : may probe\neventually\nconsistent tests -L-> eqxms : may validate\nby probing @@ -50,6 +50,6 @@ ingesters <.U. 
inputs : feed into reactors <- eqxms : can subscribe to\n(TODO) reactors <-> aggregate2 : run transactions against -reactors -U-> caches : maintain\nviews\nin +reactors -U-> caches : drive reactions by writing into @enduml \ No newline at end of file diff --git a/diagrams/container.puml b/diagrams/container.puml index 2a242575f..2561d7953 100644 --- a/diagrams/container.puml +++ b/diagrams/container.puml @@ -24,12 +24,12 @@ together { frame "Consistent Event Stores" as stores <> { frame "Cosmos" as cosmos <> { rectangle "Equinox.Cosmos" <> as cs - rectangle "Propulsion.Cosmos" <> as cfp + rectangle "Propulsion.Cosmos" <> as cr rectangle "Azure.Cosmos" <> as cc } frame "EventStore" <> { rectangle "Equinox.EventStore" <> as es - rectangle "Propulsion.EventStore" <> as all + rectangle "Propulsion.EventStore" <> as er rectangle "EventStore.ClientAPI" <> as esc } frame "Integration Test Support" <> { @@ -57,25 +57,26 @@ apps .> caches : reference\neventually consistent apps <-> aggs : transact\nagainst aggs -> equinox : use -equinox <-> IStream : Query/\nTransact - -ingesters <. inputs : sync from external -ingesters -> aggs : reconcile into -ingesters .> caches : push into +equinox -> IStream : Query/\nTransact stores .> publishers : are followed by stores .U.> reactors : trigger -publishers .> outputs : emit notifications to -publishers <-- cfp : can feed from -publishers <-- all : can feed from +ingesters <. 
inputs : sync from external +ingesters -> aggs : reconcile into +ingesters .> caches : push into reactors <-U-> aggs : feed back into -reactors .> outputs : emit summaries to reactors .> caches : maintain views in reactors <- ms : can react to -reactors <- cfp : can react to -reactors <- all : can react to +reactors <- cr : can react to +reactors <- er : can react to +reactors .> publishers : trigger + +publishers .> outputs : emit notifications to +publishers .> outputs : render events to +publishers <-- cr : can feed from +publishers <-- er : can feed from ms .> IStream : implements es -> ICache @@ -93,9 +94,10 @@ ssp -> sss : is a sss <-> SqlStreamStore services -[hidden]D-> reactors -ingesters -[hidden]D-> services +ingesters -[hidden]D-> services publishers -[hidden]D-> es + IStream -[hidden]D-> ssss red -[hidden]R-> sql diff --git a/diagrams/context.puml b/diagrams/context.puml index be4589e54..792c9bfde 100644 --- a/diagrams/context.puml +++ b/diagrams/context.puml @@ -1,4 +1,4 @@ -@startuml +@startuml !includeurl https://raw.githubusercontent.com/skleanthous/C4-PlantumlSkin/master/build/output/c4.puml title System Context Diagram for Equinox (+Propulsion) @@ -11,7 +11,7 @@ rectangle services <> [ __Equinox__ Executing Commands Making Decisions - Querying syßnchronously + Querying synchronously ] database stores <> [ @@ -23,21 +23,21 @@ database stores <> [ ] database caches <> [ - **Read Model** - ____ + **Read Models** + __(eventually consistent)__ Document Stores SQL DBs, REDIS, etc ] together { - rectangle ingesters <> [ + rectangle ingesters <> [ **Ingesters** __Propulsion / FsKafka / Equinox__ Sync from reference data Change Data Capture Event Carried State Transfer ] - rectangle reactors <> [ + rectangle reactors <> [ **Reactors** __Propulsion / Equinox__ Drive Reactions ] @@ -71,17 +71,19 @@ apps <-L-> services : transact against /\nsynchronously query services <-R-> stores : Query/Transact\n[in terms of events] stores ..> publishers : are followed by
-stores ..> reactors : trigger +stores ..> reactors : fresh\nwrites trigger + +ingesters <.. inputs : pull external\ndata from\nupstream +ingesters --> services : reconcile upstream\nchanges into +ingesters .U.> caches : push reference data into -reactors --> services : feed back into -reactors ..> outputs : generate complex\nnotifications for +reactors --> services : manage follow-on reactions via reactors .U.> caches : maintain views in +reactors .> publishers : trigger -publishers ..> outputs : emit complex notifications to -publishers ..> outputs : render some events to +publishers ..> outputs : may emit generated\nnotifications composed by reactors to +publishers ..> outputs : can render some\nevents directly to downstream -ingesters <.. inputs : pull external\ndata from -ingesters --> services : reconcile changes into -ingesters .U.> caches : push reference data into +reactors -[hidden]R-> publishers @enduml \ No newline at end of file diff --git a/samples/Infrastructure/Storage.fs b/samples/Infrastructure/Storage.fs index 5c4407b95..60e868300 100644 --- a/samples/Infrastructure/Storage.fs +++ b/samples/Infrastructure/Storage.fs @@ -70,16 +70,18 @@ module Cosmos = open Equinox.Cosmos open Serilog - let connection (log: ILogger, storeLog: ILogger) (a : Info) = - let (Discovery.UriAndKey (endpointUri,_)) as discovery = a.Connection |> Discovery.FromConnectionString + let connection (log: ILogger) (a : Info) = + let discovery = Discovery.ConnectionString a.Connection + let cosmosClient = CosmosClientFactory(a.Timeout, a.Retries, a.MaxRetryWaitTime, mode=a.Mode).Create(discovery) log.Information("CosmosDb {mode} {connection} Database {database} Container {container}", - a.Mode, endpointUri, a.Database, a.Container) + a.Mode, cosmosClient.Endpoint, a.Database, a.Container) log.Information("CosmosDb timeout {timeout}s; Throttling retries {retries}, max wait {maxRetryWaitTime}s", (let t = a.Timeout in t.TotalSeconds), a.Retries, let x = a.MaxRetryWaitTime in 
x.TotalSeconds) - discovery, a.Database, a.Container, StoreGatewayFactory(a.Timeout, a.Retries, a.MaxRetryWaitTime, log=storeLog, mode=a.Mode) - let config (log: ILogger, storeLog) (cache, unfolds, batchSize) info = - let discovery, dName, cName, factory = connection (log, storeLog) info - let ctx = Context(factory.Create(appName, discovery, dName, cName), defaultMaxItems = batchSize) + cosmosClient, a.Database, a.Container + let config (log: ILogger) (cache, unfolds, batchSize) info = + let cosmosClient, dName, cName = connection log info + let client = Client(cosmosClient, dName, cName) + let ctx = Context(client, defaultMaxItems = batchSize) let cacheStrategy = match cache with Some c -> CachingStrategy.SlidingWindow (c, TimeSpan.FromMinutes 20.) | None -> CachingStrategy.NoCaching StorageConfig.Cosmos (ctx, cacheStrategy, unfolds, dName, cName) diff --git a/samples/Web/Startup.fs b/samples/Web/Startup.fs index 4d887d88f..7b90b60f1 100644 --- a/samples/Web/Startup.fs +++ b/samples/Web/Startup.fs @@ -65,7 +65,7 @@ type Startup() = | Some (Cosmos sargs) -> let storeLog = createStoreLog <| sargs.Contains Storage.Cosmos.Arguments.VerboseStore log.Information("CosmosDb Storage options: {options:l}", options) - Storage.Cosmos.config (log,storeLog) (cache, unfolds, defaultBatchSize) (Storage.Cosmos.Info sargs), storeLog + Storage.Cosmos.config log (cache, unfolds, defaultBatchSize) (Storage.Cosmos.Info sargs), storeLog | Some (Es sargs) -> let storeLog = createStoreLog <| sargs.Contains Storage.EventStore.Arguments.VerboseStore log.Information("EventStore Storage options: {options:l}", options) diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.Cosmos/Cosmos.fs index 35e674d69..08f955076 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.Cosmos/Cosmos.fs @@ -405,21 +405,20 @@ module private CancellationToken = let useOrCreate = function Some ct -> async.Return ct | _ -> Async.CancellationToken module Initialization = - let internal getOrCreateDatabase 
(sdk: CosmosClient) (dbName: string) (throughput: ResourceThroughput) (cancellationToken: CancellationToken option) = async { - let! ct = CancellationToken.useOrCreate cancellationToken + let internal getOrCreateDatabase (cosmosClient: CosmosClient) (dbName: string) (throughput: ResourceThroughput) = async { + let! ct = Async.CancellationToken let! response = match throughput with - | Default -> sdk.CreateDatabaseIfNotExistsAsync(id = dbName, cancellationToken = ct) |> Async.AwaitTaskCorrect - | SetIfCreating value -> sdk.CreateDatabaseIfNotExistsAsync(id = dbName, throughput = Nullable(value), cancellationToken = ct) |> Async.AwaitTaskCorrect + | Default -> cosmosClient.CreateDatabaseIfNotExistsAsync(id = dbName, cancellationToken = ct) |> Async.AwaitTaskCorrect + | SetIfCreating value -> cosmosClient.CreateDatabaseIfNotExistsAsync(id = dbName, throughput = Nullable(value), cancellationToken = ct) |> Async.AwaitTaskCorrect | ReplaceAlways value -> async { - let! response = sdk.CreateDatabaseIfNotExistsAsync(id = dbName, throughput = Nullable(value), cancellationToken = ct) |> Async.AwaitTaskCorrect + let! response = cosmosClient.CreateDatabaseIfNotExistsAsync(id = dbName, throughput = Nullable(value), cancellationToken = ct) |> Async.AwaitTaskCorrect let! _ = response.Database.ReplaceThroughputAsync(value, cancellationToken = ct) |> Async.AwaitTaskCorrect return response } - return response.Database } - let internal getOrCreateContainer (db: CosmosDatabase) (props: ContainerProperties) (throughput: ResourceThroughput) (cancellationToken: CancellationToken option) = async { - let! ct = CancellationToken.useOrCreate cancellationToken + let internal getOrCreateContainer (db: CosmosDatabase) (props: ContainerProperties) (throughput: ResourceThroughput) = async { + let! ct = Async.CancellationToken let! 
response = match throughput with | Default -> db.CreateContainerIfNotExistsAsync(props, cancellationToken = ct) |> Async.AwaitTaskCorrect @@ -441,51 +440,39 @@ module Initialization = for k in Batch.IndexedFields do props.IndexingPolicy.IncludedPaths.Add(IncludedPath(Path = sprintf "/%s/?" k)) props - let createSyncStoredProcedure (container: CosmosContainer) (name) (cancellationToken) = async { - let! ct = CancellationToken.useOrCreate cancellationToken + let createSyncStoredProcedure (container: CosmosContainer) nameOverride = async { + let! ct = Async.CancellationToken + let name = nameOverride |> Option.defaultValue SyncStoredProcedure.defaultName try let! r = container.Scripts.CreateStoredProcedureAsync(Scripts.StoredProcedureProperties(name, SyncStoredProcedure.body), cancellationToken = ct) |> Async.AwaitTaskCorrect return r.GetRawResponse().Headers.GetRequestCharge() with CosmosException ((CosmosStatusCode sc) as e) when sc = int System.Net.HttpStatusCode.Conflict -> return e.Response.Headers.GetRequestCharge() } - let initializeContainer (sdk: CosmosClient) (dbName: string) (containerName: string) (mode: Provisioning) (createStoredProcedure: bool) (storedProcedureName: string option) (cancellationToken: CancellationToken option) = async { - let! ct = CancellationToken.useOrCreate cancellationToken + let initializeContainer (cosmosClient: CosmosClient) (dbName: string) (containerName: string) (mode: Provisioning) (createStoredProcedure: bool, nameOverride: string option) = async { let dbThroughput = match mode with Provisioning.Database throughput -> throughput | _ -> Default let containerThroughput = match mode with Provisioning.Container throughput -> throughput | _ -> Default - let! db = getOrCreateDatabase sdk dbName dbThroughput (Some ct) - let! container = getOrCreateContainer db (getBatchAndTipContainerProps containerName) containerThroughput (Some ct) + let! db = getOrCreateDatabase cosmosClient dbName dbThroughput + let! 
container = getOrCreateContainer db (getBatchAndTipContainerProps containerName) containerThroughput if createStoredProcedure then - let syncStoredProcedureName = storedProcedureName |> Option.defaultValue SyncStoredProcedure.defaultName - do! createSyncStoredProcedure container syncStoredProcedureName (Some ct) |> Async.Ignore + let! (_ru : float) = createSyncStoredProcedure container nameOverride in () return container } -type StoreGateway(cosmosClient: CosmosClient, databaseId: string, containerId: string) = - let containerClient = lazy(cosmosClient.GetContainer(databaseId, containerId)) - - member val DatabaseId = databaseId with get - member val ContainerId = containerId with get - member val CosmosClient = cosmosClient with get +type ContainerGateway(cosmosContainer : CosmosContainer) = - abstract member InitializeContainer: mode: Provisioning * createStoredProcedure: bool * ?storedProcedureName: string -> Async - default __.InitializeContainer(mode, createStoredProcedure, storedProcedureName) = - Initialization.initializeContainer cosmosClient databaseId containerId mode createStoredProcedure storedProcedureName None - - abstract member GetContainer: unit -> CosmosContainer - default __.GetContainer() = - containerClient.Value + member val CosmosContainer = cosmosContainer with get abstract member GetQueryIteratorByPage<'T> : query: QueryDefinition * ?options: QueryRequestOptions -> AsyncSeq> default __.GetQueryIteratorByPage<'T>(query, ?options) = - __.GetContainer().GetItemQueryIterator<'T>(query, requestOptions = defaultArg options null).AsPages() |> AsyncSeq.ofAsyncEnum + cosmosContainer.GetItemQueryIterator<'T>(query, requestOptions = defaultArg options null).AsPages() |> AsyncSeq.ofAsyncEnum - abstract member TryReadItem<'T> : docId: string * partitionKey: string * ?options: ItemRequestOptions * ?cancellationToken : CancellationToken -> Async> + abstract member TryReadItem<'T> : docId: string * partitionKey: string * ?options: ItemRequestOptions * 
?cancellationToken: CancellationToken -> Async> default __.TryReadItem<'T>(docId, partitionKey, ?options, ?cancellationToken) = async { let partitionKey = PartitionKey partitionKey let options = defaultArg options null let! ct = CancellationToken.useOrCreate cancellationToken // TODO use TryReadItemStreamAsync to avoid the exception https://github.com/Azure/azure-cosmos-dotnet-v3/issues/692#issuecomment-521936888 - try let! item = async { return! __.GetContainer().ReadItemAsync<'T>(docId, partitionKey, requestOptions = options, cancellationToken = ct) |> Async.AwaitTaskCorrect } + try let! item = async { return! cosmosContainer.ReadItemAsync<'T>(docId, partitionKey, requestOptions = options, cancellationToken = ct) |> Async.AwaitTaskCorrect } // if item.StatusCode = System.Net.HttpStatusCode.NotModified then return item.RequestCharge, NotModified // NB `.Document` will NRE if a IfNoneModified precondition triggers a NotModified result // else @@ -501,7 +488,7 @@ type StoreGateway(cosmosClient: CosmosClient, databaseId: string, containerId: s let! ct = CancellationToken.useOrCreate cancellationToken let partitionKey = PartitionKey partitionKey //let args = [| box tip; box index; box (Option.toObj etag)|] - return! __.GetContainer().Scripts.ExecuteStoredProcedureAsync(storedProcedureName, partitionKey, args, cancellationToken = ct) |> Async.AwaitTaskCorrect } + return! 
cosmosContainer.Scripts.ExecuteStoredProcedureAsync(storedProcedureName, partitionKey, args, cancellationToken = ct) |> Async.AwaitTaskCorrect } module Sync = @@ -513,7 +500,7 @@ module Sync = | ConflictUnknown of Position type [] Exp = Version of int64 | Etag of string | Any - let private run (gateway : StoreGateway, stream : string) (exp, req: Tip) + let private run (gateway : ContainerGateway, stream : string) (exp, req: Tip) : Async = async { let ep = match exp with Exp.Version ev -> Position.fromI ev | Exp.Etag et -> Position.fromEtag et | Exp.Any -> Position.fromAppendAtEnd let args = [| box req; box ep.index; box (Option.toObj ep.etag)|] @@ -577,10 +564,10 @@ module Sync = } : Unfold) module internal Tip = - let private get (gateway : StoreGateway, stream : string) (maybePos: Position option) = + let private get (gateway : ContainerGateway, stream : string) (maybePos: Position option) = let ro = match maybePos with Some { etag=Some etag } -> ItemRequestOptions(IfNoneMatch=Nullable(Azure.ETag(etag))) | _ -> null gateway.TryReadItem(Tip.WellKnownDocumentId, stream, options = ro) - let private loggedGet (get : StoreGateway * string -> Position option -> Async<_>) (container,stream) (maybePos: Position option) (log: ILogger) = async { + let private loggedGet (get : ContainerGateway * string -> Position option -> Async<_>) (container,stream) (maybePos: Position option) (log: ILogger) = async { let log = log |> Log.prop "stream" stream let! 
t, (ru, res : ReadResult) = get (container,stream) maybePos |> Stopwatch.Time let log bytes count (f : Log.Measurement -> _) = log |> Log.event (f { stream = stream; interval = t; bytes = bytes; count = count; ru = ru }) @@ -608,7 +595,7 @@ module internal Tip = | ReadResult.Found tip -> return Result.Found (Position.fromTip tip, Enum.EventsAndUnfolds tip |> Array.ofSeq) } module internal Query = - let private mkQuery (gateway : StoreGateway, stream: string) maxItems (direction: Direction) startPos : AsyncSeq> = + let private mkQuery (gateway : ContainerGateway, stream: string) maxItems (direction: Direction) startPos : AsyncSeq> = let query = let root = sprintf "SELECT c.id, c.i, c._etag, c.n, c.e FROM c WHERE c.id!=\"%s\"" Tip.WellKnownDocumentId let tail = sprintf "ORDER BY c.i %s" (if direction = Direction.Forward then "ASC" else "DESC") @@ -617,7 +604,7 @@ module internal Tip = | Some { index = positionSoExclusiveWhenBackward } -> let cond = if direction = Direction.Forward then "c.n > @startPos" else "c.i < @startPos" QueryDefinition(sprintf "%s AND %s %s" root cond tail).WithParameter("@startPos", positionSoExclusiveWhenBackward) - let qro = new QueryRequestOptions(PartitionKey = Nullable(PartitionKey stream), MaxItemCount=Nullable maxItems) + let qro = QueryRequestOptions(PartitionKey = Nullable(PartitionKey stream), MaxItemCount=Nullable maxItems) gateway.GetQueryIteratorByPage(query, options = qro) // Unrolls the Batches in a response - note when reading backwards, the events are emitted in reverse order of index @@ -770,13 +757,13 @@ module internal Tip = let t = StopwatchInterval(startTicks, endTicks) log |> logQuery direction maxItems stream t (!responseCount,allEvents.ToArray()) -1L ru } -type [] Token = { gateway: StoreGateway; stream: string; pos: Position } +type [] Token = { stream: string; pos: Position } module Token = - let create (gateway,stream) pos : StreamToken = - { value = box { gateway = gateway; stream = stream; pos = pos } + let create 
stream pos : StreamToken = + { value = box { stream = stream; pos = pos } version = pos.index } - let (|Unpack|) (token: StreamToken) : StoreGateway*string*Position = let t = unbox token.value in t.gateway,t.stream,t.pos - let supersedes (Unpack (_,_,currentPos)) (Unpack (_,_,xPos)) = + let (|Unpack|) (token: StreamToken) : string*Position = let t = unbox token.value in t.stream,t.pos + let supersedes (Unpack (_,currentPos)) (Unpack (_,xPos)) = let currentVersion, newVersion = currentPos.index, xPos.index let currentETag, newETag = currentPos.etag, xPos.etag newVersion > currentVersion || currentETag <> newETag @@ -790,8 +777,7 @@ module Internal = type LoadFromTokenResult<'event> = Unchanged | Found of StreamToken * 'event[] /// Defines policies for retrying with respect to transient failures calling CosmosDb (as opposed to application level concurrency conflicts) -type StoreConnection(gateway: StoreGateway, []?readRetryPolicy: IRetryPolicy, []?writeRetryPolicy) = - member __.Gateway = gateway +type RetryPolicy([]?readRetryPolicy: IRetryPolicy, []?writeRetryPolicy) = member __.TipRetryPolicy = readRetryPolicy member __.QueryRetryPolicy = readRetryPolicy member __.WriteRetryPolicy = writeRetryPolicy @@ -810,7 +796,41 @@ type BatchingPolicy /// Maximum number of trips to permit when slicing the work into multiple responses based on `MaxItems` member __.MaxRequests = maxRequests -type StoreClient(conn : StoreConnection, batching : BatchingPolicy) = +/// Holds Container state, coordinating initialization activities +type internal ContainerInitializerGuard(gateway : ContainerGateway, ?initContainer : CosmosContainer -> Async) = + let initGuard = initContainer |> Option.map (fun init -> AsyncCacheCell(init gateway.CosmosContainer)) + + member __.Gateway = gateway + member internal __.InitializationGate = match initGuard with Some g when g.PeekIsValid() |> not -> Some g.AwaitValue | _ -> None + +/// Defines a process for mapping from a Stream Name to the appropriate 
storage area, allowing control over segregation / co-locating of data +type Containers + ( /// Facilitates custom mapping of Stream Category Name to underlying Cosmos Database/Container names + categoryAndStreamNameToDatabaseContainerStream : string * string -> string * string * string, + /// Inhibit CreateStoredProcedureIfNotExists when a given Container is used for the first time + []?disableInitialization) = + // Index of database*collection -> Initialization Context + let containerInitGuards = System.Collections.Concurrent.ConcurrentDictionary() + + /// Create a Container Map where all streams are stored within a single global CosmosContainer. + new (databaseId, containerId, []?disableInitialization) = + let genStreamName (categoryName, streamId) = if categoryName = null then streamId else sprintf "%s-%s" categoryName streamId + let catAndStreamToDatabaseContainerStream (categoryName, streamId) = databaseId, containerId, genStreamName (categoryName, streamId) + Containers(catAndStreamToDatabaseContainerStream, ?disableInitialization = disableInitialization) + + member internal __.ResolveContainerGuardAndStreamName(cosmosClient : CosmosClient, createGateway, categoryName, streamId) : ContainerInitializerGuard * string = + let databaseId, containerId, streamName = categoryAndStreamNameToDatabaseContainerStream (categoryName, streamId) + let createContainerInitializerGuard (d, c) = + let init = + if Some true = disableInitialization then None + else Some (fun cosmosContainer -> Initialization.createSyncStoredProcedure cosmosContainer None |> Async.Ignore) + ContainerInitializerGuard + ( createGateway (cosmosClient.GetDatabase(d).GetContainer(c)), + ?initContainer = init) + let g = containerInitGuards.GetOrAdd((databaseId, containerId), createContainerInitializerGuard) + g, streamName + +type ContainerClient(gateway : ContainerGateway, batching : BatchingPolicy, retry: RetryPolicy) = let (|FromUnfold|_|) (tryDecode: #IEventData<_> -> 'event option) (isOrigin: 
'event -> bool) (xs:#IEventData<_>[]) : Option<'event[]> = let items = ResizeArray() let isOrigin' e = @@ -822,92 +842,57 @@ type StoreClient(conn : StoreConnection, batching : BatchingPolicy) = match Array.tryFindIndexBack isOrigin' xs with | None -> None | Some _ -> items.ToArray() |> Some - member __.Gateway = conn.Gateway - member __.LoadBackwardsStopping log (container, stream) (tryDecode,isOrigin): Async = async { - let! pos, events = Query.walk log (container,stream) conn.QueryRetryPolicy batching.MaxItems batching.MaxRequests Direction.Backward None (tryDecode,isOrigin) + member __.LoadBackwardsStopping(log, stream, (tryDecode,isOrigin)): Async = async { + let! pos, events = Query.walk log (gateway,stream) retry.QueryRetryPolicy batching.MaxItems batching.MaxRequests Direction.Backward None (tryDecode,isOrigin) System.Array.Reverse events - return Token.create (container,stream) pos, events } - member __.Read log (container,stream) direction startPos (tryDecode,isOrigin) : Async = async { - let! pos, events = Query.walk log (container,stream) conn.QueryRetryPolicy batching.MaxItems batching.MaxRequests direction startPos (tryDecode,isOrigin) - return Token.create (container,stream) pos, events } - member __.ReadLazy (batching: BatchingPolicy) log (container,stream) direction startPos (tryDecode,isOrigin) : AsyncSeq<'event[]> = - Query.walkLazy log (container,stream) conn.QueryRetryPolicy batching.MaxItems batching.MaxRequests direction startPos (tryDecode,isOrigin) - member __.LoadFromUnfoldsOrRollingSnapshots log (containerStream,maybePos) (tryDecode,isOrigin): Async = async { - let! res = Tip.tryLoad log conn.TipRetryPolicy containerStream maybePos + return Token.create stream pos, events } + member __.Read(log, stream, direction, startPos, (tryDecode,isOrigin)) : Async = async { + let! 
pos, events = Query.walk log (gateway,stream) retry.QueryRetryPolicy batching.MaxItems batching.MaxRequests direction startPos (tryDecode,isOrigin) + return Token.create stream pos, events } + member __.ReadLazy(batching: BatchingPolicy, log, stream, direction, startPos, (tryDecode,isOrigin)) : AsyncSeq<'event[]> = + Query.walkLazy log (gateway,stream) retry.QueryRetryPolicy batching.MaxItems batching.MaxRequests direction startPos (tryDecode,isOrigin) + member __.LoadFromUnfoldsOrRollingSnapshots(log, (stream,maybePos), (tryDecode,isOrigin)): Async = async { + let! res = Tip.tryLoad log retry.TipRetryPolicy (gateway,stream) maybePos match res with - | Tip.Result.NotFound -> return Token.create containerStream Position.fromKnownEmpty, Array.empty + | Tip.Result.NotFound -> return Token.create stream Position.fromKnownEmpty, Array.empty | Tip.Result.NotModified -> return invalidOp "Not handled" - | Tip.Result.Found (pos, FromUnfold tryDecode isOrigin span) -> return Token.create containerStream pos, span - | _ -> return! __.LoadBackwardsStopping log containerStream (tryDecode,isOrigin) } - member __.GetPosition(log, containerStream, ?pos): Async = async { - let! res = Tip.tryLoad log conn.TipRetryPolicy containerStream pos + | Tip.Result.Found (pos, FromUnfold tryDecode isOrigin span) -> return Token.create stream pos, span + | _ -> return! __.LoadBackwardsStopping(log,stream, (tryDecode,isOrigin)) } + member __.GetPosition(log, stream, ?pos): Async = async { + let! res = Tip.tryLoad log retry.TipRetryPolicy (gateway,stream) pos match res with - | Tip.Result.NotFound -> return Token.create containerStream Position.fromKnownEmpty - | Tip.Result.NotModified -> return Token.create containerStream pos.Value - | Tip.Result.Found (pos, _unfoldsAndEvents) -> return Token.create containerStream pos } - member __.LoadFromToken(log, (container,stream,pos), (tryDecode, isOrigin)): Async> = async { - let! 
res = Tip.tryLoad log conn.TipRetryPolicy (container,stream) (Some pos) + | Tip.Result.NotFound -> return Token.create stream Position.fromKnownEmpty + | Tip.Result.NotModified -> return Token.create stream pos.Value + | Tip.Result.Found (pos, _unfoldsAndEvents) -> return Token.create stream pos } + member __.LoadFromToken(log, (stream,pos), (tryDecode, isOrigin)): Async> = async { + let! res = Tip.tryLoad log retry.TipRetryPolicy (gateway,stream) (Some pos) match res with - | Tip.Result.NotFound -> return LoadFromTokenResult.Found (Token.create (container,stream) Position.fromKnownEmpty,Array.empty) + | Tip.Result.NotFound -> return LoadFromTokenResult.Found (Token.create stream Position.fromKnownEmpty,Array.empty) | Tip.Result.NotModified -> return LoadFromTokenResult.Unchanged - | Tip.Result.Found (pos, FromUnfold tryDecode isOrigin span) -> return LoadFromTokenResult.Found (Token.create (container,stream) pos, span) - | _ -> let! res = __.Read log (container,stream) Direction.Forward (Some pos) (tryDecode,isOrigin) + | Tip.Result.Found (pos, FromUnfold tryDecode isOrigin span) -> return LoadFromTokenResult.Found (Token.create stream pos, span) + | _ -> let! res = __.Read(log, stream, Direction.Forward, Some pos, (tryDecode,isOrigin)) return LoadFromTokenResult.Found res } - member __.Sync log containerStream (exp, batch: Tip): Async = async { + member __.Sync(log, stream, (exp, batch: Tip)): Async = async { if Array.isEmpty batch.e && Array.isEmpty batch.u then invalidOp "Must write either events or unfolds." - let! wr = Sync.batch log conn.WriteRetryPolicy containerStream (exp,batch) + let! 
wr = Sync.batch log retry.WriteRetryPolicy (gateway,stream) (exp,batch) match wr with - | Sync.Result.Conflict (pos',events) -> return InternalSyncResult.Conflict (Token.create containerStream pos',events) - | Sync.Result.ConflictUnknown pos' -> return InternalSyncResult.ConflictUnknown (Token.create containerStream pos') - | Sync.Result.Written pos' -> return InternalSyncResult.Written (Token.create containerStream pos') } - -/// Holds Container state, coordinating initialization activities -type private ContainerWrapper(gateway : StoreGateway, ?initContainer : unit -> Async) = - let initGuard = initContainer |> Option.map (fun init -> AsyncCacheCell(init ())) - - member __.Gateway = gateway - member internal __.InitializationGate = match initGuard with Some g when g.PeekIsValid() |> not -> Some g.AwaitValue | _ -> None - -/// Defines a process for mapping from a Stream Name to the appropriate storage area, allowing control over segregation / co-locating of data -type Containers(categoryAndIdToDatabaseContainerStream : string -> string -> string*string*string, []?disableInitialization) = - // Index of database*collection -> Initialization Context - let wrappers = System.Collections.Concurrent.ConcurrentDictionary() - new (databaseId, containerId) = - // TOCONSIDER - this works to support the Core.Events APIs - let genStreamName categoryName streamId = if categoryName = null then streamId else sprintf "%s-%s" categoryName streamId - Containers(fun categoryName streamId -> databaseId, containerId, genStreamName categoryName streamId) - - member internal __.Resolve(gateway : StoreGateway, categoryName, id, init) : (StoreGateway*string) * (unit -> Async) option = - let databaseId, containerName, streamName = categoryAndIdToDatabaseContainerStream categoryName id - let init = match disableInitialization with Some true -> None | _ -> Some init - let wrapped = wrappers.GetOrAdd((databaseId,containerName), fun _ -> ContainerWrapper(gateway, ?initContainer = init)) - 
(wrapped.Gateway,streamName),wrapped.InitializationGate - -namespace Equinox.Cosmos - -open Azure.Cosmos -open Equinox -open Equinox.Core -open Equinox.Cosmos.Store -open FsCodec -open FSharp.Control -open Serilog -open System -open System.Text.Json + | Sync.Result.Conflict (pos',events) -> return InternalSyncResult.Conflict (Token.create stream pos',events) + | Sync.Result.ConflictUnknown pos' -> return InternalSyncResult.ConflictUnknown (Token.create stream pos') + | Sync.Result.Written pos' -> return InternalSyncResult.Written (Token.create stream pos') } -type private Category<'event, 'state, 'context>(client : StoreClient, codec : IEventCodec<'event,JsonElement,'context>) = +type internal Category<'event, 'state, 'context>(container : ContainerClient, codec : IEventCodec<'event,JsonElement,'context>) = let (|TryDecodeFold|) (fold: 'state -> 'event seq -> 'state) initial (events: ITimelineEvent seq) : 'state = Seq.choose codec.TryDecode events |> fold initial - member __.Load includeUnfolds containerStream fold initial isOrigin (log : ILogger): Async = async { + member __.Load(includeUnfolds, stream, fold, initial, isOrigin, log : ILogger): Async = async { let! token, events = - if not includeUnfolds then client.LoadBackwardsStopping log containerStream (codec.TryDecode,isOrigin) - else client.LoadFromUnfoldsOrRollingSnapshots log (containerStream,None) (codec.TryDecode,isOrigin) + if not includeUnfolds then container.LoadBackwardsStopping(log, stream, (codec.TryDecode,isOrigin)) + else container.LoadFromUnfoldsOrRollingSnapshots(log, (stream, None), (codec.TryDecode,isOrigin)) return token, fold initial events } - member __.LoadFromToken (Token.Unpack streamPos, state: 'state as current) fold isOrigin (log : ILogger): Async = async { - let! res = client.LoadFromToken(log, streamPos, (codec.TryDecode,isOrigin)) + member __.LoadFromToken(Token.Unpack (stream,pos), state: 'state as current) fold isOrigin (log : ILogger): Async = async { + let! 
res = container.LoadFromToken(log, (stream, pos), (codec.TryDecode,isOrigin)) match res with | LoadFromTokenResult.Unchanged -> return current | LoadFromTokenResult.Found (token', events') -> return token', fold state events' } - member __.Sync(Token.Unpack (container,stream,pos), state as current, events, mapUnfolds, fold, isOrigin, compress, log, context): Async> = async { + member __.Sync(Token.Unpack (stream,pos), state as current, events, mapUnfolds, fold, isOrigin, compress, log, context): Async> = async { let state' = fold state (Seq.ofList events) let encode e = codec.Encode(context,e) let exp,events,eventsEncoded,projectionsEncoded = @@ -920,7 +905,7 @@ type private Category<'event, 'state, 'context>(client : StoreClient, codec : IE let baseIndex = pos.index + int64 (List.length events) let projections = Sync.mkUnfold compress baseIndex projectionsEncoded let batch = Sync.mkBatch stream eventsEncoded projections - let! res = client.Sync log (container,stream) (exp,batch) + let! res = container.Sync(log, stream, (exp,batch)) match res with | InternalSyncResult.Conflict (token',TryDecodeFold fold state events') -> return SyncResult.Conflict (async { return token', events' }) | InternalSyncResult.ConflictUnknown _token' -> return SyncResult.Conflict (__.LoadFromToken current fold isOrigin log) @@ -928,17 +913,17 @@ type private Category<'event, 'state, 'context>(client : StoreClient, codec : IE module Caching = /// Forwards all state changes in all streams of an ICategory to a `tee` function - type CategoryTee<'event, 'state, 'context>(inner: ICategory<'event, 'state, StoreGateway*string,'context>, tee : string -> StreamToken * 'state -> Async) = + type CategoryTee<'event, 'state, 'context>(inner: ICategory<'event, 'state, string,'context>, tee : string -> StreamToken * 'state -> Async) = let intercept streamName tokenAndState = async { let! _ = tee streamName tokenAndState return tokenAndState } let loadAndIntercept load streamName = async { let! 
tokenAndState = load return! intercept streamName tokenAndState } - interface ICategory<'event, 'state, StoreGateway*string, 'context> with - member __.Load(log, (container,streamName), opt) : Async = - loadAndIntercept (inner.Load(log, (container,streamName), opt)) streamName - member __.TrySync(log : ILogger, (Token.Unpack (_container,stream,_) as streamToken), state, events : 'event list, context, compress) + interface ICategory<'event, 'state, string, 'context> with + member __.Load(log, streamName, opt) : Async = + loadAndIntercept (inner.Load(log, streamName, opt)) streamName + member __.TrySync(log : ILogger, (Token.Unpack (stream,_) as streamToken), state, events : 'event list, context, compress) : Async> = async { let! syncRes = inner.TrySync(log, streamToken, state, events, context, compress) match syncRes with @@ -951,28 +936,28 @@ module Caching = (cache : ICache) (prefix : string) (slidingExpiration : TimeSpan) - (category : ICategory<'event, 'state, StoreGateway*string, 'context>) - : ICategory<'event, 'state, StoreGateway*string, 'context> = - let mkCacheEntry (initialToken : StreamToken, initialState : 'state) = new CacheEntry<'state>(initialToken, initialState, Token.supersedes) + (category : ICategory<'event, 'state, string, 'context>) + : ICategory<'event, 'state, string, 'context> = + let mkCacheEntry (initialToken : StreamToken, initialState : 'state) = CacheEntry<'state>(initialToken, initialState, Token.supersedes) let options = CacheItemOptions.RelativeExpiration slidingExpiration let addOrUpdateSlidingExpirationCacheEntry streamName value = cache.UpdateIfNewer(prefix + streamName, options, mkCacheEntry value) CategoryTee<'event, 'state, 'context>(category, addOrUpdateSlidingExpirationCacheEntry) :> _ -type private Folder<'event, 'state, 'context> +type internal Folder<'event, 'state, 'context> ( category: Category<'event, 'state, 'context>, fold: 'state -> 'event seq -> 'state, initial: 'state, isOrigin: 'event -> bool, mapUnfolds: Choice 
'state -> 'event seq),('event list -> 'state -> 'event list * 'event list)>, ?readCache) = let inspectUnfolds = match mapUnfolds with Choice1Of3 () -> false | _ -> true - let batched log containerStream = category.Load inspectUnfolds containerStream fold initial isOrigin log - interface ICategory<'event, 'state, StoreGateway*string, 'context> with - member __.Load(log, (container,streamName), opt): Async = + let batched log stream = category.Load(inspectUnfolds, stream, fold, initial, isOrigin, log) + interface ICategory<'event, 'state, string, 'context> with + member __.Load(log, streamName, opt): Async = match readCache with - | None -> batched log (container,streamName) + | None -> batched log streamName | Some (cache : ICache, prefix : string) -> async { match! cache.TryGet(prefix + streamName) with - | None -> return! batched log (container,streamName) - | Some tokenAndState when opt = Some AllowStale -> return tokenAndState + | None -> return! batched log streamName + | Some tokenAndState when opt = Some Equinox.AllowStale -> return tokenAndState | Some tokenAndState -> return! 
category.LoadFromToken tokenAndState fold isOrigin log } member __.TrySync(log : ILogger, streamToken, state, events : 'event list, context, compress) : Async> = async { @@ -981,24 +966,16 @@ type private Folder<'event, 'state, 'context> | SyncResult.Conflict resync -> return SyncResult.Conflict resync | SyncResult.Written (token',state') -> return SyncResult.Written (token',state') } -/// Defines a set of related access policies for a given CosmosDb, together with a Containers map defining mappings from (category,id) to (databaseId,containerId,streamName) -type Context - ( gateway: StoreGateway, - ?defaultMaxItems: int, - ?getDefaultMaxItems: unit -> int, - ?maxRequests: int, - ?readRetryPolicy: IRetryPolicy, - ?writeRetryPolicy: IRetryPolicy ) = - - let conn = StoreConnection(gateway, ?readRetryPolicy = readRetryPolicy, ?writeRetryPolicy = writeRetryPolicy) - let batchingPolicy = BatchingPolicy(?defaultMaxItems = defaultMaxItems, ?getDefaultMaxItems = getDefaultMaxItems, ?maxRequests = maxRequests) - let client = StoreClient(conn, batchingPolicy) - let init = fun () -> Initialization.createSyncStoredProcedure (gateway.GetContainer()) SyncStoredProcedure.defaultName None |> Async.Ignore - let containers = Containers(gateway.DatabaseId, gateway.ContainerId) - - member __.Client = client - member internal __.ResolveContainerStream(categoryName, id) : (StoreGateway*string) * (unit -> Async) option = - containers.Resolve(client.Gateway, categoryName, id, init) +namespace Equinox.Cosmos + +open Azure.Cosmos +open Equinox +open Equinox.Core +open Equinox.Cosmos.Store +open FsCodec +open FSharp.Control +open Serilog +open System [] type CachingStrategy = @@ -1046,6 +1023,39 @@ type AccessStrategy<'event,'state> = /// | Custom of isOrigin: ('event -> bool) * transmute: ('event list -> 'state -> 'event list*'event list) +/// Holds all relevant state for a Store within a given CosmosDB Database +/// - The (singleton) CosmosDB CosmosClient (there should be a single one of 
these per process) +type Client + ( cosmosClient : CosmosClient, + /// Singleton used to cache initialization state per CosmosContainer. + containers : Containers, + /// Admits a hook to enable customization of how Equinox.Cosmos handles the low level interactions with the underlying CosmosContainer. + ?createGateway) = + let createGateway = match createGateway with Some creator -> creator | None -> ContainerGateway + new (cosmosClient, databaseId : string, containerId : string, + /// Inhibit CreateStoredProcedureIfNotExists when a given Container is used for the first time + []?disableInitialization, + /// Admits a hook to enable customization of how Equinox.Cosmos handles the low level interactions with the underlying CosmosContainer. + []?createGateway : CosmosContainer -> ContainerGateway) = + let containers = Containers(databaseId, containerId, ?disableInitialization = disableInitialization) + Client(cosmosClient, containers, ?createGateway = createGateway) + member __.CosmosClient = cosmosClient + member internal __.ResolveContainerGuardAndStreamName(categoryName, streamId) = + containers.ResolveContainerGuardAndStreamName(cosmosClient, createGateway, categoryName, streamId) + +/// Defines a set of related access policies for a given CosmosDB, together with a Containers map defining mappings from (category,id) to (databaseId,containerId,streamName) +type Context(client : Client, batchingPolicy, retryPolicy) = + new(client : Client, ?defaultMaxItems, ?getDefaultMaxItems, ?maxRequests, ?readRetryPolicy, ?writeRetryPolicy) = + let retry = RetryPolicy(?readRetryPolicy = readRetryPolicy, ?writeRetryPolicy = writeRetryPolicy) + let batching = BatchingPolicy(?defaultMaxItems = defaultMaxItems, ?getDefaultMaxItems = getDefaultMaxItems, ?maxRequests = maxRequests) + Context(client, batching, retry) + member __.Batching = batchingPolicy + member __.Retries = retryPolicy + member internal __.ResolveContainerClientAndStreamIdAndInit(categoryName, streamId) = + let cg, 
streamId = client.ResolveContainerGuardAndStreamName(categoryName, streamId) + let cc = ContainerClient(cg.Gateway, batchingPolicy, retryPolicy) + cc, streamId, cg.InitializationGate + type Resolver<'event, 'state, 'context>(context : Context, codec, fold, initial, caching, access) = let readCacheOption = match caching with @@ -1059,15 +1069,18 @@ type Resolver<'event, 'state, 'context>(context : Context, codec, fold, initial, | AccessStrategy.MultiSnapshot (isOrigin, unfold) -> isOrigin, Choice2Of3 (fun _ state -> unfold state) | AccessStrategy.RollingState toSnapshot -> (fun _ -> true), Choice3Of3 (fun _ state -> [],[toSnapshot state]) | AccessStrategy.Custom (isOrigin,transmute) -> isOrigin, Choice3Of3 transmute - let cosmosCat = Category<'event, 'state, 'context>(context.Client, codec) - let folder = Folder<'event, 'state, 'context>(cosmosCat, fold, initial, isOrigin, mapUnfolds, ?readCache = readCacheOption) - let category : ICategory<_, _, StoreGateway*string, 'context> = - match caching with - | CachingStrategy.NoCaching -> folder :> _ - | CachingStrategy.SlidingWindow(cache, window) -> - Caching.applyCacheUpdatesWithSlidingExpiration cache null window folder - - let resolveStream (streamId, maybeContainerInitializationGate) opt context compress = + let categories = System.Collections.Concurrent.ConcurrentDictionary>() + let resolveCategory (categoryName, container) = + let createCategory _name = + let cosmosCat = Category<'event, 'state, 'context>(container, codec) + let folder = Store.Folder<'event, 'state, 'context>(cosmosCat, fold, initial, isOrigin, mapUnfolds, ?readCache = readCacheOption) + match caching with + | CachingStrategy.NoCaching -> folder :> ICategory<_, _, string, 'context> + | CachingStrategy.SlidingWindow(cache, window) -> Caching.applyCacheUpdatesWithSlidingExpiration cache null window folder + categories.GetOrAdd(categoryName, createCategory) + + let resolveStream (categoryName, container, streamId, maybeContainerInitializationGate) opt 
context compress = + let category = resolveCategory (categoryName, container) { new IStream<'event, 'state> with member __.Load log = category.Load(log, streamId, opt) member __.TrySync(log: ILogger, token: StreamToken, originState: 'state, events: 'event list) = @@ -1077,79 +1090,67 @@ type Resolver<'event, 'state, 'context>(context : Context, codec, fold, initial, do! init () return! category.TrySync(log, token, originState, events, context, compress) } } - let resolveTarget = function - | StreamName.CategoryAndId (categoryName, streamId) -> context.ResolveContainerStream(categoryName, streamId) + let resolveStreamConfig = function + | StreamName.CategoryAndId (categoryName, streamId) -> + let containerClient, streamId, init = context.ResolveContainerClientAndStreamIdAndInit(categoryName, streamId) + categoryName, containerClient, streamId, init member __.Resolve ( streamName : StreamName, + /// Resolver options []?option, + /// Context to be passed to IEventCodec []?context, /// Determines whether the data and metadata payloads of the `u`nfolds in the Tip document are base64 encoded and compressed; defaults to true []?compressUnfolds) = let compress = defaultArg compressUnfolds true - match resolveTarget streamName, option with - | streamArgs,(None|Some AllowStale) -> resolveStream streamArgs option context compress - | (containerStream,maybeInit),Some AssumeEmpty -> - Stream.ofMemento (Token.create containerStream Position.fromKnownEmpty,initial) (resolveStream (containerStream,maybeInit) option context compress) + match resolveStreamConfig streamName, option with + | streamArgs,(None|Some AllowStale) -> + resolveStream streamArgs option context compress + | (_, _, streamId, _) as streamArgs,Some AssumeEmpty -> + let stream = resolveStream streamArgs option context compress + Stream.ofMemento (Token.create streamId Position.fromKnownEmpty,initial) stream member __.FromMemento - ( Token.Unpack (container,stream,_pos) as streamToken, + ( Token.Unpack 
(stream,_pos) as streamToken, state, /// Determines whether the data and metadata payloads of the `u`nfolds in the Tip document are base64 encoded and compressed; defaults to true []?compressUnfolds) = let compress = defaultArg compressUnfolds true let skipInitialization = None - Stream.ofMemento (streamToken,state) (resolveStream ((container,stream),skipInitialization) None None compress) + let (categoryName, container, streamId, _maybeInit) = resolveStreamConfig (StreamName.parse stream) + let stream = resolveStream (categoryName, container, streamId, skipInitialization) None None compress + Stream.ofMemento (streamToken,state) stream [] type Discovery = - | UriAndKey of databaseUri:Uri * key:string - /// Implements connection string parsing logic curiously missing from the CosmosDB SDK - static member FromConnectionString (connectionString: string) = - match connectionString with - | _ when String.IsNullOrWhiteSpace connectionString -> nullArg "connectionString" - | Regex.Match "^\s*AccountEndpoint\s*=\s*([^;\s]+)\s*;\s*AccountKey\s*=\s*([^;\s]+)\s*;?\s*$" m -> - let uri = m.Groups.[1].Value - let key = m.Groups.[2].Value - UriAndKey (Uri uri, key) - | _ -> invalidArg "connectionString" "unrecognized connection string format; must be `AccountEndpoint=https://...;AccountKey=...=;`" - -type StoreGatewayFactory + /// Separated Account Uri and Key (for interop with previous versions) + | AccountUriAndKey of databaseUri: Uri * key:string + /// Cosmos SDK Connection String + | ConnectionString of connectionString : string + +type CosmosClientFactory ( /// Timeout to apply to individual reads/write round-trips going to CosmosDb requestTimeout: TimeSpan, /// Maximum number of times attempt when failure reason is a 429 from CosmosDb, signifying RU limits have been breached maxRetryAttemptsOnRateLimitedRequests: int, /// Maximum number of seconds to wait (especially if a higher wait delay is suggested by CosmosDb in the 429 response) - // naming matches SDK ver >=3 
maxRetryWaitTimeOnRateLimitedRequests: TimeSpan, - /// Log to emit connection messages to - log : ILogger, /// Connection limit for Gateway Mode (default 1000) []?gatewayModeMaxConnectionLimit, /// Connection mode (default: ConnectionMode.Gateway (lowest perf, least trouble)) []?mode : ConnectionMode, /// consistency mode (default: ConsistencyLevel.Session) - []?defaultConsistencyLevel : ConsistencyLevel, - - /// Additional strings identifying the context of this connection; should provide enough context to disambiguate all potential connections to a cluster - /// NB as this will enter server and client logs, it should not contain sensitive information - []?tags : (string*string) seq) = - do if log = null then nullArg "log" - - let logName (uri : Uri) name = - let name = String.concat ";" <| seq { - yield name - match tags with None -> () | Some tags -> for key, value in tags do yield sprintf "%s=%s" key value } - let sanitizedName = name.Replace('\'','_').Replace(':','_') // sic; Align with logging for ES Adapter - log.ForContext("Uri", uri).Information("CosmosDb Connection Name {connectionName}", sanitizedName) + []?defaultConsistencyLevel : ConsistencyLevel) = - /// ClientOptions for this Connector as configured - member val CosmosClientOptions = + /// CosmosClientOptions for this Connector as configured + member val Options = let maxAttempts, maxWait, timeout = Nullable maxRetryAttemptsOnRateLimitedRequests, Nullable maxRetryWaitTimeOnRateLimitedRequests, requestTimeout + let serializerOptions = FsCodec.SystemTextJson.Options.CreateDefault(converters=[|FsCodec.SystemTextJson.Converters.JsonRecordConverter()|]) let co = CosmosClientOptions( MaxRetryAttemptsOnRateLimitedRequests = maxAttempts, MaxRetryWaitTimeOnRateLimitedRequests = maxWait, RequestTimeout = timeout, - Serializer = CosmosJsonSerializer(FsCodec.SystemTextJson.Options.CreateDefault(converters=[|FsCodec.SystemTextJson.Converters.JsonRecordConverter()|]))) + Serializer = CosmosJsonSerializer 
serializerOptions) match mode with | Some ConnectionMode.Direct -> co.ConnectionMode <- ConnectionMode.Direct | None | Some ConnectionMode.Gateway | Some _ (* enum total match :( *) -> co.ConnectionMode <- ConnectionMode.Gateway // default; only supports Https @@ -1168,20 +1169,10 @@ type StoreGatewayFactory // co.TransportClientHandlerFactory <- inhibitCertCheck co - abstract member Create: name: string * discovery: Discovery * dbName: string * containerName: string * ?skipLog: bool -> StoreGateway - default __.Create - ( /// Name should be sufficient to uniquely identify this connection within a single app instance's logs - name, discovery : Discovery, - dbName: string, - containerName: string, - /// true to inhibit logging of client name - []?skipLog) : StoreGateway = - - let (Discovery.UriAndKey (databaseUri=uri; key=key)) = discovery - if skipLog <> Some true then logName uri name - let cosmosClient = new CosmosClient(string uri, key, __.CosmosClientOptions) - - StoreGateway(cosmosClient, dbName, containerName) + abstract member Create: discovery: Discovery -> CosmosClient + default __.Create discovery = discovery |> function + | Discovery.AccountUriAndKey (databaseUri=uri; key=key) -> new CosmosClient(string uri, key, __.Options) + | Discovery.ConnectionString cs -> new CosmosClient(cs, __.Options) namespace Equinox.Cosmos.Core @@ -1200,7 +1191,7 @@ type AppendResult<'t> = /// Encapsulates the core facilities Equinox.Cosmos offers for operating directly on Events in Streams. 
type Context - ( gateway : StoreGateway, + ( context : Equinox.Cosmos.Context, container : ContainerClient, /// Logger to write to - see https://github.com/serilog/serilog/wiki/Provided-Sinks for how to wire to your logger log : Serilog.ILogger, /// Optional maximum number of Store.Batch records to retrieve as a set (how many Events are placed therein is controlled by average batch size when appending events @@ -1208,15 +1199,9 @@ type Context []?defaultMaxItems, /// Alternate way of specifying defaultMaxItems which facilitates reading it from a cached dynamic configuration []?getDefaultMaxItems) = - do if log = null then nullArg "log" - let conn = Equinox.Cosmos.Store.StoreConnection(gateway) - let containers = Containers(gateway.DatabaseId, gateway.ContainerId) let getDefaultMaxItems = match getDefaultMaxItems with Some f -> f | None -> fun () -> defaultArg defaultMaxItems 10 let batching = BatchingPolicy(getDefaultMaxItems=getDefaultMaxItems) - let init = fun () -> Initialization.createSyncStoredProcedure (gateway.GetContainer()) SyncStoredProcedure.defaultName None |> Async.Ignore - let client = StoreClient(conn, batching) - let maxCountPredicate count = let acc = ref (max (count-1) 0) fun _ -> @@ -1225,16 +1210,23 @@ type Context false let yieldPositionAndData res = async { - let! (Token.Unpack (_,_,pos')), data = res + let! 
(Token.Unpack (_,pos')), data = res return pos', data } - member __.ResolveStream(streamName) = containers.Resolve(conn.Gateway, null, streamName, init) - member __.CreateStream(streamName) = __.ResolveStream streamName |> fst + new (client : Azure.Cosmos.CosmosClient, log, databaseId : string, containerId : string, ?defaultMaxItems, ?getDefaultMaxItems) = + let inner = Equinox.Cosmos.Context(Equinox.Cosmos.Client(client, databaseId, containerId)) + let cc, _streamId, _init = inner.ResolveContainerClientAndStreamIdAndInit(null, null) + Context(inner, cc, log, ?defaultMaxItems = defaultMaxItems, ?getDefaultMaxItems = getDefaultMaxItems) + + member __.ResolveStream(streamName) = + let _cc, streamId, init = context.ResolveContainerClientAndStreamIdAndInit(null, streamName) + streamId, init + member __.CreateStream(streamName) : string = __.ResolveStream streamName |> fst member internal __.GetLazy((stream, startPos), ?batchSize, ?direction) : AsyncSeq[]> = let direction = defaultArg direction Direction.Forward let batching = BatchingPolicy(defaultArg batchSize batching.MaxItems) - client.ReadLazy batching log stream direction startPos (Some,fun _ -> false) + container.ReadLazy(batching, log, stream, direction, startPos, (Some,fun _ -> false)) member internal __.GetInternal((stream, startPos), ?maxCount, ?direction) = async { let direction = defaultArg direction Direction.Forward @@ -1246,12 +1238,12 @@ type Context match maxCount with | Some limit -> maxCountPredicate limit | None -> fun _ -> false - return! client.Read log stream direction startPos (Some,isOrigin) } + return! container.Read(log, stream, direction, startPos, (Some,isOrigin)) } /// Establishes the current position of the stream in as efficient a manner as possible /// (The ideal situation is that the preceding token is supplied as input in order to avail of 1RU low latency state checks) member __.Sync(stream, ?position: Position) : Async = async { - let! 
(Token.Unpack (_,_,pos')) = client.GetPosition(log, stream, ?pos=position) + let! (Token.Unpack (_,pos')) = container.GetPosition(log, stream, ?pos=position) return pos' } /// Reads in batches of `batchSize` from the specified `Position`, allowing the reader to efficiently walk away from a running query @@ -1265,7 +1257,7 @@ type Context /// Appends the supplied batch of events, subject to a consistency check based on the `position` /// Callers should implement appropriate idempotent handling, or use Equinox.Stream for that purpose - member __.Sync((container,stream), position, events: IEventData<_>[]) : Async> = async { + member __.Sync(stream, position, events: IEventData<_>[]) : Async> = async { // Writes go through the stored proc, which we need to provision per-collection // Having to do this here in this way is far from ideal, but work on caching, external snapshots and caching is likely // to move this about before we reach a final destination in any case @@ -1273,11 +1265,11 @@ type Context | None -> () | Some init -> do! init () let batch = Sync.mkBatch stream events Seq.empty - let! res = client.Sync log (container,stream) (Sync.Exp.Version position.index,batch) + let! 
res = container.Sync(log, stream, (Sync.Exp.Version position.index, batch)) match res with - | InternalSyncResult.Written (Token.Unpack (_,_,pos)) -> return AppendResult.Ok pos - | InternalSyncResult.Conflict (Token.Unpack (_,_,pos),events) -> return AppendResult.Conflict (pos, events) - | InternalSyncResult.ConflictUnknown (Token.Unpack (_,_,pos)) -> return AppendResult.ConflictUnknown pos } + | InternalSyncResult.Written (Token.Unpack (_,pos)) -> return AppendResult.Ok pos + | InternalSyncResult.Conflict (Token.Unpack (_,pos),events) -> return AppendResult.Conflict (pos, events) + | InternalSyncResult.ConflictUnknown (Token.Unpack (_,pos)) -> return AppendResult.ConflictUnknown pos } /// Low level, non-idempotent call appending events to a stream without a concurrency control mechanism in play /// NB Should be used sparingly; Equinox.Stream enables building equivalent equivalent idempotent handling with minimal code. diff --git a/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs b/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs index dbd8358f6..88e14cda9 100644 --- a/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs +++ b/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs @@ -30,7 +30,7 @@ type Tests(testOutputHelper) = incr testIterations sprintf "events-%O-%i" name !testIterations let mkContextWithItemLimit log defaultBatchSize = - Context(createSpecifiedCosmosOrSimulatorClient log, log, ?defaultMaxItems = defaultBatchSize) + createSpecifiedCoreContext log defaultBatchSize let mkContext log = mkContextWithItemLimit log None let verifyRequestChargesMax rus = @@ -232,7 +232,7 @@ type Tests(testOutputHelper) = // 2 items atm test <@ [EqxAct.ResponseForward; EqxAct.ResponseForward; EqxAct.QueryForward] = capture.ExternalCalls @> - verifyRequestChargesMax 9 } // 8.51 // WAS 6 // 5.77 + verifyRequestChargesMax 7 } // 6.01 [] let ``get Lazy`` (TestStream streamName) = Async.RunSynchronously <| async { @@ -288,7 +288,7 @@ type 
Tests(testOutputHelper) = verifyCorrectEventsBackward 3L expected res test <@ List.replicate 2 EqxAct.ResponseBackward @ [EqxAct.QueryBackward] = capture.ExternalCalls @> - verifyRequestChargesMax 6 // 5.77 + verifyRequestChargesMax 7 // 6.01 } [] diff --git a/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs b/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs index aa6b6cece..6c3f13d5d 100644 --- a/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs +++ b/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs @@ -13,27 +13,33 @@ module Option = let private read env = Environment.GetEnvironmentVariable env |> Option.ofObj let (|Default|) def name = (read name),def ||> defaultArg -let dbId = read "EQUINOX_COSMOS_DATABASE" |> Option.defaultValue "equinox-test" -let cId = read "EQUINOX_COSMOS_CONTAINER" |> Option.defaultValue "equinox-test" +let private databaseId = read "EQUINOX_COSMOS_DATABASE" |> Option.defaultValue "equinox-test" +let private containerId = read "EQUINOX_COSMOS_CONTAINER" |> Option.defaultValue "equinox-test" let private connectToCosmos batchSize client = Context(client, defaultMaxItems = batchSize) -let createSpecifiedCosmosOrSimulatorClient log = +let createSpecifiedCosmosOrSimulatorClient (log : Serilog.ILogger) = let createClient name discovery = - StoreGatewayFactory(log=log, requestTimeout=TimeSpan.FromSeconds 3., maxRetryAttemptsOnRateLimitedRequests=2, maxRetryWaitTimeOnRateLimitedRequests=TimeSpan.FromMinutes 1.) - .Create(name, discovery, dbId, cId) + let factory = CosmosClientFactory(requestTimeout=TimeSpan.FromSeconds 3., maxRetryAttemptsOnRateLimitedRequests=2, maxRetryWaitTimeOnRateLimitedRequests=TimeSpan.FromMinutes 1.) 
+ let cosmosClient = factory.Create discovery + log.Information("Connection {name} to {endpoint}", name, cosmosClient.Endpoint) + Client(cosmosClient, databaseId, containerId) match read "EQUINOX_COSMOS_CONNECTION" with | None -> - Discovery.UriAndKey(Uri "https://localhost:8081", "C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw==") + Discovery.AccountUriAndKey(Uri "https://localhost:8081", "C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw==") |> createClient "localDocDbSim" | Some connectionString -> - Discovery.FromConnectionString connectionString + Discovery.ConnectionString connectionString |> createClient "EQUINOX_COSMOS_CONNECTION" let connectToSpecifiedCosmosOrSimulator (log: Serilog.ILogger) batchSize = createSpecifiedCosmosOrSimulatorClient log |> connectToCosmos batchSize +let createSpecifiedCoreContext log defaultBatchSize = + let client = createSpecifiedCosmosOrSimulatorClient log + Equinox.Cosmos.Core.Context(client.CosmosClient, log, databaseId, containerId, ?defaultMaxItems = defaultBatchSize) + let defaultBatchSize = 500 diff --git a/tools/Equinox.Tool/Program.fs b/tools/Equinox.Tool/Program.fs index b6c44a7fd..13c7ba204 100644 --- a/tools/Equinox.Tool/Program.fs +++ b/tools/Equinox.Tool/Program.fs @@ -77,8 +77,8 @@ and []DumpArguments = | [] PrettySkip | [] TimeRegular | [] UnfoldsOnly - | [] EventsOnly - | [] BatchSize of int + | [] EventsOnly + | [] BatchSize of int | [] Cosmos of ParseResults | [] Es of ParseResults | [] MsSql of ParseResults @@ -105,7 +105,7 @@ and DumpInfo(args: ParseResults) = match args.TryGetSubCommand() with | Some (DumpArguments.Cosmos sargs) -> let storeLog = createStoreLog <| sargs.Contains Storage.Cosmos.Arguments.VerboseStore - storeLog, Storage.Cosmos.config (log,storeLog) storeConfig (Storage.Cosmos.Info sargs) + storeLog, Storage.Cosmos.config log storeConfig (Storage.Cosmos.Info sargs) | Some (DumpArguments.Es sargs) -> let storeLog = 
createStoreLog <| sargs.Contains Storage.EventStore.Arguments.VerboseStore storeLog, Storage.EventStore.config (log,storeLog) storeConfig sargs @@ -179,7 +179,7 @@ and TestInfo(args: ParseResults) = | Some (Cosmos sargs) -> let storeLog = createStoreLog <| sargs.Contains Storage.Cosmos.Arguments.VerboseStore log.Information("Running transactions in-process against CosmosDb with storage options: {options:l}", __.Options) - storeLog, Storage.Cosmos.config (log,storeLog) (cache, __.Unfolds, __.BatchSize) (Storage.Cosmos.Info sargs) + storeLog, Storage.Cosmos.config log (cache, __.Unfolds, __.BatchSize) (Storage.Cosmos.Info sargs) | Some (Es sargs) -> let storeLog = createStoreLog <| sargs.Contains Storage.EventStore.Arguments.VerboseStore log.Information("Running transactions in-process against EventStore with storage options: {options:l}", __.Options) @@ -306,20 +306,19 @@ let createDomainLog verbose verboseConsole maybeSeqEndpoint = module CosmosInit = open Equinox.Cosmos.Store - let conn (log,verboseConsole,maybeSeq) (sargs : ParseResults) = - let storeLog = createStoreLog (sargs.Contains Storage.Cosmos.Arguments.VerboseStore) verboseConsole maybeSeq - let discovery, dName, cName, factory = Storage.Cosmos.connection (log,storeLog) (Storage.Cosmos.Info sargs) - storeLog, factory, discovery, dName, cName + let conn log (sargs : ParseResults) = + let cosmosClient, dName, cName = Storage.Cosmos.connection log (Storage.Cosmos.Info sargs) + cosmosClient, dName, cName - let containerAndOrDb (log: ILogger, verboseConsole, maybeSeq) (iargs: ParseResults) = + let containerAndOrDb (log: ILogger) (iargs: ParseResults) = match iargs.TryGetSubCommand() with | Some (InitArguments.Cosmos sargs) -> let rus, skipStoredProc = iargs.GetResult(InitArguments.Rus), iargs.Contains InitArguments.SkipStoredProc let mode = if iargs.Contains InitArguments.Shared then Provisioning.Database (ReplaceAlways rus) else Provisioning.Container (ReplaceAlways rus) let modeStr, rus = match mode with 
Provisioning.Container rus -> "Container",rus | Provisioning.Database rus -> "Database",rus - let _storeLog, factory, discovery, dName, cName = conn (log,verboseConsole,maybeSeq) sargs + let cosmosClient, dName, cName = conn log sargs log.Information("Provisioning `Equinox.Cosmos` Store collection at {mode:l} level for {rus:n0} RU/s", modeStr, rus) - factory.Create(appName, discovery, dName, cName).InitializeContainer(mode, not skipStoredProc) |> ignore + Equinox.Cosmos.Store.Initialization.initializeContainer cosmosClient dName cName mode (not skipStoredProc, None) |> Async.Ignore |> Async.RunSynchronously | _ -> failwith "please specify a `cosmos` endpoint" module SqlInit = @@ -342,14 +341,14 @@ module CosmosStats = member container.QueryValue<'T>(sqlQuery : string) = let query : seq<'T> = container.GetItemQueryIterator<'T>(sqlQuery) |> AsyncSeq.ofAsyncEnum |> AsyncSeq.toBlockingSeq query |> Seq.exactlyOne - let run (log : ILogger, verboseConsole, maybeSeq) (args : ParseResults) = async { + let run (log : ILogger) (args : ParseResults) = async { match args.TryGetSubCommand() with | Some (StatsArguments.Cosmos sargs) -> let doS,doD,doE = args.Contains StatsArguments.Streams, args.Contains StatsArguments.Documents, args.Contains StatsArguments.Events let doS = doS || (not doD && not doE) // default to counting streams only unless otherwise specified let inParallel = args.Contains Parallel - let _storeLog,factory,discovery,dName,cName = CosmosInit.conn (log,verboseConsole,maybeSeq) sargs - let client = factory.Create(appName, discovery, dName, cName) + let cosmosClient, dName, cName = CosmosInit.conn log sargs + let container = cosmosClient.GetDatabase(dName).GetContainer(cName) let ops = [ if doS then yield "Streams", """SELECT VALUE COUNT(1) FROM c WHERE c.id="-1" """ if doD then yield "Documents", """SELECT VALUE COUNT(1) FROM c""" @@ -357,7 +356,7 @@ module CosmosStats = log.Information("Computing {measures} ({mode})", Seq.map fst ops, (if inParallel then "in 
parallel" else "serially")) ops |> Seq.map (fun (name,sql) -> async { log.Debug("Running query: {sql}", sql) - let res = client.GetContainer().QueryValue(sql) + let res = container.QueryValue(sql) log.Information("{stat}: {result:N0}", name, res)}) |> if inParallel then Async.Parallel else Async.Sequential |> Async.Ignore @@ -463,10 +462,10 @@ let main argv = let verbose = args.Contains Verbose use log = createDomainLog verbose verboseConsole maybeSeq try match args.GetSubCommand() with - | Init iargs -> CosmosInit.containerAndOrDb (log, verboseConsole, maybeSeq) iargs + | Init iargs -> CosmosInit.containerAndOrDb log iargs | Config cargs -> SqlInit.databaseOrSchema log cargs |> Async.RunSynchronously | Dump dargs -> Dump.run (log, verboseConsole, maybeSeq) dargs |> Async.RunSynchronously - | Stats sargs -> CosmosStats.run (log, verboseConsole, maybeSeq) sargs |> Async.RunSynchronously + | Stats sargs -> CosmosStats.run log sargs |> Async.RunSynchronously | Run rargs -> let reportFilename = args.GetResult(LogFile,programName+".log") |> fun n -> System.IO.FileInfo(n).FullName LoadTest.run log (verbose,verboseConsole,maybeSeq) reportFilename rargs diff --git a/tools/Equinox.Tool/Properties/launchSettings.json b/tools/Equinox.Tool/Properties/launchSettings.json deleted file mode 100644 index 3d969fc8e..000000000 --- a/tools/Equinox.Tool/Properties/launchSettings.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "profiles": { - "Equinox.Tool": { - "commandName": "Project", - "commandLineArgs": "init -ru 400 cosmos", - "environmentVariables": { - "EQUINOX_COSMOS_CONNECTION": "AccountEndpoint=https://localhost:8081/;AccountKey=C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw==", - "EQUINOX_COSMOS_CONTAINER": "equinox-master", - "EQUINOX_COSMOS_DATABASE": "equinox-master" - } - } - } -} \ No newline at end of file From 1e241211cd5597ad281a460fd813730f5d89cfa0 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Mon, 6 Apr 2020 10:33:39 +0100 
Subject: [PATCH 62/71] simplify GetContainer --- tools/Equinox.Tool/Program.fs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/Equinox.Tool/Program.fs b/tools/Equinox.Tool/Program.fs index 13c7ba204..80364a3b5 100644 --- a/tools/Equinox.Tool/Program.fs +++ b/tools/Equinox.Tool/Program.fs @@ -348,7 +348,7 @@ module CosmosStats = let doS = doS || (not doD && not doE) // default to counting streams only unless otherwise specified let inParallel = args.Contains Parallel let cosmosClient, dName, cName = CosmosInit.conn log sargs - let container = cosmosClient.GetDatabase(dName).GetContainer(cName) + let container = cosmosClient.GetContainer(dName, cName) let ops = [ if doS then yield "Streams", """SELECT VALUE COUNT(1) FROM c WHERE c.id="-1" """ if doD then yield "Documents", """SELECT VALUE COUNT(1) FROM c""" From 84c3b73ed3fbcdee9382c541b5e33c71df51cf67 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Wed, 15 Apr 2020 13:33:02 +0100 Subject: [PATCH 63/71] Cosmos -> CosmosStore and related renames (#216) --- DOCUMENTATION.md | 33 ++++---- Equinox.sln | 4 +- README.md | 8 +- build.proj | 2 +- diagrams/context.puml | 2 +- samples/Infrastructure/Infrastructure.fsproj | 2 +- samples/Infrastructure/Services.fs | 4 +- samples/Infrastructure/Storage.fs | 20 ++--- samples/Store/Integration/CartIntegration.fs | 8 +- .../ContactPreferencesIntegration.fs | 12 +-- .../Store/Integration/FavoritesIntegration.fs | 8 +- samples/Store/Integration/Integration.fsproj | 4 +- samples/Store/Integration/LogIntegration.fs | 6 +- samples/Tutorial/AsAt.fsx | 45 ++++++----- samples/Tutorial/Cosmos.fsx | 30 +++---- samples/Tutorial/FulfilmentCenter.fsx | 27 +++---- samples/Tutorial/Gapless.fs | 6 +- samples/Tutorial/Index.fs | 6 +- samples/Tutorial/Sequence.fs | 6 +- samples/Tutorial/Set.fs | 6 +- samples/Tutorial/Todo.fsx | 27 +++---- samples/Tutorial/Tutorial.fsproj | 8 +- samples/Tutorial/Upload.fs | 6 +- samples/Web/Program.fs | 4 +- .../CosmosJsonSerializer.fs | 2 
+- .../CosmosStore.fs} | 80 +++++++++---------- .../Equinox.CosmosStore.fsproj} | 2 +- .../CacheCellTests.fs | 6 +- .../CosmosCoreIntegration.fs | 16 ++-- .../CosmosFixtures.fs | 16 ++-- .../CosmosFixturesInfrastructure.fs | 14 ++-- .../CosmosIntegration.fs | 22 ++--- .../Equinox.CosmosStore.Integration.fsproj} | 2 +- .../Json.fs | 2 +- .../JsonConverterTests.fs | 11 ++- tools/Equinox.Tool/Program.fs | 22 ++--- 36 files changed, 241 insertions(+), 238 deletions(-) rename src/{Equinox.Cosmos => Equinox.CosmosStore}/CosmosJsonSerializer.fs (98%) rename src/{Equinox.Cosmos/Cosmos.fs => Equinox.CosmosStore/CosmosStore.fs} (95%) rename src/{Equinox.Cosmos/Equinox.Cosmos.fsproj => Equinox.CosmosStore/Equinox.CosmosStore.fsproj} (97%) rename tests/{Equinox.Cosmos.Integration => Equinox.CosmosStore.Integration}/CacheCellTests.fs (96%) rename tests/{Equinox.Cosmos.Integration => Equinox.CosmosStore.Integration}/CosmosCoreIntegration.fs (95%) rename tests/{Equinox.Cosmos.Integration => Equinox.CosmosStore.Integration}/CosmosFixtures.fs (74%) rename tests/{Equinox.Cosmos.Integration => Equinox.CosmosStore.Integration}/CosmosFixturesInfrastructure.fs (93%) rename tests/{Equinox.Cosmos.Integration => Equinox.CosmosStore.Integration}/CosmosIntegration.fs (93%) rename tests/{Equinox.Cosmos.Integration/Equinox.Cosmos.Integration.fsproj => Equinox.CosmosStore.Integration/Equinox.CosmosStore.Integration.fsproj} (93%) rename tests/{Equinox.Cosmos.Integration => Equinox.CosmosStore.Integration}/Json.fs (91%) rename tests/{Equinox.Cosmos.Integration => Equinox.CosmosStore.Integration}/JsonConverterTests.fs (84%) diff --git a/DOCUMENTATION.md b/DOCUMENTATION.md index ff7e8d983..9a050154c 100755 --- a/DOCUMENTATION.md +++ b/DOCUMENTATION.md @@ -104,23 +104,23 @@ In other processes (when a cache is not fully in sync), the sequence runs slight ![Equinox.EventStore/SqlStreamStore c4model.com Code - another process; using 
snapshotting](http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.github.com/jet/equinox/diag/diagrams/EventStoreCode.puml&idx=3&fmt=svg) -# Equinox.Cosmos +# Equinox.CosmosStore -## Container Diagram for `Equinox.Cosmos` +## Container Diagram for `Equinox.CosmosStore` -![Equinox.Cosmos c4model.com Container Diagram](http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.github.com/jet/equinox/diag/diagrams/CosmosContainer.puml?fmt=svg) +![Equinox.CosmosStore c4model.com Container Diagram](http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.github.com/jet/equinox/diag/diagrams/CosmosContainer.puml?fmt=svg) -## Component Diagram for `Equinox.Cosmos` +## Component Diagram for `Equinox.CosmosStore` -![Equinox.Cosmos c4model.com Component Diagram](http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.github.com/jet/equinox/diag/diagrams/CosmosComponent.puml?fmt=svg) +![Equinox.CosmosStore c4model.com Component Diagram](http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.github.com/jet/equinox/diag/diagrams/CosmosComponent.puml?fmt=svg) -## Code Diagrams for `Equinox.Cosmos` +## Code Diagrams for `Equinox.CosmosStore` This diagram walks through the basic sequence of operations, where: - this node has not yet read this stream (i.e. 
there's nothing in the Cache) - when we do read it, the Read call returns `404` (with a charge of `1 RU`) -![Equinox.Cosmos c4model.com Code - first Time](http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.github.com/jet/equinox/diag/diagrams/CosmosCode.puml&idx=0&fmt=svg) +![Equinox.CosmosStore c4model.com Code - first Time](http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.github.com/jet/equinox/diag/diagrams/CosmosCode.puml&idx=0&fmt=svg) Next, we extend the scenario to show: - how state held in the Cache influences the Cosmos APIs used @@ -131,17 +131,17 @@ Next, we extend the scenario to show: - when there's conflict and we're retrying (re-run the decision the conflicting events the call to `Sync` yielded) - when there's conflict and we're giving up (throw `MaxAttemptsExceededException`) -![Equinox.Cosmos c4model.com Code - with cache, snapshotting](http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.github.com/jet/equinox/diag/diagrams/CosmosCode.puml&idx=1&fmt=svg) +![Equinox.CosmosStore c4model.com Code - with cache, snapshotting](http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.github.com/jet/equinox/diag/diagrams/CosmosCode.puml&idx=1&fmt=svg) After the write, we circle back to illustrate the effect of the caching when we have correct state (we get a `304 Not Mofified` and pay only `1 RU`) -![Equinox.Cosmos c4model.com Code - next time; same process, i.e. cached](http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.github.com/jet/equinox/diag/diagrams/CosmosCode.puml&idx=2&fmt=svg) +![Equinox.CosmosStore c4model.com Code - next time; same process, i.e. 
cached](http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.github.com/jet/equinox/diag/diagrams/CosmosCode.puml&idx=2&fmt=svg) In other processes (when a cache is not fully in sync), the sequence runs slightly differently - we read the _Tip_ document, and can work from that snapshot - the same fallback sequence shown in the initial read will take place if no suitable snapshot that passes the `isOrigin` predicate is found within the _Tip_ -![Equinox.Cosmos c4model.com Code - another process; using snapshotting](http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.github.com/jet/equinox/diag/diagrams/CosmosCode.puml&idx=3&fmt=svg) +![Equinox.CosmosStore c4model.com Code - another process; using snapshotting](http://www.plantuml.com/plantuml/proxy?cache=no&src=https://raw.github.com/jet/equinox/diag/diagrams/CosmosCode.puml&idx=3&fmt=svg) # Glossary @@ -1018,7 +1018,7 @@ It can be useful to consider keeping snapshots in the auxiliary collection emplo This article provides a walkthrough of how `Equinox.Cosmos` encodes, writes and reads records from a stream under its control. -The code (see [source](src/Equinox.Cosmos/Cosmos.fs#L6)) contains lots of comments and is intended to be read - this just provides some background. +The code (see [source](src/Equinox.CosmosStore/CosmosStore.fs#L6)) contains lots of comments and is intended to be read - this just provides some background. 
## Batches @@ -1171,18 +1171,19 @@ let outputLog = LoggerConfiguration().WriteTo.NLog().CreateLogger() let gatewayLog = outputLog.ForContext(Serilog.Core.Constants.SourceContextPropertyName, "Equinox") // When starting the app, we connect (once) -let connector : Equinox.Cosmos.Connector = - Connector( +let factory : Equinox.CosmosStore.CosmosStoreClientFactory = + CosmosStoreClientFactory( requestTimeout = TimeSpan.FromSeconds 5., maxRetryAttemptsOnThrottledRequests = 1, maxRetryWaitTimeInSeconds = 3, log = gatewayLog) -let cnx = connector.Connect("Application.CommandProcessor", Discovery.FromConnectionString connectionString) |> Async.RunSynchronously +let client = factory.Create(Discovery.ConnectionString connectionString) // If storing in a single collection, one specifies the db and collection // alternately use the overload that defers the mapping until the stream one is writing to becomes clear -let containerMap = Containers("databaseName", "containerName") -let ctx = Context(cnx, containerMap, gatewayLog) +let connection = CosmosStoreConnection(client, "databaseName", "containerName") +let storeContext = CosmosStoreContext(connection, "databaseName", "containerName") +let ctx = EventsContext(storeContext, gatewayLog) // // Write an event diff --git a/Equinox.sln b/Equinox.sln index ac98a2a97..3d4801feb 100644 --- a/Equinox.sln +++ b/Equinox.sln @@ -46,9 +46,9 @@ Project("{6EC3EE1D-3C4E-46DD-8F32-0CC8E7565705}") = "Equinox.MemoryStore.Integra EndProject Project("{6EC3EE1D-3C4E-46DD-8F32-0CC8E7565705}") = "Equinox.Tool", "tools\Equinox.Tool\Equinox.Tool.fsproj", "{C8992C1C-6DC5-42CD-A3D7-1C5663433FED}" EndProject -Project("{6EC3EE1D-3C4E-46DD-8F32-0CC8E7565705}") = "Equinox.Cosmos", "src\Equinox.Cosmos\Equinox.Cosmos.fsproj", "{54EA6187-9F9F-4D67-B602-163D011E43E6}" +Project("{6EC3EE1D-3C4E-46DD-8F32-0CC8E7565705}") = "Equinox.CosmosStore", "src\Equinox.CosmosStore\Equinox.CosmosStore.fsproj", "{54EA6187-9F9F-4D67-B602-163D011E43E6}" EndProject 
-Project("{6EC3EE1D-3C4E-46DD-8F32-0CC8E7565705}") = "Equinox.Cosmos.Integration", "tests\Equinox.Cosmos.Integration\Equinox.Cosmos.Integration.fsproj", "{DE0FEBF0-72DC-4D4A-BBA7-788D875D6B4B}" +Project("{6EC3EE1D-3C4E-46DD-8F32-0CC8E7565705}") = "Equinox.CosmosStore.Integration", "tests\Equinox.CosmosStore.Integration\Equinox.CosmosStore.Integration.fsproj", "{DE0FEBF0-72DC-4D4A-BBA7-788D875D6B4B}" EndProject Project("{6EC3EE1D-3C4E-46DD-8F32-0CC8E7565705}") = "TodoBackend", "samples\TodoBackend\TodoBackend.fsproj", "{EC2EC658-3D85-44F3-AD2F-52AFCAFF8871}" EndProject diff --git a/README.md b/README.md index 2ec239b39..c6edc15c4 100644 --- a/README.md +++ b/README.md @@ -38,7 +38,7 @@ Some aspects of the implementation are distilled from [`Jet.com` systems dating - support, (via the [`FsCodec.IEventCodec`](https://github.com/jet/FsCodec#IEventCodec)) for the maintenance of multiple co-existing compaction schemas for a given stream (A 'compaction' event/snapshot isa Event) - compaction events typically do not get deleted (consistent with how EventStore works), although it is safe to do so in concept - NB while this works well, and can deliver excellent performance (especially when allied with the Cache), [it's not a panacea, as noted in this excellent EventStore.org article on the topic](https://eventstore.org/docs/event-sourcing-basics/rolling-snapshots/index.html) -- **`Equinox.Cosmos` 'Tip with Unfolds' schema**: (In contrast to `Equinox.EventStore`'s `AccessStrategy.RollingSnapshots`,) when using `Equinox.Cosmos`, optimized command processing is managed via the `Tip`; a document per stream with a well-known identity enabling Syncing the r/w Position via a single point-read by virtue of the fact that the document maintains: +- **`Equinox.CosmosStore` 'Tip with Unfolds' schema**: (In contrast to `Equinox.EventStore`'s `AccessStrategy.RollingSnapshots`,) when using `Equinox.CosmosStore`, optimized command processing is managed via the `Tip`; a document per stream 
with a well-known identity enabling Syncing the r/w Position via a single point-read by virtue of the fact that the document maintains: a) the present Position of the stream - i.e. the index at which the next events will be appended for a given stream (events and the Tip share a common logical partition key) b) ephemeral (`deflate+base64` compressed) [_unfolds_](DOCUMENTATION.md#Cosmos-Storage-Model) c) (optionally) a holding buffer for events since those unfolded events ([presently removed](https://github.com/jet/equinox/pull/58), but [should return](DOCUMENTATION.md#Roadmap), see [#109](https://github.com/jet/equinox/pull/109)) @@ -49,7 +49,7 @@ Some aspects of the implementation are distilled from [`Jet.com` systems dating - no additional roundtrips to the store needed at either the Load or Sync points in the flow It should be noted that from a querying perspective, the `Tip` shares the same structure as `Batch` documents (a potential future extension would be to carry some events in the `Tip` as [some interim versions of the implementation once did](https://github.com/jet/equinox/pull/58), see also [#109](https://github.com/jet/equinox/pull/109). -- **`Equinox.Cosmos` `RollingState` and `Custom` 'non-event-sourced' modes**: Uses 'Tip with Unfolds' encoding to avoid having to write event documents at all - this enables one to build, reason about and test your aggregates in the normal manner, but inhibit event documents from being generated. This enables one to benefit from the caching and consistency management mechanisms without having to bear the cost of writing and storing the events themselves (and/or dealing with an ever-growing store size). Search for `transmute` or `RollingState` in the `samples` and/or see [the `Checkpoint` Aggregate in Propulsion](https://github.com/jet/propulsion/blob/master/src/Propulsion.EventStore/Checkpoint.fs). 
One chief use of this mechanism is for tracking Summary Event feeds in [the `dotnet-templates` `summaryConsumer` template](https://github.com/jet/dotnet-templates/tree/master/propulsion-summary-consumer). +- **`Equinox.CosmosStore` `RollingState` and `Custom` 'non-event-sourced' modes**: Uses 'Tip with Unfolds' encoding to avoid having to write event documents at all - this enables one to build, reason about and test your aggregates in the normal manner, but inhibit event documents from being generated. This enables one to benefit from the caching and consistency management mechanisms without having to bear the cost of writing and storing the events themselves (and/or dealing with an ever-growing store size). Search for `transmute` or `RollingState` in the `samples` and/or see [the `Checkpoint` Aggregate in Propulsion](https://github.com/jet/propulsion/blob/master/src/Propulsion.EventStore/Checkpoint.fs). One chief use of this mechanism is for tracking Summary Event feeds in [the `dotnet-templates` `summaryConsumer` template](https://github.com/jet/dotnet-templates/tree/master/propulsion-summary-consumer). ## Components @@ -77,7 +77,7 @@ The components within this repository are delivered as multi-targeted Nuget pack - `Equinox.Core` [![NuGet](https://img.shields.io/nuget/v/Equinox.Core.svg)](https://www.nuget.org/packages/Equinox.Core/): Interfaces and helpers used in realizing the concrete Store implementations, together with the default [`System.Runtime.Caching.Cache`-based] `Cache` implementation . ([depends](https://www.fuget.org/packages/Equinox.Core) on `Equinox`, `System.Runtime.Caching`) - `Equinox.MemoryStore` [![MemoryStore NuGet](https://img.shields.io/nuget/v/Equinox.MemoryStore.svg)](https://www.nuget.org/packages/Equinox.MemoryStore/): In-memory store for integration testing/performance baselining/providing out-of-the-box zero dependency storage for examples. 
([depends](https://www.fuget.org/packages/Equinox.MemoryStore) on `Equinox.Core`) - `Equinox.EventStore` [![EventStore NuGet](https://img.shields.io/nuget/v/Equinox.EventStore.svg)](https://www.nuget.org/packages/Equinox.EventStore/): Production-strength [EventStore](https://eventstore.org/) Adapter instrumented to the degree necessitated by Jet's production monitoring requirements. ([depends](https://www.fuget.org/packages/Equinox.EventStore) on `Equinox.Core`, `EventStore.Client[Api.NetCore] >= 5.0.1`, `FSharp.Control.AsyncSeq`) -- `Equinox.Cosmos` [![Cosmos NuGet](https://img.shields.io/nuget/v/Equinox.Cosmos.svg)](https://www.nuget.org/packages/Equinox.Cosmos/): Production-strength Azure CosmosDb Adapter with integrated 'unfolds' feature, facilitating optimal read performance in terms of latency and RU costs, instrumented to the degree necessitated by Jet's production monitoring requirements. ([depends](https://www.fuget.org/packages/Equinox.Cosmos) on `Equinox.Core`, `Microsoft.Azure.DocumentDb[.Core] >= 2.2`, `FsCodec.NewtonsoftJson`, `FSharp.Control.AsyncSeq`) +- `Equinox.CosmosStore` [![Cosmos NuGet](https://img.shields.io/nuget/v/Equinox.Cosmos.svg)](https://www.nuget.org/packages/Equinox.Cosmos/): Production-strength Azure CosmosDb Adapter with integrated 'unfolds' feature, facilitating optimal read performance in terms of latency and RU costs, instrumented to the degree necessitated by Jet's production monitoring requirements. 
([depends](https://www.fuget.org/packages/Equinox.CosmosStore) on `Equinox.Core`, `Azure.Cosmos >= 4.0`, `FsCodec.NewtonsoftJson`, `FSharp.Control.AsyncSeq`) - `Equinox.SqlStreamStore` [![SqlStreamStore NuGet](https://img.shields.io/nuget/v/Equinox.SqlStreamStore.svg)](https://www.nuget.org/packages/Equinox.SqlStreamStore/): Production-strength [SqlStreamStore](https://github.com/SQLStreamStore/SQLStreamStore) Adapter derived from `Equinox.EventStore` - provides core facilities (but does not connect to a specific database; see sibling `SqlStreamStore`.* packages). ([depends](https://www.fuget.org/packages/Equinox.SqlStreamStore) on `Equinox.Core`, `FsCodec`, `SqlStreamStore >= 1.2.0-beta.8`, `FSharp.Control.AsyncSeq`) - `Equinox.SqlStreamStore.MsSql` [![MsSql NuGet](https://img.shields.io/nuget/v/Equinox.SqlStreamStore.MsSql.svg)](https://www.nuget.org/packages/Equinox.SqlStreamStore.MsSql/): [SqlStreamStore.MsSql](https://sqlstreamstore.readthedocs.io/en/latest/sqlserver) Sql Server `Connector` implementation for `Equinox.SqlStreamStore` package). ([depends](https://www.fuget.org/packages/Equinox.SqlStreamStore.MsSql) on `Equinox.SqlStreamStore`, `SqlStreamStore.MsSql >= 1.2.0-beta.8`) - `Equinox.SqlStreamStore.MySql` [![MySql NuGet](https://img.shields.io/nuget/v/Equinox.SqlStreamStore.MySql.svg)](https://www.nuget.org/packages/Equinox.SqlStreamStore.MySql/): `SqlStreamStore.MySql` MySQL Í`Connector` implementation for `Equinox.SqlStreamStore` package). ([depends](https://www.fuget.org/packages/Equinox.SqlStreamStore.MySql) on `Equinox.SqlStreamStore`, `SqlStreamStore.MySql >= 1.2.0-beta.8`) @@ -89,7 +89,7 @@ Equinox does not focus on projection logic or wrapping thereof - each store brin - `FsKafka` [![FsKafka NuGet](https://img.shields.io/nuget/v/FsKafka.svg)](https://www.nuget.org/packages/FsKafka/): Wraps `Confluent.Kafka` to provide efficient batched Kafka Producer and Consumer configurations, with basic logging instrumentation. 
Used in the [`propulsion project kafka`](https://github.com/jet/propulsion#dotnet-tool-provisioning--projections-test-tool) tool command; see [`dotnet new proProjector -k; dotnet new proConsumer` to generate a sample app](https://github.com/jet/dotnet-templates#propulsion-related) using it (see the `BatchedAsync` and `BatchedSync` modules in `Examples.fs`). - `Propulsion` [![Propulsion NuGet](https://img.shields.io/nuget/v/Propulsion.svg)](https://www.nuget.org/packages/Propulsion/): defines a canonical `Propulsion.Streams.StreamEvent` used to interop with `Propulsion.*` in processing pipelines for the `proProjector` and `proSync` templates in the [templates repo](https://github.com/jet/dotnet-templates), together with the `Ingestion`, `Streams`, `Progress` and `Parallel` modules that get composed into those processing pipelines. ([depends](https://www.fuget.org/packages/Propulsion) on `Serilog`) -- `Propulsion.Cosmos` [![Propulsion.Cosmos NuGet](https://img.shields.io/nuget/v/Propulsion.Cosmos.svg)](https://www.nuget.org/packages/Propulsion.Cosmos/): Wraps the [Microsoft .NET `ChangeFeedProcessor` library](https://github.com/Azure/azure-documentdb-changefeedprocessor-dotnet) providing a [processor loop](DOCUMENTATION.md#change-feed-processors) that maintains a continuous query loop per CosmosDb Physical Partition (Range) yielding new or updated documents (optionally unrolling events written by `Equinox.Cosmos` for processing or forwarding). Used in the [`propulsion project stats cosmos`](dotnet-tool-provisioning--benchmarking-tool) tool command; see [`dotnet new proProjector` to generate a sample app](#quickstart) using it. 
([depends](https://www.fuget.org/packages/Propulsion.Cosmos) on `Equinox.Cosmos`, `Microsoft.Azure.DocumentDb.ChangeFeedProcessor >= 2.2.5`) +- `Propulsion.Cosmos` [![Propulsion.Cosmos NuGet](https://img.shields.io/nuget/v/Propulsion.Cosmos.svg)](https://www.nuget.org/packages/Propulsion.Cosmos/): Wraps the [Microsoft .NET `ChangeFeedProcessor` library](https://github.com/Azure/azure-documentdb-changefeedprocessor-dotnet) providing a [processor loop](DOCUMENTATION.md#change-feed-processors) that maintains a continuous query loop per CosmosDb Physical Partition (Range) yielding new or updated documents (optionally unrolling events written by `Equinox.CosmosStore` for processing or forwarding). Used in the [`propulsion project stats cosmos`](dotnet-tool-provisioning--benchmarking-tool) tool command; see [`dotnet new proProjector` to generate a sample app](#quickstart) using it. ([depends](https://www.fuget.org/packages/Propulsion.Cosmos) on `Equinox.Cosmos`, `Microsoft.Azure.DocumentDb.ChangeFeedProcessor >= 2.2.5`) - `Propulsion.EventStore` [![Propulsion.EventStore NuGet](https://img.shields.io/nuget/v/Propulsion.EventStore.svg)](https://www.nuget.org/packages/Propulsion.EventStore/) Used in the [`propulsion project es`](dotnet-tool-provisioning--benchmarking-tool) tool command; see [`dotnet new proSync` to generate a sample app](#quickstart) using it. ([depends](https://www.fuget.org/packages/Propulsion.EventStore) on `Equinox.EventStore`) - `Propulsion.Kafka` [![Propulsion.Kafka NuGet](https://img.shields.io/nuget/v/Propulsion.Kafka.svg)](https://www.nuget.org/packages/Propulsion.Kafka/): Provides a canonical `RenderedSpan` that can be used as a default format when projecting events via e.g. the Producer/Consumer pair in `dotnet new proProjector -k; dotnet new proConsumer`. 
([depends](https://www.fuget.org/packages/Propulsion.Kafka) on `Newtonsoft.Json >= 11.0.2`, `Propulsion`, `FsKafka`) diff --git a/build.proj b/build.proj index 99309aa4b..0d5e2e31a 100644 --- a/build.proj +++ b/build.proj @@ -16,7 +16,7 @@ - + diff --git a/diagrams/context.puml b/diagrams/context.puml index e58d1d8ea..0d9077c85 100644 --- a/diagrams/context.puml +++ b/diagrams/context.puml @@ -1,4 +1,4 @@ - @startuml +@startuml !includeurl https://raw.githubusercontent.com/skleanthous/C4-PlantumlSkin/master/build/output/c4.puml title System Context Diagram for Equinox (+Propulsion) diff --git a/samples/Infrastructure/Infrastructure.fsproj b/samples/Infrastructure/Infrastructure.fsproj index 125a51a91..ca3032c34 100644 --- a/samples/Infrastructure/Infrastructure.fsproj +++ b/samples/Infrastructure/Infrastructure.fsproj @@ -19,7 +19,7 @@ - + diff --git a/samples/Infrastructure/Services.fs b/samples/Infrastructure/Services.fs index d27e2ade0..831f8868f 100644 --- a/samples/Infrastructure/Services.fs +++ b/samples/Infrastructure/Services.fs @@ -19,8 +19,8 @@ type StreamResolver(storage) = snapshot: (('event -> bool) * ('state -> 'event))) = match storage with | Storage.StorageConfig.Cosmos (store, caching, unfolds, _databaseId, _containerId) -> - let accessStrategy = if unfolds then Equinox.Cosmos.AccessStrategy.Snapshot snapshot else Equinox.Cosmos.AccessStrategy.Unoptimized - Equinox.Cosmos.Resolver<'event,'state,_>(store, codec, fold, initial, caching, accessStrategy).Resolve + let accessStrategy = if unfolds then Equinox.CosmosStore.AccessStrategy.Snapshot snapshot else Equinox.CosmosStore.AccessStrategy.Unoptimized + Equinox.CosmosStore.CosmosStoreCategory<'event,'state,_>(store, codec, fold, initial, caching, accessStrategy).Resolve | _ -> failwith "Currently, only Cosmos can be used with a JsonElement codec." 
member __.ResolveWithUtf8ArrayCodec diff --git a/samples/Infrastructure/Storage.fs b/samples/Infrastructure/Storage.fs index 60e868300..08747b3c0 100644 --- a/samples/Infrastructure/Storage.fs +++ b/samples/Infrastructure/Storage.fs @@ -10,7 +10,7 @@ type StorageConfig = // For MemoryStore, we keep the events as UTF8 arrays - we could use FsCodec.Codec.Box to remove the JSON encoding, which would improve perf but can conceal problems | Memory of Equinox.MemoryStore.VolatileStore | Es of Equinox.EventStore.Context * Equinox.EventStore.CachingStrategy option * unfolds: bool - | Cosmos of Equinox.Cosmos.Context * Equinox.Cosmos.CachingStrategy * unfolds: bool * databaseId: string * containerId: string + | Cosmos of Equinox.CosmosStore.CosmosStoreContext * Equinox.CosmosStore.CachingStrategy * unfolds: bool * databaseId: string * containerId: string | Sql of Equinox.SqlStreamStore.Context * Equinox.SqlStreamStore.CachingStrategy option * unfolds: bool module MemoryStore = @@ -67,23 +67,23 @@ module Cosmos = /// 1) replace connection below with a connection string or Uri+Key for an initialized Equinox instance with a database and collection named "equinox-test" /// 2) Set the 3x environment variables and create a local Equinox using tools/Equinox.Tool/bin/Release/net461/eqx.exe ` /// init -ru 1000 cosmos -s $env:EQUINOX_COSMOS_CONNECTION -d $env:EQUINOX_COSMOS_DATABASE -c $env:EQUINOX_COSMOS_CONTAINER - open Equinox.Cosmos + open Equinox.CosmosStore open Serilog - let connection (log: ILogger) (a : Info) = + let conn (log: ILogger) (a : Info) = let discovery = Discovery.ConnectionString a.Connection - let cosmosClient = CosmosClientFactory(a.Timeout, a.Retries, a.MaxRetryWaitTime, mode=a.Mode).Create(discovery) + let client = CosmosStoreClientFactory(a.Timeout, a.Retries, a.MaxRetryWaitTime, mode=a.Mode).Create(discovery) log.Information("CosmosDb {mode} {connection} Database {database} Container {container}", - a.Mode, cosmosClient.Endpoint, a.Database, a.Container) + 
a.Mode, client.Endpoint, a.Database, a.Container) log.Information("CosmosDb timeout {timeout}s; Throttling retries {retries}, max wait {maxRetryWaitTime}s", (let t = a.Timeout in t.TotalSeconds), a.Retries, let x = a.MaxRetryWaitTime in x.TotalSeconds) - cosmosClient, a.Database, a.Container + client, a.Database, a.Container let config (log: ILogger) (cache, unfolds, batchSize) info = - let cosmosClient, dName, cName = connection log info - let client = Client(cosmosClient, dName, cName) - let ctx = Context(client, defaultMaxItems = batchSize) + let client, databaseId, containerId = conn log info + let conn = CosmosStoreConnection(client, databaseId, containerId) + let ctx = CosmosStoreContext(conn, defaultMaxItems = batchSize) let cacheStrategy = match cache with Some c -> CachingStrategy.SlidingWindow (c, TimeSpan.FromMinutes 20.) | None -> CachingStrategy.NoCaching - StorageConfig.Cosmos (ctx, cacheStrategy, unfolds, dName, cName) + StorageConfig.Cosmos (ctx, cacheStrategy, unfolds, databaseId, containerId) /// To establish a local node to run the tests against: /// 1. 
cinst eventstore-oss -y # where cinst is an invocation of the Chocolatey Package Installer on Windows diff --git a/samples/Store/Integration/CartIntegration.fs b/samples/Store/Integration/CartIntegration.fs index 85260577d..09ccfc7c9 100644 --- a/samples/Store/Integration/CartIntegration.fs +++ b/samples/Store/Integration/CartIntegration.fs @@ -1,7 +1,7 @@ module Samples.Store.Integration.CartIntegration open Equinox -open Equinox.Cosmos.Integration +open Equinox.CosmosStore.Integration open Equinox.EventStore open Equinox.MemoryStore open Swensen.Unquote @@ -25,9 +25,9 @@ let resolveGesStreamWithoutCustomAccessStrategy gateway = let cosmosCodec = Domain.Cart.Events.codecStj (FsCodec.SystemTextJson.Options.Create()) let resolveCosmosStreamWithSnapshotStrategy context = - fun (id,opt) -> Cosmos.Resolver(context, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.Snapshot snapshot).Resolve(id,?option=opt) + fun (id,opt) -> CosmosStore.CosmosStoreCategory(context, cosmosCodec, fold, initial, CosmosStore.CachingStrategy.NoCaching, CosmosStore.AccessStrategy.Snapshot snapshot).Resolve(id,?option=opt) let resolveCosmosStreamWithoutCustomAccessStrategy context = - fun (id,opt) -> Cosmos.Resolver(context, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.Unoptimized).Resolve(id,?option=opt) + fun (id,opt) -> CosmosStore.CosmosStoreCategory(context, cosmosCodec, fold, initial, CosmosStore.CachingStrategy.NoCaching, CosmosStore.AccessStrategy.Unoptimized).Resolve(id,?option=opt) let addAndThenRemoveItemsManyTimesExceptTheLastOne context cartId skuId (service: Backend.Cart.Service) count = service.ExecuteManyAsync(cartId, false, seq { @@ -74,7 +74,7 @@ type Tests(testOutputHelper) = let arrangeCosmos connect resolve = let log = createLog () - let ctx: Cosmos.Context = connect log defaultBatchSize + let ctx: CosmosStore.CosmosStoreContext = connect log defaultBatchSize Backend.Cart.create log (resolve ctx) [] 
diff --git a/samples/Store/Integration/ContactPreferencesIntegration.fs b/samples/Store/Integration/ContactPreferencesIntegration.fs index 08de4aafd..627276808 100644 --- a/samples/Store/Integration/ContactPreferencesIntegration.fs +++ b/samples/Store/Integration/ContactPreferencesIntegration.fs @@ -1,7 +1,7 @@ module Samples.Store.Integration.ContactPreferencesIntegration open Equinox -open Equinox.Cosmos.Integration +open Equinox.CosmosStore.Integration open Swensen.Unquote #nowarn "1182" // From hereon in, we may have some 'unused' privates (the tests) @@ -21,12 +21,12 @@ let resolveStreamGesWithoutAccessStrategy gateway = let cosmosCodec = Domain.ContactPreferences.Events.codecStj (FsCodec.SystemTextJson.Options.Create()) let resolveStreamCosmosWithLatestKnownEventSemantics context = - Cosmos.Resolver(context, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.LatestKnownEvent).Resolve + CosmosStore.CosmosStoreCategory(context, cosmosCodec, fold, initial, CosmosStore.CachingStrategy.NoCaching, CosmosStore.AccessStrategy.LatestKnownEvent).Resolve let resolveStreamCosmosUnoptimized context = - Cosmos.Resolver(context, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.Unoptimized).Resolve + CosmosStore.CosmosStoreCategory(context, cosmosCodec, fold, initial, CosmosStore.CachingStrategy.NoCaching, CosmosStore.AccessStrategy.Unoptimized).Resolve let resolveStreamCosmosRollingUnfolds context = - let access = Cosmos.AccessStrategy.Custom(Domain.ContactPreferences.Fold.isOrigin, Domain.ContactPreferences.Fold.transmute) - Cosmos.Resolver(context, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, access).Resolve + let access = CosmosStore.AccessStrategy.Custom(Domain.ContactPreferences.Fold.isOrigin, Domain.ContactPreferences.Fold.transmute) + CosmosStore.CosmosStoreCategory(context, cosmosCodec, fold, initial, CosmosStore.CachingStrategy.NoCaching, access).Resolve type 
Tests(testOutputHelper) = let testOutput = TestOutputAdapter testOutputHelper @@ -64,7 +64,7 @@ type Tests(testOutputHelper) = let arrangeCosmos connect resolve batchSize = async { let log = createLog () - let ctx: Cosmos.Context = connect log batchSize + let ctx: CosmosStore.CosmosStoreContext = connect log batchSize return Backend.ContactPreferences.create log (resolve ctx) } [] diff --git a/samples/Store/Integration/FavoritesIntegration.fs b/samples/Store/Integration/FavoritesIntegration.fs index 5da08e3c0..135bcae2c 100644 --- a/samples/Store/Integration/FavoritesIntegration.fs +++ b/samples/Store/Integration/FavoritesIntegration.fs @@ -1,7 +1,7 @@ module Samples.Store.Integration.FavoritesIntegration open Equinox -open Equinox.Cosmos.Integration +open Equinox.CosmosStore.Integration open Swensen.Unquote #nowarn "1182" // From hereon in, we may have some 'unused' privates (the tests) @@ -21,12 +21,12 @@ let createServiceGes context log = let cosmosCodec = Domain.Favorites.Events.codecStj let createServiceCosmos context log = - let resolver = Cosmos.Resolver(context, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, Cosmos.AccessStrategy.Snapshot snapshot) + let resolver = CosmosStore.CosmosStoreCategory(context, cosmosCodec, fold, initial, CosmosStore.CachingStrategy.NoCaching, CosmosStore.AccessStrategy.Snapshot snapshot) Backend.Favorites.create log resolver.Resolve let createServiceCosmosRollingState context log = - let access = Cosmos.AccessStrategy.RollingState Domain.Favorites.Fold.snapshot - let resolver = Cosmos.Resolver(context, cosmosCodec, fold, initial, Cosmos.CachingStrategy.NoCaching, access) + let access = CosmosStore.AccessStrategy.RollingState Domain.Favorites.Fold.snapshot + let resolver = CosmosStore.CosmosStoreCategory(context, cosmosCodec, fold, initial, CosmosStore.CachingStrategy.NoCaching, access) Backend.Favorites.create log resolver.Resolve type Tests(testOutputHelper) = diff --git 
a/samples/Store/Integration/Integration.fsproj b/samples/Store/Integration/Integration.fsproj index d39726d1f..dde236250 100644 --- a/samples/Store/Integration/Integration.fsproj +++ b/samples/Store/Integration/Integration.fsproj @@ -18,11 +18,11 @@ - + - + diff --git a/samples/Store/Integration/LogIntegration.fs b/samples/Store/Integration/LogIntegration.fs index 3455011a2..15f91119a 100644 --- a/samples/Store/Integration/LogIntegration.fs +++ b/samples/Store/Integration/LogIntegration.fs @@ -1,7 +1,7 @@ module Samples.Store.Integration.LogIntegration open Equinox.Core -open Equinox.Cosmos.Integration +open Equinox.CosmosStore.Integration open FSharp.UMX open Swensen.Unquote open System @@ -23,7 +23,7 @@ module EquinoxEsInterop = | Log.Batch (Direction.Backward,c,m) -> "LoadB", m, Some c { action = action; stream = metric.stream; interval = metric.interval; bytes = metric.bytes; count = metric.count; batches = batches } module EquinoxCosmosInterop = - open Equinox.Cosmos.Store + open Equinox.CosmosStore.Core [] type FlatMetric = { action: string; stream : string; interval: StopwatchInterval; bytes: int; count: int; responses: int option; ru: float } with override __.ToString() = sprintf "%s-Stream=%s %s-Elapsed=%O Ru=%O" __.action __.stream __.action __.interval.Elapsed __.ru @@ -62,7 +62,7 @@ type SerilogMetricsExtractor(emit : string -> unit) = logEvent.Properties |> Seq.tryPick (function | KeyValue (k, SerilogScalar (:? Equinox.EventStore.Log.Event as m)) -> Some <| Choice1Of3 (k,m) - | KeyValue (k, SerilogScalar (:? Equinox.Cosmos.Store.Log.Event as m)) -> Some <| Choice2Of3 (k,m) + | KeyValue (k, SerilogScalar (:? 
Equinox.CosmosStore.Core.Log.Event as m)) -> Some <| Choice2Of3 (k,m) | _ -> None) |> Option.defaultValue (Choice3Of3 ()) let handleLogEvent logEvent = diff --git a/samples/Tutorial/AsAt.fsx b/samples/Tutorial/AsAt.fsx index 691ddeba7..b316d79d8 100644 --- a/samples/Tutorial/AsAt.fsx +++ b/samples/Tutorial/AsAt.fsx @@ -26,14 +26,14 @@ #r "Equinox.dll" #r "TypeShape.dll" #r "FsCodec.NewtonsoftJson.dll" +#r "FsCodec.SystemTextJson.dll" #r "FSharp.Control.AsyncSeq.dll" #r "System.Net.Http" #r "Serilog.Sinks.Seq.dll" #r "Eventstore.ClientAPI.dll" #r "Equinox.EventStore.dll" -#r "Microsoft.Azure.Cosmos.Direct.dll" -#r "Microsoft.Azure.Cosmos.Client.dll" -#r "Equinox.Cosmos.dll" +#r "Azure.Cosmos.dll" +#r "Equinox.CosmosStore.dll" open System @@ -52,15 +52,16 @@ module Events = // unlike most Aggregates, knowing the Event's index is critical - for this reason, we always propagate that index alongside the event body type Event = int64 * Contract + // our upconversion function doesn't actually fit the term - it just tuples the underlying event + let up (evt : FsCodec.ITimelineEvent<_>,e) : Event = + evt.Index,e + // as per the `up`, the downConverter needs to drop the index (which is only there for symmetry), add null metadata + let down (_index,e) : Contract * _ option * DateTimeOffset option = + e,None,None + // unlike most normal codecs, we have a mapping to supply as we want the Index to be added to each event so we can track it in the State as we fold - let codec = - // our upconversion function doesn't actually fit the term - it just tuples the underlying event - let up (evt : FsCodec.ITimelineEvent<_>,e) : Event = - evt.Index,e - // as per the `up`, the downConverter needs to drop the index (which is only there for symmetry), add null metadata - let down (_index,e) : Contract * _ option * DateTimeOffset option = - e,None,None - FsCodec.NewtonsoftJson.Codec.Create(up,down) + let codec = FsCodec.NewtonsoftJson.Codec.Create(up,down) + let codecStj = 
FsCodec.SystemTextJson.Codec.Create(up,down) module Fold = @@ -124,19 +125,21 @@ module Log = let c = LoggerConfiguration() let c = if verbose then c.MinimumLevel.Debug() else c let c = c.WriteTo.Sink(Equinox.EventStore.Log.InternalMetrics.Stats.LogSink()) // to power Log.InternalMetrics.dump - let c = c.WriteTo.Sink(Equinox.Cosmos.Store.Log.InternalMetrics.Stats.LogSink()) // to power Log.InternalMetrics.dump + let c = c.WriteTo.Sink(Equinox.CosmosStore.Core.Log.InternalMetrics.Stats.LogSink()) // to power Log.InternalMetrics.dump let c = c.WriteTo.Seq("http://localhost:5341") // https://getseq.net let c = c.WriteTo.Console(if verbose then LogEventLevel.Debug else LogEventLevel.Information) c.CreateLogger() let dumpMetrics () = - Equinox.Cosmos.Store.Log.InternalMetrics.dump log + Equinox.CosmosStore.Core.Log.InternalMetrics.dump log Equinox.EventStore.Log.InternalMetrics.dump log let [] appName = "equinox-tutorial" let cache = Equinox.Cache(appName, 20) module EventStore = + open Equinox.EventStore + let snapshotWindow = 500 // see QuickStart for how to run a local instance in a mode that emulates the behavior of a cluster let (host,username,password) = "localhost", "admin", "changeit" @@ -153,16 +156,18 @@ module EventStore = let resolve id = Equinox.Stream(Log.log, resolver.Resolve(streamName id), maxAttempts = 3) module Cosmos = - open Equinox.Cosmos - let read key = System.Environment.GetEnvironmentVariable key |> Option.ofObj |> Option.get - let connector = Connector(TimeSpan.FromSeconds 5., 2, TimeSpan.FromSeconds 5., log=Log.log, mode=Microsoft.Azure.Cosmos.ConnectionMode.Gateway) - let conn = connector.Connect(appName, Discovery.FromConnectionString (read "EQUINOX_COSMOS_CONNECTION")) |> Async.RunSynchronously - let context = Context(conn, read "EQUINOX_COSMOS_DATABASE", read "EQUINOX_COSMOS_CONTAINER") + open Equinox.CosmosStore + + let read key = System.Environment.GetEnvironmentVariable key |> Option.ofObj |> Option.get + let factory = 
CosmosStoreClientFactory(TimeSpan.FromSeconds 5., 2, TimeSpan.FromSeconds 5., mode=Azure.Cosmos.ConnectionMode.Gateway) + let client = factory.Create(Discovery.ConnectionString (read "EQUINOX_COSMOS_CONNECTION")) + let conn = CosmosStoreConnection(client, read "EQUINOX_COSMOS_DATABASE", read "EQUINOX_COSMOS_CONTAINER") + let context = CosmosStoreContext(conn) let cacheStrategy = CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) // OR CachingStrategy.NoCaching let accessStrategy = AccessStrategy.Snapshot (Fold.isValid,Fold.snapshot) - let resolver = Resolver(context, Events.codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy) - let resolve id = Equinox.Stream(Log.log, resolver.Resolve(streamName id), maxAttempts = 3) + let category = CosmosStoreCategory(context, Events.codecStj, Fold.fold, Fold.initial, cacheStrategy, accessStrategy) + let resolve id = Equinox.Stream(Log.log, category.Resolve(streamName id), maxAttempts = 3) let serviceES = Service(EventStore.resolve) let serviceCosmos = Service(Cosmos.resolve) diff --git a/samples/Tutorial/Cosmos.fsx b/samples/Tutorial/Cosmos.fsx index 8c9b7942f..47e3abfd8 100644 --- a/samples/Tutorial/Cosmos.fsx +++ b/samples/Tutorial/Cosmos.fsx @@ -6,18 +6,17 @@ #I "bin/Debug/netstandard2.1/" #r "Serilog.dll" #r "Serilog.Sinks.Console.dll" -#r "Newtonsoft.Json.dll" #r "TypeShape.dll" #r "Equinox.dll" #r "Equinox.Core.dll" #r "FSharp.UMX.dll" #r "FsCodec.dll" -#r "FsCodec.NewtonsoftJson.dll" +#r "FsCodec.SystemTextJson.dll" #r "FSharp.Control.AsyncSeq.dll" -#r "Microsoft.Azure.Cosmos.Client.dll" +#r "Azure.Cosmos.dll" #r "System.Net.Http" #r "Serilog.Sinks.Seq.dll" -#r "Equinox.Cosmos.dll" +#r "Equinox.CosmosStore.dll" module Log = @@ -27,11 +26,11 @@ module Log = let log = let c = LoggerConfiguration() let c = if verbose then c.MinimumLevel.Debug() else c - let c = c.WriteTo.Sink(Equinox.Cosmos.Store.Log.InternalMetrics.Stats.LogSink()) // to power Log.InternalMetrics.dump + let c = 
c.WriteTo.Sink(Equinox.CosmosStore.Core.Log.InternalMetrics.Stats.LogSink()) // to power Log.InternalMetrics.dump let c = c.WriteTo.Seq("http://localhost:5341") // https://getseq.net let c = c.WriteTo.Console(if verbose then LogEventLevel.Debug else LogEventLevel.Information) c.CreateLogger() - let dumpMetrics () = Equinox.Cosmos.Store.Log.InternalMetrics.dump log + let dumpMetrics () = Equinox.CosmosStore.Core.Log.InternalMetrics.dump log module Favorites = @@ -45,7 +44,7 @@ module Favorites = | Added of Item | Removed of Item interface TypeShape.UnionContract.IUnionContract - let codec = FsCodec.NewtonsoftJson.Codec.Create() // Coming soon, replace Newtonsoft with SystemTextJson and works same + let codec = FsCodec.SystemTextJson.Codec.Create() // Coming soon, replace Newtonsoft with SystemTextJson and works same module Fold = @@ -82,21 +81,24 @@ module Favorites = module Cosmos = - open Equinox.Cosmos // Everything outside of this module is completely storage agnostic so can be unit tested simply and/or bound to any store + open Equinox.CosmosStore // Everything outside of this module is completely storage agnostic so can be unit tested simply and/or bound to any store let accessStrategy = AccessStrategy.Unoptimized // Or Snapshot etc https://github.com/jet/equinox/blob/master/DOCUMENTATION.md#access-strategies let create (context, cache) = let cacheStrategy = CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) 
// OR CachingStrategy.NoCaching - let resolver = Resolver(context, Events.codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy) - create resolver.Resolve + let category = CosmosStoreCategory(context, Events.codec, Fold.fold, Fold.initial, cacheStrategy, accessStrategy) + create category.Resolve let [] appName = "equinox-tutorial" module Store = - let read key = System.Environment.GetEnvironmentVariable key |> Option.ofObj |> Option.get - let connector = Equinox.Cosmos.Connector(System.TimeSpan.FromSeconds 5., 2, System.TimeSpan.FromSeconds 5., log=Log.log) - let conn = connector.Connect(appName, Equinox.Cosmos.Discovery.FromConnectionString (read "EQUINOX_COSMOS_CONNECTION")) |> Async.RunSynchronously - let createContext () = Equinox.Cosmos.Context(conn, read "EQUINOX_COSMOS_DATABASE", read "EQUINOX_COSMOS_CONTAINER") + open Equinox.CosmosStore + + let read key = System.Environment.GetEnvironmentVariable key |> Option.ofObj |> Option.get + let factory = Equinox.CosmosStore.CosmosStoreClientFactory(System.TimeSpan.FromSeconds 5., 2, System.TimeSpan.FromSeconds 5.) 
+ let client = factory.Create(Discovery.ConnectionString (read "EQUINOX_COSMOS_CONNECTION")) + let conn = CosmosStoreConnection(client, read "EQUINOX_COSMOS_DATABASE", read "EQUINOX_COSMOS_CONTAINER") + let createContext () = CosmosStoreContext(conn) let context = Store.createContext () let cache = Equinox.Cache(appName, 20) diff --git a/samples/Tutorial/FulfilmentCenter.fsx b/samples/Tutorial/FulfilmentCenter.fsx index 18972d4f7..c9c6520d2 100644 --- a/samples/Tutorial/FulfilmentCenter.fsx +++ b/samples/Tutorial/FulfilmentCenter.fsx @@ -1,17 +1,16 @@ #I "bin/Debug/netstandard2.1/" #r "Serilog.dll" #r "Serilog.Sinks.Console.dll" -#r "Newtonsoft.Json.dll" #r "TypeShape.dll" #r "Equinox.dll" #r "Equinox.Core.dll" #r "FSharp.UMX.dll" #r "FSCodec.dll" -#r "FsCodec.NewtonsoftJson.dll" -#r "Microsoft.Azure.Cosmos.Client.dll" +#r "FsCodec.SystemTextJson.dll" +#r "Azure.Cosmos.dll" #r "System.Net.Http" #r "Serilog.Sinks.Seq.dll" -#r "Equinox.Cosmos.dll" +#r "Equinox.CosmosStore.dll" open FSharp.UMX @@ -54,7 +53,7 @@ module FulfilmentCenter = | FcDetailsChanged of FcData | FcRenamed of FcName interface TypeShape.UnionContract.IUnionContract - let codec = FsCodec.NewtonsoftJson.Codec.Create() + let codec = FsCodec.SystemTextJson.Codec.Create() module Fold = @@ -103,7 +102,7 @@ module FulfilmentCenter = member __.Read id : Async = read id member __.QueryWithVersion(id, render : Fold.State -> 'res) : Async = queryEx id render -open Equinox.Cosmos +open Equinox.CosmosStore open System module Log = @@ -114,27 +113,27 @@ module Log = let log = let c = LoggerConfiguration() let c = if verbose then c.MinimumLevel.Debug() else c - let c = c.WriteTo.Sink(Store.Log.InternalMetrics.Stats.LogSink()) // to power Log.InternalMetrics.dump + let c = c.WriteTo.Sink(Core.Log.InternalMetrics.Stats.LogSink()) // to power Log.InternalMetrics.dump let c = c.WriteTo.Seq("http://localhost:5341") // https://getseq.net let c = c.WriteTo.Console(if verbose then LogEventLevel.Debug else 
LogEventLevel.Information) c.CreateLogger() - let dumpMetrics () = Store.Log.InternalMetrics.dump log + let dumpMetrics () = Core.Log.InternalMetrics.dump log module Store = let read key = Environment.GetEnvironmentVariable key |> Option.ofObj |> Option.get let appName = "equinox-tutorial" - let connector = Connector(TimeSpan.FromSeconds 5., 2, TimeSpan.FromSeconds 5., log=Log.log) - let conn = connector.Connect(appName, Discovery.FromConnectionString (read "EQUINOX_COSMOS_CONNECTION")) |> Async.RunSynchronously - let gateway = Gateway(conn, BatchingPolicy()) - let context = Context(gateway, read "EQUINOX_COSMOS_DATABASE", read "EQUINOX_COSMOS_CONTAINER") + let factory = CosmosStoreClientFactory(TimeSpan.FromSeconds 5., 2, TimeSpan.FromSeconds 5., mode=Azure.Cosmos.ConnectionMode.Gateway) + let client = factory.Create(Discovery.ConnectionString (read "EQUINOX_COSMOS_CONNECTION")) + let conn = CosmosStoreConnection(client, read "EQUINOX_COSMOS_DATABASE", read "EQUINOX_COSMOS_CONTAINER") + let context = CosmosStoreContext(conn) let cache = Equinox.Cache(appName, 20) let cacheStrategy = CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) 
// OR CachingStrategy.NoCaching open FulfilmentCenter -let resolver = Resolver(Store.context, Events.codec, Fold.fold, Fold.initial, Store.cacheStrategy, AccessStrategy.Unoptimized) -let resolve id = Equinox.Stream(Log.log, resolver.Resolve(streamName id), maxAttempts = 3) +let category = CosmosStoreCategory(Store.context, Events.codec, Fold.fold, Fold.initial, Store.cacheStrategy, AccessStrategy.Unoptimized) +let resolve id = Equinox.Stream(Log.log, category.Resolve(streamName id), maxAttempts = 3) let service = Service(resolve) let fc = "fc0" diff --git a/samples/Tutorial/Gapless.fs b/samples/Tutorial/Gapless.fs index 59dbcc8ae..e85381585 100644 --- a/samples/Tutorial/Gapless.fs +++ b/samples/Tutorial/Gapless.fs @@ -78,13 +78,13 @@ let [] appName = "equinox-tutorial-gapless" module Cosmos = - open Equinox.Cosmos + open Equinox.CosmosStore let private create (context, cache, accessStrategy) = let cacheStrategy = CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) // OR CachingStrategy.NoCaching - let resolver = Resolver(context, Events.codecStj, Fold.fold, Fold.initial, cacheStrategy, accessStrategy) + let category = CosmosStoreCategory(context, Events.codecStj, Fold.fold, Fold.initial, cacheStrategy, accessStrategy) let resolve sequenceId = let streamName = streamName sequenceId - Equinox.Stream(Serilog.Log.Logger, resolver.Resolve streamName, maxAttempts = 3) + Equinox.Stream(Serilog.Log.Logger, category.Resolve streamName, maxAttempts = 3) Service(resolve) module Snapshot = diff --git a/samples/Tutorial/Index.fs b/samples/Tutorial/Index.fs index c5f505022..6981dc018 100644 --- a/samples/Tutorial/Index.fs +++ b/samples/Tutorial/Index.fs @@ -55,12 +55,12 @@ let create<'t> resolve indexId = module Cosmos = - open Equinox.Cosmos + open Equinox.CosmosStore let create<'v> (context,cache) = let cacheStrategy = CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) 
let accessStrategy = AccessStrategy.RollingState Fold.snapshot - let resolver = Resolver(context, Events.codecStj, Fold.fold, Fold.initial, cacheStrategy, accessStrategy) - create resolver.Resolve + let category = CosmosStoreCategory(context, Events.codecStj, Fold.fold, Fold.initial, cacheStrategy, accessStrategy) + create category.Resolve module MemoryStore = diff --git a/samples/Tutorial/Sequence.fs b/samples/Tutorial/Sequence.fs index 5a7197472..62deeddc4 100644 --- a/samples/Tutorial/Sequence.fs +++ b/samples/Tutorial/Sequence.fs @@ -57,11 +57,11 @@ let create resolve = module Cosmos = - open Equinox.Cosmos + open Equinox.CosmosStore let private create (context,cache,accessStrategy) = let cacheStrategy = CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) // OR CachingStrategy.NoCaching - let resolver = Resolver(context, Events.codecStj, Fold.fold, Fold.initial, cacheStrategy, accessStrategy) - create resolver.Resolve + let category = CosmosStoreCategory(context, Events.codecStj, Fold.fold, Fold.initial, cacheStrategy, accessStrategy) + create category.Resolve module LatestKnownEvent = diff --git a/samples/Tutorial/Set.fs b/samples/Tutorial/Set.fs index 74b82ddec..a8d78c9cb 100644 --- a/samples/Tutorial/Set.fs +++ b/samples/Tutorial/Set.fs @@ -55,12 +55,12 @@ let create resolve setId = module Cosmos = - open Equinox.Cosmos + open Equinox.CosmosStore let create (context, cache) = let cacheStrategy = CachingStrategy.SlidingWindow (cache, System.TimeSpan.FromMinutes 20.) 
let accessStrategy = AccessStrategy.RollingState Fold.snapshot - let resolver = Resolver(context, Events.codecStj, Fold.fold, Fold.initial, cacheStrategy, accessStrategy) - create resolver.Resolve + let category = CosmosStoreCategory(context, Events.codecStj, Fold.fold, Fold.initial, cacheStrategy, accessStrategy) + create category.Resolve module MemoryStore = diff --git a/samples/Tutorial/Todo.fsx b/samples/Tutorial/Todo.fsx index cc642230a..4ff922c49 100644 --- a/samples/Tutorial/Todo.fsx +++ b/samples/Tutorial/Todo.fsx @@ -6,16 +6,15 @@ #I "bin/Debug/netstandard2.1/" #r "Serilog.dll" #r "Serilog.Sinks.Console.dll" -#r "Newtonsoft.Json.dll" +#r "System.Text.Json.dll" #r "TypeShape.dll" #r "Equinox.Core.dll" #r "Equinox.dll" #r "FSharp.UMX.dll" #r "FsCodec.dll" -#r "FsCodec.NewtonsoftJson.dll" +#r "FsCodec.SystemTextJson.dll" #r "FSharp.Control.AsyncSeq.dll" -#r "Microsoft.Azure.Cosmos.Client.dll" -#r "Equinox.Cosmos.dll" +#r "Equinox.CosmosStore.dll" open System @@ -35,7 +34,7 @@ type Event = | Cleared | Snapshotted of Snapshotted interface TypeShape.UnionContract.IUnionContract -let codec = FsCodec.NewtonsoftJson.Codec.Create() +let codec = FsCodec.SystemTextJson.Codec.Create() type State = { items : Todo list; nextId : int } let initial = { items = []; nextId = 0 } @@ -116,21 +115,21 @@ let log = LoggerConfiguration().WriteTo.Console().CreateLogger() let [] appName = "equinox-tutorial" let cache = Equinox.Cache(appName, 20) -open Equinox.Cosmos -module Store = - let read key = Environment.GetEnvironmentVariable key |> Option.ofObj |> Option.get +open Equinox.CosmosStore - let connector = Connector(TimeSpan.FromSeconds 5., 2, TimeSpan.FromSeconds 5., log=log) - let conn = connector.Connect(appName, Discovery.FromConnectionString (read "EQUINOX_COSMOS_CONNECTION")) |> Async.RunSynchronously - let gateway = Gateway(conn, BatchingPolicy()) +module Store = - let store = Context(gateway, read "EQUINOX_COSMOS_DATABASE", read "EQUINOX_COSMOS_CONTAINER") + let read key 
= Environment.GetEnvironmentVariable key |> Option.ofObj |> Option.get + let factory = CosmosStoreClientFactory(TimeSpan.FromSeconds 5., 2, TimeSpan.FromSeconds 5.) + let client = factory.Create(Discovery.ConnectionString (read "EQUINOX_COSMOS_CONNECTION")) + let conn = CosmosStoreConnection(client, read "EQUINOX_COSMOS_DATABASE", read "EQUINOX_COSMOS_CONTAINER") + let context = CosmosStoreContext(conn) let cacheStrategy = CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) module TodosCategory = let access = AccessStrategy.Snapshot (isOrigin,snapshot) - let resolver = Resolver(Store.store, codec, fold, initial, Store.cacheStrategy, access=access) - let resolve id = Equinox.Stream(log, resolver.Resolve(streamName id), maxAttempts = 3) + let category = CosmosStoreCategory(Store.context, codec, fold, initial, Store.cacheStrategy, access=access) + let resolve id = Equinox.Stream(log, category.Resolve(streamName id), maxAttempts = 3) let service = Service(TodosCategory.resolve) diff --git a/samples/Tutorial/Tutorial.fsproj b/samples/Tutorial/Tutorial.fsproj index 250f0f066..b802e0bc1 100644 --- a/samples/Tutorial/Tutorial.fsproj +++ b/samples/Tutorial/Tutorial.fsproj @@ -1,11 +1,10 @@  - netstandard2.1 + netstandard2.1 5 true - true - true + @@ -24,7 +23,7 @@ - + @@ -35,7 +34,6 @@ - \ No newline at end of file diff --git a/samples/Tutorial/Upload.fs b/samples/Tutorial/Upload.fs index 8ea8af0be..e6f243e51 100644 --- a/samples/Tutorial/Upload.fs +++ b/samples/Tutorial/Upload.fs @@ -72,11 +72,11 @@ let create resolve = module Cosmos = - open Equinox.Cosmos + open Equinox.CosmosStore let create (context,cache) = let cacheStrategy = CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) 
// OR CachingStrategy.NoCaching - let resolver = Resolver(context, Events.codecStj, Fold.fold, Fold.initial, cacheStrategy, AccessStrategy.LatestKnownEvent) - create resolver.Resolve + let category = CosmosStoreCategory(context, Events.codecStj, Fold.fold, Fold.initial, cacheStrategy, AccessStrategy.LatestKnownEvent) + create category.Resolve module EventStore = open Equinox.EventStore diff --git a/samples/Web/Program.fs b/samples/Web/Program.fs index c49531f82..fab9549b7 100644 --- a/samples/Web/Program.fs +++ b/samples/Web/Program.fs @@ -29,7 +29,7 @@ module Program = .Enrich.FromLogContext() .WriteTo.Console() // TOCONSIDER log and reset every minute or something ? - .WriteTo.Sink(Equinox.Cosmos.Store.Log.InternalMetrics.Stats.LogSink()) + .WriteTo.Sink(Equinox.CosmosStore.Core.Log.InternalMetrics.Stats.LogSink()) .WriteTo.Sink(Equinox.EventStore.Log.InternalMetrics.Stats.LogSink()) .WriteTo.Sink(Equinox.SqlStreamStore.Log.InternalMetrics.Stats.LogSink()) let c = @@ -41,4 +41,4 @@ module Program = 0 with e -> eprintfn "%s" e.Message - 1 \ No newline at end of file + 1 diff --git a/src/Equinox.Cosmos/CosmosJsonSerializer.fs b/src/Equinox.CosmosStore/CosmosJsonSerializer.fs similarity index 98% rename from src/Equinox.Cosmos/CosmosJsonSerializer.fs rename to src/Equinox.CosmosStore/CosmosJsonSerializer.fs index 1484133d0..a826261ab 100644 --- a/src/Equinox.Cosmos/CosmosJsonSerializer.fs +++ b/src/Equinox.CosmosStore/CosmosJsonSerializer.fs @@ -1,4 +1,4 @@ -namespace Equinox.Cosmos.Store +namespace Equinox.CosmosStore.Core open Azure.Cosmos.Serialization open Equinox.Core diff --git a/src/Equinox.Cosmos/Cosmos.fs b/src/Equinox.CosmosStore/CosmosStore.fs similarity index 95% rename from src/Equinox.Cosmos/Cosmos.fs rename to src/Equinox.CosmosStore/CosmosStore.fs index 08f955076..c3d07a932 100644 --- a/src/Equinox.Cosmos/Cosmos.fs +++ b/src/Equinox.CosmosStore/CosmosStore.fs @@ -1,4 +1,4 @@ -namespace Equinox.Cosmos.Store +namespace Equinox.CosmosStore.Core open 
Azure open Azure.Cosmos @@ -405,14 +405,14 @@ module private CancellationToken = let useOrCreate = function Some ct -> async.Return ct | _ -> Async.CancellationToken module Initialization = - let internal getOrCreateDatabase (cosmosClient: CosmosClient) (dbName: string) (throughput: ResourceThroughput) = async { + let internal getOrCreateDatabase (client: CosmosClient) (databaseId: string) (throughput: ResourceThroughput) = async { let! ct = Async.CancellationToken let! response = match throughput with - | Default -> cosmosClient.CreateDatabaseIfNotExistsAsync(id = dbName, cancellationToken = ct) |> Async.AwaitTaskCorrect - | SetIfCreating value -> cosmosClient.CreateDatabaseIfNotExistsAsync(id = dbName, throughput = Nullable(value), cancellationToken = ct) |> Async.AwaitTaskCorrect + | Default -> client.CreateDatabaseIfNotExistsAsync(id = databaseId, cancellationToken = ct) |> Async.AwaitTaskCorrect + | SetIfCreating value -> client.CreateDatabaseIfNotExistsAsync(id = databaseId, throughput = Nullable(value), cancellationToken = ct) |> Async.AwaitTaskCorrect | ReplaceAlways value -> async { - let! response = cosmosClient.CreateDatabaseIfNotExistsAsync(id = dbName, throughput = Nullable(value), cancellationToken = ct) |> Async.AwaitTaskCorrect + let! response = client.CreateDatabaseIfNotExistsAsync(id = databaseId, throughput = Nullable(value), cancellationToken = ct) |> Async.AwaitTaskCorrect let! 
_ = response.Database.ReplaceThroughputAsync(value, cancellationToken = ct) |> Async.AwaitTaskCorrect return response } return response.Database } @@ -429,8 +429,8 @@ module Initialization = return response } return response.Container } - let internal getBatchAndTipContainerProps (containerName: string) = - let props = ContainerProperties(id = containerName, partitionKeyPath = sprintf "/%s" Batch.PartitionKeyField) + let internal getBatchAndTipContainerProps (containerId: string) = + let props = ContainerProperties(id = containerId, partitionKeyPath = sprintf "/%s" Batch.PartitionKeyField) props.IndexingPolicy.IndexingMode <- IndexingMode.Consistent props.IndexingPolicy.Automatic <- true // Can either do a blacklist or a whitelist @@ -447,11 +447,11 @@ module Initialization = return r.GetRawResponse().Headers.GetRequestCharge() with CosmosException ((CosmosStatusCode sc) as e) when sc = int System.Net.HttpStatusCode.Conflict -> return e.Response.Headers.GetRequestCharge() } - let initializeContainer (cosmosClient: CosmosClient) (dbName: string) (containerName: string) (mode: Provisioning) (createStoredProcedure: bool, nameOverride: string option) = async { + let initializeContainer (client: CosmosClient) (databaseId: string) (containerId: string) (mode: Provisioning) (createStoredProcedure: bool, nameOverride: string option) = async { let dbThroughput = match mode with Provisioning.Database throughput -> throughput | _ -> Default let containerThroughput = match mode with Provisioning.Container throughput -> throughput | _ -> Default - let! db = getOrCreateDatabase cosmosClient dbName dbThroughput - let! container = getOrCreateContainer db (getBatchAndTipContainerProps containerName) containerThroughput + let! db = getOrCreateDatabase client databaseId dbThroughput + let! container = getOrCreateContainer db (getBatchAndTipContainerProps containerId) containerThroughput if createStoredProcedure then let! 
(_ru : float) = createSyncStoredProcedure container nameOverride in () @@ -818,14 +818,14 @@ type Containers let catAndStreamToDatabaseContainerStream (categoryName, streamId) = databaseId, containerId, genStreamName (categoryName, streamId) Containers(catAndStreamToDatabaseContainerStream, ?disableInitialization = disableInitialization) - member internal __.ResolveContainerGuardAndStreamName(cosmosClient : CosmosClient, createGateway, categoryName, streamId) : ContainerInitializerGuard * string = + member internal __.ResolveContainerGuardAndStreamName(client : CosmosClient, createGateway, categoryName, streamId) : ContainerInitializerGuard * string = let databaseId, containerId, streamName = categoryAndStreamNameToDatabaseContainerStream (categoryName, streamId) let createContainerInitializerGuard (d, c) = let init = if Some true = disableInitialization then None else Some (fun cosmosContainer -> Initialization.createSyncStoredProcedure cosmosContainer None |> Async.Ignore) ContainerInitializerGuard - ( createGateway (cosmosClient.GetDatabase(d).GetContainer(c)), + ( createGateway (client.GetDatabase(d).GetContainer(c)), ?initContainer = init) let g = containerInitGuards.GetOrAdd((databaseId, containerId), createContainerInitializerGuard) g, streamName @@ -966,12 +966,12 @@ type internal Folder<'event, 'state, 'context> | SyncResult.Conflict resync -> return SyncResult.Conflict resync | SyncResult.Written (token',state') -> return SyncResult.Written (token',state') } -namespace Equinox.Cosmos +namespace Equinox.CosmosStore open Azure.Cosmos open Equinox open Equinox.Core -open Equinox.Cosmos.Store +open Equinox.CosmosStore.Core open FsCodec open FSharp.Control open Serilog @@ -1025,38 +1025,39 @@ type AccessStrategy<'event,'state> = /// Holds all relevant state for a Store within a given CosmosDB Database /// - The (singleton) CosmosDB CosmosClient (there should be a single one of these per process) -type Client - ( cosmosClient : CosmosClient, +/// - The 
(singleton) Core.Containers instance, which maintains the per Container Stored Procedure initialization state +type CosmosStoreConnection + ( client : CosmosClient, /// Singleton used to cache initialization state per CosmosContainer. containers : Containers, /// Admits a hook to enable customization of how Equinox.Cosmos handles the low level interactions with the underlying CosmosContainer. ?createGateway) = let createGateway = match createGateway with Some creator -> creator | None -> ContainerGateway - new (cosmosClient, databaseId : string, containerId : string, + new (client, databaseId : string, containerId : string, /// Inhibit CreateStoredProcedureIfNotExists when a given Container is used for the first time []?disableInitialization, /// Admits a hook to enable customization of how Equinox.Cosmos handles the low level interactions with the underlying CosmosContainer. []?createGateway : CosmosContainer -> ContainerGateway) = let containers = Containers(databaseId, containerId, ?disableInitialization = disableInitialization) - Client(cosmosClient, containers, ?createGateway = createGateway) - member __.CosmosClient = cosmosClient + CosmosStoreConnection(client, containers, ?createGateway = createGateway) + member __.Client = client member internal __.ResolveContainerGuardAndStreamName(categoryName, streamId) = - containers.ResolveContainerGuardAndStreamName(cosmosClient, createGateway, categoryName, streamId) + containers.ResolveContainerGuardAndStreamName(client, createGateway, categoryName, streamId) /// Defines a set of related access policies for a given CosmosDB, together with a Containers map defining mappings from (category,id) to (databaseId,containerId,streamName) -type Context(client : Client, batchingPolicy, retryPolicy) = - new(client : Client, ?defaultMaxItems, ?getDefaultMaxItems, ?maxRequests, ?readRetryPolicy, ?writeRetryPolicy) = +type CosmosStoreContext(connection : CosmosStoreConnection, batchingPolicy, retryPolicy) = + new(client : 
CosmosStoreConnection, ?defaultMaxItems, ?getDefaultMaxItems, ?maxRequests, ?readRetryPolicy, ?writeRetryPolicy) = let retry = RetryPolicy(?readRetryPolicy = readRetryPolicy, ?writeRetryPolicy = writeRetryPolicy) let batching = BatchingPolicy(?defaultMaxItems = defaultMaxItems, ?getDefaultMaxItems = getDefaultMaxItems, ?maxRequests = maxRequests) - Context(client, batching, retry) + CosmosStoreContext(client, batching, retry) member __.Batching = batchingPolicy member __.Retries = retryPolicy member internal __.ResolveContainerClientAndStreamIdAndInit(categoryName, streamId) = - let cg, streamId = client.ResolveContainerGuardAndStreamName(categoryName, streamId) + let cg, streamId = connection.ResolveContainerGuardAndStreamName(categoryName, streamId) let cc = ContainerClient(cg.Gateway, batchingPolicy, retryPolicy) cc, streamId, cg.InitializationGate -type Resolver<'event, 'state, 'context>(context : Context, codec, fold, initial, caching, access) = +type CosmosStoreCategory<'event, 'state, 'context>(context : CosmosStoreContext, codec, fold, initial, caching, access) = let readCacheOption = match caching with | CachingStrategy.NoCaching -> None @@ -1073,7 +1074,7 @@ type Resolver<'event, 'state, 'context>(context : Context, codec, fold, initial, let resolveCategory (categoryName, container) = let createCategory _name = let cosmosCat = Category<'event, 'state, 'context>(container, codec) - let folder = Store.Folder<'event, 'state, 'context>(cosmosCat, fold, initial, isOrigin, mapUnfolds, ?readCache = readCacheOption) + let folder = Core.Folder<'event, 'state, 'context>(cosmosCat, fold, initial, isOrigin, mapUnfolds, ?readCache = readCacheOption) match caching with | CachingStrategy.NoCaching -> folder :> ICategory<_, _, string, 'context> | CachingStrategy.SlidingWindow(cache, window) -> Caching.applyCacheUpdatesWithSlidingExpiration cache null window folder @@ -1129,7 +1130,7 @@ type Discovery = /// Cosmos SDK Connection String | ConnectionString of 
connectionString : string -type CosmosClientFactory +type CosmosStoreClientFactory ( /// Timeout to apply to individual reads/write round-trips going to CosmosDb requestTimeout: TimeSpan, /// Maximum number of times attempt when failure reason is a 429 from CosmosDb, signifying RU limits have been breached @@ -1174,9 +1175,8 @@ type CosmosClientFactory | Discovery.AccountUriAndKey (databaseUri=uri; key=key) -> new CosmosClient(string uri, key, __.Options) | Discovery.ConnectionString cs -> new CosmosClient(cs, __.Options) -namespace Equinox.Cosmos.Core +namespace Equinox.CosmosStore.Core -open Equinox.Cosmos.Store open FsCodec open FSharp.Control open System.Runtime.InteropServices @@ -1190,8 +1190,8 @@ type AppendResult<'t> = | ConflictUnknown of index: 't /// Encapsulates the core facilities Equinox.Cosmos offers for operating directly on Events in Streams. -type Context - ( context : Equinox.Cosmos.Context, container : ContainerClient, +type EventsContext + ( context : Equinox.CosmosStore.CosmosStoreContext, container : ContainerClient, /// Logger to write to - see https://github.com/serilog/serilog/wiki/Provided-Sinks for how to wire to your logger log : Serilog.ILogger, /// Optional maximum number of Store.Batch records to retrieve as a set (how many Events are placed therein is controlled by average batch size when appending events @@ -1214,9 +1214,9 @@ type Context return pos', data } new (client : Azure.Cosmos.CosmosClient, log, databaseId : string, containerId : string, ?defaultMaxItems, ?getDefaultMaxItems) = - let inner = Equinox.Cosmos.Context(Equinox.Cosmos.Client(client, databaseId, containerId)) + let inner = Equinox.CosmosStore.CosmosStoreContext(Equinox.CosmosStore.CosmosStoreConnection(client, databaseId, containerId)) let cc, _streamId, _init = inner.ResolveContainerClientAndStreamIdAndInit(null, null) - Context(inner, cc, log, ?defaultMaxItems = defaultMaxItems, ?getDefaultMaxItems = getDefaultMaxItems) + EventsContext(inner, cc, log, 
?defaultMaxItems = defaultMaxItems, ?getDefaultMaxItems = getDefaultMaxItems) member __.ResolveStream(streamName) = let _cc, streamId, init = context.ResolveContainerClientAndStreamIdAndInit(null, streamName) @@ -1311,43 +1311,43 @@ module Events = /// reading in batches of the specified size. /// Returns an empty sequence if the stream is empty or if the sequence number is larger than the largest /// sequence number in the stream. - let getAll (ctx: Context) (streamName: string) (MinPosition index: int64) (batchSize: int): FSharp.Control.AsyncSeq[]> = + let getAll (ctx: EventsContext) (streamName: string) (MinPosition index: int64) (batchSize: int): FSharp.Control.AsyncSeq[]> = ctx.Walk(ctx.CreateStream streamName, batchSize, ?position=index) /// Returns an async array of events in the stream starting at the specified sequence number, /// number of events to read is specified by batchSize /// Returns an empty sequence if the stream is empty or if the sequence number is larger than the largest /// sequence number in the stream. - let get (ctx: Context) (streamName: string) (MinPosition index: int64) (maxCount: int): Async[]> = + let get (ctx: EventsContext) (streamName: string) (MinPosition index: int64) (maxCount: int): Async[]> = ctx.Read(ctx.CreateStream streamName, ?position=index, maxCount=maxCount) |> dropPosition /// Appends a batch of events to a stream at the specified expected sequence number. /// If the specified expected sequence number does not match the stream, the events are not appended /// and a failure is returned. - let append (ctx: Context) (streamName: string) (index: int64) (events: IEventData<_>[]): Async> = + let append (ctx: EventsContext) (streamName: string) (index: int64) (events: IEventData<_>[]): Async> = ctx.Sync(ctx.CreateStream streamName, Position.fromI index, events) |> stripSyncResult /// Appends a batch of events to a stream at the the present Position without any conflict checks. 
/// NB typically, it is recommended to ensure idempotency of operations by using the `append` and related API as /// this facilitates ensuring consistency is maintained, and yields reduced latency and Request Charges impacts /// (See equivalent APIs on `Context` that yield `Position` values) - let appendAtEnd (ctx: Context) (streamName: string) (events: IEventData<_>[]): Async = + let appendAtEnd (ctx: EventsContext) (streamName: string) (events: IEventData<_>[]): Async = ctx.NonIdempotentAppend(ctx.CreateStream streamName, events) |> stripPosition /// Returns an async sequence of events in the stream backwards starting from the specified sequence number, /// reading in batches of the specified size. /// Returns an empty sequence if the stream is empty or if the sequence number is smaller than the smallest /// sequence number in the stream. - let getAllBackwards (ctx: Context) (streamName: string) (MaxPosition index: int64) (batchSize: int): AsyncSeq[]> = + let getAllBackwards (ctx: EventsContext) (streamName: string) (MaxPosition index: int64) (batchSize: int): AsyncSeq[]> = ctx.Walk(ctx.CreateStream streamName, batchSize, ?position=index, direction=Direction.Backward) /// Returns an async array of events in the stream backwards starting from the specified sequence number, /// number of events to read is specified by batchSize /// Returns an empty sequence if the stream is empty or if the sequence number is smaller than the smallest /// sequence number in the stream. 
- let getBackwards (ctx: Context) (streamName: string) (MaxPosition index: int64) (maxCount: int): Async[]> = + let getBackwards (ctx: EventsContext) (streamName: string) (MaxPosition index: int64) (maxCount: int): Async[]> = ctx.Read(ctx.CreateStream streamName, ?position=index, maxCount=maxCount, direction=Direction.Backward) |> dropPosition /// Obtains the `index` from the current write Position - let getNextIndex (ctx: Context) (streamName: string) : Async = + let getNextIndex (ctx: EventsContext) (streamName: string) : Async = ctx.Sync(ctx.CreateStream streamName) |> stripPosition diff --git a/src/Equinox.Cosmos/Equinox.Cosmos.fsproj b/src/Equinox.CosmosStore/Equinox.CosmosStore.fsproj similarity index 97% rename from src/Equinox.Cosmos/Equinox.Cosmos.fsproj rename to src/Equinox.CosmosStore/Equinox.CosmosStore.fsproj index 8e015a0e3..52f0ea08c 100644 --- a/src/Equinox.Cosmos/Equinox.Cosmos.fsproj +++ b/src/Equinox.CosmosStore/Equinox.CosmosStore.fsproj @@ -11,7 +11,7 @@ - + diff --git a/tests/Equinox.Cosmos.Integration/CacheCellTests.fs b/tests/Equinox.CosmosStore.Integration/CacheCellTests.fs similarity index 96% rename from tests/Equinox.Cosmos.Integration/CacheCellTests.fs rename to tests/Equinox.CosmosStore.Integration/CacheCellTests.fs index 06962af47..3d4cf283a 100644 --- a/tests/Equinox.Cosmos.Integration/CacheCellTests.fs +++ b/tests/Equinox.CosmosStore.Integration/CacheCellTests.fs @@ -1,4 +1,4 @@ -module Equinox.Cosmos.Integration.CacheCellTests +module Equinox.CosmosStore.Integration.CacheCellTests open Equinox.Core open Swensen.Unquote @@ -54,7 +54,7 @@ let ``AsyncCacheCell correctness with throwing`` initiallyThrowing = async { else let! r = cell.AwaitValue() test <@ 1 = r @> - + incr expectedValue let! accessResult = [|1 .. 100|] |> Array.map (fun _ -> cell.AwaitValue ()) |> Async.Parallel @@ -73,4 +73,4 @@ let ``AsyncCacheCell correctness with throwing`` initiallyThrowing = async { incr expectedValue let! accessResult = [|1 .. 
10|] |> Array.map (fun _ -> cell.AwaitValue ()) |> Async.Parallel - test <@ accessResult |> Array.forall ((=) 4) @> } \ No newline at end of file + test <@ accessResult |> Array.forall ((=) 4) @> } diff --git a/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs b/tests/Equinox.CosmosStore.Integration/CosmosCoreIntegration.fs similarity index 95% rename from tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs rename to tests/Equinox.CosmosStore.Integration/CosmosCoreIntegration.fs index 88e14cda9..3a73c7204 100644 --- a/tests/Equinox.Cosmos.Integration/CosmosCoreIntegration.fs +++ b/tests/Equinox.CosmosStore.Integration/CosmosCoreIntegration.fs @@ -1,7 +1,7 @@ -module Equinox.Cosmos.Integration.CoreIntegration +module Equinox.CosmosStore.Integration.CoreIntegration -open Equinox.Cosmos.Core -open Equinox.Cosmos.Integration.Infrastructure +open Equinox.CosmosStore.Core +open Equinox.CosmosStore.Integration.Infrastructure open FsCodec open FSharp.Control open Newtonsoft.Json.Linq @@ -91,14 +91,14 @@ type Tests(testOutputHelper) = let verifyCorrectEventsEx direction baseIndex (expected: IEventData<_>[]) (xs: ITimelineEvent[]) = let xs, baseIndex = - if direction = Equinox.Cosmos.Store.Direction.Forward then xs, baseIndex + if direction = Equinox.CosmosStore.Core.Direction.Forward then xs, baseIndex else Array.rev xs, baseIndex - int64 (Array.length expected) + 1L test <@ [for i in 0..expected.Length - 1 -> baseIndex + int64 i] = [for r in xs -> r.Index] @> test <@ [for e in expected -> e.EventType] = [ for r in xs -> r.EventType ] @> for i,x,y in Seq.mapi2 (fun i x y -> i,x,y) [for e in expected -> e.Data] [for r in xs -> r.Data] do verifyUtf8JsonEquals i x y - let verifyCorrectEventsBackward = verifyCorrectEventsEx Equinox.Cosmos.Store.Direction.Backward - let verifyCorrectEvents = verifyCorrectEventsEx Equinox.Cosmos.Store.Direction.Forward + let verifyCorrectEventsBackward = verifyCorrectEventsEx Equinox.CosmosStore.Core.Direction.Backward + let 
verifyCorrectEvents = verifyCorrectEventsEx Equinox.CosmosStore.Core.Direction.Forward [] let ``appendAtEnd and getNextIndex`` (extras, TestStream streamName) = Async.RunSynchronously <| async { @@ -247,7 +247,7 @@ type Tests(testOutputHelper) = verifyCorrectEvents 0L expected res test <@ [EqxAct.ResponseForward; EqxAct.QueryForward] = capture.ExternalCalls @> let queryRoundTripsAndItemCounts = function - | EqxEvent (Equinox.Cosmos.Store.Log.Event.Query (Equinox.Cosmos.Store.Direction.Forward, responses, { count = c })) -> Some (responses,c) + | EqxEvent (Equinox.CosmosStore.Core.Log.Event.Query (Equinox.CosmosStore.Core.Direction.Forward, responses, { count = c })) -> Some (responses,c) | _ -> None // validate that, despite only requesting max 1 item, we only needed one trip (which contained only one item) [1,1] =! capture.ChooseCalls queryRoundTripsAndItemCounts @@ -310,7 +310,7 @@ type Tests(testOutputHelper) = test <@ [EqxAct.ResponseBackward; EqxAct.QueryBackward] = capture.ExternalCalls @> // validate that, despite only requesting max 1 item, we only needed one trip, bearing 5 items (from which one item was omitted) let queryRoundTripsAndItemCounts = function - | EqxEvent (Equinox.Cosmos.Store.Log.Event.Query (Equinox.Cosmos.Store.Direction.Backward, responses, { count = c })) -> Some (responses,c) + | EqxEvent (Equinox.CosmosStore.Core.Log.Event.Query (Equinox.CosmosStore.Core.Direction.Backward, responses, { count = c })) -> Some (responses,c) | _ -> None [1,5] =! 
capture.ChooseCalls queryRoundTripsAndItemCounts verifyRequestChargesMax 6 // 5.76 // WAS 4 // 3.04 // WAS 3 // 2.98 diff --git a/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs b/tests/Equinox.CosmosStore.Integration/CosmosFixtures.fs similarity index 74% rename from tests/Equinox.Cosmos.Integration/CosmosFixtures.fs rename to tests/Equinox.CosmosStore.Integration/CosmosFixtures.fs index 6c3f13d5d..129ec8186 100644 --- a/tests/Equinox.Cosmos.Integration/CosmosFixtures.fs +++ b/tests/Equinox.CosmosStore.Integration/CosmosFixtures.fs @@ -1,7 +1,7 @@ [] -module Equinox.Cosmos.Integration.CosmosFixtures +module Equinox.CosmosStore.Integration.CosmosFixtures -open Equinox.Cosmos +open Equinox.CosmosStore open System module Option = @@ -17,14 +17,14 @@ let private databaseId = read "EQUINOX_COSMOS_DATABASE" |> Option.defaultValue " let private containerId = read "EQUINOX_COSMOS_CONTAINER" |> Option.defaultValue "equinox-test" let private connectToCosmos batchSize client = - Context(client, defaultMaxItems = batchSize) + CosmosStoreContext(client, defaultMaxItems = batchSize) let createSpecifiedCosmosOrSimulatorClient (log : Serilog.ILogger) = let createClient name discovery = - let factory = CosmosClientFactory(requestTimeout=TimeSpan.FromSeconds 3., maxRetryAttemptsOnRateLimitedRequests=2, maxRetryWaitTimeOnRateLimitedRequests=TimeSpan.FromMinutes 1.) - let cosmosClient = factory.Create discovery - log.Information("Connection {name} to {endpoint}", name, cosmosClient.Endpoint) - Client(cosmosClient, databaseId, containerId) + let factory = CosmosStoreClientFactory(requestTimeout=TimeSpan.FromSeconds 3., maxRetryAttemptsOnRateLimitedRequests=2, maxRetryWaitTimeOnRateLimitedRequests=TimeSpan.FromMinutes 1.) 
+ let client = factory.Create discovery + log.Information("Connection {name} to {endpoint}", name, client.Endpoint) + CosmosStoreConnection(client, databaseId, containerId) match read "EQUINOX_COSMOS_CONNECTION" with | None -> @@ -40,6 +40,6 @@ let connectToSpecifiedCosmosOrSimulator (log: Serilog.ILogger) batchSize = let createSpecifiedCoreContext log defaultBatchSize = let client = createSpecifiedCosmosOrSimulatorClient log - Equinox.Cosmos.Core.Context(client.CosmosClient, log, databaseId, containerId, ?defaultMaxItems = defaultBatchSize) + Equinox.CosmosStore.Core.EventsContext(client.Client, log, databaseId, containerId, ?defaultMaxItems = defaultBatchSize) let defaultBatchSize = 500 diff --git a/tests/Equinox.Cosmos.Integration/CosmosFixturesInfrastructure.fs b/tests/Equinox.CosmosStore.Integration/CosmosFixturesInfrastructure.fs similarity index 93% rename from tests/Equinox.Cosmos.Integration/CosmosFixturesInfrastructure.fs rename to tests/Equinox.CosmosStore.Integration/CosmosFixturesInfrastructure.fs index 1b88a59d0..a6b9e1aa0 100644 --- a/tests/Equinox.Cosmos.Integration/CosmosFixturesInfrastructure.fs +++ b/tests/Equinox.CosmosStore.Integration/CosmosFixturesInfrastructure.fs @@ -1,5 +1,5 @@ [] -module Equinox.Cosmos.Integration.Infrastructure +module Equinox.CosmosStore.Integration.Infrastructure open Domain open FsCheck @@ -49,8 +49,8 @@ module SerilogHelpers = let (|SerilogScalar|_|) : Serilog.Events.LogEventPropertyValue -> obj option = function | (:? 
ScalarValue as x) -> Some x.Value | _ -> None - open Equinox.Cosmos.Store - open Equinox.Cosmos.Store.Log + open Equinox.CosmosStore.Core + open Equinox.CosmosStore.Core.Log [] type EqxAct = | Tip | TipNotFound | TipNotModified @@ -68,7 +68,7 @@ module SerilogHelpers = | Event.SyncSuccess _ -> EqxAct.Append | Event.SyncResync _ -> EqxAct.Resync | Event.SyncConflict _ -> EqxAct.Conflict - let inline (|Stats|) ({ ru = ru }: Equinox.Cosmos.Store.Log.Measurement) = ru + let inline (|Stats|) ({ ru = ru }: Equinox.CosmosStore.Core.Log.Measurement) = ru let (|CosmosReadRc|CosmosWriteRc|CosmosResyncRc|CosmosResponseRc|) = function | Event.Tip (Stats s) | Event.TipNotFound (Stats s) @@ -85,9 +85,9 @@ module SerilogHelpers = EquinoxChargeRollup | CosmosReadRc rc | CosmosWriteRc rc | CosmosResyncRc rc as e -> CosmosRequestCharge (e,rc) - let (|EqxEvent|_|) (logEvent : LogEvent) : Equinox.Cosmos.Store.Log.Event option = + let (|EqxEvent|_|) (logEvent : LogEvent) : Equinox.CosmosStore.Core.Log.Event option = logEvent.Properties.Values |> Seq.tryPick (function - | SerilogScalar (:? Equinox.Cosmos.Store.Log.Event as e) -> Some e + | SerilogScalar (:? 
Equinox.CosmosStore.Core.Log.Event as e) -> Some e | _ -> None) let (|HasProp|_|) (name : string) (e : LogEvent) : LogEventPropertyValue option = @@ -126,4 +126,4 @@ type TestsWithLogCapture(testOutputHelper) = member __.Capture = capture member __.Log = log - interface IDisposable with member __.Dispose() = log.Dispose() \ No newline at end of file + interface IDisposable with member __.Dispose() = log.Dispose() diff --git a/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs b/tests/Equinox.CosmosStore.Integration/CosmosIntegration.fs similarity index 93% rename from tests/Equinox.Cosmos.Integration/CosmosIntegration.fs rename to tests/Equinox.CosmosStore.Integration/CosmosIntegration.fs index 620eb47fa..a21200629 100644 --- a/tests/Equinox.Cosmos.Integration/CosmosIntegration.fs +++ b/tests/Equinox.CosmosStore.Integration/CosmosIntegration.fs @@ -1,8 +1,8 @@ -module Equinox.Cosmos.Integration.CosmosIntegration +module Equinox.CosmosStore.Integration.CosmosIntegration open Domain -open Equinox.Cosmos -open Equinox.Cosmos.Integration.Infrastructure +open Equinox.CosmosStore +open Equinox.CosmosStore.Integration.Infrastructure open FSharp.UMX open Swensen.Unquote open System @@ -13,24 +13,24 @@ module Cart = let snapshot = Domain.Cart.Fold.isOrigin, Domain.Cart.Fold.snapshot let codec = Domain.Cart.Events.codecStj IntegrationJsonSerializer.options let createServiceWithoutOptimization store log = - let resolve (id,opt) = Resolver(store, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.Unoptimized).Resolve(id,?option=opt) + let resolve (id,opt) = CosmosStoreCategory(store, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.Unoptimized).Resolve(id,?option=opt) Backend.Cart.create log resolve let projection = "Compacted",snd snapshot /// Trigger looking in Tip (we want those calls to occur, but without leaning on snapshots, which would reduce the paths covered) let createServiceWithEmptyUnfolds store log = let unfArgs = 
Domain.Cart.Fold.isOrigin, fun _ -> Seq.empty - let resolve (id,opt) = Resolver(store, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.MultiSnapshot unfArgs).Resolve(id,?option=opt) + let resolve (id,opt) = CosmosStoreCategory(store, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.MultiSnapshot unfArgs).Resolve(id,?option=opt) Backend.Cart.create log resolve let createServiceWithSnapshotStrategy store log = - let resolve (id,opt) = Resolver(store, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.Snapshot snapshot).Resolve(id,?option=opt) + let resolve (id,opt) = CosmosStoreCategory(store, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.Snapshot snapshot).Resolve(id,?option=opt) Backend.Cart.create log resolve let createServiceWithSnapshotStrategyAndCaching store log cache = let sliding20m = CachingStrategy.SlidingWindow (cache, TimeSpan.FromMinutes 20.) - let resolve (id,opt) = Resolver(store, codec, fold, initial, sliding20m, AccessStrategy.Snapshot snapshot).Resolve(id,?option=opt) + let resolve (id,opt) = CosmosStoreCategory(store, codec, fold, initial, sliding20m, AccessStrategy.Snapshot snapshot).Resolve(id,?option=opt) Backend.Cart.create log resolve let createServiceWithRollingState store log = let access = AccessStrategy.RollingState Domain.Cart.Fold.snapshot - let resolve (id,opt) = Resolver(store, codec, fold, initial, CachingStrategy.NoCaching, access).Resolve(id,?option=opt) + let resolve (id,opt) = CosmosStoreCategory(store, codec, fold, initial, CachingStrategy.NoCaching, access).Resolve(id,?option=opt) Backend.Cart.create log resolve module ContactPreferences = @@ -38,13 +38,13 @@ module ContactPreferences = let codec = Domain.ContactPreferences.Events.codecStj IntegrationJsonSerializer.options let createServiceWithoutOptimization createContext defaultBatchSize log _ignoreWindowSize _ignoreCompactionPredicate = let context = createContext defaultBatchSize - let resolve = Resolver(context, 
codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.Unoptimized).Resolve + let resolve = CosmosStoreCategory(context, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.Unoptimized).Resolve Backend.ContactPreferences.create log resolve let createService log store = - let resolve = Resolver(store, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.LatestKnownEvent).Resolve + let resolve = CosmosStoreCategory(store, codec, fold, initial, CachingStrategy.NoCaching, AccessStrategy.LatestKnownEvent).Resolve Backend.ContactPreferences.create log resolve let createServiceWithLatestKnownEvent store log cachingStrategy = - let resolve = Resolver(store, codec, fold, initial, cachingStrategy, AccessStrategy.LatestKnownEvent).Resolve + let resolve = CosmosStoreCategory(store, codec, fold, initial, cachingStrategy, AccessStrategy.LatestKnownEvent).Resolve Backend.ContactPreferences.create log resolve #nowarn "1182" // From hereon in, we may have some 'unused' privates (the tests) diff --git a/tests/Equinox.Cosmos.Integration/Equinox.Cosmos.Integration.fsproj b/tests/Equinox.CosmosStore.Integration/Equinox.CosmosStore.Integration.fsproj similarity index 93% rename from tests/Equinox.Cosmos.Integration/Equinox.Cosmos.Integration.fsproj rename to tests/Equinox.CosmosStore.Integration/Equinox.CosmosStore.Integration.fsproj index 09c104a32..53e4aa074 100644 --- a/tests/Equinox.Cosmos.Integration/Equinox.Cosmos.Integration.fsproj +++ b/tests/Equinox.CosmosStore.Integration/Equinox.CosmosStore.Integration.fsproj @@ -20,7 +20,7 @@ - + diff --git a/tests/Equinox.Cosmos.Integration/Json.fs b/tests/Equinox.CosmosStore.Integration/Json.fs similarity index 91% rename from tests/Equinox.Cosmos.Integration/Json.fs rename to tests/Equinox.CosmosStore.Integration/Json.fs index 80a5c976a..40f931328 100644 --- a/tests/Equinox.Cosmos.Integration/Json.fs +++ b/tests/Equinox.CosmosStore.Integration/Json.fs @@ -1,5 +1,5 @@ [] -module 
Equinox.Cosmos.Integration.Json +module Equinox.CosmosStore.Integration.Json open System open System.Text.Json.Serialization diff --git a/tests/Equinox.Cosmos.Integration/JsonConverterTests.fs b/tests/Equinox.CosmosStore.Integration/JsonConverterTests.fs similarity index 84% rename from tests/Equinox.Cosmos.Integration/JsonConverterTests.fs rename to tests/Equinox.CosmosStore.Integration/JsonConverterTests.fs index 9651e6227..7f1a2a7f9 100644 --- a/tests/Equinox.Cosmos.Integration/JsonConverterTests.fs +++ b/tests/Equinox.CosmosStore.Integration/JsonConverterTests.fs @@ -1,12 +1,11 @@ -module Equinox.Cosmos.Integration.JsonConverterTests +module Equinox.CosmosStore.Integration.JsonConverterTests -open Equinox.Cosmos -open Equinox.Cosmos.Store +open Equinox.CosmosStore +open Equinox.CosmosStore.Core open FsCheck.Xunit open Swensen.Unquote open System open System.Text.Json -open Xunit type Embedded = { embed : string } type Union = @@ -29,14 +28,14 @@ type Base64ZipUtf8Tests() = let encoded = eventCodec.Encode(None,value) let compressor = if compress then JsonCompressedBase64Converter.Compress else id - let e : Store.Unfold = + let e : Core.Unfold = { i = 42L c = encoded.EventType d = compressor encoded.Data m = Unchecked.defaultof t = DateTimeOffset.MinValue } let ser = FsCodec.SystemTextJson.Serdes.Serialize(e, defaultOptions) - let des = FsCodec.SystemTextJson.Serdes.Deserialize(ser, defaultOptions) + let des = FsCodec.SystemTextJson.Serdes.Deserialize(ser, defaultOptions) let d = FsCodec.Core.TimelineEvent.Create(-1L, des.c, des.d) let decoded = eventCodec.TryDecode d |> Option.get test <@ value = decoded @> diff --git a/tools/Equinox.Tool/Program.fs b/tools/Equinox.Tool/Program.fs index 80364a3b5..4f036ff51 100644 --- a/tools/Equinox.Tool/Program.fs +++ b/tools/Equinox.Tool/Program.fs @@ -209,7 +209,7 @@ and Test = Favorite | SaveForLater | Todo let createStoreLog verbose verboseConsole maybeSeqEndpoint = let c = LoggerConfiguration().Destructure.FSharpTypes() 
let c = if verbose then c.MinimumLevel.Debug() else c - let c = c.WriteTo.Sink(Equinox.Cosmos.Store.Log.InternalMetrics.Stats.LogSink()) + let c = c.WriteTo.Sink(Equinox.CosmosStore.Core.Log.InternalMetrics.Stats.LogSink()) let c = c.WriteTo.Sink(Equinox.EventStore.Log.InternalMetrics.Stats.LogSink()) let c = c.WriteTo.Sink(Equinox.SqlStreamStore.Log.InternalMetrics.Stats.LogSink()) let level = @@ -273,7 +273,7 @@ module LoadTest = .Information("Running {test} for {duration} @ {tps} hits/s across {clients} clients; Max errors: {errorCutOff}, reporting intervals: {ri}, report file: {report}", test, a.Duration, a.TestsPerSecond, clients.Length, a.ErrorCutoff, a.ReportingIntervals, reportFilename) // Reset the start time based on which the shared global metrics will be computed - let _ = Equinox.Cosmos.Store.Log.InternalMetrics.Stats.LogSink.Restart() + let _ = Equinox.CosmosStore.Core.Log.InternalMetrics.Stats.LogSink.Restart() let _ = Equinox.EventStore.Log.InternalMetrics.Stats.LogSink.Restart() let _ = Equinox.SqlStreamStore.Log.InternalMetrics.Stats.LogSink.Restart() let results = runLoadTest log a.TestsPerSecond (duration.Add(TimeSpan.FromSeconds 5.)) a.ErrorCutoff a.ReportingIntervals clients runSingleTest |> Async.RunSynchronously @@ -285,7 +285,7 @@ module LoadTest = match storeConfig with | Some (Storage.StorageConfig.Cosmos _) -> - Equinox.Cosmos.Store.Log.InternalMetrics.dump log + Equinox.CosmosStore.Core.Log.InternalMetrics.dump log | Some (Storage.StorageConfig.Es _) -> Equinox.EventStore.Log.InternalMetrics.dump log | Some (Storage.StorageConfig.Sql _) -> @@ -295,7 +295,7 @@ module LoadTest = let createDomainLog verbose verboseConsole maybeSeqEndpoint = let c = LoggerConfiguration().Destructure.FSharpTypes().Enrich.FromLogContext() let c = if verbose then c.MinimumLevel.Debug() else c - let c = c.WriteTo.Sink(Equinox.Cosmos.Store.Log.InternalMetrics.Stats.LogSink()) + let c = c.WriteTo.Sink(Equinox.CosmosStore.Core.Log.InternalMetrics.Stats.LogSink()) 
let c = c.WriteTo.Sink(Equinox.EventStore.Log.InternalMetrics.Stats.LogSink()) let c = c.WriteTo.Sink(Equinox.SqlStreamStore.Log.InternalMetrics.Stats.LogSink()) let outputTemplate = "{Timestamp:T} {Level:u1} {Message:l} {Properties}{NewLine}{Exception}" @@ -304,11 +304,11 @@ let createDomainLog verbose verboseConsole maybeSeqEndpoint = c.CreateLogger() module CosmosInit = - open Equinox.Cosmos.Store + open Equinox.CosmosStore.Core let conn log (sargs : ParseResults) = - let cosmosClient, dName, cName = Storage.Cosmos.connection log (Storage.Cosmos.Info sargs) - cosmosClient, dName, cName + let client, databaseId, containerId = Storage.Cosmos.conn log (Storage.Cosmos.Info sargs) + client, databaseId, containerId let containerAndOrDb (log: ILogger) (iargs: ParseResults) = match iargs.TryGetSubCommand() with @@ -316,9 +316,9 @@ module CosmosInit = let rus, skipStoredProc = iargs.GetResult(InitArguments.Rus), iargs.Contains InitArguments.SkipStoredProc let mode = if iargs.Contains InitArguments.Shared then Provisioning.Database (ReplaceAlways rus) else Provisioning.Container (ReplaceAlways rus) let modeStr, rus = match mode with Provisioning.Container rus -> "Container",rus | Provisioning.Database rus -> "Database",rus - let cosmosClient, dName, cName = conn log sargs + let client, databaseId, containerId = conn log sargs log.Information("Provisioning `Equinox.Cosmos` Store collection at {mode:l} level for {rus:n0} RU/s", modeStr, rus) - Equinox.Cosmos.Store.Initialization.initializeContainer cosmosClient dName cName mode (not skipStoredProc, None) |> Async.Ignore |> Async.RunSynchronously + Equinox.CosmosStore.Core.Initialization.initializeContainer client databaseId containerId mode (not skipStoredProc, None) |> Async.Ignore |> Async.RunSynchronously | _ -> failwith "please specify a `cosmos` endpoint" module SqlInit = @@ -347,8 +347,8 @@ module CosmosStats = let doS,doD,doE = args.Contains StatsArguments.Streams, args.Contains StatsArguments.Documents, 
args.Contains StatsArguments.Events let doS = doS || (not doD && not doE) // default to counting streams only unless otherwise specified let inParallel = args.Contains Parallel - let cosmosClient, dName, cName = CosmosInit.conn log sargs - let container = cosmosClient.GetContainer(dName, cName) + let client, databaseId, containerId = CosmosInit.conn log sargs + let container = client.GetContainer(databaseId, containerId) let ops = [ if doS then yield "Streams", """SELECT VALUE COUNT(1) FROM c WHERE c.id="-1" """ if doD then yield "Documents", """SELECT VALUE COUNT(1) FROM c""" From 730079f7aea451eb56bf70dcfaf8b40d93fa94da Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Sun, 10 May 2020 03:43:05 +0100 Subject: [PATCH 64/71] Remove CancellationToken.useOrCreate --- samples/Tutorial/Tutorial.fsproj | 2 +- src/Equinox.CosmosStore/CosmosStore.fs | 15 ++++++--------- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/samples/Tutorial/Tutorial.fsproj b/samples/Tutorial/Tutorial.fsproj index 41368c917..0da3b7fce 100644 --- a/samples/Tutorial/Tutorial.fsproj +++ b/samples/Tutorial/Tutorial.fsproj @@ -4,7 +4,7 @@ netstandard2.1 5 true - + true diff --git a/src/Equinox.CosmosStore/CosmosStore.fs b/src/Equinox.CosmosStore/CosmosStore.fs index b0e25ee5f..9df5585b0 100644 --- a/src/Equinox.CosmosStore/CosmosStore.fs +++ b/src/Equinox.CosmosStore/CosmosStore.fs @@ -401,9 +401,6 @@ function sync(req, expIndex, expEtag) { } }""" -module private CancellationToken = - let useOrCreate = function Some ct -> async.Return ct | _ -> Async.CancellationToken - module Initialization = let internal getOrCreateDatabase (client: CosmosClient) (databaseId: string) (throughput: ResourceThroughput) = async { let! 
ct = Async.CancellationToken @@ -466,11 +463,11 @@ type ContainerGateway(cosmosContainer : CosmosContainer) = default __.GetQueryIteratorByPage<'T>(query, ?options) = cosmosContainer.GetItemQueryIterator<'T>(query, requestOptions = defaultArg options null).AsPages() |> AsyncSeq.ofAsyncEnum - abstract member TryReadItem<'T> : docId: string * partitionKey: string * ?options: ItemRequestOptions * ?cancellationToken: CancellationToken -> Async> - default __.TryReadItem<'T>(docId, partitionKey, ?options, ?cancellationToken) = async { + abstract member TryReadItem<'T> : docId: string * partitionKey: string * ?options: ItemRequestOptions -> Async> + default __.TryReadItem<'T>(docId, partitionKey, ?options) = async { let partitionKey = PartitionKey partitionKey let options = defaultArg options null - let! ct = CancellationToken.useOrCreate cancellationToken + let! ct = Async.CancellationToken // TODO use TryReadItemStreamAsync to avoid the exception https://github.com/Azure/azure-cosmos-dotnet-v3/issues/692#issuecomment-521936888 try let! item = async { return! cosmosContainer.ReadItemAsync<'T>(docId, partitionKey, requestOptions = options, cancellationToken = ct) |> Async.AwaitTaskCorrect } // if item.StatusCode = System.Net.HttpStatusCode.NotModified then return item.RequestCharge, NotModified @@ -483,9 +480,9 @@ type ContainerGateway(cosmosContainer : CosmosContainer) = // NB while the docs suggest you may see a 412, the NotModified in the body of the try/with is actually what happens | CosmosException (CosmosStatusCode sc as e) when sc = int System.Net.HttpStatusCode.PreconditionFailed -> return e.Response.Headers.GetRequestCharge(), NotModified } - abstract member ExecuteStoredProcedure: storedProcedureName: string * partitionKey: string * args: obj[] * ?cancellationToken : CancellationToken -> Async> - default __.ExecuteStoredProcedure(storedProcedureName, partitionKey, args, ?cancellationToken) = async { - let! 
ct = CancellationToken.useOrCreate cancellationToken + abstract member ExecuteStoredProcedure: storedProcedureName: string * partitionKey: string * args: obj[] -> Async> + default __.ExecuteStoredProcedure(storedProcedureName, partitionKey, args) = async { + let! ct = Async.CancellationToken let partitionKey = PartitionKey partitionKey //let args = [| box tip; box index; box (Option.toObj etag)|] return! cosmosContainer.Scripts.ExecuteStoredProcedureAsync(storedProcedureName, partitionKey, args, cancellationToken = ct) |> Async.AwaitTaskCorrect } From d2e96f2c69356bcb690fbd99a3da8e071b8d4ee8 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Thu, 25 Jun 2020 21:06:32 +0100 Subject: [PATCH 65/71] Implement pretty-printing for JsonElement --- tools/Equinox.Tool/Program.fs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tools/Equinox.Tool/Program.fs b/tools/Equinox.Tool/Program.fs index 4f036ff51..d0a53fbd1 100644 --- a/tools/Equinox.Tool/Program.fs +++ b/tools/Equinox.Tool/Program.fs @@ -411,7 +411,7 @@ module Dump = |> Async.Ignore open System.Text.Json - let dumpJsonElementStorage (log: ILogger) (storeLog: ILogger) doU doE doC doJ _doP doT (resolver: Services.StreamResolver) (streams: FsCodec.StreamName list) = + let dumpJsonElementStorage (log: ILogger) (storeLog: ILogger) doU doE doC doJ doP doT (resolver: Services.StreamResolver) (streams: FsCodec.StreamName list) = let initial = List.empty let fold state events = (events,state) ||> Seq.foldBack (fun e l -> e :: l) let mutable unfolds = List.empty @@ -420,10 +420,10 @@ module Dump = Some x let idCodec = FsCodec.Codec.Create((fun _ -> failwith "No encoding required"), tryDecode, (fun _ -> failwith "No mapCausation")) let isOriginAndSnapshot = (fun (event : FsCodec.ITimelineEvent<_>) -> not doE && event.IsUnfold),fun _state -> failwith "no snapshot required" - let render (data : JsonElement) = + let render pretty (data : JsonElement) = match data.ValueKind with | JsonValueKind.Null | 
JsonValueKind.Undefined -> null - | _ when doJ -> data.GetRawText() + | _ when doJ -> if pretty then FsCodec.SystemTextJson.Serdes.Serialize(data, indent=true) else data.GetRawText() | _ -> sprintf "(%d chars)" (data.GetRawText().Length) let readStream (streamName : FsCodec.StreamName) = async { let stream = resolver.ResolveWithJsonElementCodec(idCodec,fold,initial,isOriginAndSnapshot) streamName @@ -431,7 +431,8 @@ module Dump = let source = if not doE && not (List.isEmpty unfolds) then Seq.ofList unfolds else Seq.append events unfolds let mutable prevTs = None for x in source |> Seq.filter (fun e -> (e.IsUnfold && doU) || (not e.IsUnfold && doE)) do - prevTs <- Some (logEvent log prevTs doC doT x render) } + let pretty = x.IsUnfold || doP + prevTs <- Some (logEvent log prevTs doC doT x (render pretty)) } streams |> Seq.map readStream |> Async.Parallel From 31c94ce3e3978caa4c24b4d94394ffebb27a9de2 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Wed, 2 Sep 2020 15:50:00 +0100 Subject: [PATCH 66/71] Tidy comments --- src/Equinox.CosmosStore/CosmosStore.fs | 8 ++--- .../CosmosFixtures.fs | 32 +++++++++---------- 2 files changed, 19 insertions(+), 21 deletions(-) diff --git a/src/Equinox.CosmosStore/CosmosStore.fs b/src/Equinox.CosmosStore/CosmosStore.fs index 37cedd494..bbb526c13 100644 --- a/src/Equinox.CosmosStore/CosmosStore.fs +++ b/src/Equinox.CosmosStore/CosmosStore.fs @@ -1,4 +1,4 @@ -namespace Equinox.CosmosStore.Core +namespace Equinox.CosmosStore.Core open Azure open Azure.Cosmos @@ -1027,13 +1027,13 @@ type CosmosStoreConnection ( client : CosmosClient, /// Singleton used to cache initialization state per CosmosContainer. containers : Containers, - /// Admits a hook to enable customization of how Equinox.Cosmos handles the low level interactions with the underlying CosmosContainer. + /// Admits a hook to enable customization of how Equinox.CosmosStore handles the low level interactions with the underlying CosmosContainer. 
?createGateway) = let createGateway = match createGateway with Some creator -> creator | None -> ContainerGateway new (client, databaseId : string, containerId : string, /// Inhibit CreateStoredProcedureIfNotExists when a given Container is used for the first time []?disableInitialization, - /// Admits a hook to enable customization of how Equinox.Cosmos handles the low level interactions with the underlying CosmosContainer. + /// Admits a hook to enable customization of how Equinox.CosmosStore handles the low level interactions with the underlying CosmosContainer. []?createGateway : CosmosContainer -> ContainerGateway) = let containers = Containers(databaseId, containerId, ?disableInitialization = disableInitialization) CosmosStoreConnection(client, containers, ?createGateway = createGateway) @@ -1186,7 +1186,7 @@ type AppendResult<'t> = | Conflict of index: 't * conflictingEvents: ITimelineEvent[] | ConflictUnknown of index: 't -/// Encapsulates the core facilities Equinox.Cosmos offers for operating directly on Events in Streams. +/// Encapsulates the core facilities Equinox.CosmosStore offers for operating directly on Events in Streams. 
type EventsContext ( context : Equinox.CosmosStore.CosmosStoreContext, container : ContainerClient, /// Logger to write to - see https://github.com/serilog/serilog/wiki/Provided-Sinks for how to wire to your logger diff --git a/tests/Equinox.CosmosStore.Integration/CosmosFixtures.fs b/tests/Equinox.CosmosStore.Integration/CosmosFixtures.fs index 129ec8186..d243e4883 100644 --- a/tests/Equinox.CosmosStore.Integration/CosmosFixtures.fs +++ b/tests/Equinox.CosmosStore.Integration/CosmosFixtures.fs @@ -9,37 +9,35 @@ module Option = /// Standing up an Equinox instance is necessary to run for test purposes; either: /// - replace connection below with a connection string or Uri+Key for an initialized Equinox instance -/// - Create a local Equinox via dotnet run cli/Equinox.cli -s $env:EQUINOX_COSMOS_CONNECTION -d test -c $env:EQUINOX_COSMOS_CONTAINER provision -ru 10000 -let private read env = Environment.GetEnvironmentVariable env |> Option.ofObj -let (|Default|) def name = (read name),def ||> defaultArg +/// - Create a local Equinox via (e.g.) 
dotnet run cli/Equinox.Tool init -ru 1000 cosmos -s $env:EQUINOX_COSMOS_CONNECTION -d test -c $env:EQUINOX_COSMOS_CONTAINER +let private tryRead env = Environment.GetEnvironmentVariable env |> Option.ofObj +let (|Default|) def name = (tryRead name),def ||> defaultArg -let private databaseId = read "EQUINOX_COSMOS_DATABASE" |> Option.defaultValue "equinox-test" -let private containerId = read "EQUINOX_COSMOS_CONTAINER" |> Option.defaultValue "equinox-test" +let private databaseId = tryRead "EQUINOX_COSMOS_DATABASE" |> Option.defaultValue "equinox-test" +let private containerId = tryRead "EQUINOX_COSMOS_CONTAINER" |> Option.defaultValue "equinox-test" -let private connectToCosmos batchSize client = - CosmosStoreContext(client, defaultMaxItems = batchSize) - -let createSpecifiedCosmosOrSimulatorClient (log : Serilog.ILogger) = - let createClient name discovery = +let createSpecifiedCosmosOrSimulatorConnection (log : Serilog.ILogger) = + let createConnection name discovery = let factory = CosmosStoreClientFactory(requestTimeout=TimeSpan.FromSeconds 3., maxRetryAttemptsOnRateLimitedRequests=2, maxRetryWaitTimeOnRateLimitedRequests=TimeSpan.FromMinutes 1.) 
let client = factory.Create discovery - log.Information("Connection {name} to {endpoint}", name, client.Endpoint) + log.Information("CosmosDb Connecting {name} to {endpoint}", name, client.Endpoint) CosmosStoreConnection(client, databaseId, containerId) - match read "EQUINOX_COSMOS_CONNECTION" with + match tryRead "EQUINOX_COSMOS_CONNECTION" with | None -> Discovery.AccountUriAndKey(Uri "https://localhost:8081", "C2y6yDjf5/R+ob0N8A7Cgv30VRDJIWEHLM+4QDU5DE2nQ9nDuVTqobD4b8mGGyPMbIZnqyMsEcaGQy67XIw/Jw==") - |> createClient "localDocDbSim" + |> createConnection "localDocDbSim" | Some connectionString -> Discovery.ConnectionString connectionString - |> createClient "EQUINOX_COSMOS_CONNECTION" + |> createConnection "EQUINOX_COSMOS_CONNECTION" +// TODO rename to something with Context in the name let connectToSpecifiedCosmosOrSimulator (log: Serilog.ILogger) batchSize = - createSpecifiedCosmosOrSimulatorClient log - |> connectToCosmos batchSize + let conn = createSpecifiedCosmosOrSimulatorConnection log + CosmosStoreContext(conn, defaultMaxItems = batchSize) let createSpecifiedCoreContext log defaultBatchSize = - let client = createSpecifiedCosmosOrSimulatorClient log + let client = createSpecifiedCosmosOrSimulatorConnection log Equinox.CosmosStore.Core.EventsContext(client.Client, log, databaseId, containerId, ?defaultMaxItems = defaultBatchSize) let defaultBatchSize = 500 From 4eae6010e6ff72c8efc857234491ae706751993a Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Thu, 3 Sep 2020 11:49:01 +0100 Subject: [PATCH 67/71] Tidy --- src/Equinox.CosmosStore/CosmosStore.fs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/Equinox.CosmosStore/CosmosStore.fs b/src/Equinox.CosmosStore/CosmosStore.fs index bbb526c13..130b33ae2 100644 --- a/src/Equinox.CosmosStore/CosmosStore.fs +++ b/src/Equinox.CosmosStore/CosmosStore.fs @@ -1,4 +1,4 @@ -namespace Equinox.CosmosStore.Core +namespace Equinox.CosmosStore.Core open Azure open Azure.Cosmos @@ -815,15 
+815,14 @@ type Containers let catAndStreamToDatabaseContainerStream (categoryName, streamId) = databaseId, containerId, genStreamName (categoryName, streamId) Containers(catAndStreamToDatabaseContainerStream, ?disableInitialization = disableInitialization) - member internal __.ResolveContainerGuardAndStreamName(client : CosmosClient, createGateway, categoryName, streamId) : ContainerInitializerGuard * string = + member internal __.ResolveContainerGuardAndStreamName(client, createGateway, categoryName, streamId) : ContainerInitializerGuard * string = let databaseId, containerId, streamName = categoryAndStreamNameToDatabaseContainerStream (categoryName, streamId) let createContainerInitializerGuard (d, c) = let init = if Some true = disableInitialization then None else Some (fun cosmosContainer -> Initialization.createSyncStoredProcedure cosmosContainer None |> Async.Ignore) - ContainerInitializerGuard - ( createGateway (client.GetDatabase(d).GetContainer(c)), - ?initContainer = init) + let primaryContainer = (client : CosmosClient).GetDatabase(d).GetContainer(c) + ContainerInitializerGuard(createGateway primaryContainer, ?initContainer = init) let g = containerInitGuards.GetOrAdd((databaseId, containerId), createContainerInitializerGuard) g, streamName From 66220262d93c96eafead53cb5b6712d67d6a9e56 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Thu, 3 Sep 2020 11:52:45 +0100 Subject: [PATCH 68/71] Polish comments --- src/Equinox.CosmosStore/CosmosStore.fs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/Equinox.CosmosStore/CosmosStore.fs b/src/Equinox.CosmosStore/CosmosStore.fs index 130b33ae2..a6a050b56 100644 --- a/src/Equinox.CosmosStore/CosmosStore.fs +++ b/src/Equinox.CosmosStore/CosmosStore.fs @@ -1129,7 +1129,7 @@ type Discovery = type CosmosStoreClientFactory ( /// Timeout to apply to individual reads/write round-trips going to CosmosDb requestTimeout: TimeSpan, - /// Maximum number of times attempt when failure reason is a 
429 from CosmosDb, signifying RU limits have been breached + /// Maximum number of times to attempt when failure reason is a 429 from CosmosDb, signifying RU limits have been breached maxRetryAttemptsOnRateLimitedRequests: int, /// Maximum number of seconds to wait (especially if a higher wait delay is suggested by CosmosDb in the 429 response) maxRetryWaitTimeOnRateLimitedRequests: TimeSpan, @@ -1137,7 +1137,7 @@ type CosmosStoreClientFactory []?gatewayModeMaxConnectionLimit, /// Connection mode (default: ConnectionMode.Gateway (lowest perf, least trouble)) []?mode : ConnectionMode, - /// consistency mode (default: ConsistencyLevel.Session) + /// consistency mode (default: ConsistencyLevel.Session) []?defaultConsistencyLevel : ConsistencyLevel) = /// CosmosClientOptions for this Connector as configured @@ -1193,7 +1193,7 @@ type EventsContext /// Optional maximum number of Store.Batch records to retrieve as a set (how many Events are placed therein is controlled by average batch size when appending events /// Defaults to 10 []?defaultMaxItems, - /// Alternate way of specifying defaultMaxItems which facilitates reading it from a cached dynamic configuration + /// Alternate way of specifying defaultMaxItems that facilitates reading it from a cached dynamic configuration []?getDefaultMaxItems) = do if log = null then nullArg "log" let getDefaultMaxItems = match getDefaultMaxItems with Some f -> f | None -> fun () -> defaultArg defaultMaxItems 10 From e7009b7c1440c4aeb40b53c38c7817e0c9a4ce50 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Tue, 15 Sep 2020 17:10:05 +0100 Subject: [PATCH 69/71] Upgrade to STJ 5.0 rc1 --- src/Equinox.CosmosStore/CosmosJsonSerializer.fs | 6 +++--- .../Equinox.CosmosStore.Integration.fsproj | 2 ++ .../JsonConverterTests.fs | 10 ++++++++-- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/src/Equinox.CosmosStore/CosmosJsonSerializer.fs b/src/Equinox.CosmosStore/CosmosJsonSerializer.fs index a826261ab..55852f2f1 100644 --- 
a/src/Equinox.CosmosStore/CosmosJsonSerializer.fs +++ b/src/Equinox.CosmosStore/CosmosJsonSerializer.fs @@ -37,7 +37,7 @@ type CosmosJsonSerializer (options: JsonSerializerOptions) = and JsonCompressedBase64Converter() = inherit JsonConverter() - static member Compress (value: JsonElement) = + static member Compress(value: JsonElement) = if value.ValueKind = JsonValueKind.Null || value.ValueKind = JsonValueKind.Undefined then value else @@ -48,7 +48,7 @@ and JsonCompressedBase64Converter() = compressor.Close() JsonDocument.Parse("\"" + System.Convert.ToBase64String(output.ToArray()) + "\"").RootElement - override __.Read (reader, _typeToConvert, options) = + override __.Read(reader, _typeToConvert, options) = if reader.TokenType <> JsonTokenType.String then JsonSerializer.Deserialize(&reader, options) else @@ -59,7 +59,7 @@ and JsonCompressedBase64Converter() = decompressor.CopyTo(output) JsonSerializer.Deserialize(ReadOnlySpan.op_Implicit(output.ToArray()), options) - override __.Write (writer, value, options) = + override __.Write(writer, value, options) = JsonSerializer.Serialize(writer, value, options) type JsonCompressedBase64ConverterAttribute () = diff --git a/tests/Equinox.CosmosStore.Integration/Equinox.CosmosStore.Integration.fsproj b/tests/Equinox.CosmosStore.Integration/Equinox.CosmosStore.Integration.fsproj index 9cc8cbd8a..c9df00623 100644 --- a/tests/Equinox.CosmosStore.Integration/Equinox.CosmosStore.Integration.fsproj +++ b/tests/Equinox.CosmosStore.Integration/Equinox.CosmosStore.Integration.fsproj @@ -30,6 +30,8 @@ + + diff --git a/tests/Equinox.CosmosStore.Integration/JsonConverterTests.fs b/tests/Equinox.CosmosStore.Integration/JsonConverterTests.fs index 7f1a2a7f9..d1d4ef690 100644 --- a/tests/Equinox.CosmosStore.Integration/JsonConverterTests.fs +++ b/tests/Equinox.CosmosStore.Integration/JsonConverterTests.fs @@ -15,6 +15,10 @@ type Union = let defaultOptions = FsCodec.SystemTextJson.Options.Create() +module JsonElement = + let d = 
JsonDocument.Parse "null" + let Null = d.RootElement + type Base64ZipUtf8Tests() = let eventCodec = FsCodec.SystemTextJson.Codec.Create(defaultOptions) @@ -28,13 +32,15 @@ type Base64ZipUtf8Tests() = let encoded = eventCodec.Encode(None,value) let compressor = if compress then JsonCompressedBase64Converter.Compress else id + let compressed = compressor encoded.Data let e : Core.Unfold = { i = 42L c = encoded.EventType - d = compressor encoded.Data - m = Unchecked.defaultof + d = compressed + m = JsonElement.Null // TODO find a way to omit the value from rendering t = DateTimeOffset.MinValue } let ser = FsCodec.SystemTextJson.Serdes.Serialize(e, defaultOptions) + System.Diagnostics.Trace.WriteLine ser let des = FsCodec.SystemTextJson.Serdes.Deserialize(ser, defaultOptions) let d = FsCodec.Core.TimelineEvent.Create(-1L, des.c, des.d) let decoded = eventCodec.TryDecode d |> Option.get From 75d45516301f78f245cb68e307b1d11e94291a48 Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Thu, 17 Sep 2020 09:29:42 +0100 Subject: [PATCH 70/71] Fix json --- src/Equinox.CosmosStore/CosmosJsonSerializer.fs | 10 ++++++++-- src/Equinox.CosmosStore/CosmosStore.fs | 10 +++++----- .../JsonConverterTests.fs | 11 ++--------- 3 files changed, 15 insertions(+), 16 deletions(-) diff --git a/src/Equinox.CosmosStore/CosmosJsonSerializer.fs b/src/Equinox.CosmosStore/CosmosJsonSerializer.fs index 55852f2f1..8a8a70814 100644 --- a/src/Equinox.CosmosStore/CosmosJsonSerializer.fs +++ b/src/Equinox.CosmosStore/CosmosJsonSerializer.fs @@ -7,6 +7,13 @@ open System.IO open System.Text.Json open System.Text.Json.Serialization +module JsonHelper = + + let d = JsonDocument.Parse "null" + let private Null = d.RootElement + /// System.Text.Json versions > 4.7 reject JsonValueKind.Undefined elements + let fixup (e : JsonElement) = if e.ValueKind = JsonValueKind.Undefined then Null else e + type CosmosJsonSerializer (options: JsonSerializerOptions) = inherit CosmosSerializer() @@ -38,8 +45,7 @@ and 
JsonCompressedBase64Converter() = inherit JsonConverter() static member Compress(value: JsonElement) = - if value.ValueKind = JsonValueKind.Null || value.ValueKind = JsonValueKind.Undefined then - value + if value.ValueKind = JsonValueKind.Null then value else let input = System.Text.Encoding.UTF8.GetBytes(value.GetRawText()) use output = new MemoryStream() diff --git a/src/Equinox.CosmosStore/CosmosStore.fs b/src/Equinox.CosmosStore/CosmosStore.fs index 27991723d..2ff799b09 100644 --- a/src/Equinox.CosmosStore/CosmosStore.fs +++ b/src/Equinox.CosmosStore/CosmosStore.fs @@ -512,13 +512,13 @@ module Sync = let call = logged containerStream batch Log.withLoggedRetries retryPolicy "writeAttempt" call log - let mkBatch (stream: string) (events: IEventData<_>[]) unfolds: Tip = + let private mkEvent (e : IEventData<_>) = + { t = e.Timestamp; c = e.EventType; d = JsonHelper.fixup e.Data; m = JsonHelper.fixup e.Meta; correlationId = e.CorrelationId; causationId = e.CausationId } + let mkBatch (stream: string) (events: IEventData<_>[]) unfolds : Tip = { p = stream; id = Tip.WellKnownDocumentId; n = -1L(*Server-managed*); i = -1L(*Server-managed*); _etag = null - e = [| for e in events -> { t = e.Timestamp; c = e.EventType; d = e.Data; m = e.Meta; correlationId = e.CorrelationId; causationId = e.CausationId } |] - u = Array.ofSeq unfolds } - + e = [| for e in events -> mkEvent e |]; u = Array.ofSeq unfolds } let mkUnfold compress baseIndex (unfolds: IEventData<_> seq) : Unfold seq = - let compressIfRequested x = if compress then JsonCompressedBase64Converter.Compress x else x + let inline compressIfRequested x = if compress then JsonCompressedBase64Converter.Compress x else x unfolds |> Seq.mapi (fun offset x -> { diff --git a/tests/Equinox.CosmosStore.Integration/JsonConverterTests.fs b/tests/Equinox.CosmosStore.Integration/JsonConverterTests.fs index d1d4ef690..b7c9df0ea 100644 --- a/tests/Equinox.CosmosStore.Integration/JsonConverterTests.fs +++ 
b/tests/Equinox.CosmosStore.Integration/JsonConverterTests.fs @@ -24,20 +24,13 @@ type Base64ZipUtf8Tests() = [] let ``Can read uncompressed and compressed`` compress value = - let hasNulls = - match value with - | A x | B x when obj.ReferenceEquals(null, x) -> true - | A { embed = x } | B { embed = x } -> obj.ReferenceEquals(null, x) - if hasNulls then () else - let encoded = eventCodec.Encode(None,value) let compressor = if compress then JsonCompressedBase64Converter.Compress else id - let compressed = compressor encoded.Data let e : Core.Unfold = { i = 42L c = encoded.EventType - d = compressed - m = JsonElement.Null // TODO find a way to omit the value from rendering + d = encoded.Data |> JsonHelper.fixup |> compressor + m = Unchecked.defaultof<_> |> JsonHelper.fixup t = DateTimeOffset.MinValue } let ser = FsCodec.SystemTextJson.Serdes.Serialize(e, defaultOptions) System.Diagnostics.Trace.WriteLine ser From 792e4207254a00640e7b5ed5a680da1cc752f78b Mon Sep 17 00:00:00 2001 From: Ruben Bartelink Date: Thu, 17 Sep 2020 10:17:00 +0100 Subject: [PATCH 71/71] Really Fix json --- src/Equinox.CosmosStore/CosmosStore.fs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Equinox.CosmosStore/CosmosStore.fs b/src/Equinox.CosmosStore/CosmosStore.fs index 2ff799b09..cd9d550f1 100644 --- a/src/Equinox.CosmosStore/CosmosStore.fs +++ b/src/Equinox.CosmosStore/CosmosStore.fs @@ -524,8 +524,8 @@ module Sync = { i = baseIndex + int64 offset c = x.EventType - d = compressIfRequested x.Data - m = compressIfRequested x.Meta + d = compressIfRequested <| JsonHelper.fixup x.Data + m = compressIfRequested <| JsonHelper.fixup x.Meta t = DateTimeOffset.UtcNow } : Unfold)