diff --git a/bun.lock b/bun.lock
index d40a77c..90547ea 100644
--- a/bun.lock
+++ b/bun.lock
@@ -5,6 +5,7 @@
       "name": "@s2-dev/streamstore",
       "dependencies": {
         "@protobuf-ts/runtime": "^2.11.1",
+        "debug": "^4.4.3",
       },
       "devDependencies": {
         "@arethetypeswrong/cli": "^0.18.2",
@@ -13,6 +14,7 @@
         "@hey-api/openapi-ts": "^0.86.0",
         "@protobuf-ts/plugin": "^2.11.1",
         "@types/bun": "^1.3.1",
+        "@types/debug": "^4.1.12",
         "openapi-typescript": "^7.10.1",
         "protoc": "^33.0.0",
         "typedoc": "^0.28.14",
@@ -250,6 +252,8 @@
     "@types/chai": ["@types/chai@5.2.3", "", { "dependencies": { "@types/deep-eql": "*", "assertion-error": "^2.0.1" } }, "sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA=="],

+    "@types/debug": ["@types/debug@4.1.12", "", { "dependencies": { "@types/ms": "*" } }, "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ=="],
+
     "@types/deep-eql": ["@types/deep-eql@4.0.2", "", {}, "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw=="],

     "@types/estree": ["@types/estree@1.0.8", "", {}, "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w=="],
@@ -258,6 +262,8 @@
     "@types/json-schema": ["@types/json-schema@7.0.15", "", {}, "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA=="],

+    "@types/ms": ["@types/ms@2.1.0", "", {}, "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA=="],
+
     "@types/node": ["@types/node@24.9.2", "", { "dependencies": { "undici-types": "~7.16.0" } }, "sha512-uWN8YqxXxqFMX2RqGOrumsKeti4LlmIMIyV0lgut4jx7KQBcBiW6vkDtIBvHnHIquwNfJhk8v2OtmO8zXWHfPA=="],

     "@types/react": ["@types/react@19.2.2", "", { "dependencies": { "csstype": "^3.0.2" } }, "sha512-6mDvHUFSjyT2B2yeNx2nUgMxh9LtOWvkhIU3uePn2I2oyNymUAX1NIsdgviM4CH+JSrp2D2hsMvJOkxY+0wNRA=="],
diff --git a/examples/image.ts b/examples/image.ts
index ea416d0..ff37474 100644
--- a/examples/image.ts
+++ b/examples/image.ts
@@ -31,6 +31,11 @@ function rechunkStream(
 const s2 = new S2({
   accessToken: process.env.S2_ACCESS_TOKEN!,
+  retry: {
+    maxAttempts: 10,
+    retryBackoffDurationMs: 100,
+    appendRetryPolicy: "noSideEffects",
+  },
 });
 
 const basinName = process.env.S2_BASIN;
@@ -45,12 +50,26 @@ const stream = basin.stream("image");
 const startAt = await stream.checkTail();
 
 const session = await stream.appendSession({
-  maxQueuedBytes: 1024 * 1024 * 10,
+  maxQueuedBytes: 1024 * 1024, // 1MiB
 });
 
 let image = await fetch(
   "https://upload.wikimedia.org/wikipedia/commons/2/24/Peter_Paul_Rubens_-_Self-portrait_-_RH.S.180_-_Rubenshuis_%28after_restoration%29.jpg",
 );
 
+function mapWithIndexAsync<T, U>(
+  fn: (value: T, index: number) => Promise<U> | U,
+): TransformStream<T, U> {
+  let index = 0;
+
+  return new TransformStream({
+    async transform(chunk, controller) {
+      const out = await fn(chunk, index);
+      index += 1;
+      controller.enqueue(out);
+    },
+  });
+}
+
 // Write directly from fetch response to S2 stream
 let append = await image
   .body!
   // Ensure each chunk is at most 128KiB. S2 has a maximum individual record size of 1MiB.
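Note: `mapWithIndexAsync` is generic over its input and output element types (`<T, U>`). A self-contained sketch of its behavior outside the S2 pipeline — `ReadableStream.from` assumes Node 20+ or Bun:

```typescript
// Illustrative only: pipe a few strings through a mapWithIndexAsync-style
// transform and observe the (index, value) pairing it produces.
function mapWithIndexAsync<T, U>(
  fn: (value: T, index: number) => Promise<U> | U,
): TransformStream<T, U> {
  let index = 0;
  return new TransformStream({
    async transform(chunk, controller) {
      controller.enqueue(await fn(chunk, index++));
    },
  });
}

const out = ReadableStream.from(["a", "b", "c"]).pipeThrough(
  mapWithIndexAsync((value, index) => `${index}:${value}`),
);
for await (const item of out) console.log(item); // 0:a  1:b  2:c
```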
@@ -63,10 +82,24 @@ let append = await image
       },
     }),
   )
+  .pipeThrough(
+    mapWithIndexAsync(
+      (record, index) =>
+        ({
+          ...record,
+          headers: [
+            [
+              new TextEncoder().encode("index"),
+              new TextEncoder().encode(index.toString()),
+            ],
+          ],
+        }) as AppendRecord,
+    ),
+  )
   // Collect records into batches.
   .pipeThrough(
     new BatchTransform({
-      lingerDurationMillis: 50,
+      lingerDurationMillis: 5,
       match_seq_num: startAt.tail.seq_num,
     }),
   )
diff --git a/examples/read.ts b/examples/read.ts
index 047940e..9ab3e10 100644
--- a/examples/read.ts
+++ b/examples/read.ts
@@ -24,7 +24,7 @@ if (streams.streams[0]) {
 
   for await (const record of readSession) {
     console.log(`[seq ${record.seq_num}] ${record.body}`);
-    console.log("new tail", readSession.lastReadPosition()?.seq_num);
+    console.log("next position", readSession.nextReadPosition()?.seq_num);
   }
   console.log("Done reading");
 }
diff --git a/examples/throughput.ts b/examples/throughput.ts
new file mode 100644
index 0000000..180c87b
--- /dev/null
+++ b/examples/throughput.ts
@@ -0,0 +1,69 @@
+import { createWriteStream } from "node:fs";
+import {
+  AppendRecord,
+  BatchTransform,
+  type ReadRecord,
+  S2,
+} from "../src/index.js";
+
+function createStringStream(
+  n: number,
+  delayMs: number = 0,
+): ReadableStream<string> {
+  let count = 0;
+  return new ReadableStream<string>({
+    async pull(controller) {
+      if (count < n) {
+        if (delayMs > 0) {
+          await new Promise((resolve) => setTimeout(resolve, delayMs));
+        }
+        console.log("pull", count);
+        const randomChars = Array.from({ length: 1024 * 10 }, () =>
+          String.fromCharCode(97 + Math.floor(Math.random() * 26)),
+        ).join("");
+
+        const str = `${count} ${randomChars}`;
+        controller.enqueue(str);
+        count++;
+      } else {
+        controller.close();
+      }
+    },
+  });
+}
+
+const s2 = new S2({
+  accessToken: process.env.S2_ACCESS_TOKEN!,
+  retry: {
+    maxAttempts: 10,
+    retryBackoffDurationMs: 100,
+    appendRetryPolicy: "noSideEffects",
+    requestTimeoutMillis: 10000,
+  },
+});
+
+const basinName = process.env.S2_BASIN;
+if (!basinName) {
+  console.error("S2_BASIN environment variable is not set");
+  process.exit(1);
+}
+
+const basin = s2.basin(basinName);
+const stream = basin.stream("throughput");
+
+const sesh = await stream.appendSession({ maxQueuedBytes: 1024 * 1024 * 5 });
+
+createStringStream(1000000, 0)
+  .pipeThrough(
+    new TransformStream<string, AppendRecord>({
+      transform(arr, controller) {
+        controller.enqueue(AppendRecord.make(arr));
+      },
+    }),
+  )
+  .pipeThrough(
+    new BatchTransform({
+      lingerDurationMillis: 100,
+    }),
+  )
+  .pipeTo(sesh.writable);
diff --git a/package.json b/package.json
index 4181455..9790c5d 100644
--- a/package.json
+++ b/package.json
@@ -45,7 +45,8 @@
     "LICENSE"
   ],
   "dependencies": {
-    "@protobuf-ts/runtime": "^2.11.1"
+    "@protobuf-ts/runtime": "^2.11.1",
+    "debug": "^4.4.3"
   },
   "devDependencies": {
     "@arethetypeswrong/cli": "^0.18.2",
@@ -54,6 +55,7 @@
     "@hey-api/openapi-ts": "^0.86.0",
     "@protobuf-ts/plugin": "^2.11.1",
     "@types/bun": "^1.3.1",
+    "@types/debug": "^4.1.12",
     "openapi-typescript": "^7.10.1",
     "protoc": "^33.0.0",
     "typedoc": "^0.28.14",
@@ -61,5 +63,6 @@
   },
   "peerDependencies": {
     "typescript": "^5.9.3"
-  }
+  },
+  "packageManager": "pnpm@10.12.4+sha512.5ea8b0deed94ed68691c9bad4c955492705c5eeb8a87ef86bc62c74a26b037b08ff9570f108b2e4dbd1dd1a9186fea925e527f141c648e85af45631074680184"
 }
diff --git a/src/accessTokens.ts b/src/accessTokens.ts
index 8c75fa6..bdb6eb1 100644
--- a/src/accessTokens.ts
+++ b/src/accessTokens.ts
@@ -1,5 +1,5 @@
-import type { DataToObject, S2RequestOptions } from "./common.js";
-import { S2Error } from "./error.js";
+import type { DataToObject, RetryConfig, S2RequestOptions } from "./common.js";
+import { S2Error, withS2Data } from "./error.js";
 import type { Client } from "./generated/client/types.gen.js";
 import {
   type IssueAccessTokenData,
@@ -9,6 +9,7 @@
   issueAccessToken,
   type ListAccessTokensData,
   listAccessTokens,
   type RevokeAccessTokenData,
   revokeAccessToken,
 } from "./generated/index.js";
+import { withRetries } from "./lib/retry.js";
 
 export interface ListAccessTokensArgs extends DataToObject<ListAccessTokensData> {}
 
@@ -19,9 +20,11 @@ export interface RevokeAccessTokenArgs
 export class S2AccessTokens {
   readonly client: Client;
+  private readonly retryConfig?: RetryConfig;
 
-  constructor(client: Client) {
+  constructor(client: Client, retryConfig?: RetryConfig) {
     this.client = client;
+    this.retryConfig = retryConfig;
   }
 
   /**
@@ -32,21 +35,15 @@ export class S2AccessTokens {
    * @param args.limit Max results (up to 1000)
    */
   public async list(args?: ListAccessTokensArgs, options?: S2RequestOptions) {
-    const response = await listAccessTokens({
-      client: this.client,
-      query: args,
-      ...options,
+    return await withRetries(this.retryConfig, async () => {
+      return await withS2Data(() =>
+        listAccessTokens({
+          client: this.client,
+          query: args,
+          ...options,
+        }),
+      );
     });
-
-    if (response.error) {
-      throw new S2Error({
-        message: response.error.message,
-        code: response.error.code ?? undefined,
-        status: response.response.status,
-      });
-    }
-
-    return response.data;
   }
 
   /**
@@ -58,21 +55,15 @@ export class S2AccessTokens {
    * @param args.expires_at Expiration in ISO 8601; defaults to requestor's token expiry
    */
   public async issue(args: IssueAccessTokenArgs, options?: S2RequestOptions) {
-    const response = await issueAccessToken({
-      client: this.client,
-      body: args,
-      ...options,
+    return await withRetries(this.retryConfig, async () => {
+      return await withS2Data(() =>
+        issueAccessToken({
+          client: this.client,
+          body: args,
+          ...options,
+        }),
+      );
     });
-
-    if (response.error) {
-      throw new S2Error({
-        message: response.error.message,
-        code: response.error.code ?? undefined,
-        status: response.response.status,
-      });
-    }
-
-    return response.data;
   }
 
   /**
@@ -81,20 +72,14 @@ export class S2AccessTokens {
    * @param args.id Token ID to revoke
    */
   public async revoke(args: RevokeAccessTokenArgs, options?: S2RequestOptions) {
-    const response = await revokeAccessToken({
-      client: this.client,
-      path: args,
-      ...options,
+    return await withRetries(this.retryConfig, async () => {
+      return await withS2Data(() =>
+        revokeAccessToken({
+          client: this.client,
+          path: args,
+          ...options,
+        }),
+      );
     });
-
-    if (response.error) {
-      throw new S2Error({
-        message: response.error.message,
-        code: response.error.code ?? undefined,
-        status: response.response.status,
-      });
-    }
-
-    return response.data;
   }
 }
diff --git a/src/basin.ts b/src/basin.ts
index e9ba382..c3e7c43 100644
--- a/src/basin.ts
+++ b/src/basin.ts
@@ -1,3 +1,4 @@
+import type { RetryConfig } from "./common.js";
 import { createClient, createConfig } from "./generated/client/index.js";
 import type { Client } from "./generated/client/types.gen.js";
 import * as Redacted from "./lib/redacted.js";
@@ -8,6 +9,7 @@
 import { S2Streams } from "./streams.js";
 
 export class S2Basin {
   private readonly client: Client;
   private readonly transportConfig: TransportConfig;
+  private readonly retryConfig?: RetryConfig;
   public readonly name: string;
   public readonly streams: S2Streams;
 
@@ -19,6 +21,7 @@
    * @param accessToken Redacted access token from the parent `S2` client
    * @param baseUrl Base URL for the basin (e.g. `https://my-basin.b.aws.s2.dev/v1`)
    * @param includeBasinHeader Include the `S2-Basin` header with the request
+   * @param retryConfig Retry configuration inherited from the parent S2 client
    */
   constructor(
     name: string,
     options: {
       accessToken: Redacted.Redacted;
       baseUrl: string;
       includeBasinHeader: boolean;
+      retryConfig?: RetryConfig;
     },
   ) {
     this.name = name;
+    this.retryConfig = options.retryConfig;
     this.transportConfig = {
       baseUrl: options.baseUrl,
       accessToken: options.accessToken,
+      basinName: options.includeBasinHeader ? name : undefined,
+      retry: options.retryConfig,
     };
     this.client = createClient(
       createConfig({
@@ -40,7 +47,8 @@
         headers: options.includeBasinHeader ? { "s2-basin": name } : {},
       }),
     );
-    this.streams = new S2Streams(this.client);
+
+    this.streams = new S2Streams(this.client, this.retryConfig);
   }
 
   /**
@@ -48,10 +56,15 @@
    * @param name Stream name
    */
   public stream(name: string, options?: StreamOptions) {
-    return new S2Stream(name, this.client, {
-      ...this.transportConfig,
-      forceTransport: options?.forceTransport,
-    });
+    return new S2Stream(
+      name,
+      this.client,
+      {
+        ...this.transportConfig,
+        forceTransport: options?.forceTransport,
+      },
+      this.retryConfig,
+    );
   }
 }
diff --git a/src/basins.ts b/src/basins.ts
index b1d494f..37076eb 100644
--- a/src/basins.ts
+++ b/src/basins.ts
@@ -1,18 +1,23 @@
-import type { DataToObject, S2RequestOptions } from "./common.js";
-import { S2Error } from "./error.js";
+import type { DataToObject, RetryConfig, S2RequestOptions } from "./common.js";
+import { withS2Data } from "./error.js";
 import type { Client } from "./generated/client/types.gen.js";
 import {
+  type BasinConfig,
   type CreateBasinData,
+  type CreateBasinResponse,
   createBasin,
   type DeleteBasinData,
   deleteBasin,
   type GetBasinConfigData,
   getBasinConfig,
   type ListBasinsData,
+  type ListBasinsResponse,
   listBasins,
   type ReconfigureBasinData,
+  type ReconfigureBasinResponse,
   reconfigureBasin,
 } from "./generated/index.js";
+import { withRetries } from "./lib/retry.js";
 
 export interface ListBasinsArgs extends DataToObject<ListBasinsData> {}
 export interface CreateBasinArgs extends DataToObject<CreateBasinData> {}
@@ -23,9 +28,11 @@ export interface ReconfigureBasinArgs
 export class S2Basins {
   private readonly client: Client;
+  private readonly retryConfig: RetryConfig;
 
-  constructor(client: Client) {
+  constructor(client: Client, retryConfig: RetryConfig) {
     this.client = client;
+    this.retryConfig = retryConfig;
   }
 
   /**
@@ -35,22 +42,19 @@
    * @param args.start_after Name to start after (for pagination)
    * @param args.limit Max results (up to 1000)
    */
-  public async list(args?: ListBasinsArgs, options?: S2RequestOptions) {
-    const response = await listBasins({
-      client: this.client,
-      query: args,
-      ...options,
+  public async list(
+    args?: ListBasinsArgs,
+    options?: S2RequestOptions,
+  ): Promise<ListBasinsResponse> {
+    return await withRetries(this.retryConfig, async () => {
+      return await withS2Data(() =>
+        listBasins({
+          client: this.client,
+          query: args,
+          ...options,
+        }),
+      );
     });
-
-    if (response.error) {
-      throw new S2Error({
-        message: response.error.message,
-        code: response.error.code ?? undefined,
-        status: response.response.status,
-      });
-    }
-
-    return response.data;
   }
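Since `list` now resolves to a typed `ListBasinsResponse` with retries built in, a pagination sketch may help sanity-check the `start_after`/`limit` contract. The `s2.basins` accessor and the `basins`/`has_more` response fields are assumptions taken from the generated types, not confirmed API:

```typescript
// Hypothetical pagination loop over basins (S2 client setup elided).
let startAfter: string | undefined;
while (true) {
  const page = await s2.basins.list({ limit: 100, start_after: startAfter });
  for (const basin of page.basins) console.log(basin.name); // field names assumed
  if (!page.has_more || page.basins.length === 0) break;
  startAfter = page.basins[page.basins.length - 1]!.name;
}
```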
 
   /**
    * Create a new basin.
    *
    * @param args.basin Basin name
@@ -60,22 +64,19 @@
    * @param args.config Optional basin configuration (e.g. default stream config)
    * @param args.scope Basin scope
    */
-  public async create(args: CreateBasinArgs, options?: S2RequestOptions) {
-    const response = await createBasin({
-      client: this.client,
-      body: args,
-      ...options,
+  public async create(
+    args: CreateBasinArgs,
+    options?: S2RequestOptions,
+  ): Promise<CreateBasinResponse> {
+    return await withRetries(this.retryConfig, async () => {
+      return await withS2Data(() =>
+        createBasin({
+          client: this.client,
+          body: args,
+          ...options,
+        }),
+      );
     });
-
-    if (response.error) {
-      throw new S2Error({
-        message: response.error.message,
-        code: response.error.code ?? undefined,
-        status: response.response.status,
-      });
-    }
-
-    return response.data;
   }
 
   /**
@@ -83,22 +84,19 @@
    *
    * @param args.basin Basin name
    */
-  public async getConfig(args: GetBasinConfigArgs, options?: S2RequestOptions) {
-    const response = await getBasinConfig({
-      client: this.client,
-      path: args,
-      ...options,
+  public async getConfig(
+    args: GetBasinConfigArgs,
+    options?: S2RequestOptions,
+  ): Promise<BasinConfig> {
+    return await withRetries(this.retryConfig, async () => {
+      return await withS2Data(() =>
+        getBasinConfig({
+          client: this.client,
+          path: args,
+          ...options,
+        }),
+      );
     });
-
-    if (response.error) {
-      throw new S2Error({
-        message: response.error.message,
-        code: response.error.code ?? undefined,
-        status: response.response.status,
-      });
-    }
-
-    return response.data;
   }
 
   /**
@@ -106,22 +104,19 @@
    *
    * @param args.basin Basin name
    */
-  public async delete(args: DeleteBasinArgs, options?: S2RequestOptions) {
-    const response = await deleteBasin({
-      client: this.client,
-      path: args,
-      ...options,
+  public async delete(
+    args: DeleteBasinArgs,
+    options?: S2RequestOptions,
+  ): Promise<void> {
+    await withRetries(this.retryConfig, async () => {
+      return await withS2Data(() =>
+        deleteBasin({
+          client: this.client,
+          path: args,
+          ...options,
+        }),
+      );
     });
-
-    if (response.error) {
-      throw new S2Error({
-        message: response.error.message,
-        code: response.error.code ?? undefined,
-        status: response.response.status,
-      });
-    }
-
-    return response.data;
   }
 
   /**
@@ -133,22 +128,16 @@
   public async reconfigure(
     args: ReconfigureBasinArgs,
     options?: S2RequestOptions,
-  ) {
-    const response = await reconfigureBasin({
-      client: this.client,
-      path: args,
-      body: args,
-      ...options,
+  ): Promise<ReconfigureBasinResponse> {
+    return await withRetries(this.retryConfig, async () => {
+      return await withS2Data(() =>
+        reconfigureBasin({
+          client: this.client,
+          path: args,
+          body: args,
+          ...options,
+        }),
+      );
     });
-
-    if (response.error) {
-      throw new S2Error({
-        message: response.error.message,
-        code: response.error.code ?? undefined,
-        status: response.response.status,
-      });
-    }
-
-    return response.data;
   }
 }
diff --git a/src/batch-transform.ts b/src/batch-transform.ts
index 285f56f..9658220 100644
--- a/src/batch-transform.ts
+++ b/src/batch-transform.ts
@@ -30,7 +30,7 @@ export type BatchOutput = {
  * @example
  * ```typescript
  * const batcher = new BatchTransform<"string">({
- *   lingerDuration: 20,
+ *   lingerDurationMillis: 20,
  *   maxBatchRecords: 100,
  *   maxBatchBytes: 256 * 1024,
  *   match_seq_num: 0 // Optional: auto-increments per batch
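The validation hunk that follows replaces silent clamping with thrown errors, which is caller-visible. A quick sketch of the new behavior (import path assumed):

```typescript
import { BatchTransform } from "../src/index.js"; // path assumed

try {
  // Previously clamped to 1000 records; now throws an sdk-origin S2Error (status 400).
  new BatchTransform<"string">({ maxBatchRecords: 5000 });
} catch (e) {
  console.error((e as Error).message);
  // -> "maxBatchRecords must be between 1 and 1000 (inclusive); got 5000"
}
```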
@@ -80,12 +80,39 @@
       },
     });
 
-    // Cap at maximum allowed values
-    this.maxBatchRecords = Math.min(args?.maxBatchRecords ?? 1000, 1000);
-    this.maxBatchBytes = Math.min(
-      args?.maxBatchBytes ?? 1024 * 1024,
-      1024 * 1024,
-    );
+    // Validate configuration
+    if (args?.maxBatchRecords !== undefined) {
+      if (args.maxBatchRecords < 1 || args.maxBatchRecords > 1000) {
+        throw new S2Error({
+          message: `maxBatchRecords must be between 1 and 1000 (inclusive); got ${args.maxBatchRecords}`,
+          status: 400,
+          origin: "sdk",
+        });
+      }
+    }
+    if (args?.maxBatchBytes !== undefined) {
+      const max = 1024 * 1024;
+      if (args.maxBatchBytes < 1 || args.maxBatchBytes > max) {
+        throw new S2Error({
+          message: `maxBatchBytes must be between 1 and ${max} (1 MiB) bytes (inclusive); got ${args.maxBatchBytes}`,
+          status: 400,
+          origin: "sdk",
+        });
+      }
+    }
+    if (args?.lingerDurationMillis !== undefined) {
+      if (args.lingerDurationMillis < 0) {
+        throw new S2Error({
+          message: `lingerDurationMillis must be >= 0; got ${args.lingerDurationMillis}`,
+          status: 400,
+          origin: "sdk",
+        });
+      }
+    }
+
+    // Apply defaults
+    this.maxBatchRecords = args?.maxBatchRecords ?? 1000;
+    this.maxBatchBytes = args?.maxBatchBytes ?? 1024 * 1024;
     this.lingerDuration = args?.lingerDurationMillis ?? 5;
     this.fencing_token = args?.fencing_token;
     this.next_match_seq_num = args?.match_seq_num;
@@ -98,6 +125,8 @@
       if (recordSize > this.maxBatchBytes) {
         throw new S2Error({
           message: `Record size ${recordSize} bytes exceeds maximum batch size of ${this.maxBatchBytes} bytes`,
+          status: 400,
+          origin: "sdk",
         });
       }
diff --git a/src/common.ts b/src/common.ts
index decbbee..8596895 100644
--- a/src/common.ts
+++ b/src/common.ts
@@ -1,3 +1,44 @@
+/**
+ * Policy for retrying append operations.
+ *
+ * - `all`: Retry all append operations, including those that may have side effects
+ * - `noSideEffects`: Only retry append operations that are guaranteed to have no side effects
+ */
+export type AppendRetryPolicy = "all" | "noSideEffects";
+
+/**
+ * Retry configuration for handling transient failures.
+ */
+export type RetryConfig = {
+  /**
+   * Maximum number of retry attempts.
+   * Set to 0 to disable retries.
+   * @default 3
+   */
+  maxAttempts?: number;
+
+  /**
+   * Base delay in milliseconds between retry attempts.
+   * Uses exponential backoff with random jitter:
+   * delay ~ retryBackoffDurationMs * 2^attempt, plus up to that amount again in jitter.
+   * @default 100
+   */
+  retryBackoffDurationMs?: number;
+
+  /**
+   * Policy for retrying append operations.
+   * @default "noSideEffects"
+   */
+  appendRetryPolicy?: AppendRetryPolicy;
+
+  /**
+   * Maximum time in milliseconds to wait for an append ack before considering
+   * the attempt timed out and applying retry logic.
+   *
+   * Used by retrying append sessions. When unset, defaults to 5000ms.
+   */
+  requestTimeoutMillis?: number;
+};
+
 /**
  * Configuration for constructing the top-level `S2` client.
  *
@@ -19,6 +60,12 @@
    * Defaults to `https://{basin}.b.aws.s2.dev`.
    */
   makeBasinBaseUrl?: (basin: string) => string;
+  /**
+   * Retry configuration for handling transient failures.
+   * Applies to management operations (basins, streams, tokens) and stream operations (read, append).
+   * @default { maxAttempts: 3, retryBackoffDurationMs: 100, appendRetryPolicy: "noSideEffects", requestTimeoutMillis: 5000 }
+   */
+  retry?: RetryConfig;
 };
 
 /**
diff --git a/src/error.ts b/src/error.ts
index aacfcfd..cf1a6f1 100644
--- a/src/error.ts
+++ b/src/error.ts
@@ -1,3 +1,170 @@
+function isConnectionError(error: unknown): boolean {
+  if (!(error instanceof Error)) {
+    return false;
+  }
+
+  if (error.message.includes("fetch failed")) {
+    return true;
+  }
+
+  const cause = (error as any).cause;
+  let code = (error as any).code;
+  if (cause && typeof cause === "object") {
+    code = cause.code;
+  }
+
+  // Common connection error codes from the Node.js net module
+  const connectionErrorCodes = [
+    "ECONNREFUSED", // Connection refused
+    "ENOTFOUND", // DNS lookup failed
+    "ETIMEDOUT", // Connection timeout
+    "ENETUNREACH", // Network unreachable
+    "EHOSTUNREACH", // Host unreachable
+    "ECONNRESET", // Connection reset by peer
+    "EPIPE", // Broken pipe
+  ];
+
+  return connectionErrorCodes.includes(code);
+}
+
+export function s2Error(error: any): S2Error {
+  if (error instanceof S2Error) {
+    return error;
+  }
+
+  // Connection error?
+  if (isConnectionError(error)) {
+    const cause = (error as any).cause;
+    const code = cause?.code || "NETWORK_ERROR";
+    return new S2Error({
+      message: `Connection failed: ${code}`,
+      status: 502, // Bad Gateway for upstream/network issues
+      origin: "sdk",
+    });
+  }
+
+  // Abort error?
+  if (error instanceof Error && error.name === "AbortError") {
+    return new S2Error({
+      message: "Request cancelled",
+      status: 499, // Client Closed Request (nginx non-standard)
+      origin: "sdk",
+    });
+  }
+
+  // Other unknown errors
+  return new S2Error({
+    message: error instanceof Error ? error.message : "Unknown error",
+    status: 0, // Non-HTTP/internal error sentinel
+    origin: "sdk",
+  });
+}
+
+export async function withS2Error<T>(fn: () => Promise<T>): Promise<T> {
+  try {
+    const result: any = await fn();
+
+    // Support response-parsing mode (throwOnError=false):
+    // Generated client responses have shape { data, error?, response }
+    if (
+      result &&
+      typeof result === "object" &&
+      Object.prototype.hasOwnProperty.call(result, "error")
+    ) {
+      const err = result.error;
+      if (err) {
+        const status = (result.response?.status as number | undefined) ?? 500;
+        const statusText = result.response?.statusText as string | undefined;
+
+        // If the server provided a structured error with message/code, use it
+        if (typeof err === "object" && "message" in err) {
+          throw new S2Error({
+            message: (err as any).message ?? statusText ?? "Error",
+            code: (err as any).code ?? undefined,
+            status,
+            origin: "server",
+          });
+        }
+
+        // Fallback: synthesize from HTTP response metadata
+        throw new S2Error({
+          message: statusText ?? "Request failed",
+          status,
+          origin: "server",
+        });
+      }
+    }
+
+    return result as T;
+  } catch (error) {
+    // Network and other thrown errors
+    throw s2Error(error);
+  }
+}
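The two wrappers split responsibilities: `withS2Error` normalizes thrown and HTTP errors, while `withS2Data` (next) additionally unwraps `data`. A sketch of the intended call pattern — the `client` value and import paths here are assumptions:

```typescript
import { withS2Data } from "./error.js";
import { listBasins } from "./generated/index.js";

declare const client: unknown; // hypothetical: any configured generated client

// Resolves with the response data, or throws S2Error
// (origin "server" for HTTP errors, "sdk" for network/abort failures).
const page = await withS2Data(() =>
  listBasins({ client: client as any, query: { limit: 10 } }),
);
console.log(page);
```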
+ */ +export async function withS2Data( + fn: () => Promise< + | { + data?: T; + error?: unknown; + response?: { status?: number; statusText?: string }; + } + | T + >, +): Promise { + try { + const res: any = await fn(); + if ( + res && + typeof res === "object" && + (Object.prototype.hasOwnProperty.call(res, "error") || + Object.prototype.hasOwnProperty.call(res, "data") || + Object.prototype.hasOwnProperty.call(res, "response")) + ) { + const status = (res.response?.status as number | undefined) ?? 500; + const statusText = res.response?.statusText as string | undefined; + if (res.error) { + const err = res.error; + if (typeof err === "object" && "message" in err) { + throw new S2Error({ + message: (err as any).message ?? statusText ?? "Error", + code: (err as any).code ?? undefined, + status, + origin: "server", + }); + } + throw new S2Error({ + message: statusText ?? "Request failed", + status, + origin: "server", + }); + } + // No error + if (typeof res.data !== "undefined") return res.data as T; + // Treat 204 as success for void endpoints + if (status === 204) return undefined as T; + throw new S2Error({ + message: "Empty response", + status, + origin: "server", + }); + } + // Not a generated client response; return as-is + return res as T; + } catch (error) { + throw s2Error(error); + } +} + /** * Rich error type used by the SDK to surface HTTP and protocol errors. * @@ -7,24 +174,70 @@ */ export class S2Error extends Error { public readonly code?: string; - public readonly status?: number; + public readonly status: number; + /** Optional structured error details for diagnostics. */ + public readonly data?: unknown; + /** Origin of the error: server (HTTP response) or sdk (local). */ + public readonly origin: "server" | "sdk"; constructor({ message, code, status, + data, + origin, }: { message: string; code?: string; status?: number; + data?: unknown; + origin?: "server" | "sdk"; }) { super(message); this.code = code; - this.status = status; + // Ensure status is always a number (0 for non-HTTP/internal errors) + this.status = typeof status === "number" ? status : 0; + this.data = data; + this.origin = origin ?? "sdk"; this.name = "S2Error"; } } +/** Helper: construct a non-retryable invariant violation error (400). */ +export function invariantViolation( + message: string, + details?: unknown, +): S2Error { + return new S2Error({ + message: `Invariant violation: ${message}`, + code: "INTERNAL_ERROR", + status: 500, + origin: "sdk", + data: details, + }); +} + +/** Helper: construct an internal SDK error (status 0, never retried). */ +export function internalSdkError(message: string, details?: unknown): S2Error { + return new S2Error({ + message: `Internal SDK error: ${message}`, + code: "INTERNAL_SDK_ERROR", + status: 0, + origin: "sdk", + data: details, + }); +} + +/** Helper: construct an aborted/cancelled error (499). */ +export function abortedError(message: string = "Request cancelled"): S2Error { + return new S2Error({ + message, + code: "ABORTED", + status: 499, + origin: "sdk", + }); +} + /** * Thrown when an append operation fails due to a sequence number mismatch. 
* @@ -53,6 +266,7 @@ export class SeqNumMismatchError extends S2Error { message: `${message}\nExpected sequence number: ${expectedSeqNum}`, code, status, + origin: "server", }); this.name = "SeqNumMismatchError"; this.expectedSeqNum = expectedSeqNum; @@ -87,6 +301,7 @@ export class FencingTokenMismatchError extends S2Error { message: `${message}\nExpected fencing token: ${expectedFencingToken}`, code, status, + origin: "server", }); this.name = "FencingTokenMismatchError"; this.expectedFencingToken = expectedFencingToken; @@ -116,7 +331,77 @@ export class RangeNotSatisfiableError extends S2Error { message, code, status, + origin: "server", }); this.name = "RangeNotSatisfiableError"; } } + +/** + * Build a generic S2Error from HTTP status and optional payload. + * If the payload contains a structured { message, code }, those are preferred. + */ +export function makeServerError( + response: { status?: number; statusText?: string }, + payload?: unknown, +): S2Error { + const status = typeof response.status === "number" ? response.status : 500; + // Pull message/code from structured payload when present + if (payload && typeof payload === "object" && "message" in (payload as any)) { + return new S2Error({ + message: (payload as any).message ?? response.statusText ?? "Error", + code: (payload as any).code ?? undefined, + status, + origin: "server", + }); + } + // Fallbacks + let message: string | undefined = undefined; + if (typeof payload === "string" && payload.trim().length > 0) { + message = payload; + } + return new S2Error({ + message: message ?? response.statusText ?? "Request failed", + status, + origin: "server", + }); +} + +/** Map 412 Precondition Failed append errors to rich error types. */ +export function makeAppendPreconditionError( + status: number, + json: any, +): S2Error { + if (json && typeof json === "object") { + if ("seq_num_mismatch" in json) { + const expected = Number(json.seq_num_mismatch); + return new SeqNumMismatchError({ + message: "Append condition failed: sequence number mismatch", + code: "APPEND_CONDITION_FAILED", + status, + expectedSeqNum: expected, + }); + } + if ("fencing_token_mismatch" in json) { + const expected = String(json.fencing_token_mismatch); + return new FencingTokenMismatchError({ + message: "Append condition failed: fencing token mismatch", + code: "APPEND_CONDITION_FAILED", + status, + expectedFencingToken: expected, + }); + } + if ("message" in json) { + return new S2Error({ + message: json.message ?? 
"Append condition failed", + status, + origin: "server", + }); + } + } + return new S2Error({ + message: "Append condition failed", + status, + origin: "server", + }); +} diff --git a/src/generated/client/types.gen.ts b/src/generated/client/types.gen.ts index d68ab68..c037719 100644 --- a/src/generated/client/types.gen.ts +++ b/src/generated/client/types.gen.ts @@ -10,6 +10,7 @@ import type { Config as CoreConfig, } from '../core/types.gen.js'; import type { Middleware } from './utils.gen.js'; +import type {S2Error} from "../../error.js"; export type ResponseStyle = 'data' | 'fields'; @@ -204,7 +205,7 @@ export type Client = CoreClient< BuildUrlFn, SseFn > & { - interceptors: Middleware; + interceptors: Middleware; }; /** diff --git a/src/index.ts b/src/index.ts index a9905a0..ec82e31 100644 --- a/src/index.ts +++ b/src/index.ts @@ -12,6 +12,12 @@ export type { } from "./basins.js"; export type { BatchOutput, BatchTransformArgs } from "./batch-transform.js"; export { BatchTransform } from "./batch-transform.js"; +export type { + AppendRetryPolicy, + RetryConfig, + S2ClientOptions, + S2RequestOptions, +} from "./common.js"; export { FencingTokenMismatchError, RangeNotSatisfiableError, diff --git a/src/lib/result.ts b/src/lib/result.ts new file mode 100644 index 0000000..0844375 --- /dev/null +++ b/src/lib/result.ts @@ -0,0 +1,58 @@ +/** + * Result types for AppendSession operations. + * Using discriminated unions for ergonomic error handling with TypeScript control flow analysis. + */ + +import { S2Error } from "../error.js"; +import type { AppendAck } from "../generated/index.js"; + +/** + * Result of an append operation. + * Use discriminated union pattern: check `result.ok` to access either `value` or `error`. + */ +export type AppendResult = + | { ok: true; value: AppendAck } + | { ok: false; error: S2Error }; + +/** + * Result of a close operation. + */ +export type CloseResult = { ok: true } | { ok: false; error: S2Error }; + +/** + * Constructs a successful append result. + */ +export function ok(value: AppendAck): AppendResult { + return { ok: true, value }; +} + +/** + * Constructs a failed append result. + */ +export function err(error: S2Error): AppendResult { + return { ok: false, error }; +} + +/** + * Constructs a successful close result. + */ +export function okClose(): CloseResult { + return { ok: true }; +} + +/** + * Constructs a failed close result. + */ +export function errClose(error: S2Error): CloseResult { + return { ok: false, error }; +} + +/** + * Type guard to check if a result is successful. + * Mainly for internal use; prefer `result.ok` for public API. 
+ */ +export function isOk( + result: { ok: true; value: T } | { ok: false; error: S2Error }, +): result is { ok: true; value: T } { + return result.ok; +} diff --git a/src/lib/retry.ts b/src/lib/retry.ts new file mode 100644 index 0000000..43eba13 --- /dev/null +++ b/src/lib/retry.ts @@ -0,0 +1,1145 @@ +import createDebug from "debug"; +import type { RetryConfig } from "../common.js"; +import { invariantViolation, S2Error, s2Error, withS2Error } from "../error.js"; +import type { AppendAck, StreamPosition } from "../generated/index.js"; +import { meteredSizeBytes } from "../utils.js"; +import type { AppendResult, CloseResult } from "./result.js"; +import { err, errClose, ok, okClose } from "./result.js"; +import type { + AcksStream, + AppendArgs, + AppendRecord, + AppendSessionOptions, + ReadArgs, + ReadRecord, + TransportAppendSession, + TransportReadSession, +} from "./stream/types.js"; + +const debugWith = createDebug("s2:retry:with"); +const debugRead = createDebug("s2:retry:read"); +const debugSession = createDebug("s2:retry:session"); + +/** + * Default retry configuration. + */ +export const DEFAULT_RETRY_CONFIG: Required & { + requestTimeoutMillis: number; +} = { + maxAttempts: 3, + retryBackoffDurationMs: 100, + appendRetryPolicy: "noSideEffects", + requestTimeoutMillis: 5000, // 5 seconds +}; + +const RETRYABLE_STATUS_CODES = new Set([ + 408, // request_timeout + 429, // too_many_requests + 500, // internal_server_error + 502, // bad_gateway + 503, // service_unavailable +]); + +/** + * Determines if an error should be retried based on its characteristics. + * 400-level errors (except 408, 429) are non-retryable validation/client errors. + */ +export function isRetryable(error: S2Error): boolean { + if (!error.status) return false; + + // Explicit retryable codes (including some 4xx like 408, 429) + if (RETRYABLE_STATUS_CODES.has(error.status)) { + return true; + } + + // 400-level errors are generally non-retryable (validation, bad request) + if (error.status >= 400 && error.status < 500) { + return false; + } + + return false; +} + +/** + * Calculates the delay before the next retry attempt using exponential backoff. + */ +export function calculateDelay(attempt: number, baseDelayMs: number): number { + // Exponential backoff: baseDelay * (2 ^ attempt) + const delay = baseDelayMs * Math.pow(2, attempt); + // Add jitter: random value between 0 and delay + const jitter = Math.random() * delay; + return Math.floor(delay + jitter); +} + +/** + * Sleeps for the specified duration. + */ +export function sleep(ms: number): Promise { + return new Promise((resolve) => setTimeout(resolve, ms)); +} + +/** + * Executes an async function with automatic retry logic for transient failures. 
+ * + * @param retryConfig Retry configuration (max attempts, backoff duration) + * @param fn The async function to execute + * @returns The result of the function + * @throws The last error if all retry attempts are exhausted + */ +export async function withRetries( + retryConfig: RetryConfig | undefined, + fn: () => Promise, + isPolicyCompliant: (config: RetryConfig, error: S2Error) => boolean = () => + true, +): Promise { + const config = { + ...DEFAULT_RETRY_CONFIG, + ...retryConfig, + }; + + // If maxAttempts is 0, don't retry at all + if (config.maxAttempts === 0) { + debugWith("maxAttempts is 0, retries disabled"); + return fn(); + } + + let lastError: S2Error | undefined = undefined; + + for (let attempt = 0; attempt <= config.maxAttempts; attempt++) { + try { + const result = await fn(); + if (attempt > 0) { + debugWith("succeeded after %d retries", attempt); + } + return result; + } catch (error) { + // withRetry only handles S2Errors (withS2Error should be called first) + if (!(error instanceof S2Error)) { + debugWith("non-S2Error thrown, rethrowing immediately: %s", error); + throw error; + } + + lastError = error; + + // Don't retry if this is the last attempt + if (attempt === config.maxAttempts) { + debugWith("max attempts exhausted, throwing error"); + break; + } + + // Check if error is retryable + if (!isPolicyCompliant(config, lastError) || !isRetryable(lastError)) { + debugWith("error not retryable, throwing immediately"); + throw error; + } + + // Calculate delay and wait before retrying + const delay = calculateDelay(attempt, config.retryBackoffDurationMs); + debugWith( + "retryable error, backing off for %dms, status=%s", + delay, + error.status, + ); + await sleep(delay); + } + } + + throw lastError; +} +export class ReadSession< + Format extends "string" | "bytes" = "string", +> extends ReadableStream> { + private _nextReadPosition: StreamPosition | undefined = undefined; + private _lastObservedTail: StreamPosition | undefined = undefined; + + private _recordsRead: number = 0; + private _bytesRead: number = 0; + + static async create( + generator: ( + args: ReadArgs, + ) => Promise>, + args: ReadArgs = {}, + config?: RetryConfig, + ) { + return new ReadSession(args, generator, config); + } + + private constructor( + args: ReadArgs, + generator: ( + args: ReadArgs, + ) => Promise>, + config?: RetryConfig, + ) { + const retryConfig = { + ...DEFAULT_RETRY_CONFIG, + ...config, + }; + let session: TransportReadSession | undefined = undefined; + const startTimeMs = performance.now(); // Capture start time before super() + super({ + start: async (controller) => { + let nextArgs = { ...args } as ReadArgs; + // Capture original request budget so retries compute from a stable baseline + const baselineCount = args?.count; + const baselineBytes = args?.bytes; + const baselineWait = args?.wait; + let attempt = 0; + + while (true) { + debugRead("starting read session with args: %o", nextArgs); + session = await generator(nextArgs); + const reader = session.getReader(); + + while (true) { + const { done, value: result } = await reader.read(); + // Update last observed tail if transport exposes it + try { + const tail = session.lastObservedTail?.(); + if (tail) this._lastObservedTail = tail; + } catch {} + if (done) { + reader.releaseLock(); + controller.close(); + return; + } + + // Check if result is an error + if (!result.ok) { + reader.releaseLock(); + const error = result.error; + + // Check if we can retry (track session attempts, not record reads) + if (isRetryable(error) 
+              if (isRetryable(error) && attempt < retryConfig.maxAttempts) {
+                if (this._nextReadPosition) {
+                  nextArgs.seq_num = this._nextReadPosition.seq_num as any;
+                  // Clear alternative start position fields to avoid conflicting params
+                  delete (nextArgs as any).timestamp;
+                  delete (nextArgs as any).tail_offset;
+                }
+                // Compute planned backoff delay now so we can subtract it from the wait budget
+                const delay = calculateDelay(
+                  attempt,
+                  retryConfig.retryBackoffDurationMs,
+                );
+                // Recompute remaining budget from the original request each time to avoid double-subtraction
+                if (baselineCount !== undefined) {
+                  const remaining = Math.max(
+                    0,
+                    baselineCount - this._recordsRead,
+                  );
+                  nextArgs.count = remaining as any;
+                }
+                if (baselineBytes !== undefined) {
+                  const remaining = Math.max(
+                    0,
+                    baselineBytes - this._bytesRead,
+                  );
+                  nextArgs.bytes = remaining as any;
+                }
+                // Adjust wait from the original budget based on total elapsed time since start
+                if (baselineWait !== undefined) {
+                  const elapsedSeconds =
+                    (performance.now() - startTimeMs) / 1000;
+                  nextArgs.wait = Math.max(
+                    0,
+                    baselineWait - (elapsedSeconds + delay / 1000),
+                  ) as any;
+                }
+                // Proactively cancel the current transport session before retrying
+                try {
+                  await session.cancel?.("retry");
+                } catch {}
+
+                debugRead(
+                  "will retry after %dms, status=%s",
+                  delay,
+                  error.status,
+                );
+                await sleep(delay);
+                attempt++;
+                break; // Break inner loop to retry
+              }
+
+              // Error is not retryable or attempts exhausted
+              debugRead("error in retry loop: %s", error);
+              controller.error(error);
+              return;
+            }
+
+            // Success: enqueue the record and reset retry attempt counter
+            const record = result.value;
+            this._nextReadPosition = {
+              seq_num: record.seq_num + 1,
+              timestamp: record.timestamp,
+            };
+            this._recordsRead++;
+            this._bytesRead += meteredSizeBytes(record);
+            attempt = 0;
+
+            controller.enqueue(record);
+          }
+        }
+      },
+      cancel: async (reason) => {
+        try {
+          await session?.cancel(reason);
+        } catch (err) {
+          // Ignore ERR_INVALID_STATE - stream may already be closed/cancelled
+          if ((err as any)?.code !== "ERR_INVALID_STATE") {
+            throw err;
+          }
+        }
+      },
+    });
+  }
+
+  async [Symbol.asyncDispose]() {
+    await this.cancel("disposed");
+  }
+
+  // Polyfill for older browsers / Node.js environments
+  [Symbol.asyncIterator](): AsyncIterableIterator<ReadRecord<Format>> {
+    const fn = (ReadableStream.prototype as any)[Symbol.asyncIterator];
+    if (typeof fn === "function") return fn.call(this);
+    const reader = this.getReader();
+    return {
+      next: async () => {
+        const r = await reader.read();
+        if (r.done) {
+          reader.releaseLock();
+          return { done: true, value: undefined };
+        }
+        return { done: false, value: r.value };
+      },
+      throw: async (e) => {
+        try {
+          await reader.cancel(e);
+        } catch (err) {
+          if ((err as any)?.code !== "ERR_INVALID_STATE") throw err;
+        }
+        reader.releaseLock();
+        return { done: true, value: undefined };
+      },
+      return: async () => {
+        try {
+          await reader.cancel("done");
+        } catch (err) {
+          if ((err as any)?.code !== "ERR_INVALID_STATE") throw err;
+        }
+        reader.releaseLock();
+        return { done: true, value: undefined };
+      },
+      [Symbol.asyncIterator]() {
+        return this;
+      },
+    };
+  }
+
+  lastObservedTail(): StreamPosition | undefined {
+    return this._lastObservedTail;
+  }
+
+  nextReadPosition(): StreamPosition | undefined {
+    return this._nextReadPosition;
+  }
+}
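A usage sketch for the retrying `ReadSession`. The transport factory shown is hypothetical — any function producing a `TransportReadSession` fits:

```typescript
declare function makeTransportSession(
  args: ReadArgs,
): Promise<TransportReadSession<"string">>; // hypothetical transport factory

const session = await ReadSession.create(
  (args) => makeTransportSession(args),
  { seq_num: 0, count: 100 }, // start position and budget; retries recompute both
  { maxAttempts: 5 },
);

for await (const record of session) {
  console.log(record.seq_num, record.body);
}
// Where a subsequent read should resume:
console.log("next:", session.nextReadPosition()?.seq_num);
```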
+ * + * Architecture: + * - All writes (submit() and writable.write()) are serialized through inflightQueue + * - inflightQueue tracks batches that have been submitted but not yet acked + * - Background ack reader consumes acks and matches them FIFO with inflightQueue + * - On error, _initSession() recreates session and re-transmits all inflightQueue batches + * - Ack timeout is fatal: if no ack arrives within the timeout window, + * the session aborts and rejects queued writers + * + * Flow for a successful append: + * 1. submit(records) adds batch to inflightQueue with promise resolvers + * 2. Calls underlying session.submit() to send batch + * 3. Background reader receives ack, validates record count + * 4. Resolves promise, removes from inflightQueue, forwards ack to user + * + * Flow for a failed append: + * 1. submit(records) adds batch to inflightQueue + * 2. Calls underlying session.submit() which fails + * 3. Checks if retryable (status code, retry policy, idempotency) + * 4. Calls _initSession() which closes old session, creates new session + * 5. _initSession() re-transmits ALL batches in inflightQueue (recovery) + * 6. Background reader receives acks for recovered batches + * 7. Original submit() call's promise is resolved by background reader + * + * Invariants: + * - Exactly one ack per batch in FIFO order + * - Ack record count matches batch record count + * - Acks arrive within ackTimeoutMs (5s) or session is retried + */ +/** + * New simplified inflight entry for the pump-based architecture. + * Each entry tracks a batch and its promise from the inner transport session. + */ +type InflightEntry = { + records: AppendRecord[]; + args?: Omit & { precalculatedSize?: number }; + expectedCount: number; + meteredBytes: number; + attemptStartedMonotonicMs?: number; // Monotonic timestamp (performance.now) for per-attempt ack timeout anchoring + innerPromise: Promise; // Promise from transport session + maybeResolve?: (result: AppendResult) => void; // Resolver for submit() callers +}; + +const DEFAULT_MAX_QUEUED_BYTES = 10 * 1024 * 1024; // 10 MiB default + +export class AppendSession implements AsyncDisposable { + private readonly requestTimeoutMillis: number; + private readonly maxQueuedBytes: number; + private readonly maxInflightBatches?: number; + private readonly retryConfig: Required & { + requestTimeoutMillis: number; + }; + + private readonly inflight: InflightEntry[] = []; + private capacityWaiter?: () => void; // Single waiter (WritableStream writer lock) + + private session?: TransportAppendSession; + private queuedBytes = 0; + private pendingBytes = 0; + private consecutiveFailures = 0; + private currentAttempt = 0; + + private pumpPromise?: Promise; + private pumpStopped = false; + private closing = false; + private pumpWakeup?: () => void; + private closed = false; + private fatalError?: S2Error; + + private _lastAckedPosition?: AppendAck; + private acksController?: ReadableStreamDefaultController; + + public readonly readable: ReadableStream; + public readonly writable: WritableStream; + + /** + * If the session has failed, returns the original fatal error that caused + * the pump to stop. Returns undefined when the session has not failed. 
+ */ + failureCause(): S2Error | undefined { + return this.fatalError; + } + + constructor( + private readonly generator: ( + options?: AppendSessionOptions, + ) => Promise, + private readonly sessionOptions?: AppendSessionOptions, + config?: RetryConfig, + ) { + this.retryConfig = { + ...DEFAULT_RETRY_CONFIG, + ...config, + }; + this.requestTimeoutMillis = this.retryConfig.requestTimeoutMillis; + this.maxQueuedBytes = + this.sessionOptions?.maxQueuedBytes ?? DEFAULT_MAX_QUEUED_BYTES; + this.maxInflightBatches = this.sessionOptions?.maxInflightBatches; + + this.readable = new ReadableStream({ + start: (controller) => { + this.acksController = controller; + }, + }); + + this.writable = new WritableStream({ + write: async (chunk) => { + const recordsArray = Array.isArray(chunk.records) + ? chunk.records + : [chunk.records]; + + // Calculate metered size + let batchMeteredSize = 0; + for (const record of recordsArray) { + batchMeteredSize += meteredSizeBytes(record); + } + + // Wait for capacity (backpressure for writable only) + await this.waitForCapacity(batchMeteredSize); + + const args = { ...chunk } as Omit & { + precalculatedSize?: number; + }; + delete (args as any).records; + args.precalculatedSize = batchMeteredSize; + + // Move reserved bytes to queued bytes accounting before submission + this.pendingBytes = Math.max(0, this.pendingBytes - batchMeteredSize); + + // Submit without waiting for ack (writable doesn't need per-batch resolution) + const promise = this.submitInternal( + recordsArray, + args, + batchMeteredSize, + ); + promise.catch(() => { + // Swallow to avoid unhandled rejection; pump surfaces errors via readable stream + }); + }, + close: async () => { + await this.close(); + }, + abort: async (reason) => { + const error = new S2Error({ + message: `AppendSession aborted: ${reason}`, + status: 499, + }); + await this.abort(error); + }, + }); + } + + static async create( + generator: ( + options?: AppendSessionOptions, + ) => Promise, + sessionOptions?: AppendSessionOptions, + config?: RetryConfig, + ): Promise { + return new AppendSession(generator, sessionOptions, config); + } + + /** + * Submit an append request. Returns a promise that resolves with the ack. + * This method does not block on capacity (only writable.write() does). + */ + async submit( + records: AppendRecord | AppendRecord[], + args?: Omit & { precalculatedSize?: number }, + ): Promise { + const recordsArray = Array.isArray(records) ? records : [records]; + + // Calculate metered size if not provided + let batchMeteredSize = args?.precalculatedSize ?? 0; + if (batchMeteredSize === 0) { + for (const record of recordsArray) { + batchMeteredSize += meteredSizeBytes(record); + } + } + + const result = await this.submitInternal( + recordsArray, + args, + batchMeteredSize, + ); + + // Convert discriminated union back to throw pattern for public API + if (result.ok) { + return result.value; + } else { + throw result.error; + } + } + + /** + * Internal submit that returns discriminated union. + * Creates inflight entry and starts pump if needed. 
+ */ + private submitInternal( + records: AppendRecord[], + args: + | (Omit & { precalculatedSize?: number }) + | undefined, + batchMeteredSize: number, + ): Promise { + if (this.closed || this.closing) { + return Promise.resolve( + err(new S2Error({ message: "AppendSession is closed", status: 400 })), + ); + } + + // Check for fatal error (e.g., from abort()) + if (this.fatalError) { + debugSession( + "[SUBMIT] rejecting due to fatal error: %s", + this.fatalError.message, + ); + return Promise.resolve(err(this.fatalError)); + } + + // Create promise for submit() callers + return new Promise((resolve) => { + // Create inflight entry (innerPromise will be set when pump processes it) + const entry: InflightEntry & { __needsSubmit?: boolean } = { + records, + args, + expectedCount: records.length, + meteredBytes: batchMeteredSize, + innerPromise: new Promise(() => {}), // Never-resolving placeholder + maybeResolve: resolve, + __needsSubmit: true, // Mark for pump to submit + }; + + debugSession( + "[SUBMIT] enqueueing %d records (%d bytes): inflight=%d->%d, queuedBytes=%d->%d", + records.length, + batchMeteredSize, + this.inflight.length, + this.inflight.length + 1, + this.queuedBytes, + this.queuedBytes + batchMeteredSize, + ); + + this.inflight.push(entry); + this.queuedBytes += batchMeteredSize; + + // Wake pump if it's sleeping + if (this.pumpWakeup) { + this.pumpWakeup(); + } + + // Start pump if not already running + this.ensurePump(); + }); + } + + /** + * Wait for capacity before allowing write to proceed (writable only). + */ + private async waitForCapacity(bytes: number): Promise { + debugSession( + "[CAPACITY] checking for %d bytes: queuedBytes=%d, pendingBytes=%d, maxQueuedBytes=%d, inflight=%d", + bytes, + this.queuedBytes, + this.pendingBytes, + this.maxQueuedBytes, + this.inflight.length, + ); + + // Check if we have capacity + while (true) { + // Check for fatal error before adding to pendingBytes + if (this.fatalError) { + debugSession( + "[CAPACITY] fatal error detected, rejecting: %s", + this.fatalError.message, + ); + throw this.fatalError; + } + + // Byte-based gating + if (this.queuedBytes + this.pendingBytes + bytes <= this.maxQueuedBytes) { + // Batch-based gating (if configured) + if ( + this.maxInflightBatches === undefined || + this.inflight.length < this.maxInflightBatches + ) { + debugSession( + "[CAPACITY] capacity available, adding %d to pendingBytes", + bytes, + ); + this.pendingBytes += bytes; + return; + } + } + + // No capacity - wait + // WritableStream enforces writer lock, so only one write can be blocked at a time + debugSession("[CAPACITY] no capacity, waiting for release"); + await new Promise((resolve) => { + this.capacityWaiter = resolve; + }); + debugSession("[CAPACITY] woke up, rechecking"); + } + } + + /** + * Release capacity and wake waiter if present. + */ + private releaseCapacity(bytes: number): void { + debugSession( + "[CAPACITY] releasing %d bytes: queuedBytes=%d->%d, pendingBytes=%d->%d, hasWaiter=%s", + bytes, + this.queuedBytes, + this.queuedBytes - bytes, + this.pendingBytes, + Math.max(0, this.pendingBytes - bytes), + !!this.capacityWaiter, + ); + this.queuedBytes -= bytes; + this.pendingBytes = Math.max(0, this.pendingBytes - bytes); + + // Wake single waiter + const waiter = this.capacityWaiter; + if (waiter) { + debugSession("[CAPACITY] waking waiter"); + this.capacityWaiter = undefined; + waiter(); + } + } + + /** + * Ensure pump loop is running. 
+ */ + private ensurePump(): void { + if (this.pumpPromise || this.pumpStopped) { + return; + } + + this.pumpPromise = this.runPump().catch((e) => { + debugSession("pump crashed unexpectedly: %s", e); + // This should never happen - pump handles all errors internally + }); + } + + /** + * Main pump loop: processes inflight queue, handles acks, retries, and recovery. + */ + private async runPump(): Promise { + debugSession("pump started"); + + while (true) { + debugSession( + "[PUMP] loop: inflight=%d, queuedBytes=%d, pendingBytes=%d, closing=%s, pumpStopped=%s", + this.inflight.length, + this.queuedBytes, + this.pendingBytes, + this.closing, + this.pumpStopped, + ); + + // Check if we should stop + if (this.pumpStopped) { + debugSession("[PUMP] stopped by flag"); + return; + } + + // If closing and queue is empty, stop + if (this.closing && this.inflight.length === 0) { + debugSession("[PUMP] closing and queue empty, stopping"); + this.pumpStopped = true; + return; + } + + // If no entries, sleep and continue + if (this.inflight.length === 0) { + debugSession("[PUMP] no entries, sleeping 10ms"); + // Use interruptible sleep - can be woken by new submissions + await Promise.race([ + sleep(10), + new Promise((resolve) => { + this.pumpWakeup = resolve; + }), + ]); + this.pumpWakeup = undefined; + continue; + } + + // Get head entry (we know it exists because we checked length above) + const head = this.inflight[0]!; + debugSession( + "[PUMP] processing head: expectedCount=%d, meteredBytes=%d", + head.expectedCount, + head.meteredBytes, + ); + + // Ensure session exists + debugSession("[PUMP] ensuring session exists"); + await this.ensureSession(); + if (!this.session) { + // Session creation failed - will retry + debugSession("[PUMP] session creation failed, sleeping 100ms"); + await sleep(100); + continue; + } + + // Submit ALL entries that need submitting (enables HTTP/2 pipelining for S2S) + for (const entry of this.inflight) { + if (!entry.innerPromise || (entry as any).__needsSubmit) { + debugSession( + "[PUMP] submitting entry to inner session (%d records, %d bytes)", + entry.expectedCount, + entry.meteredBytes, + ); + entry.attemptStartedMonotonicMs = performance.now(); + entry.innerPromise = this.session.submit(entry.records, entry.args); + delete (entry as any).__needsSubmit; + } + } + + // Wait for head with timeout + debugSession("[PUMP] waiting for head result"); + const result = await this.waitForHead(head); + debugSession("[PUMP] got result: kind=%s", result.kind); + + if (result.kind === "timeout") { + // Ack timeout - fatal (per-attempt) + const attemptElapsed = + head.attemptStartedMonotonicMs != null + ? Math.round(performance.now() - head.attemptStartedMonotonicMs) + : undefined; + const error = new S2Error({ + message: `Request timeout after ${attemptElapsed ?? "unknown"}ms (${head.expectedCount} records, ${head.meteredBytes} bytes)`, + status: 408, + code: "REQUEST_TIMEOUT", + }); + debugSession("ack timeout for head entry: %s", error.message); + await this.abort(error); + return; + } + + // Promise settled + const appendResult = result.value; + + if (appendResult.ok) { + // Success! 
+ const ack = appendResult.value; + debugSession("[PUMP] success, got ack", { ack }); + + // Invariant check: ack count matches batch count + const ackCount = Number(ack.end.seq_num) - Number(ack.start.seq_num); + if (ackCount !== head.expectedCount) { + const error = invariantViolation( + `Ack count mismatch: expected ${head.expectedCount}, got ${ackCount}`, + ); + debugSession("invariant violation: %s", error.message); + await this.abort(error); + return; + } + + // Invariant check: sequence numbers must be strictly increasing + if (this._lastAckedPosition) { + const prevEnd = BigInt(this._lastAckedPosition.end.seq_num); + const currentEnd = BigInt(ack.end.seq_num); + if (currentEnd <= prevEnd) { + const error = invariantViolation( + `Sequence number not strictly increasing: previous=${prevEnd}, current=${currentEnd}`, + ); + debugSession("invariant violation: %s", error.message); + await this.abort(error); + return; + } + } + + // Update last acked position + this._lastAckedPosition = ack; + + // Resolve submit() caller if present + if (head.maybeResolve) { + head.maybeResolve(ok(ack)); + } + + // Emit to readable stream + try { + this.acksController?.enqueue(ack); + } catch (e) { + debugSession("failed to enqueue ack: %s", e); + } + + // Remove from inflight and release capacity + debugSession( + "[PUMP] removing head from inflight, releasing %d bytes", + head.meteredBytes, + ); + this.inflight.shift(); + this.releaseCapacity(head.meteredBytes); + + // Reset consecutive failures on success + this.consecutiveFailures = 0; + this.currentAttempt = 0; + } else { + // Error result + const error = appendResult.error; + debugSession( + "[PUMP] error: status=%s, message=%s", + error.status, + error.message, + ); + + // Check if retryable + if (!isRetryable(error)) { + debugSession("error not retryable, aborting"); + await this.abort(error); + return; + } + + // Check policy compliance + if ( + this.retryConfig.appendRetryPolicy === "noSideEffects" && + !this.isIdempotent(head) + ) { + debugSession("error not policy-compliant (noSideEffects), aborting"); + await this.abort(error); + return; + } + + // Check max attempts + if (this.currentAttempt >= this.retryConfig.maxAttempts) { + debugSession( + "max attempts reached (%d), aborting", + this.retryConfig.maxAttempts, + ); + const wrappedError = new S2Error({ + message: `Max retry attempts (${this.retryConfig.maxAttempts}) exceeded: ${error.message}`, + status: error.status, + code: error.code, + }); + await this.abort(wrappedError); + return; + } + + // Perform recovery + this.consecutiveFailures++; + this.currentAttempt++; + + debugSession( + "performing recovery (attempt %d/%d)", + this.currentAttempt, + this.retryConfig.maxAttempts, + ); + + await this.recover(); + } + } + } + + /** + * Wait for head entry's innerPromise with timeout. + * Returns either the settled result or a timeout indicator. + * + * Per-attempt ack timeout semantics: + * - The deadline is computed from the most recent (re)submit attempt using + * a monotonic clock (performance.now) to avoid issues with wall clock + * adjustments. + * - If attempt start is missing (for backward compatibility), we measure + * from "now" with the full timeout window. + */ + private async waitForHead( + head: InflightEntry, + ): Promise<{ kind: "settled"; value: AppendResult } | { kind: "timeout" }> { + const startMono = head.attemptStartedMonotonicMs ?? 
performance.now(); + const deadline = startMono + this.requestTimeoutMillis; + const remaining = Math.max(0, deadline - performance.now()); + + let timer: any; + const timeoutP = new Promise<{ kind: "timeout" }>((resolve) => { + timer = setTimeout(() => resolve({ kind: "timeout" }), remaining); + }); + + const settledP = head.innerPromise.then((result) => ({ + kind: "settled" as const, + value: result, + })); + + try { + return await Promise.race([settledP, timeoutP]); + } finally { + if (timer) clearTimeout(timer); + } + } + + /** + * Recover from transient error: recreate session and resubmit all inflight entries. + */ + private async recover(): Promise { + debugSession("starting recovery"); + + // Calculate backoff delay + const delay = calculateDelay( + this.consecutiveFailures - 1, + this.retryConfig.retryBackoffDurationMs, + ); + debugSession("backing off for %dms", delay); + await sleep(delay); + + // Teardown old session + if (this.session) { + try { + const closeResult = await this.session.close(); + if (!closeResult.ok) { + debugSession( + "error closing old session during recovery: %s", + closeResult.error.message, + ); + } + } catch (e) { + debugSession("exception closing old session: %s", e); + } + this.session = undefined; + } + + // Create new session + await this.ensureSession(); + if (!this.session) { + debugSession("failed to create new session during recovery"); + // Will retry on next pump iteration + return; + } + + // Store session in local variable to help TypeScript type narrowing + const session: TransportAppendSession = this.session; + + // Resubmit all inflight entries (replace their innerPromise and reset attempt start) + debugSession("resubmitting %d inflight entries", this.inflight.length); + for (const entry of this.inflight) { + // Attach .catch to superseded promise to avoid unhandled rejection + entry.innerPromise.catch(() => {}); + + // Create new promise from new session + entry.attemptStartedMonotonicMs = performance.now(); + entry.innerPromise = session.submit(entry.records, entry.args); + } + + debugSession("recovery complete"); + } + + /** + * Check if append can be retried under noSideEffects policy. + * For appends, idempotency requires match_seq_num. + */ + private isIdempotent(entry: InflightEntry): boolean { + const args = entry.args; + if (!args) return false; + + return args.match_seq_num !== undefined; + } + + /** + * Ensure session exists, creating it if necessary. + */ + private async ensureSession(): Promise { + if (this.session) { + return; + } + + try { + this.session = await this.generator(this.sessionOptions); + } catch (e) { + const error = s2Error(e); + debugSession("failed to create session: %s", error.message); + // Don't set this.session - will retry later + } + } + + /** + * Abort the session with a fatal error. 
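// The per-attempt timeout in waitForHead reduces to this reusable shape: race
// the in-flight promise against a deadline computed from a monotonic clock,
// and always clear the timer. A minimal sketch; raceWithDeadline is a
// hypothetical helper, not part of this diff.
type Raced<T> = { kind: "settled"; value: T } | { kind: "timeout" };

async function raceWithDeadline<T>(
  promise: Promise<T>,
  attemptStartMs: number,
  timeoutMs: number,
): Promise<Raced<T>> {
  // Remaining window from the most recent attempt start, never negative.
  const remaining = Math.max(0, attemptStartMs + timeoutMs - performance.now());
  let timer: ReturnType<typeof setTimeout> | undefined;
  const timeout = new Promise<Raced<T>>((resolve) => {
    timer = setTimeout(() => resolve({ kind: "timeout" }), remaining);
  });
  try {
    return await Promise.race([
      promise.then((value): Raced<T> => ({ kind: "settled", value })),
      timeout,
    ]);
  } finally {
    // Clear the timer so a settled promise does not leave a dangling timeout.
    if (timer) clearTimeout(timer);
  }
}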
+ */ + private async abort(error: S2Error): Promise { + if (this.pumpStopped) { + return; // Already aborted + } + + debugSession("aborting session: %s", error.message); + + this.fatalError = error; + this.pumpStopped = true; + + // Resolve all inflight entries with error + for (const entry of this.inflight) { + if (entry.maybeResolve) { + entry.maybeResolve(err(error)); + } + } + this.inflight.length = 0; + this.queuedBytes = 0; + this.pendingBytes = 0; + + // Error the readable stream + try { + this.acksController?.error(error); + } catch (e) { + debugSession("failed to error acks controller: %s", e); + } + + // Wake capacity waiter to unblock any pending writer + if (this.capacityWaiter) { + this.capacityWaiter(); + this.capacityWaiter = undefined; + } + + // Close inner session + if (this.session) { + try { + await this.session.close(); + } catch (e) { + debugSession("error closing session during abort: %s", e); + } + this.session = undefined; + } + } + + /** + * Close the append session. + * Waits for all pending appends to complete before resolving. + * Does not interrupt recovery - allows it to complete. + */ + async close(): Promise { + if (this.closed) { + if (this.fatalError) { + throw this.fatalError; + } + return; + } + + debugSession("close requested"); + this.closing = true; + + // Wake pump if it's sleeping so it can check closing flag + if (this.pumpWakeup) { + this.pumpWakeup(); + } + + // Wait for pump to stop (drains inflight queue, including through recovery) + if (this.pumpPromise) { + await this.pumpPromise; + } + + // Close inner session + if (this.session) { + try { + const result = await this.session.close(); + if (!result.ok) { + debugSession("error closing inner session: %s", result.error.message); + } + } catch (e) { + debugSession("exception closing inner session: %s", e); + } + this.session = undefined; + } + + // Close readable stream + try { + this.acksController?.close(); + } catch (e) { + debugSession("error closing acks controller: %s", e); + } + + this.closed = true; + + // If fatal error occurred, throw it + if (this.fatalError) { + throw this.fatalError; + } + + debugSession("close complete"); + } + + async [Symbol.asyncDispose](): Promise { + await this.close(); + } + + /** + * Get a stream of acknowledgements for appends. + */ + acks(): AcksStream { + return this.readable as AcksStream; + } + + /** + * Get the last acknowledged position. 
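// Caller-side view of the close()/abort() contract described above, assuming
// only what this diff states: close() resolves once the inflight queue has
// drained, and rethrows a fatal error recorded by abort(). Hypothetical usage:
async function shutdown(session: { close(): Promise<void> }): Promise<void> {
  try {
    await session.close(); // resolves after all pending appends complete
    console.log("all appends acknowledged");
  } catch (e) {
    // A fatal abort() surfaced here after inflight entries were failed.
    console.error("append session ended with error:", e);
  }
}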
+   */
+  lastAckedPosition(): AppendAck | undefined {
+    return this._lastAckedPosition;
+  }
+}
diff --git a/src/lib/stream/factory.ts b/src/lib/stream/factory.ts
index efe9b4a..c604937 100644
--- a/src/lib/stream/factory.ts
+++ b/src/lib/stream/factory.ts
@@ -13,7 +13,6 @@ import type { SessionTransport, TransportConfig } from "./types.js";
  * - Everywhere else: uses FetchTransport (JSON over HTTP/1.1)
  *
  * @param config Transport configuration
- * @param preferHttp2 Force HTTP/2 or HTTP/1.1 (default: auto-detect)
  */
 export async function createSessionTransport(
   config: TransportConfig,
diff --git a/src/lib/stream/transport/fetch/index.ts b/src/lib/stream/transport/fetch/index.ts
index 3b90c18..1353928 100644
--- a/src/lib/stream/transport/fetch/index.ts
+++ b/src/lib/stream/transport/fetch/index.ts
@@ -1,3 +1,4 @@
+import createDebug from "debug";
 import type { S2RequestOptions } from "../../../../common.js";
 import { RangeNotSatisfiableError, S2Error } from "../../../../error.js";
 import {
@@ -15,6 +16,12 @@ import { meteredSizeBytes } from "../../../../utils.js";
 import { decodeFromBase64 } from "../../../base64.js";
 import { EventStream } from "../../../event-stream.js";
 import * as Redacted from "../../../redacted.js";
+import type { AppendResult, CloseResult } from "../../../result.js";
+import { err, errClose, ok, okClose } from "../../../result.js";
+import {
+  AppendSession as AppendSessionImpl,
+  ReadSession as ReadSessionImpl,
+} from "../../../retry.js";
 import type {
   AppendArgs,
   AppendRecord,
@@ -23,62 +30,121 @@ import type {
   ReadArgs,
   ReadBatch,
   ReadRecord,
+  ReadResult,
   ReadSession,
   SessionTransport,
+  TransportAppendSession,
   TransportConfig,
+  TransportReadSession,
 } from "../../types.js";
 import { streamAppend } from "./shared.js";
 
-export class FetchReadSession<
-  Format extends "string" | "bytes" = "string",
-> extends EventStream<ReadRecord<Format>> {
+const debug = createDebug("s2:fetch");
+
+export class FetchReadSession<Format extends "string" | "bytes" = "string">
+  extends ReadableStream<ReadResult<Format>>
+  implements TransportReadSession<Format>
+{
   static async create<Format extends "string" | "bytes" = "string">(
     client: Client,
     name: string,
     args?: ReadArgs,
     options?: S2RequestOptions,
   ) {
+    debug("FetchReadSession.create stream=%s args=%o", name, args);
     const { as, ...queryParams } = args ?? {};
-    const response = await read({
-      client,
-      path: {
-        stream: name,
-      },
-      headers: {
-        accept: "text/event-stream",
-        ...(as === "bytes" ? { "s2-format": "base64" } : {}),
-      },
-      query: queryParams,
-      parseAs: "stream",
-      ...options,
-    });
-    if (response.error) {
-      if ("message" in response.error) {
-        throw new S2Error({
-          message: response.error.message,
-          code: response.error.code ?? undefined,
-          status: response.response.status,
-        });
-      } else {
-        // special case for 416 - Range Not Satisfiable
-        throw new RangeNotSatisfiableError({
-          status: response.response.status,
+
+    try {
+      const response = await read({
+        client,
+        path: {
+          stream: name,
+        },
+        headers: {
+          accept: "text/event-stream",
+          ...(as === "bytes" ? { "s2-format": "base64" } : {}),
+        },
+        query: queryParams,
+        parseAs: "stream",
+        ...options,
+      });
+      if (response.error) {
+        // Convert error to S2Error and return error session
+        const error =
+          "message" in response.error
+            ? new S2Error({
+                message: response.error.message,
+                code: response.error.code ?? undefined,
+                status: response.response.status,
+              })
+            : new RangeNotSatisfiableError({
+                status: response.response.status,
+              });
+        return FetchReadSession.createErrorSession<Format>(error);
+      }
+      if (!response.response.body) {
+        const error = new S2Error({
+          message: "No body in SSE response",
+          code: "INVALID_RESPONSE",
+          status: 502,
+          origin: "sdk",
         });
+        return FetchReadSession.createErrorSession<Format>(error);
       }
+      const format = (args?.as ?? "string") as Format;
+      return new FetchReadSession(response.response.body, format);
+    } catch (error) {
+      // Catch any thrown errors (network failures, DNS errors, etc.)
+      const s2Error =
+        error instanceof S2Error
+          ? error
+          : new S2Error({
+              message: String(error),
+              status: 502, // Bad Gateway - network/fetch failure
+            });
+      return FetchReadSession.createErrorSession<Format>(s2Error);
     }
-    if (!response.response.body) {
-      throw new S2Error({
-        message: "No body in SSE response",
-      });
-    }
-    const format = (args?.as ?? "string") as Format;
-    return new FetchReadSession(response.response.body, format);
   }
 
-  private _lastReadPosition: StreamPosition | undefined = undefined;
+  /**
+   * Create a session that immediately emits an error result and closes.
+   * Used when errors occur during session creation.
+   */
+  private static createErrorSession<Format extends "string" | "bytes">(
+    error: S2Error,
+  ): FetchReadSession<Format> {
+    // Create a custom instance that extends ReadableStream and emits error immediately
+    const stream = new ReadableStream<ReadResult<Format>>({
+      start(controller) {
+        controller.enqueue({ ok: false, error });
+        controller.close();
+      },
+    });
+
+    // Copy methods from stream to create a proper FetchReadSession
+    const session = Object.assign(
+      Object.create(FetchReadSession.prototype),
+      stream,
+    );
+    session._nextReadPosition = undefined;
+    session._lastObservedTail = undefined;
+
+    return session as FetchReadSession<Format>;
+  }
+
+  private _nextReadPosition: StreamPosition | undefined = undefined;
+  private _lastObservedTail: StreamPosition | undefined = undefined;
 
   private constructor(stream: ReadableStream<Uint8Array>, format: Format) {
-    super(stream, (msg) => {
+    // Track error from parser
+    let parserError: S2Error | null = null;
+
+    // Track last ping time for timeout detection (20s without a ping = timeout)
+    let lastPingTimeMs = performance.now();
+    const PING_TIMEOUT_MS = 20000; // 20 seconds
+
+    // Create EventStream that parses SSE and yields records
+    const eventStream = new EventStream<ReadRecord<Format>>(stream, (msg) => {
       // Parse SSE events according to the S2 protocol
       if (msg.event === "batch" && msg.data) {
         const rawBatch: GeneratedReadBatch = JSON.parse(msg.data);
@@ -108,109 +174,172 @@ export class FetchReadSession<
         }
       })() as ReadBatch<Format>;
       if (batch.tail) {
-        this._lastReadPosition = batch.tail;
+        this._lastObservedTail = batch.tail;
+      }
+      let lastRecord = batch.records?.at(-1);
+      if (lastRecord) {
+        this._nextReadPosition = {
+          seq_num: lastRecord.seq_num + 1,
+          timestamp: lastRecord.timestamp,
+        };
       }
       return { done: false, batch: true, value: batch.records ?? [] };
     }
     if (msg.event === "error") {
-      // Handle error events
-      throw new S2Error({ message: msg.data ?? "Unknown error" });
+      // Store error and signal end of stream
+      // SSE error events are server errors - treat as 503 (Service Unavailable) for retry logic
+      debug("parse event error");
+      parserError = new S2Error({
+        message: msg.data ??
"Unknown error", + status: 503, + }); + return { done: true }; } + lastPingTimeMs = performance.now(); // Skip ping events and other events return { done: false }; }); - } - public lastReadPosition() { - return this._lastReadPosition; - } -} + // Wrap the EventStream to convert records to ReadResult and check for errors + const reader = eventStream.getReader(); + let done = false; -class AcksStream extends ReadableStream implements AsyncDisposable { - constructor( - setController: ( - controller: ReadableStreamDefaultController, - ) => void, - ) { super({ - start: (controller) => { - setController(controller); + pull: async (controller) => { + if (done) { + controller.close(); + return; + } + + // Check for ping timeout before reading + const now = performance.now(); + const timeSinceLastPingMs = now - lastPingTimeMs; + if (timeSinceLastPingMs > PING_TIMEOUT_MS) { + const timeoutError = new S2Error({ + message: `No ping received for ${Math.floor(timeSinceLastPingMs / 1000)}s (timeout: ${PING_TIMEOUT_MS / 1000}s)`, + status: 408, // Request Timeout + code: "TIMEOUT", + }); + debug("ping timeout detected, elapsed=%dms", timeSinceLastPingMs); + controller.enqueue({ ok: false, error: timeoutError }); + done = true; + controller.close(); + return; + } + + try { + // Calculate remaining time until timeout + const remainingTimeMs = PING_TIMEOUT_MS - timeSinceLastPingMs; + + // Race reader.read() against timeout + // This ensures we don't wait forever if server stops sending events + const result = await Promise.race([ + reader.read(), + new Promise((_, reject) => + setTimeout(() => { + const elapsed = performance.now() - lastPingTimeMs; + reject( + new S2Error({ + message: `No ping received for ${Math.floor(elapsed / 1000)}s (timeout: ${PING_TIMEOUT_MS / 1000}s)`, + status: 408, + code: "TIMEOUT", + }), + ); + }, remainingTimeMs), + ), + ]); + + if (result.done) { + done = true; + // Check if stream ended due to error + if (parserError) { + controller.enqueue({ ok: false, error: parserError }); + } + controller.close(); + } else { + // Emit successful result + controller.enqueue({ ok: true, value: result.value }); + } + } catch (error) { + // Convert unexpected errors to S2Error and emit as error result + const s2Err = + error instanceof S2Error + ? 
error + : new S2Error({ message: String(error), status: 500 }); + controller.enqueue({ ok: false, error: s2Err }); + done = true; + controller.close(); + } + }, + cancel: async () => { + await eventStream.cancel(); }, }); } - async [Symbol.asyncDispose]() { - await this.cancel("disposed"); + public nextReadPosition(): StreamPosition | undefined { + return this._nextReadPosition; + } + + public lastObservedTail(): StreamPosition | undefined { + return this._lastObservedTail; } - // Polyfill for older browsers - [Symbol.asyncIterator](): AsyncIterableIterator { + // Implement AsyncIterable (for await...of support) + [Symbol.asyncIterator](): AsyncIterableIterator> { const fn = (ReadableStream.prototype as any)[Symbol.asyncIterator]; if (typeof fn === "function") return fn.call(this); const reader = this.getReader(); return { next: async () => { const r = await reader.read(); - if (r.done) { - reader.releaseLock(); - return { done: true, value: undefined }; - } + if (r.done) return { done: true, value: undefined }; return { done: false, value: r.value }; }, - throw: async (e) => { - await reader.cancel(e); + return: async (value?: any) => { reader.releaseLock(); - return { done: true, value: undefined }; + return { done: true, value }; }, - return: async () => { - await reader.cancel("done"); + throw: async (e?: any) => { reader.releaseLock(); - return { done: true, value: undefined }; + throw e; }, [Symbol.asyncIterator]() { return this; }, }; } + + // Implement AsyncDisposable + async [Symbol.asyncDispose](): Promise { + await this.cancel(); + } } +// Removed AcksStream - transport sessions no longer expose streams + /** - * Session for appending records to a stream. - * Queues append requests and ensures only one is in-flight at a time. + * Fetch-based transport session for appending records via HTTP/1.1. + * Queues append requests and ensures only one is in-flight at a time (single-flight). + * No backpressure, no retry logic, no streams - just submit/close with value-encoded errors. */ -export class FetchAppendSession - implements ReadableWritablePair, AsyncDisposable -{ - private _lastAckedPosition: AppendAck | undefined = undefined; +export class FetchAppendSession implements TransportAppendSession { private queue: Array<{ records: AppendRecord[]; fencing_token?: string; match_seq_num?: number; - meteredSize: number; }> = []; private pendingResolvers: Array<{ - resolve: (ack: AppendAck) => void; - reject: (error: any) => void; + resolve: (result: AppendResult) => void; }> = []; private inFlight = false; private readonly options?: S2RequestOptions; private readonly stream: string; - private acksController: - | ReadableStreamDefaultController - | undefined; - private _readable: AcksStream; - private _writable: WritableStream; private closed = false; private processingPromise: Promise | null = null; - private queuedBytes = 0; - private readonly maxQueuedBytes: number; - private waitingForCapacity: Array<() => void> = []; private readonly client: Client; - public readonly readable: ReadableStream; - public readonly writable: WritableStream; - static async create( stream: string, transportConfig: TransportConfig, @@ -233,128 +362,73 @@ export class FetchAppendSession ) { this.options = requestOptions; this.stream = stream; - this.maxQueuedBytes = sessionOptions?.maxQueuedBytes ?? 10 * 1024 * 1024; // 10 MiB default this.client = createClient( createConfig({ baseUrl: transportConfig.baseUrl, auth: () => Redacted.value(transportConfig.accessToken), + headers: transportConfig.basinName + ? 
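// What the rename to nextReadPosition() enables: a reader can be reopened
// exactly after the last record it delivered. Illustrative loop with local
// stand-in types; `open` is a hypothetical factory, not the SDK surface.
interface Pos {
  seq_num: number;
}
interface ResumableSession extends AsyncIterable<{ seq_num: number }> {
  nextReadPosition(): Pos | undefined;
}

async function tailForever(
  open: (fromSeqNum?: number) => Promise<ResumableSession>,
): Promise<void> {
  let from: number | undefined;
  for (;;) {
    const session = await open(from);
    for await (const record of session) {
      console.log("record", record.seq_num);
    }
    // Session ended (e.g. transport drop): resume after the last record seen.
    from = session.nextReadPosition()?.seq_num ?? from;
  }
}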
{ "s2-basin": transportConfig.basinName } + : {}, }), ); - // Create the readable stream for acks - this._readable = new AcksStream((controller) => { - this.acksController = controller; - }); - this.readable = this._readable; - - // Create the writable stream - let writableController: WritableStreamDefaultController; - this._writable = new WritableStream({ - start: (controller) => { - writableController = controller; - }, - write: async (chunk) => { - // Calculate batch size - let batchMeteredSize = 0; - for (const record of chunk.records) { - batchMeteredSize += meteredSizeBytes(record as AppendRecord); - } - - // Wait for capacity if needed - while ( - this.queuedBytes + batchMeteredSize > this.maxQueuedBytes && - !this.closed - ) { - await new Promise((resolve) => { - this.waitingForCapacity.push(resolve); - }); - } - - // Submit the batch - this.submit( - chunk.records, - { - fencing_token: chunk.fencing_token ?? undefined, - match_seq_num: chunk.match_seq_num ?? undefined, - }, - batchMeteredSize, - ); - }, - close: async () => { - this.closed = true; - await this.waitForDrain(); - }, - abort: async (reason) => { - this.closed = true; - this.queue = []; - this.queuedBytes = 0; - - // Reject all pending promises - const error = new S2Error({ - message: `AppendSession was aborted: ${reason}`, - }); - for (const resolver of this.pendingResolvers) { - resolver.reject(error); - } - this.pendingResolvers = []; - - // Reject all waiting for capacity - for (const resolver of this.waitingForCapacity) { - resolver(); - } - this.waitingForCapacity = []; - }, - }); - this.writable = this._writable; - } - - async [Symbol.asyncDispose]() { - await this.close(); - } - - /** - * Get a stream of acknowledgements for appends. - */ - acks(): AcksStream { - return this._readable; } /** * Close the append session. * Waits for all pending appends to complete before resolving. + * Never throws - returns CloseResult. */ - async close(): Promise { - await this.writable.close(); + async close(): Promise { + try { + this.closed = true; + await this.waitForDrain(); + return okClose(); + } catch (error) { + const s2Err = + error instanceof S2Error + ? error + : new S2Error({ message: String(error), status: 500 }); + return errClose(s2Err); + } } /** * Submit an append request to the session. * The request will be queued and sent when no other request is in-flight. - * Returns a promise that resolves when the append is acknowledged or rejects on error. + * Never throws - returns AppendResult discriminated union. */ submit( records: AppendRecord | AppendRecord[], - args?: { fencing_token?: string; match_seq_num?: number }, - precalculatedSize?: number, - ): Promise { + args?: { + fencing_token?: string; + match_seq_num?: number; + precalculatedSize?: number; + }, + ): Promise { + // Validate closed state if (this.closed) { - return Promise.reject( - new S2Error({ message: "AppendSession is closed" }), + return Promise.resolve( + err(new S2Error({ message: "AppendSession is closed", status: 400 })), ); } const recordsArray = Array.isArray(records) ? 
records : [records]; - // Validate batch size limits + // Validate batch size limits (non-retryable 400-level error) if (recordsArray.length > 1000) { - return Promise.reject( - new S2Error({ - message: `Batch of ${recordsArray.length} exceeds maximum batch size of 1000 records`, - }), + return Promise.resolve( + err( + new S2Error({ + message: `Batch of ${recordsArray.length} exceeds maximum batch size of 1000 records`, + status: 400, + code: "INVALID_ARGUMENT", + }), + ), ); } // Validate metered size (use precalculated if provided) - let batchMeteredSize = precalculatedSize ?? 0; + let batchMeteredSize = args?.precalculatedSize ?? 0; if (batchMeteredSize === 0) { for (const record of recordsArray) { batchMeteredSize += meteredSizeBytes(record); @@ -362,32 +436,36 @@ export class FetchAppendSession } if (batchMeteredSize > 1024 * 1024) { - return Promise.reject( - new S2Error({ - message: `Batch size ${batchMeteredSize} bytes exceeds maximum of 1 MiB (1048576 bytes)`, - }), + return Promise.resolve( + err( + new S2Error({ + message: `Batch size ${batchMeteredSize} bytes exceeds maximum of 1 MiB (1048576 bytes)`, + status: 400, + code: "INVALID_ARGUMENT", + }), + ), ); } - return new Promise((resolve, reject) => { + return new Promise((resolve) => { this.queue.push({ records: recordsArray, fencing_token: args?.fencing_token, match_seq_num: args?.match_seq_num, - meteredSize: batchMeteredSize, }); - this.queuedBytes += batchMeteredSize; - this.pendingResolvers.push({ resolve, reject }); + this.pendingResolvers.push({ resolve }); // Start processing if not already running if (!this.processingPromise) { - this.processingPromise = this.processLoop(); + // Attach a catch to avoid unhandled rejection warnings on hard failures + this.processingPromise = this.processLoop().catch(() => {}); } }); } /** * Main processing loop that sends queued requests one at a time. + * Single-flight: only one request in progress at a time. */ private async processLoop(): Promise { while (this.queue.length > 0) { @@ -406,48 +484,32 @@ export class FetchAppendSession }, this.options, ); - this._lastAckedPosition = ack; - - // Emit ack to the acks stream if it exists - if (this.acksController) { - this.acksController.enqueue(ack); - } - - // Resolve the promise for this request - resolver.resolve(ack); - // Release capacity and wake up waiting writers - this.queuedBytes -= args.meteredSize; - while (this.waitingForCapacity.length > 0) { - const waiter = this.waitingForCapacity.shift()!; - waiter(); - // Only wake one at a time - let them check capacity again - break; - } + // Resolve with success result + resolver.resolve(ok(ack)); } catch (error) { - this.inFlight = false; - this.processingPromise = null; + // Convert error to S2Error and resolve with error result + const s2Err = + error instanceof S2Error + ? 
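// With value-encoded errors, callers branch on `ok` instead of wrapping submit()
// in try/catch. A sketch using structural stand-ins for AppendResult/AppendAck
// (field shapes mirror the acks used elsewhere in this diff):
type AppendResultLike =
  | { ok: true; value: { start: { seq_num: number }; end: { seq_num: number } } }
  | { ok: false; error: { status?: number; message: string } };

function report(result: AppendResultLike): void {
  if (result.ok) {
    console.log(
      `acked [${result.value.start.seq_num}, ${result.value.end.seq_num})`,
    );
  } else {
    console.error(
      `append failed (${result.error.status}): ${result.error.message}`,
    );
  }
}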
error + : new S2Error({ message: String(error), status: 502 }); - // Reject the promise for this request - resolver.reject(error); + // Resolve this request with error + resolver.resolve(err(s2Err)); - // Reject all remaining pending promises + // Resolve all remaining pending promises with the same error + // (transport failure affects all queued requests) for (const pendingResolver of this.pendingResolvers) { - pendingResolver.reject(error); + pendingResolver.resolve(err(s2Err)); } this.pendingResolvers = []; - // Clear the queue and reset queued bytes + // Clear the queue this.queue = []; - this.queuedBytes = 0; - // Wake up all waiting writers (they'll see the closed state or retry) - for (const waiter of this.waitingForCapacity) { - waiter(); - } - this.waitingForCapacity = []; - - // Do not rethrow here to avoid unhandled rejection; callers already received rejection + this.inFlight = false; + this.processingPromise = null; + return; } this.inFlight = false; @@ -466,15 +528,6 @@ export class FetchAppendSession while (this.queue.length > 0 || this.inFlight) { await new Promise((resolve) => setTimeout(resolve, 10)); } - - // Close the acks stream if it exists - if (this.acksController) { - this.acksController.close(); - } - } - - lastAckedPosition() { - return this._lastAckedPosition; } } @@ -490,6 +543,7 @@ export class FetchTransport implements SessionTransport { createConfig({ baseUrl: config.baseUrl, auth: () => Redacted.value(config.accessToken), + headers: config.basinName ? { "s2-basin": config.basinName } : {}, }), ); this.transportConfig = config; @@ -500,11 +554,23 @@ export class FetchTransport implements SessionTransport { sessionOptions?: AppendSessionOptions, requestOptions?: S2RequestOptions, ): Promise { - return FetchAppendSession.create( - stream, - this.transportConfig, - sessionOptions, - requestOptions, + // Fetch transport intentionally enforces single-flight submission (HTTP/1.1) + // This ensures only one batch is in-flight at a time, regardless of user setting. + const opts = { + ...sessionOptions, + maxInflightBatches: 1, + } as AppendSessionOptions; + return AppendSessionImpl.create( + (myOptions) => { + return FetchAppendSession.create( + stream, + this.transportConfig, + myOptions, + requestOptions, + ); + }, + opts, + this.transportConfig.retry, ); } @@ -513,6 +579,12 @@ export class FetchTransport implements SessionTransport { args?: ReadArgs, options?: S2RequestOptions, ): Promise> { - return FetchReadSession.create(this.client, stream, args, options); + return ReadSessionImpl.create( + (myArgs) => { + return FetchReadSession.create(this.client, stream, myArgs, options); + }, + args, + this.transportConfig.retry, + ); } } diff --git a/src/lib/stream/transport/fetch/shared.ts b/src/lib/stream/transport/fetch/shared.ts index 8e79811..d852b98 100644 --- a/src/lib/stream/transport/fetch/shared.ts +++ b/src/lib/stream/transport/fetch/shared.ts @@ -1,9 +1,10 @@ import type { S2RequestOptions } from "../../../../common.js"; import { - FencingTokenMismatchError, + makeAppendPreconditionError, + makeServerError, RangeNotSatisfiableError, S2Error, - SeqNumMismatchError, + s2Error, } from "../../../../error.js"; import type { Client } from "../../../../generated/client/index.js"; import { @@ -39,30 +40,31 @@ export async function streamRead( options?: S2RequestOptions, ) { const { as, ...queryParams } = args ?? {}; - const response = await read({ - client, - path: { - stream, - }, - headers: { - ...(as === "bytes" ? 
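// The processLoop above is a single-flight queue: submit() enqueues work plus
// a resolver, and one drain loop keeps at most one request in flight (the
// HTTP/1.1 constraint). The pattern in isolation, with local illustrative types:
type Outcome<T> = { ok: true; value: T } | { ok: false; error: Error };

class SingleFlight<T> {
  private queue: Array<{
    run: () => Promise<T>;
    resolve: (outcome: Outcome<T>) => void;
  }> = [];
  private draining = false;

  submit(run: () => Promise<T>): Promise<Outcome<T>> {
    return new Promise((resolve) => {
      this.queue.push({ run, resolve });
      if (!this.draining) void this.drain();
    });
  }

  private async drain(): Promise<void> {
    this.draining = true;
    while (this.queue.length > 0) {
      // One request at a time, in submission order; errors are value-encoded.
      const job = this.queue.shift()!;
      try {
        job.resolve({ ok: true, value: await job.run() });
      } catch (error) {
        job.resolve({ ok: false, error: error as Error });
      }
    }
    this.draining = false;
  }
}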
{ "s2-format": "base64" } : {}), - }, - query: queryParams, - ...options, - }); + let response: any; + try { + response = await read({ + client, + path: { + stream, + }, + headers: { + ...(as === "bytes" ? { "s2-format": "base64" } : {}), + }, + query: queryParams, + ...options, + }); + } catch (error) { + throw s2Error(error); + } if (response.error) { - if ("message" in response.error) { - throw new S2Error({ - message: response.error.message, - code: response.error.code ?? undefined, - status: response.response.status, - }); - } else { - // special case for 416 - Range Not Satisfiable - throw new RangeNotSatisfiableError({ - status: response.response.status, - }); + const status = response.response.status; + if (status === 416) { + throw new RangeNotSatisfiableError({ status }); } + throw makeServerError( + { status, statusText: response.response.statusText }, + response.error, + ); } if (args?.as === "bytes") { @@ -86,12 +88,14 @@ export async function streamRead( } else { const res: ReadBatch<"string"> = { ...response.data, - records: response.data.records.map((record) => ({ - ...record, - headers: record.headers - ? Object.fromEntries(record.headers) - : undefined, - })), + records: response.data.records?.map( + (record: GeneratedSequencedRecord) => ({ + ...record, + headers: record.headers + ? Object.fromEntries(record.headers) + : undefined, + }), + ), }; return res as ReadBatch; } @@ -134,6 +138,7 @@ export async function streamAppend( const format = computeAppendRecordFormat(record); if (format === "bytes") { const formattedRecord = record as AppendRecordForFormat<"bytes">; + hasAnyBytesRecords = true; const encodedRecord = { ...formattedRecord, body: formattedRecord.body @@ -171,52 +176,35 @@ export async function streamAppend( } } - const response = await append({ - client, - path: { - stream, - }, - body: { - fencing_token: args?.fencing_token, - match_seq_num: args?.match_seq_num, - records: encodedRecords, - }, - headers: { - ...(hasAnyBytesRecords ? { "s2-format": "base64" } : {}), - }, - ...options, - }); + let response: any; + try { + response = await append({ + client, + path: { + stream, + }, + body: { + fencing_token: args?.fencing_token, + match_seq_num: args?.match_seq_num, + records: encodedRecords, + }, + headers: { + ...(hasAnyBytesRecords ? { "s2-format": "base64" } : {}), + }, + ...options, + }); + } catch (error) { + throw s2Error(error); + } if (response.error) { - if ("message" in response.error) { - throw new S2Error({ - message: response.error.message, - code: response.error.code ?? 
undefined, - status: response.response.status, - }); - } else { - // special case for 412 - append condition failed - if ("seq_num_mismatch" in response.error) { - throw new SeqNumMismatchError({ - message: "Append condition failed: sequence number mismatch", - code: "APPEND_CONDITION_FAILED", - status: response.response.status, - expectedSeqNum: response.error.seq_num_mismatch, - }); - } else if ("fencing_token_mismatch" in response.error) { - throw new FencingTokenMismatchError({ - message: "Append condition failed: fencing token mismatch", - code: "APPEND_CONDITION_FAILED", - status: response.response.status, - expectedFencingToken: response.error.fencing_token_mismatch, - }); - } else { - // fallback for unknown 412 error format - throw new S2Error({ - message: "Append condition failed", - status: response.response.status, - }); - } + const status = response.response.status; + if (status === 412) { + throw makeAppendPreconditionError(status, response.error); } + throw makeServerError( + { status, statusText: response.response.statusText }, + response.error, + ); } return response.data; } diff --git a/src/lib/stream/transport/s2s/index.ts b/src/lib/stream/transport/s2s/index.ts index c0507db..2df9401 100644 --- a/src/lib/stream/transport/s2s/index.ts +++ b/src/lib/stream/transport/s2s/index.ts @@ -6,12 +6,14 @@ */ import * as http2 from "node:http2"; +import createDebug from "debug"; import type { S2RequestOptions } from "../../../../common.js"; import { - type Client, - createClient, - createConfig, -} from "../../../../generated/client/index.js"; + makeAppendPreconditionError, + makeServerError, + RangeNotSatisfiableError, + S2Error, +} from "../../../../error.js"; import type { AppendAck, StreamPosition } from "../../../../generated/index.js"; import { AppendAck as ProtoAppendAck, @@ -19,9 +21,14 @@ import { ReadBatch as ProtoReadBatch, type StreamPosition as ProtoStreamPosition, } from "../../../../generated/proto/s2.js"; -import { S2Error } from "../../../../index.js"; import { meteredSizeBytes } from "../../../../utils.js"; import * as Redacted from "../../../redacted.js"; +import type { AppendResult, CloseResult } from "../../../result.js"; +import { err, errClose, ok, okClose } from "../../../result.js"; +import { + AppendSession as AppendSessionImpl, + ReadSession as ReadSessionImpl, +} from "../../../retry.js"; import type { AppendArgs, AppendRecord, @@ -29,12 +36,17 @@ import type { AppendSessionOptions, ReadArgs, ReadRecord, + ReadResult, ReadSession, SessionTransport, + TransportAppendSession, TransportConfig, + TransportReadSession, } from "../../types.js"; import { frameMessage, S2SFrameParser } from "./framing.js"; +const debug = createDebug("s2:s2s"); + export function buildProtoAppendInput( records: AppendRecord[], args: AppendArgs, @@ -73,18 +85,11 @@ export function buildProtoAppendInput( } export class S2STransport implements SessionTransport { - private readonly client: Client; private readonly transportConfig: TransportConfig; private connection?: http2.ClientHttp2Session; private connectionPromise?: Promise; constructor(config: TransportConfig) { - this.client = createClient( - createConfig({ - baseUrl: config.baseUrl, - auth: () => Redacted.value(config.accessToken), - }), - ); this.transportConfig = config; } @@ -93,13 +98,20 @@ export class S2STransport implements SessionTransport { sessionOptions?: AppendSessionOptions, requestOptions?: S2RequestOptions, ): Promise { - return S2SAppendSession.create( - this.transportConfig.baseUrl, - 
this.transportConfig.accessToken, - stream, - () => this.getConnection(), + return AppendSessionImpl.create( + (myOptions) => { + return S2SAppendSession.create( + this.transportConfig.baseUrl, + this.transportConfig.accessToken, + stream, + () => this.getConnection(), + this.transportConfig.basinName, + myOptions, + requestOptions, + ); + }, sessionOptions, - requestOptions, + this.transportConfig.retry, ); } @@ -108,13 +120,20 @@ export class S2STransport implements SessionTransport { args?: ReadArgs, options?: S2RequestOptions, ): Promise> { - return S2SReadSession.create( - this.transportConfig.baseUrl, - this.transportConfig.accessToken, - stream, + return ReadSessionImpl.create( + (myArgs) => { + return S2SReadSession.create( + this.transportConfig.baseUrl, + this.transportConfig.accessToken, + stream, + myArgs, + options, + () => this.getConnection(), + this.transportConfig.basinName, + ); + }, args, - options, - () => this.getConnection(), + this.transportConfig.retry, ); } @@ -181,11 +200,13 @@ export class S2STransport implements SessionTransport { } class S2SReadSession - extends ReadableStream> - implements ReadSession + extends ReadableStream> + implements TransportReadSession { private http2Stream?: http2.ClientHttp2Stream; private _lastReadPosition?: StreamPosition; + private _nextReadPosition?: StreamPosition; + private _lastObservedTail?: StreamPosition; private parser = new S2SFrameParser(); static async create( @@ -195,6 +216,7 @@ class S2SReadSession args: ReadArgs | undefined, options: S2RequestOptions | undefined, getConnection: () => Promise, + basinName?: string, ): Promise> { const url = new URL(baseUrl); return new S2SReadSession( @@ -204,6 +226,7 @@ class S2SReadSession url, options, getConnection, + basinName, ); } @@ -214,6 +237,7 @@ class S2SReadSession private url: URL, private options: S2RequestOptions | undefined, private getConnection: () => Promise, + private basinName?: string, ) { // Initialize parser and textDecoder before super() call const parser = new S2SFrameParser(); @@ -221,12 +245,21 @@ class S2SReadSession let http2Stream: http2.ClientHttp2Stream | undefined; let lastReadPosition: StreamPosition | undefined; + // Track timeout for detecting when server stops sending data + const TAIL_TIMEOUT_MS = 20000; // 20 seconds + let timeoutTimer: NodeJS.Timeout | undefined; + super({ start: async (controller) => { let controllerClosed = false; + let responseCode: number | undefined; const safeClose = () => { if (!controllerClosed) { controllerClosed = true; + if (timeoutTimer) { + clearTimeout(timeoutTimer); + timeoutTimer = undefined; + } try { controller.close(); } catch { @@ -237,11 +270,40 @@ class S2SReadSession const safeError = (err: unknown) => { if (!controllerClosed) { controllerClosed = true; - controller.error(err); + if (timeoutTimer) { + clearTimeout(timeoutTimer); + timeoutTimer = undefined; + } + // Convert error to S2Error and enqueue as error result + const s2Err = + err instanceof S2Error + ? 
err + : new S2Error({ message: String(err), status: 500 }); + controller.enqueue({ ok: false, error: s2Err }); + controller.close(); } }; + // Helper to start/reset the timeout timer + // Resets on every tail received, fires only if no tail for 20s + const resetTimeoutTimer = () => { + if (timeoutTimer) { + clearTimeout(timeoutTimer); + } + timeoutTimer = setTimeout(() => { + const timeoutError = new S2Error({ + message: `No tail received for ${TAIL_TIMEOUT_MS / 1000}s`, + status: 408, // Request Timeout + code: "TIMEOUT", + }); + debug("tail timeout detected"); + safeError(timeoutError); + }, TAIL_TIMEOUT_MS); + }; + try { + // Start the timeout timer - will fire in 20s if no tail received + resetTimeoutTimer(); const connection = await getConnection(); // Build query string @@ -275,6 +337,7 @@ class S2SReadSession authorization: `Bearer ${Redacted.value(authToken)}`, accept: "application/protobuf", "content-type": "s2s/proto", + ...(basinName ? { "s2-basin": basinName } : {}), }); http2Stream = stream; @@ -285,76 +348,169 @@ class S2SReadSession } }); + stream.on("response", (headers) => { + // Cache the status. + // This informs whether we should attempt to parse s2s frames in the "data" handler. + responseCode = headers[":status"] ?? 500; + }); + + connection.on("goaway", (errorCode, lastStreamID, opaqueData) => { + debug("received GOAWAY from server"); + }); + + stream.on("error", (err) => { + safeError(err); + }); + stream.on("data", (chunk: Buffer) => { - // Buffer already extends Uint8Array in Node.js, no need to convert - parser.push(chunk); - - let frame = parser.parseFrame(); - while (frame) { - if (frame.terminal) { - if (frame.statusCode && frame.statusCode >= 400) { - const errorText = textDecoder.decode(frame.body); + try { + if ((responseCode ?? 500) >= 400) { + const errorText = textDecoder.decode(chunk); + try { + const errorJson = JSON.parse(errorText); + safeError( + new S2Error({ + message: errorJson.message ?? "Unknown error", + code: errorJson.code, + status: responseCode, + origin: "server", + }), + ); + } catch { + safeError( + new S2Error({ + message: errorText || "Unknown error", + status: responseCode, + origin: "server", + }), + ); + } + return; + } + // Buffer already extends Uint8Array in Node.js, no need to convert + parser.push(chunk); + + let frame = parser.parseFrame(); + while (frame) { + if (frame.terminal) { + if (frame.statusCode && frame.statusCode >= 400) { + const errorText = textDecoder.decode(frame.body); + try { + const errorJson = JSON.parse(errorText); + const status = frame.statusCode ?? 500; + + // Map known read errors + if (status === 416) { + safeError(new RangeNotSatisfiableError({ status })); + } else { + safeError( + makeServerError( + { status, statusText: undefined }, + errorJson, + ), + ); + } + } catch { + safeError( + makeServerError( + { + status: frame.statusCode ?? 500, + statusText: undefined, + }, + errorText, + ), + ); + } + } else { + safeClose(); + } + stream.close(); + } else { + // Parse ReadBatch try { - const errorJson = JSON.parse(errorText); + const protoBatch = ProtoReadBatch.fromBinary(frame.body); + + resetTimeoutTimer(); + + // Update tail from batch + if (protoBatch.tail) { + const tail = convertStreamPosition(protoBatch.tail); + lastReadPosition = tail; + this._lastReadPosition = tail; + this._lastObservedTail = tail; + debug("received tail"); + } + + // Enqueue each record and track next position + for (const record of protoBatch.records) { + const converted = this.convertRecord( + record, + as ?? 
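// The tail timeout above is a reset-on-activity watchdog: arm a timer at
// session start, re-arm it whenever the server shows signs of life, and
// surface a 408 if it ever fires. The shape in isolation (names illustrative):
class Watchdog {
  private timer?: ReturnType<typeof setTimeout>;

  constructor(
    private readonly timeoutMs: number,
    private readonly onTimeout: () => void,
  ) {}

  // (Re)arm: call on creation and on every received tail/frame.
  kick(): void {
    if (this.timer) clearTimeout(this.timer);
    this.timer = setTimeout(this.onTimeout, this.timeoutMs);
  }

  stop(): void {
    if (this.timer) clearTimeout(this.timer);
    this.timer = undefined;
  }
}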
("string" as Format), + textDecoder, + ); + controller.enqueue({ ok: true, value: converted }); + + // Update next read position to after this record + if (record.seqNum !== undefined) { + this._nextReadPosition = { + seq_num: Number(record.seqNum) + 1, + timestamp: Number(record.timestamp ?? 0n), + }; + } + } + } catch (err) { safeError( new S2Error({ - message: errorJson.message ?? "Unknown error", - code: errorJson.code, - status: frame.statusCode, - }), - ); - } catch { - safeError( - new S2Error({ - message: errorText || "Unknown error", - status: frame.statusCode, + message: `Failed to parse ReadBatch: ${err}`, + status: 500, + origin: "sdk", }), ); } - } else { - safeClose(); } - stream.close(); - } else { - // Parse ReadBatch - try { - const protoBatch = ProtoReadBatch.fromBinary(frame.body); - // Update position from tail - if (protoBatch.tail) { - lastReadPosition = convertStreamPosition(protoBatch.tail); - // Assign to instance property - this._lastReadPosition = lastReadPosition; - } - - // Enqueue each record - for (const record of protoBatch.records) { - const converted = this.convertRecord( - record, - as ?? ("string" as Format), - textDecoder, - ); - controller.enqueue(converted); - } - } catch (err) { - safeError( - new S2Error({ - message: `Failed to parse ReadBatch: ${err}`, - }), - ); - } + frame = parser.parseFrame(); } - - frame = parser.parseFrame(); + } catch (error) { + safeError( + error instanceof S2Error + ? error + : new S2Error({ + message: `Failed to process read data: ${error}`, + status: 500, + origin: "sdk", + }), + ); } }); - stream.on("error", (err) => { - safeError(err); + stream.on("end", () => { + if (stream.rstCode != 0) { + debug("stream reset code=%d", stream.rstCode); + safeError( + new S2Error({ + message: `Stream ended with error: ${stream.rstCode}`, + status: 500, + code: "stream reset", + origin: "sdk", + }), + ); + } }); stream.on("close", () => { - safeClose(); + if (parser.hasData()) { + safeError( + new S2Error({ + message: "Stream closed with unparsed data remaining", + status: 500, + code: "STREAM_CLOSED_PREMATURELY", + origin: "sdk", + }), + ); + } else { + safeClose(); + } }); } catch (err) { safeError(err); @@ -420,7 +576,7 @@ class S2SReadSession } // Polyfill for older browsers / Node.js environments - [Symbol.asyncIterator](): AsyncIterableIterator> { + [Symbol.asyncIterator](): AsyncIterableIterator> { const fn = (ReadableStream.prototype as any)[Symbol.asyncIterator]; if (typeof fn === "function") return fn.call(this); const reader = this.getReader(); @@ -449,97 +605,41 @@ class S2SReadSession }; } - lastReadPosition(): StreamPosition | undefined { - return this._lastReadPosition; + nextReadPosition(): StreamPosition | undefined { + return this._nextReadPosition; + } + + lastObservedTail(): StreamPosition | undefined { + return this._lastObservedTail; } } /** * AcksStream for S2S append session */ -class S2SAcksStream - extends ReadableStream - implements AsyncDisposable -{ - constructor( - setController: ( - controller: ReadableStreamDefaultController, - ) => void, - ) { - super({ - start: (controller) => { - setController(controller); - }, - }); - } - - async [Symbol.asyncDispose]() { - await this.cancel("disposed"); - } - - // Polyfill for older browsers - [Symbol.asyncIterator](): AsyncIterableIterator { - const fn = (ReadableStream.prototype as any)[Symbol.asyncIterator]; - if (typeof fn === "function") return fn.call(this); - const reader = this.getReader(); - return { - next: async () => { - const r = await reader.read(); 
-      if (r.done) {
-        reader.releaseLock();
-        return { done: true, value: undefined };
-      }
-      return { done: false, value: r.value };
-    },
-    throw: async (e) => {
-      await reader.cancel(e);
-      reader.releaseLock();
-      return { done: true, value: undefined };
-    },
-    return: async () => {
-      await reader.cancel("done");
-      reader.releaseLock();
-      return { done: true, value: undefined };
-    },
-    [Symbol.asyncIterator]() {
-      return this;
-    },
-  };
-}
-}
+// Removed S2SAcksStream - transport sessions no longer expose streams
 
 /**
- * S2S Append Session for pipelined writes
- * Unlike fetch-based append, writes don't block on acks - only on submission
+ * HTTP/2 (S2S) transport session for appending records.
+ * Pipelined: multiple requests can be in-flight simultaneously.
+ * No backpressure, no retry logic, no streams - just submit/close with value-encoded errors.
  */
-class S2SAppendSession
-  implements ReadableWritablePair<AppendAck, AppendInput>, AsyncDisposable
-{
+class S2SAppendSession implements TransportAppendSession {
   private http2Stream?: http2.ClientHttp2Stream;
-  private _lastAckedPosition?: AppendAck;
   private parser = new S2SFrameParser();
-  private acksController?: ReadableStreamDefaultController<AppendAck>;
-  private _readable: S2SAcksStream;
-  private _writable: WritableStream<AppendInput>;
   private closed = false;
-  private queuedBytes = 0;
-  private readonly maxQueuedBytes: number;
-  private waitingForCapacity: Array<() => void> = [];
   private pendingAcks: Array<{
-    resolve: (ack: AppendAck) => void;
-    reject: (error: any) => void;
+    resolve: (result: AppendResult) => void;
     batchSize: number;
   }> = [];
   private initPromise?: Promise<void>;
-  public readonly readable: ReadableStream<AppendAck>;
-  public readonly writable: WritableStream<AppendInput>;
-
   static async create(
     baseUrl: string,
     bearerToken: Redacted.Redacted<string>,
     streamName: string,
     getConnection: () => Promise<http2.ClientHttp2Session>,
+    basinName: string | undefined,
     sessionOptions?: AppendSessionOptions,
     requestOptions?: S2RequestOptions,
   ): Promise<S2SAppendSession> {
     return new S2SAppendSession(
       baseUrl,
       bearerToken,
       streamName,
       getConnection,
+      basinName,
       sessionOptions,
       requestOptions,
     );
   }
 
   private constructor(
     private baseUrl: string,
     private authToken: Redacted.Redacted<string>,
     private streamName: string,
     private getConnection: () => Promise<http2.ClientHttp2Session>,
+    private basinName?: string,
     sessionOptions?: AppendSessionOptions,
     private options?: S2RequestOptions,
   ) {
-    this.maxQueuedBytes = sessionOptions?.maxQueuedBytes ?? 10 * 1024 * 1024; // 10 MiB default
-
-    // Create the readable stream for acks
-    this._readable = new S2SAcksStream((controller) => {
-      this.acksController = controller;
-    });
-    this.readable = this._readable;
-
-    // Create the writable stream
-    this._writable = new WritableStream<AppendInput>({
-      start: async (controller) => {
-        this.initPromise = this.initializeStream();
-        await this.initPromise;
-      },
-      write: async (chunk) => {
-        if (this.closed) {
-          throw new S2Error({ message: "AppendSession is closed" });
-        }
-
-        const recordsArray = Array.isArray(chunk.records)
-          ?
chunk.records - : [chunk.records]; - - // Validate batch size limits - if (recordsArray.length > 1000) { - throw new S2Error({ - message: `Batch of ${recordsArray.length} exceeds maximum batch size of 1000 records`, - }); - } - - // Calculate metered size - let batchMeteredSize = 0; - for (const record of recordsArray) { - batchMeteredSize += meteredSizeBytes(record); - } - - if (batchMeteredSize > 1024 * 1024) { - throw new S2Error({ - message: `Batch size ${batchMeteredSize} bytes exceeds maximum of 1 MiB (1048576 bytes)`, - }); - } - - // Wait for capacity if needed (backpressure) - while ( - this.queuedBytes + batchMeteredSize > this.maxQueuedBytes && - !this.closed - ) { - await new Promise((resolve) => { - this.waitingForCapacity.push(resolve); - }); - } - - if (this.closed) { - throw new S2Error({ message: "AppendSession is closed" }); - } - - // Send the batch immediately (pipelined) - // Returns when frame is sent, not when ack is received - await this.sendBatchNonBlocking(recordsArray, chunk, batchMeteredSize); - }, - close: async () => { - this.closed = true; - await this.closeStream(); - }, - abort: async (reason) => { - this.closed = true; - this.queuedBytes = 0; - - // Reject all pending acks - const error = new S2Error({ - message: `AppendSession was aborted: ${reason}`, - }); - for (const pending of this.pendingAcks) { - pending.reject(error); - } - this.pendingAcks = []; - - // Wake up all waiting for capacity - for (const resolver of this.waitingForCapacity) { - resolver(); - } - this.waitingForCapacity = []; - - if (this.http2Stream && !this.http2Stream.closed) { - this.http2Stream.close(); - } - }, - }); - this.writable = this._writable; + // No stream setup + // Initialization happens lazily on first submit } private async initializeStream(): Promise { @@ -666,6 +681,7 @@ class S2SAppendSession authorization: `Bearer ${Redacted.value(this.authToken)}`, "content-type": "s2s/proto", accept: "application/protobuf", + ...(this.basinName ? { "s2-basin": this.basinName } : {}), }); this.http2Stream = stream; @@ -677,184 +693,118 @@ class S2SAppendSession }); const textDecoder = new TextDecoder(); - let controllerClosed = false; - const safeClose = () => { - if (!controllerClosed && this.acksController) { - controllerClosed = true; - try { - this.acksController.close(); - } catch { - // Controller may already be closed, ignore - } - } - }; + const safeError = (error: unknown) => { + const s2Err = + error instanceof S2Error + ? error + : new S2Error({ message: String(error), status: 502 }); - const safeError = (err: unknown) => { - if (!controllerClosed && this.acksController) { - controllerClosed = true; - this.acksController.error(err); - } - - // Reject all pending acks + // Resolve all pending acks with error result for (const pending of this.pendingAcks) { - pending.reject(err); + pending.resolve(err(s2Err)); } this.pendingAcks = []; }; // Handle incoming data (acks) stream.on("data", (chunk: Buffer) => { - this.parser.push(chunk); - - let frame = this.parser.parseFrame(); - while (frame) { - if (frame.terminal) { - if (frame.statusCode && frame.statusCode >= 400) { - const errorText = textDecoder.decode(frame.body); - try { - const errorJson = JSON.parse(errorText); - safeError( - new S2Error({ - message: errorJson.message ?? 
"Unknown error", - code: errorJson.code, - status: frame.statusCode, - }), - ); - } catch { - safeError( - new S2Error({ - message: errorText || "Unknown error", - status: frame.statusCode, - }), - ); + try { + this.parser.push(chunk); + + let frame = this.parser.parseFrame(); + while (frame) { + if (frame.terminal) { + if (frame.statusCode && frame.statusCode >= 400) { + const errorText = textDecoder.decode(frame.body); + const status = frame.statusCode ?? 500; + try { + const errorJson = JSON.parse(errorText); + const err = + status === 412 + ? makeAppendPreconditionError(status, errorJson) + : makeServerError( + { status, statusText: undefined }, + errorJson, + ); + queueMicrotask(() => safeError(err)); + } catch { + const err = makeServerError( + { status, statusText: undefined }, + errorText, + ); + queueMicrotask(() => safeError(err)); + } } + stream.close(); } else { - safeClose(); - } - stream.close(); - } else { - // Parse AppendAck - try { - const protoAck = ProtoAppendAck.fromBinary(frame.body); - - const ack = convertAppendAck(protoAck); - - this._lastAckedPosition = ack; - - // Enqueue to readable stream - if (this.acksController) { - this.acksController.enqueue(ack); - } - - // Resolve the pending ack promise - const pending = this.pendingAcks.shift(); - if (pending) { - pending.resolve(ack); - - // Release capacity - this.queuedBytes -= pending.batchSize; + // Parse AppendAck + try { + const protoAck = ProtoAppendAck.fromBinary(frame.body); + const ack = convertAppendAck(protoAck); - // Wake up one waiting writer - if (this.waitingForCapacity.length > 0) { - const waiter = this.waitingForCapacity.shift()!; - waiter(); + // Resolve the pending ack promise (FIFO) + const pending = this.pendingAcks.shift(); + if (pending) { + pending.resolve(ok(ack)); } + } catch (parseErr) { + queueMicrotask(() => + safeError( + new S2Error({ + message: `Failed to parse AppendAck: ${parseErr}`, + status: 500, + }), + ), + ); } - } catch (err) { - safeError( - new S2Error({ - message: `Failed to parse AppendAck: ${err}`, - }), - ); } - } - frame = this.parser.parseFrame(); + frame = this.parser.parseFrame(); + } + } catch (error) { + queueMicrotask(() => safeError(error)); } }); - stream.on("error", (err: Error) => { - safeError(err); + stream.on("error", (streamErr: Error) => { + queueMicrotask(() => safeError(streamErr)); }); stream.on("close", () => { - safeClose(); - }); - } - - /** - * Send a batch non-blocking (returns when frame is sent, not when ack is received) - */ - private sendBatchNonBlocking( - records: AppendRecord[], - args: AppendArgs, - batchMeteredSize: number, - ): Promise { - if (!this.http2Stream || this.http2Stream.closed) { - return Promise.reject( - new S2Error({ message: "HTTP/2 stream is not open" }), - ); - } - - // Convert to protobuf AppendInput - const protoInput = buildProtoAppendInput(records, args); - - const bodyBytes = ProtoAppendInput.toBinary(protoInput); - - // Frame the message - const frame = frameMessage({ - terminal: false, - body: bodyBytes, - }); - - // This promise resolves when the frame is written (not when ack is received) - return new Promise((resolve, reject) => { - // Track pending ack - will be resolved when ack arrives - const ackPromise = { - resolve: () => {}, - reject, - batchSize: batchMeteredSize, - }; - this.pendingAcks.push(ackPromise); - - this.queuedBytes += batchMeteredSize; - - // Send the frame (pipelined) - this.http2Stream!.write(frame, (err) => { - if (err) { - // Remove from pending acks on write error - const idx = 
this.pendingAcks.indexOf(ackPromise); - if (idx !== -1) { - this.pendingAcks.splice(idx, 1); - this.queuedBytes -= batchMeteredSize; - } - reject(err); - } else { - // Frame written successfully - resolve immediately (pipelined) - resolve(); - } - }); + // Stream closed - resolve any remaining pending acks with error + // This can happen if the server closes the stream without sending all acks + if (this.pendingAcks.length > 0) { + queueMicrotask(() => + safeError( + new S2Error({ + message: "Stream closed with pending acks", + status: 502, + code: "BAD_GATEWAY", + }), + ), + ); + } }); } /** - * Send a batch and wait for ack (used by submit method) + * Send a batch and wait for ack. Returns AppendResult (never throws). + * Pipelined: multiple sends can be in-flight; acks resolve FIFO. */ private sendBatch( records: AppendRecord[], args: AppendArgs, batchMeteredSize: number, - ): Promise { + ): Promise { if (!this.http2Stream || this.http2Stream.closed) { - return Promise.reject( - new S2Error({ message: "HTTP/2 stream is not open" }), + return Promise.resolve( + err(new S2Error({ message: "HTTP/2 stream is not open", status: 502 })), ); } // Convert to protobuf AppendInput const protoInput = buildProtoAppendInput(records, args); - const bodyBytes = ProtoAppendInput.toBinary(protoInput); // Frame the message @@ -863,103 +813,124 @@ class S2SAppendSession body: bodyBytes, }); - // Track pending ack - this promise resolves when the ack is received - return new Promise((resolve, reject) => { + // Track pending ack - this promise resolves when the ack is received (FIFO) + return new Promise((resolve) => { this.pendingAcks.push({ resolve, - reject, batchSize: batchMeteredSize, }); - this.queuedBytes += batchMeteredSize; - - // Send the frame (non-blocking - pipelined) - this.http2Stream!.write(frame, (err) => { - if (err) { + // Send the frame (pipelined - non-blocking) + this.http2Stream!.write(frame, (writeErr) => { + if (writeErr) { // Remove from pending acks on write error - const idx = this.pendingAcks.findIndex((p) => p.reject === reject); + const idx = this.pendingAcks.findIndex((p) => p.resolve === resolve); if (idx !== -1) { this.pendingAcks.splice(idx, 1); - this.queuedBytes -= batchMeteredSize; } - reject(err); + // Resolve with error result + const s2Err = + writeErr instanceof S2Error + ? writeErr + : new S2Error({ message: String(writeErr), status: 502 }); + resolve(err(s2Err)); } - // Write completed, but promise resolves when ack is received + // Write completed successfully - promise resolves later when ack is received }); }); } - private async closeStream(): Promise { - // Wait for all pending acks - while (this.pendingAcks.length > 0) { - await new Promise((resolve) => setTimeout(resolve, 10)); - } - - // Close the HTTP/2 stream (client doesn't send terminal frame for clean close) - if (this.http2Stream && !this.http2Stream.closed) { - this.http2Stream.end(); - } - } - - async [Symbol.asyncDispose]() { - await this.close(); - } - - /** - * Get a stream of acknowledgements for appends. - */ - acks(): S2SAcksStream { - return this._readable; - } - /** * Close the append session. * Waits for all pending appends to complete before resolving. + * Never throws - returns CloseResult. 
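// Pipelined appends rely on ordered acks: requests go out in order and the
// server acknowledges in order, so each incoming ack resolves the oldest
// pending promise. The FIFO correlation in miniature (illustrative, not the
// SDK's types):
class FifoAcks<T> {
  private pending: Array<(value: T) => void> = [];

  // Called when a request frame is written; resolves when its ack arrives.
  expect(): Promise<T> {
    return new Promise((resolve) => this.pending.push(resolve));
  }

  // Called for each ack frame read off the connection.
  onAck(value: T): void {
    this.pending.shift()?.(value);
  }
}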
*/ - async close(): Promise { - await this.writable.close(); + async close(): Promise { + try { + this.closed = true; + + // Wait for all pending acks to complete + while (this.pendingAcks.length > 0) { + await new Promise((resolve) => setTimeout(resolve, 10)); + } + + // Close the HTTP/2 stream (client doesn't send terminal frame for clean close) + if (this.http2Stream && !this.http2Stream.closed) { + this.http2Stream.end(); + } + + return okClose(); + } catch (error) { + const s2Err = + error instanceof S2Error + ? error + : new S2Error({ message: String(error), status: 500 }); + return errClose(s2Err); + } } /** * Submit an append request to the session. - * Returns a promise that resolves with the ack when received. + * Returns AppendResult (never throws). + * Pipelined: multiple submits can be in-flight; acks resolve FIFO. */ async submit( records: AppendRecord | AppendRecord[], - args?: { fencing_token?: string; match_seq_num?: number }, - ): Promise { + args?: { + fencing_token?: string; + match_seq_num?: number; + precalculatedSize?: number; + }, + ): Promise { + // Validate closed state if (this.closed) { - return Promise.reject( - new S2Error({ message: "AppendSession is closed" }), + return err( + new S2Error({ message: "AppendSession is closed", status: 400 }), ); } - // Wait for initialization - if (this.initPromise) { + // Lazy initialize HTTP/2 stream on first submit + if (!this.initPromise) { + this.initPromise = this.initializeStream(); + } + + try { await this.initPromise; + } catch (initErr) { + const s2Err = + initErr instanceof S2Error + ? initErr + : new S2Error({ message: String(initErr), status: 502 }); + return err(s2Err); } const recordsArray = Array.isArray(records) ? records : [records]; - // Validate batch size limits + // Validate batch size limits (non-retryable 400-level error) if (recordsArray.length > 1000) { - return Promise.reject( + return err( new S2Error({ message: `Batch of ${recordsArray.length} exceeds maximum batch size of 1000 records`, + status: 400, + code: "INVALID_ARGUMENT", }), ); } - // Calculate metered size - let batchMeteredSize = 0; - for (const record of recordsArray) { - batchMeteredSize += meteredSizeBytes(record); + // Calculate metered size (use precalculated if provided) + let batchMeteredSize = args?.precalculatedSize ?? 0; + if (batchMeteredSize === 0) { + for (const record of recordsArray) { + batchMeteredSize += meteredSizeBytes(record); + } } if (batchMeteredSize > 1024 * 1024) { - return Promise.reject( + return err( new S2Error({ message: `Batch size ${batchMeteredSize} bytes exceeds maximum of 1 MiB (1048576 bytes)`, + status: 400, + code: "INVALID_ARGUMENT", }), ); } @@ -974,10 +945,6 @@ class S2SAppendSession batchMeteredSize, ); } - - lastAckedPosition(): AppendAck | undefined { - return this._lastAckedPosition; - } } /** diff --git a/src/lib/stream/types.ts b/src/lib/stream/types.ts index bb700cc..3c70617 100644 --- a/src/lib/stream/types.ts +++ b/src/lib/stream/types.ts @@ -1,4 +1,5 @@ -import type { S2RequestOptions } from "../../common.js"; +import type { RetryConfig, S2RequestOptions } from "../../common.js"; +import type { S2Error } from "../../error.js"; import type { AppendAck, AppendInput as GeneratedAppendInput, @@ -62,28 +63,68 @@ export interface AcksStream extends ReadableStream, AsyncIterable {} -export interface AppendSession - extends ReadableWritablePair, - AsyncDisposable { +/** + * Transports only implement submit/close with value-encoded errors (discriminated unions). 
+ * No backpressure, no retry, no streams - AppendSession adds those. + */ +export interface TransportAppendSession { submit( records: AppendRecord | AppendRecord[], args?: Omit & { precalculatedSize?: number }, - ): Promise; - acks(): AcksStream; - close(): Promise; - lastAckedPosition(): AppendAck | undefined; + ): Promise; + close(): Promise; } -export interface ReadSession - extends ReadableStream>, - AsyncIterable>, +/** + * Public AppendSession interface with retry, backpressure, and streams. + * This is what users interact with - implemented by AppendSession. + */ +// Public AppendSession type is the concrete class from retry.ts +export type AppendSession = import("../retry.js").AppendSession; + +/** + * Result type for transport-level read operations. + * Transport sessions yield ReadResult instead of throwing errors. + */ +export type ReadResult = + | { ok: true; value: ReadRecord } + | { ok: false; error: S2Error }; + +/** + * Transport-level read session interface. + * Transport implementations yield ReadResult and never throw errors from the stream. + * ReadSession wraps these and converts them to the public ReadSession interface. + */ +export interface TransportReadSession< + Format extends "string" | "bytes" = "string", +> extends ReadableStream>, + AsyncIterable>, AsyncDisposable { - lastReadPosition(): StreamPosition | undefined; + nextReadPosition(): StreamPosition | undefined; + lastObservedTail(): StreamPosition | undefined; } +/** + * Public-facing read session interface. + * Yields records directly and propagates errors by throwing (standard stream behavior). + */ +// Public ReadSession type is the concrete class from retry.ts +export type ReadSession = + import("../retry.js").ReadSession; + export interface AppendSessionOptions { - /** Maximum bytes to queue before applying backpressure (default: 10 MiB) */ + /** + * Maximum bytes to queue before applying backpressure (default: 10 MiB). + * Enforced by AppendSession; underlying transports do not apply + * byte-based backpressure on their own. + */ maxQueuedBytes?: number; + /** + * Maximum number of batches allowed in-flight (including queued) before + * applying backpressure. This is enforced by AppendSession; underlying + * transport sessions do not implement their own backpressure. 
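+   *
+   * Illustrative (the maxInflightBatches value here is arbitrary):
+   *   const session = await stream.appendSession({
+   *     maxQueuedBytes: 1024 * 1024,
+   *     maxInflightBatches: 8,
+   *   });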
+ */ + maxInflightBatches?: number; } export interface SessionTransport { @@ -105,4 +146,12 @@ export interface TransportConfig { baseUrl: string; accessToken: Redacted.Redacted; forceTransport?: SessionTransports; + /** + * Basin name to include in s2-basin header when using account endpoint + */ + basinName?: string; + /** + * Retry configuration inherited from the top-level client + */ + retry?: RetryConfig; } diff --git a/src/metrics.ts b/src/metrics.ts index f381598..cae5fc1 100644 --- a/src/metrics.ts +++ b/src/metrics.ts @@ -1,5 +1,5 @@ -import type { DataToObject, S2RequestOptions } from "./common.js"; -import { S2Error } from "./error.js"; +import type { DataToObject, RetryConfig, S2RequestOptions } from "./common.js"; +import { withS2Data } from "./error.js"; import type { Client } from "./generated/client/types.gen.js"; import { type AccountMetricsData, @@ -9,6 +9,7 @@ import { type StreamMetricsData, streamMetrics, } from "./generated/index.js"; +import { withRetries } from "./lib/retry.js"; export interface AccountMetricsArgs extends DataToObject {} export interface BasinMetricsArgs extends DataToObject {} @@ -16,9 +17,11 @@ export interface StreamMetricsArgs extends DataToObject {} export class S2Metrics { readonly client: Client; + private readonly retryConfig?: RetryConfig; - constructor(client: Client) { + constructor(client: Client, retryConfig?: RetryConfig) { this.client = client; + this.retryConfig = retryConfig; } /** @@ -30,21 +33,15 @@ export class S2Metrics { * @param args.interval Optional aggregation interval for timeseries sets */ public async account(args: AccountMetricsArgs, options?: S2RequestOptions) { - const response = await accountMetrics({ - client: this.client, - query: args, - ...options, + return await withRetries(this.retryConfig, async () => { + return await withS2Data(() => + accountMetrics({ + client: this.client, + query: args, + ...options, + }), + ); }); - - if (response.error) { - throw new S2Error({ - message: response.error.message, - code: response.error.code ?? undefined, - status: response.response.status, - }); - } - - return response.data; } /** @@ -57,22 +54,16 @@ export class S2Metrics { * @param args.interval Optional aggregation interval for timeseries sets */ public async basin(args: BasinMetricsArgs, options?: S2RequestOptions) { - const response = await basinMetrics({ - client: this.client, - path: args, - query: args, - ...options, + return await withRetries(this.retryConfig, async () => { + return await withS2Data(() => + basinMetrics({ + client: this.client, + path: args, + query: args, + ...options, + }), + ); }); - - if (response.error) { - throw new S2Error({ - message: response.error.message, - code: response.error.code ?? undefined, - status: response.response.status, - }); - } - - return response.data; } /** @@ -86,21 +77,15 @@ export class S2Metrics { * @param args.interval Optional aggregation interval for timeseries sets */ public async stream(args: StreamMetricsArgs, options?: S2RequestOptions) { - const response = await streamMetrics({ - client: this.client, - path: args, - query: args, - ...options, + return await withRetries(this.retryConfig, async () => { + return await withS2Data(() => + streamMetrics({ + client: this.client, + path: args, + query: args, + ...options, + }), + ); }); - - if (response.error) { - throw new S2Error({ - message: response.error.message, - code: response.error.code ?? 
undefined, - status: response.response.status, - }); - } - - return response.data; } } diff --git a/src/s2.ts b/src/s2.ts index a446782..1e305b0 100644 --- a/src/s2.ts +++ b/src/s2.ts @@ -1,7 +1,8 @@ import { S2AccessTokens } from "./accessTokens.js"; import { S2Basin } from "./basin.js"; import { S2Basins } from "./basins.js"; -import type { S2ClientOptions } from "./common.js"; +import type { RetryConfig, S2ClientOptions } from "./common.js"; +import { S2Error } from "./error.js"; import { createClient, createConfig } from "./generated/client/index.js"; import type { Client } from "./generated/client/types.gen.js"; import * as Redacted from "./lib/redacted.js"; @@ -21,6 +22,7 @@ export class S2 { private readonly client: Client; private readonly makeBasinBaseUrl: (basin: string) => string; private readonly includeBasinHeader: boolean; + private readonly retryConfig: RetryConfig; /** * Account-scoped basin management operations. @@ -40,15 +42,25 @@ export class S2 { */ constructor(options: S2ClientOptions) { this.accessToken = Redacted.make(options.accessToken); + this.retryConfig = options.retry ?? {}; this.client = createClient( createConfig({ baseUrl: options.baseUrl ?? defaultBaseUrl, auth: () => Redacted.value(this.accessToken), }), ); - this.basins = new S2Basins(this.client); - this.accessTokens = new S2AccessTokens(this.client); - this.metrics = new S2Metrics(this.client); + + this.client.interceptors.error.use((err, res, req, opt) => { + return new S2Error({ + message: err instanceof Error ? err.message : "Unknown error", + status: res.status, + origin: "server", + }); + }); + + this.basins = new S2Basins(this.client, this.retryConfig); + this.accessTokens = new S2AccessTokens(this.client, this.retryConfig); + this.metrics = new S2Metrics(this.client, this.retryConfig); this.makeBasinBaseUrl = options.makeBasinBaseUrl ?? defaultMakeBasinBaseUrl; this.includeBasinHeader = !!options.makeBasinBaseUrl; } @@ -63,6 +75,7 @@ export class S2 { accessToken: this.accessToken, baseUrl: this.makeBasinBaseUrl(name), includeBasinHeader: this.includeBasinHeader, + retryConfig: this.retryConfig, }); } } diff --git a/src/stream.ts b/src/stream.ts index aca6e71..abead14 100644 --- a/src/stream.ts +++ b/src/stream.ts @@ -1,7 +1,8 @@ -import type { S2RequestOptions } from "./common.js"; -import { S2Error } from "./error.js"; +import type { RetryConfig, S2RequestOptions } from "./common.js"; +import { withS2Data } from "./error.js"; import type { Client } from "./generated/client/types.gen.js"; import { type AppendAck, checkTail } from "./generated/index.js"; +import { isRetryable, withRetries } from "./lib/retry.js"; import { createSessionTransport } from "./lib/stream/factory.js"; import { streamAppend, @@ -22,14 +23,21 @@ import type { export class S2Stream { private readonly client: Client; private readonly transportConfig: TransportConfig; + private readonly retryConfig?: RetryConfig; private _transport?: SessionTransport; public readonly name: string; - constructor(name: string, client: Client, transportConfig: TransportConfig) { + constructor( + name: string, + client: Client, + transportConfig: TransportConfig, + retryConfig?: RetryConfig, + ) { this.name = name; this.client = client; this.transportConfig = transportConfig; + this.retryConfig = retryConfig; } /** @@ -48,23 +56,17 @@ export class S2Stream { * Returns the next sequence number and timestamp to be assigned (`tail`). 
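   *
   * e.g. const startAt = await stream.checkTail();
   *      // startAt.tail.seq_num is the next sequence number to be assigned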
*/ public async checkTail(options?: S2RequestOptions) { - const response = await checkTail({ - client: this.client, - path: { - stream: this.name, - }, - ...options, + return await withRetries(this.retryConfig, async () => { + return await withS2Data(() => + checkTail({ + client: this.client, + path: { + stream: this.name, + }, + ...options, + }), + ); }); - - if (response.error) { - throw new S2Error({ - message: response.error.message, - code: response.error.code ?? undefined, - status: response.response.status, - }); - } - - return response.data; } /** @@ -79,7 +81,9 @@ export class S2Stream { args?: ReadArgs, options?: S2RequestOptions, ): Promise> { - return await streamRead(this.name, this.client, args, options); + return await withRetries(this.retryConfig, async () => { + return await streamRead(this.name, this.client, args, options); + }); } /** * Append one or more records to the stream. @@ -100,7 +104,27 @@ export class S2Stream { args?: Omit, options?: S2RequestOptions, ): Promise { - return await streamAppend(this.name, this.client, records, args, options); + return await withRetries( + this.retryConfig, + async () => { + return await streamAppend( + this.name, + this.client, + records, + args, + options, + ); + }, + (config, error) => { + if ((config.appendRetryPolicy ?? "noSideEffects") === "noSideEffects") { + // Allow retry only when the append is naturally idempotent by containing + // a match_seq_num condition. + return !!args?.match_seq_num; + } else { + return true; + } + }, + ); } /** * Open a streaming read session diff --git a/src/streams.ts b/src/streams.ts index 78b985e..06534da 100644 --- a/src/streams.ts +++ b/src/streams.ts @@ -1,18 +1,23 @@ -import type { DataToObject, S2RequestOptions } from "./common.js"; -import { S2Error } from "./error.js"; +import type { DataToObject, RetryConfig, S2RequestOptions } from "./common.js"; +import { withS2Data } from "./error.js"; import type { Client } from "./generated/client/types.gen.js"; import { type CreateStreamData, + type CreateStreamResponse, createStream, type DeleteStreamData, deleteStream, type GetStreamConfigData, getStreamConfig, type ListStreamsData, + type ListStreamsResponse, listStreams, type ReconfigureStreamData, + type ReconfigureStreamResponse, reconfigureStream, + type StreamConfig, } from "./generated/index.js"; +import { withRetries } from "./lib/retry.js"; export interface ListStreamsArgs extends DataToObject {} export interface CreateStreamArgs extends DataToObject {} @@ -24,8 +29,11 @@ export interface ReconfigureStreamArgs export class S2Streams { private readonly client: Client; - constructor(client: Client) { + private readonly retryConfig?: RetryConfig; + + constructor(client: Client, retryConfig?: RetryConfig) { this.client = client; + this.retryConfig = retryConfig; } /** @@ -35,22 +43,19 @@ export class S2Streams { * @param args.start_after Name to start after (for pagination) * @param args.limit Max results (up to 1000) */ - public async list(args?: ListStreamsArgs, options?: S2RequestOptions) { - const response = await listStreams({ - client: this.client, - query: args, - ...options, + public async list( + args?: ListStreamsArgs, + options?: S2RequestOptions, + ): Promise { + return await withRetries(this.retryConfig, async () => { + return await withS2Data(() => + listStreams({ + client: this.client, + query: args, + ...options, + }), + ); }); - - if (response.error) { - throw new S2Error({ - message: response.error.message, - code: response.error.code ?? 
undefined, - status: response.response.status, - }); - } - - return response.data; } /** @@ -59,22 +64,19 @@ export class S2Streams { * @param args.stream Stream name (1-512 bytes, unique within the basin) * @param args.config Stream configuration (retention, storage class, timestamping, delete-on-empty) */ - public async create(args: CreateStreamArgs, options?: S2RequestOptions) { - const response = await createStream({ - client: this.client, - body: args, - ...options, + public async create( + args: CreateStreamArgs, + options?: S2RequestOptions, + ): Promise { + return await withRetries(this.retryConfig, async () => { + return await withS2Data(() => + createStream({ + client: this.client, + body: args, + ...options, + }), + ); }); - - if (response.error) { - throw new S2Error({ - message: response.error.message, - code: response.error.code ?? undefined, - status: response.response.status, - }); - } - - return response.data; } /** @@ -85,22 +87,16 @@ export class S2Streams { public async getConfig( args: GetStreamConfigArgs, options?: S2RequestOptions, - ) { - const response = await getStreamConfig({ - client: this.client, - path: args, - ...options, + ): Promise { + return await withRetries(this.retryConfig, async () => { + return await withS2Data(() => + getStreamConfig({ + client: this.client, + path: args, + ...options, + }), + ); }); - - if (response.error) { - throw new S2Error({ - message: response.error.message, - code: response.error.code ?? undefined, - status: response.response.status, - }); - } - - return response.data; } /** @@ -108,22 +104,19 @@ export class S2Streams { * * @param args.stream Stream name */ - public async delete(args: DeleteStreamArgs, options?: S2RequestOptions) { - const response = await deleteStream({ - client: this.client, - path: args, - ...options, + public async delete( + args: DeleteStreamArgs, + options?: S2RequestOptions, + ): Promise { + await withRetries(this.retryConfig, async () => { + return await withS2Data(() => + deleteStream({ + client: this.client, + path: args, + ...options, + }), + ); }); - - if (response.error) { - throw new S2Error({ - message: response.error.message, - code: response.error.code ?? undefined, - status: response.response.status, - }); - } - - return response.data; } /** @@ -135,22 +128,16 @@ export class S2Streams { public async reconfigure( args: ReconfigureStreamArgs, options?: S2RequestOptions, - ) { - const response = await reconfigureStream({ - client: this.client, - path: args, - body: args, - ...options, + ): Promise { + return await withRetries(this.retryConfig, async () => { + return await withS2Data(() => + reconfigureStream({ + client: this.client, + path: args, + body: args, + ...options, + }), + ); }); - - if (response.error) { - throw new S2Error({ - message: response.error.message, - code: response.error.code ?? undefined, - status: response.response.status, - }); - } - - return response.data; } } diff --git a/src/tests/appendSession.e2e.test.ts b/src/tests/appendSession.e2e.test.ts index 61f141a..c6b7762 100644 --- a/src/tests/appendSession.e2e.test.ts +++ b/src/tests/appendSession.e2e.test.ts @@ -3,31 +3,23 @@ import { AppendRecord, S2 } from "../index.js"; import type { SessionTransports } from "../lib/stream/types.js"; const transports: SessionTransports[] = ["fetch", "s2s"]; +const hasEnv = !!process.env.S2_ACCESS_TOKEN && !!process.env.S2_BASIN; +const describeIf = hasEnv ? 
describe : describe.skip; -describe("AppendSession Integration Tests", () => { +describeIf("AppendSession Integration Tests", () => { let s2: S2; let basinName: string; let streamName: string; beforeAll(() => { const token = process.env.S2_ACCESS_TOKEN; - if (!token) { - throw new Error( - "S2_ACCESS_TOKEN environment variable is required for integration tests", - ); - } - s2 = new S2({ accessToken: token }); + const basin = process.env.S2_BASIN; + if (!token || !basin) return; + s2 = new S2({ accessToken: token! }); + basinName = basin!; }); beforeAll(async () => { - // Get or use an existing basin - const basins = await s2.basins.list(); - if (!basins.basins || basins.basins.length === 0) { - throw new Error("No basins found. Please create a basin first."); - } - basinName = basins.basins[0]!.name; - expect(basinName).toBeTruthy(); - // Use a unique stream name for each test run const timestamp = Date.now(); streamName = `integration-test-append-${timestamp}`; diff --git a/src/tests/appendSession.test.ts b/src/tests/appendSession.test.ts index a8fb73c..313aed2 100644 --- a/src/tests/appendSession.test.ts +++ b/src/tests/appendSession.test.ts @@ -7,11 +7,12 @@ import { S2Stream } from "../stream.js"; // Minimal Client shape to satisfy S2Stream constructor; we won't use it directly const fakeClient: any = {}; -const makeStream = () => +const makeStream = (retry?: { maxAttempts?: number }) => new S2Stream("test-stream", fakeClient, { baseUrl: "https://test.b.aws.s2.dev", accessToken: Redacted.make("test-access-token"), forceTransport: "fetch", + retry, }); const makeAck = (n: number): AppendAck => ({ @@ -81,9 +82,14 @@ describe("AppendSession", () => { } })(); - await session.submit([{ body: "a" }]); - await session.submit([{ body: "b" }]); + const ack1 = await session.submit([{ body: "a" }]); + const ack2 = await session.submit([{ body: "b" }]); + // Verify acks were received before closing + expect(ack1).toBeTruthy(); + expect(ack2).toBeTruthy(); + + // Close session - with interruptible sleep, pump will wake immediately await session.close(); await consumer; @@ -103,6 +109,8 @@ describe("AppendSession", () => { const p2 = session.submit([{ body: "y" }]); await Promise.all([p1, p2]); + + // Close - with interruptible sleep, pump will wake immediately await session.close(); await expect(p1).resolves.toBeTruthy(); @@ -123,9 +131,12 @@ describe("AppendSession", () => { }); it("error during processing rejects current and queued, clears queue", async () => { - const stream = makeStream(); + // Create stream with no retries to test immediate failure + const stream = makeStream({ maxAttempts: 0 }); - streamAppendSpy.mockRejectedValueOnce(new Error("boom")); + // With retry enabled, the first error will trigger recovery and retry + // So we need to mock multiple failures to exhaust retries + streamAppendSpy.mockRejectedValue(new Error("boom")); const session = await stream.appendSession(); @@ -135,14 +146,15 @@ describe("AppendSession", () => { p1.catch(() => {}); p2.catch(() => {}); + // Advance timers to allow pump to attempt processing + await vi.advanceTimersByTimeAsync(10); + await expect(p1).rejects.toBeTruthy(); await expect(p2).rejects.toBeTruthy(); - // After error, queue should be empty; new submit should restart processing - streamAppendSpy.mockResolvedValueOnce(makeAck(3)); + // After fatal error, session is dead - new submits should also reject const p3 = session.submit([{ body: "c" }]); - await expect(p3).resolves.toBeTruthy(); - expect(streamAppendSpy).toHaveBeenCalledTimes(2); // 
1 throw + 1 success + await expect(p3).rejects.toBeTruthy(); }); it("updates lastSeenPosition after successful append", async () => { @@ -210,6 +222,7 @@ describe("AppendSession", () => { expect(thirdWriteStarted).toBe(true); expect(streamAppendSpy).toHaveBeenCalledTimes(3); + // Close - with interruptible sleep, pump will wake immediately await writer.close(); }); }); diff --git a/src/tests/batch-transform.test.ts b/src/tests/batch-transform.test.ts index 55f1370..93ed47b 100644 --- a/src/tests/batch-transform.test.ts +++ b/src/tests/batch-transform.test.ts @@ -204,21 +204,13 @@ describe("BatchTransform", () => { reader.releaseLock(); }); - it("respects maximum limits (capped at 1000 records, 1 MiB)", () => { - // Should cap maxBatchRecords at 1000 - const batcher1 = new BatchTransform({ - maxBatchRecords: 5000, // Will be capped to 1000 - }); - // We can't directly access private fields, but we can verify behavior - - // Should cap maxBatchBytes at 1 MiB - const batcher2 = new BatchTransform({ - maxBatchBytes: 10 * 1024 * 1024, // Will be capped to 1 MiB - }); - - // Both should construct without error - expect(batcher1).toBeDefined(); - expect(batcher2).toBeDefined(); + it("rejects invalid configuration (records > 1000 or bytes > 1 MiB)", () => { + // maxBatchRecords > 1000 should throw + expect(() => new BatchTransform({ maxBatchRecords: 5000 })).toThrow(); + // maxBatchBytes > 1 MiB should throw + expect( + () => new BatchTransform({ maxBatchBytes: 10 * 1024 * 1024 }), + ).toThrow(); }); it("handles empty batches gracefully", async () => { diff --git a/src/tests/batcher-session.test.ts b/src/tests/batcher-session.test.ts index e7eb817..fdf7ad6 100644 --- a/src/tests/batcher-session.test.ts +++ b/src/tests/batcher-session.test.ts @@ -35,7 +35,17 @@ describe("BatchTransform + AppendSession integration", () => { it("linger-driven batching yields single session submission", async () => { const stream = makeStream(); const session = await stream.appendSession(); - streamAppendSpy.mockResolvedValue(makeAck(1)); + // Mock returns ack based on number of records submitted + let cumulativeSeq = 0; + streamAppendSpy.mockImplementation((_0: any, _1: any, records: any[]) => { + const start = cumulativeSeq; + cumulativeSeq += records.length; + return Promise.resolve({ + start: { seq_num: start, timestamp: 0 }, + end: { seq_num: cumulativeSeq, timestamp: 0 }, + tail: { seq_num: cumulativeSeq, timestamp: 0 }, + }); + }); const batcher = new BatchTransform({ lingerDurationMillis: 10, @@ -61,8 +71,17 @@ describe("BatchTransform + AppendSession integration", () => { it("batch overflow increments match_seq_num across multiple flushes", async () => { const stream = makeStream(); const session = await stream.appendSession(); - streamAppendSpy.mockResolvedValueOnce(makeAck(1)); - streamAppendSpy.mockResolvedValueOnce(makeAck(2)); + // Mock returns ack based on number of records submitted + let cumulativeSeq = 0; + streamAppendSpy.mockImplementation((_0: any, _1: any, records: any[]) => { + const start = cumulativeSeq; + cumulativeSeq += records.length; + return Promise.resolve({ + start: { seq_num: start, timestamp: 0 }, + end: { seq_num: cumulativeSeq, timestamp: 0 }, + tail: { seq_num: cumulativeSeq, timestamp: 0 }, + }); + }); const batcher = new BatchTransform({ lingerDurationMillis: 0, @@ -79,6 +98,8 @@ describe("BatchTransform + AppendSession integration", () => { await writer.write({ body: "3" }); await writer.close(); + // Advance timers to allow linger flushes to complete + await 
vi.advanceTimersByTimeAsync(10); await pipePromise; expect(streamAppendSpy).toHaveBeenCalledTimes(2); @@ -93,7 +114,17 @@ describe("BatchTransform + AppendSession integration", () => { it("batches are acknowledged via session.acks()", async () => { const stream = makeStream(); const session = await stream.appendSession(); - streamAppendSpy.mockResolvedValue(makeAck(123)); + // Mock returns ack based on number of records submitted + let cumulativeSeq = 0; + streamAppendSpy.mockImplementation((_0: any, _1: any, records: any[]) => { + const start = cumulativeSeq; + cumulativeSeq += records.length; + return Promise.resolve({ + start: { seq_num: start, timestamp: 0 }, + end: { seq_num: cumulativeSeq, timestamp: 0 }, + tail: { seq_num: cumulativeSeq, timestamp: 0 }, + }); + }); const batcher = new BatchTransform({ lingerDurationMillis: 0, @@ -121,6 +152,6 @@ describe("BatchTransform + AppendSession integration", () => { await acksPromise; expect(acks).toHaveLength(1); - expect(acks[0]?.end.seq_num).toBe(123); + expect(acks[0]?.end.seq_num).toBe(1); // 1 record written }); }); diff --git a/src/tests/readSession.e2e.test.ts b/src/tests/readSession.e2e.test.ts index dba3816..dd6b2e4 100644 --- a/src/tests/readSession.e2e.test.ts +++ b/src/tests/readSession.e2e.test.ts @@ -3,8 +3,10 @@ import { AppendRecord, S2 } from "../index.js"; import type { SessionTransports } from "../lib/stream/types.js"; const transports: SessionTransports[] = ["fetch", "s2s"]; +const hasEnv = !!process.env.S2_ACCESS_TOKEN && !!process.env.S2_BASIN; +const describeIf = hasEnv ? describe : describe.skip; -describe("ReadSession Integration Tests", () => { +describeIf("ReadSession Integration Tests", () => { let s2: S2; let basinName: string; let streamName: string; @@ -12,13 +14,9 @@ describe("ReadSession Integration Tests", () => { beforeAll(() => { const token = process.env.S2_ACCESS_TOKEN; const basin = process.env.S2_BASIN; - if (!token || !basin) { - throw new Error( - "S2_ACCESS_TOKEN and S2_BASIN environment variables are required for e2e tests", - ); - } - s2 = new S2({ accessToken: token }); - basinName = basin; + if (!token || !basin) return; + s2 = new S2({ accessToken: token! 
}); + basinName = basin!; }); beforeAll(async () => { @@ -96,14 +94,14 @@ describe("ReadSession Integration Tests", () => { }); // Initially streamPosition should be undefined - expect(session.lastReadPosition()).toBeUndefined(); + expect(session.nextReadPosition()).toBeUndefined(); const records: Array<{ seq_num: number }> = []; for await (const record of session) { records.push({ seq_num: record.seq_num }); // streamPosition should be updated after reading - if (session.lastReadPosition()) { - expect(session.lastReadPosition()?.seq_num).toBeGreaterThanOrEqual( + if (session.nextReadPosition()) { + expect(session.nextReadPosition()?.seq_num).toBeGreaterThanOrEqual( record.seq_num, ); } @@ -113,8 +111,8 @@ describe("ReadSession Integration Tests", () => { } // After reading, streamPosition should be set - expect(session.lastReadPosition()).toBeDefined(); - expect(session.lastReadPosition()?.seq_num).toBeGreaterThan(0); + expect(session.nextReadPosition()).toBeDefined(); + expect(session.nextReadPosition()?.seq_num).toBeGreaterThan(0); }, ); diff --git a/src/tests/retry.test.ts b/src/tests/retry.test.ts new file mode 100644 index 0000000..9d33716 --- /dev/null +++ b/src/tests/retry.test.ts @@ -0,0 +1,99 @@ +import { describe, expect, it, vi } from "vitest"; +import { S2Error } from "../error.js"; +import { DEFAULT_RETRY_CONFIG, withRetries } from "../lib/retry.js"; + +describe("Retry Logic", () => { + describe("withRetry", () => { + it("should succeed on first attempt", async () => { + const fn = vi.fn().mockResolvedValue("success"); + const result = await withRetries(undefined, fn); + + expect(result).toBe("success"); + expect(fn).toHaveBeenCalledTimes(1); + }); + + it("should retry on S2Error with 5xx status", async () => { + const fn = vi + .fn() + .mockRejectedValueOnce( + new S2Error({ message: "Server error", status: 503 }), + ) + .mockResolvedValue("success"); + + const result = await withRetries( + { maxAttempts: 3, retryBackoffDurationMs: 1 }, + fn, + ); + + expect(result).toBe("success"); + expect(fn).toHaveBeenCalledTimes(2); + }); + + it("should not retry on S2Error with 4xx status", async () => { + const error = new S2Error({ message: "Bad request", status: 400 }); + const fn = vi.fn().mockRejectedValue(error); + + await expect( + withRetries({ maxAttempts: 3, retryBackoffDurationMs: 1 }, fn), + ).rejects.toThrow(error); + + expect(fn).toHaveBeenCalledTimes(1); + }); + + it("should retry on 408 Request Timeout", async () => { + const fn = vi + .fn() + .mockRejectedValueOnce( + new S2Error({ message: "Request timeout", status: 408 }), + ) + .mockResolvedValue("success"); + + const result = await withRetries( + { maxAttempts: 3, retryBackoffDurationMs: 1 }, + fn, + ); + + expect(result).toBe("success"); + expect(fn).toHaveBeenCalledTimes(2); + }); + + it("should exhaust retries and throw last error", async () => { + const error = new S2Error({ message: "Server error", status: 503 }); + const fn = vi.fn().mockRejectedValue(error); + + await expect( + withRetries({ maxAttempts: 2, retryBackoffDurationMs: 1 }, fn), + ).rejects.toThrow(error); + + // Initial attempt + 2 retries = 3 calls + expect(fn).toHaveBeenCalledTimes(3); + }); + + it("should not retry when maxAttempts is 0", async () => { + const error = new S2Error({ message: "Server error", status: 503 }); + const fn = vi.fn().mockRejectedValue(error); + + await expect( + withRetries({ maxAttempts: 0, retryBackoffDurationMs: 1 }, fn), + ).rejects.toThrow(error); + + expect(fn).toHaveBeenCalledTimes(1); + }); + + it("should use 
default config when not provided", async () => { + const fn = vi.fn().mockResolvedValue("success"); + const result = await withRetries(undefined, fn); + + expect(result).toBe("success"); + expect(fn).toHaveBeenCalledTimes(1); + }); + }); + + describe("DEFAULT_RETRY_CONFIG", () => { + it("should have correct default values", () => { + expect(DEFAULT_RETRY_CONFIG.maxAttempts).toBe(3); + expect(DEFAULT_RETRY_CONFIG.retryBackoffDurationMs).toBe(100); + expect(DEFAULT_RETRY_CONFIG.appendRetryPolicy).toBe("noSideEffects"); + }); + }); +}); diff --git a/src/tests/retryAppendSession.test.ts b/src/tests/retryAppendSession.test.ts new file mode 100644 index 0000000..f06c523 --- /dev/null +++ b/src/tests/retryAppendSession.test.ts @@ -0,0 +1,366 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { S2Error } from "../error.js"; +import type { AppendAck, StreamPosition } from "../generated/index.js"; +import type { AppendResult, CloseResult } from "../lib/result.js"; +import { err, errClose, ok, okClose } from "../lib/result.js"; +import { AppendSession as AppendSessionImpl } from "../lib/retry.js"; +import type { + AcksStream, + AppendArgs, + AppendRecord, + TransportAppendSession, +} from "../lib/stream/types.js"; + +/** + * Minimal controllable AppendSession for testing AppendSessionImpl. + */ +class FakeAppendSession { + public readonly readable: ReadableStream; + public readonly writable: WritableStream; + private acksController!: ReadableStreamDefaultController; + private closed = false; + public writes: AppendArgs[] = []; + + failureCause(): undefined { + return undefined; + } + + constructor( + private readonly behavior: { + rejectWritesWith?: S2Error; // if provided, writer.write rejects with this error + neverAck?: boolean; // if true, never emit acks + errorAcksWith?: S2Error; // if provided, acks() stream errors after first write + } = {}, + ) { + this.readable = new ReadableStream({ + start: (c) => { + this.acksController = c; + }, + }); + + this.writable = new WritableStream({ + write: async (args) => { + if (this.closed) { + throw new S2Error({ message: "AppendSession is closed" }); + } + if (this.behavior.rejectWritesWith) { + throw this.behavior.rejectWritesWith; + } + this.writes.push(args); + + // Optionally error the acks stream right after a write + if (this.behavior.errorAcksWith) { + queueMicrotask(() => { + try { + this.acksController.error(this.behavior.errorAcksWith); + } catch {} + }); + } + + // Optionally emit an ack immediately + if (!this.behavior.neverAck && !this.behavior.errorAcksWith) { + const count = Array.isArray(args.records) ? args.records.length : 1; + const start = { seq_num: 0, timestamp: 0 } as StreamPosition; + const end = { seq_num: count, timestamp: 0 } as StreamPosition; + const tail = { seq_num: count, timestamp: 0 } as StreamPosition; + const ack: AppendAck = { start, end, tail }; + this.acksController.enqueue(ack); + } + }, + close: async () => { + this.closed = true; + try { + this.acksController.close(); + } catch {} + }, + abort: async (reason) => { + this.closed = true; + try { + this.acksController.error( + reason instanceof S2Error + ? 
reason + : new S2Error({ message: String(reason) }), + ); + } catch {} + }, + }); + } + + acks(): AcksStream { + return this.readable as AcksStream; + } + + async close(): Promise { + await this.writable.close(); + } + + async [Symbol.asyncDispose](): Promise { + await this.close(); + } + + submit( + records: AppendRecord | AppendRecord[], + _args?: Omit & { precalculatedSize?: number }, + ): Promise { + const writer = this.writable.getWriter(); + const batch = Array.isArray(records) ? records : [records]; + return writer.write({ records: batch } as AppendArgs) as any; + } + + lastAckedPosition(): AppendAck | undefined { + return undefined; + } +} + +/** + * Transport-level fake session that returns discriminated unions. + * Used for testing AppendSessionImpl which wraps transport sessions. + */ +class FakeTransportAppendSession implements TransportAppendSession { + public writes: Array<{ records: AppendRecord[]; args?: any }> = []; + private closed = false; + private ackIndex = 0; + + constructor( + private readonly behavior: { + submitError?: S2Error; // if provided, submit() returns error result + closeError?: S2Error; // if provided, close() returns error result + neverAck?: boolean; // if true, submit() hangs forever (for timeout tests) + customAcks?: AppendAck[]; // if provided, return these acks in sequence + } = {}, + ) {} + + async submit( + records: AppendRecord | AppendRecord[], + args?: Omit & { precalculatedSize?: number }, + ): Promise { + if (this.closed) { + return err(new S2Error({ message: "session is closed", status: 400 })); + } + + if (this.behavior.submitError) { + return err(this.behavior.submitError); + } + + if (this.behavior.neverAck) { + // Hang forever (for timeout tests) + return new Promise(() => {}); + } + + const batch = Array.isArray(records) ? 
records : [records]; + this.writes.push({ records: batch, args }); + + // Return custom ack if provided + if ( + this.behavior.customAcks && + this.ackIndex < this.behavior.customAcks.length + ) { + const ack = this.behavior.customAcks[this.ackIndex++]!; + return ok(ack); + } + + // Return default successful ack + const count = batch.length; + const start = { seq_num: 0, timestamp: 0 } as StreamPosition; + const end = { seq_num: count, timestamp: 0 } as StreamPosition; + const tail = { seq_num: count, timestamp: 0 } as StreamPosition; + const ack: AppendAck = { start, end, tail }; + return ok(ack); + } + + async close(): Promise { + if (this.behavior.closeError) { + return errClose(this.behavior.closeError); + } + this.closed = true; + return okClose(); + } +} + +describe("AppendSessionImpl (unit)", () => { + beforeEach(() => { + vi.useFakeTimers(); + }); + afterEach(() => { + vi.useRealTimers(); + }); + + it("aborts on ack timeout (~5s from enqueue) when no acks arrive", async () => { + const session = await AppendSessionImpl.create(async () => { + // Accept writes but never emit acks + return new FakeTransportAppendSession({ neverAck: true }); + }); + (session as any).requestTimeoutMillis = 500; + + const ackP = session.submit([{ body: "x" }]); + + // Not yet timed out at 0.49s + await vi.advanceTimersByTimeAsync(490); + await Promise.resolve(); + let settled = false; + ackP.then(() => (settled = true)).catch(() => (settled = true)); + await Promise.resolve(); + expect(settled).toBe(false); + + // Time out after ~0.5s + await vi.advanceTimersByTimeAsync(20); + await Promise.resolve(); + await expect(ackP).rejects.toMatchObject({ status: 408 }); + }); + + it("recovers from send-phase transient error and resolves after recovery", async () => { + // First session rejects writes; second accepts and acks immediately + let call = 0; + const session = await AppendSessionImpl.create( + async () => { + call++; + if (call === 1) { + return new FakeTransportAppendSession({ + submitError: new S2Error({ message: "boom", status: 500 }), + }); + } + return new FakeTransportAppendSession(); + }, + undefined, + { retryBackoffDurationMs: 1, maxAttempts: 2, appendRetryPolicy: "all" }, + ); + + const p = session.submit([{ body: "x" }]); + // Allow microtasks (acks error propagation) to run + await Promise.resolve(); + await vi.advanceTimersByTimeAsync(10); + await Promise.resolve(); + const ack = await p; + expect(ack.end.seq_num - ack.start.seq_num).toBe(1); + }); + + it("fails immediately when retries are disabled and send-phase errors persist", async () => { + const error = new S2Error({ message: "boom", status: 500 }); + const session = await AppendSessionImpl.create( + async () => new FakeTransportAppendSession({ submitError: error }), + undefined, + { retryBackoffDurationMs: 1, maxAttempts: 0, appendRetryPolicy: "all" }, + ); + + const ackP = session.submit([{ body: "x" }]); + await expect(ackP).rejects.toMatchObject({ + message: "Max retry attempts (0) exceeded: boom", + status: 500, + }); + }); + + it("does not retry non-idempotent inflight under noSideEffects policy and exposes failure cause", async () => { + const error = new S2Error({ message: "boom", status: 500 }); + const session = await AppendSessionImpl.create( + async () => new FakeTransportAppendSession({ submitError: error }), + undefined, + { + retryBackoffDurationMs: 1, + maxAttempts: 2, + appendRetryPolicy: "noSideEffects", + }, + ); + + const p1 = session.submit([{ body: "x" }]); + await expect(p1).rejects.toMatchObject({ status: 500 
}); + expect(session.failureCause()).toMatchObject({ status: 500 }); + }); + + it("abort rejects backlog and queued submissions with the abort error", async () => { + const error = new S2Error({ message: "boom", status: 500 }); + const session = await AppendSessionImpl.create( + async () => new FakeTransportAppendSession({ submitError: error }), + undefined, + { + retryBackoffDurationMs: 1, + maxAttempts: 2, + appendRetryPolicy: "noSideEffects", + }, + ); + + const p1 = session.submit([{ body: "a" }]); + const p2 = session.submit([{ body: "b" }]); + await expect(p1).rejects.toMatchObject({ status: 500 }); + await expect(p2).rejects.toMatchObject({ status: 500 }); + }); + + it("detects non-monotonic sequence numbers and aborts with fatal error", async () => { + // Create acks with non-monotonic sequence numbers + // Each ack must have correct count (end - start = 1 for single record batches) + const ack1: AppendAck = { + start: { seq_num: 0, timestamp: 0 }, + end: { seq_num: 1, timestamp: 0 }, // count = 1 + tail: { seq_num: 1, timestamp: 0 }, + }; + const ack2: AppendAck = { + start: { seq_num: 0, timestamp: 0 }, // Decreasing! + end: { seq_num: 1, timestamp: 0 }, + tail: { seq_num: 1, timestamp: 0 }, + }; + + const session = await AppendSessionImpl.create( + async () => new FakeTransportAppendSession({ customAcks: [ack1, ack2] }), + undefined, + { retryBackoffDurationMs: 1, maxAttempts: 0 }, // No retries + ); + + // First submit should succeed + const p1 = session.submit([{ body: "a" }]); + await expect(p1).resolves.toMatchObject({ end: { seq_num: 1 } }); + + // Second submit should trigger invariant violation + const p2 = session.submit([{ body: "b" }]); + await expect(p2).rejects.toMatchObject({ + message: expect.stringContaining( + "Sequence number not strictly increasing", + ), + status: 500, + code: "INTERNAL_ERROR", + }); + + // Session should expose the failure cause + expect(session.failureCause()).toMatchObject({ + message: expect.stringContaining( + "Sequence number not strictly increasing", + ), + status: 500, + }); + + // Subsequent submits should also fail + const p3 = session.submit([{ body: "c" }]); + await expect(p3).rejects.toMatchObject({ status: 500 }); + }); + + it("detects non-increasing (equal) sequence numbers and aborts", async () => { + // Create acks with equal sequence numbers + // Each ack must have correct count (end - start = 1 for single record batches) + const ack1: AppendAck = { + start: { seq_num: 9, timestamp: 0 }, + end: { seq_num: 10, timestamp: 0 }, // count = 1 + tail: { seq_num: 10, timestamp: 0 }, + }; + const ack2: AppendAck = { + start: { seq_num: 9, timestamp: 0 }, + end: { seq_num: 10, timestamp: 0 }, // Equal end, not increasing! 
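+      // tail mirrors end here; the monotonicity check compares successive ack
+      // end.seq_num values, so previous=10 vs current=10 must abort the session.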
+ tail: { seq_num: 10, timestamp: 0 }, + }; + + const session = await AppendSessionImpl.create( + async () => new FakeTransportAppendSession({ customAcks: [ack1, ack2] }), + undefined, + { retryBackoffDurationMs: 1, maxAttempts: 0 }, + ); + + // First submit should succeed + await expect(session.submit([{ body: "a" }])).resolves.toMatchObject({ + end: { seq_num: 10 }, + }); + + // Second submit should trigger invariant violation + const error = await session.submit([{ body: "b" }]).catch((e) => e); + expect(error.message).toContain("Sequence number not strictly increasing"); + expect(error.message).toContain("previous=10"); + expect(error.message).toContain("current=10"); + expect(error.status).toBe(500); + }); +}); diff --git a/src/tests/retryReadSession.test.ts b/src/tests/retryReadSession.test.ts new file mode 100644 index 0000000..2ab7ec7 --- /dev/null +++ b/src/tests/retryReadSession.test.ts @@ -0,0 +1,497 @@ +import { afterEach, beforeEach, describe, expect, it, vi } from "vitest"; +import { S2Error } from "../error.js"; +import type { StreamPosition } from "../generated/index.js"; +import { ReadSession } from "../lib/retry.js"; +import type { + ReadArgs, + ReadRecord, + ReadResult, + TransportReadSession, +} from "../lib/stream/types.js"; + +/** + * Fake TransportReadSession for testing ReadSession. + * Implements the transport layer pattern: yields ReadResult and never throws. + */ +class FakeReadSession + extends ReadableStream> + implements TransportReadSession +{ + public recordsEmitted = 0; + + constructor( + private readonly behavior: { + // Records to emit before erroring (if errorAfterRecords is set) + records: Array>; + // Error after emitting this many records (undefined = no error) + errorAfterRecords?: number; + // Error to emit as error result + error?: S2Error; + }, + ) { + let emittedCount = 0; // Use local variable in super() callback + super({ + pull: (controller) => { + // Check if we should error before emitting any more records + if ( + behavior.errorAfterRecords !== undefined && + emittedCount >= behavior.errorAfterRecords + ) { + // Emit error result instead of throwing + controller.enqueue({ + ok: false, + error: + behavior.error ?? new S2Error({ message: "boom", status: 500 }), + }); + controller.close(); + return; + } + + // Emit records one at a time as they're requested + if (emittedCount < behavior.records.length) { + // Emit success result + controller.enqueue({ + ok: true, + value: behavior.records[emittedCount]!, + }); + emittedCount++; + + // Check if we should error after emitting this record + if ( + behavior.errorAfterRecords !== undefined && + emittedCount >= behavior.errorAfterRecords + ) { + // Emit error result instead of throwing + controller.enqueue({ + ok: false, + error: + behavior.error ?? new S2Error({ message: "boom", status: 500 }), + }); + controller.close(); + return; + } + } else { + // All records emitted + controller.close(); + } + }, + }); + this.recordsEmitted = behavior.errorAfterRecords ?? 
behavior.records.length; + } + + nextReadPosition(): StreamPosition | undefined { + if (this.recordsEmitted === 0) return undefined; + const lastRecord = this.behavior.records[this.recordsEmitted - 1]; + if (!lastRecord) return undefined; + return { + seq_num: lastRecord.seq_num + 1, + timestamp: lastRecord.timestamp, + }; + } + + lastObservedTail(): StreamPosition | undefined { + return undefined; + } + + // Implement AsyncIterable (for await...of support) + [Symbol.asyncIterator](): AsyncIterableIterator> { + const fn = (ReadableStream.prototype as any)[Symbol.asyncIterator]; + if (typeof fn === "function") return fn.call(this); + const reader = this.getReader(); + return { + next: async () => { + const r = await reader.read(); + if (r.done) return { done: true, value: undefined }; + return { done: false, value: r.value }; + }, + return: async (value?: any) => { + reader.releaseLock(); + return { done: true, value }; + }, + throw: async (e?: any) => { + reader.releaseLock(); + throw e; + }, + [Symbol.asyncIterator]() { + return this; + }, + }; + } + + // Implement AsyncDisposable (using Disposable) + async [Symbol.asyncDispose](): Promise { + await this.cancel(); + } +} + +describe("ReadSession (unit)", () => { + // Note: Not using fake timers here because they don't play well with async iteration + // Instead, we use very short backoff times (1ms) to make tests run fast + + it("adjusts count parameter on retry after partial read", async () => { + const records: ReadRecord<"string">[] = [ + { seq_num: 0, timestamp: 0, body: "a" }, + { seq_num: 1, timestamp: 0, body: "b" }, + { seq_num: 2, timestamp: 0, body: "c" }, + ]; + + let callCount = 0; + const capturedArgs: Array> = []; + + const session = await ReadSession.create( + async (args) => { + capturedArgs.push({ ...args }); + callCount++; + if (callCount === 1) { + // First call: emit 3 records then error + return new FakeReadSession({ + records, + errorAfterRecords: 3, + error: new S2Error({ message: "transient error", status: 500 }), + }); + } + // Second call: emit remaining records (none in this case, but succeed) + return new FakeReadSession({ records: [] }); + }, + { count: 10 }, // Request 10 records + { retryBackoffDurationMs: 1, maxAttempts: 1 }, + ); + + // Consume all records + const results: ReadRecord<"string">[] = []; + for await (const record of session) { + results.push(record); + } + + // Verify we got all 3 records (transport explicitly emits them as success before error) + expect(results).toHaveLength(3); + + // Verify retry adjusted count: 10 - 3 = 7 + expect(capturedArgs).toHaveLength(2); + expect(capturedArgs[0]?.count).toBe(10); + expect(capturedArgs[1]?.count).toBe(7); + }); + + it("adjusts bytes parameter on retry after partial read", async () => { + // Each record is ~50 bytes (rough estimate with body + overhead) + const records: ReadRecord<"string">[] = [ + { seq_num: 0, timestamp: 0, body: "x".repeat(42) }, // ~50 bytes + { seq_num: 1, timestamp: 0, body: "y".repeat(42) }, // ~50 bytes + ]; + + let callCount = 0; + const capturedArgs: Array> = []; + + const session = await ReadSession.create( + async (args) => { + capturedArgs.push({ ...args }); + callCount++; + if (callCount === 1) { + // First call: emit 2 records (~100 bytes) then error + return new FakeReadSession({ + records, + errorAfterRecords: 2, + error: new S2Error({ message: "transient error", status: 500 }), + }); + } + // Second call: succeed + return new FakeReadSession({ records: [] }); + }, + { bytes: 500 }, // Request 500 bytes + { 
retryBackoffDurationMs: 1, maxAttempts: 1 }, + ); + + // Consume all records + const results: ReadRecord<"string">[] = []; + for await (const record of session) { + results.push(record); + } + + // Verify we got both records (transport explicitly emits them as success before error) + expect(results).toHaveLength(2); + + // Verify retry adjusted bytes: should be less than 500 + expect(capturedArgs).toHaveLength(2); + expect(capturedArgs[0]?.bytes).toBe(500); + expect(capturedArgs[1]?.bytes).toBeLessThan(500); + // Each record is approximately 50 bytes, so should be around 500 - 100 = 400 + expect(capturedArgs[1]?.bytes).toBeGreaterThan(350); + }); + + it("adjusts wait parameter based on elapsed time", async () => { + const records: ReadRecord<"string">[] = [ + { seq_num: 0, timestamp: 0, body: "a" }, + ]; + + let callCount = 0; + const capturedArgs: Array> = []; + + const session = await ReadSession.create( + async (args) => { + capturedArgs.push({ ...args }); + callCount++; + if (callCount === 1) { + // First call: emit 1 record then error + return new FakeReadSession({ + records, + errorAfterRecords: 1, + error: new S2Error({ message: "transient error", status: 500 }), + }); + } + // Second call: succeed + return new FakeReadSession({ records: [] }); + }, + { wait: 10 }, // Wait up to 10 seconds + { retryBackoffDurationMs: 1, maxAttempts: 1 }, + ); + + // Consume all records + const results: ReadRecord<"string">[] = []; + for await (const record of session) { + results.push(record); + } + + // Verify we got the record (transport explicitly emits it as success before error) + expect(results).toHaveLength(1); + + // Verify retry adjusted wait: should be less than original 10 seconds + expect(capturedArgs).toHaveLength(2); + expect(capturedArgs[0]?.wait).toBe(10); + // Should be less than original 10 seconds (since some time elapsed) + expect(capturedArgs[1]?.wait).toBeLessThan(10); + expect(capturedArgs[1]?.wait).toBeGreaterThanOrEqual(0); + }); + + it("adjusts seq_num to resume from next position on retry", async () => { + const records: ReadRecord<"string">[] = [ + { seq_num: 100, timestamp: 0, body: "a" }, + { seq_num: 101, timestamp: 0, body: "b" }, + { seq_num: 102, timestamp: 0, body: "c" }, + ]; + + let callCount = 0; + const capturedArgs: Array> = []; + + const session = await ReadSession.create( + async (args) => { + capturedArgs.push({ ...args }); + callCount++; + if (callCount === 1) { + // First call: emit 3 records (seq_num 100-102) then error + return new FakeReadSession({ + records, + errorAfterRecords: 3, + error: new S2Error({ message: "transient error", status: 500 }), + }); + } + // Second call: succeed + return new FakeReadSession({ records: [] }); + }, + { seq_num: 100 }, // Start from seq_num 100 + { retryBackoffDurationMs: 1, maxAttempts: 1 }, + ); + + // Consume all records (including through retry) + const results: ReadRecord<"string">[] = []; + for await (const record of session) { + results.push(record); + } + + // Verify we got all 3 records (transport explicitly emits them as success before error) + expect(results).toHaveLength(3); + + // Verify retry adjusted seq_num to 103 (102 + 1) + expect(capturedArgs).toHaveLength(2); + expect(capturedArgs[0]?.seq_num).toBe(100); + expect(capturedArgs[1]?.seq_num).toBe(103); + }); + + it("does not adjust until parameter on retry (absolute boundary)", async () => { + const records: ReadRecord<"string">[] = [ + { seq_num: 0, timestamp: 0, body: "a" }, + { seq_num: 1, timestamp: 0, body: "b" }, + ]; + + let callCount = 0; + 
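+    // `until` is an absolute read boundary, so unlike count/bytes/wait it
+    // should pass through retries unchanged; capturedArgs verifies this below.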
const capturedArgs: Array> = []; + + const session = await ReadSession.create( + async (args) => { + capturedArgs.push({ ...args }); + callCount++; + if (callCount === 1) { + // First call: emit 2 records then error + return new FakeReadSession({ + records, + errorAfterRecords: 2, + error: new S2Error({ message: "transient error", status: 500 }), + }); + } + // Second call: succeed + return new FakeReadSession({ records: [] }); + }, + { until: 1000 }, // Read until seq_num 1000 + { retryBackoffDurationMs: 1, maxAttempts: 1 }, + ); + + // Consume all records + const results: ReadRecord<"string">[] = []; + for await (const record of session) { + results.push(record); + } + + // Verify we got both records (transport explicitly emits them as success before error) + expect(results).toHaveLength(2); + + // Verify until remains unchanged (it's an absolute boundary) + expect(capturedArgs).toHaveLength(2); + expect(capturedArgs[0]?.until).toBe(1000); + expect(capturedArgs[1]?.until).toBe(1000); + }); + + it("combines all parameter adjustments on retry", async () => { + const records: ReadRecord<"string">[] = [ + { seq_num: 50, timestamp: 0, body: "x".repeat(42) }, + { seq_num: 51, timestamp: 0, body: "y".repeat(42) }, + ]; + + let callCount = 0; + const capturedArgs: Array> = []; + + const session = await ReadSession.create( + async (args) => { + capturedArgs.push({ ...args }); + callCount++; + if (callCount === 1) { + // First call: emit 2 records then error + return new FakeReadSession({ + records, + errorAfterRecords: 2, + error: new S2Error({ message: "transient error", status: 500 }), + }); + } + // Second call: succeed + return new FakeReadSession({ records: [] }); + }, + { + seq_num: 50, + count: 10, + bytes: 500, + wait: 30, + until: 1000, + }, + { retryBackoffDurationMs: 1, maxAttempts: 1 }, + ); + + // Consume all records + const results: ReadRecord<"string">[] = []; + for await (const record of session) { + results.push(record); + } + + // Verify we got both records (transport explicitly emits them as success before error) + expect(results).toHaveLength(2); + + // Verify all adjustments + expect(capturedArgs).toHaveLength(2); + const firstArgs = capturedArgs[0]!; + const secondArgs = capturedArgs[1]!; + + // Original args + expect(firstArgs.seq_num).toBe(50); + expect(firstArgs.count).toBe(10); + expect(firstArgs.bytes).toBe(500); + expect(firstArgs.wait).toBe(30); + expect(firstArgs.until).toBe(1000); + + // Adjusted args + expect(secondArgs.seq_num).toBe(52); // 50 + 2 (read 2 records) + expect(secondArgs.count).toBe(8); // 10 - 2 + expect(secondArgs.bytes).toBeLessThan(500); // Decremented by ~100 + expect(secondArgs.bytes).toBeGreaterThan(350); // Should be around 400 + expect(secondArgs.wait).toBeLessThan(30); // Decremented by elapsed time + expect(secondArgs.until).toBe(1000); // Unchanged (absolute boundary) + }); + + it("fails after max retry attempts exhausted", async () => { + let callCount = 0; + + const session = await ReadSession.create( + async (_args) => { + callCount++; + // Always error immediately without emitting any successful records + return new FakeReadSession({ + records: [], + errorAfterRecords: 0, + error: new S2Error({ message: "persistent error", status: 500 }), + }); + }, + { count: 10 }, + { retryBackoffDurationMs: 1, maxAttempts: 2 }, // Allow 2 retries + ); + + // Try to consume the stream - should fail after exhausting retries + await expect(async () => { + for await (const _record of session) { + // Should eventually fail + } + 
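+      // The public ReadSession surfaces the terminal error by throwing from
+      // iteration, so the loop only exits via the rejection asserted below.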
}).rejects.toMatchObject({ + message: expect.stringContaining("persistent error"), + }); + + // Should have tried 3 times (initial + 2 retries) + expect(callCount).toBe(3); + }); + + it("does not double-subtract count across multiple retries", async () => { + // First attempt emits 30 then errors, second emits 40 then errors, third succeeds + const records1: ReadRecord<"string">[] = Array.from( + { length: 30 }, + (_, i) => ({ seq_num: i, timestamp: 0, body: "a" }), + ); + const records2: ReadRecord<"string">[] = Array.from( + { length: 40 }, + (_, i) => ({ seq_num: 30 + i, timestamp: 0, body: "b" }), + ); + + let call = 0; + const capturedArgs: Array> = []; + + const session = await ReadSession.create( + async (args) => { + capturedArgs.push({ ...args }); + call++; + if (call === 1) { + // First call: 30 records then error + return new FakeReadSession({ + records: records1, + errorAfterRecords: 30, + error: new S2Error({ message: "transient", status: 500 }), + }); + } else if (call === 2) { + // Second call: 40 records then error + return new FakeReadSession({ + records: records2, + errorAfterRecords: 40, + error: new S2Error({ message: "transient", status: 500 }), + }); + } + // Third call: success (no more records to emit; just close) + return new FakeReadSession({ records: [] }); + }, + { seq_num: 0, count: 100 }, + { retryBackoffDurationMs: 1, maxAttempts: 2 }, + ); + + // Drain the session + for await (const _ of session) { + // consuming until completion + } + + // Expect args progression: 100 -> 70 -> 30 + expect(capturedArgs).toHaveLength(3); + expect(capturedArgs[0]?.count).toBe(100); + expect(capturedArgs[1]?.count).toBe(70); // 100 - 30 + expect(capturedArgs[2]?.count).toBe(30); // 100 - (30 + 40) + }); +}); diff --git a/src/tests/withS2Error.test.ts b/src/tests/withS2Error.test.ts new file mode 100644 index 0000000..0b59cce --- /dev/null +++ b/src/tests/withS2Error.test.ts @@ -0,0 +1,58 @@ +import { describe, expect, it } from "vitest"; +import { S2Error, withS2Error } from "../error.js"; + +describe("withS2Error response parsing", () => { + it("returns result when response has no error", async () => { + const value = await withS2Error(async () => ({ + data: { ok: 1 }, + error: undefined, + response: { status: 200, statusText: "OK" }, + })); + + expect(value).toMatchObject({ data: { ok: 1 }, response: { status: 200 } }); + }); + + it("throws S2Error with message/code/status when response.error has message", async () => { + const run = () => + withS2Error(async () => ({ + data: undefined, + error: { message: "Bad things", code: "BAD_THING" }, + response: { status: 400, statusText: "Bad Request" }, + })); + + await expect(run()).rejects.toMatchObject({ + name: "S2Error", + message: "Bad things", + code: "BAD_THING", + status: 400, + }); + }); + + it("falls back to HTTP statusText when error lacks message", async () => { + const run = () => + withS2Error(async () => ({ + data: undefined, + error: { something: "else" }, + response: { status: 502, statusText: "Bad Gateway" }, + })); + + await expect(run()).rejects.toMatchObject({ + name: "S2Error", + message: "Bad Gateway", + status: 502, + }); + }); + + it("wraps thrown errors as S2Error via s2Error()", async () => { + const run = () => + withS2Error(async () => { + throw new Error("boom"); + }); + + const err = await run().catch((e) => e as S2Error); + expect(err).toBeInstanceOf(S2Error); + expect(err.message).toBe("boom"); + // Generic thrown errors get status 0 in s2Error() + expect(err.status).toBe(0); + }); +}); diff --git 
a/src/utils.ts b/src/utils.ts index 71330e3..642da17 100644 --- a/src/utils.ts +++ b/src/utils.ts @@ -1,6 +1,7 @@ import type { AppendHeaders, AppendRecord as AppendRecordType, + ReadRecord, } from "./lib/stream/types.js"; export type AppendRecord = AppendRecordType; @@ -136,7 +137,9 @@ export function utf8ByteLength(str: string): number { * @param record The record to measure * @returns The size in bytes */ -export function meteredSizeBytes(record: AppendRecord): number { +export function meteredSizeBytes( + record: AppendRecord | ReadRecord, +): number { // Calculate header size based on actual data types let numHeaders = 0; let headersSize = 0;
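  // The widened AppendRecord | ReadRecord parameter lets read-side consumers
  // meter records the same way the append path does, e.g. (illustrative):
  //   const n = meteredSizeBytes({ seq_num: 0, timestamp: 0, body: "hi" });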