diff --git a/sdk/eventhub/ci.yml b/sdk/eventhub/ci.yml index 18dc714fea40..c17b63442263 100644 --- a/sdk/eventhub/ci.yml +++ b/sdk/eventhub/ci.yml @@ -27,7 +27,7 @@ extends: template: ../../eng/pipelines/templates/stages/archetype-sdk-client.yml parameters: ServiceDirectory: eventhub - RunUnitTests: false + RunUnitTests: true Artifacts: - name: azure-event-hubs safeName: azureeventhubs diff --git a/sdk/eventhub/event-hubs/karma.conf.js b/sdk/eventhub/event-hubs/karma.conf.js index 2708a0b7474c..78bb5e336440 100644 --- a/sdk/eventhub/event-hubs/karma.conf.js +++ b/sdk/eventhub/event-hubs/karma.conf.js @@ -51,7 +51,8 @@ module.exports = function(config) { envPreprocessor: [ "EVENTHUB_CONNECTION_STRING", "EVENTHUB_NAME", - "IOTHUB_EH_COMPATIBLE_CONNECTION_STRING" + "IOTHUB_EH_COMPATIBLE_CONNECTION_STRING", + "TEST_TARGET=live" ], // test results reporter to use diff --git a/sdk/eventhub/event-hubs/package.json b/sdk/eventhub/event-hubs/package.json index 10523529e883..f41fd2814310 100644 --- a/sdk/eventhub/event-hubs/package.json +++ b/sdk/eventhub/event-hubs/package.json @@ -32,7 +32,8 @@ } }, "browser": { - "./dist-esm/src/util/runtimeInfo.js": "./dist-esm/src/util/runtimeInfo.browser.js" + "./dist-esm/src/util/runtimeInfo.js": "./dist-esm/src/util/runtimeInfo.browser.js", + "./dist-esm/test/public/utils/mockService.js": "./dist-esm/test/public/utils/mockService.browser.js" }, "files": [ "dist/", @@ -57,8 +58,9 @@ "execute:samples": "npm run build:samples && echo Skipped.", "extract-api": "tsc -p . 
&& api-extractor run --local", "format": "prettier --write --config ../../../.prettierrc.json --ignore-path ../../../.prettierignore \"src/**/*.ts\" \"test/**/*.ts\" \"*.{js,json}\"", + "generate-certs": "node ./scripts/generateCerts.js", "integration-test:browser": "karma start --single-run", - "integration-test:node": "nyc mocha -r esm --require source-map-support/register --reporter ../../../common/tools/mocha-multi-reporter.js --timeout 1200000 --full-trace \"dist-esm/test/internal/*.spec.js\" \"dist-esm/test/public/*.spec.js\" \"dist-esm/test/public/**/*.spec.js\" \"dist-esm/test/internal/**/*.spec.js\"", + "integration-test:node": "cross-env TEST_TARGET=live nyc mocha -r esm --require source-map-support/register --reporter ../../../common/tools/mocha-multi-reporter.js --timeout 1200000 --full-trace \"dist-esm/test/internal/*.spec.js\" \"dist-esm/test/public/*.spec.js\" \"dist-esm/test/public/**/*.spec.js\" \"dist-esm/test/internal/**/*.spec.js\"", "integration-test": "npm run integration-test:node && npm run integration-test:browser", "lint:fix": "eslint package.json api-extractor.json src test --ext .ts --fix --fix-type [problem,suggestion]", "lint": "eslint package.json api-extractor.json src test --ext .ts", @@ -69,7 +71,7 @@ "test:node": "npm run build:test && npm run unit-test:node && npm run integration-test:node", "test": "npm run build:test && npm run unit-test && npm run integration-test", "unit-test:browser": "echo skipped", - "unit-test:node": "echo skipped", + "unit-test:node": "npm run generate-certs && cross-env NODE_EXTRA_CA_CERTS=\"./certs/my-private-root-ca.crt.pem\" TEST_TARGET=mock nyc mocha -r esm --require source-map-support/register --reporter ../../../common/tools/mocha-multi-reporter.js --timeout 1200000 --full-trace \"dist-esm/test/internal/*.spec.js\" \"dist-esm/test/public/*.spec.js\" \"dist-esm/test/public/**/*.spec.js\" \"dist-esm/test/internal/**/*.spec.js\"", "unit-test": "npm run unit-test:node && npm run unit-test:browser", 
"docs": "typedoc --excludePrivate --excludeNotExported --excludeExternals --stripInternal --mode file --out ./dist/docs ./src" }, @@ -107,6 +109,8 @@ "@azure/dev-tool": "^1.0.0", "@azure/eslint-plugin-azure-sdk": "^3.0.0", "@azure/identity": "^1.1.0", + "@azure/mock-hub": "^1.0.0", + "@azure/test-utils-multi-version": "^1.0.0", "@azure/test-utils-perfstress": "^1.0.0", "@microsoft/api-extractor": "7.7.11", "@rollup/plugin-commonjs": "11.0.2", diff --git a/sdk/eventhub/event-hubs/scripts/generateCerts.js b/sdk/eventhub/event-hubs/scripts/generateCerts.js new file mode 100644 index 000000000000..53ecb7917b18 --- /dev/null +++ b/sdk/eventhub/event-hubs/scripts/generateCerts.js @@ -0,0 +1,82 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +const { execFileSync } = require("child_process"); +const { mkdirSync } = require("fs"); +const { resolve: resolvePath } = require("path"); + +const cwd = process.cwd(); +const certsDirectory = resolvePath(cwd, "certs"); + +// Create `certs` directory. 
+console.log(`Creating ${certsDirectory}`); +try { + mkdirSync(certsDirectory); +} catch (err) { + if (err.code !== "EEXIST") { + throw err; + } +} + +// Create own Root Certificate Authority +execFileSync("openssl", [ + "genrsa", + "-out", + `${resolvePath(certsDirectory, "my-private-root-ca.key.pem")}`, + "2048" +]); + +// Self-sign Root Certificate Authority +execFileSync("openssl", [ + "req", + "-x509", + "-new", + "-nodes", + "-key", + `${resolvePath(certsDirectory, "my-private-root-ca.key.pem")}`, + "-days", + "5", + "-out", + `${resolvePath(certsDirectory, "my-private-root-ca.crt.pem")}`, + "-subj", + "/C=US/ST=Washington/L=Seattle/O=Fake Signing Authority/CN=fake.foo" +]); + +// Create a certificate for localhost +execFileSync("openssl", [ + "genrsa", + "-out", + `${resolvePath(certsDirectory, "my-server.key.pem")}`, + "2048" +]); + +// Create a request which the Root Certificate Authority will sign +execFileSync("openssl", [ + "req", + "-new", + "-key", + `${resolvePath(certsDirectory, "my-server.key.pem")}`, + "-out", + `${resolvePath(certsDirectory, "my-server.csr.pem")}`, + "-subj", + "/C=US/ST=Washington/L=Seattle/O=Fake Hubs/CN=localhost" +]); + +// Sign the request with the Root Certificate Authority +execFileSync("openssl", [ + "x509", + "-req", + "-in", + `${resolvePath(certsDirectory, "my-server.csr.pem")}`, + "-CA", + `${resolvePath(certsDirectory, "my-private-root-ca.crt.pem")}`, + "-CAkey", + `${resolvePath(certsDirectory, "my-private-root-ca.key.pem")}`, + "-CAcreateserial", + "-out", + `${resolvePath(certsDirectory, "my-server.crt.pem")}`, + "-days", + "5" +]); + +console.log(`Certs created.`); diff --git a/sdk/eventhub/event-hubs/test/internal/auth.spec.ts b/sdk/eventhub/event-hubs/test/internal/auth.spec.ts index a72125e1133b..ee7318493a2e 100644 --- a/sdk/eventhub/event-hubs/test/internal/auth.spec.ts +++ b/sdk/eventhub/event-hubs/test/internal/auth.spec.ts @@ -7,176 +7,202 @@ import { EventHubProducerClient, parseEventHubConnectionString 
} from "../../src"; -import { EnvVarKeys, getEnvVars } from "../public/utils/testUtils"; +import { EnvVarKeys, getEnvVars, getEnvVarValue, isNode } from "../public/utils/testUtils"; import chai from "chai"; import { AzureNamedKeyCredential, AzureSASCredential } from "@azure/core-auth"; import { createSasTokenProvider } from "@azure/core-amqp"; +import { versionsToTest } from "@azure/test-utils-multi-version"; +import { createMockServer } from "../public/utils/mockService"; const should = chai.should(); -const env = getEnvVars(); - -describe("Authentication via", () => { - const { - endpoint, - fullyQualifiedNamespace, - sharedAccessKey, - sharedAccessKeyName - } = parseEventHubConnectionString(env[EnvVarKeys.EVENTHUB_CONNECTION_STRING]); - const service = { - connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - path: env[EnvVarKeys.EVENTHUB_NAME], - endpoint: endpoint.replace(/\/+$/, "") - }; - - before(() => { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." 
- ); - }); - - describe("Keys", () => { - describe("using connection string", () => { - it("EventHubConsumerClient", async () => { - const consumerClient = new EventHubConsumerClient( - "$Default", - service.connectionString, - service.path - ); - - const properties = await consumerClient.getEventHubProperties(); - should.exist(properties); - - await consumerClient.close(); - }); - - it("EventHubProducerClient", async () => { - const producerClient = new EventHubProducerClient(service.connectionString, service.path); - - const properties = await producerClient.getEventHubProperties(); - should.exist(properties); - - await producerClient.close(); - }); - }); - - describe("using NamedKeyCredential", () => { - it("EventHubConsumerClient", async () => { - const namedKeyCredential = new AzureNamedKeyCredential( - sharedAccessKeyName!, - sharedAccessKey! - ); - const consumerClient = new EventHubConsumerClient( - "$Default", - fullyQualifiedNamespace, - service.path, - namedKeyCredential - ); - - const properties = await consumerClient.getEventHubProperties(); - should.exist(properties); - - await consumerClient.close(); - }); - - it("EventHubProducerClient", async () => { - const namedKeyCredential = new AzureNamedKeyCredential( - sharedAccessKeyName!, - sharedAccessKey! 
- ); - - const producerClient = new EventHubProducerClient( - fullyQualifiedNamespace, - service.path, - namedKeyCredential - ); - - const properties = await producerClient.getEventHubProperties(); - should.exist(properties); - - await producerClient.close(); - }); - }); - }); - - describe("SAS", () => { - function getSas(): string { - const parsed = parseEventHubConnectionString(service.connectionString) as Required< - | Pick - | Pick - >; - return createSasTokenProvider(parsed).getToken(`${service.endpoint}/${service.path}`).token; - } - - describe("using connection string", () => { - function getSasConnectionString(): string { - const sas = getSas(); - - return `Endpoint=${service.endpoint}/;SharedAccessSignature=${sas}`; +const serviceVersions = ["mock", "live"] as const; +const testTarget = getEnvVarValue("TEST_TARGET") || "live"; + +describe("internal/auth.spec.ts", function() { + versionsToTest(serviceVersions, { versionForRecording: testTarget }, (serviceVersion) => { + const env = getEnvVars(serviceVersion as "live" | "mock"); + if (isNode) { + if (serviceVersion === "mock") { + let service: ReturnType; + before("Starting mock server", async () => { + service = createMockServer(); + + return service.start(); + }); + after("Stopping mock server", async () => { + return service?.stop(); + }); } + } - it("EventHubConsumerClient", async () => { - const sasConnectionString = getSasConnectionString(); - - const consumerClient = new EventHubConsumerClient( - "$Default", - sasConnectionString, - service.path + describe("Authentication via", () => { + const { + endpoint, + fullyQualifiedNamespace, + sharedAccessKey, + sharedAccessKeyName + } = parseEventHubConnectionString(env[EnvVarKeys.EVENTHUB_CONNECTION_STRING]); + const service = { + connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + path: env[EnvVarKeys.EVENTHUB_NAME], + endpoint: endpoint.replace(/\/+$/, "") + }; + + before(() => { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + 
"define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." ); - - const properties = await consumerClient.getEventHubProperties(); - should.exist(properties); - - await consumerClient.close(); - }); - - it("EventHubProducerClient", async () => { - const sasConnectionString = getSasConnectionString(); - - const producerClient = new EventHubProducerClient(sasConnectionString, service.path); - - const properties = await producerClient.getEventHubProperties(); - should.exist(properties); - - await producerClient.close(); - }); - }); - - describe("using SASCredential", () => { - it("EventHubConsumerClient", async () => { - const sasCredential = new AzureSASCredential(getSas()); - - const consumerClient = new EventHubConsumerClient( - "$Default", - fullyQualifiedNamespace, - service.path, - sasCredential + should.exist( + env[EnvVarKeys.EVENTHUB_NAME], + "define EVENTHUB_NAME in your environment before running integration tests." ); - - const properties = await consumerClient.getEventHubProperties(); - should.exist(properties); - - await consumerClient.close(); }); - it("EventHubProducerClient", async () => { - const sasCredential = new AzureSASCredential(getSas()); - - const producerClient = new EventHubProducerClient( - fullyQualifiedNamespace, - service.path, - sasCredential - ); - - const properties = await producerClient.getEventHubProperties(); - should.exist(properties); + describe("Keys", () => { + describe("using connection string", () => { + it("EventHubConsumerClient", async () => { + const consumerClient = new EventHubConsumerClient( + "$Default", + service.connectionString, + service.path + ); + + const properties = await consumerClient.getEventHubProperties(); + should.exist(properties); + + await consumerClient.close(); + }); + + it("EventHubProducerClient", async () => { + const producerClient = new EventHubProducerClient( + service.connectionString, + service.path + ); + + const properties = await 
producerClient.getEventHubProperties(); + should.exist(properties); + + await producerClient.close(); + }); + }); + + describe("using NamedKeyCredential", () => { + it("EventHubConsumerClient", async () => { + const namedKeyCredential = new AzureNamedKeyCredential( + sharedAccessKeyName!, + sharedAccessKey! + ); + + const consumerClient = new EventHubConsumerClient( + "$Default", + fullyQualifiedNamespace, + service.path, + namedKeyCredential + ); + + const properties = await consumerClient.getEventHubProperties(); + should.exist(properties); + + await consumerClient.close(); + }); + + it("EventHubProducerClient", async () => { + const namedKeyCredential = new AzureNamedKeyCredential( + sharedAccessKeyName!, + sharedAccessKey! + ); + + const producerClient = new EventHubProducerClient( + fullyQualifiedNamespace, + service.path, + namedKeyCredential + ); + + const properties = await producerClient.getEventHubProperties(); + should.exist(properties); + + await producerClient.close(); + }); + }); + }); - await producerClient.close(); + describe("SAS", () => { + function getSas(): string { + const parsed = parseEventHubConnectionString(service.connectionString) as Required< + | Pick + | Pick + >; + return createSasTokenProvider(parsed).getToken(`${service.endpoint}/${service.path}`) + .token; + } + + describe("using connection string", () => { + function getSasConnectionString(): string { + const sas = getSas(); + + return `Endpoint=${service.endpoint}/;SharedAccessSignature=${sas}`; + } + + it("EventHubConsumerClient", async () => { + const sasConnectionString = getSasConnectionString(); + + const consumerClient = new EventHubConsumerClient( + "$Default", + sasConnectionString, + service.path + ); + + const properties = await consumerClient.getEventHubProperties(); + should.exist(properties); + + await consumerClient.close(); + }); + + it("EventHubProducerClient", async () => { + const sasConnectionString = getSasConnectionString(); + const producerClient = new 
EventHubProducerClient(sasConnectionString, service.path); + + const properties = await producerClient.getEventHubProperties(); + should.exist(properties); + + await producerClient.close(); + }); + }); + + describe("using SASCredential", () => { + it("EventHubConsumerClient", async () => { + const sasCredential = new AzureSASCredential(getSas()); + + const consumerClient = new EventHubConsumerClient( + "$Default", + fullyQualifiedNamespace, + service.path, + sasCredential + ); + + const properties = await consumerClient.getEventHubProperties(); + should.exist(properties); + + await consumerClient.close(); + }); + + it("EventHubProducerClient", async () => { + const sasCredential = new AzureSASCredential(getSas()); + + const producerClient = new EventHubProducerClient( + fullyQualifiedNamespace, + service.path, + sasCredential + ); + + const properties = await producerClient.getEventHubProperties(); + should.exist(properties); + + await producerClient.close(); + }); + }); }); }); }); diff --git a/sdk/eventhub/event-hubs/test/internal/client.spec.ts b/sdk/eventhub/event-hubs/test/internal/client.spec.ts index 760de8f22974..87dff826100f 100644 --- a/sdk/eventhub/event-hubs/test/internal/client.spec.ts +++ b/sdk/eventhub/event-hubs/test/internal/client.spec.ts @@ -16,12 +16,15 @@ import { Subscription } from "../../src"; import { packageJsonInfo } from "../../src/util/constants"; -import { EnvVarKeys, getEnvVars, isNode } from "../public/utils/testUtils"; +import { EnvVarKeys, getEnvVars, getEnvVarValue, isNode } from "../public/utils/testUtils"; import { MessagingError } from "@azure/core-amqp"; import { ConnectionContext } from "../../src/connectionContext"; import { getRuntimeInfo } from "../../src/util/runtimeInfo"; -const env = getEnvVars(); +import { versionsToTest } from "@azure/test-utils-multi-version"; +import { createMockServer } from "../public/utils/mockService"; +const serviceVersions = ["mock", "live"] as const; +const testTarget = 
getEnvVarValue("TEST_TARGET") || "live"; const testFailureMessage = "Test failure"; function validateConnectionError(err: E): void { should.exist(err.code, "Missing code on error object."); @@ -31,751 +34,786 @@ function validateConnectionError(err: E): v should.not.equal(err.message, testFailureMessage); } -describe("Create EventHubConsumerClient", function(): void { - it("throws when no EntityPath in connection string", function(): void { - const connectionString = "Endpoint=sb://abc"; - const test = function(): EventHubConsumerClient { - return new EventHubConsumerClient("dummy", connectionString); - }; - test.should.throw( - Error, - `Either provide "eventHubName" or the "connectionString": "${connectionString}", ` + - `must contain "EntityPath=".` - ); - }); +describe("internal/client.spec.ts", function() { + versionsToTest(serviceVersions, { versionForRecording: testTarget }, (serviceVersion) => { + const env = getEnvVars(serviceVersion as "live" | "mock"); + if (isNode) { + if (serviceVersion === "mock") { + let service: ReturnType; + before("Starting mock server", async () => { + service = createMockServer(); + + return service.start(); + }); + after("Stopping mock server", async () => { + return service?.stop(); + }); + } + } - it("throws when EntityPath in connection string doesn't match with event hub name parameter", function(): void { - const connectionString = - "Endpoint=sb://a;SharedAccessKeyName=b;SharedAccessKey=c=;EntityPath=my-event-hub-name"; - const eventHubName = "event-hub-name"; - const test = function(): EventHubConsumerClient { - return new EventHubConsumerClient("dummy", connectionString, eventHubName); - }; - test.should.throw( - Error, - `The entity path "my-event-hub-name" in connectionString: "${connectionString}" ` + - `doesn't match with eventHubName: "${eventHubName}".` - ); - }); + describe("Create EventHubConsumerClient", function(): void { + it("throws when no EntityPath in connection string", function(): void { + const 
connectionString = "Endpoint=sb://abc"; + const test = function(): EventHubConsumerClient { + return new EventHubConsumerClient("dummy", connectionString); + }; + test.should.throw( + Error, + `Either provide "eventHubName" or the "connectionString": "${connectionString}", ` + + `must contain "EntityPath=".` + ); + }); - it("sets eventHubName, fullyQualifiedNamespace properties when created from a connection string", function(): void { - const client = new EventHubConsumerClient( - "dummy", - "Endpoint=sb://test.servicebus.windows.net;SharedAccessKeyName=b;SharedAccessKey=c;EntityPath=my-event-hub-name" - ); - client.should.be.an.instanceof(EventHubConsumerClient); - should.equal(client.eventHubName, "my-event-hub-name"); - should.equal(client.fullyQualifiedNamespace, "test.servicebus.windows.net"); - }); + it("throws when EntityPath in connection string doesn't match with event hub name parameter", function(): void { + const connectionString = + "Endpoint=sb://a;SharedAccessKeyName=b;SharedAccessKey=c=;EntityPath=my-event-hub-name"; + const eventHubName = "event-hub-name"; + const test = function(): EventHubConsumerClient { + return new EventHubConsumerClient("dummy", connectionString, eventHubName); + }; + test.should.throw( + Error, + `The entity path "my-event-hub-name" in connectionString: "${connectionString}" ` + + `doesn't match with eventHubName: "${eventHubName}".` + ); + }); - it("sets eventHubName, fullyQualifiedNamespace properties when created from a connection string and event hub name", function(): void { - const client = new EventHubConsumerClient( - "dummy", - "Endpoint=sb://test.servicebus.windows.net;SharedAccessKeyName=b;SharedAccessKey=c", - "my-event-hub-name" - ); - client.should.be.an.instanceof(EventHubConsumerClient); - should.equal(client.eventHubName, "my-event-hub-name"); - should.equal(client.fullyQualifiedNamespace, "test.servicebus.windows.net"); - }); + it("sets eventHubName, fullyQualifiedNamespace properties when created from a 
connection string", function(): void { + const client = new EventHubConsumerClient( + "dummy", + "Endpoint=sb://test.servicebus.windows.net;SharedAccessKeyName=b;SharedAccessKey=c;EntityPath=my-event-hub-name" + ); + client.should.be.an.instanceof(EventHubConsumerClient); + should.equal(client.eventHubName, "my-event-hub-name"); + should.equal(client.fullyQualifiedNamespace, "test.servicebus.windows.net"); + }); + + it("sets eventHubName, fullyQualifiedNamespace properties when created from a connection string and event hub name", function(): void { + const client = new EventHubConsumerClient( + "dummy", + "Endpoint=sb://test.servicebus.windows.net;SharedAccessKeyName=b;SharedAccessKey=c", + "my-event-hub-name" + ); + client.should.be.an.instanceof(EventHubConsumerClient); + should.equal(client.eventHubName, "my-event-hub-name"); + should.equal(client.fullyQualifiedNamespace, "test.servicebus.windows.net"); + }); - it("sets eventHubName, fullyQualifiedNamespace properties when created from a token credential", function(): void { - const dummyCredential: TokenCredential = { - getToken: async () => { - return { - token: "boo", - expiresOnTimestamp: 12324 + it("sets eventHubName, fullyQualifiedNamespace properties when created from a token credential", function(): void { + const dummyCredential: TokenCredential = { + getToken: async () => { + return { + token: "boo", + expiresOnTimestamp: 12324 + }; + } }; - } - }; - const client = new EventHubConsumerClient( - "dummy", - "test.servicebus.windows.net", - "my-event-hub-name", - dummyCredential - ); - client.should.be.an.instanceof(EventHubConsumerClient); - should.equal(client.eventHubName, "my-event-hub-name"); - should.equal(client.fullyQualifiedNamespace, "test.servicebus.windows.net"); - }); + const client = new EventHubConsumerClient( + "dummy", + "test.servicebus.windows.net", + "my-event-hub-name", + dummyCredential + ); + client.should.be.an.instanceof(EventHubConsumerClient); + 
should.equal(client.eventHubName, "my-event-hub-name"); + should.equal(client.fullyQualifiedNamespace, "test.servicebus.windows.net"); + }); - it("respects customEndpointAddress when using connection string", () => { - const client = new EventHubConsumerClient( - "dummy", - "Endpoint=sb://test.servicebus.windows.net;SharedAccessKeyName=b;SharedAccessKey=c;EntityPath=my-event-hub-name", - { customEndpointAddress: "sb://foo.private.bar:111" } - ); - client.should.be.an.instanceof(EventHubConsumerClient); - client["_context"].config.host.should.equal("foo.private.bar"); - client["_context"].config.amqpHostname!.should.equal("test.servicebus.windows.net"); - client["_context"].config.port!.should.equal(111); - }); + it("respects customEndpointAddress when using connection string", () => { + const client = new EventHubConsumerClient( + "dummy", + "Endpoint=sb://test.servicebus.windows.net;SharedAccessKeyName=b;SharedAccessKey=c;EntityPath=my-event-hub-name", + { customEndpointAddress: "sb://foo.private.bar:111" } + ); + client.should.be.an.instanceof(EventHubConsumerClient); + client["_context"].config.host.should.equal("foo.private.bar"); + client["_context"].config.amqpHostname!.should.equal("test.servicebus.windows.net"); + client["_context"].config.port!.should.equal(111); + }); - it("respects customEndpointAddress when using credentials", () => { - const dummyCredential: TokenCredential = { - getToken: async () => { - return { - token: "boo", - expiresOnTimestamp: 12324 + it("respects customEndpointAddress when using credentials", () => { + const dummyCredential: TokenCredential = { + getToken: async () => { + return { + token: "boo", + expiresOnTimestamp: 12324 + }; + } }; - } - }; - const client = new EventHubConsumerClient( - "dummy", - "test.servicebus.windows.net", - "my-event-hub-name", - dummyCredential, - { customEndpointAddress: "sb://foo.private.bar:111" } - ); - client.should.be.an.instanceof(EventHubConsumerClient); - 
client["_context"].config.host.should.equal("foo.private.bar"); - client["_context"].config.amqpHostname!.should.equal("test.servicebus.windows.net"); - client["_context"].config.port!.should.equal(111); - }); -}); + const client = new EventHubConsumerClient( + "dummy", + "test.servicebus.windows.net", + "my-event-hub-name", + dummyCredential, + { customEndpointAddress: "sb://foo.private.bar:111" } + ); + client.should.be.an.instanceof(EventHubConsumerClient); + client["_context"].config.host.should.equal("foo.private.bar"); + client["_context"].config.amqpHostname!.should.equal("test.servicebus.windows.net"); + client["_context"].config.port!.should.equal(111); + }); + }); -describe("Create EventHubProducerClient", function(): void { - it("throws when no EntityPath in connection string ", function(): void { - const connectionString = "Endpoint=sb://abc"; - const test = function(): EventHubProducerClient { - return new EventHubProducerClient(connectionString); - }; - test.should.throw( - Error, - `Either provide "eventHubName" or the "connectionString": "${connectionString}", ` + - `must contain "EntityPath=".` - ); - }); + describe("Create EventHubProducerClient", function(): void { + it("throws when no EntityPath in connection string ", function(): void { + const connectionString = "Endpoint=sb://abc"; + const test = function(): EventHubProducerClient { + return new EventHubProducerClient(connectionString); + }; + test.should.throw( + Error, + `Either provide "eventHubName" or the "connectionString": "${connectionString}", ` + + `must contain "EntityPath=".` + ); + }); - it("throws when EntityPath in connection string doesn't match with event hub name parameter", function(): void { - const connectionString = - "Endpoint=sb://a;SharedAccessKeyName=b;SharedAccessKey=c=;EntityPath=my-event-hub-name"; - const eventHubName = "event-hub-name"; - const test = function(): EventHubProducerClient { - return new EventHubProducerClient(connectionString, eventHubName); - }; - 
test.should.throw( - Error, - `The entity path "my-event-hub-name" in connectionString: "${connectionString}" ` + - `doesn't match with eventHubName: "${eventHubName}".` - ); - }); + it("throws when EntityPath in connection string doesn't match with event hub name parameter", function(): void { + const connectionString = + "Endpoint=sb://a;SharedAccessKeyName=b;SharedAccessKey=c=;EntityPath=my-event-hub-name"; + const eventHubName = "event-hub-name"; + const test = function(): EventHubProducerClient { + return new EventHubProducerClient(connectionString, eventHubName); + }; + test.should.throw( + Error, + `The entity path "my-event-hub-name" in connectionString: "${connectionString}" ` + + `doesn't match with eventHubName: "${eventHubName}".` + ); + }); - it("sets eventHubName, fullyQualifiedNamespace properties when created from a connection string", function(): void { - const client = new EventHubProducerClient( - "Endpoint=sb://test.servicebus.windows.net;SharedAccessKeyName=b;SharedAccessKey=c;EntityPath=my-event-hub-name" - ); - client.should.be.an.instanceof(EventHubProducerClient); - should.equal(client.eventHubName, "my-event-hub-name"); - should.equal(client.fullyQualifiedNamespace, "test.servicebus.windows.net"); - }); + it("sets eventHubName, fullyQualifiedNamespace properties when created from a connection string", function(): void { + const client = new EventHubProducerClient( + "Endpoint=sb://test.servicebus.windows.net;SharedAccessKeyName=b;SharedAccessKey=c;EntityPath=my-event-hub-name" + ); + client.should.be.an.instanceof(EventHubProducerClient); + should.equal(client.eventHubName, "my-event-hub-name"); + should.equal(client.fullyQualifiedNamespace, "test.servicebus.windows.net"); + }); - it("sets eventHubName, fullyQualifiedNamespace properties when created from a connection string and event hub name", function(): void { - const client = new EventHubProducerClient( - 
"Endpoint=sb://test.servicebus.windows.net;SharedAccessKeyName=b;SharedAccessKey=c", - "my-event-hub-name" - ); - client.should.be.an.instanceof(EventHubProducerClient); - should.equal(client.eventHubName, "my-event-hub-name"); - should.equal(client.fullyQualifiedNamespace, "test.servicebus.windows.net"); - }); + it("sets eventHubName, fullyQualifiedNamespace properties when created from a connection string and event hub name", function(): void { + const client = new EventHubProducerClient( + "Endpoint=sb://test.servicebus.windows.net;SharedAccessKeyName=b;SharedAccessKey=c", + "my-event-hub-name" + ); + client.should.be.an.instanceof(EventHubProducerClient); + should.equal(client.eventHubName, "my-event-hub-name"); + should.equal(client.fullyQualifiedNamespace, "test.servicebus.windows.net"); + }); - it("sets eventHubName, fullyQualifiedNamespace properties when created from a token credential", function(): void { - const dummyCredential: TokenCredential = { - getToken: async () => { - return { - token: "boo", - expiresOnTimestamp: 12324 + it("sets eventHubName, fullyQualifiedNamespace properties when created from a token credential", function(): void { + const dummyCredential: TokenCredential = { + getToken: async () => { + return { + token: "boo", + expiresOnTimestamp: 12324 + }; + } }; - } - }; - const client = new EventHubProducerClient( - "test.servicebus.windows.net", - "my-event-hub-name", - dummyCredential - ); - client.should.be.an.instanceof(EventHubProducerClient); - should.equal(client.eventHubName, "my-event-hub-name"); - should.equal(client.fullyQualifiedNamespace, "test.servicebus.windows.net"); - }); + const client = new EventHubProducerClient( + "test.servicebus.windows.net", + "my-event-hub-name", + dummyCredential + ); + client.should.be.an.instanceof(EventHubProducerClient); + should.equal(client.eventHubName, "my-event-hub-name"); + should.equal(client.fullyQualifiedNamespace, "test.servicebus.windows.net"); + }); - it("respects 
customEndpointAddress when using connection string", () => { - const client = new EventHubProducerClient( - "Endpoint=sb://test.servicebus.windows.net;SharedAccessKeyName=b;SharedAccessKey=c;EntityPath=my-event-hub-name", - { customEndpointAddress: "sb://foo.private.bar:111" } - ); - client.should.be.an.instanceof(EventHubProducerClient); - client["_context"].config.host.should.equal("foo.private.bar"); - client["_context"].config.amqpHostname!.should.equal("test.servicebus.windows.net"); - client["_context"].config.port!.should.equal(111); - }); + it("respects customEndpointAddress when using connection string", () => { + const client = new EventHubProducerClient( + "Endpoint=sb://test.servicebus.windows.net;SharedAccessKeyName=b;SharedAccessKey=c;EntityPath=my-event-hub-name", + { customEndpointAddress: "sb://foo.private.bar:111" } + ); + client.should.be.an.instanceof(EventHubProducerClient); + client["_context"].config.host.should.equal("foo.private.bar"); + client["_context"].config.amqpHostname!.should.equal("test.servicebus.windows.net"); + client["_context"].config.port!.should.equal(111); + }); - it("respects customEndpointAddress when using credentials", () => { - const dummyCredential: TokenCredential = { - getToken: async () => { - return { - token: "boo", - expiresOnTimestamp: 12324 + it("respects customEndpointAddress when using credentials", () => { + const dummyCredential: TokenCredential = { + getToken: async () => { + return { + token: "boo", + expiresOnTimestamp: 12324 + }; + } }; - } - }; - const client = new EventHubProducerClient( - "test.servicebus.windows.net", - "my-event-hub-name", - dummyCredential, - { customEndpointAddress: "sb://foo.private.bar:111" } - ); - client.should.be.an.instanceof(EventHubProducerClient); - client["_context"].config.host.should.equal("foo.private.bar"); - client["_context"].config.amqpHostname!.should.equal("test.servicebus.windows.net"); - client["_context"].config.port!.should.equal(111); - }); -}); + const 
client = new EventHubProducerClient( + "test.servicebus.windows.net", + "my-event-hub-name", + dummyCredential, + { customEndpointAddress: "sb://foo.private.bar:111" } + ); + client.should.be.an.instanceof(EventHubProducerClient); + client["_context"].config.host.should.equal("foo.private.bar"); + client["_context"].config.amqpHostname!.should.equal("test.servicebus.windows.net"); + client["_context"].config.port!.should.equal(111); + }); + }); -describe("EventHubConsumerClient with non existent namespace", function(): void { - let client: EventHubConsumerClient; - beforeEach(() => { - client = new EventHubConsumerClient( - "$Default", - "Endpoint=sb://a;SharedAccessKeyName=b;SharedAccessKey=c;EntityPath=d" - ); - }); + describe("EventHubConsumerClient with non existent namespace", function(): void { + let client: EventHubConsumerClient; + beforeEach(() => { + client = new EventHubConsumerClient( + "$Default", + "Endpoint=sb://a;SharedAccessKeyName=b;SharedAccessKey=c;EntityPath=d" + ); + }); - afterEach(() => { - return client.close(); - }); + afterEach(() => { + return client.close(); + }); - it("should throw ServiceCommunicationError for getEventHubProperties", async function(): Promise< - void - > { - try { - await client.getEventHubProperties(); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - validateConnectionError(err); - } - }); + it("should throw ServiceCommunicationError for getEventHubProperties", async function(): Promise< + void + > { + try { + await client.getEventHubProperties(); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + validateConnectionError(err); + } + }); - it("should throw ServiceCommunicationError for getPartitionProperties", async function(): Promise< - void - > { - try { - await client.getPartitionProperties("0"); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - validateConnectionError(err); - } - }); + it("should throw ServiceCommunicationError for 
getPartitionProperties", async function(): Promise< + void + > { + try { + await client.getPartitionProperties("0"); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + validateConnectionError(err); + } + }); - it("should throw ServiceCommunicationError for getPartitionIds", async function(): Promise { - try { - await client.getPartitionIds(); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - validateConnectionError(err); - } - }); + it("should throw ServiceCommunicationError for getPartitionIds", async function(): Promise< + void + > { + try { + await client.getPartitionIds(); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + validateConnectionError(err); + } + }); - it("should throw ServiceCommunicationError while subscribe()", async function(): Promise { - let subscription: Subscription | undefined; - const caughtErr = await new Promise((resolve) => { - subscription = client.subscribe({ - processEvents: async () => { - /* no-op */ - }, - processError: async (err) => { - resolve(err); + it("should throw ServiceCommunicationError while subscribe()", async function(): Promise< + void + > { + let subscription: Subscription | undefined; + const caughtErr = await new Promise((resolve) => { + subscription = client.subscribe({ + processEvents: async () => { + /* no-op */ + }, + processError: async (err) => { + resolve(err); + } + }); + }); + if (subscription) { + await subscription.close(); } + debug(caughtErr); + validateConnectionError(caughtErr); + await client.close(); }); }); - if (subscription) { - await subscription.close(); - } - debug(caughtErr); - validateConnectionError(caughtErr); - await client.close(); - }); -}); -describe("EventHubProducerClient with non existent namespace", function(): void { - let client: EventHubProducerClient; - beforeEach(() => { - client = new EventHubProducerClient( - "Endpoint=sb://a;SharedAccessKeyName=b;SharedAccessKey=c;EntityPath=d" - ); - }); - - afterEach(() 
=> { - return client.close(); - }); - - it("should throw ServiceCommunicationError for getEventHubProperties", async function(): Promise< - void - > { - try { - await client.getEventHubProperties(); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - validateConnectionError(err); - } - }); + describe("EventHubProducerClient with non existent namespace", function(): void { + let client: EventHubProducerClient; + beforeEach(() => { + client = new EventHubProducerClient( + "Endpoint=sb://a;SharedAccessKeyName=b;SharedAccessKey=c;EntityPath=d" + ); + }); - it("should throw ServiceCommunicationError for getPartitionProperties", async function(): Promise< - void - > { - try { - await client.getPartitionProperties("0"); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - validateConnectionError(err); - } - }); + afterEach(() => { + return client.close(); + }); - it("should throw ServiceCommunicationError for getPartitionIds", async function(): Promise { - try { - await client.getPartitionIds(); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - validateConnectionError(err); - } - }); + it("should throw ServiceCommunicationError for getEventHubProperties", async function(): Promise< + void + > { + try { + await client.getEventHubProperties(); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + validateConnectionError(err); + } + }); - it("should throw ServiceCommunicationError while sending", async function(): Promise { - try { - await client.sendBatch([{ body: "Hello World" }]); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - validateConnectionError(err); - } - }); + it("should throw ServiceCommunicationError for getPartitionProperties", async function(): Promise< + void + > { + try { + await client.getPartitionProperties("0"); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + validateConnectionError(err); + } + }); - it("should throw 
ServiceCommunicationError while creating a batch", async function(): Promise< - void - > { - try { - await client.createBatch(); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - validateConnectionError(err); - } - }); -}); + it("should throw ServiceCommunicationError for getPartitionIds", async function(): Promise< + void + > { + try { + await client.getPartitionIds(); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + validateConnectionError(err); + } + }); -describe("EventHubConsumerClient with non existent event hub", function(): void { - let client: EventHubConsumerClient; - const expectedErrCode = "MessagingEntityNotFoundError"; + it("should throw ServiceCommunicationError while sending", async function(): Promise { + try { + await client.sendBatch([{ body: "Hello World" }]); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + validateConnectionError(err); + } + }); - beforeEach(() => { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); + it("should throw ServiceCommunicationError while creating a batch", async function(): Promise< + void + > { + try { + await client.createBatch(); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + validateConnectionError(err); + } + }); + }); - client = new EventHubConsumerClient("dummy", env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], "bad"); - }); + describe("EventHubConsumerClient with non existent event hub", function(): void { + let client: EventHubConsumerClient; + const expectedErrCode = "MessagingEntityNotFoundError"; + + beforeEach(() => { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." 
+ ); + + client = new EventHubConsumerClient( + "dummy", + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "bad" + ); + }); - afterEach(() => { - return client.close(); - }); + afterEach(() => { + return client.close(); + }); - it("should throw MessagingEntityNotFoundError for getEventHubProperties", async function(): Promise< - void - > { - try { - await client.getEventHubProperties(); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.code, expectedErrCode); - } - }); + it("should throw MessagingEntityNotFoundError for getEventHubProperties", async function(): Promise< + void + > { + try { + await client.getEventHubProperties(); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.code, expectedErrCode); + } + }); - it("should throw MessagingEntityNotFoundError for getPartitionProperties", async function(): Promise< - void - > { - try { - await client.getPartitionProperties("0"); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.code, expectedErrCode); - } - }); + it("should throw MessagingEntityNotFoundError for getPartitionProperties", async function(): Promise< + void + > { + try { + await client.getPartitionProperties("0"); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.code, expectedErrCode); + } + }); - it("should throw MessagingEntityNotFoundError for getPartitionIds", async function(): Promise< - void - > { - try { - await client.getPartitionIds(); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.code, expectedErrCode); - } - }); + it("should throw MessagingEntityNotFoundError for getPartitionIds", async function(): Promise< + void + > { + try { + await client.getPartitionIds(); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.code, expectedErrCode); + } + }); - it("should throw MessagingEntityNotFoundError while 
subscribe()", async function(): Promise< - void - > { - let subscription: Subscription | undefined; - const caughtErr = await new Promise((resolve) => { - subscription = client.subscribe({ - processEvents: async () => { - /* no-op */ - }, - processError: async (err) => { - resolve(err); + it("should throw MessagingEntityNotFoundError while subscribe()", async function(): Promise< + void + > { + let subscription: Subscription | undefined; + const caughtErr = await new Promise((resolve) => { + subscription = client.subscribe({ + processEvents: async () => { + /* no-op */ + }, + processError: async (err) => { + resolve(err); + } + }); + }); + if (subscription) { + await subscription.close(); } + debug(caughtErr); + should.equal(caughtErr instanceof MessagingError && caughtErr.code, expectedErrCode); + await client.close(); }); }); - if (subscription) { - await subscription.close(); - } - debug(caughtErr); - should.equal(caughtErr instanceof MessagingError && caughtErr.code, expectedErrCode); - await client.close(); - }); -}); -describe("EventHubProducerClient with non existent event hub", function(): void { - let client: EventHubProducerClient; - const expectedErrCode = "MessagingEntityNotFoundError"; + describe("EventHubProducerClient with non existent event hub", function(): void { + let client: EventHubProducerClient; + const expectedErrCode = "MessagingEntityNotFoundError"; - beforeEach(() => { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); - client = new EventHubProducerClient(env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], "bad"); - }); + beforeEach(() => { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." 
+ ); + client = new EventHubProducerClient(env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], "bad"); + }); - afterEach(() => { - return client.close(); - }); + afterEach(() => { + return client.close(); + }); - it("should throw MessagingEntityNotFoundError for getEventHubProperties", async function(): Promise< - void - > { - try { - await client.getEventHubProperties(); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.code, expectedErrCode); - } - }); + it("should throw MessagingEntityNotFoundError for getEventHubProperties", async function(): Promise< + void + > { + try { + await client.getEventHubProperties(); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.code, expectedErrCode); + } + }); - it("should throw MessagingEntityNotFoundError for getPartitionProperties", async function(): Promise< - void - > { - try { - await client.getPartitionProperties("0"); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.code, expectedErrCode); - } - }); + it("should throw MessagingEntityNotFoundError for getPartitionProperties", async function(): Promise< + void + > { + try { + await client.getPartitionProperties("0"); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.code, expectedErrCode); + } + }); - it("should throw MessagingEntityNotFoundError for getPartitionIds", async function(): Promise< - void - > { - try { - await client.getPartitionIds(); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.code, expectedErrCode); - } - }); + it("should throw MessagingEntityNotFoundError for getPartitionIds", async function(): Promise< + void + > { + try { + await client.getPartitionIds(); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.code, expectedErrCode); + } + }); - it("should throw MessagingEntityNotFoundError while sending", async function(): 
Promise { - try { - await client.sendBatch([{ body: "Hello World" }]); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.code, expectedErrCode); - } - }); + it("should throw MessagingEntityNotFoundError while sending", async function(): Promise< + void + > { + try { + await client.sendBatch([{ body: "Hello World" }]); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.code, expectedErrCode); + } + }); - it("should throw MessagingEntityNotFoundError while creating a batch", async function(): Promise< - void - > { - try { - await client.createBatch(); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.code, expectedErrCode); - } - }); -}); + it("should throw MessagingEntityNotFoundError while creating a batch", async function(): Promise< + void + > { + try { + await client.createBatch(); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.code, expectedErrCode); + } + }); + }); -describe("EventHubConsumerClient User Agent String", function(): void { - beforeEach(() => { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); - - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." - ); - }); + describe("EventHubConsumerClient User Agent String", function(): void { + beforeEach(() => { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." + ); + + should.exist( + env[EnvVarKeys.EVENTHUB_NAME], + "define EVENTHUB_NAME in your environment before running integration tests." 
+ ); + }); - it("should correctly populate the default user agent", async function(): Promise { - const consumerClient = new EventHubConsumerClient( - "$Default", - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - env[EnvVarKeys.EVENTHUB_NAME] - ); - testUserAgentString(consumerClient["_context"]); - await consumerClient.close(); - }); + it("should correctly populate the default user agent", async function(): Promise { + const consumerClient = new EventHubConsumerClient( + "$Default", + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + env[EnvVarKeys.EVENTHUB_NAME] + ); + testUserAgentString(consumerClient["_context"]); + await consumerClient.close(); + }); - it("should correctly populate the custom user agent", async function(): Promise { - const customUserAgent = "boo"; - const consumerClient = new EventHubConsumerClient( - "$Default", - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - env[EnvVarKeys.EVENTHUB_NAME], - { userAgent: customUserAgent } - ); - testUserAgentString(consumerClient["_context"], customUserAgent); - await consumerClient.close(); - }); -}); + it("should correctly populate the custom user agent", async function(): Promise { + const customUserAgent = "boo"; + const consumerClient = new EventHubConsumerClient( + "$Default", + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + env[EnvVarKeys.EVENTHUB_NAME], + { userAgent: customUserAgent } + ); + testUserAgentString(consumerClient["_context"], customUserAgent); + await consumerClient.close(); + }); + }); -describe("EventHubProducerClient User Agent String", function(): void { - beforeEach(() => { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); - - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." 
- ); - }); + describe("EventHubProducerClient User Agent String", function(): void { + beforeEach(() => { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." + ); + + should.exist( + env[EnvVarKeys.EVENTHUB_NAME], + "define EVENTHUB_NAME in your environment before running integration tests." + ); + }); - it("should correctly populate the default user agent", async function(): Promise { - const producerClient = new EventHubProducerClient( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - env[EnvVarKeys.EVENTHUB_NAME] - ); - testUserAgentString(producerClient["_context"]); - await producerClient.close(); - }); + it("should correctly populate the default user agent", async function(): Promise { + const producerClient = new EventHubProducerClient( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + env[EnvVarKeys.EVENTHUB_NAME] + ); + testUserAgentString(producerClient["_context"]); + await producerClient.close(); + }); - it("should correctly populate the custom user agent", async function(): Promise { - const customUserAgent = "boo"; - const producerClient = new EventHubProducerClient( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - env[EnvVarKeys.EVENTHUB_NAME], - { userAgent: customUserAgent } - ); - testUserAgentString(producerClient["_context"], customUserAgent); - await producerClient.close(); - }); -}); + it("should correctly populate the custom user agent", async function(): Promise { + const customUserAgent = "boo"; + const producerClient = new EventHubProducerClient( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + env[EnvVarKeys.EVENTHUB_NAME], + { userAgent: customUserAgent } + ); + testUserAgentString(producerClient["_context"], customUserAgent); + await producerClient.close(); + }); + }); -function testUserAgentString(context: ConnectionContext, customValue?: string): void { - const packageVersion = packageJsonInfo.version; - const properties = 
context.connection.options.properties; - properties!["user-agent"].should.startWith( - `azsdk-js-azureeventhubs/${packageVersion} (${getRuntimeInfo()})` - ); - should.equal(properties!.product, "MSJSClient"); - should.equal(properties!.version, packageVersion); - if (isNode) { - should.equal(properties!.framework, `Node/${process.version}`); - } else { - should.equal(properties!.framework.startsWith("Browser/"), true); - } - should.exist(properties!.platform); - if (customValue) { - properties!["user-agent"].should.endWith(customValue); - } -} + function testUserAgentString(context: ConnectionContext, customValue?: string): void { + const packageVersion = packageJsonInfo.version; + const properties = context.connection.options.properties; + properties!["user-agent"].should.startWith( + `azsdk-js-azureeventhubs/${packageVersion} (${getRuntimeInfo()})` + ); + should.equal(properties!.product, "MSJSClient"); + should.equal(properties!.version, packageVersion); + if (isNode) { + should.equal(properties!.framework, `Node/${process.version}`); + } else { + should.equal(properties!.framework.startsWith("Browser/"), true); + } + should.exist(properties!.platform); + if (customValue) { + properties!["user-agent"].should.endWith(customValue); + } + } -describe("EventHubConsumerClient after close()", function(): void { - let client: EventHubConsumerClient; - const expectedErrorMsg = "The underlying AMQP connection is closed."; - - async function beforeEachTest(): Promise { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." 
- ); - client = new EventHubConsumerClient( - "$Default", - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - env[EnvVarKeys.EVENTHUB_NAME] - ); - - // Ensure that the connection is opened - await client.getPartitionIds(); - - // close(), so that we can then test the resulting error. - await client.close(); - } + describe("EventHubConsumerClient after close()", function(): void { + let client: EventHubConsumerClient; + const expectedErrorMsg = "The underlying AMQP connection is closed."; + + async function beforeEachTest(): Promise { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." + ); + should.exist( + env[EnvVarKeys.EVENTHUB_NAME], + "define EVENTHUB_NAME in your environment before running integration tests." + ); + client = new EventHubConsumerClient( + "$Default", + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + env[EnvVarKeys.EVENTHUB_NAME] + ); + + // Ensure that the connection is opened + await client.getPartitionIds(); + + // close(), so that we can then test the resulting error. 
+ await client.close(); + } - it("should throw connection closed error for getEventHubProperties", async function(): Promise< - void - > { - await beforeEachTest(); - try { - await client.getEventHubProperties(); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.message, expectedErrorMsg); - } - }); + it("should throw connection closed error for getEventHubProperties", async function(): Promise< + void + > { + await beforeEachTest(); + try { + await client.getEventHubProperties(); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.message, expectedErrorMsg); + } + }); - it("should throw connection closed error for getPartitionProperties", async function(): Promise< - void - > { - await beforeEachTest(); - try { - await client.getPartitionProperties("0"); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.message, expectedErrorMsg); - } - }); + it("should throw connection closed error for getPartitionProperties", async function(): Promise< + void + > { + await beforeEachTest(); + try { + await client.getPartitionProperties("0"); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.message, expectedErrorMsg); + } + }); - it("should throw connection closed error for getPartitionIds", async function(): Promise { - await beforeEachTest(); - try { - await client.getPartitionIds(); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.message, expectedErrorMsg); - } - }); + it("should throw connection closed error for getPartitionIds", async function(): Promise< + void + > { + await beforeEachTest(); + try { + await client.getPartitionIds(); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.message, expectedErrorMsg); + } + }); - it("should throw connection closed error while subscribe()", async function(): Promise { - await beforeEachTest(); - 
let subscription: Subscription | undefined; - const caughtErr = await new Promise((resolve) => { - subscription = client.subscribe({ - processEvents: async () => { - /* no-op */ - }, - processError: async (err) => { - resolve(err); + it("should throw connection closed error while subscribe()", async function(): Promise { + await beforeEachTest(); + let subscription: Subscription | undefined; + const caughtErr = await new Promise((resolve) => { + subscription = client.subscribe({ + processEvents: async () => { + /* no-op */ + }, + processError: async (err) => { + resolve(err); + } + }); + }); + if (subscription) { + await subscription.close(); } + debug(caughtErr); + should.equal(caughtErr.message, expectedErrorMsg); }); }); - if (subscription) { - await subscription.close(); - } - debug(caughtErr); - should.equal(caughtErr.message, expectedErrorMsg); - }); -}); -describe("EventHubProducerClient after close()", function(): void { - let client: EventHubProducerClient; - const expectedErrorMsg = "The underlying AMQP connection is closed."; - - async function beforeEachTest(): Promise { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." - ); - client = new EventHubProducerClient( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - env[EnvVarKeys.EVENTHUB_NAME] - ); - - // Ensure that the connection is opened - await client.getPartitionIds(); - - // close(), so that we can then test the resulting error. 
- await client.close(); - } + describe("EventHubProducerClient after close()", function(): void { + let client: EventHubProducerClient; + const expectedErrorMsg = "The underlying AMQP connection is closed."; + + async function beforeEachTest(): Promise { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." + ); + should.exist( + env[EnvVarKeys.EVENTHUB_NAME], + "define EVENTHUB_NAME in your environment before running integration tests." + ); + client = new EventHubProducerClient( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + env[EnvVarKeys.EVENTHUB_NAME] + ); + + // Ensure that the connection is opened + await client.getPartitionIds(); + + // close(), so that we can then test the resulting error. + await client.close(); + } - it("should throw connection closed error for getEventHubProperties", async function(): Promise< - void - > { - await beforeEachTest(); - try { - await client.getEventHubProperties(); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.message, expectedErrorMsg); - } - }); + it("should throw connection closed error for getEventHubProperties", async function(): Promise< + void + > { + await beforeEachTest(); + try { + await client.getEventHubProperties(); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.message, expectedErrorMsg); + } + }); - it("should throw connection closed error for getPartitionProperties", async function(): Promise< - void - > { - await beforeEachTest(); - try { - await client.getPartitionProperties("0"); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.message, expectedErrorMsg); - } - }); + it("should throw connection closed error for getPartitionProperties", async function(): Promise< + void + > { + await beforeEachTest(); + try { + await client.getPartitionProperties("0"); + throw new 
Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.message, expectedErrorMsg); + } + }); - it("should throw connection closed error for getPartitionIds", async function(): Promise { - await beforeEachTest(); - try { - await client.getPartitionIds(); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.message, expectedErrorMsg); - } - }); + it("should throw connection closed error for getPartitionIds", async function(): Promise< + void + > { + await beforeEachTest(); + try { + await client.getPartitionIds(); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.message, expectedErrorMsg); + } + }); - it("should throw connection closed error while sending", async function(): Promise { - await beforeEachTest(); - try { - await client.sendBatch([{ body: "Hello World" }]); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.message, expectedErrorMsg); - } - }); + it("should throw connection closed error while sending", async function(): Promise { + await beforeEachTest(); + try { + await client.sendBatch([{ body: "Hello World" }]); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.message, expectedErrorMsg); + } + }); - it("should throw connection closed error while creating a batch", async function(): Promise< - void - > { - await beforeEachTest(); - try { - await client.createBatch(); - throw new Error(testFailureMessage); - } catch (err) { - debug(err); - should.equal(err.message, expectedErrorMsg); - } + it("should throw connection closed error while creating a batch", async function(): Promise< + void + > { + await beforeEachTest(); + try { + await client.createBatch(); + throw new Error(testFailureMessage); + } catch (err) { + debug(err); + should.equal(err.message, expectedErrorMsg); + } + }); + }); }); }); diff --git 
a/sdk/eventhub/event-hubs/test/internal/eventHubConsumerClientUnitTests.spec.ts b/sdk/eventhub/event-hubs/test/internal/eventHubConsumerClientUnitTests.spec.ts index 382d16c8bc6f..de7feee897a9 100644 --- a/sdk/eventhub/event-hubs/test/internal/eventHubConsumerClientUnitTests.spec.ts +++ b/sdk/eventhub/event-hubs/test/internal/eventHubConsumerClientUnitTests.spec.ts @@ -12,7 +12,7 @@ import chai from "chai"; import { EnvVarKeys, getEnvVars } from "../public/utils/testUtils"; const should = chai.should(); -const env = getEnvVars(); +const env = getEnvVars("mock"); describe("EventHubConsumerClient", () => { const service = { diff --git a/sdk/eventhub/event-hubs/test/internal/eventProcessor.spec.ts b/sdk/eventhub/event-hubs/test/internal/eventProcessor.spec.ts index 249b3f550c91..be57e43db73f 100644 --- a/sdk/eventhub/event-hubs/test/internal/eventProcessor.spec.ts +++ b/sdk/eventhub/event-hubs/test/internal/eventProcessor.spec.ts @@ -19,7 +19,13 @@ import { EventHubConsumerClient, EventHubProducerClient } from "../../src"; -import { EnvVarKeys, getEnvVars, loopUntil } from "../public/utils/testUtils"; +import { + EnvVarKeys, + getEnvVars, + getEnvVarValue, + isNode, + loopUntil +} from "../public/utils/testUtils"; import { Dictionary, generate_uuid } from "rhea-promise"; import { EventProcessor, FullEventProcessorOptions } from "../../src/eventProcessor"; import { Checkpoint } from "../../src/partitionProcessor"; @@ -38,1909 +44,1955 @@ import { AbortController } from "@azure/abort-controller"; import { UnbalancedLoadBalancingStrategy } from "../../src/loadBalancerStrategies/unbalancedStrategy"; import { BalancedLoadBalancingStrategy } from "../../src/loadBalancerStrategies/balancedStrategy"; import { GreedyLoadBalancingStrategy } from "../../src/loadBalancerStrategies/greedyStrategy"; -const env = getEnvVars(); - -describe("Event Processor", function(): void { - const defaultOptions: FullEventProcessorOptions = { - maxBatchSize: 1, - maxWaitTimeInSeconds: 1, - 
ownerLevel: 0, - loopIntervalInMs: 10000, - loadBalancingStrategy: new UnbalancedLoadBalancingStrategy() - }; +import { versionsToTest } from "@azure/test-utils-multi-version"; +import { createMockServer } from "../public/utils/mockService"; + +const serviceVersions = ["mock", "live"] as const; +const testTarget = getEnvVarValue("TEST_TARGET") || "live"; + +describe("internal/eventProcessor.spec.ts", function() { + versionsToTest(serviceVersions, { versionForRecording: testTarget }, (serviceVersion) => { + const env = getEnvVars(serviceVersion as "live" | "mock"); + if (isNode) { + if (serviceVersion === "mock") { + let service: ReturnType; + before("Starting mock server", async () => { + service = createMockServer(); + + return service.start(); + }); + after("Stopping mock server", async () => { + return service?.stop(); + }); + } + } - const service = { - connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - path: env[EnvVarKeys.EVENTHUB_NAME] - }; - let producerClient: EventHubProducerClient; - let consumerClient: EventHubConsumerClient; - - before("validate environment", async function(): Promise { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." 
- ); - }); + describe("Event Processor", function(): void { + const defaultOptions: FullEventProcessorOptions = { + maxBatchSize: 1, + maxWaitTimeInSeconds: 1, + ownerLevel: 0, + loopIntervalInMs: 10000, + loadBalancingStrategy: new UnbalancedLoadBalancingStrategy() + }; - beforeEach("create the client", function() { - producerClient = new EventHubProducerClient(service.connectionString, service.path); - consumerClient = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path - ); - }); + const service = { + connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + path: env[EnvVarKeys.EVENTHUB_NAME] + }; + let producerClient: EventHubProducerClient; + let consumerClient: EventHubConsumerClient; - afterEach("close the connection", async function(): Promise { - await producerClient.close(); - await consumerClient.close(); - }); + before("validate environment", async function(): Promise { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." + ); + should.exist( + env[EnvVarKeys.EVENTHUB_NAME], + "define EVENTHUB_NAME in your environment before running integration tests." 
+ ); + }); - describe("unit tests", () => { - describe("_getStartingPosition", () => { - function createEventProcessor( - checkpointStore: CheckpointStore, - startPosition?: FullEventProcessorOptions["startPosition"] - ): EventProcessor { - return new EventProcessor( + beforeEach("create the client", function() { + producerClient = new EventHubProducerClient(service.connectionString, service.path); + consumerClient = new EventHubConsumerClient( EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - { - processEvents: async () => { - /* no-op */ - }, - processError: async () => { - /* no-op */ - } - }, - checkpointStore, - { - startPosition, - maxBatchSize: 1, - maxWaitTimeInSeconds: 1, - loadBalancingStrategy: defaultOptions.loadBalancingStrategy, - loopIntervalInMs: defaultOptions.loopIntervalInMs - } + service.connectionString, + service.path ); - } - - const emptyCheckpointStore = createCheckpointStore([]); - - function createCheckpointStore( - checkpointsForTest: Pick[] - ): CheckpointStore { - return { - claimOwnership: async () => { - return []; - }, - listCheckpoints: async () => { - return checkpointsForTest.map((cp) => { - return { - fullyQualifiedNamespace: "not-used-for-this-test", - consumerGroup: "not-used-for-this-test", - eventHubName: "not-used-for-this-test", - offset: cp.offset, - sequenceNumber: cp.sequenceNumber, - partitionId: cp.partitionId - }; - }); - }, - listOwnership: async () => { - return []; - }, - updateCheckpoint: async () => { - /* no-op */ - } - }; - } - - before(() => { - consumerClient["_context"].managementSession!.getEventHubProperties = async () => { - return Promise.resolve({ - name: "boo", - createdOn: new Date(), - partitionIds: ["0", "1"] - }); - }; }); - it("no checkpoint or user specified default", async () => { - const processor = createEventProcessor(emptyCheckpointStore); - - const eventPosition = await processor["_getStartingPosition"]("0"); - should.equal(isLatestPosition(eventPosition), 
true); + afterEach("close the connection", async function(): Promise { + await producerClient.close(); + await consumerClient.close(); }); - it("has a checkpoint", async () => { - const checkpointStore = createCheckpointStore([ - { - offset: 1009, - sequenceNumber: 1010, - partitionId: "0" + describe("unit tests", () => { + describe("_getStartingPosition", () => { + function createEventProcessor( + checkpointStore: CheckpointStore, + startPosition?: FullEventProcessorOptions["startPosition"] + ): EventProcessor { + return new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + { + processEvents: async () => { + /* no-op */ + }, + processError: async () => { + /* no-op */ + } + }, + checkpointStore, + { + startPosition, + maxBatchSize: 1, + maxWaitTimeInSeconds: 1, + loadBalancingStrategy: defaultOptions.loadBalancingStrategy, + loopIntervalInMs: defaultOptions.loopIntervalInMs + } + ); } - ]); - - const processor = createEventProcessor( - checkpointStore, - // checkpoints always win over the user's specified position - latestEventPosition - ); - const eventPosition = await processor["_getStartingPosition"]("0"); - eventPosition!.offset!.should.equal(1009); - should.not.exist(eventPosition!.sequenceNumber); - }); + const emptyCheckpointStore = createCheckpointStore([]); - it("checkpoint with falsy values", async () => { - // this caused a bug for us before - it's a perfectly valid offset - // but we were thrown off by its falsy-ness. 
(actually it was - // sequence number before but the concept is the same) - const checkpointStore = createCheckpointStore([ - { - offset: 0, - sequenceNumber: 0, - partitionId: "0" + function createCheckpointStore( + checkpointsForTest: Pick[] + ): CheckpointStore { + return { + claimOwnership: async () => { + return []; + }, + listCheckpoints: async () => { + return checkpointsForTest.map((cp) => { + return { + fullyQualifiedNamespace: "not-used-for-this-test", + consumerGroup: "not-used-for-this-test", + eventHubName: "not-used-for-this-test", + offset: cp.offset, + sequenceNumber: cp.sequenceNumber, + partitionId: cp.partitionId + }; + }); + }, + listOwnership: async () => { + return []; + }, + updateCheckpoint: async () => { + /* no-op */ + } + }; } - ]); - - const processor = createEventProcessor(checkpointStore); - const eventPosition = await processor["_getStartingPosition"]("0"); - eventPosition!.offset!.should.equal(0); - should.not.exist(eventPosition!.sequenceNumber); - }); + before(() => { + consumerClient["_context"].managementSession!.getEventHubProperties = async () => { + return Promise.resolve({ + name: "boo", + createdOn: new Date(), + partitionIds: ["0", "1"] + }); + }; + }); - it("using a single default event position for any partition", async () => { - const processor = createEventProcessor(emptyCheckpointStore, { offset: 1009 }); + it("no checkpoint or user specified default", async () => { + const processor = createEventProcessor(emptyCheckpointStore); - const eventPosition = await processor["_getStartingPosition"]("0"); - eventPosition!.offset!.should.equal(1009); - should.not.exist(eventPosition!.sequenceNumber); - }); + const eventPosition = await processor["_getStartingPosition"]("0"); + should.equal(isLatestPosition(eventPosition), true); + }); - it("using a fallback map", async () => { - const fallbackPositions = { "0": { offset: 2001 } }; - // we'll purposefully omit "1" which should act as "fallback to the fallback" which is 
earliest() + it("has a checkpoint", async () => { + const checkpointStore = createCheckpointStore([ + { + offset: 1009, + sequenceNumber: 1010, + partitionId: "0" + } + ]); - const processor = createEventProcessor(emptyCheckpointStore, fallbackPositions); + const processor = createEventProcessor( + checkpointStore, + // checkpoints always win over the user's specified position + latestEventPosition + ); - const eventPositionForPartitionZero = await processor["_getStartingPosition"]("0"); - eventPositionForPartitionZero!.offset!.should.equal(2001); - should.not.exist(eventPositionForPartitionZero!.sequenceNumber); + const eventPosition = await processor["_getStartingPosition"]("0"); + eventPosition!.offset!.should.equal(1009); + should.not.exist(eventPosition!.sequenceNumber); + }); - const eventPositionForPartitionOne = await processor["_getStartingPosition"]("1"); - should.equal(isLatestPosition(eventPositionForPartitionOne), true); - }); - }); - - describe("_handleSubscriptionError", () => { - let eventProcessor: EventProcessor; - let userCallback: (() => void) | undefined; - let errorFromCallback: Error | undefined; - let contextFromCallback: PartitionContext | undefined; - - beforeEach(() => { - userCallback = undefined; - errorFromCallback = undefined; - contextFromCallback = undefined; - - // note: we're not starting this event processor so there's nothing to stop() - // it's only here so we can call a few private methods on it. - eventProcessor = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - { - processEvents: async () => { - /* no-op */ - }, - processError: async (err, context) => { - // simulate the user messing up and accidentally throwing an error - // we should just log it and not kill anything. 
- errorFromCallback = err; - contextFromCallback = context; - - if (userCallback) { - userCallback(); + it("checkpoint with falsy values", async () => { + // this caused a bug for us before - it's a perfectly valid offset + // but we were thrown off by its falsy-ness. (actually it was + // sequence number before but the concept is the same) + const checkpointStore = createCheckpointStore([ + { + offset: 0, + sequenceNumber: 0, + partitionId: "0" } - } - }, - new InMemoryCheckpointStore(), - defaultOptions - ); - }); + ]); - it("error thrown from user's processError handler", async () => { - // the user's error handler will throw an error - won't escape from this function - userCallback = () => { - throw new Error("Error thrown from the user's error handler"); - }; + const processor = createEventProcessor(checkpointStore); - await eventProcessor["_handleSubscriptionError"](new Error("test error")); + const eventPosition = await processor["_getStartingPosition"]("0"); + eventPosition!.offset!.should.equal(0); + should.not.exist(eventPosition!.sequenceNumber); + }); - errorFromCallback!.message.should.equal("test error"); - contextFromCallback!.partitionId.should.equal(""); - }); + it("using a single default event position for any partition", async () => { + const processor = createEventProcessor(emptyCheckpointStore, { offset: 1009 }); - it("non-useful errors are filtered out", async () => { - // the user's error handler will throw an error - won't escape from this function + const eventPosition = await processor["_getStartingPosition"]("0"); + eventPosition!.offset!.should.equal(1009); + should.not.exist(eventPosition!.sequenceNumber); + }); - await eventProcessor["_handleSubscriptionError"](new AbortError("test error")); + it("using a fallback map", async () => { + const fallbackPositions = { "0": { offset: 2001 } }; + // we'll purposefully omit "1" which should act as "fallback to the fallback" which is earliest() - // we don't call the user's handler for abort 
errors - should.not.exist(errorFromCallback); - should.not.exist(contextFromCallback); - }); - }); - - it("if we fail to claim partitions we don't start up new processors", async () => { - const checkpointStore = { - claimOwnershipCalled: false, - - // the important thing is that the EventProcessor won't be able to claim - // any partitions, causing it to go down the "I tried but failed" path. - async claimOwnership(_: PartitionOwnership[]): Promise { - checkpointStore.claimOwnershipCalled = true; - return []; - }, - - // (these aren't used for this test) - async listOwnership(): Promise { - return []; - }, - async updateCheckpoint(): Promise { - /* no-op */ - }, - async listCheckpoints(): Promise { - return []; - } - }; + const processor = createEventProcessor(emptyCheckpointStore, fallbackPositions); - const pumpManager = { - createPumpCalled: false, + const eventPositionForPartitionZero = await processor["_getStartingPosition"]("0"); + eventPositionForPartitionZero!.offset!.should.equal(2001); + should.not.exist(eventPositionForPartitionZero!.sequenceNumber); - async createPump() { - pumpManager.createPumpCalled = true; - }, + const eventPositionForPartitionOne = await processor["_getStartingPosition"]("1"); + should.equal(isLatestPosition(eventPositionForPartitionOne), true); + }); + }); - async removeAllPumps() { - /* no-op */ - }, + describe("_handleSubscriptionError", () => { + let eventProcessor: EventProcessor; + let userCallback: (() => void) | undefined; + let errorFromCallback: Error | undefined; + let contextFromCallback: PartitionContext | undefined; + + beforeEach(() => { + userCallback = undefined; + errorFromCallback = undefined; + contextFromCallback = undefined; + + // note: we're not starting this event processor so there's nothing to stop() + // it's only here so we can call a few private methods on it. 
+ eventProcessor = new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + { + processEvents: async () => { + /* no-op */ + }, + processError: async (err, context) => { + // simulate the user messing up and accidentally throwing an error + // we should just log it and not kill anything. + errorFromCallback = err; + contextFromCallback = context; + + if (userCallback) { + userCallback(); + } + } + }, + new InMemoryCheckpointStore(), + defaultOptions + ); + }); - isReceivingFromPartition() { - return false; - }, + it("error thrown from user's processError handler", async () => { + // the user's error handler will throw an error - won't escape from this function + userCallback = () => { + throw new Error("Error thrown from the user's error handler"); + }; - receivingFromPartitions() { - return []; - } - }; + await eventProcessor["_handleSubscriptionError"](new Error("test error")); - const eventProcessor = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - { - processEvents: async () => { - /* no-op */ - }, - processError: async () => { - /* no-op */ - } - }, - checkpointStore, - { - ...defaultOptions, - pumpManager: pumpManager - } - ); - - await eventProcessor["_claimOwnership"]( - { - consumerGroup: "cgname", - eventHubName: "ehname", - fullyQualifiedNamespace: "fqdn", - ownerId: "owner", - partitionId: "0" - }, - new AbortController().signal - ); - - // when we fail to claim a partition we should _definitely_ - // not attempt to start a pump. 
- should.equal(pumpManager.createPumpCalled, false); - - // we'll attempt to claim a partition (but won't succeed) - should.equal(checkpointStore.claimOwnershipCalled, true); - }); - - it("abandoned claims are treated as unowned claims", async () => { - const commonFields = { - fullyQualifiedNamespace: "irrelevant namespace", - eventHubName: "irrelevant eventhub name", - consumerGroup: "irrelevant consumer group" - }; + errorFromCallback!.message.should.equal("test error"); + contextFromCallback!.partitionId.should.equal(""); + }); - const handlers = new FakeSubscriptionEventHandlers(); - const checkpointStore = new InMemoryCheckpointStore(); + it("non-useful errors are filtered out", async () => { + // the user's error handler will throw an error - won't escape from this function - const originalClaimedPartitions = await checkpointStore.claimOwnership([ - // abandoned claim - { ...commonFields, partitionId: "1001", ownerId: "", etag: "abandoned etag" }, - // normally owned claim - { ...commonFields, partitionId: "1002", ownerId: "owned partition", etag: "owned etag" } - // 1003 - completely unowned - ]); + await eventProcessor["_handleSubscriptionError"](new AbortError("test error")); - originalClaimedPartitions.sort((a, b) => a.partitionId.localeCompare(b.partitionId)); + // we don't call the user's handler for abort errors + should.not.exist(errorFromCallback); + should.not.exist(contextFromCallback); + }); + }); - const partitionIds = ["1001", "1002", "1003"]; + it("if we fail to claim partitions we don't start up new processors", async () => { + const checkpointStore = { + claimOwnershipCalled: false, - const fakeConnectionContext = { - managementSession: { - getEventHubProperties: async () => { - return { - partitionIds - }; - } - }, - config: { - entityPath: commonFields.eventHubName, - host: commonFields.fullyQualifiedNamespace - } - }; + // the important thing is that the EventProcessor won't be able to claim + // any partitions, causing it to go down the 
"I tried but failed" path. + async claimOwnership(_: PartitionOwnership[]): Promise { + checkpointStore.claimOwnershipCalled = true; + return []; + }, - const ep = new EventProcessor( - commonFields.consumerGroup, - fakeConnectionContext as any, - handlers, - checkpointStore, - { - maxBatchSize: 1, - loopIntervalInMs: 1, - maxWaitTimeInSeconds: 1, - pumpManager: { - async createPump() { - /* no-op */ + // (these aren't used for this test) + async listOwnership(): Promise { + return []; }, - async removeAllPumps(): Promise { + async updateCheckpoint(): Promise { /* no-op */ }, - isReceivingFromPartition() { - return false; + async listCheckpoints(): Promise { + return []; } - }, - loadBalancingStrategy: new BalancedLoadBalancingStrategy(60000) - } - ); - - // allow three iterations through the loop - one for each partition that - // we expect to be claimed - // - // we'll let one more go through just to make sure we're not going to - // pick up an extra surprise partition - // - // There are 6 places where the abort signal is checked during the loop: - // - while condition - // - getEventHubProperties - // - _performLoadBalancing (start) - // - _performLoadBalancing (after listOwnership) - // - _performLoadBalancing (passed to _claimOwnership) - // - delay - const numTimesAbortedIsCheckedInLoop = 6; - await ep["_runLoopWithLoadBalancing"]( - ep["_loadBalancingStrategy"], - triggerAbortedSignalAfterNumCalls(partitionIds.length * numTimesAbortedIsCheckedInLoop) - ); - - handlers.errors.should.deep.equal([]); - - const currentOwnerships = await checkpointStore.listOwnership( - commonFields.fullyQualifiedNamespace, - commonFields.eventHubName, - commonFields.consumerGroup - ); - currentOwnerships.sort((a, b) => a.partitionId.localeCompare(b.partitionId)); - - currentOwnerships.should.deep.equal([ - { - ...commonFields, - partitionId: "1001", - ownerId: ep.id, - etag: currentOwnerships[0].etag, - lastModifiedTimeInMs: currentOwnerships[0].lastModifiedTimeInMs - }, - // 
1002 is not going to be claimed since it's already owned so it should be untouched - originalClaimedPartitions[1], - { - ...commonFields, - partitionId: "1003", - ownerId: ep.id, - etag: currentOwnerships[2].etag, - lastModifiedTimeInMs: currentOwnerships[2].lastModifiedTimeInMs - } - ]); - - // now let's "unclaim" everything by stopping our event processor - await ep.stop(); - - // sanity check - we were previously modifying the original instances - // in place which...isn't right. - currentOwnerships.should.deep.equal([ - { - ...commonFields, - partitionId: "1001", - ownerId: ep.id, - etag: currentOwnerships[0].etag, - lastModifiedTimeInMs: currentOwnerships[0].lastModifiedTimeInMs - }, - // 1002 is not going to be claimed since it's already owned so it should be untouched - originalClaimedPartitions[1], - { - ...commonFields, - partitionId: "1003", - ownerId: ep.id, - etag: currentOwnerships[2].etag, - lastModifiedTimeInMs: currentOwnerships[2].lastModifiedTimeInMs - } - ]); - - const ownershipsAfterStop = await checkpointStore.listOwnership( - commonFields.fullyQualifiedNamespace, - commonFields.eventHubName, - commonFields.consumerGroup - ); - ownershipsAfterStop.sort((a, b) => a.partitionId.localeCompare(b.partitionId)); - - ownershipsAfterStop.should.deep.equal([ - { - ...commonFields, - partitionId: "1001", - ownerId: "", - etag: ownershipsAfterStop[0].etag, - lastModifiedTimeInMs: ownershipsAfterStop[0].lastModifiedTimeInMs - }, - // 1002 is not going to be claimed since it's already owned so it should be untouched - originalClaimedPartitions[1], - { - ...commonFields, - partitionId: "1003", - ownerId: "", - etag: ownershipsAfterStop[2].etag, - lastModifiedTimeInMs: ownershipsAfterStop[2].lastModifiedTimeInMs - } - ]); - }); - }); - - it("claimOwnership throws and is reported to the user", async () => { - const errors = []; - const partitionIds = await consumerClient.getPartitionIds(); - - const faultyCheckpointStore: CheckpointStore = { - listOwnership: 
async () => [], - claimOwnership: async () => { - throw new Error("Some random failure!"); - }, - updateCheckpoint: async () => { - /* no-op */ - }, - listCheckpoints: async () => [] - }; - - const eventProcessor = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - { - processEvents: async () => { - /* no-op */ - }, - processError: async (err, _) => { - errors.push(err); - } - }, - faultyCheckpointStore, - { - ...defaultOptions - } - ); - - // claimOwnership() calls that fail in the runloop of eventProcessor - // will get directed to the user's processError handler. - eventProcessor.start(); - - try { - await loopUntil({ - name: "waiting for checkpoint store errors to show up", - timeBetweenRunsMs: 1000, - maxTimes: 30, - until: async () => errors.length !== 0 - }); - - errors.length.should.equal(partitionIds.length); - } finally { - // this will also fail - we "abandon" all claimed partitions at - // when a processor is stopped (which requires us to claim them - // with an empty owner ID). - // - // Note that this one gets thrown directly from stop(), rather - // than reporting to processError() since we have a direct - // point of contact with the user. 
- await eventProcessor.stop().should.be.rejectedWith(/Some random failure!/); - } - }); + }; - it("errors thrown from the user's handlers are reported to processError()", async () => { - const errors = new Set(); - const partitionIds = await consumerClient.getPartitionIds(); - - const processCloseErrorMessage = "processClose() error"; - const processEventsErrorMessage = "processEvents() error"; - const processInitializeErrorMessage = "processInitialize() error"; - const expectedErrorMessages: string[] = []; - for (let i = 0; i < partitionIds.length; i++) { - expectedErrorMessages.push( - processCloseErrorMessage, - processEventsErrorMessage, - processInitializeErrorMessage - ); - } - expectedErrorMessages.sort(); - - const eventProcessor = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - { - processClose: async () => { - throw new Error(processCloseErrorMessage); - }, - processEvents: async () => { - throw new Error(processEventsErrorMessage); - }, - processInitialize: async () => { - throw new Error(processInitializeErrorMessage); - }, - processError: async (err, _) => { - errors.add(err); - throw new Error("These are logged but ignored"); - } - }, - new InMemoryCheckpointStore(), - { - ...defaultOptions, - startPosition: earliestEventPosition - } - ); - - // errors that occur within the user's own event handlers will get - // routed to their processError() handler - eventProcessor.start(); - console.log("event processor started"); - try { - await loopUntil({ - name: "waiting for errors thrown from user's handlers", - timeBetweenRunsMs: 1000, - maxTimes: 30, - until: async () => { - console.log(partitionIds.length); - console.dir(errors); - return errors.size >= partitionIds.length * 3; - } - }); - console.log("event processor loop completed"); - const messages = [...errors].map((e) => e.message); - messages.sort(); - console.dir(messages); - console.dir(expectedErrorMessages); - 
messages.should.deep.equal(expectedErrorMessages); - } finally { - console.log("attempting to stop"); - await eventProcessor.stop(); - console.log("stopped"); - } - }); + const pumpManager = { + createPumpCalled: false, - it("should expose an id", async function(): Promise { - const processor = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - { - processEvents: async () => { - /* no-op */ - }, - processError: async () => { - /* no-op */ - } - }, - new InMemoryCheckpointStore(), - { - ...defaultOptions, - startPosition: latestEventPosition - } - ); + async createPump() { + pumpManager.createPumpCalled = true; + }, - const id = processor.id; - id.length.should.be.gt(1); - }); + async removeAllPumps() { + /* no-op */ + }, - it("id can be forced to be a specific value", async function(): Promise { - const processor = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - { - processEvents: async () => { - /* no-op */ - }, - processError: async () => { - /* no-op */ - } - }, - new InMemoryCheckpointStore(), - { ...defaultOptions, ownerId: "hello", startPosition: latestEventPosition } - ); + isReceivingFromPartition() { + return false; + }, - processor.id.should.equal("hello"); - }); + receivingFromPartitions() { + return []; + } + }; - it("should treat consecutive start invocations as idempotent", async function(): Promise { - const partitionIds = await producerClient.getPartitionIds(); - - // ensure we have at least 2 partitions - partitionIds.length.should.gte(2); - - const { - subscriptionEventHandler, - startPosition - } = await SubscriptionHandlerForTests.startingFromHere(producerClient); - - const processor = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - subscriptionEventHandler, - new InMemoryCheckpointStore(), - { - ...defaultOptions, - startPosition: startPosition - } - ); + const eventProcessor = new 
EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + { + processEvents: async () => { + /* no-op */ + }, + processError: async () => { + /* no-op */ + } + }, + checkpointStore, + { + ...defaultOptions, + pumpManager: pumpManager + } + ); - processor.start(); - processor.start(); - processor.start(); + await eventProcessor["_claimOwnership"]( + { + consumerGroup: "cgname", + eventHubName: "ehname", + fullyQualifiedNamespace: "fqdn", + ownerId: "owner", + partitionId: "0" + }, + new AbortController().signal + ); - const expectedMessages = await sendOneMessagePerPartition(partitionIds, producerClient); - const receivedEvents = await subscriptionEventHandler.waitForEvents(partitionIds); + // when we fail to claim a partition we should _definitely_ + // not attempt to start a pump. + should.equal(pumpManager.createPumpCalled, false); - // shutdown the processor - await processor.stop(); + // we'll attempt to claim a partition (but won't succeed) + should.equal(checkpointStore.claimOwnershipCalled, true); + }); - receivedEvents.should.deep.equal(expectedMessages); + it("abandoned claims are treated as unowned claims", async () => { + const commonFields = { + fullyQualifiedNamespace: "irrelevant namespace", + eventHubName: "irrelevant eventhub name", + consumerGroup: "irrelevant consumer group" + }; - subscriptionEventHandler.hasErrors(partitionIds).should.equal(false); - subscriptionEventHandler.allShutdown(partitionIds).should.equal(true); - }); + const handlers = new FakeSubscriptionEventHandlers(); + const checkpointStore = new InMemoryCheckpointStore(); - it("should not throw if stop is called without start", async function(): Promise { - let didPartitionProcessorStart = false; - - const processor = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - { - processInitialize: async () => { - didPartitionProcessorStart = true; - }, - processEvents: async () => { - /* no-op */ 
- }, - processError: async () => { - /* no-op */ - } - }, - new InMemoryCheckpointStore(), - { - ...defaultOptions, - startPosition: latestEventPosition - } - ); + const originalClaimedPartitions = await checkpointStore.claimOwnership([ + // abandoned claim + { ...commonFields, partitionId: "1001", ownerId: "", etag: "abandoned etag" }, + // normally owned claim + { ...commonFields, partitionId: "1002", ownerId: "owned partition", etag: "owned etag" } + // 1003 - completely unowned + ]); - // shutdown the processor - await processor.stop(); + originalClaimedPartitions.sort((a, b) => a.partitionId.localeCompare(b.partitionId)); - didPartitionProcessorStart.should.equal(false); - }); + const partitionIds = ["1001", "1002", "1003"]; - it("should support start after stopping", async function(): Promise { - const partitionIds = await producerClient.getPartitionIds(); - - // ensure we have at least 2 partitions - partitionIds.length.should.gte(2); - - const { - subscriptionEventHandler, - startPosition - } = await SubscriptionHandlerForTests.startingFromHere(producerClient); - - const processor = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - subscriptionEventHandler, - new InMemoryCheckpointStore(), - { - ...defaultOptions, - startPosition: startPosition - } - ); + const fakeConnectionContext = { + managementSession: { + getEventHubProperties: async () => { + return { + partitionIds + }; + } + }, + config: { + entityPath: commonFields.eventHubName, + host: commonFields.fullyQualifiedNamespace + } + }; - loggerForTest(`Starting processor for the first time`); - processor.start(); + const ep = new EventProcessor( + commonFields.consumerGroup, + fakeConnectionContext as any, + handlers, + checkpointStore, + { + maxBatchSize: 1, + loopIntervalInMs: 1, + maxWaitTimeInSeconds: 1, + pumpManager: { + async createPump() { + /* no-op */ + }, + async removeAllPumps(): Promise { + /* no-op */ + }, + isReceivingFromPartition() 
{ + return false; + } + }, + loadBalancingStrategy: new BalancedLoadBalancingStrategy(60000) + } + ); - const expectedMessages = await sendOneMessagePerPartition(partitionIds, producerClient); - const receivedEvents = await subscriptionEventHandler.waitForEvents(partitionIds); + // allow three iterations through the loop - one for each partition that + // we expect to be claimed + // + // we'll let one more go through just to make sure we're not going to + // pick up an extra surprise partition + // + // There are 6 places where the abort signal is checked during the loop: + // - while condition + // - getEventHubProperties + // - _performLoadBalancing (start) + // - _performLoadBalancing (after listOwnership) + // - _performLoadBalancing (passed to _claimOwnership) + // - delay + const numTimesAbortedIsCheckedInLoop = 6; + await ep["_runLoopWithLoadBalancing"]( + ep["_loadBalancingStrategy"], + triggerAbortedSignalAfterNumCalls(partitionIds.length * numTimesAbortedIsCheckedInLoop) + ); - loggerForTest(`Stopping processor for the first time`); - await processor.stop(); + handlers.errors.should.deep.equal([]); - receivedEvents.should.deep.equal(expectedMessages); + const currentOwnerships = await checkpointStore.listOwnership( + commonFields.fullyQualifiedNamespace, + commonFields.eventHubName, + commonFields.consumerGroup + ); + currentOwnerships.sort((a, b) => a.partitionId.localeCompare(b.partitionId)); + + currentOwnerships.should.deep.equal([ + { + ...commonFields, + partitionId: "1001", + ownerId: ep.id, + etag: currentOwnerships[0].etag, + lastModifiedTimeInMs: currentOwnerships[0].lastModifiedTimeInMs + }, + // 1002 is not going to be claimed since it's already owned so it should be untouched + originalClaimedPartitions[1], + { + ...commonFields, + partitionId: "1003", + ownerId: ep.id, + etag: currentOwnerships[2].etag, + lastModifiedTimeInMs: currentOwnerships[2].lastModifiedTimeInMs + } + ]); + + // now let's "unclaim" everything by stopping our event 
processor + await ep.stop(); + + // sanity check - we were previously modifying the original instances + // in place which...isn't right. + currentOwnerships.should.deep.equal([ + { + ...commonFields, + partitionId: "1001", + ownerId: ep.id, + etag: currentOwnerships[0].etag, + lastModifiedTimeInMs: currentOwnerships[0].lastModifiedTimeInMs + }, + // 1002 is not going to be claimed since it's already owned so it should be untouched + originalClaimedPartitions[1], + { + ...commonFields, + partitionId: "1003", + ownerId: ep.id, + etag: currentOwnerships[2].etag, + lastModifiedTimeInMs: currentOwnerships[2].lastModifiedTimeInMs + } + ]); - subscriptionEventHandler.hasErrors(partitionIds).should.equal(false); - subscriptionEventHandler.allShutdown(partitionIds).should.equal(true); + const ownershipsAfterStop = await checkpointStore.listOwnership( + commonFields.fullyQualifiedNamespace, + commonFields.eventHubName, + commonFields.consumerGroup + ); + ownershipsAfterStop.sort((a, b) => a.partitionId.localeCompare(b.partitionId)); + + ownershipsAfterStop.should.deep.equal([ + { + ...commonFields, + partitionId: "1001", + ownerId: "", + etag: ownershipsAfterStop[0].etag, + lastModifiedTimeInMs: ownershipsAfterStop[0].lastModifiedTimeInMs + }, + // 1002 is not going to be claimed since it's already owned so it should be untouched + originalClaimedPartitions[1], + { + ...commonFields, + partitionId: "1003", + ownerId: "", + etag: ownershipsAfterStop[2].etag, + lastModifiedTimeInMs: ownershipsAfterStop[2].lastModifiedTimeInMs + } + ]); + }); + }); - // validate correct events captured for each partition + it("claimOwnership throws and is reported to the user", async () => { + const errors = []; + const partitionIds = await consumerClient.getPartitionIds(); - // start it again - loggerForTest(`Starting processor again`); - subscriptionEventHandler.clear(); + const faultyCheckpointStore: CheckpointStore = { + listOwnership: async () => [], + claimOwnership: async () => { + 
throw new Error("Some random failure!"); + }, + updateCheckpoint: async () => { + /* no-op */ + }, + listCheckpoints: async () => [] + }; - processor.start(); + const eventProcessor = new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + { + processEvents: async () => { + /* no-op */ + }, + processError: async (err, _) => { + errors.push(err); + } + }, + faultyCheckpointStore, + { + ...defaultOptions + } + ); - await subscriptionEventHandler.waitUntilInitialized(partitionIds); + // claimOwnership() calls that fail in the runloop of eventProcessor + // will get directed to the user's processError handler. + eventProcessor.start(); - loggerForTest(`Stopping processor again`); - await processor.stop(); + try { + await loopUntil({ + name: "waiting for checkpoint store errors to show up", + timeBetweenRunsMs: 1000, + maxTimes: 30, + until: async () => errors.length !== 0 + }); - subscriptionEventHandler.hasErrors(partitionIds).should.equal(false); - subscriptionEventHandler.allShutdown(partitionIds).should.equal(true); - }); + errors.length.should.equal(partitionIds.length); + } finally { + // this will also fail - we "abandon" all claimed partitions at + // when a processor is stopped (which requires us to claim them + // with an empty owner ID). + // + // Note that this one gets thrown directly from stop(), rather + // than reporting to processError() since we have a direct + // point of contact with the user. 
+ await eventProcessor.stop().should.be.rejectedWith(/Some random failure!/); + } + }); - describe("Partition processor", function(): void { - it("should support processing events across multiple partitions", async function(): Promise< - void - > { - const partitionIds = await producerClient.getPartitionIds(); - const { - subscriptionEventHandler, - startPosition - } = await SubscriptionHandlerForTests.startingFromHere(producerClient); - - const processor = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - subscriptionEventHandler, - new InMemoryCheckpointStore(), - { - ...defaultOptions, - startPosition: startPosition + it("errors thrown from the user's handlers are reported to processError()", async () => { + const errors = new Set(); + const partitionIds = await consumerClient.getPartitionIds(); + + const processCloseErrorMessage = "processClose() error"; + const processEventsErrorMessage = "processEvents() error"; + const processInitializeErrorMessage = "processInitialize() error"; + const expectedErrorMessages: string[] = []; + for (let i = 0; i < partitionIds.length; i++) { + expectedErrorMessages.push( + processCloseErrorMessage, + processEventsErrorMessage, + processInitializeErrorMessage + ); } - ); + expectedErrorMessages.sort(); - processor.start(); + const eventProcessor = new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + { + processClose: async () => { + throw new Error(processCloseErrorMessage); + }, + processEvents: async () => { + throw new Error(processEventsErrorMessage); + }, + processInitialize: async () => { + throw new Error(processInitializeErrorMessage); + }, + processError: async (err, _) => { + errors.add(err); + throw new Error("These are logged but ignored"); + } + }, + new InMemoryCheckpointStore(), + { + ...defaultOptions, + startPosition: earliestEventPosition + } + ); - const expectedMessages = await 
sendOneMessagePerPartition(partitionIds, producerClient); - const receivedEvents = await subscriptionEventHandler.waitForEvents(partitionIds); + // errors that occur within the user's own event handlers will get + // routed to their processError() handler + eventProcessor.start(); + console.log("event processor started"); + try { + await loopUntil({ + name: "waiting for errors thrown from user's handlers", + timeBetweenRunsMs: 1000, + maxTimes: 30, + until: async () => { + console.log(partitionIds.length); + console.dir(errors); + return errors.size >= partitionIds.length * 3; + } + }); + console.log("event processor loop completed"); + const messages = [...errors].map((e) => e.message); + messages.sort(); + console.dir(messages); + console.dir(expectedErrorMessages); + messages.should.deep.equal(expectedErrorMessages); + } finally { + console.log("attempting to stop"); + await eventProcessor.stop(); + console.log("stopped"); + } + }); - // shutdown the processor - await processor.stop(); + it("should expose an id", async function(): Promise { + const processor = new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + { + processEvents: async () => { + /* no-op */ + }, + processError: async () => { + /* no-op */ + } + }, + new InMemoryCheckpointStore(), + { + ...defaultOptions, + startPosition: latestEventPosition + } + ); - subscriptionEventHandler.hasErrors(partitionIds).should.equal(false); - subscriptionEventHandler.allShutdown(partitionIds).should.equal(true); + const id = processor.id; + id.length.should.be.gt(1); + }); - receivedEvents.should.deep.equal(expectedMessages); - }); - }); + it("id can be forced to be a specific value", async function(): Promise { + const processor = new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + { + processEvents: async () => { + /* no-op */ + }, + processError: async () => { + /* no-op */ + } + }, + new 
InMemoryCheckpointStore(), + { ...defaultOptions, ownerId: "hello", startPosition: latestEventPosition } + ); - describe("InMemory Partition Manager", function(): void { - it("should claim ownership, get a list of ownership and update checkpoint", async function(): Promise< - void - > { - const inMemoryCheckpointStore = new InMemoryCheckpointStore(); - const partitionOwnership1: PartitionOwnership = { - fullyQualifiedNamespace: "myNamespace.servicebus.windows.net", - eventHubName: "myEventHub", - consumerGroup: EventHubConsumerClient.defaultConsumerGroupName, - ownerId: generate_uuid(), - partitionId: "0" - }; - const partitionOwnership2: PartitionOwnership = { - fullyQualifiedNamespace: "myNamespace.servicebus.windows.net", - eventHubName: "myEventHub", - consumerGroup: EventHubConsumerClient.defaultConsumerGroupName, - ownerId: generate_uuid(), - partitionId: "1" - }; - const partitionOwnership = await inMemoryCheckpointStore.claimOwnership([ - partitionOwnership1, - partitionOwnership2 - ]); - partitionOwnership.length.should.equals(2); - const ownershiplist = await inMemoryCheckpointStore.listOwnership( - "myNamespace.servicebus.windows.net", - "myEventHub", - EventHubConsumerClient.defaultConsumerGroupName - ); - ownershiplist.length.should.equals(2); - - const checkpoint: Checkpoint = { - fullyQualifiedNamespace: "myNamespace.servicebus.windows.net", - eventHubName: "myEventHub", - consumerGroup: EventHubConsumerClient.defaultConsumerGroupName, - partitionId: "0", - sequenceNumber: 10, - offset: 50 - }; + processor.id.should.equal("hello"); + }); - await inMemoryCheckpointStore.updateCheckpoint(checkpoint); - const partitionOwnershipList = await inMemoryCheckpointStore.listOwnership( - "myNamespace.servicebus.windows.net", - "myEventHub", - EventHubConsumerClient.defaultConsumerGroupName - ); - partitionOwnershipList[0].partitionId.should.equals(checkpoint.partitionId); - partitionOwnershipList[0].fullyQualifiedNamespace!.should.equals( - 
"myNamespace.servicebus.windows.net" - ); - partitionOwnershipList[0].eventHubName!.should.equals("myEventHub"); - partitionOwnershipList[0].consumerGroup!.should.equals( - EventHubConsumerClient.defaultConsumerGroupName - ); - }); - - it("should receive events from the checkpoint", async function(): Promise { - const partitionIds = await producerClient.getPartitionIds(); - - // ensure we have at least 2 partitions - partitionIds.length.should.gte(2); - - let checkpointMap = new Map(); - partitionIds.forEach((id) => checkpointMap.set(id, [])); - - let didError = false; - let processedAtLeastOneEvent = new Set(); - const checkpointSequenceNumbers: Map = new Map(); - - let partionCount: { [x: string]: number } = {}; - - class FooPartitionProcessor { - async processEvents(events: ReceivedEventData[], context: PartitionContext): Promise { - processedAtLeastOneEvent.add(context.partitionId); - - if (!partionCount[context.partitionId]) { - partionCount[context.partitionId] = 0; - } - partionCount[context.partitionId]++; + it("should treat consecutive start invocations as idempotent", async function(): Promise< + void + > { + const partitionIds = await producerClient.getPartitionIds(); - const existingEvents = checkpointMap.get(context.partitionId)!; + // ensure we have at least 2 partitions + partitionIds.length.should.gte(2); - for (const event of events) { - debug("Received event: '%s' from partition: '%s'", event.body, context.partitionId); + const { + subscriptionEventHandler, + startPosition + } = await SubscriptionHandlerForTests.startingFromHere(producerClient); - if (partionCount[context.partitionId] <= 50) { - checkpointSequenceNumbers.set(context.partitionId, event.sequenceNumber); - await context.updateCheckpoint(event); - existingEvents.push(event); - } - } - } - async processError(): Promise { - didError = true; - } - } + const processor = new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + 
subscriptionEventHandler, + new InMemoryCheckpointStore(), + { + ...defaultOptions, + startPosition: startPosition + } + ); - const inMemoryCheckpointStore = new InMemoryCheckpointStore(); - const processor1 = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - new FooPartitionProcessor(), - inMemoryCheckpointStore, - { - ...defaultOptions, - startPosition: earliestEventPosition - } - ); + processor.start(); + processor.start(); + processor.start(); - // start first processor - processor1.start(); + const expectedMessages = await sendOneMessagePerPartition(partitionIds, producerClient); + const receivedEvents = await subscriptionEventHandler.waitForEvents(partitionIds); - // create messages - const expectedMessagePrefix = "EventProcessor test - checkpoint - "; - const events: EventData[] = []; + // shutdown the processor + await processor.stop(); - for (const partitionId of partitionIds) { - for (let index = 1; index <= 100; index++) { - events.push({ body: `${expectedMessagePrefix} ${index} ${partitionId}` }); - } - await producerClient.sendBatch(events, { partitionId }); - } + receivedEvents.should.deep.equal(expectedMessages); - // set a delay to give a consumers a chance to receive a message - while (checkpointSequenceNumbers.size !== partitionIds.length) { - await delay(5000); - } + subscriptionEventHandler.hasErrors(partitionIds).should.equal(false); + subscriptionEventHandler.allShutdown(partitionIds).should.equal(true); + }); - // shutdown the first processor - await processor1.stop(); + it("should not throw if stop is called without start", async function(): Promise { + let didPartitionProcessorStart = false; - const lastEventsReceivedFromProcessor1: ReceivedEventData[] = []; - let index = 0; + const processor = new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + { + processInitialize: async () => { + didPartitionProcessorStart = true; + }, + 
processEvents: async () => { + /* no-op */ + }, + processError: async () => { + /* no-op */ + } + }, + new InMemoryCheckpointStore(), + { + ...defaultOptions, + startPosition: latestEventPosition + } + ); - for (const partitionId of partitionIds) { - const receivedEvents = checkpointMap.get(partitionId)!; - lastEventsReceivedFromProcessor1[index++] = receivedEvents[receivedEvents.length - 1]; - } + // shutdown the processor + await processor.stop(); - checkpointMap = new Map(); - partitionIds.forEach((id) => checkpointMap.set(id, [])); - partionCount = {}; - processedAtLeastOneEvent = new Set(); + didPartitionProcessorStart.should.equal(false); + }); - const processor2 = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - new FooPartitionProcessor(), - inMemoryCheckpointStore, - { ...defaultOptions, startPosition: earliestEventPosition } - ); + it("should support start after stopping", async function(): Promise { + const partitionIds = await producerClient.getPartitionIds(); - const checkpoints = await inMemoryCheckpointStore.listCheckpoints( - consumerClient.fullyQualifiedNamespace, - consumerClient.eventHubName, - EventHubConsumerClient.defaultConsumerGroupName - ); + // ensure we have at least 2 partitions + partitionIds.length.should.gte(2); - checkpoints.sort((a, b) => a.partitionId.localeCompare(b.partitionId)); + const { + subscriptionEventHandler, + startPosition + } = await SubscriptionHandlerForTests.startingFromHere(producerClient); - for (const checkpoint of checkpoints) { - const expectedSequenceNumber = checkpointSequenceNumbers.get(checkpoint.partitionId); - should.exist(expectedSequenceNumber); + const processor = new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + subscriptionEventHandler, + new InMemoryCheckpointStore(), + { + ...defaultOptions, + startPosition: startPosition + } + ); - 
expectedSequenceNumber!.should.equal(checkpoint.sequenceNumber); - } + loggerForTest(`Starting processor for the first time`); + processor.start(); - // start second processor - processor2.start(); + const expectedMessages = await sendOneMessagePerPartition(partitionIds, producerClient); + const receivedEvents = await subscriptionEventHandler.waitForEvents(partitionIds); - // set a delay to give a consumers a chance to receive a message - while (processedAtLeastOneEvent.size !== partitionIds.length) { - await delay(5000); - } + loggerForTest(`Stopping processor for the first time`); + await processor.stop(); - // shutdown the second processor - await processor2.stop(); + receivedEvents.should.deep.equal(expectedMessages); - index = 0; - const firstEventsReceivedFromProcessor2: ReceivedEventData[] = []; - for (const partitionId of partitionIds) { - const receivedEvents = checkpointMap.get(partitionId)!; - firstEventsReceivedFromProcessor2[index++] = receivedEvents[0]; - } + subscriptionEventHandler.hasErrors(partitionIds).should.equal(false); + subscriptionEventHandler.allShutdown(partitionIds).should.equal(true); - didError.should.equal(false); - index = 0; - // validate correct events captured for each partition using checkpoint - for (const partitionId of partitionIds) { - debug(`Validate events for partition: ${partitionId}`); - lastEventsReceivedFromProcessor1[index].sequenceNumber.should.equal( - firstEventsReceivedFromProcessor2[index].sequenceNumber - 1 - ); - index++; - } - }); + // validate correct events captured for each partition - it("makes copies and never returns internal instances directly", async () => { - const checkpointStore = new InMemoryCheckpointStore(); - const allObjects = new Set(); + // start it again + loggerForTest(`Starting processor again`); + subscriptionEventHandler.clear(); - const assertUnique = (...objects: any[]): void => { - const size = allObjects.size; + processor.start(); - for (const obj of objects) { - allObjects.add(obj); 
- size.should.be.lessThan(allObjects.size); - } - }; + await subscriptionEventHandler.waitUntilInitialized(partitionIds); - const basicProperties = { - consumerGroup: "initial consumer group", - eventHubName: "initial event hub name", - fullyQualifiedNamespace: "initial fully qualified namespace" - }; + loggerForTest(`Stopping processor again`); + await processor.stop(); - const originalPartitionOwnership = { - ...basicProperties, - ownerId: "initial owner ID", - partitionId: "1001" - }; + subscriptionEventHandler.hasErrors(partitionIds).should.equal(false); + subscriptionEventHandler.allShutdown(partitionIds).should.equal(true); + }); - const copyOfPartitionOwnership = { - ...originalPartitionOwnership - }; + describe("Partition processor", function(): void { + it("should support processing events across multiple partitions", async function(): Promise< + void + > { + const partitionIds = await producerClient.getPartitionIds(); + const { + subscriptionEventHandler, + startPosition + } = await SubscriptionHandlerForTests.startingFromHere(producerClient); + + const processor = new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + subscriptionEventHandler, + new InMemoryCheckpointStore(), + { + ...defaultOptions, + startPosition: startPosition + } + ); - assertUnique(originalPartitionOwnership); + processor.start(); - for (let i = 0; i < 2; ++i) { - const ownerships = await checkpointStore.claimOwnership([originalPartitionOwnership]); + const expectedMessages = await sendOneMessagePerPartition(partitionIds, producerClient); + const receivedEvents = await subscriptionEventHandler.waitForEvents(partitionIds); - // second sanity check - we were also modifying the input parameter - // (which was also bad) - copyOfPartitionOwnership.should.deep.equal(originalPartitionOwnership); + // shutdown the processor + await processor.stop(); - assertUnique(...ownerships); - } + 
subscriptionEventHandler.hasErrors(partitionIds).should.equal(false); + subscriptionEventHandler.allShutdown(partitionIds).should.equal(true); - for (let i = 0; i < 2; ++i) { - const ownerships = await checkpointStore.listOwnership( - basicProperties.fullyQualifiedNamespace, - basicProperties.eventHubName, - basicProperties.consumerGroup - ); - assertUnique(...ownerships); - } + receivedEvents.should.deep.equal(expectedMessages); + }); + }); - const originalCheckpoint: Checkpoint = { - ...basicProperties, - sequenceNumber: 1, - partitionId: "1", - offset: 101 - }; + describe("InMemory Partition Manager", function(): void { + it("should claim ownership, get a list of ownership and update checkpoint", async function(): Promise< + void + > { + const inMemoryCheckpointStore = new InMemoryCheckpointStore(); + const partitionOwnership1: PartitionOwnership = { + fullyQualifiedNamespace: "myNamespace.servicebus.windows.net", + eventHubName: "myEventHub", + consumerGroup: EventHubConsumerClient.defaultConsumerGroupName, + ownerId: generate_uuid(), + partitionId: "0" + }; + const partitionOwnership2: PartitionOwnership = { + fullyQualifiedNamespace: "myNamespace.servicebus.windows.net", + eventHubName: "myEventHub", + consumerGroup: EventHubConsumerClient.defaultConsumerGroupName, + ownerId: generate_uuid(), + partitionId: "1" + }; + const partitionOwnership = await inMemoryCheckpointStore.claimOwnership([ + partitionOwnership1, + partitionOwnership2 + ]); + partitionOwnership.length.should.equals(2); + const ownershiplist = await inMemoryCheckpointStore.listOwnership( + "myNamespace.servicebus.windows.net", + "myEventHub", + EventHubConsumerClient.defaultConsumerGroupName + ); + ownershiplist.length.should.equals(2); + + const checkpoint: Checkpoint = { + fullyQualifiedNamespace: "myNamespace.servicebus.windows.net", + eventHubName: "myEventHub", + consumerGroup: EventHubConsumerClient.defaultConsumerGroupName, + partitionId: "0", + sequenceNumber: 10, + offset: 50 + }; - 
const copyOfOriginalCheckpoint = { - ...originalCheckpoint - }; + await inMemoryCheckpointStore.updateCheckpoint(checkpoint); + const partitionOwnershipList = await inMemoryCheckpointStore.listOwnership( + "myNamespace.servicebus.windows.net", + "myEventHub", + EventHubConsumerClient.defaultConsumerGroupName + ); + partitionOwnershipList[0].partitionId.should.equals(checkpoint.partitionId); + partitionOwnershipList[0].fullyQualifiedNamespace!.should.equals( + "myNamespace.servicebus.windows.net" + ); + partitionOwnershipList[0].eventHubName!.should.equals("myEventHub"); + partitionOwnershipList[0].consumerGroup!.should.equals( + EventHubConsumerClient.defaultConsumerGroupName + ); + }); - await checkpointStore.updateCheckpoint(originalCheckpoint); + it("should receive events from the checkpoint", async function(): Promise { + const partitionIds = await producerClient.getPartitionIds(); - // checking that we don't modify input parameters - copyOfOriginalCheckpoint.should.deep.equal(originalCheckpoint); + // ensure we have at least 2 partitions + partitionIds.length.should.gte(2); - for (let i = 0; i < 2; ++i) { - const checkpoints = await checkpointStore.listCheckpoints( - basicProperties.fullyQualifiedNamespace, - basicProperties.eventHubName, - basicProperties.consumerGroup - ); - assertUnique(...checkpoints); - } - }); - }); + let checkpointMap = new Map(); + partitionIds.forEach((id) => checkpointMap.set(id, [])); - describe("Load balancing", function(): void { - beforeEach("validate partitions", async function(): Promise { - const partitionIds = await producerClient.getPartitionIds(); - // ensure we have at least 3 partitions - partitionIds.length.should.gte( - 3, - "The load balancing tests must be ran on an Event Hub with at least 3 partitions" - ); - }); - - it("should 'steal' partitions until all the processors have reached a steady-state (BalancedLoadBalancingStrategy)", async function(): Promise< - void - > { - loggerForTest("starting up the stealing 
test"); - - const processorByName: Dictionary = {}; - const checkpointStore = new InMemoryCheckpointStore(); - const partitionIds = await producerClient.getPartitionIds(); - const partitionOwnershipArr = new Set(); - - const partitionResultsMap = new Map< - string, - { events: string[]; initialized: boolean; closeReason?: CloseReason } - >(); - partitionIds.forEach((id) => partitionResultsMap.set(id, { events: [], initialized: false })); - let didGetReceiverDisconnectedError = false; - - // The partitionProcess will need to add events to the partitionResultsMap as they are received - class FooPartitionProcessor implements Required { - async processInitialize(context: PartitionContext): Promise { - loggerForTest(`processInitialize(${context.partitionId})`); - partitionResultsMap.get(context.partitionId)!.initialized = true; - } - async processClose(reason: CloseReason, context: PartitionContext): Promise { - loggerForTest(`processClose(${context.partitionId})`); - partitionResultsMap.get(context.partitionId)!.closeReason = reason; - } - async processEvents(events: ReceivedEventData[], context: PartitionContext): Promise { - partitionOwnershipArr.add(context.partitionId); - const existingEvents = partitionResultsMap.get(context.partitionId)!.events; - existingEvents.push(...events.map((event) => event.body)); - } - async processError(err: Error, context: PartitionContext): Promise { - loggerForTest(`processError(${context.partitionId})`); - const errorName = (err as any).code; - if (errorName === "ReceiverDisconnectedError") { - didGetReceiverDisconnectedError = true; + let didError = false; + let processedAtLeastOneEvent = new Set(); + const checkpointSequenceNumbers: Map = new Map(); + + let partionCount: { [x: string]: number } = {}; + + class FooPartitionProcessor { + async processEvents( + events: ReceivedEventData[], + context: PartitionContext + ): Promise { + processedAtLeastOneEvent.add(context.partitionId); + + if (!partionCount[context.partitionId]) { + 
partionCount[context.partitionId] = 0; + } + partionCount[context.partitionId]++; + + const existingEvents = checkpointMap.get(context.partitionId)!; + + for (const event of events) { + debug("Received event: '%s' from partition: '%s'", event.body, context.partitionId); + + if (partionCount[context.partitionId] <= 50) { + checkpointSequenceNumbers.set(context.partitionId, event.sequenceNumber); + await context.updateCheckpoint(event); + existingEvents.push(event); + } + } + } + async processError(): Promise { + didError = true; + } } - } - } - // create messages - const expectedMessagePrefix = "EventProcessor test - multiple partitions - "; - for (const partitionId of partitionIds) { - await producerClient.sendBatch([{ body: expectedMessagePrefix + partitionId }], { - partitionId - }); - } + const inMemoryCheckpointStore = new InMemoryCheckpointStore(); + const processor1 = new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + new FooPartitionProcessor(), + inMemoryCheckpointStore, + { + ...defaultOptions, + startPosition: earliestEventPosition + } + ); - const processor1LoadBalancingInterval = { - loopIntervalInMs: 1000 - }; + // start first processor + processor1.start(); - // working around a potential deadlock - this allows `processor-2` to more - // aggressively pursue getting its required partitions and avoid being in - // lockstep with `processor-1` - const processor2LoadBalancingInterval = { - loopIntervalInMs: processor1LoadBalancingInterval.loopIntervalInMs / 2 - }; + // create messages + const expectedMessagePrefix = "EventProcessor test - checkpoint - "; + const events: EventData[] = []; - processorByName[`processor-1`] = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - new FooPartitionProcessor(), - checkpointStore, - { - ...defaultOptions, - startPosition: earliestEventPosition, - ...processor1LoadBalancingInterval, - loadBalancingStrategy: new 
BalancedLoadBalancingStrategy(60000) - } - ); + for (const partitionId of partitionIds) { + for (let index = 1; index <= 100; index++) { + events.push({ body: `${expectedMessagePrefix} ${index} ${partitionId}` }); + } + await producerClient.sendBatch(events, { partitionId }); + } - processorByName[`processor-1`].start(); + // set a delay to give a consumers a chance to receive a message + while (checkpointSequenceNumbers.size !== partitionIds.length) { + await delay(5000); + } - await loopUntil({ - name: "All partitions are owned", - maxTimes: 60, - timeBetweenRunsMs: 1000, - until: async () => partitionOwnershipArr.size === partitionIds.length, - errorMessageFn: () => `${partitionOwnershipArr.size}/${partitionIds.length}` - }); + // shutdown the first processor + await processor1.stop(); - processorByName[`processor-2`] = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - new FooPartitionProcessor(), - checkpointStore, - { - ...defaultOptions, - startPosition: earliestEventPosition, - ...processor2LoadBalancingInterval, - loadBalancingStrategy: new BalancedLoadBalancingStrategy(60000) - } - ); - - partitionOwnershipArr.size.should.equal(partitionIds.length); - processorByName[`processor-2`].start(); - - await loopUntil({ - name: "Processors are balanced", - maxTimes: 60, - timeBetweenRunsMs: 1000, - until: async () => { - // it should be impossible for 'processor-2' to have obtained the number of - // partitions it needed without having stolen some from 'processor-1' - // so if we haven't see any `ReceiverDisconnectedError`'s then that stealing - // hasn't occurred yet. 
- if (!didGetReceiverDisconnectedError) { - return false; + const lastEventsReceivedFromProcessor1: ReceivedEventData[] = []; + let index = 0; + + for (const partitionId of partitionIds) { + const receivedEvents = checkpointMap.get(partitionId)!; + lastEventsReceivedFromProcessor1[index++] = receivedEvents[receivedEvents.length - 1]; } - const partitionOwnership = await checkpointStore.listOwnership( + checkpointMap = new Map(); + partitionIds.forEach((id) => checkpointMap.set(id, [])); + partionCount = {}; + processedAtLeastOneEvent = new Set(); + + const processor2 = new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + new FooPartitionProcessor(), + inMemoryCheckpointStore, + { ...defaultOptions, startPosition: earliestEventPosition } + ); + + const checkpoints = await inMemoryCheckpointStore.listCheckpoints( consumerClient.fullyQualifiedNamespace, consumerClient.eventHubName, EventHubConsumerClient.defaultConsumerGroupName ); - // map of ownerId as a key and partitionIds as a value - const partitionOwnershipMap: Map = ownershipListToMap( - partitionOwnership - ); + checkpoints.sort((a, b) => a.partitionId.localeCompare(b.partitionId)); - // if stealing has occurred we just want to make sure that _all_ - // the stealing has completed. - const isBalanced = (friendlyName: string): boolean => { - const n = Math.floor(partitionIds.length / 2); - const numPartitions = partitionOwnershipMap.get(processorByName[friendlyName].id)! 
- .length; - return numPartitions === n || numPartitions === n + 1; - }; + for (const checkpoint of checkpoints) { + const expectedSequenceNumber = checkpointSequenceNumbers.get(checkpoint.partitionId); + should.exist(expectedSequenceNumber); - if (!isBalanced(`processor-1`) || !isBalanced(`processor-2`)) { - return false; + expectedSequenceNumber!.should.equal(checkpoint.sequenceNumber); } - return true; - } - }); + // start second processor + processor2.start(); - for (const processor in processorByName) { - await processorByName[processor].stop(); - } + // set a delay to give a consumers a chance to receive a message + while (processedAtLeastOneEvent.size !== partitionIds.length) { + await delay(5000); + } - // now that all the dust has settled let's make sure that - // a. we received some events from each partition (doesn't matter which processor) - // did the work - // b. each partition was initialized - // c. each partition should have received at least one shutdown event - for (const partitionId of partitionIds) { - const results = partitionResultsMap.get(partitionId)!; - results.events.length.should.be.gte(1); - results.initialized.should.equal(true); - (results.closeReason === CloseReason.Shutdown).should.equal(true); - } - }); - - it("should 'steal' partitions until all the processors have reached a steady-state (GreedyLoadBalancingStrategy)", async function(): Promise< - void - > { - loggerForTest("starting up the stealing test"); - - const processorByName: Dictionary = {}; - const checkpointStore = new InMemoryCheckpointStore(); - const partitionIds = await producerClient.getPartitionIds(); - const partitionOwnershipArr = new Set(); - - const partitionResultsMap = new Map< - string, - { events: string[]; initialized: boolean; closeReason?: CloseReason } - >(); - partitionIds.forEach((id) => partitionResultsMap.set(id, { events: [], initialized: false })); - let didGetReceiverDisconnectedError = false; - - // The partitionProcess will need to add events 
to the partitionResultsMap as they are received - class FooPartitionProcessor implements Required { - async processInitialize(context: PartitionContext): Promise { - loggerForTest(`processInitialize(${context.partitionId})`); - partitionResultsMap.get(context.partitionId)!.initialized = true; - } - async processClose(reason: CloseReason, context: PartitionContext): Promise { - loggerForTest(`processClose(${context.partitionId})`); - partitionResultsMap.get(context.partitionId)!.closeReason = reason; - } - async processEvents(events: ReceivedEventData[], context: PartitionContext): Promise { - partitionOwnershipArr.add(context.partitionId); - const existingEvents = partitionResultsMap.get(context.partitionId)!.events; - existingEvents.push(...events.map((event) => event.body)); - } - async processError(err: Error, context: PartitionContext): Promise { - loggerForTest(`processError(${context.partitionId})`); - const errorName = (err as any).code; - if (errorName === "ReceiverDisconnectedError") { - didGetReceiverDisconnectedError = true; + // shutdown the second processor + await processor2.stop(); + + index = 0; + const firstEventsReceivedFromProcessor2: ReceivedEventData[] = []; + for (const partitionId of partitionIds) { + const receivedEvents = checkpointMap.get(partitionId)!; + firstEventsReceivedFromProcessor2[index++] = receivedEvents[0]; } - } - } - // create messages - const expectedMessagePrefix = "EventProcessor test - multiple partitions - "; - for (const partitionId of partitionIds) { - await producerClient.sendBatch([{ body: expectedMessagePrefix + partitionId }], { - partitionId + didError.should.equal(false); + index = 0; + // validate correct events captured for each partition using checkpoint + for (const partitionId of partitionIds) { + debug(`Validate events for partition: ${partitionId}`); + lastEventsReceivedFromProcessor1[index].sequenceNumber.should.equal( + firstEventsReceivedFromProcessor2[index].sequenceNumber - 1 + ); + index++; + } }); - 
} - const processor1LoadBalancingInterval = { - loopIntervalInMs: 1000 - }; + it("makes copies and never returns internal instances directly", async () => { + const checkpointStore = new InMemoryCheckpointStore(); + const allObjects = new Set(); - // working around a potential deadlock - this allows `processor-2` to more - // aggressively pursue getting its required partitions and avoid being in - // lockstep with `processor-1` - const processor2LoadBalancingInterval = { - loopIntervalInMs: processor1LoadBalancingInterval.loopIntervalInMs / 2 - }; + const assertUnique = (...objects: any[]): void => { + const size = allObjects.size; - processorByName[`processor-1`] = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - new FooPartitionProcessor(), - checkpointStore, - { - ...defaultOptions, - startPosition: earliestEventPosition, - ...processor1LoadBalancingInterval, - loadBalancingStrategy: new GreedyLoadBalancingStrategy(60000) - } - ); + for (const obj of objects) { + allObjects.add(obj); + size.should.be.lessThan(allObjects.size); + } + }; - processorByName[`processor-1`].start(); + const basicProperties = { + consumerGroup: "initial consumer group", + eventHubName: "initial event hub name", + fullyQualifiedNamespace: "initial fully qualified namespace" + }; - await loopUntil({ - name: "All partitions are owned", - maxTimes: 60, - timeBetweenRunsMs: 1000, - until: async () => partitionOwnershipArr.size === partitionIds.length, - errorMessageFn: () => `${partitionOwnershipArr.size}/${partitionIds.length}` - }); + const originalPartitionOwnership = { + ...basicProperties, + ownerId: "initial owner ID", + partitionId: "1001" + }; - processorByName[`processor-2`] = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - new FooPartitionProcessor(), - checkpointStore, - { - ...defaultOptions, - startPosition: earliestEventPosition, - ...processor2LoadBalancingInterval, - 
loadBalancingStrategy: new GreedyLoadBalancingStrategy(60000) - } - ); - - partitionOwnershipArr.size.should.equal(partitionIds.length); - processorByName[`processor-2`].start(); - - await loopUntil({ - name: "Processors are balanced", - maxTimes: 60, - timeBetweenRunsMs: 1000, - until: async () => { - // it should be impossible for 'processor-2' to have obtained the number of - // partitions it needed without having stolen some from 'processor-1' - // so if we haven't see any `ReceiverDisconnectedError`'s then that stealing - // hasn't occurred yet. - if (!didGetReceiverDisconnectedError) { - return false; + const copyOfPartitionOwnership = { + ...originalPartitionOwnership + }; + + assertUnique(originalPartitionOwnership); + + for (let i = 0; i < 2; ++i) { + const ownerships = await checkpointStore.claimOwnership([originalPartitionOwnership]); + + // second sanity check - we were also modifying the input parameter + // (which was also bad) + copyOfPartitionOwnership.should.deep.equal(originalPartitionOwnership); + + assertUnique(...ownerships); } - const partitionOwnership = await checkpointStore.listOwnership( - consumerClient.fullyQualifiedNamespace, - consumerClient.eventHubName, - EventHubConsumerClient.defaultConsumerGroupName - ); + for (let i = 0; i < 2; ++i) { + const ownerships = await checkpointStore.listOwnership( + basicProperties.fullyQualifiedNamespace, + basicProperties.eventHubName, + basicProperties.consumerGroup + ); + assertUnique(...ownerships); + } - // map of ownerId as a key and partitionIds as a value - const partitionOwnershipMap: Map = ownershipListToMap( - partitionOwnership - ); + const originalCheckpoint: Checkpoint = { + ...basicProperties, + sequenceNumber: 1, + partitionId: "1", + offset: 101 + }; - // if stealing has occurred we just want to make sure that _all_ - // the stealing has completed. 
- const isBalanced = (friendlyName: string): boolean => { - const n = Math.floor(partitionIds.length / 2); - const numPartitions = partitionOwnershipMap.get(processorByName[friendlyName].id)! - .length; - return numPartitions === n || numPartitions === n + 1; + const copyOfOriginalCheckpoint = { + ...originalCheckpoint }; - if (!isBalanced(`processor-1`) || !isBalanced(`processor-2`)) { - return false; - } + await checkpointStore.updateCheckpoint(originalCheckpoint); - return true; - } + // checking that we don't modify input parameters + copyOfOriginalCheckpoint.should.deep.equal(originalCheckpoint); + + for (let i = 0; i < 2; ++i) { + const checkpoints = await checkpointStore.listCheckpoints( + basicProperties.fullyQualifiedNamespace, + basicProperties.eventHubName, + basicProperties.consumerGroup + ); + assertUnique(...checkpoints); + } + }); }); - for (const processor in processorByName) { - await processorByName[processor].stop(); - } + describe("Load balancing", function(): void { + beforeEach("validate partitions", async function(): Promise { + const partitionIds = await producerClient.getPartitionIds(); + // ensure we have at least 3 partitions + partitionIds.length.should.gte( + 3, + "The load balancing tests must be ran on an Event Hub with at least 3 partitions" + ); + }); - // now that all the dust has settled let's make sure that - // a. we received some events from each partition (doesn't matter which processor) - // did the work - // b. each partition was initialized - // c. 
each partition should have received at least one shutdown event - for (const partitionId of partitionIds) { - const results = partitionResultsMap.get(partitionId)!; - results.events.length.should.be.gte(1); - results.initialized.should.equal(true); - (results.closeReason === CloseReason.Shutdown).should.equal(true); - } - }); - - it("should ensure that all the processors reach a steady-state where all partitions are being processed (BalancedLoadBalancingStrategy)", async function(): Promise< - void - > { - const processorByName: Dictionary = {}; - const partitionIds = await producerClient.getPartitionIds(); - const checkpointStore = new InMemoryCheckpointStore(); - const partitionOwnershipArr = new Set(); - let didError = false; - - // The partitionProcess will need to add events to the partitionResultsMap as they are received - class FooPartitionProcessor { - async processEvents( - _events: ReceivedEventData[], - context: PartitionContext - ): Promise { - partitionOwnershipArr.add(context.partitionId); - } - async processError(): Promise { - didError = true; - } - } + it("should 'steal' partitions until all the processors have reached a steady-state (BalancedLoadBalancingStrategy)", async function(): Promise< + void + > { + loggerForTest("starting up the stealing test"); + + const processorByName: Dictionary = {}; + const checkpointStore = new InMemoryCheckpointStore(); + const partitionIds = await producerClient.getPartitionIds(); + const partitionOwnershipArr = new Set(); + + const partitionResultsMap = new Map< + string, + { events: string[]; initialized: boolean; closeReason?: CloseReason } + >(); + partitionIds.forEach((id) => + partitionResultsMap.set(id, { events: [], initialized: false }) + ); + let didGetReceiverDisconnectedError = false; - // create messages - const expectedMessagePrefix = "EventProcessor test - multiple partitions - "; - for (const partitionId of partitionIds) { - await producerClient.sendBatch([{ body: expectedMessagePrefix + 
partitionId }], { - partitionId - }); - } + // The partitionProcess will need to add events to the partitionResultsMap as they are received + class FooPartitionProcessor implements Required { + async processInitialize(context: PartitionContext): Promise { + loggerForTest(`processInitialize(${context.partitionId})`); + partitionResultsMap.get(context.partitionId)!.initialized = true; + } + async processClose(reason: CloseReason, context: PartitionContext): Promise { + loggerForTest(`processClose(${context.partitionId})`); + partitionResultsMap.get(context.partitionId)!.closeReason = reason; + } + async processEvents( + events: ReceivedEventData[], + context: PartitionContext + ): Promise { + partitionOwnershipArr.add(context.partitionId); + const existingEvents = partitionResultsMap.get(context.partitionId)!.events; + existingEvents.push(...events.map((event) => event.body)); + } + async processError(err: Error, context: PartitionContext): Promise { + loggerForTest(`processError(${context.partitionId})`); + const errorName = (err as any).code; + if (errorName === "ReceiverDisconnectedError") { + didGetReceiverDisconnectedError = true; + } + } + } - for (let i = 0; i < 2; i++) { - const processorName = `processor-${i}`; - processorByName[processorName] = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - new FooPartitionProcessor(), - checkpointStore, - { - ...defaultOptions, - startPosition: earliestEventPosition, - loadBalancingStrategy: new BalancedLoadBalancingStrategy(60000) + // create messages + const expectedMessagePrefix = "EventProcessor test - multiple partitions - "; + for (const partitionId of partitionIds) { + await producerClient.sendBatch([{ body: expectedMessagePrefix + partitionId }], { + partitionId + }); } - ); - processorByName[processorName].start(); - await delay(12000); - } - await loopUntil({ - name: "partitionownership", - timeBetweenRunsMs: 5000, - maxTimes: 10, - until: async () => 
partitionOwnershipArr.size === partitionIds.length - }); + const processor1LoadBalancingInterval = { + loopIntervalInMs: 1000 + }; - // map of ownerId as a key and partitionIds as a value - const partitionOwnershipMap: Map = new Map(); + // working around a potential deadlock - this allows `processor-2` to more + // aggressively pursue getting its required partitions and avoid being in + // lockstep with `processor-1` + const processor2LoadBalancingInterval = { + loopIntervalInMs: processor1LoadBalancingInterval.loopIntervalInMs / 2 + }; - const partitionOwnership = await checkpointStore.listOwnership( - consumerClient.fullyQualifiedNamespace, - consumerClient.eventHubName, - EventHubConsumerClient.defaultConsumerGroupName - ); + processorByName[`processor-1`] = new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + new FooPartitionProcessor(), + checkpointStore, + { + ...defaultOptions, + startPosition: earliestEventPosition, + ...processor1LoadBalancingInterval, + loadBalancingStrategy: new BalancedLoadBalancingStrategy(60000) + } + ); - partitionOwnershipArr.size.should.equal(partitionIds.length); - for (const processor in processorByName) { - await processorByName[processor].stop(); - } + processorByName[`processor-1`].start(); - for (const ownership of partitionOwnership) { - if (!partitionOwnershipMap.has(ownership.ownerId)) { - partitionOwnershipMap.set(ownership.ownerId, [ownership.partitionId]); - } else { - const arr = partitionOwnershipMap.get(ownership.ownerId); - arr!.push(ownership.partitionId); - partitionOwnershipMap.set(ownership.ownerId, arr!); - } - } + await loopUntil({ + name: "All partitions are owned", + maxTimes: 60, + timeBetweenRunsMs: 1000, + until: async () => partitionOwnershipArr.size === partitionIds.length, + errorMessageFn: () => `${partitionOwnershipArr.size}/${partitionIds.length}` + }); - didError.should.equal(false); - const n = Math.floor(partitionIds.length / 2); - 
partitionOwnershipMap.get(processorByName[`processor-0`].id)!.length.should.oneOf([n, n + 1]); - partitionOwnershipMap.get(processorByName[`processor-1`].id)!.length.should.oneOf([n, n + 1]); - }); - - it("should ensure that all the processors reach a steady-state where all partitions are being processed (GreedyLoadBalancingStrategy)", async function(): Promise< - void - > { - const processorByName: Dictionary = {}; - const partitionIds = await producerClient.getPartitionIds(); - const checkpointStore = new InMemoryCheckpointStore(); - const partitionOwnershipArr = new Set(); - - // The partitionProcess will need to add events to the partitionResultsMap as they are received - class FooPartitionProcessor { - async processEvents( - _events: ReceivedEventData[], - context: PartitionContext - ): Promise { - partitionOwnershipArr.add(context.partitionId); - } - async processError(): Promise { - /* no-op */ - } - } + processorByName[`processor-2`] = new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + new FooPartitionProcessor(), + checkpointStore, + { + ...defaultOptions, + startPosition: earliestEventPosition, + ...processor2LoadBalancingInterval, + loadBalancingStrategy: new BalancedLoadBalancingStrategy(60000) + } + ); - // create messages - const expectedMessagePrefix = "EventProcessor test - multiple partitions - "; - for (const partitionId of partitionIds) { - await producerClient.sendBatch([{ body: expectedMessagePrefix + partitionId }], { - partitionId - }); - } + partitionOwnershipArr.size.should.equal(partitionIds.length); + processorByName[`processor-2`].start(); + + await loopUntil({ + name: "Processors are balanced", + maxTimes: 60, + timeBetweenRunsMs: 1000, + until: async () => { + // it should be impossible for 'processor-2' to have obtained the number of + // partitions it needed without having stolen some from 'processor-1' + // so if we haven't see any `ReceiverDisconnectedError`'s then that stealing + 
// hasn't occurred yet. + if (!didGetReceiverDisconnectedError) { + return false; + } - for (let i = 0; i < 2; i++) { - const processorName = `processor-${i}`; - processorByName[processorName] = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - new FooPartitionProcessor(), - checkpointStore, - { - ...defaultOptions, - startPosition: earliestEventPosition, - loadBalancingStrategy: new GreedyLoadBalancingStrategy(60000) - } - ); - processorByName[processorName].start(); - await delay(12000); - } + const partitionOwnership = await checkpointStore.listOwnership( + consumerClient.fullyQualifiedNamespace, + consumerClient.eventHubName, + EventHubConsumerClient.defaultConsumerGroupName + ); + + // map of ownerId as a key and partitionIds as a value + const partitionOwnershipMap: Map = ownershipListToMap( + partitionOwnership + ); + + // if stealing has occurred we just want to make sure that _all_ + // the stealing has completed. + const isBalanced = (friendlyName: string): boolean => { + const n = Math.floor(partitionIds.length / 2); + const numPartitions = partitionOwnershipMap.get(processorByName[friendlyName].id)! 
+ .length; + return numPartitions === n || numPartitions === n + 1; + }; - await loopUntil({ - name: "partitionownership", - timeBetweenRunsMs: 5000, - maxTimes: 10, - until: async () => partitionOwnershipArr.size === partitionIds.length - }); + if (!isBalanced(`processor-1`) || !isBalanced(`processor-2`)) { + return false; + } - // map of ownerId as a key and partitionIds as a value - const partitionOwnershipMap: Map = new Map(); + return true; + } + }); - const partitionOwnership = await checkpointStore.listOwnership( - consumerClient.fullyQualifiedNamespace, - consumerClient.eventHubName, - EventHubConsumerClient.defaultConsumerGroupName - ); + for (const processor in processorByName) { + await processorByName[processor].stop(); + } - partitionOwnershipArr.size.should.equal(partitionIds.length); - for (const processor in processorByName) { - await processorByName[processor].stop(); - } + // now that all the dust has settled let's make sure that + // a. we received some events from each partition (doesn't matter which processor) + // did the work + // b. each partition was initialized + // c. 
each partition should have received at least one shutdown event + for (const partitionId of partitionIds) { + const results = partitionResultsMap.get(partitionId)!; + results.events.length.should.be.gte(1); + results.initialized.should.equal(true); + (results.closeReason === CloseReason.Shutdown).should.equal(true); + } + }); - for (const ownership of partitionOwnership) { - if (!partitionOwnershipMap.has(ownership.ownerId)) { - partitionOwnershipMap.set(ownership.ownerId, [ownership.partitionId]); - } else { - const arr = partitionOwnershipMap.get(ownership.ownerId); - arr!.push(ownership.partitionId); - partitionOwnershipMap.set(ownership.ownerId, arr!); - } - } + it("should 'steal' partitions until all the processors have reached a steady-state (GreedyLoadBalancingStrategy)", async function(): Promise< + void + > { + loggerForTest("starting up the stealing test"); + + const processorByName: Dictionary = {}; + const checkpointStore = new InMemoryCheckpointStore(); + const partitionIds = await producerClient.getPartitionIds(); + const partitionOwnershipArr = new Set(); + + const partitionResultsMap = new Map< + string, + { events: string[]; initialized: boolean; closeReason?: CloseReason } + >(); + partitionIds.forEach((id) => + partitionResultsMap.set(id, { events: [], initialized: false }) + ); + let didGetReceiverDisconnectedError = false; - const n = Math.floor(partitionIds.length / 2); - partitionOwnershipMap.get(processorByName[`processor-0`].id)!.length.should.oneOf([n, n + 1]); - partitionOwnershipMap.get(processorByName[`processor-1`].id)!.length.should.oneOf([n, n + 1]); - }); - - it("should ensure that all the processors maintain a steady-state when all partitions are being processed (BalancedLoadBalancingStrategy)", async function(): Promise< - void - > { - const partitionIds = await producerClient.getPartitionIds(); - const checkpointStore = new InMemoryCheckpointStore(); - const claimedPartitionsMap = {} as { [eventProcessorId: string]: Set }; - - 
const partitionOwnershipHistory: string[] = []; - - let allPartitionsClaimed = false; - let thrashAfterSettling = false; - const handlers: SubscriptionEventHandlers = { - async processInitialize(context) { - const eventProcessorId: string = (context as any).eventProcessorId; - const partitionId = context.partitionId; - - partitionOwnershipHistory.push(`${eventProcessorId}: init ${partitionId}`); - - loggerForTest(`[${eventProcessorId}] Claimed partition ${partitionId}`); - if (allPartitionsClaimed) { - thrashAfterSettling = true; - return; + // The partitionProcess will need to add events to the partitionResultsMap as they are received + class FooPartitionProcessor implements Required { + async processInitialize(context: PartitionContext): Promise { + loggerForTest(`processInitialize(${context.partitionId})`); + partitionResultsMap.get(context.partitionId)!.initialized = true; + } + async processClose(reason: CloseReason, context: PartitionContext): Promise { + loggerForTest(`processClose(${context.partitionId})`); + partitionResultsMap.get(context.partitionId)!.closeReason = reason; + } + async processEvents( + events: ReceivedEventData[], + context: PartitionContext + ): Promise { + partitionOwnershipArr.add(context.partitionId); + const existingEvents = partitionResultsMap.get(context.partitionId)!.events; + existingEvents.push(...events.map((event) => event.body)); + } + async processError(err: Error, context: PartitionContext): Promise { + loggerForTest(`processError(${context.partitionId})`); + const errorName = (err as any).code; + if (errorName === "ReceiverDisconnectedError") { + didGetReceiverDisconnectedError = true; + } + } } - const claimedPartitions = claimedPartitionsMap[eventProcessorId] || new Set(); - claimedPartitions.add(partitionId); - claimedPartitionsMap[eventProcessorId] = claimedPartitions; - }, - async processEvents() { - /* no-op */ - }, - async processError() { - /* no-op */ - }, - async processClose(reason, context) { - const 
eventProcessorId: string = (context as any).eventProcessorId; - const partitionId = context.partitionId; - const claimedPartitions = claimedPartitionsMap[eventProcessorId]; - claimedPartitions.delete(partitionId); - loggerForTest( - `[${(context as any).eventProcessorId}] processClose(${reason}) on partition ${ - context.partitionId - }` - ); - if (reason === CloseReason.OwnershipLost && allPartitionsClaimed) { - loggerForTest( - `[${(context as any).eventProcessorId}] Lost partition ${context.partitionId}` - ); - thrashAfterSettling = true; + // create messages + const expectedMessagePrefix = "EventProcessor test - multiple partitions - "; + for (const partitionId of partitionIds) { + await producerClient.sendBatch([{ body: expectedMessagePrefix + partitionId }], { + partitionId + }); } - } - }; - const eventProcessorOptions: FullEventProcessorOptions = { - maxBatchSize: 1, - maxWaitTimeInSeconds: 5, - loopIntervalInMs: 1000, - inactiveTimeLimitInMs: 3000, - ownerLevel: 0, - // For this test we don't want to actually checkpoint, just test ownership. - startPosition: latestEventPosition, - loadBalancingStrategy: new BalancedLoadBalancingStrategy(60000) - }; + const processor1LoadBalancingInterval = { + loopIntervalInMs: 1000 + }; - const processor1 = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - handlers, - checkpointStore, - eventProcessorOptions - ); - - const processor2 = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - handlers, - checkpointStore, - eventProcessorOptions - ); - - processor1.start(); - processor2.start(); - - // loop until all partitions are claimed - try { - let lastLoopError: Record = {}; - - await loopUntil({ - name: "partitionOwnership", - maxTimes: 30, - timeBetweenRunsMs: 10000, - - errorMessageFn: () => JSON.stringify(lastLoopError, undefined, " "), - until: async () => { - // Ensure the partition ownerships are balanced. 
- const eventProcessorIds = Object.keys(claimedPartitionsMap); - - // There are 2 processors, so we should see 2 entries. - if (eventProcessorIds.length !== 2) { - lastLoopError = { - reason: "Not all event processors have shown up", - eventProcessorIds, - partitionOwnershipHistory - }; - return false; + // working around a potential deadlock - this allows `processor-2` to more + // aggressively pursue getting its required partitions and avoid being in + // lockstep with `processor-1` + const processor2LoadBalancingInterval = { + loopIntervalInMs: processor1LoadBalancingInterval.loopIntervalInMs / 2 + }; + + processorByName[`processor-1`] = new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + new FooPartitionProcessor(), + checkpointStore, + { + ...defaultOptions, + startPosition: earliestEventPosition, + ...processor1LoadBalancingInterval, + loadBalancingStrategy: new GreedyLoadBalancingStrategy(60000) } + ); - const aProcessorPartitions = claimedPartitionsMap[eventProcessorIds[0]]; - const bProcessorPartitions = claimedPartitionsMap[eventProcessorIds[1]]; + processorByName[`processor-1`].start(); - // The delta between number of partitions each processor owns can't be more than 1. 
- if (Math.abs(aProcessorPartitions.size - bProcessorPartitions.size) > 1) { - lastLoopError = { - reason: "Delta between partitions is greater than 1", - a: Array.from(aProcessorPartitions), - b: Array.from(bProcessorPartitions), - partitionOwnershipHistory - }; - return false; + await loopUntil({ + name: "All partitions are owned", + maxTimes: 60, + timeBetweenRunsMs: 1000, + until: async () => partitionOwnershipArr.size === partitionIds.length, + errorMessageFn: () => `${partitionOwnershipArr.size}/${partitionIds.length}` + }); + + processorByName[`processor-2`] = new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + new FooPartitionProcessor(), + checkpointStore, + { + ...defaultOptions, + startPosition: earliestEventPosition, + ...processor2LoadBalancingInterval, + loadBalancingStrategy: new GreedyLoadBalancingStrategy(60000) } + ); + + partitionOwnershipArr.size.should.equal(partitionIds.length); + processorByName[`processor-2`].start(); + + await loopUntil({ + name: "Processors are balanced", + maxTimes: 60, + timeBetweenRunsMs: 1000, + until: async () => { + // it should be impossible for 'processor-2' to have obtained the number of + // partitions it needed without having stolen some from 'processor-1' + // so if we haven't see any `ReceiverDisconnectedError`'s then that stealing + // hasn't occurred yet. + if (!didGetReceiverDisconnectedError) { + return false; + } - // All partitions must be claimed. 
- const innerAllPartitionsClaimed = - aProcessorPartitions.size + bProcessorPartitions.size === partitionIds.length; - - if (!innerAllPartitionsClaimed) { - lastLoopError = { - reason: "All partitions not claimed", - partitionIds, - a: Array.from(aProcessorPartitions), - b: Array.from(bProcessorPartitions), - partitionOwnershipHistory + const partitionOwnership = await checkpointStore.listOwnership( + consumerClient.fullyQualifiedNamespace, + consumerClient.eventHubName, + EventHubConsumerClient.defaultConsumerGroupName + ); + + // map of ownerId as a key and partitionIds as a value + const partitionOwnershipMap: Map = ownershipListToMap( + partitionOwnership + ); + + // if stealing has occurred we just want to make sure that _all_ + // the stealing has completed. + const isBalanced = (friendlyName: string): boolean => { + const n = Math.floor(partitionIds.length / 2); + const numPartitions = partitionOwnershipMap.get(processorByName[friendlyName].id)! + .length; + return numPartitions === n || numPartitions === n + 1; }; + + if (!isBalanced(`processor-1`) || !isBalanced(`processor-2`)) { + return false; + } + + return true; } + }); + + for (const processor in processorByName) { + await processorByName[processor].stop(); + } - return innerAllPartitionsClaimed; + // now that all the dust has settled let's make sure that + // a. we received some events from each partition (doesn't matter which processor) + // did the work + // b. each partition was initialized + // c. 
each partition should have received at least one shutdown event + for (const partitionId of partitionIds) { + const results = partitionResultsMap.get(partitionId)!; + results.events.length.should.be.gte(1); + results.initialized.should.equal(true); + (results.closeReason === CloseReason.Shutdown).should.equal(true); } }); - } catch (err) { - // close processors - await Promise.all([processor1.stop(), processor2.stop()]); - throw err; - } - loggerForTest(`All partitions have been claimed.`); - allPartitionsClaimed = true; + it("should ensure that all the processors reach a steady-state where all partitions are being processed (BalancedLoadBalancingStrategy)", async function(): Promise< + void + > { + const processorByName: Dictionary = {}; + const partitionIds = await producerClient.getPartitionIds(); + const checkpointStore = new InMemoryCheckpointStore(); + const partitionOwnershipArr = new Set(); + let didError = false; + + // The partitionProcess will need to add events to the partitionResultsMap as they are received + class FooPartitionProcessor { + async processEvents( + _events: ReceivedEventData[], + context: PartitionContext + ): Promise { + partitionOwnershipArr.add(context.partitionId); + } + async processError(): Promise { + didError = true; + } + } - try { - // loop for some time to see if thrashing occurs - await loopUntil({ - name: "partitionThrash", - maxTimes: 4, - timeBetweenRunsMs: 1000, - until: async () => thrashAfterSettling - }); - } catch (err) { - // swallow error, check trashAfterSettling for the condition in finally - } finally { - await Promise.all([processor1.stop(), processor2.stop()]); - should.equal( - thrashAfterSettling, - false, - "Detected PartitionOwnership thrashing after load-balancing has settled." 
- ); - } - }); - - it("should ensure that all the processors maintain a steady-state when all partitions are being processed (GreedyLoadBalancingStrategy)", async function(): Promise< - void - > { - const partitionIds = await producerClient.getPartitionIds(); - const checkpointStore = new InMemoryCheckpointStore(); - const claimedPartitionsMap = {} as { [eventProcessorId: string]: Set }; - - const partitionOwnershipHistory: string[] = []; - - let allPartitionsClaimed = false; - let thrashAfterSettling = false; - const handlers: SubscriptionEventHandlers = { - async processInitialize(context) { - const eventProcessorId: string = (context as any).eventProcessorId; - const partitionId = context.partitionId; - - partitionOwnershipHistory.push(`${eventProcessorId}: init ${partitionId}`); - - loggerForTest(`[${eventProcessorId}] Claimed partition ${partitionId}`); - if (allPartitionsClaimed) { - thrashAfterSettling = true; - return; + // create messages + const expectedMessagePrefix = "EventProcessor test - multiple partitions - "; + for (const partitionId of partitionIds) { + await producerClient.sendBatch([{ body: expectedMessagePrefix + partitionId }], { + partitionId + }); } - const claimedPartitions = claimedPartitionsMap[eventProcessorId] || new Set(); - claimedPartitions.add(partitionId); - claimedPartitionsMap[eventProcessorId] = claimedPartitions; - }, - async processEvents() { - /* no-op */ - }, - async processError() { - /* no-op */ - }, - async processClose(reason, context) { - const eventProcessorId: string = (context as any).eventProcessorId; - const partitionId = context.partitionId; - const claimedPartitions = claimedPartitionsMap[eventProcessorId]; - claimedPartitions.delete(partitionId); - loggerForTest( - `[${(context as any).eventProcessorId}] processClose(${reason}) on partition ${ - context.partitionId - }` - ); - if (reason === CloseReason.OwnershipLost && allPartitionsClaimed) { - loggerForTest( - `[${(context as any).eventProcessorId}] Lost 
partition ${context.partitionId}` + for (let i = 0; i < 2; i++) { + const processorName = `processor-${i}`; + processorByName[processorName] = new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + new FooPartitionProcessor(), + checkpointStore, + { + ...defaultOptions, + startPosition: earliestEventPosition, + loadBalancingStrategy: new BalancedLoadBalancingStrategy(60000) + } ); - thrashAfterSettling = true; + processorByName[processorName].start(); + await delay(12000); } - } - }; - const eventProcessorOptions: FullEventProcessorOptions = { - maxBatchSize: 1, - maxWaitTimeInSeconds: 5, - loopIntervalInMs: 1000, - inactiveTimeLimitInMs: 3000, - ownerLevel: 0, - // For this test we don't want to actually checkpoint, just test ownership. - startPosition: latestEventPosition, - loadBalancingStrategy: new GreedyLoadBalancingStrategy(60000) - }; + await loopUntil({ + name: "partitionownership", + timeBetweenRunsMs: 5000, + maxTimes: 10, + until: async () => partitionOwnershipArr.size === partitionIds.length + }); - const processor1 = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - handlers, - checkpointStore, - eventProcessorOptions - ); - - const processor2 = new EventProcessor( - EventHubConsumerClient.defaultConsumerGroupName, - consumerClient["_context"], - handlers, - checkpointStore, - eventProcessorOptions - ); - - processor1.start(); - processor2.start(); - - // loop until all partitions are claimed - try { - let lastLoopError: Record = {}; - - await loopUntil({ - name: "partitionOwnership", - maxTimes: 30, - timeBetweenRunsMs: 10000, - - errorMessageFn: () => JSON.stringify(lastLoopError, undefined, " "), - until: async () => { - // Ensure the partition ownerships are balanced. - const eventProcessorIds = Object.keys(claimedPartitionsMap); - - // There are 2 processors, so we should see 2 entries. 
- if (eventProcessorIds.length !== 2) { - lastLoopError = { - reason: "Not all event processors have shown up", - eventProcessorIds, - partitionOwnershipHistory - }; - return false; + // map of ownerId as a key and partitionIds as a value + const partitionOwnershipMap: Map = new Map(); + + const partitionOwnership = await checkpointStore.listOwnership( + consumerClient.fullyQualifiedNamespace, + consumerClient.eventHubName, + EventHubConsumerClient.defaultConsumerGroupName + ); + + partitionOwnershipArr.size.should.equal(partitionIds.length); + for (const processor in processorByName) { + await processorByName[processor].stop(); + } + + for (const ownership of partitionOwnership) { + if (!partitionOwnershipMap.has(ownership.ownerId)) { + partitionOwnershipMap.set(ownership.ownerId, [ownership.partitionId]); + } else { + const arr = partitionOwnershipMap.get(ownership.ownerId); + arr!.push(ownership.partitionId); + partitionOwnershipMap.set(ownership.ownerId, arr!); } + } - const aProcessorPartitions = claimedPartitionsMap[eventProcessorIds[0]]; - const bProcessorPartitions = claimedPartitionsMap[eventProcessorIds[1]]; + didError.should.equal(false); + const n = Math.floor(partitionIds.length / 2); + partitionOwnershipMap + .get(processorByName[`processor-0`].id)! + .length.should.oneOf([n, n + 1]); + partitionOwnershipMap + .get(processorByName[`processor-1`].id)! + .length.should.oneOf([n, n + 1]); + }); - // The delta between number of partitions each processor owns can't be more than 1. 
- if (Math.abs(aProcessorPartitions.size - bProcessorPartitions.size) > 1) { - lastLoopError = { - reason: "Delta between partitions is greater than 1", - a: Array.from(aProcessorPartitions), - b: Array.from(bProcessorPartitions), - partitionOwnershipHistory - }; - return false; + it("should ensure that all the processors reach a steady-state where all partitions are being processed (GreedyLoadBalancingStrategy)", async function(): Promise< + void + > { + const processorByName: Dictionary = {}; + const partitionIds = await producerClient.getPartitionIds(); + const checkpointStore = new InMemoryCheckpointStore(); + const partitionOwnershipArr = new Set(); + + // The partitionProcess will need to add events to the partitionResultsMap as they are received + class FooPartitionProcessor { + async processEvents( + _events: ReceivedEventData[], + context: PartitionContext + ): Promise { + partitionOwnershipArr.add(context.partitionId); + } + async processError(): Promise { + /* no-op */ } + } - // All partitions must be claimed. 
- const innerAllPartitionsClaimed = - aProcessorPartitions.size + bProcessorPartitions.size === partitionIds.length; - - if (!innerAllPartitionsClaimed) { - lastLoopError = { - reason: "All partitions not claimed", - partitionIds, - a: Array.from(aProcessorPartitions), - b: Array.from(bProcessorPartitions), - partitionOwnershipHistory - }; + // create messages + const expectedMessagePrefix = "EventProcessor test - multiple partitions - "; + for (const partitionId of partitionIds) { + await producerClient.sendBatch([{ body: expectedMessagePrefix + partitionId }], { + partitionId + }); + } + + for (let i = 0; i < 2; i++) { + const processorName = `processor-${i}`; + processorByName[processorName] = new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + new FooPartitionProcessor(), + checkpointStore, + { + ...defaultOptions, + startPosition: earliestEventPosition, + loadBalancingStrategy: new GreedyLoadBalancingStrategy(60000) + } + ); + processorByName[processorName].start(); + await delay(12000); + } + + await loopUntil({ + name: "partitionownership", + timeBetweenRunsMs: 5000, + maxTimes: 10, + until: async () => partitionOwnershipArr.size === partitionIds.length + }); + + // map of ownerId as a key and partitionIds as a value + const partitionOwnershipMap: Map = new Map(); + + const partitionOwnership = await checkpointStore.listOwnership( + consumerClient.fullyQualifiedNamespace, + consumerClient.eventHubName, + EventHubConsumerClient.defaultConsumerGroupName + ); + + partitionOwnershipArr.size.should.equal(partitionIds.length); + for (const processor in processorByName) { + await processorByName[processor].stop(); + } + + for (const ownership of partitionOwnership) { + if (!partitionOwnershipMap.has(ownership.ownerId)) { + partitionOwnershipMap.set(ownership.ownerId, [ownership.partitionId]); + } else { + const arr = partitionOwnershipMap.get(ownership.ownerId); + arr!.push(ownership.partitionId); + 
partitionOwnershipMap.set(ownership.ownerId, arr!); } + } + + const n = Math.floor(partitionIds.length / 2); + partitionOwnershipMap + .get(processorByName[`processor-0`].id)! + .length.should.oneOf([n, n + 1]); + partitionOwnershipMap + .get(processorByName[`processor-1`].id)! + .length.should.oneOf([n, n + 1]); + }); + + it("should ensure that all the processors maintain a steady-state when all partitions are being processed (BalancedLoadBalancingStrategy)", async function(): Promise< + void + > { + const partitionIds = await producerClient.getPartitionIds(); + const checkpointStore = new InMemoryCheckpointStore(); + const claimedPartitionsMap = {} as { [eventProcessorId: string]: Set }; + + const partitionOwnershipHistory: string[] = []; + + let allPartitionsClaimed = false; + let thrashAfterSettling = false; + const handlers: SubscriptionEventHandlers = { + async processInitialize(context) { + const eventProcessorId: string = (context as any).eventProcessorId; + const partitionId = context.partitionId; + + partitionOwnershipHistory.push(`${eventProcessorId}: init ${partitionId}`); + + loggerForTest(`[${eventProcessorId}] Claimed partition ${partitionId}`); + if (allPartitionsClaimed) { + thrashAfterSettling = true; + return; + } + + const claimedPartitions = claimedPartitionsMap[eventProcessorId] || new Set(); + claimedPartitions.add(partitionId); + claimedPartitionsMap[eventProcessorId] = claimedPartitions; + }, + async processEvents() { + /* no-op */ + }, + async processError() { + /* no-op */ + }, + async processClose(reason, context) { + const eventProcessorId: string = (context as any).eventProcessorId; + const partitionId = context.partitionId; + const claimedPartitions = claimedPartitionsMap[eventProcessorId]; + claimedPartitions.delete(partitionId); + loggerForTest( + `[${(context as any).eventProcessorId}] processClose(${reason}) on partition ${ + context.partitionId + }` + ); + if (reason === CloseReason.OwnershipLost && allPartitionsClaimed) { + 
loggerForTest( + `[${(context as any).eventProcessorId}] Lost partition ${context.partitionId}` + ); + thrashAfterSettling = true; + } + } + }; + + const eventProcessorOptions: FullEventProcessorOptions = { + maxBatchSize: 1, + maxWaitTimeInSeconds: 5, + loopIntervalInMs: 1000, + inactiveTimeLimitInMs: 3000, + ownerLevel: 0, + // For this test we don't want to actually checkpoint, just test ownership. + startPosition: latestEventPosition, + loadBalancingStrategy: new BalancedLoadBalancingStrategy(60000) + }; - return innerAllPartitionsClaimed; + const processor1 = new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + handlers, + checkpointStore, + eventProcessorOptions + ); + + const processor2 = new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + handlers, + checkpointStore, + eventProcessorOptions + ); + + processor1.start(); + processor2.start(); + + // loop until all partitions are claimed + try { + let lastLoopError: Record = {}; + + await loopUntil({ + name: "partitionOwnership", + maxTimes: 30, + timeBetweenRunsMs: 10000, + + errorMessageFn: () => JSON.stringify(lastLoopError, undefined, " "), + until: async () => { + // Ensure the partition ownerships are balanced. + const eventProcessorIds = Object.keys(claimedPartitionsMap); + + // There are 2 processors, so we should see 2 entries. + if (eventProcessorIds.length !== 2) { + lastLoopError = { + reason: "Not all event processors have shown up", + eventProcessorIds, + partitionOwnershipHistory + }; + return false; + } + + const aProcessorPartitions = claimedPartitionsMap[eventProcessorIds[0]]; + const bProcessorPartitions = claimedPartitionsMap[eventProcessorIds[1]]; + + // The delta between number of partitions each processor owns can't be more than 1. 
+ if (Math.abs(aProcessorPartitions.size - bProcessorPartitions.size) > 1) { + lastLoopError = { + reason: "Delta between partitions is greater than 1", + a: Array.from(aProcessorPartitions), + b: Array.from(bProcessorPartitions), + partitionOwnershipHistory + }; + return false; + } + + // All partitions must be claimed. + const innerAllPartitionsClaimed = + aProcessorPartitions.size + bProcessorPartitions.size === partitionIds.length; + + if (!innerAllPartitionsClaimed) { + lastLoopError = { + reason: "All partitions not claimed", + partitionIds, + a: Array.from(aProcessorPartitions), + b: Array.from(bProcessorPartitions), + partitionOwnershipHistory + }; + } + + return innerAllPartitionsClaimed; + } + }); + } catch (err) { + // close processors + await Promise.all([processor1.stop(), processor2.stop()]); + throw err; + } + + loggerForTest(`All partitions have been claimed.`); + allPartitionsClaimed = true; + + try { + // loop for some time to see if thrashing occurs + await loopUntil({ + name: "partitionThrash", + maxTimes: 4, + timeBetweenRunsMs: 1000, + until: async () => thrashAfterSettling + }); + } catch (err) { + // swallow error, check trashAfterSettling for the condition in finally + } finally { + await Promise.all([processor1.stop(), processor2.stop()]); + should.equal( + thrashAfterSettling, + false, + "Detected PartitionOwnership thrashing after load-balancing has settled." 
+ ); } }); - } catch (err) { - // close processors - await Promise.all([processor1.stop(), processor2.stop()]); - throw err; - } - loggerForTest(`All partitions have been claimed.`); - allPartitionsClaimed = true; + it("should ensure that all the processors maintain a steady-state when all partitions are being processed (GreedyLoadBalancingStrategy)", async function(): Promise< + void + > { + const partitionIds = await producerClient.getPartitionIds(); + const checkpointStore = new InMemoryCheckpointStore(); + const claimedPartitionsMap = {} as { [eventProcessorId: string]: Set }; - try { - // loop for some time to see if thrashing occurs - await loopUntil({ - name: "partitionThrash", - maxTimes: 4, - timeBetweenRunsMs: 1000, - until: async () => thrashAfterSettling + const partitionOwnershipHistory: string[] = []; + + let allPartitionsClaimed = false; + let thrashAfterSettling = false; + const handlers: SubscriptionEventHandlers = { + async processInitialize(context) { + const eventProcessorId: string = (context as any).eventProcessorId; + const partitionId = context.partitionId; + + partitionOwnershipHistory.push(`${eventProcessorId}: init ${partitionId}`); + + loggerForTest(`[${eventProcessorId}] Claimed partition ${partitionId}`); + if (allPartitionsClaimed) { + thrashAfterSettling = true; + return; + } + + const claimedPartitions = claimedPartitionsMap[eventProcessorId] || new Set(); + claimedPartitions.add(partitionId); + claimedPartitionsMap[eventProcessorId] = claimedPartitions; + }, + async processEvents() { + /* no-op */ + }, + async processError() { + /* no-op */ + }, + async processClose(reason, context) { + const eventProcessorId: string = (context as any).eventProcessorId; + const partitionId = context.partitionId; + const claimedPartitions = claimedPartitionsMap[eventProcessorId]; + claimedPartitions.delete(partitionId); + loggerForTest( + `[${(context as any).eventProcessorId}] processClose(${reason}) on partition ${ + context.partitionId + }` + ); 
+ if (reason === CloseReason.OwnershipLost && allPartitionsClaimed) { + loggerForTest( + `[${(context as any).eventProcessorId}] Lost partition ${context.partitionId}` + ); + thrashAfterSettling = true; + } + } + }; + + const eventProcessorOptions: FullEventProcessorOptions = { + maxBatchSize: 1, + maxWaitTimeInSeconds: 5, + loopIntervalInMs: 1000, + inactiveTimeLimitInMs: 3000, + ownerLevel: 0, + // For this test we don't want to actually checkpoint, just test ownership. + startPosition: latestEventPosition, + loadBalancingStrategy: new GreedyLoadBalancingStrategy(60000) + }; + + const processor1 = new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + handlers, + checkpointStore, + eventProcessorOptions + ); + + const processor2 = new EventProcessor( + EventHubConsumerClient.defaultConsumerGroupName, + consumerClient["_context"], + handlers, + checkpointStore, + eventProcessorOptions + ); + + processor1.start(); + processor2.start(); + + // loop until all partitions are claimed + try { + let lastLoopError: Record = {}; + + await loopUntil({ + name: "partitionOwnership", + maxTimes: 30, + timeBetweenRunsMs: 10000, + + errorMessageFn: () => JSON.stringify(lastLoopError, undefined, " "), + until: async () => { + // Ensure the partition ownerships are balanced. + const eventProcessorIds = Object.keys(claimedPartitionsMap); + + // There are 2 processors, so we should see 2 entries. + if (eventProcessorIds.length !== 2) { + lastLoopError = { + reason: "Not all event processors have shown up", + eventProcessorIds, + partitionOwnershipHistory + }; + return false; + } + + const aProcessorPartitions = claimedPartitionsMap[eventProcessorIds[0]]; + const bProcessorPartitions = claimedPartitionsMap[eventProcessorIds[1]]; + + // The delta between number of partitions each processor owns can't be more than 1. 
+ if (Math.abs(aProcessorPartitions.size - bProcessorPartitions.size) > 1) { + lastLoopError = { + reason: "Delta between partitions is greater than 1", + a: Array.from(aProcessorPartitions), + b: Array.from(bProcessorPartitions), + partitionOwnershipHistory + }; + return false; + } + + // All partitions must be claimed. + const innerAllPartitionsClaimed = + aProcessorPartitions.size + bProcessorPartitions.size === partitionIds.length; + + if (!innerAllPartitionsClaimed) { + lastLoopError = { + reason: "All partitions not claimed", + partitionIds, + a: Array.from(aProcessorPartitions), + b: Array.from(bProcessorPartitions), + partitionOwnershipHistory + }; + } + + return innerAllPartitionsClaimed; + } + }); + } catch (err) { + // close processors + await Promise.all([processor1.stop(), processor2.stop()]); + throw err; + } + + loggerForTest(`All partitions have been claimed.`); + allPartitionsClaimed = true; + + try { + // loop for some time to see if thrashing occurs + await loopUntil({ + name: "partitionThrash", + maxTimes: 4, + timeBetweenRunsMs: 1000, + until: async () => thrashAfterSettling + }); + } catch (err) { + // swallow error, check trashAfterSettling for the condition in finally + } finally { + await Promise.all([processor1.stop(), processor2.stop()]); + should.equal( + thrashAfterSettling, + false, + "Detected PartitionOwnership thrashing after load-balancing has settled." + ); + } }); - } catch (err) { - // swallow error, check trashAfterSettling for the condition in finally - } finally { - await Promise.all([processor1.stop(), processor2.stop()]); - should.equal( - thrashAfterSettling, - false, - "Detected PartitionOwnership thrashing after load-balancing has settled." 
- ); - } - }); + }); + }).timeout(100000); }); -}).timeout(100000); +}); function ownershipListToMap(partitionOwnership: PartitionOwnership[]): Map { const partitionOwnershipMap: Map = new Map(); diff --git a/sdk/eventhub/event-hubs/test/internal/misc.spec.ts b/sdk/eventhub/event-hubs/test/internal/misc.spec.ts index 7763ba6ffd50..cdf52699c2f2 100644 --- a/sdk/eventhub/event-hubs/test/internal/misc.spec.ts +++ b/sdk/eventhub/event-hubs/test/internal/misc.spec.ts @@ -17,271 +17,119 @@ import { ReceivedEventData, Subscription } from "../../src"; -import { EnvVarKeys, getEnvVars } from "../public/utils/testUtils"; +import { EnvVarKeys, getEnvVars, getEnvVarValue, isNode } from "../public/utils/testUtils"; import { TRACEPARENT_PROPERTY, extractSpanContextFromEventData } from "../../src/diagnostics/instrumentEventData"; import { TraceFlags } from "@azure/core-tracing"; import { SubscriptionHandlerForTests } from "../public/utils/subscriptionHandlerForTests"; -const env = getEnvVars(); - -describe("Misc tests", function(): void { - const service = { - connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - path: env[EnvVarKeys.EVENTHUB_NAME] - }; - let consumerClient: EventHubConsumerClient; - let producerClient: EventHubProducerClient; - let hubInfo: EventHubProperties; - let partitionId: string; - let lastEnqueuedOffset: number; - - before("validate environment", async function(): Promise { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." 
- ); - }); - - beforeEach(async () => { - debug("Creating the clients.."); - producerClient = new EventHubProducerClient(service.connectionString, service.path); - consumerClient = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path - ); - hubInfo = await consumerClient.getEventHubProperties(); - partitionId = hubInfo.partitionIds[0]; - lastEnqueuedOffset = (await consumerClient.getPartitionProperties(partitionId)) - .lastEnqueuedOffset; - }); - - afterEach(async () => { - debug("Closing the clients.."); - await producerClient.close(); - await consumerClient.close(); - }); - - it("should be able to send and receive a large message correctly", async function(): Promise< - void - > { - const bodysize = 220 * 1024; - const msgString = "A".repeat(220 * 1024); - const msgBody = Buffer.from(msgString); - const obj: EventData = { body: msgBody }; - debug(`Partition ${partitionId} has last message with offset ${lastEnqueuedOffset}.`); - debug("Sending one message with %d bytes.", bodysize); - await producerClient.sendBatch([obj], { partitionId }); - debug("Successfully sent the large message."); - - let subscription: Subscription | undefined; - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - debug("received message: ", data.length); - should.exist(data); - should.equal(data.length, 1); - should.equal(data[0].body.toString(), msgString); - should.not.exist((data[0].properties || {}).message_id); - resolve(); - }, - processError: async (err) => { - reject(err); - } - }, - { - startPosition: { offset: lastEnqueuedOffset } - } - ); - }); - await subscription!.close(); - }); - - it("should be able to send and receive a JSON object as a message correctly", async function(): Promise< - void - > { - const msgBody = { - id: "123-456-789", - weight: 10, - isBlue: true, - siblings: [ - { - id: "098-789-564", - weight: 20, - 
isBlue: false - } - ] - }; - const obj: EventData = { body: msgBody }; - debug(`Partition ${partitionId} has last message with offset ${lastEnqueuedOffset}.`); - debug("Sending one message %O", obj); - await producerClient.sendBatch([obj], { partitionId }); - debug("Successfully sent the large message."); - - let subscription: Subscription | undefined; - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - debug("received message: ", data.length); - should.exist(data); - should.equal(data.length, 1); - assert.deepEqual(data[0].body, msgBody); - should.not.exist((data[0].properties || {}).message_id); - resolve(); - }, - processError: async (err) => { - reject(err); - } - }, - { - startPosition: { offset: lastEnqueuedOffset } - } - ); - }); - await subscription!.close(); - }); - - it("should be able to send and receive an array as a message correctly", async function(): Promise< - void - > { - const msgBody = [ - { - id: "098-789-564", - weight: 20, - isBlue: false - }, - 10, - 20, - "some string" - ]; - const obj: EventData = { body: msgBody, properties: { message_id: uuid() } }; - debug(`Partition ${partitionId} has last message with offset ${lastEnqueuedOffset}.`); - debug("Sending one message %O", obj); - await producerClient.sendBatch([obj], { partitionId }); - debug("Successfully sent the large message."); - - let subscription: Subscription | undefined; - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - debug("received message: ", data.length); - should.exist(data); - should.equal(data.length, 1); - assert.deepEqual(data[0].body, msgBody); - assert.strictEqual(data[0].properties!.message_id, obj.properties!.message_id); - resolve(); - }, - processError: async (err) => { - reject(err); - } - }, - { - startPosition: { offset: lastEnqueuedOffset } - } - ); - }); - await 
subscription!.close(); - }); +import { versionsToTest } from "@azure/test-utils-multi-version"; +import { createMockServer } from "../public/utils/mockService"; + +const serviceVersions = ["mock", "live"] as const; +const testTarget = getEnvVarValue("TEST_TARGET") || "live"; + +describe("internal/misc.spec.ts", function() { + versionsToTest(serviceVersions, { versionForRecording: testTarget }, (serviceVersion) => { + const env = getEnvVars(serviceVersion as "live" | "mock"); + if (isNode) { + if (serviceVersion === "mock") { + let service: ReturnType; + before("Starting mock server", async () => { + service = createMockServer(); + + return service.start(); + }); + after("Stopping mock server", async () => { + return service?.stop(); + }); + } + } + describe("Misc tests", function(): void { + const service = { + connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + path: env[EnvVarKeys.EVENTHUB_NAME] + }; + let consumerClient: EventHubConsumerClient; + let producerClient: EventHubProducerClient; + let hubInfo: EventHubProperties; + let partitionId: string; + let lastEnqueuedOffset: number; + + before("validate environment", async function(): Promise { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." + ); + should.exist( + env[EnvVarKeys.EVENTHUB_NAME], + "define EVENTHUB_NAME in your environment before running integration tests." 
+ ); + }); - it("should be able to send a boolean as a message correctly", async function(): Promise { - const msgBody = true; - const obj: EventData = { body: msgBody }; - debug(`Partition ${partitionId} has last message with offset ${lastEnqueuedOffset}.`); - debug("Sending one message %O", obj); - await producerClient.sendBatch([obj], { partitionId }); - debug("Successfully sent the large message."); - - let subscription: Subscription | undefined; - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - debug("received message: ", data.length); - should.exist(data); - should.equal(data.length, 1); - assert.deepEqual(data[0].body, msgBody); - should.not.exist((data[0].properties || {}).message_id); - resolve(); - }, - processError: async (err) => { - reject(err); - } - }, - { - startPosition: { offset: lastEnqueuedOffset } - } - ); - }); - await subscription!.close(); - }); + beforeEach(async () => { + debug("Creating the clients.."); + producerClient = new EventHubProducerClient(service.connectionString, service.path); + consumerClient = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path + ); + hubInfo = await consumerClient.getEventHubProperties(); + partitionId = hubInfo.partitionIds[0]; + lastEnqueuedOffset = (await consumerClient.getPartitionProperties(partitionId)) + .lastEnqueuedOffset; + }); - it("should be able to send and receive batched messages correctly ", async function(): Promise< - void - > { - debug(`Partition ${partitionId} has last message with offset ${lastEnqueuedOffset}.`); - const messageCount = 5; - const d: EventData[] = []; - for (let i = 0; i < messageCount; i++) { - const obj: EventData = { body: `Hello EH ${i}` }; - d.push(obj); - } + afterEach(async () => { + debug("Closing the clients.."); + await producerClient.close(); + await consumerClient.close(); + }); - await 
producerClient.sendBatch(d, { partitionId }); - debug("Successfully sent 5 messages batched together."); - - let subscription: Subscription | undefined; - const receivedMsgs: ReceivedEventData[] = []; - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - debug("received message: ", data.length); - receivedMsgs.push(...data); - if (receivedMsgs.length === 5) { - resolve(); + it("should be able to send and receive a large message correctly", async function(): Promise< + void + > { + const bodysize = 220 * 1024; + const msgString = "A".repeat(220 * 1024); + const msgBody = Buffer.from(msgString); + const obj: EventData = { body: msgBody }; + debug(`Partition ${partitionId} has last message with offset ${lastEnqueuedOffset}.`); + debug("Sending one message with %d bytes.", bodysize); + await producerClient.sendBatch([obj], { partitionId }); + debug("Successfully sent the large message."); + + let subscription: Subscription | undefined; + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + debug("received message: ", data.length); + should.exist(data); + should.equal(data.length, 1); + should.equal(data[0].body.toString(), msgString); + should.not.exist((data[0].properties || {}).message_id); + resolve(); + }, + processError: async (err) => { + reject(err); + } + }, + { + startPosition: { offset: lastEnqueuedOffset } } - }, - processError: async (err) => { - reject(err); - } - }, - { - startPosition: { offset: lastEnqueuedOffset } - } - ); - }); - await subscription!.close(); - receivedMsgs.length.should.equal(5); - for (const message of receivedMsgs) { - should.not.exist((message.properties || {}).message_id); - } - }); + ); + }); + await subscription!.close(); + }); - it("should be able to send and receive batched messages as JSON objects correctly ", async function(): Promise< - void - > { - 
debug(`Partition ${partitionId} has last message with offset ${lastEnqueuedOffset}.`); - const messageCount = 5; - const d: EventData[] = []; - for (let i = 0; i < messageCount; i++) { - const obj: EventData = { - body: { + it("should be able to send and receive a JSON object as a message correctly", async function(): Promise< + void + > { + const msgBody = { id: "123-456-789", - count: i, weight: 10, isBlue: true, siblings: [ @@ -291,177 +139,357 @@ describe("Misc tests", function(): void { isBlue: false } ] - }, - properties: { - message_id: uuid() + }; + const obj: EventData = { body: msgBody }; + debug(`Partition ${partitionId} has last message with offset ${lastEnqueuedOffset}.`); + debug("Sending one message %O", obj); + await producerClient.sendBatch([obj], { partitionId }); + debug("Successfully sent the large message."); + + let subscription: Subscription | undefined; + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + debug("received message: ", data.length); + should.exist(data); + should.equal(data.length, 1); + assert.deepEqual(data[0].body, msgBody); + should.not.exist((data[0].properties || {}).message_id); + resolve(); + }, + processError: async (err) => { + reject(err); + } + }, + { + startPosition: { offset: lastEnqueuedOffset } + } + ); + }); + await subscription!.close(); + }); + + it("should be able to send and receive an array as a message correctly", async function(): Promise< + void + > { + const msgBody = [ + { + id: "098-789-564", + weight: 20, + isBlue: false + }, + 10, + 20, + "some string" + ]; + const obj: EventData = { body: msgBody, properties: { message_id: uuid() } }; + debug(`Partition ${partitionId} has last message with offset ${lastEnqueuedOffset}.`); + debug("Sending one message %O", obj); + await producerClient.sendBatch([obj], { partitionId }); + debug("Successfully sent the large message."); + + let subscription: Subscription | 
undefined; + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + debug("received message: ", data.length); + should.exist(data); + should.equal(data.length, 1); + assert.deepEqual(data[0].body, msgBody); + assert.strictEqual(data[0].properties!.message_id, obj.properties!.message_id); + resolve(); + }, + processError: async (err) => { + reject(err); + } + }, + { + startPosition: { offset: lastEnqueuedOffset } + } + ); + }); + await subscription!.close(); + }); + + it("should be able to send a boolean as a message correctly", async function(): Promise< + void + > { + const msgBody = true; + const obj: EventData = { body: msgBody }; + debug(`Partition ${partitionId} has last message with offset ${lastEnqueuedOffset}.`); + debug("Sending one message %O", obj); + await producerClient.sendBatch([obj], { partitionId }); + debug("Successfully sent the large message."); + + let subscription: Subscription | undefined; + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + debug("received message: ", data.length); + should.exist(data); + should.equal(data.length, 1); + assert.deepEqual(data[0].body, msgBody); + should.not.exist((data[0].properties || {}).message_id); + resolve(); + }, + processError: async (err) => { + reject(err); + } + }, + { + startPosition: { offset: lastEnqueuedOffset } + } + ); + }); + await subscription!.close(); + }); + + it("should be able to send and receive batched messages correctly ", async function(): Promise< + void + > { + debug(`Partition ${partitionId} has last message with offset ${lastEnqueuedOffset}.`); + const messageCount = 5; + const d: EventData[] = []; + for (let i = 0; i < messageCount; i++) { + const obj: EventData = { body: `Hello EH ${i}` }; + d.push(obj); } - }; - d.push(obj); - } - await producerClient.sendBatch(d, { partitionId }); - debug("Successfully 
sent 5 messages batched together."); - - let subscription: Subscription | undefined; - const receivedMsgs: ReceivedEventData[] = []; - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - debug("received message: ", data.length); - receivedMsgs.push(...data); - if (receivedMsgs.length === 5) { - resolve(); + await producerClient.sendBatch(d, { partitionId }); + debug("Successfully sent 5 messages batched together."); + + let subscription: Subscription | undefined; + const receivedMsgs: ReceivedEventData[] = []; + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + debug("received message: ", data.length); + receivedMsgs.push(...data); + if (receivedMsgs.length === 5) { + resolve(); + } + }, + processError: async (err) => { + reject(err); + } + }, + { + startPosition: { offset: lastEnqueuedOffset } } - }, - processError: async (err) => { - reject(err); - } - }, - { - startPosition: { offset: lastEnqueuedOffset } + ); + }); + await subscription!.close(); + receivedMsgs.length.should.equal(5); + for (const message of receivedMsgs) { + should.not.exist((message.properties || {}).message_id); } - ); - }); - await subscription!.close(); - should.equal(receivedMsgs[0].body.count, 0); - should.equal(receivedMsgs.length, 5); - for (const [index, message] of receivedMsgs.entries()) { - assert.strictEqual(message.properties!.message_id, d[index].properties!.message_id); - } - }); + }); - it("should consistently send messages with partitionkey to a partitionId", async function(): Promise< - void - > { - const { - subscriptionEventHandler, - startPosition - } = await SubscriptionHandlerForTests.startingFromHere(consumerClient); + it("should be able to send and receive batched messages as JSON objects correctly ", async function(): Promise< + void + > { + debug(`Partition ${partitionId} has last message 
with offset ${lastEnqueuedOffset}.`); + const messageCount = 5; + const d: EventData[] = []; + for (let i = 0; i < messageCount; i++) { + const obj: EventData = { + body: { + id: "123-456-789", + count: i, + weight: 10, + isBlue: true, + siblings: [ + { + id: "098-789-564", + weight: 20, + isBlue: false + } + ] + }, + properties: { + message_id: uuid() + } + }; + d.push(obj); + } - const msgToSendCount = 50; - debug("Sending %d messages.", msgToSendCount); + await producerClient.sendBatch(d, { partitionId }); + debug("Successfully sent 5 messages batched together."); - function getRandomInt(max: number): number { - return Math.floor(Math.random() * Math.floor(max)); - } + let subscription: Subscription | undefined; + const receivedMsgs: ReceivedEventData[] = []; + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + debug("received message: ", data.length); + receivedMsgs.push(...data); + if (receivedMsgs.length === 5) { + resolve(); + } + }, + processError: async (err) => { + reject(err); + } + }, + { + startPosition: { offset: lastEnqueuedOffset } + } + ); + }); + await subscription!.close(); + should.equal(receivedMsgs[0].body.count, 0); + should.equal(receivedMsgs.length, 5); + for (const [index, message] of receivedMsgs.entries()) { + assert.strictEqual(message.properties!.message_id, d[index].properties!.message_id); + } + }); - const senderPromises = []; + it("should consistently send messages with partitionkey to a partitionId", async function(): Promise< + void + > { + const { + subscriptionEventHandler, + startPosition + } = await SubscriptionHandlerForTests.startingFromHere(consumerClient); - for (let i = 0; i < msgToSendCount; i++) { - const partitionKey = getRandomInt(10); - senderPromises.push( - producerClient.sendBatch([{ body: "Hello EventHub " + i }], { - partitionKey: partitionKey.toString() - }) - ); - } + const msgToSendCount = 50; + debug("Sending %d 
messages.", msgToSendCount); - await Promise.all(senderPromises); + function getRandomInt(max: number): number { + return Math.floor(Math.random() * Math.floor(max)); + } - debug("Starting to receive all messages from each partition."); - const partitionMap: any = {}; + const senderPromises = []; - let subscription: Subscription | undefined = undefined; + for (let i = 0; i < msgToSendCount; i++) { + const partitionKey = getRandomInt(10); + senderPromises.push( + producerClient.sendBatch([{ body: "Hello EventHub " + i }], { + partitionKey: partitionKey.toString() + }) + ); + } - try { - subscription = consumerClient.subscribe(subscriptionEventHandler, { - startPosition - }); - const receivedEvents = await subscriptionEventHandler.waitForFullEvents( - hubInfo.partitionIds, - msgToSendCount - ); - - for (const d of receivedEvents) { - debug(">>>> _raw_amqp_mesage: ", (d as any)._raw_amqp_mesage); - const pk = d.event.partitionKey as string; - debug("pk: ", pk); - - if (partitionMap[pk] && partitionMap[pk] !== d.partitionId) { - debug( - `#### Error: Received a message from partition ${d.partitionId} with partition key ${pk}, whereas the same key was observed on partition ${partitionMap[pk]} before.` + await Promise.all(senderPromises); + + debug("Starting to receive all messages from each partition."); + const partitionMap: any = {}; + + let subscription: Subscription | undefined = undefined; + + try { + subscription = consumerClient.subscribe(subscriptionEventHandler, { + startPosition + }); + const receivedEvents = await subscriptionEventHandler.waitForFullEvents( + hubInfo.partitionIds, + msgToSendCount ); - assert(partitionMap[pk] === d.partitionId); + + for (const d of receivedEvents) { + debug(">>>> _raw_amqp_mesage: ", (d as any)._raw_amqp_mesage); + const pk = d.event.partitionKey as string; + debug("pk: ", pk); + + if (partitionMap[pk] && partitionMap[pk] !== d.partitionId) { + debug( + `#### Error: Received a message from partition ${d.partitionId} with 
partition key ${pk}, whereas the same key was observed on partition ${partitionMap[pk]} before.` + ); + assert(partitionMap[pk] === d.partitionId); + } + partitionMap[pk] = d.partitionId; + debug("partitionMap ", partitionMap); + } + } finally { + if (subscription) { + await subscription.close(); + } + await consumerClient.close(); } - partitionMap[pk] = d.partitionId; - debug("partitionMap ", partitionMap); - } - } finally { - if (subscription) { - await subscription.close(); - } - await consumerClient.close(); - } - }); -}).timeout(60000); - -describe("extractSpanContextFromEventData", function() { - it("should extract a SpanContext from a properly instrumented EventData", function() { - const traceId = "11111111111111111111111111111111"; - const spanId = "2222222222222222"; - const flags = "00"; - const eventData: ReceivedEventData = { - body: "This is a test.", - enqueuedTimeUtc: new Date(), - offset: 0, - sequenceNumber: 0, - partitionKey: null, - properties: { - [TRACEPARENT_PROPERTY]: `00-${traceId}-${spanId}-${flags}` - } - }; - - const spanContext = extractSpanContextFromEventData(eventData); - - should.exist(spanContext, "Extracted spanContext should be defined."); - should.equal(spanContext!.traceId, traceId, "Extracted traceId does not match expectation."); - should.equal(spanContext!.spanId, spanId, "Extracted spanId does not match expectation."); - should.equal( - spanContext!.traceFlags, - TraceFlags.NONE, - "Extracted traceFlags do not match expectations." 
- ); - }); + }); + }).timeout(60000); + + describe("extractSpanContextFromEventData", function() { + it("should extract a SpanContext from a properly instrumented EventData", function() { + const traceId = "11111111111111111111111111111111"; + const spanId = "2222222222222222"; + const flags = "00"; + const eventData: ReceivedEventData = { + body: "This is a test.", + enqueuedTimeUtc: new Date(), + offset: 0, + sequenceNumber: 0, + partitionKey: null, + properties: { + [TRACEPARENT_PROPERTY]: `00-${traceId}-${spanId}-${flags}` + } + }; + + const spanContext = extractSpanContextFromEventData(eventData); + + should.exist(spanContext, "Extracted spanContext should be defined."); + should.equal( + spanContext!.traceId, + traceId, + "Extracted traceId does not match expectation." + ); + should.equal(spanContext!.spanId, spanId, "Extracted spanId does not match expectation."); + should.equal( + spanContext!.traceFlags, + TraceFlags.NONE, + "Extracted traceFlags do not match expectations." + ); + }); - it("should return undefined when EventData is not properly instrumented", function() { - const traceId = "11111111111111111111111111111111"; - const spanId = "2222222222222222"; - const flags = "00"; - const eventData: ReceivedEventData = { - body: "This is a test.", - enqueuedTimeUtc: new Date(), - offset: 0, - sequenceNumber: 0, - partitionKey: null, - properties: { - [TRACEPARENT_PROPERTY]: `99-${traceId}-${spanId}-${flags}` - } - }; + it("should return undefined when EventData is not properly instrumented", function() { + const traceId = "11111111111111111111111111111111"; + const spanId = "2222222222222222"; + const flags = "00"; + const eventData: ReceivedEventData = { + body: "This is a test.", + enqueuedTimeUtc: new Date(), + offset: 0, + sequenceNumber: 0, + partitionKey: null, + properties: { + [TRACEPARENT_PROPERTY]: `99-${traceId}-${spanId}-${flags}` + } + }; - const spanContext = extractSpanContextFromEventData(eventData); + const spanContext = 
extractSpanContextFromEventData(eventData); - should.not.exist( - spanContext, - "Invalid diagnosticId version should return undefined spanContext." - ); - }); + should.not.exist( + spanContext, + "Invalid diagnosticId version should return undefined spanContext." + ); + }); - it("should return undefined when EventData is not instrumented", function() { - const eventData: ReceivedEventData = { - body: "This is a test.", - enqueuedTimeUtc: new Date(), - offset: 0, - sequenceNumber: 0, - partitionKey: null - }; - - const spanContext = extractSpanContextFromEventData(eventData); - - should.not.exist( - spanContext, - `Missing property "${TRACEPARENT_PROPERTY}" should return undefined spanContext.` - ); + it("should return undefined when EventData is not instrumented", function() { + const eventData: ReceivedEventData = { + body: "This is a test.", + enqueuedTimeUtc: new Date(), + offset: 0, + sequenceNumber: 0, + partitionKey: null + }; + + const spanContext = extractSpanContextFromEventData(eventData); + + should.not.exist( + spanContext, + `Missing property "${TRACEPARENT_PROPERTY}" should return undefined spanContext.` + ); + }); + }); }); }); diff --git a/sdk/eventhub/event-hubs/test/internal/receiveBatch.spec.ts b/sdk/eventhub/event-hubs/test/internal/receiveBatch.spec.ts index afee69fab991..08dd6dca662e 100644 --- a/sdk/eventhub/event-hubs/test/internal/receiveBatch.spec.ts +++ b/sdk/eventhub/event-hubs/test/internal/receiveBatch.spec.ts @@ -13,246 +13,268 @@ import { EventHubProducerClient, EventPosition } from "../../src"; -import { EnvVarKeys, getEnvVars } from "../public/utils/testUtils"; +import { EnvVarKeys, getEnvVars, getEnvVarValue, isNode } from "../public/utils/testUtils"; import { AbortController } from "@azure/abort-controller"; import { EventHubReceiver } from "../../src/eventHubReceiver"; import { translate, StandardAbortMessage } from "@azure/core-amqp"; -const env = getEnvVars(); - -describe("EventHubConsumerClient", function(): void { - const 
service = { - connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - path: env[EnvVarKeys.EVENTHUB_NAME] - }; - let producerClient: EventHubProducerClient; - let consumerClient: EventHubConsumerClient; - let partitionIds: string[]; - before("validate environment", async function(): Promise { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." - ); - }); - - beforeEach("Creating the clients", async () => { - producerClient = new EventHubProducerClient(service.connectionString, service.path); - consumerClient = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path - ); - partitionIds = await producerClient.getPartitionIds({}); - }); - - afterEach("Closing the clients", async () => { - await producerClient.close(); - await consumerClient.close(); - }); - - describe("EventHubConsumer receiveBatch", function(): void { - it("should support being cancelled", async function(): Promise { - const partitionId = partitionIds[0]; - const time = Date.now(); - - // send a message that can be received - await producerClient.sendBatch([{ body: "batchReceiver cancellation - timeout 0" }], { - partitionId - }); - - const receiver = new EventHubReceiver( - consumerClient["_context"], - EventHubConsumerClient.defaultConsumerGroupName, - partitionId, - { - enqueuedOn: time - } - ); - - try { - // abortSignal event listeners will be triggered after synchronous paths are executed - const abortSignal = AbortController.timeout(0); - await receiver.receiveBatch(1, 60, abortSignal); - throw new Error(`Test failure`); - } catch (err) { - err.name.should.equal("AbortError"); - err.message.should.equal(StandardAbortMessage); +import { versionsToTest } from 
"@azure/test-utils-multi-version"; +import { createMockServer } from "../public/utils/mockService"; + +const serviceVersions = ["mock", "live"] as const; +const testTarget = getEnvVarValue("TEST_TARGET") || "live"; + +describe("internal/receiveBatch.spec.ts", function() { + versionsToTest(serviceVersions, { versionForRecording: testTarget }, (serviceVersion) => { + const env = getEnvVars(serviceVersion as "live" | "mock"); + if (isNode) { + if (serviceVersion === "mock") { + let service: ReturnType; + before("Starting mock server", async () => { + service = createMockServer(); + + return service.start(); + }); + after("Stopping mock server", async () => { + return service?.stop(); + }); } - - await receiver.close(); - }); - - it("should support being cancelled from an already aborted AbortSignal", async function(): Promise< - void - > { - const partitionId = partitionIds[0]; - const time = Date.now(); - - // send a message that can be received - await producerClient.sendBatch([{ body: "batchReceiver cancellation - immediate" }], { - partitionId + } + describe("EventHubConsumerClient", function(): void { + const service = { + connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + path: env[EnvVarKeys.EVENTHUB_NAME] + }; + let producerClient: EventHubProducerClient; + let consumerClient: EventHubConsumerClient; + let partitionIds: string[]; + before("validate environment", async function(): Promise { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." + ); + should.exist( + env[EnvVarKeys.EVENTHUB_NAME], + "define EVENTHUB_NAME in your environment before running integration tests." 
+ ); }); - const receiver = new EventHubReceiver( - consumerClient["_context"], - EventHubConsumerClient.defaultConsumerGroupName, - partitionId, - { - enqueuedOn: time - } - ); - - try { - // abortSignal event listeners will be triggered after synchronous paths are executed - const abortController = new AbortController(); - abortController.abort(); - await receiver.receiveBatch(1, 60, abortController.signal); - throw new Error(`Test failure`); - } catch (err) { - err.name.should.equal("AbortError"); - err.message.should.equal(StandardAbortMessage); - } - - await receiver.close(); - }); - - it("should support cancellation when a connection already exists", async function(): Promise< - void - > { - const partitionId = partitionIds[0]; - const time = Date.now(); - - // send a message that can be received - await producerClient.sendBatch([{ body: "batchReceiver cancellation - timeout 0" }], { - partitionId + beforeEach("Creating the clients", async () => { + producerClient = new EventHubProducerClient(service.connectionString, service.path); + consumerClient = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path + ); + partitionIds = await producerClient.getPartitionIds({}); }); - const receiver = new EventHubReceiver( - consumerClient["_context"], - EventHubConsumerClient.defaultConsumerGroupName, - partitionId, - { - enqueuedOn: time - } - ); - - try { - // call receiveBatch once to establish a connection - await receiver.receiveBatch(1, 60); - // abortSignal event listeners will be triggered after synchronous paths are executed - const abortSignal = AbortController.timeout(0); - await receiver.receiveBatch(1, 60, abortSignal); - throw new Error(`Test failure`); - } catch (err) { - err.name.should.equal("AbortError"); - err.message.should.equal(StandardAbortMessage); - } - - await receiver.close(); - }); - - it("should not lose messages on error", async () => { - const partitionId = 
partitionIds[0]; - const { lastEnqueuedSequenceNumber } = await producerClient.getPartitionProperties( - partitionId - ); + afterEach("Closing the clients", async () => { + await producerClient.close(); + await consumerClient.close(); + }); - // Ensure the receiver only looks at new messages. - const startPosition: EventPosition = { - sequenceNumber: lastEnqueuedSequenceNumber, - isInclusive: false - }; + describe("EventHubConsumer receiveBatch", function(): void { + it("should support being cancelled", async function(): Promise { + const partitionId = partitionIds[0]; + const time = Date.now(); + + // send a message that can be received + await producerClient.sendBatch([{ body: "batchReceiver cancellation - timeout 0" }], { + partitionId + }); + + const receiver = new EventHubReceiver( + consumerClient["_context"], + EventHubConsumerClient.defaultConsumerGroupName, + partitionId, + { + enqueuedOn: time + } + ); + + try { + // abortSignal event listeners will be triggered after synchronous paths are executed + const abortSignal = AbortController.timeout(0); + await receiver.receiveBatch(1, 60, abortSignal); + throw new Error(`Test failure`); + } catch (err) { + err.name.should.equal("AbortError"); + err.message.should.equal(StandardAbortMessage); + } - // Send a message we expect to receive. - const message: EventData = { body: "remember me!" }; - await producerClient.sendBatch([message], { partitionId }); - - // Disable retries to make it easier to test scenario. 
- const receiver = new EventHubReceiver( - consumerClient["_context"], - EventHubConsumerClient.defaultConsumerGroupName, - partitionId, - startPosition, - { - retryOptions: { - maxRetries: 0 + await receiver.close(); + }); + + it("should support being cancelled from an already aborted AbortSignal", async function(): Promise< + void + > { + const partitionId = partitionIds[0]; + const time = Date.now(); + + // send a message that can be received + await producerClient.sendBatch([{ body: "batchReceiver cancellation - immediate" }], { + partitionId + }); + + const receiver = new EventHubReceiver( + consumerClient["_context"], + EventHubConsumerClient.defaultConsumerGroupName, + partitionId, + { + enqueuedOn: time + } + ); + + try { + // abortSignal event listeners will be triggered after synchronous paths are executed + const abortController = new AbortController(); + abortController.abort(); + await receiver.receiveBatch(1, 60, abortController.signal); + throw new Error(`Test failure`); + } catch (err) { + err.name.should.equal("AbortError"); + err.message.should.equal(StandardAbortMessage); } - } - ); - - // Periodically check that the receiver's checkpoint has been updated. - const checkpointInterval = setInterval(() => { - if (receiver.checkpoint > -1) { - clearInterval(checkpointInterval); - const error = translate(new Error("I break receivers for fun.")); - receiver["_onError"]!(error); - } - }, 50); - - try { - // There is only 1 message. - // We expect to see an error. 
- await receiver.receiveBatch(2, 60); - throw new Error(`Test failure`); - } catch (err) { - err.message.should.not.equal("Test failure"); - receiver.checkpoint.should.be.greaterThan(-1, "Did not see a message come through."); - } finally { - clearInterval(checkpointInterval); - } - const events = await receiver.receiveBatch(1); - events.length.should.equal(1, "Unexpected number of events received."); - events[0].body.should.equal(message.body, "Unexpected message received."); - }); - - it("should not lose messages between retries", async () => { - const partitionId = partitionIds[0]; - const { lastEnqueuedSequenceNumber } = await producerClient.getPartitionProperties( - partitionId - ); - - // Ensure the receiver only looks at new messages. - const startPosition: EventPosition = { - sequenceNumber: lastEnqueuedSequenceNumber, - isInclusive: false - }; + await receiver.close(); + }); + + it("should support cancellation when a connection already exists", async function(): Promise< + void + > { + const partitionId = partitionIds[0]; + const time = Date.now(); + + // send a message that can be received + await producerClient.sendBatch([{ body: "batchReceiver cancellation - timeout 0" }], { + partitionId + }); + + const receiver = new EventHubReceiver( + consumerClient["_context"], + EventHubConsumerClient.defaultConsumerGroupName, + partitionId, + { + enqueuedOn: time + } + ); + + try { + // call receiveBatch once to establish a connection + await receiver.receiveBatch(1, 60); + // abortSignal event listeners will be triggered after synchronous paths are executed + const abortSignal = AbortController.timeout(0); + await receiver.receiveBatch(1, 60, abortSignal); + throw new Error(`Test failure`); + } catch (err) { + err.name.should.equal("AbortError"); + err.message.should.equal(StandardAbortMessage); + } - // Send a message we expect to receive. - const message: EventData = { body: "remember me!" 
}; - await producerClient.sendBatch([message], { partitionId }); - - // Disable retries to make it easier to test scenario. - const receiver = new EventHubReceiver( - consumerClient["_context"], - EventHubConsumerClient.defaultConsumerGroupName, - partitionId, - startPosition, - { - retryOptions: { - maxRetries: 1 + await receiver.close(); + }); + + it("should not lose messages on error", async () => { + const partitionId = partitionIds[0]; + const { lastEnqueuedSequenceNumber } = await producerClient.getPartitionProperties( + partitionId + ); + + // Ensure the receiver only looks at new messages. + const startPosition: EventPosition = { + sequenceNumber: lastEnqueuedSequenceNumber, + isInclusive: false + }; + + // Send a message we expect to receive. + const message: EventData = { body: "remember me!" }; + await producerClient.sendBatch([message], { partitionId }); + + // Disable retries to make it easier to test scenario. + const receiver = new EventHubReceiver( + consumerClient["_context"], + EventHubConsumerClient.defaultConsumerGroupName, + partitionId, + startPosition, + { + retryOptions: { + maxRetries: 0 + } + } + ); + + // Periodically check that the receiver's checkpoint has been updated. + const checkpointInterval = setInterval(() => { + if (receiver.checkpoint > -1) { + clearInterval(checkpointInterval); + const error = translate(new Error("I break receivers for fun.")); + receiver["_onError"]!(error); + } + }, 50); + + try { + // There is only 1 message. + // We expect to see an error. + await receiver.receiveBatch(2, 60); + throw new Error(`Test failure`); + } catch (err) { + err.message.should.not.equal("Test failure"); + receiver.checkpoint.should.be.greaterThan(-1, "Did not see a message come through."); + } finally { + clearInterval(checkpointInterval); } - } - ); - - // Periodically check that the receiver's checkpoint has been updated. 
- const checkpointInterval = setInterval(() => { - if (receiver.checkpoint > -1) { - clearInterval(checkpointInterval); - const error = translate(new Error("I break receivers for fun.")) as MessagingError; - error.retryable = true; - receiver["_onError"]!(error); - } - }, 50); - - // There is only 1 message. - const events = await receiver.receiveBatch(2, 20); - - events.length.should.equal(1, "Unexpected number of events received."); - events[0].body.should.equal(message.body, "Unexpected message received."); - }); + + const events = await receiver.receiveBatch(1); + events.length.should.equal(1, "Unexpected number of events received."); + events[0].body.should.equal(message.body, "Unexpected message received."); + }); + + it("should not lose messages between retries", async () => { + const partitionId = partitionIds[0]; + const { lastEnqueuedSequenceNumber } = await producerClient.getPartitionProperties( + partitionId + ); + + // Ensure the receiver only looks at new messages. + const startPosition: EventPosition = { + sequenceNumber: lastEnqueuedSequenceNumber, + isInclusive: false + }; + + // Send a message we expect to receive. + const message: EventData = { body: "remember me!" }; + await producerClient.sendBatch([message], { partitionId }); + + // Disable retries to make it easier to test scenario. + const receiver = new EventHubReceiver( + consumerClient["_context"], + EventHubConsumerClient.defaultConsumerGroupName, + partitionId, + startPosition, + { + retryOptions: { + maxRetries: 1 + } + } + ); + + // Periodically check that the receiver's checkpoint has been updated. + const checkpointInterval = setInterval(() => { + if (receiver.checkpoint > -1) { + clearInterval(checkpointInterval); + const error = translate(new Error("I break receivers for fun.")) as MessagingError; + error.retryable = true; + receiver["_onError"]!(error); + } + }, 50); + + // There is only 1 message. 
+ const events = await receiver.receiveBatch(2, 20); + + events.length.should.equal(1, "Unexpected number of events received."); + events[0].body.should.equal(message.body, "Unexpected message received."); + }); + }); + }).timeout(90000); }); -}).timeout(90000); +}); diff --git a/sdk/eventhub/event-hubs/test/internal/sender.spec.ts b/sdk/eventhub/event-hubs/test/internal/sender.spec.ts index 7364825aae3c..a6e6e5b58af6 100644 --- a/sdk/eventhub/event-hubs/test/internal/sender.spec.ts +++ b/sdk/eventhub/event-hubs/test/internal/sender.spec.ts @@ -20,7 +20,9 @@ import { import { EnvVarKeys, getEnvVars, + getEnvVarValue, getStartingPositionsForTests, + isNode, setTracerForTest } from "../public/utils/testUtils"; import { AbortController } from "@azure/abort-controller"; @@ -29,369 +31,313 @@ import { TRACEPARENT_PROPERTY } from "../../src/diagnostics/instrumentEventData" import { SubscriptionHandlerForTests } from "../public/utils/subscriptionHandlerForTests"; import { StandardAbortMessage } from "@azure/core-amqp"; import { setSpan, context } from "@azure/core-tracing"; -const env = getEnvVars(); - -describe("EventHub Sender", function(): void { - const service = { - connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - path: env[EnvVarKeys.EVENTHUB_NAME] - }; - let producerClient: EventHubProducerClient; - let consumerClient: EventHubConsumerClient; - let startPosition: { [partitionId: string]: EventPosition }; - - before("validate environment", function(): void { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." 
- ); - }); - - beforeEach(async () => { - debug("Creating the clients.."); - producerClient = new EventHubProducerClient(service.connectionString, service.path); - consumerClient = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path - ); - startPosition = await getStartingPositionsForTests(consumerClient); - }); - - afterEach(async () => { - debug("Closing the clients.."); - await producerClient.close(); - await consumerClient.close(); - }); - - describe("Create batch", function(): void { - describe("tryAdd", function() { - it("doesn't grow if invalid events are added", async () => { - const batch = await producerClient.createBatch({ maxSizeInBytes: 20 }); - const event = { body: Buffer.alloc(30).toString() }; - - const numToAdd = 5; - let failures = 0; - for (let i = 0; i < numToAdd; i++) { - if (!batch.tryAdd(event)) { - failures++; - } - } - - failures.should.equal(5); - batch.sizeInBytes.should.equal(0); - }); - }); - - it("partitionId is set as expected", async () => { - const batch = await producerClient.createBatch({ - partitionId: "0" - }); - should.equal(batch.partitionId, "0"); - }); +import { versionsToTest } from "@azure/test-utils-multi-version"; +import { createMockServer } from "../public/utils/mockService"; + +const serviceVersions = ["mock", "live"] as const; +const testTarget = getEnvVarValue("TEST_TARGET") || "live"; + +describe("internal/sender.spec.ts", function() { + versionsToTest(serviceVersions, { versionForRecording: testTarget }, (serviceVersion) => { + const env = getEnvVars(serviceVersion as "live" | "mock"); + if (isNode) { + if (serviceVersion === "mock") { + let service: ReturnType; + before("Starting mock server", async () => { + service = createMockServer(); + + return service.start(); + }); + after("Stopping mock server", async () => { + return service?.stop(); + }); + } + } - it("partitionId is set as expected when it is 0 i.e. 
falsy", async () => { - const batch = await producerClient.createBatch({ - // @ts-expect-error Testing the value 0 is not ignored. - partitionId: 0 + describe("EventHub Sender", function(): void { + const service = { + connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + path: env[EnvVarKeys.EVENTHUB_NAME] + }; + let producerClient: EventHubProducerClient; + let consumerClient: EventHubConsumerClient; + let startPosition: { [partitionId: string]: EventPosition }; + + before("validate environment", function(): void { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." + ); + should.exist( + env[EnvVarKeys.EVENTHUB_NAME], + "define EVENTHUB_NAME in your environment before running integration tests." + ); }); - should.equal(batch.partitionId, "0"); - }); - it("partitionKey is set as expected", async () => { - const batch = await producerClient.createBatch({ - partitionKey: "boo" + beforeEach(async () => { + debug("Creating the clients.."); + producerClient = new EventHubProducerClient(service.connectionString, service.path); + consumerClient = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path + ); + startPosition = await getStartingPositionsForTests(consumerClient); }); - should.equal(batch.partitionKey, "boo"); - }); - it("partitionKey is set as expected when it is 0 i.e. falsy", async () => { - const batch = await producerClient.createBatch({ - // @ts-expect-error Testing the value 0 is not ignored. 
- partitionKey: 0 + afterEach(async () => { + debug("Closing the clients.."); + await producerClient.close(); + await consumerClient.close(); }); - should.equal(batch.partitionKey, "0"); - }); - it("maxSizeInBytes is set as expected", async () => { - const batch = await producerClient.createBatch({ maxSizeInBytes: 30 }); - should.equal(batch.maxSizeInBytes, 30); - }); - - it("should be sent successfully", async function(): Promise { - const list = ["Albert", `${Buffer.from("Mike".repeat(1300000))}`, "Marie"]; + describe("Create batch", function(): void { + describe("tryAdd", function() { + it("doesn't grow if invalid events are added", async () => { + const batch = await producerClient.createBatch({ maxSizeInBytes: 20 }); + const event = { body: Buffer.alloc(30).toString() }; + + const numToAdd = 5; + let failures = 0; + for (let i = 0; i < numToAdd; i++) { + if (!batch.tryAdd(event)) { + failures++; + } + } - const batch = await producerClient.createBatch({ - partitionId: "0" - }); + failures.should.equal(5); + batch.sizeInBytes.should.equal(0); + }); + }); - batch.partitionId!.should.equal("0"); - should.not.exist(batch.partitionKey); - batch.maxSizeInBytes.should.be.gt(0); + it("partitionId is set as expected", async () => { + const batch = await producerClient.createBatch({ + partitionId: "0" + }); + should.equal(batch.partitionId, "0"); + }); - should.equal(batch.tryAdd({ body: list[0] }), true); - should.equal(batch.tryAdd({ body: list[1] }), false); // The Mike message will be rejected - it's over the limit. - should.equal(batch.tryAdd({ body: list[2] }), true); // Marie should get added"; + it("partitionId is set as expected when it is 0 i.e. falsy", async () => { + const batch = await producerClient.createBatch({ + // @ts-expect-error Testing the value 0 is not ignored. 
+ partitionId: 0 + }); + should.equal(batch.partitionId, "0"); + }); - const { subscriptionEventHandler } = await SubscriptionHandlerForTests.startingFromHere( - producerClient - ); + it("partitionKey is set as expected", async () => { + const batch = await producerClient.createBatch({ + partitionKey: "boo" + }); + should.equal(batch.partitionKey, "boo"); + }); - const subscriber = consumerClient.subscribe("0", subscriptionEventHandler, { startPosition }); - await producerClient.sendBatch(batch); + it("partitionKey is set as expected when it is 0 i.e. falsy", async () => { + const batch = await producerClient.createBatch({ + // @ts-expect-error Testing the value 0 is not ignored. + partitionKey: 0 + }); + should.equal(batch.partitionKey, "0"); + }); - let receivedEvents; + it("maxSizeInBytes is set as expected", async () => { + const batch = await producerClient.createBatch({ maxSizeInBytes: 30 }); + should.equal(batch.maxSizeInBytes, 30); + }); - try { - receivedEvents = await subscriptionEventHandler.waitForEvents(["0"], 2); - } finally { - await subscriber.close(); - } + it("should be sent successfully", async function(): Promise { + const list = ["Albert", `${Buffer.from("Mike".repeat(1300000))}`, "Marie"]; - // Mike didn't make it - the message was too big for the batch - // and was rejected above. - [list[0], list[2]].should.be.deep.eq( - receivedEvents.map((event) => event.body), - "Received messages should be equal to our sent messages" - ); - }); - - it("should be sent successfully when partitionId is 0 i.e. falsy", async function(): Promise< - void - > { - const list = ["Albert", "Marie"]; - - const batch = await producerClient.createBatch({ - // @ts-expect-error Testing the value 0 is not ignored. 
- partitionId: 0 - }); + const batch = await producerClient.createBatch({ + partitionId: "0" + }); - batch.partitionId!.should.equal("0"); - should.not.exist(batch.partitionKey); - batch.maxSizeInBytes.should.be.gt(0); + batch.partitionId!.should.equal("0"); + should.not.exist(batch.partitionKey); + batch.maxSizeInBytes.should.be.gt(0); - should.equal(batch.tryAdd({ body: list[0] }), true); - should.equal(batch.tryAdd({ body: list[1] }), true); + should.equal(batch.tryAdd({ body: list[0] }), true); + should.equal(batch.tryAdd({ body: list[1] }), false); // The Mike message will be rejected - it's over the limit. + should.equal(batch.tryAdd({ body: list[2] }), true); // Marie should get added"; - const { subscriptionEventHandler } = await SubscriptionHandlerForTests.startingFromHere( - producerClient - ); + const { subscriptionEventHandler } = await SubscriptionHandlerForTests.startingFromHere( + producerClient + ); - const subscriber = consumerClient.subscribe("0", subscriptionEventHandler, { startPosition }); - await producerClient.sendBatch(batch); + const subscriber = consumerClient.subscribe("0", subscriptionEventHandler, { + startPosition + }); + await producerClient.sendBatch(batch); - let receivedEvents; + let receivedEvents; - try { - receivedEvents = await subscriptionEventHandler.waitForEvents(["0"], 2); - } finally { - await subscriber.close(); - } + try { + receivedEvents = await subscriptionEventHandler.waitForEvents(["0"], 2); + } finally { + await subscriber.close(); + } - list.should.be.deep.eq( - receivedEvents.map((event) => event.body), - "Received messages should be equal to our sent messages" - ); - }); + // Mike didn't make it - the message was too big for the batch + // and was rejected above. + [list[0], list[2]].should.be.deep.eq( + receivedEvents.map((event) => event.body), + "Received messages should be equal to our sent messages" + ); + }); - it("should be sent successfully when partitionKey is 0 i.e. 
falsy", async function(): Promise< - void - > { - const list = ["Albert", "Marie"]; + it("should be sent successfully when partitionId is 0 i.e. falsy", async function(): Promise< + void + > { + const list = ["Albert", "Marie"]; - const batch = await producerClient.createBatch({ - // @ts-expect-error Testing the value 0 is not ignored. - partitionKey: 0 - }); + const batch = await producerClient.createBatch({ + // @ts-expect-error Testing the value 0 is not ignored. + partitionId: 0 + }); - batch.partitionKey!.should.equal("0"); - should.not.exist(batch.partitionId); - batch.maxSizeInBytes.should.be.gt(0); + batch.partitionId!.should.equal("0"); + should.not.exist(batch.partitionKey); + batch.maxSizeInBytes.should.be.gt(0); - should.equal(batch.tryAdd({ body: list[0] }), true); - should.equal(batch.tryAdd({ body: list[1] }), true); + should.equal(batch.tryAdd({ body: list[0] }), true); + should.equal(batch.tryAdd({ body: list[1] }), true); - const { subscriptionEventHandler } = await SubscriptionHandlerForTests.startingFromHere( - producerClient - ); + const { subscriptionEventHandler } = await SubscriptionHandlerForTests.startingFromHere( + producerClient + ); - const subscriber = consumerClient.subscribe(subscriptionEventHandler, { - startPosition - }); - await producerClient.sendBatch(batch); - - let receivedEvents; - const allPartitionIds = await producerClient.getPartitionIds(); - try { - receivedEvents = await subscriptionEventHandler.waitForEvents(allPartitionIds, 2); - } finally { - await subscriber.close(); - } + const subscriber = consumerClient.subscribe("0", subscriptionEventHandler, { + startPosition + }); + await producerClient.sendBatch(batch); - list.should.be.deep.eq( - receivedEvents.map((event) => event.body), - "Received messages should be equal to our sent messages" - ); - }); - - it("should be sent successfully with properties", async function(): Promise { - const properties = { test: "super" }; - const list = [ - { body: 
"Albert-With-Properties", properties }, - { body: "Mike-With-Properties", properties }, - { body: "Marie-With-Properties", properties } - ]; - - const batch = await producerClient.createBatch({ - partitionId: "0" - }); + let receivedEvents; - batch.maxSizeInBytes.should.be.gt(0); - - should.equal(batch.tryAdd(list[0]), true); - should.equal(batch.tryAdd(list[1]), true); - should.equal(batch.tryAdd(list[2]), true); - - const receivedEvents: ReceivedEventData[] = []; - let waitUntilEventsReceivedResolver: (value?: any) => void; - const waitUntilEventsReceived = new Promise( - (resolve) => (waitUntilEventsReceivedResolver = resolve) - ); - - const sequenceNumber = (await consumerClient.getPartitionProperties("0")) - .lastEnqueuedSequenceNumber; - - const subscriber = consumerClient.subscribe( - "0", - { - async processError() { - /* no-op */ - }, - async processEvents(events) { - receivedEvents.push(...events); - if (receivedEvents.length >= 3) { - waitUntilEventsReceivedResolver(); - } + try { + receivedEvents = await subscriptionEventHandler.waitForEvents(["0"], 2); + } finally { + await subscriber.close(); } - }, - { - startPosition: { - sequenceNumber - }, - maxBatchSize: 3 - } - ); - await producerClient.sendBatch(batch); - await waitUntilEventsReceived; - await subscriber.close(); + list.should.be.deep.eq( + receivedEvents.map((event) => event.body), + "Received messages should be equal to our sent messages" + ); + }); - sequenceNumber.should.be.lessThan(receivedEvents[0].sequenceNumber); - sequenceNumber.should.be.lessThan(receivedEvents[1].sequenceNumber); - sequenceNumber.should.be.lessThan(receivedEvents[2].sequenceNumber); + it("should be sent successfully when partitionKey is 0 i.e. 
falsy", async function(): Promise< + void + > { + const list = ["Albert", "Marie"]; - [list[0], list[1], list[2]].should.be.deep.eq( - receivedEvents.map((event) => { - return { - body: event.body, - properties: event.properties - }; - }), - "Received messages should be equal to our sent messages" - ); - }); + const batch = await producerClient.createBatch({ + // @ts-expect-error Testing the value 0 is not ignored. + partitionKey: 0 + }); - it("can be manually traced", async function(): Promise { - const { tracer, resetTracer } = setTracerForTest(); + batch.partitionKey!.should.equal("0"); + should.not.exist(batch.partitionId); + batch.maxSizeInBytes.should.be.gt(0); - const rootSpan = tracer.startSpan("root"); + should.equal(batch.tryAdd({ body: list[0] }), true); + should.equal(batch.tryAdd({ body: list[1] }), true); - const list = [{ name: "Albert" }, { name: "Marie" }]; + const { subscriptionEventHandler } = await SubscriptionHandlerForTests.startingFromHere( + producerClient + ); - const eventDataBatch = await producerClient.createBatch({ - partitionId: "0" - }); + const subscriber = consumerClient.subscribe(subscriptionEventHandler, { + startPosition + }); + await producerClient.sendBatch(batch); - for (let i = 0; i < 2; i++) { - eventDataBatch.tryAdd( - { body: `${list[i].name}` }, - { - tracingOptions: { - tracingContext: setSpan(context.active(), rootSpan) - } - } - ); - } - await producerClient.sendBatch(eventDataBatch); - rootSpan.end(); - - const rootSpans = tracer.getRootSpans(); - rootSpans.length.should.equal(2, "Should only have two root spans."); - rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); - - const expectedGraph: SpanGraph = { - roots: [ - { - name: rootSpan.name, - children: [ - { - name: "Azure.EventHubs.message", - children: [] - }, - { - name: "Azure.EventHubs.message", - children: [] - } - ] + let receivedEvents; + const allPartitionIds = await producerClient.getPartitionIds(); + try { + 
receivedEvents = await subscriptionEventHandler.waitForEvents(allPartitionIds, 2); + } finally { + await subscriber.close(); } - ] - }; - tracer.getSpanGraph(rootSpan.context().traceId).should.eql(expectedGraph); - tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); - resetTracer(); - }); + list.should.be.deep.eq( + receivedEvents.map((event) => event.body), + "Received messages should be equal to our sent messages" + ); + }); - function legacyOptionsUsingSpanContext(rootSpan: TestSpan): Pick { - return { - parentSpan: rootSpan.context() - }; - } + it("should be sent successfully with properties", async function(): Promise { + const properties = { test: "super" }; + const list = [ + { body: "Albert-With-Properties", properties }, + { body: "Mike-With-Properties", properties }, + { body: "Marie-With-Properties", properties } + ]; - function legacyOptionsUsingSpan(rootSpan: TestSpan): Pick { - return { - parentSpan: rootSpan - }; - } + const batch = await producerClient.createBatch({ + partitionId: "0" + }); - function modernOptions(rootSpan: TestSpan): OperationOptions { - return { - tracingOptions: { - tracingContext: setSpan(context.active(), rootSpan) - } - }; - } + batch.maxSizeInBytes.should.be.gt(0); - [legacyOptionsUsingSpan, legacyOptionsUsingSpanContext, modernOptions].forEach((optionsFn) => { - describe(`tracing (${optionsFn.name})`, () => { - it("will not instrument already instrumented events", async function(): Promise { - const { tracer, resetTracer } = setTracerForTest(); + should.equal(batch.tryAdd(list[0]), true); + should.equal(batch.tryAdd(list[1]), true); + should.equal(batch.tryAdd(list[2]), true); + + const receivedEvents: ReceivedEventData[] = []; + let waitUntilEventsReceivedResolver: (value?: any) => void; + const waitUntilEventsReceived = new Promise( + (resolve) => (waitUntilEventsReceivedResolver = resolve) + ); - const rootSpan = tracer.startSpan("test"); + const sequenceNumber = (await 
consumerClient.getPartitionProperties("0")) + .lastEnqueuedSequenceNumber; - const list = [ - { name: "Albert" }, + const subscriber = consumerClient.subscribe( + "0", { - name: "Marie", - properties: { - [TRACEPARENT_PROPERTY]: "foo" + async processError() { + /* no-op */ + }, + async processEvents(events) { + receivedEvents.push(...events); + if (receivedEvents.length >= 3) { + waitUntilEventsReceivedResolver(); + } } + }, + { + startPosition: { + sequenceNumber + }, + maxBatchSize: 3 } - ]; + ); + + await producerClient.sendBatch(batch); + await waitUntilEventsReceived; + await subscriber.close(); + + sequenceNumber.should.be.lessThan(receivedEvents[0].sequenceNumber); + sequenceNumber.should.be.lessThan(receivedEvents[1].sequenceNumber); + sequenceNumber.should.be.lessThan(receivedEvents[2].sequenceNumber); + + [list[0], list[1], list[2]].should.be.deep.eq( + receivedEvents.map((event) => { + return { + body: event.body, + properties: event.properties + }; + }), + "Received messages should be equal to our sent messages" + ); + }); + + it("can be manually traced", async function(): Promise { + const { tracer, resetTracer } = setTracerForTest(); + + const rootSpan = tracer.startSpan("root"); + + const list = [{ name: "Albert" }, { name: "Marie" }]; const eventDataBatch = await producerClient.createBatch({ partitionId: "0" @@ -399,8 +345,12 @@ describe("EventHub Sender", function(): void { for (let i = 0; i < 2; i++) { eventDataBatch.tryAdd( - { body: `${list[i].name}`, properties: list[i].properties }, - optionsFn(rootSpan) + { body: `${list[i].name}` }, + { + tracingOptions: { + tracingContext: setSpan(context.active(), rootSpan) + } + } ); } await producerClient.sendBatch(eventDataBatch); @@ -415,6 +365,10 @@ describe("EventHub Sender", function(): void { { name: rootSpan.name, children: [ + { + name: "Azure.EventHubs.message", + children: [] + }, { name: "Azure.EventHubs.message", children: [] @@ -429,20 +383,324 @@ describe("EventHub Sender", function(): void 
{ resetTracer(); }); - it("will support tracing batch and send", async function(): Promise { - const { tracer, resetTracer } = setTracerForTest(); + function legacyOptionsUsingSpanContext( + rootSpan: TestSpan + ): Pick { + return { + parentSpan: rootSpan.context() + }; + } - const rootSpan = tracer.startSpan("root"); + function legacyOptionsUsingSpan(rootSpan: TestSpan): Pick { + return { + parentSpan: rootSpan + }; + } - const list = [{ name: "Albert" }, { name: "Marie" }]; + function modernOptions(rootSpan: TestSpan): OperationOptions { + return { + tracingOptions: { + tracingContext: setSpan(context.active(), rootSpan) + } + }; + } + + [legacyOptionsUsingSpan, legacyOptionsUsingSpanContext, modernOptions].forEach( + (optionsFn) => { + describe(`tracing (${optionsFn.name})`, () => { + it("will not instrument already instrumented events", async function(): Promise< + void + > { + const { tracer, resetTracer } = setTracerForTest(); + + const rootSpan = tracer.startSpan("test"); + + const list = [ + { name: "Albert" }, + { + name: "Marie", + properties: { + [TRACEPARENT_PROPERTY]: "foo" + } + } + ]; + + const eventDataBatch = await producerClient.createBatch({ + partitionId: "0" + }); + + for (let i = 0; i < 2; i++) { + eventDataBatch.tryAdd( + { body: `${list[i].name}`, properties: list[i].properties }, + optionsFn(rootSpan) + ); + } + await producerClient.sendBatch(eventDataBatch); + rootSpan.end(); + + const rootSpans = tracer.getRootSpans(); + rootSpans.length.should.equal(2, "Should only have two root spans."); + rootSpans[0].should.equal( + rootSpan, + "The root span should match what was passed in." 
+ ); + + const expectedGraph: SpanGraph = { + roots: [ + { + name: rootSpan.name, + children: [ + { + name: "Azure.EventHubs.message", + children: [] + } + ] + } + ] + }; + + tracer.getSpanGraph(rootSpan.context().traceId).should.eql(expectedGraph); + tracer + .getActiveSpans() + .length.should.equal(0, "All spans should have had end called."); + resetTracer(); + }); + + it("will support tracing batch and send", async function(): Promise { + const { tracer, resetTracer } = setTracerForTest(); + + const rootSpan = tracer.startSpan("root"); + + const list = [{ name: "Albert" }, { name: "Marie" }]; + + const eventDataBatch = await producerClient.createBatch({ + partitionId: "0" + }); + for (let i = 0; i < 2; i++) { + eventDataBatch.tryAdd({ body: `${list[i].name}` }, optionsFn(rootSpan)); + } + await producerClient.sendBatch(eventDataBatch, { + tracingOptions: { + tracingContext: setSpan(context.active(), rootSpan) + } + }); + rootSpan.end(); + + const rootSpans = tracer.getRootSpans(); + rootSpans.length.should.equal(1, "Should only have one root span."); + rootSpans[0].should.equal( + rootSpan, + "The root span should match what was passed in." 
+ ); + + const expectedGraph: SpanGraph = { + roots: [ + { + name: rootSpan.name, + children: [ + { + name: "Azure.EventHubs.message", + children: [] + }, + { + name: "Azure.EventHubs.message", + children: [] + }, + { + name: "Azure.EventHubs.send", + children: [] + } + ] + } + ] + }; + + tracer.getSpanGraph(rootSpan.context().traceId).should.eql(expectedGraph); + tracer + .getActiveSpans() + .length.should.equal(0, "All spans should have had end called."); + resetTracer(); + }); + }); + } + ); + + it("with partition key should be sent successfully.", async function(): Promise { + const eventDataBatch = await producerClient.createBatch({ partitionKey: "1" }); + for (let i = 0; i < 5; i++) { + eventDataBatch.tryAdd({ body: `Hello World ${i}` }); + } + await producerClient.sendBatch(eventDataBatch); + }); + it("with max message size should be sent successfully.", async function(): Promise { const eventDataBatch = await producerClient.createBatch({ + maxSizeInBytes: 5000, partitionId: "0" }); - for (let i = 0; i < 2; i++) { - eventDataBatch.tryAdd({ body: `${list[i].name}` }, optionsFn(rootSpan)); + const message = { body: `${Buffer.from("Z".repeat(4096))}` }; + for (let i = 1; i <= 3; i++) { + const isAdded = eventDataBatch.tryAdd(message); + if (!isAdded) { + debug(`Unable to add ${i} event to the batch`); + break; + } + } + await producerClient.sendBatch(eventDataBatch); + eventDataBatch.count.should.equal(1); + }); + + // TODO: Enable this test https://github.com/Azure/azure-sdk-for-js/issues/9202 is fixed + it.skip("should support being cancelled", async function(): Promise { + try { + // abortSignal event listeners will be triggered after synchronous paths are executed + const abortSignal = AbortController.timeout(0); + await producerClient.createBatch({ abortSignal: abortSignal }); + throw new Error(`Test failure`); + } catch (err) { + err.name.should.equal("AbortError"); + err.message.should.equal(StandardAbortMessage); + } + }); + + it("should support being 
cancelled from an already aborted AbortSignal", async function(): Promise< + void + > { + const abortController = new AbortController(); + abortController.abort(); + try { + await producerClient.createBatch({ abortSignal: abortController.signal }); + throw new Error(`Test failure`); + } catch (err) { + err.name.should.equal("AbortError"); + err.message.should.equal(StandardAbortMessage); + } + }); + }); + + describe("Multiple sendBatch calls", function(): void { + it("should be sent successfully in parallel", async function(): Promise { + const { subscriptionEventHandler } = await SubscriptionHandlerForTests.startingFromHere( + consumerClient + ); + + const promises = []; + for (let i = 0; i < 5; i++) { + promises.push(producerClient.sendBatch([{ body: `Hello World ${i}` }])); + } + await Promise.all(promises); + + const subscription = await consumerClient.subscribe(subscriptionEventHandler, { + startPosition + }); + + try { + const events = await subscriptionEventHandler.waitForEvents( + await consumerClient.getPartitionIds({}), + 5 + ); + + // we've allowed the server to choose which partition the messages are distributed to + // so our expectation here is just that all the bodies have arrived + const bodiesOnly = events.map((evt) => evt.body); + bodiesOnly.sort(); + + bodiesOnly.should.deep.equal([ + "Hello World 0", + "Hello World 1", + "Hello World 2", + "Hello World 3", + "Hello World 4" + ]); + } finally { + subscription.close(); + } + }); + + it("should be sent successfully in parallel, even when exceeding max event listener count of 1000", async function(): Promise< + void + > { + const senderCount = 1200; + try { + const promises = []; + for (let i = 0; i < senderCount; i++) { + promises.push(producerClient.sendBatch([{ body: `Hello World ${i}` }])); + } + await Promise.all(promises); + } catch (err) { + debug("An error occurred while running the test: ", err); + throw err; + } + }); + + it("should be sent successfully in parallel by multiple clients", 
async function(): Promise< + void + > { + const senderCount = 3; + try { + const promises = []; + for (let i = 0; i < senderCount; i++) { + if (i === 0) { + debug(">>>>> Sending a message to partition %d", i); + promises.push( + await producerClient.sendBatch([{ body: `Hello World ${i}` }], { + partitionId: "0" + }) + ); + } else if (i === 1) { + debug(">>>>> Sending a message to partition %d", i); + promises.push( + await producerClient.sendBatch([{ body: `Hello World ${i}` }], { + partitionId: "1" + }) + ); + } else { + debug(">>>>> Sending a message to the hub when i == %d", i); + promises.push(await producerClient.sendBatch([{ body: `Hello World ${i}` }])); + } + } + await Promise.all(promises); + } catch (err) { + debug("An error occurred while running the test: ", err); + throw err; + } + }); + + it("should fail when a message greater than 1 MB is sent and succeed when a normal message is sent after that on the same link.", async function(): Promise< + void + > { + const data: EventData = { + body: Buffer.from("Z".repeat(1300000)) + }; + try { + debug("Sending a message of 300KB..."); + await producerClient.sendBatch([data], { partitionId: "0" }); + throw new Error("Test failure"); + } catch (err) { + debug(err); + should.exist(err); + should.equal(err.code, "MessageTooLargeError"); + err.message.should.match( + /.*The received message \(delivery-id:(\d+), size:(\d+) bytes\) exceeds the limit \((\d+) bytes\) currently allowed on the link\..*/gi + ); + } + await producerClient.sendBatch([{ body: "Hello World EventHub!!" 
}], { + partitionId: "0" + }); + debug("Sent the message successfully on the same link.."); + }); + + it("can be manually traced", async function(): Promise { + const { tracer, resetTracer } = setTracerForTest(); + + const rootSpan = tracer.startSpan("root"); + + const events = []; + for (let i = 0; i < 5; i++) { + events.push({ body: `multiple messages - manual trace propgation: ${i}` }); } - await producerClient.sendBatch(eventDataBatch, { + await producerClient.sendBatch(events, { + partitionId: "0", tracingOptions: { tracingContext: setSpan(context.active(), rootSpan) } @@ -450,7 +708,7 @@ describe("EventHub Sender", function(): void { rootSpan.end(); const rootSpans = tracer.getRootSpans(); - rootSpans.length.should.equal(1, "Should only have one root span."); + rootSpans.length.should.equal(1, "Should only have one root spans."); rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); const expectedGraph: SpanGraph = { @@ -458,6 +716,18 @@ describe("EventHub Sender", function(): void { { name: rootSpan.name, children: [ + { + name: "Azure.EventHubs.message", + children: [] + }, + { + name: "Azure.EventHubs.message", + children: [] + }, + { + name: "Azure.EventHubs.message", + children: [] + }, { name: "Azure.EventHubs.message", children: [] @@ -477,829 +747,614 @@ describe("EventHub Sender", function(): void { tracer.getSpanGraph(rootSpan.context().traceId).should.eql(expectedGraph); tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); + resetTracer(); }); - }); - }); - - it("with partition key should be sent successfully.", async function(): Promise { - const eventDataBatch = await producerClient.createBatch({ partitionKey: "1" }); - for (let i = 0; i < 5; i++) { - eventDataBatch.tryAdd({ body: `Hello World ${i}` }); - } - await producerClient.sendBatch(eventDataBatch); - }); - it("with max message size should be sent successfully.", async function(): Promise { - const eventDataBatch = await 
producerClient.createBatch({ - maxSizeInBytes: 5000, - partitionId: "0" - }); - const message = { body: `${Buffer.from("Z".repeat(4096))}` }; - for (let i = 1; i <= 3; i++) { - const isAdded = eventDataBatch.tryAdd(message); - if (!isAdded) { - debug(`Unable to add ${i} event to the batch`); - break; - } - } - await producerClient.sendBatch(eventDataBatch); - eventDataBatch.count.should.equal(1); - }); - - // TODO: Enable this test https://github.com/Azure/azure-sdk-for-js/issues/9202 is fixed - it.skip("should support being cancelled", async function(): Promise { - try { - // abortSignal event listeners will be triggered after synchronous paths are executed - const abortSignal = AbortController.timeout(0); - await producerClient.createBatch({ abortSignal: abortSignal }); - throw new Error(`Test failure`); - } catch (err) { - err.name.should.equal("AbortError"); - err.message.should.equal(StandardAbortMessage); - } - }); - - it("should support being cancelled from an already aborted AbortSignal", async function(): Promise< - void - > { - const abortController = new AbortController(); - abortController.abort(); - try { - await producerClient.createBatch({ abortSignal: abortController.signal }); - throw new Error(`Test failure`); - } catch (err) { - err.name.should.equal("AbortError"); - err.message.should.equal(StandardAbortMessage); - } - }); - }); - - describe("Multiple sendBatch calls", function(): void { - it("should be sent successfully in parallel", async function(): Promise { - const { subscriptionEventHandler } = await SubscriptionHandlerForTests.startingFromHere( - consumerClient - ); - - const promises = []; - for (let i = 0; i < 5; i++) { - promises.push(producerClient.sendBatch([{ body: `Hello World ${i}` }])); - } - await Promise.all(promises); - - const subscription = await consumerClient.subscribe(subscriptionEventHandler, { - startPosition - }); + it("skips already instrumented events when manually traced", async function(): Promise< + void + > { + 
const { tracer, resetTracer } = setTracerForTest(); - try { - const events = await subscriptionEventHandler.waitForEvents( - await consumerClient.getPartitionIds({}), - 5 - ); + const rootSpan = tracer.startSpan("root"); - // we've allowed the server to choose which partition the messages are distributed to - // so our expectation here is just that all the bodies have arrived - const bodiesOnly = events.map((evt) => evt.body); - bodiesOnly.sort(); - - bodiesOnly.should.deep.equal([ - "Hello World 0", - "Hello World 1", - "Hello World 2", - "Hello World 3", - "Hello World 4" - ]); - } finally { - subscription.close(); - } - }); - - it("should be sent successfully in parallel, even when exceeding max event listener count of 1000", async function(): Promise< - void - > { - const senderCount = 1200; - try { - const promises = []; - for (let i = 0; i < senderCount; i++) { - promises.push(producerClient.sendBatch([{ body: `Hello World ${i}` }])); - } - await Promise.all(promises); - } catch (err) { - debug("An error occurred while running the test: ", err); - throw err; - } - }); - - it("should be sent successfully in parallel by multiple clients", async function(): Promise< - void - > { - const senderCount = 3; - try { - const promises = []; - for (let i = 0; i < senderCount; i++) { - if (i === 0) { - debug(">>>>> Sending a message to partition %d", i); - promises.push( - await producerClient.sendBatch([{ body: `Hello World ${i}` }], { partitionId: "0" }) - ); - } else if (i === 1) { - debug(">>>>> Sending a message to partition %d", i); - promises.push( - await producerClient.sendBatch([{ body: `Hello World ${i}` }], { partitionId: "1" }) - ); - } else { - debug(">>>>> Sending a message to the hub when i == %d", i); - promises.push(await producerClient.sendBatch([{ body: `Hello World ${i}` }])); + const events: EventData[] = []; + for (let i = 0; i < 5; i++) { + events.push({ body: `multiple messages - manual trace propgation: ${i}` }); } - } - await 
Promise.all(promises); - } catch (err) { - debug("An error occurred while running the test: ", err); - throw err; - } - }); - - it("should fail when a message greater than 1 MB is sent and succeed when a normal message is sent after that on the same link.", async function(): Promise< - void - > { - const data: EventData = { - body: Buffer.from("Z".repeat(1300000)) - }; - try { - debug("Sending a message of 300KB..."); - await producerClient.sendBatch([data], { partitionId: "0" }); - throw new Error("Test failure"); - } catch (err) { - debug(err); - should.exist(err); - should.equal(err.code, "MessageTooLargeError"); - err.message.should.match( - /.*The received message \(delivery-id:(\d+), size:(\d+) bytes\) exceeds the limit \((\d+) bytes\) currently allowed on the link\..*/gi - ); - } - await producerClient.sendBatch([{ body: "Hello World EventHub!!" }], { partitionId: "0" }); - debug("Sent the message successfully on the same link.."); - }); + events[0].properties = { [TRACEPARENT_PROPERTY]: "foo" }; + await producerClient.sendBatch(events, { + partitionId: "0", + tracingOptions: { + tracingContext: setSpan(context.active(), rootSpan) + } + }); + rootSpan.end(); - it("can be manually traced", async function(): Promise { - const { tracer, resetTracer } = setTracerForTest(); + const rootSpans = tracer.getRootSpans(); + rootSpans.length.should.equal(1, "Should only have one root spans."); + rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); - const rootSpan = tracer.startSpan("root"); + const expectedGraph: SpanGraph = { + roots: [ + { + name: rootSpan.name, + children: [ + { + name: "Azure.EventHubs.message", + children: [] + }, + { + name: "Azure.EventHubs.message", + children: [] + }, + { + name: "Azure.EventHubs.message", + children: [] + }, + { + name: "Azure.EventHubs.message", + children: [] + }, + { + name: "Azure.EventHubs.send", + children: [] + } + ] + } + ] + }; - const events = []; - for (let i = 0; i < 5; i++) { - 
events.push({ body: `multiple messages - manual trace propgation: ${i}` }); - } - await producerClient.sendBatch(events, { - partitionId: "0", - tracingOptions: { - tracingContext: setSpan(context.active(), rootSpan) - } + tracer.getSpanGraph(rootSpan.context().traceId).should.eql(expectedGraph); + tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); + + resetTracer(); + }); }); - rootSpan.end(); - const rootSpans = tracer.getRootSpans(); - rootSpans.length.should.equal(1, "Should only have one root spans."); - rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); + describe("Array of events", function() { + it("should be sent successfully", async () => { + const data: EventData[] = [{ body: "Hello World 1" }, { body: "Hello World 2" }]; + const receivedEvents: ReceivedEventData[] = []; + let receivingResolver: (value?: unknown) => void; - const expectedGraph: SpanGraph = { - roots: [ - { - name: rootSpan.name, - children: [ - { - name: "Azure.EventHubs.message", - children: [] - }, - { - name: "Azure.EventHubs.message", - children: [] - }, - { - name: "Azure.EventHubs.message", - children: [] - }, - { - name: "Azure.EventHubs.message", - children: [] - }, - { - name: "Azure.EventHubs.message", - children: [] + const receivingPromise = new Promise((resolve) => (receivingResolver = resolve)); + const subscription = consumerClient.subscribe( + { + async processError() { + /* no-op */ }, - { - name: "Azure.EventHubs.send", - children: [] + async processEvents(events) { + receivedEvents.push(...events); + receivingResolver(); } - ] - } - ] - }; + }, + { + startPosition, + maxBatchSize: data.length + } + ); - tracer.getSpanGraph(rootSpan.context().traceId).should.eql(expectedGraph); - tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); + await producerClient.sendBatch(data); - resetTracer(); - }); + await receivingPromise; + await subscription.close(); + + 
receivedEvents.length.should.equal(data.length); + receivedEvents.map((e) => e.body).should.eql(data.map((d) => d.body)); + }); - it("skips already instrumented events when manually traced", async function(): Promise { - const { tracer, resetTracer } = setTracerForTest(); + it("should be sent successfully with partitionKey", async () => { + const data: EventData[] = [{ body: "Hello World 1" }, { body: "Hello World 2" }]; + const receivedEvents: ReceivedEventData[] = []; + let receivingResolver: (value?: unknown) => void; + const receivingPromise = new Promise((resolve) => (receivingResolver = resolve)); + const subscription = consumerClient.subscribe( + { + async processError() { + /* no-op */ + }, + async processEvents(events) { + receivedEvents.push(...events); + receivingResolver(); + } + }, + { + startPosition, + maxBatchSize: data.length + } + ); - const rootSpan = tracer.startSpan("root"); + await producerClient.sendBatch(data, { partitionKey: "foo" }); - const events: EventData[] = []; - for (let i = 0; i < 5; i++) { - events.push({ body: `multiple messages - manual trace propgation: ${i}` }); - } - events[0].properties = { [TRACEPARENT_PROPERTY]: "foo" }; - await producerClient.sendBatch(events, { - partitionId: "0", - tracingOptions: { - tracingContext: setSpan(context.active(), rootSpan) - } - }); - rootSpan.end(); + await receivingPromise; + await subscription.close(); - const rootSpans = tracer.getRootSpans(); - rootSpans.length.should.equal(1, "Should only have one root spans."); - rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); + receivedEvents.length.should.equal(data.length); + receivedEvents.map((e) => e.body).should.eql(data.map((d) => d.body)); + for (let i = 0; i < receivedEvents.length; i++) { + receivedEvents[i].body.should.equal(data[i].body); + } + }); - const expectedGraph: SpanGraph = { - roots: [ - { - name: rootSpan.name, - children: [ - { - name: "Azure.EventHubs.message", - children: [] - }, - { - 
name: "Azure.EventHubs.message", - children: [] - }, - { - name: "Azure.EventHubs.message", - children: [] - }, - { - name: "Azure.EventHubs.message", - children: [] + it("should be sent successfully with partitionId", async () => { + const partitionId = "0"; + const data: EventData[] = [{ body: "Hello World 1" }, { body: "Hello World 2" }]; + const receivedEvents: ReceivedEventData[] = []; + let receivingResolver: (value?: unknown) => void; + const receivingPromise = new Promise((resolve) => (receivingResolver = resolve)); + const subscription = consumerClient.subscribe( + partitionId, + { + async processError() { + /* no-op */ }, - { - name: "Azure.EventHubs.send", - children: [] + async processEvents(events) { + receivedEvents.push(...events); + receivingResolver(); } - ] - } - ] - }; + }, + { + startPosition, + maxBatchSize: data.length + } + ); - tracer.getSpanGraph(rootSpan.context().traceId).should.eql(expectedGraph); - tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); + await producerClient.sendBatch(data, { partitionId }); - resetTracer(); - }); - }); + await receivingPromise; + await subscription.close(); - describe("Array of events", function() { - it("should be sent successfully", async () => { - const data: EventData[] = [{ body: "Hello World 1" }, { body: "Hello World 2" }]; - const receivedEvents: ReceivedEventData[] = []; - let receivingResolver: (value?: unknown) => void; - - const receivingPromise = new Promise((resolve) => (receivingResolver = resolve)); - const subscription = consumerClient.subscribe( - { - async processError() { - /* no-op */ - }, - async processEvents(events) { - receivedEvents.push(...events); - receivingResolver(); + receivedEvents.length.should.equal(data.length); + receivedEvents.map((e) => e.body).should.eql(data.map((d) => d.body)); + for (let i = 0; i < receivedEvents.length; i++) { + receivedEvents[i].body.should.equal(data[i].body); } - }, - { - startPosition, - maxBatchSize: 
data.length - } - ); - - await producerClient.sendBatch(data); - - await receivingPromise; - await subscription.close(); - - receivedEvents.length.should.equal(data.length); - receivedEvents.map((e) => e.body).should.eql(data.map((d) => d.body)); - }); - - it("should be sent successfully with partitionKey", async () => { - const data: EventData[] = [{ body: "Hello World 1" }, { body: "Hello World 2" }]; - const receivedEvents: ReceivedEventData[] = []; - let receivingResolver: (value?: unknown) => void; - const receivingPromise = new Promise((resolve) => (receivingResolver = resolve)); - const subscription = consumerClient.subscribe( - { - async processError() { - /* no-op */ - }, - async processEvents(events) { - receivedEvents.push(...events); - receivingResolver(); - } - }, - { - startPosition, - maxBatchSize: data.length - } - ); + }); - await producerClient.sendBatch(data, { partitionKey: "foo" }); + it("can be manually traced", async function(): Promise { + const { tracer, resetTracer } = setTracerForTest(); - await receivingPromise; - await subscription.close(); + const rootSpan = tracer.startSpan("root"); - receivedEvents.length.should.equal(data.length); - receivedEvents.map((e) => e.body).should.eql(data.map((d) => d.body)); - for (let i = 0; i < receivedEvents.length; i++) { - receivedEvents[i].body.should.equal(data[i].body); - } - }); - - it("should be sent successfully with partitionId", async () => { - const partitionId = "0"; - const data: EventData[] = [{ body: "Hello World 1" }, { body: "Hello World 2" }]; - const receivedEvents: ReceivedEventData[] = []; - let receivingResolver: (value?: unknown) => void; - const receivingPromise = new Promise((resolve) => (receivingResolver = resolve)); - const subscription = consumerClient.subscribe( - partitionId, - { - async processError() { - /* no-op */ - }, - async processEvents(events) { - receivedEvents.push(...events); - receivingResolver(); + const events = []; + for (let i = 0; i < 5; i++) { + 
events.push({ body: `multiple messages - manual trace propgation: ${i}` }); } - }, - { - startPosition, - maxBatchSize: data.length - } - ); + await producerClient.sendBatch(events, { + tracingOptions: { + tracingContext: setSpan(context.active(), rootSpan) + } + }); + rootSpan.end(); - await producerClient.sendBatch(data, { partitionId }); + const rootSpans = tracer.getRootSpans(); + rootSpans.length.should.equal(1, "Should only have one root spans."); + rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); + + const expectedGraph: SpanGraph = { + roots: [ + { + name: rootSpan.name, + children: [ + { + name: "Azure.EventHubs.message", + children: [] + }, + { + name: "Azure.EventHubs.message", + children: [] + }, + { + name: "Azure.EventHubs.message", + children: [] + }, + { + name: "Azure.EventHubs.message", + children: [] + }, + { + name: "Azure.EventHubs.message", + children: [] + }, + { + name: "Azure.EventHubs.send", + children: [] + } + ] + } + ] + }; - await receivingPromise; - await subscription.close(); + tracer.getSpanGraph(rootSpan.context().traceId).should.eql(expectedGraph); + tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); - receivedEvents.length.should.equal(data.length); - receivedEvents.map((e) => e.body).should.eql(data.map((d) => d.body)); - for (let i = 0; i < receivedEvents.length; i++) { - receivedEvents[i].body.should.equal(data[i].body); - } - }); + const knownSendSpans = tracer + .getKnownSpans() + .filter((span: TestSpan) => span.name === "Azure.EventHubs.send"); + knownSendSpans.length.should.equal(1, "There should have been one send span."); + knownSendSpans[0].attributes.should.deep.equal({ + "az.namespace": "Microsoft.EventHub", + "message_bus.destination": producerClient.eventHubName, + "peer.address": producerClient.fullyQualifiedNamespace + }); + resetTracer(); + }); - it("can be manually traced", async function(): Promise { - const { tracer, resetTracer } = 
setTracerForTest(); + it("skips already instrumented events when manually traced", async function(): Promise< + void + > { + const { tracer, resetTracer } = setTracerForTest(); - const rootSpan = tracer.startSpan("root"); + const rootSpan = tracer.startSpan("root"); - const events = []; - for (let i = 0; i < 5; i++) { - events.push({ body: `multiple messages - manual trace propgation: ${i}` }); - } - await producerClient.sendBatch(events, { - tracingOptions: { - tracingContext: setSpan(context.active(), rootSpan) - } - }); - rootSpan.end(); + const events: EventData[] = []; + for (let i = 0; i < 5; i++) { + events.push({ body: `multiple messages - manual trace propgation: ${i}` }); + } + events[0].properties = { [TRACEPARENT_PROPERTY]: "foo" }; + await producerClient.sendBatch(events, { + tracingOptions: { + tracingContext: setSpan(context.active(), rootSpan) + } + }); + rootSpan.end(); - const rootSpans = tracer.getRootSpans(); - rootSpans.length.should.equal(1, "Should only have one root spans."); - rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); + const rootSpans = tracer.getRootSpans(); + rootSpans.length.should.equal(1, "Should only have one root spans."); + rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); - const expectedGraph: SpanGraph = { - roots: [ - { - name: rootSpan.name, - children: [ - { - name: "Azure.EventHubs.message", - children: [] - }, - { - name: "Azure.EventHubs.message", - children: [] - }, - { - name: "Azure.EventHubs.message", - children: [] - }, - { - name: "Azure.EventHubs.message", - children: [] - }, - { - name: "Azure.EventHubs.message", - children: [] - }, + const expectedGraph: SpanGraph = { + roots: [ { - name: "Azure.EventHubs.send", - children: [] + name: rootSpan.name, + children: [ + { + name: "Azure.EventHubs.message", + children: [] + }, + { + name: "Azure.EventHubs.message", + children: [] + }, + { + name: "Azure.EventHubs.message", + children: [] + 
}, + { + name: "Azure.EventHubs.message", + children: [] + }, + { + name: "Azure.EventHubs.send", + children: [] + } + ] } ] - } - ] - }; - - tracer.getSpanGraph(rootSpan.context().traceId).should.eql(expectedGraph); - tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); - - const knownSendSpans = tracer - .getKnownSpans() - .filter((span: TestSpan) => span.name === "Azure.EventHubs.send"); - knownSendSpans.length.should.equal(1, "There should have been one send span."); - knownSendSpans[0].attributes.should.deep.equal({ - "az.namespace": "Microsoft.EventHub", - "message_bus.destination": producerClient.eventHubName, - "peer.address": producerClient.fullyQualifiedNamespace - }); - resetTracer(); - }); - - it("skips already instrumented events when manually traced", async function(): Promise { - const { tracer, resetTracer } = setTracerForTest(); - - const rootSpan = tracer.startSpan("root"); - - const events: EventData[] = []; - for (let i = 0; i < 5; i++) { - events.push({ body: `multiple messages - manual trace propgation: ${i}` }); - } - events[0].properties = { [TRACEPARENT_PROPERTY]: "foo" }; - await producerClient.sendBatch(events, { - tracingOptions: { - tracingContext: setSpan(context.active(), rootSpan) - } - }); - rootSpan.end(); + }; - const rootSpans = tracer.getRootSpans(); - rootSpans.length.should.equal(1, "Should only have one root spans."); - rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); + tracer.getSpanGraph(rootSpan.context().traceId).should.eql(expectedGraph); + tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); + resetTracer(); + }); - const expectedGraph: SpanGraph = { - roots: [ - { - name: rootSpan.name, - children: [ - { - name: "Azure.EventHubs.message", - children: [] - }, - { - name: "Azure.EventHubs.message", - children: [] - }, - { - name: "Azure.EventHubs.message", - children: [] - }, - { - name: 
"Azure.EventHubs.message", - children: [] - }, + it("should support being cancelled", async function(): Promise { + try { + const data: EventData[] = [ { - name: "Azure.EventHubs.send", - children: [] + body: "Sender Cancellation Test - timeout 0" } - ] + ]; + // call send() once to create a connection + await producerClient.sendBatch(data); + // abortSignal event listeners will be triggered after synchronous paths are executed + const abortSignal = AbortController.timeout(0); + await producerClient.sendBatch(data, { abortSignal }); + throw new Error(`Test failure`); + } catch (err) { + err.name.should.equal("AbortError"); + err.message.should.equal(StandardAbortMessage); } - ] - }; + }); - tracer.getSpanGraph(rootSpan.context().traceId).should.eql(expectedGraph); - tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); - resetTracer(); - }); + it("should support being cancelled from an already aborted AbortSignal", async function(): Promise< + void + > { + const abortController = new AbortController(); + abortController.abort(); - it("should support being cancelled", async function(): Promise { - try { - const data: EventData[] = [ - { - body: "Sender Cancellation Test - timeout 0" - } - ]; - // call send() once to create a connection - await producerClient.sendBatch(data); - // abortSignal event listeners will be triggered after synchronous paths are executed - const abortSignal = AbortController.timeout(0); - await producerClient.sendBatch(data, { abortSignal }); - throw new Error(`Test failure`); - } catch (err) { - err.name.should.equal("AbortError"); - err.message.should.equal(StandardAbortMessage); - } - }); - - it("should support being cancelled from an already aborted AbortSignal", async function(): Promise< - void - > { - const abortController = new AbortController(); - abortController.abort(); - - try { - const data: EventData[] = [ - { - body: "Sender Cancellation Test - immediate" - } - ]; - await 
producerClient.sendBatch(data, { abortSignal: abortController.signal }); - throw new Error(`Test failure`); - } catch (err) { - err.name.should.equal("AbortError"); - err.message.should.equal(StandardAbortMessage); - } - }); - - it("should throw when partitionId and partitionKey are provided", async function(): Promise< - void - > { - try { - const data: EventData[] = [ - { - body: "Sender paritition id and partition key" + try { + const data: EventData[] = [ + { + body: "Sender Cancellation Test - immediate" + } + ]; + await producerClient.sendBatch(data, { abortSignal: abortController.signal }); + throw new Error(`Test failure`); + } catch (err) { + err.name.should.equal("AbortError"); + err.message.should.equal(StandardAbortMessage); } - ]; - await producerClient.sendBatch(data, { partitionKey: "1", partitionId: "0" }); - throw new Error("Test Failure"); - } catch (err) { - err.message.should.equal( - "The partitionId (0) and partitionKey (1) cannot both be specified." - ); - } - }); - }); + }); - describe("Validation", function() { - describe("createBatch", function() { - it("throws an error if partitionId and partitionKey are set", async () => { - try { - await producerClient.createBatch({ partitionId: "0", partitionKey: "boo" }); - throw new Error("Test failure"); - } catch (error) { - error.message.should.equal( - "partitionId and partitionKey cannot both be set when creating a batch" - ); - } + it("should throw when partitionId and partitionKey are provided", async function(): Promise< + void + > { + try { + const data: EventData[] = [ + { + body: "Sender paritition id and partition key" + } + ]; + await producerClient.sendBatch(data, { partitionKey: "1", partitionId: "0" }); + throw new Error("Test Failure"); + } catch (err) { + err.message.should.equal( + "The partitionId (0) and partitionKey (1) cannot both be specified." + ); + } + }); }); - it("throws an error if partitionId and partitionKey are set and partitionId is 0 i.e. 
falsy", async () => { - try { - await producerClient.createBatch({ - // @ts-expect-error Testing the value 0 is not ignored. - partitionId: 0, - partitionKey: "boo" + describe("Validation", function() { + describe("createBatch", function() { + it("throws an error if partitionId and partitionKey are set", async () => { + try { + await producerClient.createBatch({ partitionId: "0", partitionKey: "boo" }); + throw new Error("Test failure"); + } catch (error) { + error.message.should.equal( + "partitionId and partitionKey cannot both be set when creating a batch" + ); + } }); - throw new Error("Test failure"); - } catch (error) { - error.message.should.equal( - "partitionId and partitionKey cannot both be set when creating a batch" - ); - } - }); - it("throws an error if partitionId and partitionKey are set and partitionKey is 0 i.e. falsy", async () => { - try { - await producerClient.createBatch({ - partitionId: "1", - // @ts-expect-error Testing the value 0 is not ignored. - partitionKey: 0 + it("throws an error if partitionId and partitionKey are set and partitionId is 0 i.e. falsy", async () => { + try { + await producerClient.createBatch({ + // @ts-expect-error Testing the value 0 is not ignored. 
+ partitionId: 0, + partitionKey: "boo" + }); + throw new Error("Test failure"); + } catch (error) { + error.message.should.equal( + "partitionId and partitionKey cannot both be set when creating a batch" + ); + } }); - throw new Error("Test failure"); - } catch (error) { - error.message.should.equal( - "partitionId and partitionKey cannot both be set when creating a batch" - ); - } - }); - it("should throw when maxMessageSize is greater than maximum message size on the AMQP sender link", async function(): Promise< - void - > { - try { - await producerClient.createBatch({ maxSizeInBytes: 2046528 }); - throw new Error("Test Failure"); - } catch (err) { - err.message.should.match( - /.*Max message size \((\d+) bytes\) is greater than maximum message size \((\d+) bytes\) on the AMQP sender link.*/gi - ); - } - }); - }); - describe("sendBatch with EventDataBatch", function() { - it("works if partitionKeys match", async () => { - const misconfiguredOptions: SendBatchOptions = { - partitionKey: "foo" - }; - const batch = await producerClient.createBatch({ partitionKey: "foo" }); - await producerClient.sendBatch(batch, misconfiguredOptions); - }); - it("works if partitionIds match", async () => { - const misconfiguredOptions: SendBatchOptions = { - partitionId: "0" - }; - const batch = await producerClient.createBatch({ partitionId: "0" }); - await producerClient.sendBatch(batch, misconfiguredOptions); - }); - it("throws an error if partitionKeys don't match", async () => { - const badOptions: SendBatchOptions = { - partitionKey: "bar" - }; - const batch = await producerClient.createBatch({ partitionKey: "foo" }); - try { - await producerClient.sendBatch(batch, badOptions); - throw new Error("Test failure"); - } catch (err) { - err.message.should.equal( - "The partitionKey (bar) set on sendBatch does not match the partitionKey (foo) set when creating the batch." 
- ); - } - }); - it("throws an error if partitionKeys don't match (undefined)", async () => { - const badOptions: SendBatchOptions = { - partitionKey: "bar" - }; - const batch = await producerClient.createBatch(); - try { - await producerClient.sendBatch(batch, badOptions); - throw new Error("Test failure"); - } catch (err) { - err.message.should.equal( - "The partitionKey (bar) set on sendBatch does not match the partitionKey (undefined) set when creating the batch." - ); - } - }); - it("throws an error if partitionIds don't match", async () => { - const badOptions: SendBatchOptions = { - partitionId: "0" - }; - const batch = await producerClient.createBatch({ partitionId: "1" }); - try { - await producerClient.sendBatch(batch, badOptions); - throw new Error("Test failure"); - } catch (err) { - err.message.should.equal( - "The partitionId (0) set on sendBatch does not match the partitionId (1) set when creating the batch." - ); - } - }); - it("throws an error if partitionIds don't match (undefined)", async () => { - const badOptions: SendBatchOptions = { - partitionId: "0" - }; - const batch = await producerClient.createBatch(); - try { - await producerClient.sendBatch(batch, badOptions); - throw new Error("Test failure"); - } catch (err) { - err.message.should.equal( - "The partitionId (0) set on sendBatch does not match the partitionId (undefined) set when creating the batch." 
- ); - } - }); - it("throws an error if partitionId and partitionKey are set (create, send)", async () => { - const badOptions: SendBatchOptions = { - partitionKey: "foo" - }; - const batch = await producerClient.createBatch({ partitionId: "0" }); - try { - await producerClient.sendBatch(batch, badOptions); - throw new Error("Test failure"); - } catch (err) { - err.message.should.not.equal("Test failure"); - } - }); - it("throws an error if partitionId and partitionKey are set (send, create)", async () => { - const badOptions: SendBatchOptions = { - partitionId: "0" - }; - const batch = await producerClient.createBatch({ partitionKey: "foo" }); - try { - await producerClient.sendBatch(batch, badOptions); - throw new Error("Test failure"); - } catch (err) { - err.message.should.not.equal("Test failure"); - } - }); - it("throws an error if partitionId and partitionKey are set (send, send)", async () => { - const badOptions: SendBatchOptions = { - partitionKey: "foo", - partitionId: "0" - }; - const batch = await producerClient.createBatch(); - try { - await producerClient.sendBatch(batch, badOptions); - throw new Error("Test failure"); - } catch (err) { - err.message.should.not.equal("Test failure"); - } - }); - }); - - describe("sendBatch with EventDataBatch with events array", function() { - it("throws an error if partitionId and partitionKey are set", async () => { - const badOptions: SendBatchOptions = { - partitionKey: "foo", - partitionId: "0" - }; - const batch = [{ body: "Hello 1" }, { body: "Hello 2" }]; - try { - await producerClient.sendBatch(batch, badOptions); - throw new Error("Test failure"); - } catch (err) { - err.message.should.equal( - "The partitionId (0) and partitionKey (foo) cannot both be specified." - ); - } - }); - it("throws an error if partitionId and partitionKey are set with partitionId set to 0 i.e. 
falsy", async () => { - const badOptions: SendBatchOptions = { - partitionKey: "foo", - // @ts-expect-error Testing the value 0 is not ignored. - partitionId: 0 - }; - const batch = [{ body: "Hello 1" }, { body: "Hello 2" }]; - try { - await producerClient.sendBatch(batch, badOptions); - throw new Error("Test failure"); - } catch (err) { - err.message.should.equal( - "The partitionId (0) and partitionKey (foo) cannot both be specified." - ); - } - }); - it("throws an error if partitionId and partitionKey are set with partitionKey set to 0 i.e. falsy", async () => { - const badOptions: SendBatchOptions = { - // @ts-expect-error Testing the value 0 is not ignored. - partitionKey: 0, - partitionId: "0" - }; - const batch = [{ body: "Hello 1" }, { body: "Hello 2" }]; - try { - await producerClient.sendBatch(batch, badOptions); - throw new Error("Test failure"); - } catch (err) { - err.message.should.equal( - "The partitionId (0) and partitionKey (0) cannot both be specified." - ); - } - }); - }); - }); + it("throws an error if partitionId and partitionKey are set and partitionKey is 0 i.e. falsy", async () => { + try { + await producerClient.createBatch({ + partitionId: "1", + // @ts-expect-error Testing the value 0 is not ignored. 
+ partitionKey: 0 + }); + throw new Error("Test failure"); + } catch (error) { + error.message.should.equal( + "partitionId and partitionKey cannot both be set when creating a batch" + ); + } + }); - describe("Negative scenarios", function(): void { - it("a message greater than 1 MB should fail.", async function(): Promise { - const data: EventData = { - body: Buffer.from("Z".repeat(1300000)) - }; - try { - await producerClient.sendBatch([data]); - throw new Error("Test failure"); - } catch (err) { - debug(err); - should.exist(err); - should.equal(err.code, "MessageTooLargeError"); - err.message.should.match( - /.*The received message \(delivery-id:(\d+), size:(\d+) bytes\) exceeds the limit \((\d+) bytes\) currently allowed on the link\..*/gi - ); - } - }); + it("should throw when maxMessageSize is greater than maximum message size on the AMQP sender link", async function(): Promise< + void + > { + try { + await producerClient.createBatch({ maxSizeInBytes: 2046528 }); + throw new Error("Test Failure"); + } catch (err) { + err.message.should.match( + /.*Max message size \((\d+) bytes\) is greater than maximum message size \((\d+) bytes\) on the AMQP sender link.*/gi + ); + } + }); + }); + describe("sendBatch with EventDataBatch", function() { + it("works if partitionKeys match", async () => { + const misconfiguredOptions: SendBatchOptions = { + partitionKey: "foo" + }; + const batch = await producerClient.createBatch({ partitionKey: "foo" }); + await producerClient.sendBatch(batch, misconfiguredOptions); + }); + it("works if partitionIds match", async () => { + const misconfiguredOptions: SendBatchOptions = { + partitionId: "0" + }; + const batch = await producerClient.createBatch({ partitionId: "0" }); + await producerClient.sendBatch(batch, misconfiguredOptions); + }); + it("throws an error if partitionKeys don't match", async () => { + const badOptions: SendBatchOptions = { + partitionKey: "bar" + }; + const batch = await producerClient.createBatch({ 
partitionKey: "foo" }); + try { + await producerClient.sendBatch(batch, badOptions); + throw new Error("Test failure"); + } catch (err) { + err.message.should.equal( + "The partitionKey (bar) set on sendBatch does not match the partitionKey (foo) set when creating the batch." + ); + } + }); + it("throws an error if partitionKeys don't match (undefined)", async () => { + const badOptions: SendBatchOptions = { + partitionKey: "bar" + }; + const batch = await producerClient.createBatch(); + try { + await producerClient.sendBatch(batch, badOptions); + throw new Error("Test failure"); + } catch (err) { + err.message.should.equal( + "The partitionKey (bar) set on sendBatch does not match the partitionKey (undefined) set when creating the batch." + ); + } + }); + it("throws an error if partitionIds don't match", async () => { + const badOptions: SendBatchOptions = { + partitionId: "0" + }; + const batch = await producerClient.createBatch({ partitionId: "1" }); + try { + await producerClient.sendBatch(batch, badOptions); + throw new Error("Test failure"); + } catch (err) { + err.message.should.equal( + "The partitionId (0) set on sendBatch does not match the partitionId (1) set when creating the batch." + ); + } + }); + it("throws an error if partitionIds don't match (undefined)", async () => { + const badOptions: SendBatchOptions = { + partitionId: "0" + }; + const batch = await producerClient.createBatch(); + try { + await producerClient.sendBatch(batch, badOptions); + throw new Error("Test failure"); + } catch (err) { + err.message.should.equal( + "The partitionId (0) set on sendBatch does not match the partitionId (undefined) set when creating the batch." 
+ ); + } + }); + it("throws an error if partitionId and partitionKey are set (create, send)", async () => { + const badOptions: SendBatchOptions = { + partitionKey: "foo" + }; + const batch = await producerClient.createBatch({ partitionId: "0" }); + try { + await producerClient.sendBatch(batch, badOptions); + throw new Error("Test failure"); + } catch (err) { + err.message.should.not.equal("Test failure"); + } + }); + it("throws an error if partitionId and partitionKey are set (send, create)", async () => { + const badOptions: SendBatchOptions = { + partitionId: "0" + }; + const batch = await producerClient.createBatch({ partitionKey: "foo" }); + try { + await producerClient.sendBatch(batch, badOptions); + throw new Error("Test failure"); + } catch (err) { + err.message.should.not.equal("Test failure"); + } + }); + it("throws an error if partitionId and partitionKey are set (send, send)", async () => { + const badOptions: SendBatchOptions = { + partitionKey: "foo", + partitionId: "0" + }; + const batch = await producerClient.createBatch(); + try { + await producerClient.sendBatch(batch, badOptions); + throw new Error("Test failure"); + } catch (err) { + err.message.should.not.equal("Test failure"); + } + }); + }); + + describe("sendBatch with EventDataBatch with events array", function() { + it("throws an error if partitionId and partitionKey are set", async () => { + const badOptions: SendBatchOptions = { + partitionKey: "foo", + partitionId: "0" + }; + const batch = [{ body: "Hello 1" }, { body: "Hello 2" }]; + try { + await producerClient.sendBatch(batch, badOptions); + throw new Error("Test failure"); + } catch (err) { + err.message.should.equal( + "The partitionId (0) and partitionKey (foo) cannot both be specified." + ); + } + }); + it("throws an error if partitionId and partitionKey are set with partitionId set to 0 i.e. 
falsy", async () => { + const badOptions: SendBatchOptions = { + partitionKey: "foo", + // @ts-expect-error Testing the value 0 is not ignored. + partitionId: 0 + }; + const batch = [{ body: "Hello 1" }, { body: "Hello 2" }]; + try { + await producerClient.sendBatch(batch, badOptions); + throw new Error("Test failure"); + } catch (err) { + err.message.should.equal( + "The partitionId (0) and partitionKey (foo) cannot both be specified." + ); + } + }); + it("throws an error if partitionId and partitionKey are set with partitionKey set to 0 i.e. falsy", async () => { + const badOptions: SendBatchOptions = { + // @ts-expect-error Testing the value 0 is not ignored. + partitionKey: 0, + partitionId: "0" + }; + const batch = [{ body: "Hello 1" }, { body: "Hello 2" }]; + try { + await producerClient.sendBatch(batch, badOptions); + throw new Error("Test failure"); + } catch (err) { + err.message.should.equal( + "The partitionId (0) and partitionKey (0) cannot both be specified." + ); + } + }); + }); + }); - describe("on invalid partition ids like", function(): void { - // tslint:disable-next-line: no-null-keyword - const invalidIds = ["XYZ", "-1", "1000", "-"]; - invalidIds.forEach(function(id: string | null): void { - it(`"${id}" should throw an error`, async function(): Promise { + describe("Negative scenarios", function(): void { + it("a message greater than 1 MB should fail.", async function(): Promise { + const data: EventData = { + body: Buffer.from("Z".repeat(1300000)) + }; try { - debug("Created sender and will be sending a message to partition id ...", id); - await producerClient.sendBatch([{ body: "Hello world!" 
}], { - partitionId: id as any - }); - debug("sent the message."); + await producerClient.sendBatch([data]); throw new Error("Test failure"); } catch (err) { - debug(`>>>> Received error for invalid partition id "${id}" - `, err); + debug(err); should.exist(err); + should.equal(err.code, "MessageTooLargeError"); err.message.should.match( - /.*The specified partition is invalid for an EventHub partition sender or receiver.*/gi + /.*The received message \(delivery-id:(\d+), size:(\d+) bytes\) exceeds the limit \((\d+) bytes\) currently allowed on the link\..*/gi ); } }); + + describe("on invalid partition ids like", function(): void { + // tslint:disable-next-line: no-null-keyword + const invalidIds = ["XYZ", "-1", "1000", "-"]; + invalidIds.forEach(function(id: string | null): void { + it(`"${id}" should throw an error`, async function(): Promise { + try { + debug("Created sender and will be sending a message to partition id ...", id); + await producerClient.sendBatch([{ body: "Hello world!" 
}], { + partitionId: id as any + }); + debug("sent the message."); + throw new Error("Test failure"); + } catch (err) { + debug(`>>>> Received error for invalid partition id "${id}" - `, err); + should.exist(err); + err.message.should.match( + /.*The specified partition is invalid for an EventHub partition sender or receiver.*/gi + ); + } + }); + }); + }); }); - }); + }).timeout(20000); }); -}).timeout(20000); +}); diff --git a/sdk/eventhub/event-hubs/test/public/auth.spec.ts b/sdk/eventhub/event-hubs/test/public/auth.spec.ts index dd8fce06f393..0745489364da 100644 --- a/sdk/eventhub/event-hubs/test/public/auth.spec.ts +++ b/sdk/eventhub/event-hubs/test/public/auth.spec.ts @@ -6,375 +6,409 @@ import { EventHubProducerClient, parseEventHubConnectionString } from "../../src/index"; -import { EnvVarKeys, getEnvVars } from "./utils/testUtils"; +import { EnvVarKeys, getEnvVars, getEnvVarValue, isNode } from "./utils/testUtils"; import chai from "chai"; import { AzureNamedKeyCredential, AzureSASCredential } from "@azure/core-auth"; import { createSasTokenProvider } from "@azure/core-amqp"; import { SinonFakeTimers, useFakeTimers } from "sinon"; +import { versionsToTest } from "@azure/test-utils-multi-version"; +import { createMockServer } from "./utils/mockService"; const should = chai.should(); -const env = getEnvVars(); const TEST_FAILURE = "test failure"; - -describe("Authentication via", () => { - const { - endpoint, - fullyQualifiedNamespace, - sharedAccessKey, - sharedAccessKeyName - } = parseEventHubConnectionString(env[EnvVarKeys.EVENTHUB_CONNECTION_STRING]); - const service = { - connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - path: env[EnvVarKeys.EVENTHUB_NAME], - endpoint: endpoint.replace(/\/+$/, "") - }; - - before(() => { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." 
- ); - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." - ); - }); - - let clock: SinonFakeTimers; - beforeEach("setup new space-time continuum", () => { - clock = useFakeTimers({ - now: new Date(), - shouldAdvanceTime: true - }); - }); - - afterEach("returning back to current space-time variant", () => { - clock.restore(); - }); - - describe("AzureNamedKeyCredential", () => { - describe("supports key rotation", () => { - it("EventHubConsumerClient $management calls", async () => { - const namedKeyCredential = new AzureNamedKeyCredential( - sharedAccessKeyName!, - sharedAccessKey! - ); - - const consumerClient = new EventHubConsumerClient( - "$Default", - fullyQualifiedNamespace, - service.path, - namedKeyCredential - ); - - const properties = await consumerClient.getEventHubProperties(); - should.exist(properties); - - // Rotate credential to invalid value. - namedKeyCredential.update("foo", "bar"); - try { - await consumerClient.getEventHubProperties(); - throw new Error(TEST_FAILURE); - } catch (err) { - should.equal(err.code, "UnauthorizedError"); +const serviceVersions = ["mock", "live"] as const; +const testTarget = getEnvVarValue("TEST_TARGET") || "live"; + +describe("public/auth.spec.ts", function() { + versionsToTest( + serviceVersions, + { versionForRecording: testTarget }, + (serviceVersion, onVersions) => { + const env = getEnvVars(serviceVersion as "live" | "mock"); + if (isNode) { + if (serviceVersion === "mock") { + let service: ReturnType; + before("Starting mock server", async () => { + service = createMockServer(); + + return service.start(); + }); + after("Stopping mock server", async () => { + return service?.stop(); + }); } + } + + /** + * Only supports "live" because the mock service currently doesn't validate authentication, + * so rotating keys will never cause UnauthorizedError to be thrown. 
+ */ + onVersions(["live"]).describe("Authentication via", () => { + const { + endpoint, + fullyQualifiedNamespace, + sharedAccessKey, + sharedAccessKeyName + } = parseEventHubConnectionString(env[EnvVarKeys.EVENTHUB_CONNECTION_STRING]); + const service = { + connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + path: env[EnvVarKeys.EVENTHUB_NAME], + endpoint: endpoint.replace(/\/+$/, "") + }; + + before(() => { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." + ); + should.exist( + env[EnvVarKeys.EVENTHUB_NAME], + "define EVENTHUB_NAME in your environment before running integration tests." + ); + }); - // Rotate credential to valid value. - namedKeyCredential.update(sharedAccessKeyName!, sharedAccessKey!); - await consumerClient.getEventHubProperties(); - should.exist(properties); + let clock: SinonFakeTimers; + beforeEach("setup new space-time continuum", () => { + clock = useFakeTimers({ + now: new Date(), + shouldAdvanceTime: true + }); + }); - return consumerClient.close(); - }); + afterEach("returning back to current space-time variant", () => { + clock.restore(); + }); - it("EventHubConsumerClient receive calls", async () => { - const namedKeyCredential = new AzureNamedKeyCredential( - sharedAccessKeyName!, - sharedAccessKey! - ); + describe("AzureNamedKeyCredential", () => { + describe("supports key rotation", () => { + it("EventHubConsumerClient $management calls", async () => { + const namedKeyCredential = new AzureNamedKeyCredential( + sharedAccessKeyName!, + sharedAccessKey! + ); - const consumerClient = new EventHubConsumerClient( - "$Default", - fullyQualifiedNamespace, - service.path, - namedKeyCredential, - { - retryOptions: { - maxRetries: 0 - } - } - ); - - await new Promise((resolve, reject) => { - // My attempt at defining the order of operations I expect to see. 
- const steps: Array<(...args: any[]) => void> = [ - // 1: wait for a `processEvents` to be called, then rotate the credentials to an invalid value and fast forward the clock! - (events: []) => { - if (!Array.isArray(events)) { - reject(new Error("Step 1 failed. Expected to see a list of events.")); - } - // Rotate credentials to invalid values and fast forward past the token refresh. + const consumerClient = new EventHubConsumerClient( + "$Default", + fullyQualifiedNamespace, + service.path, + namedKeyCredential + ); + + const properties = await consumerClient.getEventHubProperties(); + should.exist(properties); + + // Rotate credential to invalid value. namedKeyCredential.update("foo", "bar"); - clock.tick(1000 * 60 * 45); - }, - // 2: observe another `processEvents` call. We should see this because the maxWaitTimeInSeconds is set to 5 seconds, and we fast forwarded the clock 45 minutes. - (events: []) => { - if (!Array.isArray(events)) { - reject(new Error("Step 2 failed. Expected to see a list of events.")); - } - }, - // 3: Since the token renewal has occurred, we should start seeing `UnauthorizedError` being thrown from our `processError` handler. - // Rotate the credentials back to valid values. - (err: any) => { - if (err.code !== "UnauthorizedError") { - reject( - new Error(`Step 3 failed. Expected ${err.code} to equal "UnauthorizedError".`) - ); + try { + await consumerClient.getEventHubProperties(); + throw new Error(TEST_FAILURE); + } catch (err) { + should.equal(err.code, "UnauthorizedError"); } - // Rotate the credentials back to valid values. + + // Rotate credential to valid value. namedKeyCredential.update(sharedAccessKeyName!, sharedAccessKey!); - }, - // 4: observe another `processEvents` call. - // If the credentials were still invalid, we'd expect to see `processError` thrown instead. - (events: []) => { - if (!Array.isArray(events)) { - reject(new Error("Step 4 failed. 
Expected to see a list of events.")); - } - resolve(); - } - ]; - - consumerClient.subscribe( - "0", - { - async processError(err) { - const step = steps.shift(); - if (step) step(err); - }, - async processEvents(events) { - const step = steps.shift(); - if (step) step(events); - } - }, - { - maxWaitTimeInSeconds: 5 - } - ); - }); + await consumerClient.getEventHubProperties(); + should.exist(properties); - return consumerClient.close(); - }); + return consumerClient.close(); + }); - it("EventHubProducerClient send calls", async () => { - const namedKeyCredential = new AzureNamedKeyCredential( - sharedAccessKeyName!, - sharedAccessKey! - ); + it("EventHubConsumerClient receive calls", async () => { + const namedKeyCredential = new AzureNamedKeyCredential( + sharedAccessKeyName!, + sharedAccessKey! + ); - const producerClient = new EventHubProducerClient( - fullyQualifiedNamespace, - service.path, - namedKeyCredential, - { - retryOptions: { - maxRetries: 0 - } - } - ); - - // The 1st sendBatch is called with valid credentials, so it should succeed. - await producerClient.sendBatch([{ body: "test" }]); - - // Rotate credential to invalid value. - namedKeyCredential.update("foo", "bar"); - // Fast forward through time to after the token refresh. - clock.tick(1000 * 60 * 45); - - try { - // This sendBatch should fail because we've updated the credential to invalid values and allowed the cbs link to refresh. - await producerClient.sendBatch([{ body: "I don't have access." }]); - throw new Error(TEST_FAILURE); - } catch (err) { - should.equal(err.code, "UnauthorizedError"); - } + const consumerClient = new EventHubConsumerClient( + "$Default", + fullyQualifiedNamespace, + service.path, + namedKeyCredential, + { + retryOptions: { + maxRetries: 0 + } + } + ); - // Rotate credential to valid value. 
- namedKeyCredential.update(sharedAccessKeyName!, sharedAccessKey!); + await new Promise((resolve, reject) => { + // My attempt at defining the order of operations I expect to see. + const steps: Array<(...args: any[]) => void> = [ + // 1: wait for a `processEvents` to be called, then rotate the credentials to an invalid value and fast forward the clock! + (events: []) => { + if (!Array.isArray(events)) { + reject(new Error("Step 1 failed. Expected to see a list of events.")); + } + // Rotate credentials to invalid values and fast forward past the token refresh. + namedKeyCredential.update("foo", "bar"); + clock.tick(1000 * 60 * 45); + }, + // 2: observe another `processEvents` call. We should see this because the maxWaitTimeInSeconds is set to 5 seconds, and we fast forwarded the clock 45 minutes. + (events: []) => { + if (!Array.isArray(events)) { + reject(new Error("Step 2 failed. Expected to see a list of events.")); + } + }, + // 3: Since the token renewal has occurred, we should start seeing `UnauthorizedError` being thrown from our `processError` handler. + // Rotate the credentials back to valid values. + (err: any) => { + if (err.code !== "UnauthorizedError") { + reject( + new Error( + `Step 3 failed. Expected ${err.code} to equal "UnauthorizedError".` + ) + ); + } + // Rotate the credentials back to valid values. + namedKeyCredential.update(sharedAccessKeyName!, sharedAccessKey!); + }, + // 4: observe another `processEvents` call. + // If the credentials were still invalid, we'd expect to see `processError` thrown instead. + (events: []) => { + if (!Array.isArray(events)) { + reject(new Error("Step 4 failed. 
Expected to see a list of events.")); + } + resolve(); + } + ]; + + consumerClient.subscribe( + "0", + { + async processError(err) { + const step = steps.shift(); + if (step) step(err); + }, + async processEvents(events) { + const step = steps.shift(); + if (step) step(events); + } + }, + { + maxWaitTimeInSeconds: 5 + } + ); + }); - // This last sendBatch should succeed because we've updated our credentials again. - // Notice that we didn't have to fast forward through time to move past a token refresh! - await producerClient.sendBatch([{ body: "test2" }]); + return consumerClient.close(); + }); - return producerClient.close(); - }); - }); - }); - - describe("AzureSASCredential", () => { - function getSas(): string { - return createSasTokenProvider({ - sharedAccessKeyName: sharedAccessKeyName!, - sharedAccessKey: sharedAccessKey! - }).getToken(`${service.endpoint}/${service.path}`).token; - } + it("EventHubProducerClient send calls", async () => { + const namedKeyCredential = new AzureNamedKeyCredential( + sharedAccessKeyName!, + sharedAccessKey! + ); - describe("supports key rotation", () => { - it("EventHubConsumerClient $management calls", async () => { - const sasCredential = new AzureSASCredential(getSas()); + const producerClient = new EventHubProducerClient( + fullyQualifiedNamespace, + service.path, + namedKeyCredential, + { + retryOptions: { + maxRetries: 0 + } + } + ); - const consumerClient = new EventHubConsumerClient( - "$Default", - fullyQualifiedNamespace, - service.path, - sasCredential, - { - retryOptions: { - maxRetries: 0 - } - } - ); - - const properties = await consumerClient.getEventHubProperties(); - should.exist(properties); - - // Rotate credential to invalid value. 
- sasCredential.update( - `SharedAccessSignature sr=fake&sig=foo&se=${Date.now() / 1000}&skn=FakeKey` - ); - try { - await consumerClient.getEventHubProperties(); - throw new Error(TEST_FAILURE); - } catch (err) { - should.equal(err.code, "UnauthorizedError"); - } + // The 1st sendBatch is called with valid credentials, so it should succeed. + await producerClient.sendBatch([{ body: "test" }]); - // Rotate credential to valid value. - sasCredential.update(getSas()); - await consumerClient.getEventHubProperties(); - should.exist(properties); + // Rotate credential to invalid value. + namedKeyCredential.update("foo", "bar"); + // Fast forward through time to after the token refresh. + clock.tick(1000 * 60 * 45); - return consumerClient.close(); - }); + try { + // This sendBatch should fail because we've updated the credential to invalid values and allowed the cbs link to refresh. + await producerClient.sendBatch([{ body: "I don't have access." }]); + throw new Error(TEST_FAILURE); + } catch (err) { + should.equal(err.code, "UnauthorizedError"); + } + + // Rotate credential to valid value. + namedKeyCredential.update(sharedAccessKeyName!, sharedAccessKey!); - it("EventHubConsumerClient receive calls", async () => { - const sasCredential = new AzureSASCredential(getSas()); + // This last sendBatch should succeed because we've updated our credentials again. + // Notice that we didn't have to fast forward through time to move past a token refresh! + await producerClient.sendBatch([{ body: "test2" }]); - const consumerClient = new EventHubConsumerClient( - "$Default", - fullyQualifiedNamespace, - service.path, - sasCredential, - { - retryOptions: { - maxRetries: 0 - } + return producerClient.close(); + }); + }); + }); + + describe("AzureSASCredential", () => { + function getSas(): string { + return createSasTokenProvider({ + sharedAccessKeyName: sharedAccessKeyName!, + sharedAccessKey: sharedAccessKey! 
+ }).getToken(`${service.endpoint}/${service.path}`).token; } - ); - - await new Promise((resolve, reject) => { - // My attempt at defining the order of operations I expect to see. - const steps: Array<(...args: any[]) => void> = [ - // 1: wait for a `processEvents` to be called, then rotate the credentials to an invalid value and fast forward the clock! - (events: []) => { - if (!Array.isArray(events)) { - reject(new Error("Step 1 failed. Expected to see a list of events.")); - } - // Rotate credentials to invalid values and fast forward past the token refresh. + + describe("supports key rotation", () => { + it("EventHubConsumerClient $management calls", async () => { + const sasCredential = new AzureSASCredential(getSas()); + + const consumerClient = new EventHubConsumerClient( + "$Default", + fullyQualifiedNamespace, + service.path, + sasCredential, + { + retryOptions: { + maxRetries: 0 + } + } + ); + + const properties = await consumerClient.getEventHubProperties(); + should.exist(properties); + + // Rotate credential to invalid value. sasCredential.update( `SharedAccessSignature sr=fake&sig=foo&se=${Date.now() / 1000}&skn=FakeKey` ); - clock.tick(1000 * 60 * 45); - }, - // 2: observe another `processEvents` call. We should see this because the maxWaitTimeInSeconds is set to 5 seconds, and we fast forwarded the clock 45 minutes. - (events: []) => { - if (!Array.isArray(events)) { - reject(new Error("Step 2 failed. Expected to see a list of events.")); + try { + await consumerClient.getEventHubProperties(); + throw new Error(TEST_FAILURE); + } catch (err) { + should.equal(err.code, "UnauthorizedError"); } - }, - // 3: Since the token renewal has occurred, we should start seeing `UnauthorizedError` being thrown from our `processError` handler. - // Rotate the credentials back to valid values. - (err: any) => { - if (err.code !== "UnauthorizedError") { - reject( - new Error(`Step 3 failed. 
Expected ${err.code} to equal "UnauthorizedError".`) - ); - } - // Rotate the credentials back to valid values. + + // Rotate credential to valid value. sasCredential.update(getSas()); - }, - // 4: observe another `processEvents` call. - // If the credentials were still invalid, we'd expect to see `processError` thrown instead. - (events: []) => { - if (!Array.isArray(events)) { - reject(new Error("Step 4 failed. Expected to see a list of events.")); - } - resolve(); - } - ]; - - consumerClient.subscribe( - "0", - { - async processError(err) { - const step = steps.shift(); - if (step) step(err); - }, - async processEvents(events) { - const step = steps.shift(); - if (step) step(events); - } - }, - { - maxWaitTimeInSeconds: 5 - } - ); - }); + await consumerClient.getEventHubProperties(); + should.exist(properties); + + return consumerClient.close(); + }); + + it("EventHubConsumerClient receive calls", async () => { + const sasCredential = new AzureSASCredential(getSas()); + + const consumerClient = new EventHubConsumerClient( + "$Default", + fullyQualifiedNamespace, + service.path, + sasCredential, + { + retryOptions: { + maxRetries: 0 + } + } + ); - return consumerClient.close(); - }); + await new Promise((resolve, reject) => { + // My attempt at defining the order of operations I expect to see. + const steps: Array<(...args: any[]) => void> = [ + // 1: wait for a `processEvents` to be called, then rotate the credentials to an invalid value and fast forward the clock! + (events: []) => { + if (!Array.isArray(events)) { + reject(new Error("Step 1 failed. Expected to see a list of events.")); + } + // Rotate credentials to invalid values and fast forward past the token refresh. + sasCredential.update( + `SharedAccessSignature sr=fake&sig=foo&se=${Date.now() / 1000}&skn=FakeKey` + ); + clock.tick(1000 * 60 * 45); + }, + // 2: observe another `processEvents` call. 
We should see this because the maxWaitTimeInSeconds is set to 5 seconds, and we fast forwarded the clock 45 minutes. + (events: []) => { + if (!Array.isArray(events)) { + reject(new Error("Step 2 failed. Expected to see a list of events.")); + } + }, + // 3: Since the token renewal has occurred, we should start seeing `UnauthorizedError` being thrown from our `processError` handler. + // Rotate the credentials back to valid values. + (err: any) => { + if (err.code !== "UnauthorizedError") { + reject( + new Error( + `Step 3 failed. Expected ${err.code} to equal "UnauthorizedError".` + ) + ); + } + // Rotate the credentials back to valid values. + sasCredential.update(getSas()); + }, + // 4: observe another `processEvents` call. + // If the credentials were still invalid, we'd expect to see `processError` thrown instead. + (events: []) => { + if (!Array.isArray(events)) { + reject(new Error("Step 4 failed. Expected to see a list of events.")); + } + resolve(); + } + ]; + + consumerClient.subscribe( + "0", + { + async processError(err) { + const step = steps.shift(); + if (step) step(err); + }, + async processEvents(events) { + const step = steps.shift(); + if (step) step(events); + } + }, + { + maxWaitTimeInSeconds: 5 + } + ); + }); + + return consumerClient.close(); + }); + + it("EventHubProducerClient send calls", async () => { + const sasCredential = new AzureSASCredential(getSas()); + + const producerClient = new EventHubProducerClient( + fullyQualifiedNamespace, + service.path, + sasCredential, + { + retryOptions: { + maxRetries: 0 + } + } + ); - it("EventHubProducerClient send calls", async () => { - const sasCredential = new AzureSASCredential(getSas()); + // The 1st sendBatch is called with valid credentials, so it should succeed. 
+ await producerClient.sendBatch([{ body: "test" }]); - const producerClient = new EventHubProducerClient( - fullyQualifiedNamespace, - service.path, - sasCredential, - { - retryOptions: { - maxRetries: 0 - } - } - ); - - // The 1st sendBatch is called with valid credentials, so it should succeed. - await producerClient.sendBatch([{ body: "test" }]); - - // Rotate credential to invalid value. - sasCredential.update( - `SharedAccessSignature sr=fake&sig=foo&se=${Date.now() / 1000}&skn=FakeKey` - ); - // Fast forward through time to after the token refresh. - clock.tick(1000 * 60 * 45); - - try { - // This sendBatch should fail because we've updated the credential to invalid values and allowed the cbs link to refresh. - await producerClient.sendBatch([{ body: "I don't have access." }]); - throw new Error(TEST_FAILURE); - } catch (err) { - should.equal(err.code, "UnauthorizedError"); - } + // Rotate credential to invalid value. + sasCredential.update( + `SharedAccessSignature sr=fake&sig=foo&se=${Date.now() / 1000}&skn=FakeKey` + ); + // Fast forward through time to after the token refresh. + clock.tick(1000 * 60 * 45); - // Rotate credential to valid value. - sasCredential.update(getSas()); + try { + // This sendBatch should fail because we've updated the credential to invalid values and allowed the cbs link to refresh. + await producerClient.sendBatch([{ body: "I don't have access." }]); + throw new Error(TEST_FAILURE); + } catch (err) { + should.equal(err.code, "UnauthorizedError"); + } - // This last sendBatch should succeed because we've updated our credentials again. - // Notice that we didn't have to fast forward through time to move past a token refresh! - await producerClient.sendBatch([{ body: "test2" }]); + // Rotate credential to valid value. + sasCredential.update(getSas()); + + // This last sendBatch should succeed because we've updated our credentials again. + // Notice that we didn't have to fast forward through time to move past a token refresh! 
+ await producerClient.sendBatch([{ body: "test2" }]); - return producerClient.close(); + return producerClient.close(); + }); + }); + }); }); - }); - }); + } + ); }); diff --git a/sdk/eventhub/event-hubs/test/public/eventHubConsumerClient.spec.ts b/sdk/eventhub/event-hubs/test/public/eventHubConsumerClient.spec.ts index f83bc5a436f5..4be5f8358064 100644 --- a/sdk/eventhub/event-hubs/test/public/eventHubConsumerClient.spec.ts +++ b/sdk/eventhub/event-hubs/test/public/eventHubConsumerClient.spec.ts @@ -16,1270 +16,1314 @@ import { } from "../../src"; import debugModule from "debug"; const debug = debugModule("azure:event-hubs:receiver-spec"); -import { EnvVarKeys, getEnvVars, loopUntil, getStartingPositionsForTests } from "./utils/testUtils"; +import { + EnvVarKeys, + getEnvVars, + getEnvVarValue, + loopUntil, + getStartingPositionsForTests, + isNode +} from "./utils/testUtils"; import chai from "chai"; import { ReceivedMessagesTester } from "./utils/receivedMessagesTester"; import { LogTester } from "./utils/logHelpers"; import { TestInMemoryCheckpointStore } from "./utils/testInMemoryCheckpointStore"; +import { versionsToTest } from "@azure/test-utils-multi-version"; +import { createMockServer } from "./utils/mockService"; const should = chai.should(); -const env = getEnvVars(); - -describe("EventHubConsumerClient", () => { - const service = { - connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - path: env[EnvVarKeys.EVENTHUB_NAME] - }; - - let producerClient: EventHubProducerClient; - let consumerClient: EventHubConsumerClient; - let partitionIds: string[]; - - before(() => { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." 
- ); - }); - - beforeEach("Creating the clients", async () => { - producerClient = new EventHubProducerClient(service.connectionString, service.path); - consumerClient = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path - ); - partitionIds = await producerClient.getPartitionIds({}); - }); - afterEach("Closing the clients", () => { - return Promise.all([producerClient.close(), consumerClient.close()]); - }); +const serviceVersions = ["mock", "live"] as const; +const testTarget = getEnvVarValue("TEST_TARGET") || "live"; - describe("functional tests", () => { - let clients: EventHubConsumerClient[]; - let subscriptions: Subscription[]; +describe("public/eventHubConsumerClient.spec.ts", function() { + versionsToTest(serviceVersions, { versionForRecording: testTarget }, (serviceVersion) => { + const env = getEnvVars(serviceVersion as "live" | "mock"); + if (isNode) { + if (serviceVersion === "mock") { + let service: ReturnType; + before("Starting mock server", async () => { + service = createMockServer(); - beforeEach(() => { - // ensure we have at least 2 partitions - partitionIds.length.should.gte(2); + return service.start(); + }); + after("Stopping mock server", async () => { + return service?.stop(); + }); + } + } - clients = []; - subscriptions = []; - }); + describe("EventHubConsumerClient", () => { + const service = { + connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + path: env[EnvVarKeys.EVENTHUB_NAME] + }; - afterEach(async () => { - for (const subscription of subscriptions) { - await subscription.close(); - } + let producerClient: EventHubProducerClient; + let consumerClient: EventHubConsumerClient; + let partitionIds: string[]; - await Promise.all(clients.map((client) => client.close())); - clients = []; - }); + before(() => { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration 
tests." + ); + should.exist( + env[EnvVarKeys.EVENTHUB_NAME], + "define EVENTHUB_NAME in your environment before running integration tests." + ); + }); - describe("#close()", function(): void { - it("stops any actively running subscriptions", async function(): Promise { - const client = new EventHubConsumerClient( + beforeEach("Creating the clients", async () => { + producerClient = new EventHubProducerClient(service.connectionString, service.path); + consumerClient = new EventHubConsumerClient( EventHubConsumerClient.defaultConsumerGroupName, service.connectionString, service.path ); + partitionIds = await producerClient.getPartitionIds({}); + }); - // Spin up multiple subscriptions. - for (const partitionId of partitionIds) { - subscriptions.push( - client.subscribe(partitionId, { - async processError() { - /* no-op for test */ - }, - async processEvents() { - /* no-op for test */ - } - }) - ); - } + afterEach("Closing the clients", () => { + return Promise.all([producerClient.close(), consumerClient.close()]); + }); - // Assert that the subscriptions are all running. - for (const subscription of subscriptions) { - subscription.isRunning.should.equal(true, "The subscription should be running."); - } + describe("functional tests", () => { + let clients: EventHubConsumerClient[]; + let subscriptions: Subscription[]; - // Stop the client, which should stop the subscriptions. - await client.close(); + beforeEach(() => { + // ensure we have at least 2 partitions + partitionIds.length.should.gte(2); - // Assert that the subscriptions are all not running. - for (const subscription of subscriptions) { - subscription.isRunning.should.equal(false, "The subscription should not be running."); - } + clients = []; + subscriptions = []; + }); - client["_subscriptions"].size.should.equal( - 0, - "Some dangling subscriptions are still hanging around!" 
- ); - }); + afterEach(async () => { + for (const subscription of subscriptions) { + await subscription.close(); + } - it("gracefully stops running subscriptions", async function(): Promise { - const client = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path - ); + await Promise.all(clients.map((client) => client.close())); + clients = []; + }); - const startingPositions = await getStartingPositionsForTests(client); + describe("#close()", function(): void { + it("stops any actively running subscriptions", async function(): Promise { + const client = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path + ); - let waitForInitializeResolver: () => void; - const waitForInitialize = new Promise( - (resolve) => (waitForInitializeResolver = resolve) - ); - let waitForCloseResolver: (reason: CloseReason) => void; - const waitForClose = new Promise( - (resolve) => (waitForCloseResolver = resolve) - ); - let unexpectedError: Error | undefined; - let eventsWereReceived = false; + // Spin up multiple subscriptions. + for (const partitionId of partitionIds) { + subscriptions.push( + client.subscribe(partitionId, { + async processError() { + /* no-op for test */ + }, + async processEvents() { + /* no-op for test */ + } + }) + ); + } - const subscription = client.subscribe( - partitionIds[0], - { - async processInitialize() { - waitForInitializeResolver(); - }, - async processError(err) { - unexpectedError = err; - }, - async processEvents() { - eventsWereReceived = true; - }, - async processClose(reason) { - waitForCloseResolver(reason); + // Assert that the subscriptions are all running. + for (const subscription of subscriptions) { + subscription.isRunning.should.equal(true, "The subscription should be running."); } - }, - { - startPosition: startingPositions - } - ); - // Assert that the subscription is running. 
- subscription.isRunning.should.equal(true, "The subscription should be running."); + // Stop the client, which should stop the subscriptions. + await client.close(); + + // Assert that the subscriptions are all not running. + for (const subscription of subscriptions) { + subscription.isRunning.should.equal(false, "The subscription should not be running."); + } - // Wait until we see a `processInitialze` handler get invoked. - // This lets us know that the subscription is starting to read from a partition. - await waitForInitialize; + client["_subscriptions"].size.should.equal( + 0, + "Some dangling subscriptions are still hanging around!" + ); + }); - // Stop the client, which should stop the subscriptions. - await client.close(); + it("gracefully stops running subscriptions", async function(): Promise { + const client = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path + ); - // Ensure that the `processClose` handler was invoked with the expected reason. - const closeReason = await waitForClose; - closeReason.should.equal( - CloseReason.Shutdown, - "Subscription closed for an unexpected reason." - ); + const startingPositions = await getStartingPositionsForTests(client); - // Ensure no errors were thrown. 
- should.not.exist(unexpectedError, "Did not expect to observe an error."); + let waitForInitializeResolver: () => void; + const waitForInitialize = new Promise( + (resolve) => (waitForInitializeResolver = resolve) + ); + let waitForCloseResolver: (reason: CloseReason) => void; + const waitForClose = new Promise( + (resolve) => (waitForCloseResolver = resolve) + ); + let unexpectedError: Error | undefined; + let eventsWereReceived = false; + + const subscription = client.subscribe( + partitionIds[0], + { + async processInitialize() { + waitForInitializeResolver(); + }, + async processError(err) { + unexpectedError = err; + }, + async processEvents() { + eventsWereReceived = true; + }, + async processClose(reason) { + waitForCloseResolver(reason); + } + }, + { + startPosition: startingPositions + } + ); - // Ensure the event handler wasn't called. - eventsWereReceived.should.equal(false, "Should not have received events."); + // Assert that the subscription is running. + subscription.isRunning.should.equal(true, "The subscription should be running."); - // Assert that the subscription is not running. - subscription.isRunning.should.equal(false, "The subscription should not be running."); + // Wait until we see a `processInitialze` handler get invoked. + // This lets us know that the subscription is starting to read from a partition. + await waitForInitialize; - client["_subscriptions"].size.should.equal( - 0, - "Some dangling subscriptions are still hanging around!" - ); - }); - }); + // Stop the client, which should stop the subscriptions. 
+ await client.close(); - describe("Reinitialize partition processing after error", function(): void { - it("when subscribed to single partition", async function(): Promise { - const partitionId = "0"; - const consumerClient1 = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path - ); - const consumerClient2 = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path - ); + // Ensure that the `processClose` handler was invoked with the expected reason. + const closeReason = await waitForClose; + closeReason.should.equal( + CloseReason.Shutdown, + "Subscription closed for an unexpected reason." + ); - clients.push(consumerClient1, consumerClient2); - let subscription2: Subscription | undefined; - const subscriptionHandlers2: SubscriptionEventHandlers = { - async processError() { - /* no-op */ - }, - async processEvents() { - // stop this subscription since it already should have forced the 1st subscription to have an error. - await subscription2!.close(); - } - }; + // Ensure no errors were thrown. + should.not.exist(unexpectedError, "Did not expect to observe an error."); - // keep track of the handlers called on subscription 1 - const handlerCalls = { - initialize: 0, - close: 0 - }; + // Ensure the event handler wasn't called. 
+ eventsWereReceived.should.equal(false, "Should not have received events."); - const subscription1 = consumerClient1.subscribe( - partitionId, - { - async processError() { - /* no-op */ - }, - async processEvents() { - if (!handlerCalls.close) { - // start the 2nd subscription that will kick the 1st subscription off - subscription2 = consumerClient2.subscribe(partitionId, subscriptionHandlers2, { - ownerLevel: 1, - maxBatchSize: 1, - maxWaitTimeInSeconds: 1 - }); - } else { - // stop this subscription, we know close was called so we've restarted - await subscription1.close(); - } - }, - async processClose() { - handlerCalls.close++; - }, - async processInitialize() { - handlerCalls.initialize++; - } - }, - { - maxBatchSize: 1, - maxWaitTimeInSeconds: 1 - } - ); + // Assert that the subscription is not running. + subscription.isRunning.should.equal(false, "The subscription should not be running."); - await loopUntil({ - maxTimes: 10, - name: "Wait for subscription1 to recover", - timeBetweenRunsMs: 5000, - async until() { - return !subscription1.isRunning && !subscription2!.isRunning; - } + client["_subscriptions"].size.should.equal( + 0, + "Some dangling subscriptions are still hanging around!" + ); + }); }); - // Initialize may be called multiple times while the 2nd subscription is running. - // We want to make sure it has been called at least twice to verify that subscription1 - // attempts to recover. 
- handlerCalls.initialize.should.be.greaterThan(1); - handlerCalls.close.should.be.greaterThan(1); - }); - - it("when subscribed to multiple partitions", async function(): Promise { - const consumerClient1 = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path, - { loadBalancingOptions: { updateIntervalInMs: 1000 } } - ); - const consumerClient2 = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path, - { loadBalancingOptions: { updateIntervalInMs: 1000 } } - ); + describe("Reinitialize partition processing after error", function(): void { + it("when subscribed to single partition", async function(): Promise { + const partitionId = "0"; + const consumerClient1 = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path + ); + const consumerClient2 = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path + ); - clients.push(consumerClient1, consumerClient2); + clients.push(consumerClient1, consumerClient2); + let subscription2: Subscription | undefined; + const subscriptionHandlers2: SubscriptionEventHandlers = { + async processError() { + /* no-op */ + }, + async processEvents() { + // stop this subscription since it already should have forced the 1st subscription to have an error. 
+ await subscription2!.close(); + } + }; + + // keep track of the handlers called on subscription 1 + const handlerCalls = { + initialize: 0, + close: 0 + }; + + const subscription1 = consumerClient1.subscribe( + partitionId, + { + async processError() { + /* no-op */ + }, + async processEvents() { + if (!handlerCalls.close) { + // start the 2nd subscription that will kick the 1st subscription off + subscription2 = consumerClient2.subscribe(partitionId, subscriptionHandlers2, { + ownerLevel: 1, + maxBatchSize: 1, + maxWaitTimeInSeconds: 1 + }); + } else { + // stop this subscription, we know close was called so we've restarted + await subscription1.close(); + } + }, + async processClose() { + handlerCalls.close++; + }, + async processInitialize() { + handlerCalls.initialize++; + } + }, + { + maxBatchSize: 1, + maxWaitTimeInSeconds: 1 + } + ); - const partitionHandlerCalls: { - [partitionId: string]: { - initialize: number; - processEvents: boolean; - close: number; - }; - } = {}; - - // keep track of the handlers called on subscription 1 - for (const id of partitionIds) { - partitionHandlerCalls[id] = { initialize: 0, processEvents: false, close: 0 }; - } - - const subscriptionHandlers1: SubscriptionEventHandlers = { - async processError() { - /* no-op */ - }, - async processEvents(_, context) { - partitionHandlerCalls[context.partitionId].processEvents = true; - }, - async processClose(_, context) { - partitionHandlerCalls[context.partitionId].close++; - // reset processEvents count - partitionHandlerCalls[context.partitionId].processEvents = false; - }, - async processInitialize(context) { - partitionHandlerCalls[context.partitionId].initialize++; - } - }; + await loopUntil({ + maxTimes: 10, + name: "Wait for subscription1 to recover", + timeBetweenRunsMs: 5000, + async until() { + return !subscription1.isRunning && !subscription2!.isRunning; + } + }); - const subscription1 = consumerClient1.subscribe(subscriptionHandlers1, { - maxBatchSize: 1, - 
maxWaitTimeInSeconds: 1 - }); + // Initialize may be called multiple times while the 2nd subscription is running. + // We want to make sure it has been called at least twice to verify that subscription1 + // attempts to recover. + handlerCalls.initialize.should.be.greaterThan(1); + handlerCalls.close.should.be.greaterThan(1); + }); - await loopUntil({ - maxTimes: 10, - name: "Wait for subscription1 to read from all partitions", - timeBetweenRunsMs: 1000, - async until() { - // wait until we've seen processEvents invoked for each partition. - return ( - partitionIds.filter((id) => { - return partitionHandlerCalls[id].processEvents; - }).length === partitionIds.length + it("when subscribed to multiple partitions", async function(): Promise { + const consumerClient1 = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path, + { loadBalancingOptions: { updateIntervalInMs: 1000 } } + ); + const consumerClient2 = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path, + { loadBalancingOptions: { updateIntervalInMs: 1000 } } ); - } - }); - const partitionsReadFromSub2 = new Set(); - const subscriptionHandlers2: SubscriptionEventHandlers = { - async processError() { - /* no-op */ - }, - async processEvents(_, context) { - partitionsReadFromSub2.add(context.partitionId); - } - }; + clients.push(consumerClient1, consumerClient2); - // start 2nd subscription with an ownerLevel so it triggers the close handlers on the 1st subscription. 
- const subscription2 = consumerClient2.subscribe(subscriptionHandlers2, { - maxBatchSize: 1, - maxWaitTimeInSeconds: 1, - ownerLevel: 1 - }); + const partitionHandlerCalls: { + [partitionId: string]: { + initialize: number; + processEvents: boolean; + close: number; + }; + } = {}; - await loopUntil({ - maxTimes: 10, - name: - "Wait for subscription2 to read from all partitions and subscription1 to invoke close handlers", - timeBetweenRunsMs: 1000, - async until() { - const sub1CloseHandlersCalled = Boolean( - partitionIds.filter((id) => { - return partitionHandlerCalls[id].close > 0; - }).length === partitionIds.length - ); - return partitionsReadFromSub2.size === partitionIds.length && sub1CloseHandlersCalled; - } - }); + // keep track of the handlers called on subscription 1 + for (const id of partitionIds) { + partitionHandlerCalls[id] = { initialize: 0, processEvents: false, close: 0 }; + } - // close subscription2 so subscription1 can recover. - await subscription2.close(); - - await loopUntil({ - maxTimes: 10, - name: "Wait for subscription1 to recover", - timeBetweenRunsMs: 1000, - async until() { - // wait until we've seen an additional processEvent for each partition. 
- return ( - partitionIds.filter((id) => { - return partitionHandlerCalls[id].processEvents; - }).length === partitionIds.length - ); - } - }); + const subscriptionHandlers1: SubscriptionEventHandlers = { + async processError() { + /* no-op */ + }, + async processEvents(_, context) { + partitionHandlerCalls[context.partitionId].processEvents = true; + }, + async processClose(_, context) { + partitionHandlerCalls[context.partitionId].close++; + // reset processEvents count + partitionHandlerCalls[context.partitionId].processEvents = false; + }, + async processInitialize(context) { + partitionHandlerCalls[context.partitionId].initialize++; + } + }; + + const subscription1 = consumerClient1.subscribe(subscriptionHandlers1, { + maxBatchSize: 1, + maxWaitTimeInSeconds: 1 + }); + + await loopUntil({ + maxTimes: 10, + name: "Wait for subscription1 to read from all partitions", + timeBetweenRunsMs: 1000, + async until() { + // wait until we've seen processEvents invoked for each partition. + return ( + partitionIds.filter((id) => { + return partitionHandlerCalls[id].processEvents; + }).length === partitionIds.length + ); + } + }); - await subscription1.close(); + const partitionsReadFromSub2 = new Set(); + const subscriptionHandlers2: SubscriptionEventHandlers = { + async processError() { + /* no-op */ + }, + async processEvents(_, context) { + partitionsReadFromSub2.add(context.partitionId); + } + }; + + // start 2nd subscription with an ownerLevel so it triggers the close handlers on the 1st subscription. 
+ const subscription2 = consumerClient2.subscribe(subscriptionHandlers2, { + maxBatchSize: 1, + maxWaitTimeInSeconds: 1, + ownerLevel: 1 + }); + + await loopUntil({ + maxTimes: 10, + name: + "Wait for subscription2 to read from all partitions and subscription1 to invoke close handlers", + timeBetweenRunsMs: 1000, + async until() { + const sub1CloseHandlersCalled = Boolean( + partitionIds.filter((id) => { + return partitionHandlerCalls[id].close > 0; + }).length === partitionIds.length + ); + return ( + partitionsReadFromSub2.size === partitionIds.length && sub1CloseHandlersCalled + ); + } + }); + + // close subscription2 so subscription1 can recover. + await subscription2.close(); + + await loopUntil({ + maxTimes: 10, + name: "Wait for subscription1 to recover", + timeBetweenRunsMs: 1000, + async until() { + // wait until we've seen an additional processEvent for each partition. + return ( + partitionIds.filter((id) => { + return partitionHandlerCalls[id].processEvents; + }).length === partitionIds.length + ); + } + }); + + await subscription1.close(); + + for (const id of partitionIds) { + partitionHandlerCalls[id].initialize.should.be.greaterThan( + 1, + `Initialize on partition ${id} was not called more than 1 time.` + ); + partitionHandlerCalls[id].close.should.be.greaterThan( + 1, + `Close on partition ${id} was not called more than 1 time.` + ); + } + }); + }); - for (const id of partitionIds) { - partitionHandlerCalls[id].initialize.should.be.greaterThan( - 1, - `Initialize on partition ${id} was not called more than 1 time.` + it("Receive from specific partitions, no coordination", async function(): Promise { + const logTester = new LogTester( + [ + "EventHubConsumerClient subscribing to specific partition (0), no checkpoint store.", + "Single partition target: 0", + "No partitions owned, skipping abandoning." 
+ ], + [ + logger.verbose as debug.Debugger, + logger.verbose as debug.Debugger, + logger.verbose as debug.Debugger + ] ); - partitionHandlerCalls[id].close.should.be.greaterThan( - 1, - `Close on partition ${id} was not called more than 1 time.` + + const tester = new ReceivedMessagesTester(["0"], false); + + clients.push( + new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString!, + service.path + ) ); - } - }); - }); - - it("Receive from specific partitions, no coordination", async function(): Promise { - const logTester = new LogTester( - [ - "EventHubConsumerClient subscribing to specific partition (0), no checkpoint store.", - "Single partition target: 0", - "No partitions owned, skipping abandoning." - ], - [ - logger.verbose as debug.Debugger, - logger.verbose as debug.Debugger, - logger.verbose as debug.Debugger - ] - ); - - const tester = new ReceivedMessagesTester(["0"], false); - - clients.push( - new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString!, - service.path - ) - ); - const subscription = clients[0].subscribe("0", tester, { - startPosition: latestEventPosition - }); + const subscription = clients[0].subscribe("0", tester, { + startPosition: latestEventPosition + }); - subscriptions.push(subscription); + subscriptions.push(subscription); - await tester.runTestAndPoll(producerClient); - await subscription.close(); // or else we won't see the partition abandoning messages + await tester.runTestAndPoll(producerClient); + await subscription.close(); // or else we won't see the partition abandoning messages - logTester.assert(); - }); + logTester.assert(); + }); - it("Receive from all partitions, no coordination", async function(): Promise { - const logTester = new LogTester( - ["EventHubConsumerClient subscribing to all partitions, no checkpoint store."], - [ - logger.verbose as debug.Debugger, - logger.verbose as debug.Debugger, - logger.verbose 
as debug.Debugger - ] - ); + it("Receive from all partitions, no coordination", async function(): Promise { + const logTester = new LogTester( + ["EventHubConsumerClient subscribing to all partitions, no checkpoint store."], + [ + logger.verbose as debug.Debugger, + logger.verbose as debug.Debugger, + logger.verbose as debug.Debugger + ] + ); - const tester = new ReceivedMessagesTester(partitionIds, false); + const tester = new ReceivedMessagesTester(partitionIds, false); - clients.push( - new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString!, - service.path - ) - ); + clients.push( + new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString!, + service.path + ) + ); - const subscription = clients[0].subscribe(tester, { - startPosition: latestEventPosition - }); + const subscription = clients[0].subscribe(tester, { + startPosition: latestEventPosition + }); - await tester.runTestAndPoll(producerClient); - subscriptions.push(subscription); - - logTester.assert(); - }); - - it("Receive from all partitions, no coordination but through multiple subscribe() calls", async function(): Promise< - void - > { - const logTester = new LogTester( - [ - ...partitionIds.map( - (partitionId) => - `EventHubConsumerClient subscribing to specific partition (${partitionId}), no checkpoint store.`, - `Abandoning owned partitions` - ), - ...partitionIds.map((partitionId) => `Single partition target: ${partitionId}`) - ], - [ - logger.verbose as debug.Debugger, - logger.verbose as debug.Debugger, - logger.verbose as debug.Debugger - ] - ); - - const tester = new ReceivedMessagesTester(partitionIds, false); - - clients.push( - new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString!, - service.path - ) - ); + await tester.runTestAndPoll(producerClient); + subscriptions.push(subscription); - for (const partitionId of await partitionIds) { - 
const subscription = clients[0].subscribe(partitionId, tester, { - startPosition: latestEventPosition + logTester.assert(); }); - subscriptions.push(subscription); - } - - await tester.runTestAndPoll(producerClient); - - logTester.assert(); - }); - - it("Receive from all partitions, coordinating with the same partition manager and using the default LoadBalancingStrategy", async function(): Promise< - void - > { - // fast forward our partition manager so it starts reading from the latest offset - // instead of the beginning of time. - const logTester = new LogTester( - [ - "EventHubConsumerClient subscribing to all partitions, using a checkpoint store.", - /Starting event processor with ID /, - "Abandoning owned partitions" - ], - [ - logger.verbose as debug.Debugger, - logger.verbose as debug.Debugger, - logger.verbose as debug.Debugger - ] - ); - - const checkpointStore = new TestInMemoryCheckpointStore(); - - clients.push( - new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString!, - service.path, - // specifying your own checkpoint store activates the "production ready" code path that - checkpointStore - // also uses the BalancedLoadBalancingStrategy - ) - ); - - const tester = new ReceivedMessagesTester(partitionIds, true); - - const subscriber1 = clients[0].subscribe(tester, { - startPosition: latestEventPosition - }); - subscriptions.push(subscriber1); - clients.push( - new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString!, - service.path - // specifying your own checkpoint store activates the "production ready" code path that - // also uses the BalancedLoadBalancingStrategy - ) - ); + it("Receive from all partitions, no coordination but through multiple subscribe() calls", async function(): Promise< + void + > { + const logTester = new LogTester( + [ + ...partitionIds.map( + (partitionId) => + `EventHubConsumerClient subscribing to specific partition 
(${partitionId}), no checkpoint store.`, + `Abandoning owned partitions` + ), + ...partitionIds.map((partitionId) => `Single partition target: ${partitionId}`) + ], + [ + logger.verbose as debug.Debugger, + logger.verbose as debug.Debugger, + logger.verbose as debug.Debugger + ] + ); - const subscriber2 = clients[1].subscribe(tester, { - startPosition: latestEventPosition - }); - subscriptions.push(subscriber2); + const tester = new ReceivedMessagesTester(partitionIds, false); - await tester.runTestAndPoll(producerClient); + clients.push( + new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString!, + service.path + ) + ); - // or else we won't see the abandoning message - for (const subscription of subscriptions) { - await subscription.close(); - } - logTester.assert(); - }); - - it("Receive from all partitions, coordinating with the same partition manager and using the GreedyLoadBalancingStrategy", async function(): Promise< - void - > { - // fast forward our partition manager so it starts reading from the latest offset - // instead of the beginning of time. 
- const logTester = new LogTester( - [ - "EventHubConsumerClient subscribing to all partitions, using a checkpoint store.", - /Starting event processor with ID /, - "Abandoning owned partitions" - ], - [ - logger.verbose as debug.Debugger, - logger.verbose as debug.Debugger, - logger.verbose as debug.Debugger - ] - ); - - const checkpointStore = new TestInMemoryCheckpointStore(); - - clients.push( - new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString!, - service.path, - // specifying your own checkpoint store activates the "production ready" code path that - { - loadBalancingOptions: { - strategy: "greedy" - } + for (const partitionId of await partitionIds) { + const subscription = clients[0].subscribe(partitionId, tester, { + startPosition: latestEventPosition + }); + subscriptions.push(subscription); } - ) - ); - const tester = new ReceivedMessagesTester(partitionIds, true); + await tester.runTestAndPoll(producerClient); - const subscriber1 = clients[0].subscribe(tester, { - startPosition: latestEventPosition - }); - subscriptions.push(subscriber1); + logTester.assert(); + }); - clients.push( - new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString!, - service.path, - // specifying your own checkpoint store activates the "production ready" code path that - checkpointStore, - { - loadBalancingOptions: { - strategy: "greedy" - } + it("Receive from all partitions, coordinating with the same partition manager and using the default LoadBalancingStrategy", async function(): Promise< + void + > { + // fast forward our partition manager so it starts reading from the latest offset + // instead of the beginning of time. 
+ const logTester = new LogTester( + [ + "EventHubConsumerClient subscribing to all partitions, using a checkpoint store.", + /Starting event processor with ID /, + "Abandoning owned partitions" + ], + [ + logger.verbose as debug.Debugger, + logger.verbose as debug.Debugger, + logger.verbose as debug.Debugger + ] + ); + + const checkpointStore = new TestInMemoryCheckpointStore(); + + clients.push( + new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString!, + service.path, + // specifying your own checkpoint store activates the "production ready" code path that + checkpointStore + // also uses the BalancedLoadBalancingStrategy + ) + ); + + const tester = new ReceivedMessagesTester(partitionIds, true); + + const subscriber1 = clients[0].subscribe(tester, { + startPosition: latestEventPosition + }); + subscriptions.push(subscriber1); + + clients.push( + new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString!, + service.path + // specifying your own checkpoint store activates the "production ready" code path that + // also uses the BalancedLoadBalancingStrategy + ) + ); + + const subscriber2 = clients[1].subscribe(tester, { + startPosition: latestEventPosition + }); + subscriptions.push(subscriber2); + + await tester.runTestAndPoll(producerClient); + + // or else we won't see the abandoning message + for (const subscription of subscriptions) { + await subscription.close(); } - ) - ); + logTester.assert(); + }); - const subscriber2 = clients[1].subscribe(tester, { - startPosition: latestEventPosition - }); - subscriptions.push(subscriber2); + it("Receive from all partitions, coordinating with the same partition manager and using the GreedyLoadBalancingStrategy", async function(): Promise< + void + > { + // fast forward our partition manager so it starts reading from the latest offset + // instead of the beginning of time. 
+ const logTester = new LogTester( + [ + "EventHubConsumerClient subscribing to all partitions, using a checkpoint store.", + /Starting event processor with ID /, + "Abandoning owned partitions" + ], + [ + logger.verbose as debug.Debugger, + logger.verbose as debug.Debugger, + logger.verbose as debug.Debugger + ] + ); - await tester.runTestAndPoll(producerClient); + const checkpointStore = new TestInMemoryCheckpointStore(); + + clients.push( + new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString!, + service.path, + // specifying your own checkpoint store activates the "production ready" code path that + { + loadBalancingOptions: { + strategy: "greedy" + } + } + ) + ); - // or else we won't see the abandoning message - for (const subscription of subscriptions) { - await subscription.close(); - } - logTester.assert(); - }); - - it("Stops receiving events if close is immediately called, single partition.", async function(): Promise< - void - > { - const partitionId = "0"; - const client = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path - ); - - clients.push(client); - - let initializeCalled = 0; - let closeCalled = 0; - - const subscription = client.subscribe(partitionId, { - async processError() { - /* no-op */ - }, - async processEvents() { - /* no-op */ - }, - async processClose() { - closeCalled++; - }, - async processInitialize() { - initializeCalled++; - } - }); + const tester = new ReceivedMessagesTester(partitionIds, true); - await subscription.close(); + const subscriber1 = clients[0].subscribe(tester, { + startPosition: latestEventPosition + }); + subscriptions.push(subscriber1); + + clients.push( + new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString!, + service.path, + // specifying your own checkpoint store activates the "production ready" code path that + checkpointStore, + { + 
loadBalancingOptions: { + strategy: "greedy" + } + } + ) + ); - await loopUntil({ - maxTimes: 10, - name: "Wait for the subscription to stop running.", - timeBetweenRunsMs: 100, - async until() { - return !subscription.isRunning; - } - }); + const subscriber2 = clients[1].subscribe(tester, { + startPosition: latestEventPosition + }); + subscriptions.push(subscriber2); - // If `processInitialize` is called, then `processClose` should be called as well. - // Otherwise, we shouldn't see either called. - initializeCalled.should.equal( - closeCalled, - "processClose was not called the same number of times as processInitialize." - ); - }); - - it("Stops receiving events if close is immediately called, multiple partitions.", async function(): Promise< - void - > { - const client = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path - ); - - clients.push(client); - - let initializeCalled = 0; - let closeCalled = 0; - - const subscription = client.subscribe({ - async processError() { - /* no-op */ - }, - async processEvents() { - /* no-op */ - }, - async processClose() { - closeCalled++; - }, - async processInitialize() { - initializeCalled++; - } - }); + await tester.runTestAndPoll(producerClient); - await subscription.close(); + // or else we won't see the abandoning message + for (const subscription of subscriptions) { + await subscription.close(); + } + logTester.assert(); + }); - await loopUntil({ - maxTimes: 10, - name: "Wait for the subscription to stop running.", - timeBetweenRunsMs: 100, - async until() { - return !subscription.isRunning; - } - }); + it("Stops receiving events if close is immediately called, single partition.", async function(): Promise< + void + > { + const partitionId = "0"; + const client = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path + ); - // If `processInitialize` is called, then `processClose` 
should be called as well. - // Otherwise, we shouldn't see either called. - initializeCalled.should.equal( - closeCalled, - "processClose was not called the same number of times as processInitialize." - ); - }); - - describe("processError", function(): void { - it("supports awaiting subscription.close on non partition-specific errors", async function(): Promise< - void - > { - // Use an invalid Event Hub name to trigger a non partition-specific error. - const client = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - "Fake-Hub" - ); + clients.push(client); - let subscription: Subscription; - const caughtErr: Error = await new Promise((resolve) => { - subscription = client.subscribe({ - processEvents: async () => { + let initializeCalled = 0; + let closeCalled = 0; + + const subscription = client.subscribe(partitionId, { + async processError() { /* no-op */ }, - processError: async (err, context) => { - if (!context.partitionId) { - await subscription.close(); - resolve(err); - } + async processEvents() { + /* no-op */ + }, + async processClose() { + closeCalled++; + }, + async processInitialize() { + initializeCalled++; + } + }); + + await subscription.close(); + + await loopUntil({ + maxTimes: 10, + name: "Wait for the subscription to stop running.", + timeBetweenRunsMs: 100, + async until() { + return !subscription.isRunning; } }); + + // If `processInitialize` is called, then `processClose` should be called as well. + // Otherwise, we shouldn't see either called. + initializeCalled.should.equal( + closeCalled, + "processClose was not called the same number of times as processInitialize." 
+ ); }); - should.exist(caughtErr); + it("Stops receiving events if close is immediately called, multiple partitions.", async function(): Promise< + void + > { + const client = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path + ); - await client.close(); - }); + clients.push(client); - it("supports awaiting subscription.close on partition-specific errors", async function(): Promise< - void - > { - // Use an invalid Event Hub name to trigger a non partition-specific error. - const client = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path - ); + let initializeCalled = 0; + let closeCalled = 0; - let subscription: Subscription; - const caughtErr: Error = await new Promise((resolve) => { - // Subscribe to an invalid partition id to trigger a partition-specific error. - subscription = client.subscribe("-1", { - processEvents: async () => { + const subscription = client.subscribe({ + async processError() { /* no-op */ }, - processError: async (err, context) => { - if (context.partitionId) { - await subscription.close(); - resolve(err); - } + async processEvents() { + /* no-op */ + }, + async processClose() { + closeCalled++; + }, + async processInitialize() { + initializeCalled++; + } + }); + + await subscription.close(); + + await loopUntil({ + maxTimes: 10, + name: "Wait for the subscription to stop running.", + timeBetweenRunsMs: 100, + async until() { + return !subscription.isRunning; } }); + + // If `processInitialize` is called, then `processClose` should be called as well. + // Otherwise, we shouldn't see either called. + initializeCalled.should.equal( + closeCalled, + "processClose was not called the same number of times as processInitialize." 
+ ); }); - should.exist(caughtErr); + describe("processError", function(): void { + it("supports awaiting subscription.close on non partition-specific errors", async function(): Promise< + void + > { + // Use an invalid Event Hub name to trigger a non partition-specific error. + const client = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + "Fake-Hub" + ); + + let subscription: Subscription; + const caughtErr: Error = await new Promise((resolve) => { + subscription = client.subscribe({ + processEvents: async () => { + /* no-op */ + }, + processError: async (err, context) => { + if (!context.partitionId) { + await subscription.close(); + resolve(err); + } + } + }); + }); + + should.exist(caughtErr); + + await client.close(); + }); + + it("supports awaiting subscription.close on partition-specific errors", async function(): Promise< + void + > { + // Use an invalid Event Hub name to trigger a non partition-specific error. + const client = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path + ); - await client.close(); + let subscription: Subscription; + const caughtErr: Error = await new Promise((resolve) => { + // Subscribe to an invalid partition id to trigger a partition-specific error. 
+ subscription = client.subscribe("-1", { + processEvents: async () => { + /* no-op */ + }, + processError: async (err, context) => { + if (context.partitionId) { + await subscription.close(); + resolve(err); + } + } + }); + }); + + should.exist(caughtErr); + + await client.close(); + }); + }); + }); - }); - }); - describe("subscribe() with partitionId 0 as number", function(): void { - it("should not throw an error", async function(): Promise<void> { - let subscription: Subscription | undefined; - await new Promise<void>((resolve, reject) => { - subscription = consumerClient.subscribe( - // @ts-expect-error number for partitionId should work even if type is string - 0, - { - processEvents: async () => { - resolve(); - }, - processError: async (err) => { - reject(err); - } - }, - { - startPosition: latestEventPosition, - maxWaitTimeInSeconds: 0 // Set timeout of 0 to resolve the promise ASAP - } - ); + describe("subscribe() with partitionId 0 as number", function(): void { + it("should not throw an error", async function(): Promise<void> { + let subscription: Subscription | undefined; + await new Promise<void>((resolve, reject) => { + subscription = consumerClient.subscribe( + // @ts-expect-error number for partitionId should work even if type is string + 0, + { + processEvents: async () => { + resolve(); + }, + processError: async (err) => { + reject(err); + } + }, + { + startPosition: latestEventPosition, + maxWaitTimeInSeconds: 0 // Set timeout of 0 to resolve the promise ASAP + } + ); + }); + await subscription!.close(); + }); + }); - await subscription!.close(); - }); - }); - describe("subscribe() with EventPosition specified as", function(): void { - let partitionId: string; - let eventSentBeforeSubscribe: EventData; - let eventsSentAfterSubscribe: EventData[]; + describe("subscribe() with EventPosition specified as", function(): void { + let partitionId: string; + let eventSentBeforeSubscribe: EventData; + let eventsSentAfterSubscribe: EventData[]; - beforeEach(async () => { -
partitionId = partitionIds[0]; + beforeEach(async () => { + partitionId = partitionIds[0]; - eventSentBeforeSubscribe = { - body: "Hello awesome world " + Math.random() - }; - await producerClient.sendBatch([eventSentBeforeSubscribe], { partitionId }); - - eventsSentAfterSubscribe = []; - for (let i = 0; i < 5; i++) { - eventsSentAfterSubscribe.push({ - body: "Hello awesome world " + Math.random(), - properties: { - stamp: Math.random() + eventSentBeforeSubscribe = { + body: "Hello awesome world " + Math.random() + }; + await producerClient.sendBatch([eventSentBeforeSubscribe], { partitionId }); + + eventsSentAfterSubscribe = []; + for (let i = 0; i < 5; i++) { + eventsSentAfterSubscribe.push({ + body: "Hello awesome world " + Math.random(), + properties: { + stamp: Math.random() + } + }); } }); - } - }); - - it("'from end of stream' should receive messages correctly", async function(): Promise { - let subscription: Subscription | undefined; - let processEventsCalled = false; - const eventsReceived: ReceivedEventData[] = []; - - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - if (!processEventsCalled) { - processEventsCalled = true; - should.equal(data.length, 0, "Received events when none were sent yet."); - await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); - return; - } - eventsReceived.push(...data); - if (eventsReceived.length === eventsSentAfterSubscribe.length) { - resolve(); + + it("'from end of stream' should receive messages correctly", async function(): Promise< + void + > { + let subscription: Subscription | undefined; + let processEventsCalled = false; + const eventsReceived: ReceivedEventData[] = []; + + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + if (!processEventsCalled) { + processEventsCalled = true; + should.equal(data.length, 0, 
"Received events when none were sent yet."); + await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); + return; + } + eventsReceived.push(...data); + if (eventsReceived.length === eventsSentAfterSubscribe.length) { + resolve(); + } + }, + processError: async (err) => { + reject(err); + } + }, + { + startPosition: latestEventPosition, + maxWaitTimeInSeconds: 30 } - }, - processError: async (err) => { - reject(err); - } - }, - { - startPosition: latestEventPosition, - maxWaitTimeInSeconds: 30 + ); + }); + await subscription!.close(); + + if (eventsReceived.find((event) => event.body === eventSentBeforeSubscribe.body)) { + should.fail("Received event sent before subscribe call with latestEventPosition."); } - ); - }); - await subscription!.close(); - if (eventsReceived.find((event) => event.body === eventSentBeforeSubscribe.body)) { - should.fail("Received event sent before subscribe call with latestEventPosition."); - } + should.equal( + eventsReceived.length, + eventsSentAfterSubscribe.length, + "Not received the same number of events that were sent." + ); + for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { + eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); + eventsReceived[i].properties!.stamp.should.equal( + eventsSentAfterSubscribe[i].properties!.stamp + ); + } + }); - should.equal( - eventsReceived.length, - eventsSentAfterSubscribe.length, - "Not received the same number of events that were sent." 
- ); - for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { - eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); - eventsReceived[i].properties!.stamp.should.equal( - eventsSentAfterSubscribe[i].properties!.stamp - ); - } - }); - - it("'after a particular sequence number' should receive messages correctly", async function(): Promise< - void - > { - const partitionInfo = await consumerClient.getPartitionProperties(partitionId); - let subscription: Subscription | undefined; - let processEventsCalled = false; - const eventsReceived: ReceivedEventData[] = []; - - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - if (!processEventsCalled) { - processEventsCalled = true; - should.equal(data.length, 0, "Received events when none were sent yet."); - await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); - return; - } - eventsReceived.push(...data); - if (eventsReceived.length === eventsSentAfterSubscribe.length) { - resolve(); + it("'after a particular sequence number' should receive messages correctly", async function(): Promise< + void + > { + const partitionInfo = await consumerClient.getPartitionProperties(partitionId); + let subscription: Subscription | undefined; + let processEventsCalled = false; + const eventsReceived: ReceivedEventData[] = []; + + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + if (!processEventsCalled) { + processEventsCalled = true; + should.equal(data.length, 0, "Received events when none were sent yet."); + await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); + return; + } + eventsReceived.push(...data); + if (eventsReceived.length === eventsSentAfterSubscribe.length) { + resolve(); + } + }, + processError: async (err) => { + reject(err); + } + }, + { + startPosition: { sequenceNumber: 
partitionInfo.lastEnqueuedSequenceNumber }, + maxWaitTimeInSeconds: 30 } - }, - processError: async (err) => { - reject(err); - } - }, - { - startPosition: { sequenceNumber: partitionInfo.lastEnqueuedSequenceNumber }, - maxWaitTimeInSeconds: 30 - } - ); - }); - await subscription!.close(); + ); + }); + await subscription!.close(); - if (eventsReceived.find((event) => event.body === eventSentBeforeSubscribe.body)) { - should.fail("Received event sent before subscribe call with last sequence number."); - } + if (eventsReceived.find((event) => event.body === eventSentBeforeSubscribe.body)) { + should.fail("Received event sent before subscribe call with last sequence number."); + } - should.equal( - eventsReceived.length, - eventsSentAfterSubscribe.length, - "Not received the same number of events that were sent." - ); - for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { - eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); - eventsReceived[i].properties!.stamp.should.equal( - eventsSentAfterSubscribe[i].properties!.stamp - ); - } - }); - - it("'after a particular sequence number' with isInclusive should receive messages correctly", async function(): Promise< - void - > { - const partitionInfo = await consumerClient.getPartitionProperties(partitionId); - let subscription: Subscription | undefined; - let processEventsCalled = false; - const eventsReceived: ReceivedEventData[] = []; - - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - if (!processEventsCalled) { - processEventsCalled = true; - should.equal(data.length, 1, "Expected 1 event sent right before subscribe call."); - should.equal( - data[0].body, - eventSentBeforeSubscribe.body, - "Should have received only the 1 event sent right before subscribe call." - ); + should.equal( + eventsReceived.length, + eventsSentAfterSubscribe.length, + "Not received the same number of events that were sent." 
+ ); + for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { + eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); + eventsReceived[i].properties!.stamp.should.equal( + eventsSentAfterSubscribe[i].properties!.stamp + ); + } + }); - await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); - return; + it("'after a particular sequence number' with isInclusive should receive messages correctly", async function(): Promise< + void + > { + const partitionInfo = await consumerClient.getPartitionProperties(partitionId); + let subscription: Subscription | undefined; + let processEventsCalled = false; + const eventsReceived: ReceivedEventData[] = []; + + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + if (!processEventsCalled) { + processEventsCalled = true; + should.equal( + data.length, + 1, + "Expected 1 event sent right before subscribe call." + ); + should.equal( + data[0].body, + eventSentBeforeSubscribe.body, + "Should have received only the 1 event sent right before subscribe call." 
+ ); + + await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); + return; + } + + eventsReceived.push(...data); + if (eventsReceived.length === eventsSentAfterSubscribe.length) { + resolve(); + } + }, + processError: async (err) => { + reject(err); + } + }, + { + startPosition: { + sequenceNumber: partitionInfo.lastEnqueuedSequenceNumber, + isInclusive: true + }, + maxWaitTimeInSeconds: 30 } + ); + }); + await subscription!.close(); - eventsReceived.push(...data); - if (eventsReceived.length === eventsSentAfterSubscribe.length) { - resolve(); - } - }, - processError: async (err) => { - reject(err); - } - }, - { - startPosition: { - sequenceNumber: partitionInfo.lastEnqueuedSequenceNumber, - isInclusive: true - }, - maxWaitTimeInSeconds: 30 + should.equal( + eventsReceived.length, + eventsSentAfterSubscribe.length, + "Not received the same number of events that were sent." + ); + + for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { + eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); + eventsReceived[i].properties!.stamp.should.equal( + eventsSentAfterSubscribe[i].properties!.stamp + ); } - ); - }); - await subscription!.close(); - - should.equal( - eventsReceived.length, - eventsSentAfterSubscribe.length, - "Not received the same number of events that were sent." 
- ); - - for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { - eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); - eventsReceived[i].properties!.stamp.should.equal( - eventsSentAfterSubscribe[i].properties!.stamp - ); - } - }); - - it("'after a particular offset' should receive messages correctly", async function(): Promise< - void - > { - const partitionInfo = await consumerClient.getPartitionProperties(partitionId); - let subscription: Subscription | undefined; - let processEventsCalled = false; - const eventsReceived: ReceivedEventData[] = []; - - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - if (!processEventsCalled) { - processEventsCalled = true; - should.equal(data.length, 0, "Received events when none were sent yet."); - await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); - return; - } - eventsReceived.push(...data); - if (eventsReceived.length === eventsSentAfterSubscribe.length) { - resolve(); + }); + + it("'after a particular offset' should receive messages correctly", async function(): Promise< + void + > { + const partitionInfo = await consumerClient.getPartitionProperties(partitionId); + let subscription: Subscription | undefined; + let processEventsCalled = false; + const eventsReceived: ReceivedEventData[] = []; + + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + if (!processEventsCalled) { + processEventsCalled = true; + should.equal(data.length, 0, "Received events when none were sent yet."); + await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); + return; + } + eventsReceived.push(...data); + if (eventsReceived.length === eventsSentAfterSubscribe.length) { + resolve(); + } + }, + processError: async (err) => { + reject(err); + } + }, + { + startPosition: { offset: 
partitionInfo.lastEnqueuedOffset }, + maxWaitTimeInSeconds: 30 } - }, - processError: async (err) => { - reject(err); - } - }, - { - startPosition: { offset: partitionInfo.lastEnqueuedOffset }, - maxWaitTimeInSeconds: 30 - } - ); - }); - await subscription!.close(); + ); + }); + await subscription!.close(); - if (eventsReceived.find((event) => event.body === eventSentBeforeSubscribe.body)) { - should.fail("Received event sent before subscribe call with last offset."); - } + if (eventsReceived.find((event) => event.body === eventSentBeforeSubscribe.body)) { + should.fail("Received event sent before subscribe call with last offset."); + } - should.equal( - eventsReceived.length, - eventsSentAfterSubscribe.length, - "Not received the same number of events that were sent." - ); - for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { - eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); - eventsReceived[i].properties!.stamp.should.equal( - eventsSentAfterSubscribe[i].properties!.stamp - ); - } - }); - - it("'after a particular offset' with isInclusive should receive messages correctly", async function(): Promise< - void - > { - const partitionInfo = await consumerClient.getPartitionProperties(partitionId); - let subscription: Subscription | undefined; - let processEventsCalled = false; - const eventsReceived: ReceivedEventData[] = []; - - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - if (!processEventsCalled) { - processEventsCalled = true; - should.equal(data.length, 1, "Expected 1 event sent right before subscribe call."); - should.equal( - data[0].body, - eventSentBeforeSubscribe.body, - "Should have received only the 1 event sent right before subscribe call." - ); + should.equal( + eventsReceived.length, + eventsSentAfterSubscribe.length, + "Not received the same number of events that were sent." 
+ ); + for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { + eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); + eventsReceived[i].properties!.stamp.should.equal( + eventsSentAfterSubscribe[i].properties!.stamp + ); + } + }); - await producerClient.sendBatch(eventsSentAfterSubscribe, { - partitionId - }); - return; + it("'after a particular offset' with isInclusive should receive messages correctly", async function(): Promise< + void + > { + const partitionInfo = await consumerClient.getPartitionProperties(partitionId); + let subscription: Subscription | undefined; + let processEventsCalled = false; + const eventsReceived: ReceivedEventData[] = []; + + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + if (!processEventsCalled) { + processEventsCalled = true; + should.equal( + data.length, + 1, + "Expected 1 event sent right before subscribe call." + ); + should.equal( + data[0].body, + eventSentBeforeSubscribe.body, + "Should have received only the 1 event sent right before subscribe call." 
+ ); + + await producerClient.sendBatch(eventsSentAfterSubscribe, { + partitionId + }); + return; + } + + eventsReceived.push(...data); + if (eventsReceived.length === eventsSentAfterSubscribe.length) { + resolve(); + } + }, + processError: async (err) => { + reject(err); + } + }, + { + startPosition: { + offset: partitionInfo.lastEnqueuedOffset, + isInclusive: true + }, + maxWaitTimeInSeconds: 30 } + ); + }); + await subscription!.close(); - eventsReceived.push(...data); - if (eventsReceived.length === eventsSentAfterSubscribe.length) { - resolve(); - } - }, - processError: async (err) => { - reject(err); - } - }, - { - startPosition: { - offset: partitionInfo.lastEnqueuedOffset, - isInclusive: true - }, - maxWaitTimeInSeconds: 30 - } - ); - }); - await subscription!.close(); - - should.equal( - eventsReceived.length, - eventsSentAfterSubscribe.length, - "Not received the same number of events that were sent." - ); - - for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { - eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); - eventsReceived[i].properties!.stamp.should.equal( - eventsSentAfterSubscribe[i].properties!.stamp - ); - } - }); - - it("'after a particular enqueued time' should receive messages correctly", async function(): Promise< - void - > { - const partitionInfo = await consumerClient.getPartitionProperties(partitionId); - let subscription: Subscription | undefined; - let processEventsCalled = false; - const eventsReceived: ReceivedEventData[] = []; - - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - if (!processEventsCalled) { - processEventsCalled = true; - should.equal(data.length, 0, "Received events when none were sent yet."); - await producerClient.sendBatch(eventsSentAfterSubscribe, { - partitionId - }); - return; - } + should.equal( + eventsReceived.length, + eventsSentAfterSubscribe.length, + "Not received the same number of 
events that were sent." + ); - eventsReceived.push(...data); - if (eventsReceived.length === eventsSentAfterSubscribe.length) { - resolve(); - } - }, - processError: async (err) => { - reject(err); - } - }, - { - startPosition: { enqueuedOn: partitionInfo.lastEnqueuedOnUtc }, - maxWaitTimeInSeconds: 30 + for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { + eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); + eventsReceived[i].properties!.stamp.should.equal( + eventsSentAfterSubscribe[i].properties!.stamp + ); } - ); - }); - await subscription!.close(); + }); - if (eventsReceived.find((event) => event.body === eventSentBeforeSubscribe.body)) { - should.fail("Received event sent before subscribe call with last offset."); - } + it("'after a particular enqueued time' should receive messages correctly", async function(): Promise< + void + > { + const partitionInfo = await consumerClient.getPartitionProperties(partitionId); + let subscription: Subscription | undefined; + let processEventsCalled = false; + const eventsReceived: ReceivedEventData[] = []; + + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + if (!processEventsCalled) { + processEventsCalled = true; + should.equal(data.length, 0, "Received events when none were sent yet."); + await producerClient.sendBatch(eventsSentAfterSubscribe, { + partitionId + }); + return; + } + + eventsReceived.push(...data); + if (eventsReceived.length === eventsSentAfterSubscribe.length) { + resolve(); + } + }, + processError: async (err) => { + reject(err); + } + }, + { + startPosition: { enqueuedOn: partitionInfo.lastEnqueuedOnUtc }, + maxWaitTimeInSeconds: 30 + } + ); + }); + await subscription!.close(); - should.equal( - eventsReceived.length, - eventsSentAfterSubscribe.length, - "Not received the same number of events that were sent." 
- ); - for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { - eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); - eventsReceived[i].properties!.stamp.should.equal( - eventsSentAfterSubscribe[i].properties!.stamp - ); - } - }); - }); + if (eventsReceived.find((event) => event.body === eventSentBeforeSubscribe.body)) { + should.fail("Received event sent before subscribe call with last offset."); + } - describe("subscribe() with trackLastEnqueuedEventProperties", function(): void { - it("should have lastEnqueuedEventProperties populated", async function(): Promise { - const partitionId = partitionIds[0]; - - const eventData = { body: "Hello awesome world " + Math.random() }; - await producerClient.sendBatch([eventData], { partitionId }); - debug("sent: ", eventData); - - const pInfo = await consumerClient.getPartitionProperties(partitionId); - debug("partition info: ", pInfo); - - let subscription: Subscription | undefined; - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data, context) => { - data.length.should.equal(1); - should.exist(context.lastEnqueuedEventProperties); - context.lastEnqueuedEventProperties!.offset!.should.equal(pInfo.lastEnqueuedOffset); - context.lastEnqueuedEventProperties!.sequenceNumber!.should.equal( - pInfo.lastEnqueuedSequenceNumber - ); - context - .lastEnqueuedEventProperties!.enqueuedOn!.getTime() - .should.equal(pInfo.lastEnqueuedOnUtc.getTime()); - context - .lastEnqueuedEventProperties!.retrievedOn!.getTime() - .should.be.greaterThan(Date.now() - 60000); - - resolve(); - }, - processError: async (err) => { - reject(err); - } - }, - { - startPosition: earliestEventPosition, - maxBatchSize: 1, - trackLastEnqueuedEventProperties: true + should.equal( + eventsReceived.length, + eventsSentAfterSubscribe.length, + "Not received the same number of events that were sent." 
+ ); + for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { + eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); + eventsReceived[i].properties!.stamp.should.equal( + eventsSentAfterSubscribe[i].properties!.stamp + ); } - ); + }); }); - await subscription!.close(); - }); - }); - describe("Negative scenarios", function(): void { - it("should throw MessagingEntityNotFoundError for non existing consumer group", async function(): Promise< - void - > { - const badConsumerClient = new EventHubConsumerClient( - "boo", - service.connectionString, - service.path - ); - let subscription: Subscription | undefined; - const caughtErr = await new Promise((resolve) => { - subscription = badConsumerClient.subscribe({ - processEvents: async () => { - /** Nothing to do here */ - }, - processError: async (err) => { - resolve(err); - } + describe("subscribe() with trackLastEnqueuedEventProperties", function(): void { + it("should have lastEnqueuedEventProperties populated", async function(): Promise { + const partitionId = partitionIds[0]; + + const eventData = { body: "Hello awesome world " + Math.random() }; + await producerClient.sendBatch([eventData], { partitionId }); + debug("sent: ", eventData); + + const pInfo = await consumerClient.getPartitionProperties(partitionId); + debug("partition info: ", pInfo); + + let subscription: Subscription | undefined; + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data, context) => { + data.length.should.equal(1); + should.exist(context.lastEnqueuedEventProperties); + context.lastEnqueuedEventProperties!.offset!.should.equal( + pInfo.lastEnqueuedOffset + ); + context.lastEnqueuedEventProperties!.sequenceNumber!.should.equal( + pInfo.lastEnqueuedSequenceNumber + ); + context + .lastEnqueuedEventProperties!.enqueuedOn!.getTime() + .should.equal(pInfo.lastEnqueuedOnUtc.getTime()); + context + 
.lastEnqueuedEventProperties!.retrievedOn!.getTime() + .should.be.greaterThan(Date.now() - 60000); + + resolve(); + }, + processError: async (err) => { + reject(err); + } + }, + { + startPosition: earliestEventPosition, + maxBatchSize: 1, + trackLastEnqueuedEventProperties: true + } + ); + }); + await subscription!.close(); }); }); - await subscription!.close(); - await badConsumerClient.close(); - - should.exist(caughtErr); - should.equal((caughtErr as MessagingError).code, "MessagingEntityNotFoundError"); - }); - - it(`should throw an invalid EventHub address error for invalid partition`, async function(): Promise< - void - > { - let subscription: Subscription | undefined; - const caughtErr = await new Promise((resolve) => { - subscription = consumerClient.subscribe("boo", { - processEvents: async () => { - /** Nothing to do here */ - }, - processError: async (err) => { - resolve(err); - } + + describe("Negative scenarios", function(): void { + it("should throw MessagingEntityNotFoundError for non existing consumer group", async function(): Promise< + void + > { + const badConsumerClient = new EventHubConsumerClient( + "boo", + service.connectionString, + service.path + ); + let subscription: Subscription | undefined; + const caughtErr = await new Promise((resolve) => { + subscription = badConsumerClient.subscribe({ + processEvents: async () => { + /** Nothing to do here */ + }, + processError: async (err) => { + resolve(err); + } + }); + }); + await subscription!.close(); + await badConsumerClient.close(); + + should.exist(caughtErr); + should.equal((caughtErr as MessagingError).code, "MessagingEntityNotFoundError"); + }); + + it(`should throw an invalid EventHub address error for invalid partition`, async function(): Promise< + void + > { + let subscription: Subscription | undefined; + const caughtErr = await new Promise((resolve) => { + subscription = consumerClient.subscribe("boo", { + processEvents: async () => { + /** Nothing to do here */ + }, + 
processError: async (err) => { + resolve(err); + } + }); + }); + await subscription!.close(); + should.exist(caughtErr); + should.equal((caughtErr as MessagingError).code, "ArgumentOutOfRangeError"); }); }); - await subscription!.close(); - should.exist(caughtErr); - should.equal((caughtErr as MessagingError).code, "ArgumentOutOfRangeError"); - }); + }).timeout(120000); }); -}).timeout(120000); +}); diff --git a/sdk/eventhub/event-hubs/test/public/hubruntime.spec.ts b/sdk/eventhub/event-hubs/test/public/hubruntime.spec.ts index 8020524a8339..95f82b2a9072 100644 --- a/sdk/eventhub/event-hubs/test/public/hubruntime.spec.ts +++ b/sdk/eventhub/event-hubs/test/public/hubruntime.spec.ts @@ -7,500 +7,536 @@ import chaiAsPromised from "chai-as-promised"; chai.use(chaiAsPromised); import debugModule from "debug"; const debug = debugModule("azure:event-hubs:hubruntime-spec"); -import { EnvVarKeys, getEnvVars, setTracerForTest } from "./utils/testUtils"; +import { + EnvVarKeys, + getEnvVars, + getEnvVarValue, + isNode, + setTracerForTest +} from "./utils/testUtils"; import { setSpan, context } from "@azure/core-tracing"; -const env = getEnvVars(); - +import { versionsToTest } from "@azure/test-utils-multi-version"; import { AbortController } from "@azure/abort-controller"; import { SpanGraph } from "@azure/core-tracing"; import { EventHubProducerClient, EventHubConsumerClient, MessagingError } from "../../src"; +import { createMockServer } from "./utils/mockService"; -describe("RuntimeInformation", function(): void { - let producerClient: EventHubProducerClient; - let consumerClient: EventHubConsumerClient; - const service = { - connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - path: env[EnvVarKeys.EVENTHUB_NAME] - }; - before("validate environment", function(): void { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." 
- ); - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." - ); - }); - - beforeEach(async () => { - debug("Creating the clients.."); - producerClient = new EventHubProducerClient(service.connectionString, service.path); - consumerClient = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path - ); - }); +const serviceVersions = ["mock", "live"] as const; +const testTarget = getEnvVarValue("TEST_TARGET") || "live"; - afterEach("close the connection", async function(): Promise { - await producerClient.close(); - await consumerClient.close(); - }); +describe("public/hubruntime.spec.ts", function() { + versionsToTest(serviceVersions, { versionForRecording: testTarget }, (serviceVersion) => { + const env = getEnvVars(serviceVersion as "live" | "mock"); + if (isNode) { + if (serviceVersion === "mock") { + let service: ReturnType; + before("Starting mock server", async () => { + service = createMockServer(); - function arrayOfIncreasingNumbersFromZero(length: any): Array { - const result = new Array(length); - for (let i = 0; i < length; i++) { - result[i] = `${i}`; + return service.start(); + }); + after("Stopping mock server", async () => { + return service?.stop(); + }); + } } - return result; - } - describe("getPartitionIds", function(): void { - it("EventHubProducerClient returns an array of partition IDs", async function(): Promise { - const ids = await producerClient.getPartitionIds({}); - ids.should.have.members(arrayOfIncreasingNumbersFromZero(ids.length)); - }); + describe("RuntimeInformation", function(): void { + let producerClient: EventHubProducerClient; + let consumerClient: EventHubConsumerClient; + const service = { + connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + path: env[EnvVarKeys.EVENTHUB_NAME] + }; + before("validate environment", function(): void { + should.exist( + 
env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." + ); + should.exist( + env[EnvVarKeys.EVENTHUB_NAME], + "define EVENTHUB_NAME in your environment before running integration tests." + ); + }); - it("EventHubConsumerClient returns an array of partition IDs", async function(): Promise { - const ids = await consumerClient.getPartitionIds({}); - ids.should.have.members(arrayOfIncreasingNumbersFromZero(ids.length)); - }); + beforeEach(async () => { + debug("Creating the clients.."); + producerClient = new EventHubProducerClient(service.connectionString, service.path); + consumerClient = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path + ); + }); - it("EventHubProducerClient respects cancellationTokens", async function(): Promise { - try { - const controller = new AbortController(); - setTimeout(() => controller.abort(), 1); - await producerClient.getPartitionIds({ - abortSignal: controller.signal - }); - throw new Error(`Test failure`); - } catch (err) { - err.message.should.equal("The operation was aborted."); + afterEach("close the connection", async function(): Promise { + await producerClient.close(); + await consumerClient.close(); + }); + + function arrayOfIncreasingNumbersFromZero(length: any): Array { + const result = new Array(length); + for (let i = 0; i < length; i++) { + result[i] = `${i}`; + } + return result; } - }); - it("EventHubConsumerClient respects cancellationTokens", async function(): Promise { - try { - const controller = new AbortController(); - setTimeout(() => controller.abort(), 1); - await consumerClient.getPartitionIds({ - abortSignal: controller.signal + describe("getPartitionIds", function(): void { + it("EventHubProducerClient returns an array of partition IDs", async function(): Promise< + void + > { + const ids = await producerClient.getPartitionIds({}); + 
ids.should.have.members(arrayOfIncreasingNumbersFromZero(ids.length)); }); - throw new Error(`Test failure`); - } catch (err) { - err.message.should.equal("The operation was aborted."); - } - }); - it("EventHubProducerClient can be manually traced", async function(): Promise { - const { tracer, resetTracer } = setTracerForTest(); + it("EventHubConsumerClient returns an array of partition IDs", async function(): Promise< + void + > { + const ids = await consumerClient.getPartitionIds({}); + ids.should.have.members(arrayOfIncreasingNumbersFromZero(ids.length)); + }); - const rootSpan = tracer.startSpan("root"); - const ids = await producerClient.getPartitionIds({ - tracingOptions: { - tracingContext: setSpan(context.active(), rootSpan) - } - }); - ids.should.have.members(arrayOfIncreasingNumbersFromZero(ids.length)); - rootSpan.end(); - - const rootSpans = tracer.getRootSpans(); - rootSpans.length.should.equal(1, "Should only have one root span."); - rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); - - const expectedGraph: SpanGraph = { - roots: [ - { - name: rootSpan.name, - children: [ - { - name: "Azure.EventHubs.getEventHubProperties", - children: [] - } - ] + it("EventHubProducerClient respects cancellationTokens", async function(): Promise { + try { + const controller = new AbortController(); + setTimeout(() => controller.abort(), 1); + await producerClient.getPartitionIds({ + abortSignal: controller.signal + }); + throw new Error(`Test failure`); + } catch (err) { + err.message.should.equal("The operation was aborted."); } - ] - }; + }); - tracer.getSpanGraph(rootSpan.context().traceId).should.eql(expectedGraph); - tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); - resetTracer(); - }); + it("EventHubConsumerClient respects cancellationTokens", async function(): Promise { + try { + const controller = new AbortController(); + setTimeout(() => controller.abort(), 1); + await 
consumerClient.getPartitionIds({ + abortSignal: controller.signal + }); + throw new Error(`Test failure`); + } catch (err) { + err.message.should.equal("The operation was aborted."); + } + }); - it("EventHubConsumerClient can be manually traced", async function(): Promise { - const { tracer, resetTracer } = setTracerForTest(); + it("EventHubProducerClient can be manually traced", async function(): Promise { + const { tracer, resetTracer } = setTracerForTest(); - const rootSpan = tracer.startSpan("root"); - const ids = await consumerClient.getPartitionIds({ - tracingOptions: { - tracingContext: setSpan(context.active(), rootSpan) - } - }); - ids.should.have.members(arrayOfIncreasingNumbersFromZero(ids.length)); - rootSpan.end(); - - const rootSpans = tracer.getRootSpans(); - rootSpans.length.should.equal(1, "Should only have one root span."); - rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); - - const expectedGraph: SpanGraph = { - roots: [ - { - name: rootSpan.name, - children: [ + const rootSpan = tracer.startSpan("root"); + const ids = await producerClient.getPartitionIds({ + tracingOptions: { + tracingContext: setSpan(context.active(), rootSpan) + } + }); + ids.should.have.members(arrayOfIncreasingNumbersFromZero(ids.length)); + rootSpan.end(); + + const rootSpans = tracer.getRootSpans(); + rootSpans.length.should.equal(1, "Should only have one root span."); + rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); + + const expectedGraph: SpanGraph = { + roots: [ { - name: "Azure.EventHubs.getEventHubProperties", - children: [] + name: rootSpan.name, + children: [ + { + name: "Azure.EventHubs.getEventHubProperties", + children: [] + } + ] } ] - } - ] - }; + }; - tracer.getSpanGraph(rootSpan.context().traceId).should.eql(expectedGraph); - tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); - resetTracer(); - }); - }); + 
tracer.getSpanGraph(rootSpan.context().traceId).should.eql(expectedGraph); + tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); + resetTracer(); + }); - describe("hub runtime information", function(): void { - it("EventHubProducerClient gets the hub runtime information", async function(): Promise { - const hubRuntimeInfo = await producerClient.getEventHubProperties(); - debug(hubRuntimeInfo); - hubRuntimeInfo.name.should.equal(service.path); + it("EventHubConsumerClient can be manually traced", async function(): Promise { + const { tracer, resetTracer } = setTracerForTest(); - hubRuntimeInfo.partitionIds.should.have.members( - arrayOfIncreasingNumbersFromZero(hubRuntimeInfo.partitionIds.length) - ); - hubRuntimeInfo.createdOn.should.be.instanceof(Date); - }); + const rootSpan = tracer.startSpan("root"); + const ids = await consumerClient.getPartitionIds({ + tracingOptions: { + tracingContext: setSpan(context.active(), rootSpan) + } + }); + ids.should.have.members(arrayOfIncreasingNumbersFromZero(ids.length)); + rootSpan.end(); - it("EventHubConsumerClient gets the hub runtime information", async function(): Promise { - const hubRuntimeInfo = await consumerClient.getEventHubProperties(); - debug(hubRuntimeInfo); - hubRuntimeInfo.name.should.equal(service.path); + const rootSpans = tracer.getRootSpans(); + rootSpans.length.should.equal(1, "Should only have one root span."); + rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); - hubRuntimeInfo.partitionIds.should.have.members( - arrayOfIncreasingNumbersFromZero(hubRuntimeInfo.partitionIds.length) - ); - hubRuntimeInfo.createdOn.should.be.instanceof(Date); - }); + const expectedGraph: SpanGraph = { + roots: [ + { + name: rootSpan.name, + children: [ + { + name: "Azure.EventHubs.getEventHubProperties", + children: [] + } + ] + } + ] + }; - it("EventHubProducerClient can cancel a request for hub runtime information", async function(): Promise< - 
void - > { - try { - const controller = new AbortController(); - setTimeout(() => controller.abort(), 1); - await producerClient.getEventHubProperties({ - abortSignal: controller.signal + tracer.getSpanGraph(rootSpan.context().traceId).should.eql(expectedGraph); + tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); + resetTracer(); }); - throw new Error(`Test failure`); - } catch (err) { - err.message.should.equal("The operation was aborted."); - } - }); + }); - it("EventHubConsumerClient can cancel a request for hub runtime information", async function(): Promise< - void - > { - try { - const controller = new AbortController(); - setTimeout(() => controller.abort(), 1); - await consumerClient.getEventHubProperties({ - abortSignal: controller.signal + describe("hub runtime information", function(): void { + it("EventHubProducerClient gets the hub runtime information", async function(): Promise< + void + > { + const hubRuntimeInfo = await producerClient.getEventHubProperties(); + debug(hubRuntimeInfo); + hubRuntimeInfo.name.should.equal(service.path); + + hubRuntimeInfo.partitionIds.should.have.members( + arrayOfIncreasingNumbersFromZero(hubRuntimeInfo.partitionIds.length) + ); + hubRuntimeInfo.createdOn.should.be.instanceof(Date); }); - throw new Error(`Test failure`); - } catch (err) { - err.message.should.equal("The operation was aborted."); - } - }); - it("EventHubProducerClient can be manually traced", async function(): Promise { - const { tracer, resetTracer } = setTracerForTest(); + it("EventHubConsumerClient gets the hub runtime information", async function(): Promise< + void + > { + const hubRuntimeInfo = await consumerClient.getEventHubProperties(); + debug(hubRuntimeInfo); + hubRuntimeInfo.name.should.equal(service.path); + + hubRuntimeInfo.partitionIds.should.have.members( + arrayOfIncreasingNumbersFromZero(hubRuntimeInfo.partitionIds.length) + ); + hubRuntimeInfo.createdOn.should.be.instanceof(Date); + }); - const 
rootSpan = tracer.startSpan("root"); - const hubRuntimeInfo = await producerClient.getEventHubProperties({ - tracingOptions: { - tracingContext: setSpan(context.active(), rootSpan) - } - }); - hubRuntimeInfo.partitionIds.should.have.members( - arrayOfIncreasingNumbersFromZero(hubRuntimeInfo.partitionIds.length) - ); - rootSpan.end(); - - const rootSpans = tracer.getRootSpans(); - rootSpans.length.should.equal(1, "Should only have one root span."); - rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); - - const expectedGraph: SpanGraph = { - roots: [ - { - name: rootSpan.name, - children: [ + it("EventHubProducerClient can cancel a request for hub runtime information", async function(): Promise< + void + > { + try { + const controller = new AbortController(); + setTimeout(() => controller.abort(), 1); + await producerClient.getEventHubProperties({ + abortSignal: controller.signal + }); + throw new Error(`Test failure`); + } catch (err) { + err.message.should.equal("The operation was aborted."); + } + }); + + it("EventHubConsumerClient can cancel a request for hub runtime information", async function(): Promise< + void + > { + try { + const controller = new AbortController(); + setTimeout(() => controller.abort(), 1); + await consumerClient.getEventHubProperties({ + abortSignal: controller.signal + }); + throw new Error(`Test failure`); + } catch (err) { + err.message.should.equal("The operation was aborted."); + } + }); + + it("EventHubProducerClient can be manually traced", async function(): Promise { + const { tracer, resetTracer } = setTracerForTest(); + + const rootSpan = tracer.startSpan("root"); + const hubRuntimeInfo = await producerClient.getEventHubProperties({ + tracingOptions: { + tracingContext: setSpan(context.active(), rootSpan) + } + }); + hubRuntimeInfo.partitionIds.should.have.members( + arrayOfIncreasingNumbersFromZero(hubRuntimeInfo.partitionIds.length) + ); + rootSpan.end(); + + const rootSpans = 
tracer.getRootSpans(); + rootSpans.length.should.equal(1, "Should only have one root span."); + rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); + + const expectedGraph: SpanGraph = { + roots: [ { - name: "Azure.EventHubs.getEventHubProperties", - children: [] + name: rootSpan.name, + children: [ + { + name: "Azure.EventHubs.getEventHubProperties", + children: [] + } + ] } ] - } - ] - }; - - tracer.getSpanGraph(rootSpan.context().traceId).should.eql(expectedGraph); - tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); - resetTracer(); - }); + }; - it("EventHubConsumerClient can be manually traced", async function(): Promise { - const { tracer, resetTracer } = setTracerForTest(); + tracer.getSpanGraph(rootSpan.context().traceId).should.eql(expectedGraph); + tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); + resetTracer(); + }); - const rootSpan = tracer.startSpan("root"); - const hubRuntimeInfo = await consumerClient.getEventHubProperties({ - tracingOptions: { - tracingContext: setSpan(context.active(), rootSpan) - } - }); - hubRuntimeInfo.partitionIds.should.have.members( - arrayOfIncreasingNumbersFromZero(hubRuntimeInfo.partitionIds.length) - ); - rootSpan.end(); - - const rootSpans = tracer.getRootSpans(); - rootSpans.length.should.equal(1, "Should only have one root span."); - rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); - - const expectedGraph: SpanGraph = { - roots: [ - { - name: rootSpan.name, - children: [ + it("EventHubConsumerClient can be manually traced", async function(): Promise { + const { tracer, resetTracer } = setTracerForTest(); + + const rootSpan = tracer.startSpan("root"); + const hubRuntimeInfo = await consumerClient.getEventHubProperties({ + tracingOptions: { + tracingContext: setSpan(context.active(), rootSpan) + } + }); + hubRuntimeInfo.partitionIds.should.have.members( + 
arrayOfIncreasingNumbersFromZero(hubRuntimeInfo.partitionIds.length) + ); + rootSpan.end(); + + const rootSpans = tracer.getRootSpans(); + rootSpans.length.should.equal(1, "Should only have one root span."); + rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); + + const expectedGraph: SpanGraph = { + roots: [ { - name: "Azure.EventHubs.getEventHubProperties", - children: [] + name: rootSpan.name, + children: [ + { + name: "Azure.EventHubs.getEventHubProperties", + children: [] + } + ] } ] - } - ] - }; + }; - tracer.getSpanGraph(rootSpan.context().traceId).should.eql(expectedGraph); - tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); - resetTracer(); - }); - }); - - describe("partition runtime information", function(): void { - it("EventHubProducerClient should throw an error if partitionId is missing", async function(): Promise< - void - > { - try { - await producerClient.getPartitionProperties(undefined as any); - throw new Error("Test failure"); - } catch (err) { - err.name.should.equal("TypeError"); - err.message.should.equal( - `getPartitionProperties called without required argument "partitionId"` - ); - } - }); + tracer.getSpanGraph(rootSpan.context().traceId).should.eql(expectedGraph); + tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); + resetTracer(); + }); + }); - it("EventHubConsumerClient should throw an error if partitionId is missing", async function(): Promise< - void - > { - try { - await consumerClient.getPartitionProperties(undefined as any); - throw new Error("Test failure"); - } catch (err) { - err.name.should.equal("TypeError"); - err.message.should.equal( - `getPartitionProperties called without required argument "partitionId"` - ); - } - }); + describe("partition runtime information", function(): void { + it("EventHubProducerClient should throw an error if partitionId is missing", async function(): Promise< + void + > { + try { + 
await producerClient.getPartitionProperties(undefined as any); + throw new Error("Test failure"); + } catch (err) { + err.name.should.equal("TypeError"); + err.message.should.equal( + `getPartitionProperties called without required argument "partitionId"` + ); + } + }); - it("EventHubProducerClient gets the partition runtime information with partitionId as a string", async function(): Promise< - void - > { - const partitionRuntimeInfo = await producerClient.getPartitionProperties("0"); - debug(partitionRuntimeInfo); - partitionRuntimeInfo.partitionId.should.equal("0"); - partitionRuntimeInfo.eventHubName.should.equal(service.path); - partitionRuntimeInfo.lastEnqueuedOnUtc.should.be.instanceof(Date); - should.exist(partitionRuntimeInfo.lastEnqueuedSequenceNumber); - should.exist(partitionRuntimeInfo.lastEnqueuedOffset); - }); + it("EventHubConsumerClient should throw an error if partitionId is missing", async function(): Promise< + void + > { + try { + await consumerClient.getPartitionProperties(undefined as any); + throw new Error("Test failure"); + } catch (err) { + err.name.should.equal("TypeError"); + err.message.should.equal( + `getPartitionProperties called without required argument "partitionId"` + ); + } + }); - it("EventHubConsumerClient gets the partition runtime information with partitionId as a string", async function(): Promise< - void - > { - const partitionRuntimeInfo = await consumerClient.getPartitionProperties("0"); - debug(partitionRuntimeInfo); - partitionRuntimeInfo.partitionId.should.equal("0"); - partitionRuntimeInfo.eventHubName.should.equal(service.path); - partitionRuntimeInfo.lastEnqueuedOnUtc.should.be.instanceof(Date); - should.exist(partitionRuntimeInfo.lastEnqueuedSequenceNumber); - should.exist(partitionRuntimeInfo.lastEnqueuedOffset); - }); + it("EventHubProducerClient gets the partition runtime information with partitionId as a string", async function(): Promise< + void + > { + const partitionRuntimeInfo = await 
producerClient.getPartitionProperties("0"); + debug(partitionRuntimeInfo); + partitionRuntimeInfo.partitionId.should.equal("0"); + partitionRuntimeInfo.eventHubName.should.equal(service.path); + partitionRuntimeInfo.lastEnqueuedOnUtc.should.be.instanceof(Date); + should.exist(partitionRuntimeInfo.lastEnqueuedSequenceNumber); + should.exist(partitionRuntimeInfo.lastEnqueuedOffset); + }); - it("EventHubProducerClient gets the partition runtime information with partitionId as a number", async function(): Promise< - void - > { - const partitionRuntimeInfo = await producerClient.getPartitionProperties(0 as any); - debug(partitionRuntimeInfo); - partitionRuntimeInfo.partitionId.should.equal("0"); - partitionRuntimeInfo.eventHubName.should.equal(service.path); - partitionRuntimeInfo.lastEnqueuedOnUtc.should.be.instanceof(Date); - should.exist(partitionRuntimeInfo.lastEnqueuedSequenceNumber); - should.exist(partitionRuntimeInfo.lastEnqueuedOffset); - }); + it("EventHubConsumerClient gets the partition runtime information with partitionId as a string", async function(): Promise< + void + > { + const partitionRuntimeInfo = await consumerClient.getPartitionProperties("0"); + debug(partitionRuntimeInfo); + partitionRuntimeInfo.partitionId.should.equal("0"); + partitionRuntimeInfo.eventHubName.should.equal(service.path); + partitionRuntimeInfo.lastEnqueuedOnUtc.should.be.instanceof(Date); + should.exist(partitionRuntimeInfo.lastEnqueuedSequenceNumber); + should.exist(partitionRuntimeInfo.lastEnqueuedOffset); + }); - it("EventHubConsumerClient gets the partition runtime information with partitionId as a number", async function(): Promise< - void - > { - const partitionRuntimeInfo = await consumerClient.getPartitionProperties(0 as any); - debug(partitionRuntimeInfo); - partitionRuntimeInfo.partitionId.should.equal("0"); - partitionRuntimeInfo.eventHubName.should.equal(service.path); - partitionRuntimeInfo.lastEnqueuedOnUtc.should.be.instanceof(Date); - 
should.exist(partitionRuntimeInfo.lastEnqueuedSequenceNumber); - should.exist(partitionRuntimeInfo.lastEnqueuedOffset); - }); + it("EventHubProducerClient gets the partition runtime information with partitionId as a number", async function(): Promise< + void + > { + const partitionRuntimeInfo = await producerClient.getPartitionProperties(0 as any); + debug(partitionRuntimeInfo); + partitionRuntimeInfo.partitionId.should.equal("0"); + partitionRuntimeInfo.eventHubName.should.equal(service.path); + partitionRuntimeInfo.lastEnqueuedOnUtc.should.be.instanceof(Date); + should.exist(partitionRuntimeInfo.lastEnqueuedSequenceNumber); + should.exist(partitionRuntimeInfo.lastEnqueuedOffset); + }); - it("EventHubProducerClient bubbles up error from service for invalid partitionId", async function(): Promise< - void - > { - try { - await producerClient.getPartitionProperties("boo"); - throw new Error("Test failure"); - } catch (err) { - debug(`>>>> Received error - `, err); - should.exist(err); - should.equal((err as MessagingError).code, "ArgumentOutOfRangeError"); - } - }); + it("EventHubConsumerClient gets the partition runtime information with partitionId as a number", async function(): Promise< + void + > { + const partitionRuntimeInfo = await consumerClient.getPartitionProperties(0 as any); + debug(partitionRuntimeInfo); + partitionRuntimeInfo.partitionId.should.equal("0"); + partitionRuntimeInfo.eventHubName.should.equal(service.path); + partitionRuntimeInfo.lastEnqueuedOnUtc.should.be.instanceof(Date); + should.exist(partitionRuntimeInfo.lastEnqueuedSequenceNumber); + should.exist(partitionRuntimeInfo.lastEnqueuedOffset); + }); - it("EventHubConsumerClient bubbles up error from service for invalid partitionId", async function(): Promise< - void - > { - try { - await consumerClient.getPartitionProperties("boo"); - throw new Error("Test failure"); - } catch (err) { - debug(`>>>> Received error - `, err); - should.exist(err); - should.equal((err as MessagingError).code, 
"ArgumentOutOfRangeError"); - } - }); + it("EventHubProducerClient bubbles up error from service for invalid partitionId", async function(): Promise< + void + > { + try { + await producerClient.getPartitionProperties("boo"); + throw new Error("Test failure"); + } catch (err) { + debug(`>>>> Received error - `, err); + should.exist(err); + should.equal((err as MessagingError).code, "ArgumentOutOfRangeError"); + } + }); - it("EventHubProducerClient can cancel a request for getPartitionInformation", async function(): Promise< - void - > { - try { - const controller = new AbortController(); - setTimeout(() => controller.abort(), 1); - await producerClient.getPartitionProperties("0", { - abortSignal: controller.signal + it("EventHubConsumerClient bubbles up error from service for invalid partitionId", async function(): Promise< + void + > { + try { + await consumerClient.getPartitionProperties("boo"); + throw new Error("Test failure"); + } catch (err) { + debug(`>>>> Received error - `, err); + should.exist(err); + should.equal((err as MessagingError).code, "ArgumentOutOfRangeError"); + } }); - throw new Error(`Test failure`); - } catch (err) { - err.message.should.equal("The operation was aborted."); - } - }); - it("EventHubConsumerClient can cancel a request for getPartitionInformation", async function(): Promise< - void - > { - try { - const controller = new AbortController(); - setTimeout(() => controller.abort(), 1); - await consumerClient.getPartitionProperties("0", { - abortSignal: controller.signal + it("EventHubProducerClient can cancel a request for getPartitionInformation", async function(): Promise< + void + > { + try { + const controller = new AbortController(); + setTimeout(() => controller.abort(), 1); + await producerClient.getPartitionProperties("0", { + abortSignal: controller.signal + }); + throw new Error(`Test failure`); + } catch (err) { + err.message.should.equal("The operation was aborted."); + } }); - throw new Error(`Test failure`); - } catch 
(err) { - err.message.should.equal("The operation was aborted."); - } - }); - it("EventHubProducerClient can be manually traced", async function(): Promise { - const { tracer, resetTracer } = setTracerForTest(); + it("EventHubConsumerClient can cancel a request for getPartitionInformation", async function(): Promise< + void + > { + try { + const controller = new AbortController(); + setTimeout(() => controller.abort(), 1); + await consumerClient.getPartitionProperties("0", { + abortSignal: controller.signal + }); + throw new Error(`Test failure`); + } catch (err) { + err.message.should.equal("The operation was aborted."); + } + }); - const rootSpan = tracer.startSpan("root"); - const partitionRuntimeInfo = await producerClient.getPartitionProperties("0", { - tracingOptions: { - tracingContext: setSpan(context.active(), rootSpan) - } - }); - partitionRuntimeInfo.partitionId.should.equal("0"); - partitionRuntimeInfo.eventHubName.should.equal(service.path); - partitionRuntimeInfo.lastEnqueuedOnUtc.should.be.instanceof(Date); - should.exist(partitionRuntimeInfo.lastEnqueuedSequenceNumber); - should.exist(partitionRuntimeInfo.lastEnqueuedOffset); - rootSpan.end(); - - const rootSpans = tracer.getRootSpans(); - rootSpans.length.should.equal(1, "Should only have one root span."); - rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); - - const expectedGraph: SpanGraph = { - roots: [ - { - name: rootSpan.name, - children: [ + it("EventHubProducerClient can be manually traced", async function(): Promise { + const { tracer, resetTracer } = setTracerForTest(); + + const rootSpan = tracer.startSpan("root"); + const partitionRuntimeInfo = await producerClient.getPartitionProperties("0", { + tracingOptions: { + tracingContext: setSpan(context.active(), rootSpan) + } + }); + partitionRuntimeInfo.partitionId.should.equal("0"); + partitionRuntimeInfo.eventHubName.should.equal(service.path); + 
partitionRuntimeInfo.lastEnqueuedOnUtc.should.be.instanceof(Date); + should.exist(partitionRuntimeInfo.lastEnqueuedSequenceNumber); + should.exist(partitionRuntimeInfo.lastEnqueuedOffset); + rootSpan.end(); + + const rootSpans = tracer.getRootSpans(); + rootSpans.length.should.equal(1, "Should only have one root span."); + rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); + + const expectedGraph: SpanGraph = { + roots: [ { - name: "Azure.EventHubs.getPartitionProperties", - children: [] + name: rootSpan.name, + children: [ + { + name: "Azure.EventHubs.getPartitionProperties", + children: [] + } + ] } ] - } - ] - }; - - tracer.getSpanGraph(rootSpan.context().traceId).should.eql(expectedGraph); - tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); - resetTracer(); - }); + }; - it("EventHubConsumerClient can be manually traced", async function(): Promise { - const { tracer, resetTracer } = setTracerForTest(); + tracer.getSpanGraph(rootSpan.context().traceId).should.eql(expectedGraph); + tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); + resetTracer(); + }); - const rootSpan = tracer.startSpan("root"); - const partitionRuntimeInfo = await consumerClient.getPartitionProperties("0", { - tracingOptions: { - tracingContext: setSpan(context.active(), rootSpan) - } - }); - partitionRuntimeInfo.partitionId.should.equal("0"); - partitionRuntimeInfo.eventHubName.should.equal(service.path); - partitionRuntimeInfo.lastEnqueuedOnUtc.should.be.instanceof(Date); - should.exist(partitionRuntimeInfo.lastEnqueuedSequenceNumber); - should.exist(partitionRuntimeInfo.lastEnqueuedOffset); - rootSpan.end(); - - const rootSpans = tracer.getRootSpans(); - rootSpans.length.should.equal(1, "Should only have one root span."); - rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); - - const expectedGraph: SpanGraph = { - roots: [ - { - name: 
rootSpan.name, - children: [ + it("EventHubConsumerClient can be manually traced", async function(): Promise { + const { tracer, resetTracer } = setTracerForTest(); + + const rootSpan = tracer.startSpan("root"); + const partitionRuntimeInfo = await consumerClient.getPartitionProperties("0", { + tracingOptions: { + tracingContext: setSpan(context.active(), rootSpan) + } + }); + partitionRuntimeInfo.partitionId.should.equal("0"); + partitionRuntimeInfo.eventHubName.should.equal(service.path); + partitionRuntimeInfo.lastEnqueuedOnUtc.should.be.instanceof(Date); + should.exist(partitionRuntimeInfo.lastEnqueuedSequenceNumber); + should.exist(partitionRuntimeInfo.lastEnqueuedOffset); + rootSpan.end(); + + const rootSpans = tracer.getRootSpans(); + rootSpans.length.should.equal(1, "Should only have one root span."); + rootSpans[0].should.equal(rootSpan, "The root span should match what was passed in."); + + const expectedGraph: SpanGraph = { + roots: [ { - name: "Azure.EventHubs.getPartitionProperties", - children: [] + name: rootSpan.name, + children: [ + { + name: "Azure.EventHubs.getPartitionProperties", + children: [] + } + ] } ] - } - ] - }; + }; - tracer.getSpanGraph(rootSpan.context().traceId).should.eql(expectedGraph); - tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); - resetTracer(); + tracer.getSpanGraph(rootSpan.context().traceId).should.eql(expectedGraph); + tracer.getActiveSpans().length.should.equal(0, "All spans should have had end called."); + resetTracer(); + }); + }); }); }); -}).timeout(60000); +}); diff --git a/sdk/eventhub/event-hubs/test/public/node/client.spec.ts b/sdk/eventhub/event-hubs/test/public/node/client.spec.ts index 7a735e949e62..f01cdbfcbc13 100644 --- a/sdk/eventhub/event-hubs/test/public/node/client.spec.ts +++ b/sdk/eventhub/event-hubs/test/public/node/client.spec.ts @@ -7,104 +7,140 @@ import chaiAsPromised from "chai-as-promised"; chai.use(chaiAsPromised); import chaiString from "chai-string"; 
chai.use(chaiString); -import { EnvVarKeys, getEnvVars } from "../utils/testUtils"; +import { EnvVarKeys, getEnvVars, getEnvVarValue, isNode } from "../utils/testUtils"; import { EnvironmentCredential, TokenCredential } from "@azure/identity"; import { EventHubProducerClient, EventHubConsumerClient } from "../../../src"; import { getTracer, setTracer, TestTracer } from "@azure/core-tracing"; -const env = getEnvVars(); - -describe("Create clients using Azure Identity", function(): void { - let endpoint: string; - let credential: TokenCredential; - before("validate environment", function() { - should.exist( - env[EnvVarKeys.AZURE_CLIENT_ID], - "define AZURE_CLIENT_ID in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.AZURE_TENANT_ID], - "define AZURE_TENANT_ID in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.AZURE_CLIENT_SECRET], - "define AZURE_CLIENT_SECRET in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." 
- ); - // This is of the form .servicebus.windows.net - endpoint = (env.EVENTHUB_CONNECTION_STRING.match("Endpoint=sb://(.*)/;") || "")[1]; - credential = new EnvironmentCredential(); - }); - - it("creates an EventHubProducerClient from an Azure.Identity credential", async function(): Promise< - void - > { - const client = new EventHubProducerClient(endpoint, env.EVENTHUB_NAME, credential); - should.equal(client.fullyQualifiedNamespace, endpoint); - - // Extra check involving actual call to the service to ensure this works - const hubInfo = await client.getEventHubProperties(); - should.equal(hubInfo.name, client.eventHubName); +import { versionsToTest } from "@azure/test-utils-multi-version"; +import { createMockServer } from "../utils/mockService"; + +const serviceVersions = ["mock", "live"] as const; +const testTarget = getEnvVarValue("TEST_TARGET") || "live"; + +describe("public/node/client.spec.ts", function() { + versionsToTest(serviceVersions, { versionForRecording: testTarget }, (serviceVersion) => { + const env = getEnvVars(serviceVersion as "live" | "mock"); + if (isNode) { + if (serviceVersion === "mock") { + let service: ReturnType; + before("Starting mock server", async () => { + service = createMockServer(); + + return service.start(); + }); + + after("Stopping mock server", async () => { + return service?.stop(); + }); + } + } + + describe("Create clients using Azure Identity", function(): void { + let endpoint: string; + let credential: TokenCredential; + before("validate environment", function() { + should.exist( + env[EnvVarKeys.AZURE_CLIENT_ID], + "define AZURE_CLIENT_ID in your environment before running integration tests." + ); + should.exist( + env[EnvVarKeys.AZURE_TENANT_ID], + "define AZURE_TENANT_ID in your environment before running integration tests." + ); + should.exist( + env[EnvVarKeys.AZURE_CLIENT_SECRET], + "define AZURE_CLIENT_SECRET in your environment before running integration tests." 
+ ); + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." + ); + + // This is of the form .servicebus.windows.net + endpoint = (env.EVENTHUB_CONNECTION_STRING.match("Endpoint=sb://(.*)/;") || "")[1]; + if (serviceVersion === "mock") { + credential = { + getToken() { + return Promise.resolve({ + token: "token", + expiresOnTimestamp: Date.now() + 1000 * 60 * 5 + }); + } + }; + } else { + credential = new EnvironmentCredential(); + } + }); - await client.close(); - }); + it("creates an EventHubProducerClient from an Azure.Identity credential", async function(): Promise< + void + > { + const client = new EventHubProducerClient(endpoint, env.EVENTHUB_NAME, credential); + should.equal(client.fullyQualifiedNamespace, endpoint); - it("creates an EventHubConsumerClient from an Azure.Identity credential", async function(): Promise< - void - > { - const client = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - endpoint, - env.EVENTHUB_NAME, - credential - ); - should.equal(client.fullyQualifiedNamespace, endpoint); - - // Extra check involving actual call to the service to ensure this works - const hubInfo = await client.getEventHubProperties(); - should.equal(hubInfo.name, client.eventHubName); - - await client.close(); - }); + // Extra check involving actual call to the service to ensure this works + const hubInfo = await client.getEventHubProperties(); + should.equal(hubInfo.name, client.eventHubName); - describe("tracing", () => { - const tracer = new TestTracer(); - const origTracer = getTracer(); - - before(() => { - setTracer(tracer); - }); + await client.close(); + }); - after(() => { - setTracer(origTracer); - }); + it("creates an EventHubConsumerClient from an Azure.Identity credential", async function(): Promise< + void + > { + const client = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + endpoint, + 
env.EVENTHUB_NAME, + credential + ); + should.equal(client.fullyQualifiedNamespace, endpoint); + + // Extra check involving actual call to the service to ensure this works + const hubInfo = await client.getEventHubProperties(); + should.equal(hubInfo.name, client.eventHubName); + + await client.close(); + }); - it("getEventHubProperties() creates a span with a peer.address attribute as the FQNS", async () => { - const client = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - endpoint, - env.EVENTHUB_NAME, - credential - ); - should.equal(client.fullyQualifiedNamespace, endpoint); - - // Extra check involving actual call to the service to ensure this works - const hubInfo = await client.getEventHubProperties(); - should.equal(hubInfo.name, client.eventHubName); - - await client.close(); - - const spans = tracer - .getKnownSpans() - .filter((s) => s.name === "Azure.EventHubs.getEventHubProperties"); - - spans.length.should.equal(1); - spans[0].attributes.should.deep.equal({ - "az.namespace": "Microsoft.EventHub", - "message_bus.destination": client.eventHubName, - "peer.address": client.fullyQualifiedNamespace + describe("tracing", () => { + const tracer = new TestTracer(); + const origTracer = getTracer(); + + before(() => { + setTracer(tracer); + }); + + after(() => { + setTracer(origTracer); + }); + + it("getEventHubProperties() creates a span with a peer.address attribute as the FQNS", async () => { + const client = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + endpoint, + env.EVENTHUB_NAME, + credential + ); + should.equal(client.fullyQualifiedNamespace, endpoint); + + // Extra check involving actual call to the service to ensure this works + const hubInfo = await client.getEventHubProperties(); + should.equal(hubInfo.name, client.eventHubName); + + await client.close(); + + const spans = tracer + .getKnownSpans() + .filter((s) => s.name === "Azure.EventHubs.getEventHubProperties"); + + 
spans.length.should.equal(1); + spans[0].attributes.should.deep.equal({ + "az.namespace": "Microsoft.EventHub", + "message_bus.destination": client.eventHubName, + "peer.address": client.fullyQualifiedNamespace + }); + }); }); }); }); diff --git a/sdk/eventhub/event-hubs/test/public/node/disconnects.spec.ts b/sdk/eventhub/event-hubs/test/public/node/disconnects.spec.ts index f0fe9e8ed6f0..4dac4cb7d334 100644 --- a/sdk/eventhub/event-hubs/test/public/node/disconnects.spec.ts +++ b/sdk/eventhub/event-hubs/test/public/node/disconnects.spec.ts @@ -5,227 +5,256 @@ import chai from "chai"; const should = chai.should(); import chaiAsPromised from "chai-as-promised"; chai.use(chaiAsPromised); -import { EnvVarKeys, getEnvVars } from "../utils/testUtils"; +import { EnvVarKeys, getEnvVars, getEnvVarValue, isNode } from "../utils/testUtils"; import { EventHubConsumerClient, EventHubProducerClient, Subscription } from "../../../src"; -const env = getEnvVars(); - -describe("disconnected", function() { - const service = { - connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - path: env[EnvVarKeys.EVENTHUB_NAME] - }; - before("validate environment", function(): void { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." - ); - }); - - describe("EventHubConsumerClient", function() { - it("runtimeInfo work after disconnect", async () => { - const client = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path - ); - const clientConnectionContext = client["_context"]; - - await client.getPartitionIds({}); - const originalConnectionId = clientConnectionContext.connectionId; - - // Trigger a disconnect on the underlying connection. 
- clientConnectionContext.connection["_connection"].idle(); - - const partitionIds = await client.getPartitionIds({}); - const newConnectionId = clientConnectionContext.connectionId; - - should.not.equal(originalConnectionId, newConnectionId); - partitionIds.length.should.greaterThan(0, "Invalid number of partition ids returned."); - - await client.close(); - }); - - it("should receive after a disconnect", async () => { - /** - * This test validates that an `EventHubConsumerClient.subscribe()` call continues - * receiving events after a `disconnected` event occurs on the underlying connection. - * - * https://github.com/Azure/azure-sdk-for-js/pull/12280 describes an issue where `processEvents` - * would be invoked with 0 events and ignoring the `maxWaitTimeInSeconds` after a `disconnected` event. - * - * For a single `subscribe()` call, this test does the following: - * 1. Ensure events can be received normally before the `disconnected` event. - * 2. Ensure that the `maxWaitTimeInSeconds` is honoured after a `disconnected` event. - * 3. Ensure that events can be received normally after the `disconnected` event. - */ - const consumer = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path - ); - - const producer = new EventHubProducerClient(service.connectionString, service.path); - const eventSentBeforeDisconnect = { body: "the first event" }; - const eventSentAfterDisconnect = { body: "the second event" }; - - const maxWaitTimeInSeconds = 10; - const partitionId = "0"; - const partitionProperties = await consumer.getPartitionProperties(partitionId); - const clientConnectionContext = consumer["_context"]; - - // Send the first event after getting partition properties so that we can expect to receive it. 
- await producer.sendBatch([eventSentBeforeDisconnect], { partitionId }); - - let subscription: Subscription | undefined; - let originalConnectionId: string; - - let processEventsInvocationCount = 0; - let firstInvocationEndTime = 0; - await new Promise((resolve, reject) => { - subscription = consumer.subscribe( - partitionId, - { - processEvents: async (data) => { - processEventsInvocationCount++; - should.exist(data); - if (processEventsInvocationCount === 1) { - // 1. Ensure events can be received normally before the `disconnected` event. - should.equal( - data.length, - 1, - "Expected to receive 1 event in first processEvents invocation." - ); - should.equal(data[0].body, eventSentBeforeDisconnect.body); - originalConnectionId = clientConnectionContext.connectionId; - // Trigger a disconnect on the underlying connection. - clientConnectionContext.connection["_connection"].idle(); - firstInvocationEndTime = Date.now(); - } else if (processEventsInvocationCount === 2) { - // 2. Ensure that the `maxWaitTimeInSeconds` is honoured after a `disconnected` event. - // No new events should have been received at this point since we received the last event in the previous invocation. - should.equal( - data.length, - 0, - "Expected to receive 0 events in second processEvents invocation." - ); - // The elapsed time since the last processEvents invocation should be >= maxWaitTimeInSeconds - should.equal( - Date.now() - firstInvocationEndTime >= maxWaitTimeInSeconds, - true, - "Expected elapsed time between first and second processEvents invocations to be >= maxWaitTimeInSeconds." - ); - const newConnectionId = clientConnectionContext.connectionId; - should.not.equal(originalConnectionId, newConnectionId); - // Send a new event that will be immediately receivable. - await producer.sendBatch([eventSentAfterDisconnect], { partitionId }); - } else if (processEventsInvocationCount === 3) { - // 3. Ensure that events can be received normally after the `disconnected` event. 
- should.equal( - data.length, - 1, - "Expected to receive 1 event in third processEvents invocation." - ); - should.equal(data[0].body, eventSentAfterDisconnect.body); - const newConnectionId = clientConnectionContext.connectionId; - should.not.equal(originalConnectionId, newConnectionId); - resolve(); - } - }, - processError: async (err) => { - reject(err); - } - }, - { - startPosition: { - sequenceNumber: partitionProperties.lastEnqueuedSequenceNumber - }, - maxWaitTimeInSeconds - } - ); +import { versionsToTest } from "@azure/test-utils-multi-version"; +import { createMockServer } from "../utils/mockService"; + +const serviceVersions = ["mock", "live"] as const; +const testTarget = getEnvVarValue("TEST_TARGET") || "live"; + +describe("public/node/client.spec.ts", function() { + versionsToTest( + serviceVersions, + { versionForRecording: testTarget }, + (serviceVersion, onVersions) => { + const env = getEnvVars(serviceVersion as "live" | "mock"); + if (isNode) { + if (serviceVersion === "mock") { + let service: ReturnType; + before("Starting mock server", async () => { + service = createMockServer(); + + return service.start(); + }); + after("Stopping mock server", async () => { + return service?.stop(); + }); + } + } + + describe("disconnected", function() { + const service = { + connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + path: env[EnvVarKeys.EVENTHUB_NAME] + }; + before("validate environment", function(): void { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." + ); + should.exist( + env[EnvVarKeys.EVENTHUB_NAME], + "define EVENTHUB_NAME in your environment before running integration tests." 
+ ); + }); + + describe("EventHubConsumerClient", function() { + it("runtimeInfo work after disconnect", async () => { + const client = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path + ); + const clientConnectionContext = client["_context"]; + + await client.getPartitionIds({}); + const originalConnectionId = clientConnectionContext.connectionId; + + // Trigger a disconnect on the underlying connection. + clientConnectionContext.connection["_connection"].idle(); + + const partitionIds = await client.getPartitionIds({}); + const newConnectionId = clientConnectionContext.connectionId; + + should.not.equal(originalConnectionId, newConnectionId); + partitionIds.length.should.greaterThan(0, "Invalid number of partition ids returned."); + + await client.close(); + }); + + it("should receive after a disconnect", async () => { + /** + * This test validates that an `EventHubConsumerClient.subscribe()` call continues + * receiving events after a `disconnected` event occurs on the underlying connection. + * + * https://github.com/Azure/azure-sdk-for-js/pull/12280 describes an issue where `processEvents` + * would be invoked with 0 events and ignoring the `maxWaitTimeInSeconds` after a `disconnected` event. + * + * For a single `subscribe()` call, this test does the following: + * 1. Ensure events can be received normally before the `disconnected` event. + * 2. Ensure that the `maxWaitTimeInSeconds` is honoured after a `disconnected` event. + * 3. Ensure that events can be received normally after the `disconnected` event. 
+ */ + const consumer = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path + ); + + const producer = new EventHubProducerClient(service.connectionString, service.path); + const eventSentBeforeDisconnect = { body: "the first event" }; + const eventSentAfterDisconnect = { body: "the second event" }; + + const maxWaitTimeInSeconds = 10; + const partitionId = "0"; + const partitionProperties = await consumer.getPartitionProperties(partitionId); + const clientConnectionContext = consumer["_context"]; + + // Send the first event after getting partition properties so that we can expect to receive it. + await producer.sendBatch([eventSentBeforeDisconnect], { partitionId }); + + let subscription: Subscription | undefined; + let originalConnectionId: string; + + let processEventsInvocationCount = 0; + let firstInvocationEndTime = 0; + await new Promise((resolve, reject) => { + subscription = consumer.subscribe( + partitionId, + { + processEvents: async (data) => { + processEventsInvocationCount++; + should.exist(data); + if (processEventsInvocationCount === 1) { + // 1. Ensure events can be received normally before the `disconnected` event. + should.equal( + data.length, + 1, + "Expected to receive 1 event in first processEvents invocation." + ); + should.equal(data[0].body, eventSentBeforeDisconnect.body); + originalConnectionId = clientConnectionContext.connectionId; + // Trigger a disconnect on the underlying connection. + clientConnectionContext.connection["_connection"].idle(); + firstInvocationEndTime = Date.now(); + } else if (processEventsInvocationCount === 2) { + // 2. Ensure that the `maxWaitTimeInSeconds` is honoured after a `disconnected` event. + // No new events should have been received at this point since we received the last event in the previous invocation. + should.equal( + data.length, + 0, + "Expected to receive 0 events in second processEvents invocation." 
+ ); + // The elapsed time since the last processEvents invocation should be >= maxWaitTimeInSeconds + should.equal( + Date.now() - firstInvocationEndTime >= maxWaitTimeInSeconds, + true, + "Expected elapsed time between first and second processEvents invocations to be >= maxWaitTimeInSeconds." + ); + const newConnectionId = clientConnectionContext.connectionId; + should.not.equal(originalConnectionId, newConnectionId); + // Send a new event that will be immediately receivable. + await producer.sendBatch([eventSentAfterDisconnect], { partitionId }); + } else if (processEventsInvocationCount === 3) { + // 3. Ensure that events can be received normally after the `disconnected` event. + should.equal( + data.length, + 1, + "Expected to receive 1 event in third processEvents invocation." + ); + should.equal(data[0].body, eventSentAfterDisconnect.body); + const newConnectionId = clientConnectionContext.connectionId; + should.not.equal(originalConnectionId, newConnectionId); + resolve(); + } + }, + processError: async (err) => { + reject(err); + } + }, + { + startPosition: { + sequenceNumber: partitionProperties.lastEnqueuedSequenceNumber + }, + maxWaitTimeInSeconds + } + ); + }); + await subscription!.close(); + await consumer.close(); + await producer.close(); + }); + }); + + describe("EventHubProducerClient", function() { + it("runtimeInfo work after disconnect", async () => { + const client = new EventHubProducerClient(service.connectionString, service.path); + const clientConnectionContext = client["_context"]; + + await client.getPartitionIds({}); + const originalConnectionId = clientConnectionContext.connectionId; + + // Trigger a disconnect on the underlying connection. 
+ clientConnectionContext.connection["_connection"].idle(); + + const partitionIds = await client.getPartitionIds({}); + const newConnectionId = clientConnectionContext.connectionId; + + should.not.equal(originalConnectionId, newConnectionId); + partitionIds.length.should.greaterThan(0, "Invalid number of partition ids returned."); + + await client.close(); + }); + + it("should send after a disconnect", async () => { + const client = new EventHubProducerClient(service.connectionString, service.path); + const clientConnectionContext = client["_context"]; + + await client.sendBatch([{ body: "test" }]); + const originalConnectionId = clientConnectionContext.connectionId; + + // Trigger a disconnect on the underlying connection. + clientConnectionContext.connection["_connection"].idle(); + + await client.sendBatch([{ body: "test2" }]); + const newConnectionId = clientConnectionContext.connectionId; + + should.not.equal(originalConnectionId, newConnectionId); + + await client.close(); + }); + + // This test currently fails in mock because the connection refresh isn't happening. + // Eventually the test should be changed to let the mock service disconnect the client. + onVersions(["live"]).it("should not throw an uncaught exception", async () => { + const client = new EventHubProducerClient(service.connectionString, service.path); + const clientConnectionContext = client["_context"]; + + // Send an event to open the connection. + await client.sendBatch([{ body: "test" }]); + const originalConnectionId = clientConnectionContext.connectionId; + + // We need to dig deep into the internals to get the awaitable sender so that . + const awaitableSender = client["_sendersMap"].get("")!["_sender"]!; + + let thirdSend: Promise; + // Change the timeout on the awaitableSender so it forces an OperationTimeoutError + awaitableSender.sendTimeoutInSeconds = 0; + // Ensure that the connection will disconnect, and another sendBatch occurs while a sendBatch is in-flight. 
+ setTimeout(() => { + // Trigger a disconnect on the underlying connection while the `sendBatch` is in flight. + clientConnectionContext.connection["_connection"].idle(); + // Triggering another sendBatch immediately after an idle + // used to cause the rhea connection remote state to be cleared. + // This caused the in-flight sendBatch to throw an uncaught error + // if it timed out. + thirdSend = client.sendBatch([{ body: "test3" }]); + }, 0); + + await client.sendBatch([{ body: "test2" }]); + const newConnectionId = clientConnectionContext.connectionId; + + should.not.equal(originalConnectionId, newConnectionId); + + // ensure the sendBatch from the setTimeout succeeded. + // Wait for the connectionContext to be ready for opening. + await thirdSend!; + + await client.close(); + }); + }); }); - await subscription!.close(); - await consumer.close(); - await producer.close(); - }); - }); - - describe("EventHubProducerClient", function() { - it("runtimeInfo work after disconnect", async () => { - const client = new EventHubProducerClient(service.connectionString, service.path); - const clientConnectionContext = client["_context"]; - - await client.getPartitionIds({}); - const originalConnectionId = clientConnectionContext.connectionId; - - // Trigger a disconnect on the underlying connection. 
- clientConnectionContext.connection["_connection"].idle(); - - const partitionIds = await client.getPartitionIds({}); - const newConnectionId = clientConnectionContext.connectionId; - - should.not.equal(originalConnectionId, newConnectionId); - partitionIds.length.should.greaterThan(0, "Invalid number of partition ids returned."); - - await client.close(); - }); - - it("should send after a disconnect", async () => { - const client = new EventHubProducerClient(service.connectionString, service.path); - const clientConnectionContext = client["_context"]; - - await client.sendBatch([{ body: "test" }]); - const originalConnectionId = clientConnectionContext.connectionId; - - // Trigger a disconnect on the underlying connection. - clientConnectionContext.connection["_connection"].idle(); - - await client.sendBatch([{ body: "test2" }]); - const newConnectionId = clientConnectionContext.connectionId; - - should.not.equal(originalConnectionId, newConnectionId); - - await client.close(); - }); - - it("should not throw an uncaught exception", async () => { - const client = new EventHubProducerClient(service.connectionString, service.path); - const clientConnectionContext = client["_context"]; - - // Send an event to open the connection. - await client.sendBatch([{ body: "test" }]); - const originalConnectionId = clientConnectionContext.connectionId; - - // We need to dig deep into the internals to get the awaitable sender so that . - const awaitableSender = client["_sendersMap"].get("")!["_sender"]!; - - let thirdSend: Promise; - // Change the timeout on the awaitableSender so it forces an OperationTimeoutError - awaitableSender.sendTimeoutInSeconds = 0; - // Ensure that the connection will disconnect, and another sendBatch occurs while a sendBatch is in-flight. - setTimeout(() => { - // Trigger a disconnect on the underlying connection while the `sendBatch` is in flight. 
- clientConnectionContext.connection["_connection"].idle(); - // Triggering another sendBatch immediately after an idle - // used to cause the rhea connection remote state to be cleared. - // This caused the in-flight sendBatch to throw an uncaught error - // if it timed out. - thirdSend = client.sendBatch([{ body: "test3" }]); - }, 0); - - await client.sendBatch([{ body: "test2" }]); - const newConnectionId = clientConnectionContext.connectionId; - - should.not.equal(originalConnectionId, newConnectionId); - - // ensure the sendBatch from the setTimeout succeeded. - // Wait for the connectionContext to be ready for opening. - await thirdSend!; - - await client.close(); - }); - }); + } + ); }); diff --git a/sdk/eventhub/event-hubs/test/public/receiver.spec.ts b/sdk/eventhub/event-hubs/test/public/receiver.spec.ts index 6db65c47c921..8512c555c90e 100644 --- a/sdk/eventhub/event-hubs/test/public/receiver.spec.ts +++ b/sdk/eventhub/event-hubs/test/public/receiver.spec.ts @@ -17,518 +17,553 @@ import { EventHubProducerClient, Subscription } from "../../src"; -import { EnvVarKeys, getEnvVars } from "./utils/testUtils"; -const env = getEnvVars(); - -describe("EventHubConsumerClient", function(): void { - const service = { - connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - path: env[EnvVarKeys.EVENTHUB_NAME] - }; - let producerClient: EventHubProducerClient; - let consumerClient: EventHubConsumerClient; - let partitionIds: string[]; - before("validate environment", async function(): Promise { - should.exist( - env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], - "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." - ); - should.exist( - env[EnvVarKeys.EVENTHUB_NAME], - "define EVENTHUB_NAME in your environment before running integration tests." 
- ); - }); - - beforeEach("Creating the clients", async () => { - producerClient = new EventHubProducerClient(service.connectionString, service.path); - consumerClient = new EventHubConsumerClient( - EventHubConsumerClient.defaultConsumerGroupName, - service.connectionString, - service.path - ); - partitionIds = await producerClient.getPartitionIds({}); - }); - - afterEach("Closing the clients", async () => { - await producerClient.close(); - await consumerClient.close(); - }); +import { EnvVarKeys, getEnvVars, getEnvVarValue, isNode } from "./utils/testUtils"; +import { versionsToTest } from "@azure/test-utils-multi-version"; +import { createMockServer } from "./utils/mockService"; + +const serviceVersions = ["mock", "live"] as const; +const testTarget = getEnvVarValue("TEST_TARGET") || "live"; + +describe("public/receiver.spec.ts", function() { + versionsToTest(serviceVersions, { versionForRecording: testTarget }, (serviceVersion) => { + const env = getEnvVars(serviceVersion as "live" | "mock"); + if (isNode) { + if (serviceVersion === "mock") { + let service: ReturnType; + before("Starting mock server", async () => { + service = createMockServer(); + + return service.start(); + }); + after("Stopping mock server", async () => { + return service?.stop(); + }); + } + } - describe("subscribe() with partitionId 0 as number", function(): void { - it("should not throw an error", async function(): Promise { - let subscription: Subscription | undefined; - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - // @ts-expect-error Testing the value 0 can be provided as a number for JS users. 
- 0, - { - processEvents: async () => { - resolve(); - }, - processError: async (err) => { - reject(err); - } - }, - { - startPosition: latestEventPosition, - maxWaitTimeInSeconds: 0 // Set timeout of 0 to resolve the promise ASAP - } + describe("EventHubConsumerClient", function(): void { + const service = { + connectionString: env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + path: env[EnvVarKeys.EVENTHUB_NAME] + }; + let producerClient: EventHubProducerClient; + let consumerClient: EventHubConsumerClient; + let partitionIds: string[]; + before("validate environment", async function(): Promise { + should.exist( + env[EnvVarKeys.EVENTHUB_CONNECTION_STRING], + "define EVENTHUB_CONNECTION_STRING in your environment before running integration tests." + ); + should.exist( + env[EnvVarKeys.EVENTHUB_NAME], + "define EVENTHUB_NAME in your environment before running integration tests." ); }); - await subscription!.close(); - }); - }); - - describe("subscribe() with EventPosition specified as", function(): void { - let partitionId: string; - let eventSentBeforeSubscribe: EventData; - let eventsSentAfterSubscribe: EventData[]; - - beforeEach(async () => { - partitionId = partitionIds[0]; - eventSentBeforeSubscribe = { - body: "Hello awesome world " + Math.random() - }; - await producerClient.sendBatch([eventSentBeforeSubscribe], { partitionId }); - - eventsSentAfterSubscribe = []; - for (let i = 0; i < 5; i++) { - eventsSentAfterSubscribe.push({ - body: "Hello awesome world " + Math.random(), - properties: { - stamp: Math.random() - } - }); - } - }); - - it("'from end of stream' should receive messages correctly", async function(): Promise { - let subscription: Subscription | undefined; - let processEventsCalled = false; - const eventsReceived: ReceivedEventData[] = []; - - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - if (!processEventsCalled) { - processEventsCalled = true; - 
should.equal(data.length, 0, "Received events when none were sent yet."); - await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); - return; - } - eventsReceived.push(...data); - if (eventsReceived.length === eventsSentAfterSubscribe.length) { - resolve(); - } - }, - processError: async (err) => { - reject(err); - } - }, - { - startPosition: latestEventPosition, - maxWaitTimeInSeconds: 30 - } + beforeEach("Creating the clients", async () => { + producerClient = new EventHubProducerClient(service.connectionString, service.path); + consumerClient = new EventHubConsumerClient( + EventHubConsumerClient.defaultConsumerGroupName, + service.connectionString, + service.path ); + partitionIds = await producerClient.getPartitionIds({}); }); - await subscription!.close(); - if (eventsReceived.find((event) => event.body === eventSentBeforeSubscribe.body)) { - should.fail("Received event sent before subscribe call with latestEventPosition."); - } + afterEach("Closing the clients", async () => { + await producerClient.close(); + await consumerClient.close(); + }); - should.equal( - eventsReceived.length, - eventsSentAfterSubscribe.length, - "Not received the same number of events that were sent." 
- ); - for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { - eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); - eventsReceived[i].properties!.stamp.should.equal( - eventsSentAfterSubscribe[i].properties!.stamp - ); - } - }); - - it("'after a particular sequence number' should receive messages correctly", async function(): Promise< - void - > { - const partitionInfo = await consumerClient.getPartitionProperties(partitionId); - let subscription: Subscription | undefined; - let processEventsCalled = false; - const eventsReceived: ReceivedEventData[] = []; - - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - if (!processEventsCalled) { - processEventsCalled = true; - should.equal(data.length, 0, "Received events when none were sent yet."); - await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); - return; + describe("subscribe() with partitionId 0 as number", function(): void { + it("should not throw an error", async function(): Promise { + let subscription: Subscription | undefined; + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + // @ts-expect-error Testing the value 0 can be provided as a number for JS users. 
+ 0, + { + processEvents: async () => { + resolve(); + }, + processError: async (err) => { + reject(err); + } + }, + { + startPosition: latestEventPosition, + maxWaitTimeInSeconds: 0 // Set timeout of 0 to resolve the promise ASAP } - eventsReceived.push(...data); - if (eventsReceived.length === eventsSentAfterSubscribe.length) { - resolve(); - } - }, - processError: async (err) => { - reject(err); - } - }, - { - startPosition: { sequenceNumber: partitionInfo.lastEnqueuedSequenceNumber }, - maxWaitTimeInSeconds: 30 - } - ); + ); + }); + await subscription!.close(); + }); }); - await subscription!.close(); - - if (eventsReceived.find((event) => event.body === eventSentBeforeSubscribe.body)) { - should.fail("Received event sent before subscribe call with last sequence number."); - } - should.equal( - eventsReceived.length, - eventsSentAfterSubscribe.length, - "Not received the same number of events that were sent." - ); - for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { - eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); - eventsReceived[i].properties!.stamp.should.equal( - eventsSentAfterSubscribe[i].properties!.stamp - ); - } - }); - - it("'after a particular sequence number' with isInclusive should receive messages correctly", async function(): Promise< - void - > { - const partitionInfo = await consumerClient.getPartitionProperties(partitionId); - let subscription: Subscription | undefined; - let processEventsCalled = false; - const eventsReceived: ReceivedEventData[] = []; - - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - if (!processEventsCalled) { - processEventsCalled = true; - should.equal(data.length, 1, "Expected 1 event sent right before subscribe call."); - should.equal( - data[0].body, - eventSentBeforeSubscribe.body, - "Should have received only the 1 event sent right before subscribe call." 
- ); - - await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); - return; + describe("subscribe() with EventPosition specified as", function(): void { + let partitionId: string; + let eventSentBeforeSubscribe: EventData; + let eventsSentAfterSubscribe: EventData[]; + + beforeEach(async () => { + partitionId = partitionIds[0]; + + eventSentBeforeSubscribe = { + body: "Hello awesome world " + Math.random() + }; + await producerClient.sendBatch([eventSentBeforeSubscribe], { partitionId }); + + eventsSentAfterSubscribe = []; + for (let i = 0; i < 5; i++) { + eventsSentAfterSubscribe.push({ + body: "Hello awesome world " + Math.random(), + properties: { + stamp: Math.random() } + }); + } + }); - eventsReceived.push(...data); - if (eventsReceived.length === eventsSentAfterSubscribe.length) { - resolve(); + it("'from end of stream' should receive messages correctly", async function(): Promise< + void + > { + let subscription: Subscription | undefined; + let processEventsCalled = false; + const eventsReceived: ReceivedEventData[] = []; + + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + if (!processEventsCalled) { + processEventsCalled = true; + should.equal(data.length, 0, "Received events when none were sent yet."); + await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); + return; + } + eventsReceived.push(...data); + if (eventsReceived.length === eventsSentAfterSubscribe.length) { + resolve(); + } + }, + processError: async (err) => { + reject(err); + } + }, + { + startPosition: latestEventPosition, + maxWaitTimeInSeconds: 30 } - }, - processError: async (err) => { - reject(err); - } - }, - { - startPosition: { - sequenceNumber: partitionInfo.lastEnqueuedSequenceNumber, - isInclusive: true - }, - maxWaitTimeInSeconds: 30 + ); + }); + await subscription!.close(); + + if (eventsReceived.find((event) => event.body === 
eventSentBeforeSubscribe.body)) { + should.fail("Received event sent before subscribe call with latestEventPosition."); } - ); - }); - await subscription!.close(); - - should.equal( - eventsReceived.length, - eventsSentAfterSubscribe.length, - "Not received the same number of events that were sent." - ); - - for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { - eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); - eventsReceived[i].properties!.stamp.should.equal( - eventsSentAfterSubscribe[i].properties!.stamp - ); - } - }); - - it("'after a particular offset' should receive messages correctly", async function(): Promise< - void - > { - const partitionInfo = await consumerClient.getPartitionProperties(partitionId); - let subscription: Subscription | undefined; - let processEventsCalled = false; - const eventsReceived: ReceivedEventData[] = []; - - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - if (!processEventsCalled) { - processEventsCalled = true; - should.equal(data.length, 0, "Received events when none were sent yet."); - await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); - return; - } - eventsReceived.push(...data); - if (eventsReceived.length === eventsSentAfterSubscribe.length) { - resolve(); + + should.equal( + eventsReceived.length, + eventsSentAfterSubscribe.length, + "Not received the same number of events that were sent." 
+ ); + for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { + eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); + eventsReceived[i].properties!.stamp.should.equal( + eventsSentAfterSubscribe[i].properties!.stamp + ); + } + }); + + it("'after a particular sequence number' should receive messages correctly", async function(): Promise< + void + > { + const partitionInfo = await consumerClient.getPartitionProperties(partitionId); + let subscription: Subscription | undefined; + let processEventsCalled = false; + const eventsReceived: ReceivedEventData[] = []; + + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + if (!processEventsCalled) { + processEventsCalled = true; + should.equal(data.length, 0, "Received events when none were sent yet."); + await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); + return; + } + eventsReceived.push(...data); + if (eventsReceived.length === eventsSentAfterSubscribe.length) { + resolve(); + } + }, + processError: async (err) => { + reject(err); + } + }, + { + startPosition: { sequenceNumber: partitionInfo.lastEnqueuedSequenceNumber }, + maxWaitTimeInSeconds: 30 } - }, - processError: async (err) => { - reject(err); - } - }, - { - startPosition: { offset: partitionInfo.lastEnqueuedOffset }, - maxWaitTimeInSeconds: 30 + ); + }); + await subscription!.close(); + + if (eventsReceived.find((event) => event.body === eventSentBeforeSubscribe.body)) { + should.fail("Received event sent before subscribe call with last sequence number."); } - ); - }); - await subscription!.close(); - if (eventsReceived.find((event) => event.body === eventSentBeforeSubscribe.body)) { - should.fail("Received event sent before subscribe call with last offset."); - } + should.equal( + eventsReceived.length, + eventsSentAfterSubscribe.length, + "Not received the same number of events that were sent." 
+ ); + for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { + eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); + eventsReceived[i].properties!.stamp.should.equal( + eventsSentAfterSubscribe[i].properties!.stamp + ); + } + }); - should.equal( - eventsReceived.length, - eventsSentAfterSubscribe.length, - "Not received the same number of events that were sent." - ); - for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { - eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); - eventsReceived[i].properties!.stamp.should.equal( - eventsSentAfterSubscribe[i].properties!.stamp - ); - } - }); - - it("'after a particular offset' with isInclusive should receive messages correctly", async function(): Promise< - void - > { - const partitionInfo = await consumerClient.getPartitionProperties(partitionId); - let subscription: Subscription | undefined; - let processEventsCalled = false; - const eventsReceived: ReceivedEventData[] = []; - - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - if (!processEventsCalled) { - processEventsCalled = true; - should.equal(data.length, 1, "Expected 1 event sent right before subscribe call."); - should.equal( - data[0].body, - eventSentBeforeSubscribe.body, - "Should have received only the 1 event sent right before subscribe call." 
- ); - - await producerClient.sendBatch(eventsSentAfterSubscribe, { - partitionId - }); - return; + it("'after a particular sequence number' with isInclusive should receive messages correctly", async function(): Promise< + void + > { + const partitionInfo = await consumerClient.getPartitionProperties(partitionId); + let subscription: Subscription | undefined; + let processEventsCalled = false; + const eventsReceived: ReceivedEventData[] = []; + + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + if (!processEventsCalled) { + processEventsCalled = true; + should.equal( + data.length, + 1, + "Expected 1 event sent right before subscribe call." + ); + should.equal( + data[0].body, + eventSentBeforeSubscribe.body, + "Should have received only the 1 event sent right before subscribe call." + ); + + await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); + return; + } + + eventsReceived.push(...data); + if (eventsReceived.length === eventsSentAfterSubscribe.length) { + resolve(); + } + }, + processError: async (err) => { + reject(err); + } + }, + { + startPosition: { + sequenceNumber: partitionInfo.lastEnqueuedSequenceNumber, + isInclusive: true + }, + maxWaitTimeInSeconds: 30 } + ); + }); + await subscription!.close(); + + should.equal( + eventsReceived.length, + eventsSentAfterSubscribe.length, + "Not received the same number of events that were sent." 
+ ); + + for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { + eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); + eventsReceived[i].properties!.stamp.should.equal( + eventsSentAfterSubscribe[i].properties!.stamp + ); + } + }); - eventsReceived.push(...data); - if (eventsReceived.length === eventsSentAfterSubscribe.length) { - resolve(); + it("'after a particular offset' should receive messages correctly", async function(): Promise< + void + > { + const partitionInfo = await consumerClient.getPartitionProperties(partitionId); + let subscription: Subscription | undefined; + let processEventsCalled = false; + const eventsReceived: ReceivedEventData[] = []; + + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + if (!processEventsCalled) { + processEventsCalled = true; + should.equal(data.length, 0, "Received events when none were sent yet."); + await producerClient.sendBatch(eventsSentAfterSubscribe, { partitionId }); + return; + } + eventsReceived.push(...data); + if (eventsReceived.length === eventsSentAfterSubscribe.length) { + resolve(); + } + }, + processError: async (err) => { + reject(err); + } + }, + { + startPosition: { offset: partitionInfo.lastEnqueuedOffset }, + maxWaitTimeInSeconds: 30 } - }, - processError: async (err) => { - reject(err); - } - }, - { - startPosition: { - offset: partitionInfo.lastEnqueuedOffset, - isInclusive: true - }, - maxWaitTimeInSeconds: 30 + ); + }); + await subscription!.close(); + + if (eventsReceived.find((event) => event.body === eventSentBeforeSubscribe.body)) { + should.fail("Received event sent before subscribe call with last offset."); } - ); - }); - await subscription!.close(); - - should.equal( - eventsReceived.length, - eventsSentAfterSubscribe.length, - "Not received the same number of events that were sent." 
- ); - - for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { - eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); - eventsReceived[i].properties!.stamp.should.equal( - eventsSentAfterSubscribe[i].properties!.stamp - ); - } - }); - - it("'after a particular enqueued time' should receive messages correctly", async function(): Promise< - void - > { - const partitionInfo = await consumerClient.getPartitionProperties(partitionId); - let subscription: Subscription | undefined; - let processEventsCalled = false; - const eventsReceived: ReceivedEventData[] = []; - - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data) => { - if (!processEventsCalled) { - processEventsCalled = true; - should.equal(data.length, 0, "Received events when none were sent yet."); - await producerClient.sendBatch(eventsSentAfterSubscribe, { - partitionId - }); - return; - } - eventsReceived.push(...data); - if (eventsReceived.length === eventsSentAfterSubscribe.length) { - resolve(); + should.equal( + eventsReceived.length, + eventsSentAfterSubscribe.length, + "Not received the same number of events that were sent." 
+ ); + for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { + eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); + eventsReceived[i].properties!.stamp.should.equal( + eventsSentAfterSubscribe[i].properties!.stamp + ); + } + }); + + it("'after a particular offset' with isInclusive should receive messages correctly", async function(): Promise< + void + > { + const partitionInfo = await consumerClient.getPartitionProperties(partitionId); + let subscription: Subscription | undefined; + let processEventsCalled = false; + const eventsReceived: ReceivedEventData[] = []; + + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + if (!processEventsCalled) { + processEventsCalled = true; + should.equal( + data.length, + 1, + "Expected 1 event sent right before subscribe call." + ); + should.equal( + data[0].body, + eventSentBeforeSubscribe.body, + "Should have received only the 1 event sent right before subscribe call." + ); + + await producerClient.sendBatch(eventsSentAfterSubscribe, { + partitionId + }); + return; + } + + eventsReceived.push(...data); + if (eventsReceived.length === eventsSentAfterSubscribe.length) { + resolve(); + } + }, + processError: async (err) => { + reject(err); + } + }, + { + startPosition: { + offset: partitionInfo.lastEnqueuedOffset, + isInclusive: true + }, + maxWaitTimeInSeconds: 30 } - }, - processError: async (err) => { - reject(err); - } - }, - { - startPosition: { enqueuedOn: partitionInfo.lastEnqueuedOnUtc }, - maxWaitTimeInSeconds: 30 + ); + }); + await subscription!.close(); + + should.equal( + eventsReceived.length, + eventsSentAfterSubscribe.length, + "Not received the same number of events that were sent." 
+ ); + + for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { + eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); + eventsReceived[i].properties!.stamp.should.equal( + eventsSentAfterSubscribe[i].properties!.stamp + ); } - ); - }); - await subscription!.close(); + }); - if (eventsReceived.find((event) => event.body === eventSentBeforeSubscribe.body)) { - should.fail("Received event sent before subscribe call with last offset."); - } + it("'after a particular enqueued time' should receive messages correctly", async function(): Promise< + void + > { + const partitionInfo = await consumerClient.getPartitionProperties(partitionId); + let subscription: Subscription | undefined; + let processEventsCalled = false; + const eventsReceived: ReceivedEventData[] = []; + + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data) => { + if (!processEventsCalled) { + processEventsCalled = true; + should.equal(data.length, 0, "Received events when none were sent yet."); + await producerClient.sendBatch(eventsSentAfterSubscribe, { + partitionId + }); + return; + } + + eventsReceived.push(...data); + if (eventsReceived.length === eventsSentAfterSubscribe.length) { + resolve(); + } + }, + processError: async (err) => { + reject(err); + } + }, + { + startPosition: { enqueuedOn: partitionInfo.lastEnqueuedOnUtc }, + maxWaitTimeInSeconds: 30 + } + ); + }); + await subscription!.close(); - should.equal( - eventsReceived.length, - eventsSentAfterSubscribe.length, - "Not received the same number of events that were sent." 
- ); - for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { - eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); - eventsReceived[i].properties!.stamp.should.equal( - eventsSentAfterSubscribe[i].properties!.stamp - ); - } - }); - }); + if (eventsReceived.find((event) => event.body === eventSentBeforeSubscribe.body)) { + should.fail("Received event sent before subscribe call with last offset."); + } - describe("subscribe() with trackLastEnqueuedEventProperties", function(): void { - it("should have lastEnqueuedEventProperties populated", async function(): Promise { - const partitionId = partitionIds[0]; - - const eventData = { body: "Hello awesome world " + Math.random() }; - await producerClient.sendBatch([eventData], { partitionId }); - debug("sent: ", eventData); - - const pInfo = await consumerClient.getPartitionProperties(partitionId); - debug("partition info: ", pInfo); - - let subscription: Subscription | undefined; - await new Promise((resolve, reject) => { - subscription = consumerClient.subscribe( - partitionId, - { - processEvents: async (data, context) => { - data.length.should.equal(1); - should.exist(context.lastEnqueuedEventProperties); - context.lastEnqueuedEventProperties!.offset!.should.equal(pInfo.lastEnqueuedOffset); - context.lastEnqueuedEventProperties!.sequenceNumber!.should.equal( - pInfo.lastEnqueuedSequenceNumber - ); - context - .lastEnqueuedEventProperties!.enqueuedOn!.getTime() - .should.equal(pInfo.lastEnqueuedOnUtc.getTime()); - context - .lastEnqueuedEventProperties!.retrievedOn!.getTime() - .should.be.greaterThan(Date.now() - 60000); - - resolve(); - }, - processError: async (err) => { - reject(err); - } - }, - { - startPosition: earliestEventPosition, - maxBatchSize: 1, - trackLastEnqueuedEventProperties: true + should.equal( + eventsReceived.length, + eventsSentAfterSubscribe.length, + "Not received the same number of events that were sent." 
+ ); + for (let i = 0; i < eventsSentAfterSubscribe.length; i++) { + eventsReceived[i].body.should.equal(eventsSentAfterSubscribe[i].body); + eventsReceived[i].properties!.stamp.should.equal( + eventsSentAfterSubscribe[i].properties!.stamp + ); } - ); + }); }); - await subscription!.close(); - }); - }); - describe("Negative scenarios", function(): void { - it("should throw MessagingEntityNotFoundError for non existing consumer group", async function(): Promise< - void - > { - const badConsumerClient = new EventHubConsumerClient( - "boo", - service.connectionString, - service.path - ); - let subscription: Subscription | undefined; - const caughtErr = await new Promise((resolve) => { - subscription = badConsumerClient.subscribe({ - processEvents: async () => { - /* no-op */ - }, - processError: async (err) => { - resolve(err); - } + describe("subscribe() with trackLastEnqueuedEventProperties", function(): void { + it("should have lastEnqueuedEventProperties populated", async function(): Promise { + const partitionId = partitionIds[0]; + + const eventData = { body: "Hello awesome world " + Math.random() }; + await producerClient.sendBatch([eventData], { partitionId }); + debug("sent: ", eventData); + + const pInfo = await consumerClient.getPartitionProperties(partitionId); + debug("partition info: ", pInfo); + + let subscription: Subscription | undefined; + await new Promise((resolve, reject) => { + subscription = consumerClient.subscribe( + partitionId, + { + processEvents: async (data, context) => { + data.length.should.equal(1); + should.exist(context.lastEnqueuedEventProperties); + context.lastEnqueuedEventProperties!.offset!.should.equal( + pInfo.lastEnqueuedOffset + ); + context.lastEnqueuedEventProperties!.sequenceNumber!.should.equal( + pInfo.lastEnqueuedSequenceNumber + ); + context + .lastEnqueuedEventProperties!.enqueuedOn!.getTime() + .should.equal(pInfo.lastEnqueuedOnUtc.getTime()); + context + .lastEnqueuedEventProperties!.retrievedOn!.getTime() + 
.should.be.greaterThan(Date.now() - 60000); + + resolve(); + }, + processError: async (err) => { + reject(err); + } + }, + { + startPosition: earliestEventPosition, + maxBatchSize: 1, + trackLastEnqueuedEventProperties: true + } + ); + }); + await subscription!.close(); }); }); - await subscription!.close(); - await badConsumerClient.close(); - - should.exist(caughtErr); - should.equal((caughtErr as MessagingError).code, "MessagingEntityNotFoundError"); - }); - - it(`should throw an invalid EventHub address error for invalid partition`, async function(): Promise< - void - > { - let subscription: Subscription | undefined; - const caughtErr = await new Promise((resolve) => { - subscription = consumerClient.subscribe("boo", { - processEvents: async () => { - /* no-op */ - }, - processError: async (err) => { - resolve(err); - } + + describe("Negative scenarios", function(): void { + it("should throw MessagingEntityNotFoundError for non existing consumer group", async function(): Promise< + void + > { + const badConsumerClient = new EventHubConsumerClient( + "boo", + service.connectionString, + service.path + ); + let subscription: Subscription | undefined; + const caughtErr = await new Promise((resolve) => { + subscription = badConsumerClient.subscribe({ + processEvents: async () => { + /* no-op */ + }, + processError: async (err) => { + resolve(err); + } + }); + }); + await subscription!.close(); + await badConsumerClient.close(); + + should.exist(caughtErr); + should.equal((caughtErr as MessagingError).code, "MessagingEntityNotFoundError"); + }); + + it(`should throw an invalid EventHub address error for invalid partition`, async function(): Promise< + void + > { + let subscription: Subscription | undefined; + const caughtErr = await new Promise((resolve) => { + subscription = consumerClient.subscribe("boo", { + processEvents: async () => { + /* no-op */ + }, + processError: async (err) => { + resolve(err); + } + }); + }); + await subscription!.close(); + 
should.exist(caughtErr); + should.equal((caughtErr as MessagingError).code, "ArgumentOutOfRangeError"); }); }); - await subscription!.close(); - should.exist(caughtErr); - should.equal((caughtErr as MessagingError).code, "ArgumentOutOfRangeError"); - }); + }).timeout(90000); }); -}).timeout(90000); +}); diff --git a/sdk/eventhub/event-hubs/test/public/utils/mockService.browser.ts b/sdk/eventhub/event-hubs/test/public/utils/mockService.browser.ts new file mode 100644 index 000000000000..c099a5de1b59 --- /dev/null +++ b/sdk/eventhub/event-hubs/test/public/utils/mockService.browser.ts @@ -0,0 +1,6 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +export function createMockServer(): void { + /* no-op in browsers */ +} diff --git a/sdk/eventhub/event-hubs/test/public/utils/mockService.ts b/sdk/eventhub/event-hubs/test/public/utils/mockService.ts new file mode 100644 index 000000000000..058951592c60 --- /dev/null +++ b/sdk/eventhub/event-hubs/test/public/utils/mockService.ts @@ -0,0 +1,22 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. 
+ +import { readFileSync } from "fs"; +import { resolve as resolvePath } from "path"; +import { MockEventHub, MockServerOptions } from "@azure/mock-hub"; +import { getEnvVars } from "./testUtils"; + +export function createMockServer(options: MockServerOptions = {}): MockEventHub { + const env = getEnvVars("mock"); + return new MockEventHub({ + name: env.EVENTHUB_NAME, + partitionCount: 4, + connectionInactivityTimeoutInMs: 300000, // 5 minutes + port: 5671, + tlsOptions: { + cert: readFileSync(resolvePath(process.cwd(), "certs", "my-server.crt.pem")), + key: readFileSync(resolvePath(process.cwd(), "certs", "my-server.key.pem")) + }, + ...options + }); +} diff --git a/sdk/eventhub/event-hubs/test/public/utils/testUtils.ts b/sdk/eventhub/event-hubs/test/public/utils/testUtils.ts index 4c12ea968666..1035f975f4e7 100644 --- a/sdk/eventhub/event-hubs/test/public/utils/testUtils.ts +++ b/sdk/eventhub/event-hubs/test/public/utils/testUtils.ts @@ -22,7 +22,7 @@ export enum EnvVarKeys { AZURE_CLIENT_SECRET = "AZURE_CLIENT_SECRET" } -function getEnvVarValue(name: string): string | undefined { +export function getEnvVarValue(name: string): string | undefined { if (isNode) { return process.env[name]; } else { @@ -30,14 +30,27 @@ function getEnvVarValue(name: string): string | undefined { } } -export function getEnvVars(): { [key in EnvVarKeys]: any } { - return { - [EnvVarKeys.EVENTHUB_CONNECTION_STRING]: getEnvVarValue(EnvVarKeys.EVENTHUB_CONNECTION_STRING), - [EnvVarKeys.EVENTHUB_NAME]: getEnvVarValue(EnvVarKeys.EVENTHUB_NAME), - [EnvVarKeys.AZURE_TENANT_ID]: getEnvVarValue(EnvVarKeys.AZURE_TENANT_ID), - [EnvVarKeys.AZURE_CLIENT_ID]: getEnvVarValue(EnvVarKeys.AZURE_CLIENT_ID), - [EnvVarKeys.AZURE_CLIENT_SECRET]: getEnvVarValue(EnvVarKeys.AZURE_CLIENT_SECRET) - }; +export function getEnvVars(type: "live" | "mock"): { [key in EnvVarKeys]: any } { + if (type === "live") { + return { + [EnvVarKeys.EVENTHUB_CONNECTION_STRING]: getEnvVarValue( + 
EnvVarKeys.EVENTHUB_CONNECTION_STRING + ), + [EnvVarKeys.EVENTHUB_NAME]: getEnvVarValue(EnvVarKeys.EVENTHUB_NAME), + [EnvVarKeys.AZURE_TENANT_ID]: getEnvVarValue(EnvVarKeys.AZURE_TENANT_ID), + [EnvVarKeys.AZURE_CLIENT_ID]: getEnvVarValue(EnvVarKeys.AZURE_CLIENT_ID), + [EnvVarKeys.AZURE_CLIENT_SECRET]: getEnvVarValue(EnvVarKeys.AZURE_CLIENT_SECRET) + }; + } else { + return { + [EnvVarKeys.EVENTHUB_CONNECTION_STRING]: + "Endpoint=sb://localhost/;SharedAccessKeyName=Foo;SharedAccessKey=Bar", + [EnvVarKeys.EVENTHUB_NAME]: "mock-hub", + [EnvVarKeys.AZURE_TENANT_ID]: "AzureTenantId", + [EnvVarKeys.AZURE_CLIENT_ID]: "AzureClientId", + [EnvVarKeys.AZURE_CLIENT_SECRET]: "AzureClientSecret" + }; + } } export async function loopUntil(args: { diff --git a/sdk/eventhub/mock-hub/samples/javascript/src/ehSample.js b/sdk/eventhub/mock-hub/samples/javascript/src/ehSample.js index a731ae25a082..6c9d4ee56e5c 100644 --- a/sdk/eventhub/mock-hub/samples/javascript/src/ehSample.js +++ b/sdk/eventhub/mock-hub/samples/javascript/src/ehSample.js @@ -1,8 +1,8 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. -const { readFileSync } = require('fs'); -const { resolve: resolvePath } = require('path'); +const { readFileSync } = require("fs"); +const { resolve: resolvePath } = require("path"); const { MockEventHub } = require("@azure/mock-hub"); // Load the .env file if it exists @@ -14,19 +14,18 @@ async function main() { name: "mock-hub", partitionCount: 4, consumerGroups: ["foo"], - connectionInactivityTimeoutInMs: 300000 // 5 minutes - }); - - await service.start({ + connectionInactivityTimeoutInMs: 300000, // 5 minutes port: 5671, tlsOptions: { - pfx: readFileSync(resolvePath(__dirname, 'certs', 'my-cert.pfx')), + pfx: readFileSync(resolvePath(__dirname, "certs", "my-cert.pfx")), passphrase: process.env["CERT_PASSPHRASE"] } }); + await service.start(); + // Wait a minute then shut the service down. 
- await new Promise(resolve => setTimeout(resolve, 60000)); + await new Promise((resolve) => setTimeout(resolve, 60000)); return service.stop(); } diff --git a/sdk/eventhub/mock-hub/samples/typescript/src/ehSample.ts b/sdk/eventhub/mock-hub/samples/typescript/src/ehSample.ts index a813ca9db326..a8ccdb1d2ee8 100644 --- a/sdk/eventhub/mock-hub/samples/typescript/src/ehSample.ts +++ b/sdk/eventhub/mock-hub/samples/typescript/src/ehSample.ts @@ -1,8 +1,8 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. -import { readFileSync } from 'fs'; -import { resolve as resolvePath } from 'path'; +import { readFileSync } from "fs"; +import { resolve as resolvePath } from "path"; import { MockEventHub } from "@azure/mock-hub"; // Load the .env file if it exists @@ -14,19 +14,18 @@ export async function main(): Promise { name: "mock-hub", partitionCount: 4, consumerGroups: ["foo"], - connectionInactivityTimeoutInMs: 300000 // 5 minutes - }); - - await service.start({ + connectionInactivityTimeoutInMs: 300000, // 5 minutes port: 5671, tlsOptions: { - pfx: readFileSync(resolvePath(__dirname, 'certs', 'my-cert.pfx')), + pfx: readFileSync(resolvePath(__dirname, "certs", "my-cert.pfx")), passphrase: process.env["CERT_PASSPHRASE"] } }); + await service.start(); + // Wait a minute then shut the service down. - await new Promise(resolve => setTimeout(resolve, 60000)); + await new Promise((resolve) => setTimeout(resolve, 60000)); return service.stop(); } diff --git a/sdk/eventhub/mock-hub/src/index.ts b/sdk/eventhub/mock-hub/src/index.ts index 7ff25dd9c331..d50706562263 100644 --- a/sdk/eventhub/mock-hub/src/index.ts +++ b/sdk/eventhub/mock-hub/src/index.ts @@ -1,5 +1,5 @@ // Copyright (c) Microsoft Corporation. // Licensed under the MIT license. 
-export { MockEventHub, MockEventHubOptions } from "./services/eventHubs"; -export { StartOptions } from "./server/mockServer"; +export { IMockEventHub, MockEventHub, MockEventHubOptions } from "./services/eventHubs"; +export { MockServerOptions } from "./server/mockServer"; diff --git a/sdk/eventhub/mock-hub/src/server/mockServer.ts b/sdk/eventhub/mock-hub/src/server/mockServer.ts index 38ed33eadd55..13c03ac12d9a 100644 --- a/sdk/eventhub/mock-hub/src/server/mockServer.ts +++ b/sdk/eventhub/mock-hub/src/server/mockServer.ts @@ -18,7 +18,7 @@ import { } from "rhea"; import { convertBufferToMessages } from "../utils/convertBufferToMessage"; -export interface StartOptions { +export interface MockServerOptions { /** * The port number the server should listen on. * If not specified, an open port will be chosen at random. @@ -122,8 +122,11 @@ export class MockServer extends EventEmitter { private _container: Container; private _listener?: ReturnType; - constructor() { + private _options: MockServerOptions; + + constructor(options: MockServerOptions = {}) { super(); + this._options = options; this._container = create_container(); } @@ -143,8 +146,9 @@ export class MockServer extends EventEmitter { * Starts the server using the specified options. * @param options */ - public start(options: StartOptions = {}): Promise { + public start(): Promise { return new Promise((resolve, reject) => { + const options = this._options; const ONE_MB = 1024 * 1024; const listenOptions: ListenOptions & ConnectionOptions & any = { port: options.port ?? 
0, @@ -181,6 +185,7 @@ export class MockServer extends EventEmitter { emit(type: "receiverClose", event: ReceiverCloseEvent): boolean; emit(type: "senderClose", event: SenderCloseEvent): boolean; emit(type: "connectionClose", event: ConnectionCloseEvent): boolean; + emit(type: "shutdown", {}): boolean; emit(type: string, event: any): boolean { return super.emit(type, event); } @@ -239,6 +244,12 @@ export class MockServer extends EventEmitter { * @param listener */ public on(type: "onMessages", listener: (event: OnMessagesEvent) => void): this; + /** + * Fired when the server is shutting down. + * @param type + * @param listener + */ + public on(type: "shutdown", listener: () => void): this; public on(type: string, listener: (event: any) => void): this { return super.on(type, listener); } @@ -253,6 +264,7 @@ export class MockServer extends EventEmitter { return Promise.resolve(); } return new Promise((resolve, reject) => { + this.emit("shutdown", {}); listener.close((err) => { if (err) { reject(err); diff --git a/sdk/eventhub/mock-hub/src/services/eventHubs.ts b/sdk/eventhub/mock-hub/src/services/eventHubs.ts index 304b903de9a0..b34ff21f2823 100644 --- a/sdk/eventhub/mock-hub/src/services/eventHubs.ts +++ b/sdk/eventhub/mock-hub/src/services/eventHubs.ts @@ -13,7 +13,7 @@ import { } from "rhea"; import { MockServer, - StartOptions, + MockServerOptions, SenderOpenEvent, ReceiverOpenEvent, OnMessagesEvent, @@ -34,7 +34,7 @@ import { generateBadPartitionInfoResponse } from "../messages/event-hubs/partitionInfo"; -export interface MockEventHubOptions { +export interface MockEventHubOptions extends MockServerOptions { /** * The number of partitions for the Event Hub. * Defaults to 2. 
@@ -68,12 +68,21 @@ interface PartionReceiverEntityComponents { partitionId: string; } +export interface IMockEventHub { + readonly partitionIds: string[]; + readonly consumerGroups: Set; + readonly port: number; + + start: () => Promise; + stop: () => Promise; +} + /** * `MockEventHub` represents a mock EventHubs service. * * It stores events in memory and does not perform any auth verification. */ -export class MockEventHub { +export class MockEventHub implements IMockEventHub { /** * When the EventHub was 'created'. */ @@ -151,7 +160,7 @@ export class MockEventHub { this._consumerGroups = options.consumerGroups ?? []; this._connectionInactivityTimeoutInMs = options.connectionInactivityTimeoutInMs ?? 0; - this._mockServer = new MockServer(); + this._mockServer = new MockServer(options); this._mockServer.on("receiverOpen", this._handleReceiverOpen); this._mockServer.on("senderOpen", this._handleSenderOpen); this._mockServer.on("senderClose", this._handleSenderClose); @@ -164,6 +173,14 @@ export class MockEventHub { this._mockServer.on("connectionClose", (event) => { this._connections.delete(event.context.connection); }); + this._mockServer.on("shutdown", () => { + for (const connection of this._connections.values()) { + connection.close({ + condition: "amqp:connection:forced", + description: "The service is shutting down." 
+ }); + } + }); } private _handleConnectionInactivity = (connection: Connection) => { @@ -199,7 +216,6 @@ export class MockEventHub { * @param event */ private _handleReceiverOpen = (event: ReceiverOpenEvent) => { - console.log(`Attempting to open receiver: ${event.entityPath}`); event.receiver.set_source(event.receiver.source); event.receiver.set_target(event.receiver.target); if (this._isReceiverPartitionEntityPath(event.entityPath)) { @@ -228,7 +244,6 @@ export class MockEventHub { * @param event */ private _handleSenderOpen = (event: SenderOpenEvent) => { - console.log(`Attempting to open sender: ${event.entityPath}`); event.sender.set_source(event.sender.source); event.sender.set_target(event.sender.target); if (event.entityPath === "$cbs") { @@ -363,8 +378,6 @@ export class MockEventHub { * @param event */ private _handleOnMessages = (event: OnMessagesEvent) => { - console.log(`message entityPath: "${event.entityPath}"`); - // Handle batched messages first. if (event.entityPath === this._name) { // received a message without a partition id @@ -481,7 +494,6 @@ export class MockEventHub { const maxMessageSize = event.context.receiver?.get_option("max_message_size", 1024 * 1024) ?? 1024 * 1024; if (deliverySize >= maxMessageSize) { - console.log("too large!"); delivery.reject({ condition: "amqp:link:message-size-exceeded", description: `The received message (delivery-id:${ @@ -697,11 +709,9 @@ export class MockEventHub { /** * Starts the service. - * @param options */ - start(options: StartOptions) { - // this.enableDebug(1000); - return this._mockServer.start(options); + start() { + return this._mockServer.start(); } /**