diff --git a/common/lib/aws_client.ts b/common/lib/aws_client.ts index d08dc02a..6f16665b 100644 --- a/common/lib/aws_client.ts +++ b/common/lib/aws_client.ts @@ -177,6 +177,10 @@ export abstract class AwsClient extends EventEmitter { abstract rollback(): Promise; + unwrapPlugin(iface: new (...args: any[]) => T): T | null { + return this.pluginManager.unwrapPlugin(iface); + } + async isValid(): Promise { if (!this.targetClient) { return Promise.resolve(false); diff --git a/common/lib/connection_plugin_chain_builder.ts b/common/lib/connection_plugin_chain_builder.ts index b4e7bc44..9cc01baa 100644 --- a/common/lib/connection_plugin_chain_builder.ts +++ b/common/lib/connection_plugin_chain_builder.ts @@ -33,19 +33,16 @@ import { ReadWriteSplittingPluginFactory } from "./plugins/read_write_splitting_ import { OktaAuthPluginFactory } from "./plugins/federated_auth/okta_auth_plugin_factory"; import { HostMonitoringPluginFactory } from "./plugins/efm/host_monitoring_plugin_factory"; import { AuroraInitialConnectionStrategyFactory } from "./plugins/aurora_initial_connection_strategy_plugin_factory"; -import { - AuroraConnectionTrackerPluginFactory -} from "./plugins/connection_tracker/aurora_connection_tracker_plugin_factory"; +import { AuroraConnectionTrackerPluginFactory } from "./plugins/connection_tracker/aurora_connection_tracker_plugin_factory"; import { ConnectionProviderManager } from "./connection_provider_manager"; import { DeveloperConnectionPluginFactory } from "./plugins/dev/developer_connection_plugin_factory"; import { ConnectionPluginFactory } from "./plugin_factory"; import { LimitlessConnectionPluginFactory } from "./plugins/limitless/limitless_connection_plugin_factory"; -import { - FastestResponseStrategyPluginFactory -} from "./plugins/strategy/fastest_response/fastest_respose_strategy_plugin_factory"; +import { FastestResponseStrategyPluginFactory } from "./plugins/strategy/fastest_response/fastest_respose_strategy_plugin_factory"; import { 
CustomEndpointPluginFactory } from "./plugins/custom_endpoint/custom_endpoint_plugin_factory"; import { ConfigurationProfile } from "./profile/configuration_profile"; import { HostMonitoring2PluginFactory } from "./plugins/efm2/host_monitoring2_plugin_factory"; +import { BlueGreenPluginFactory } from "./plugins/bluegreen/blue_green_plugin_factory"; /* Type alias used for plugin factory sorting. It holds a reference to a plugin @@ -64,6 +61,7 @@ export class ConnectionPluginChainBuilder { ["initialConnection", { factory: AuroraInitialConnectionStrategyFactory, weight: 390 }], ["auroraConnectionTracker", { factory: AuroraConnectionTrackerPluginFactory, weight: 400 }], ["staleDns", { factory: StaleDnsPluginFactory, weight: 500 }], + ["bg", { factory: BlueGreenPluginFactory, weight: 550 }], ["readWriteSplitting", { factory: ReadWriteSplittingPluginFactory, weight: 600 }], ["failover", { factory: FailoverPluginFactory, weight: 700 }], ["failover2", { factory: Failover2PluginFactory, weight: 710 }], @@ -84,6 +82,7 @@ export class ConnectionPluginChainBuilder { [AuroraInitialConnectionStrategyFactory, 390], [AuroraConnectionTrackerPluginFactory, 400], [StaleDnsPluginFactory, 500], + [BlueGreenPluginFactory, 550], [ReadWriteSplittingPluginFactory, 600], [FailoverPluginFactory, 700], [Failover2PluginFactory, 710], diff --git a/common/lib/database_dialect/blue_green_dialect.ts b/common/lib/database_dialect/blue_green_dialect.ts new file mode 100644 index 00000000..1309524f --- /dev/null +++ b/common/lib/database_dialect/blue_green_dialect.ts @@ -0,0 +1,58 @@ +/* + Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"). + You may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +import { ClientWrapper } from "../client_wrapper"; + +export class BlueGreenResult { + private readonly _version: string; + private readonly _endpoint: string; + private readonly _port: number; + private readonly _role: string; + private readonly _status: string; + + constructor(version: string, endpoint: string, port: number, role: string, status: string) { + this._version = version; + this._endpoint = endpoint; + this._port = port; + this._role = role; + this._status = status; + } + + get version(): string { + return this._version; + } + + get endpoint(): string { + return this._endpoint; + } + + get port(): number { + return this._port; + } + + get role(): string { + return this._role; + } + + get status(): string { + return this._status; + } +} + +export interface BlueGreenDialect { + isBlueGreenStatusAvailable(clientWrapper: ClientWrapper): Promise; + getBlueGreenStatus(clientWrapper: ClientWrapper): Promise; +} diff --git a/common/lib/driver_connection_provider.ts b/common/lib/driver_connection_provider.ts index bb7a3a40..367f6c1e 100644 --- a/common/lib/driver_connection_provider.ts +++ b/common/lib/driver_connection_provider.ts @@ -27,7 +27,6 @@ import { promisify } from "util"; import { lookup } from "dns"; import { PluginService } from "./plugin_service"; import { logger } from "../logutils"; -import { maskProperties } from "./utils/utils"; import { ClientWrapper } from "./client_wrapper"; import { RoundRobinHostSelector } from "./round_robin_host_selector"; import { DriverDialect } from "./driver_dialect/driver_dialect"; @@ -88,14 +87,7 @@ export 
class DriverConnectionProvider implements ConnectionProvider { const fixedHost: string = this.rdsUtils.removeGreenInstancePrefix(hostInfo.host); resultProps.set(WrapperProperties.HOST.name, fixedHost); - logger.info( - "Connecting to " + - fixedHost + - " after correcting the hostname from " + - originalHost + - "\nwith properties: \n" + - JSON.stringify(Object.fromEntries(maskProperties(resultProps))) - ); + logger.info("Connecting to " + fixedHost + " after correcting the hostname from " + originalHost); resultTargetClient = driverDialect.connect(hostInfo, resultProps); } diff --git a/common/lib/error_handler.ts b/common/lib/error_handler.ts index 585b670f..69f999b1 100644 --- a/common/lib/error_handler.ts +++ b/common/lib/error_handler.ts @@ -21,6 +21,8 @@ export interface ErrorHandler { isNetworkError(e: Error): boolean; + isSyntaxError(e: Error): boolean; + /** * Checks whether there has been an unexpected error emitted and if the error is a type of login error. */ diff --git a/common/lib/plugin_manager.ts b/common/lib/plugin_manager.ts index 0951c324..580d71c0 100644 --- a/common/lib/plugin_manager.ts +++ b/common/lib/plugin_manager.ts @@ -413,10 +413,32 @@ export class PluginManager { throw new AwsWrapperError(Messages.get("PluginManager.unableToRetrievePlugin")); } + isPluginInUse(plugin: any): boolean { + for (const p of this._plugins) { + if (p instanceof plugin) { + return true; + } + } + return false; + } + static registerPlugin(pluginCode: string, pluginFactory: typeof ConnectionPluginFactory) { ConnectionPluginChainBuilder.PLUGIN_FACTORIES.set(pluginCode, { factory: pluginFactory, weight: ConnectionPluginChainBuilder.WEIGHT_RELATIVE_TO_PRIOR_PLUGIN }); } + + unwrapPlugin(iface: new (...args: any[]) => T): T | null { + if (!this._plugins) { + return null; + } + + for (const p of this._plugins) { + if (p instanceof iface) { + return p as any; + } + } + return null; + } } diff --git a/common/lib/plugin_service.ts b/common/lib/plugin_service.ts index 
a324b0ae..3b66dc28 100644 --- a/common/lib/plugin_service.ts +++ b/common/lib/plugin_service.ts @@ -140,6 +140,16 @@ export interface PluginService extends ErrorHandler { getTelemetryFactory(): TelemetryFactory; setAllowedAndBlockedHosts(allowedAndBlockedHosts: AllowedAndBlockedHosts): void; + + setStatus(clazz: any, status: T | null, clusterBound: boolean): void; + + setStatus(clazz: any, status: T | null, key: string): void; + + getStatus(clazz: any, clusterBound: boolean): T; + + getStatus(clazz: any, key: string): T; + + isPluginInUse(plugin: any): boolean; } export class PluginServiceImpl implements PluginService, HostListProviderService { @@ -159,6 +169,8 @@ export class PluginServiceImpl implements PluginService, HostListProviderService protected static readonly hostAvailabilityExpiringCache: CacheMap = new CacheMap(); readonly props: Map; private allowedAndBlockedHosts: AllowedAndBlockedHosts | null = null; + protected static readonly statusesExpiringCache: CacheMap = new CacheMap(); + protected static readonly DEFAULT_STATUS_CACHE_EXPIRE_NANO: number = 3_600_000_000_000; // 60 minutes constructor( container: PluginServiceManagerContainer, @@ -686,6 +698,10 @@ export class PluginServiceImpl implements PluginService, HostListProviderService return this.getDialect().getErrorHandler().isNetworkError(e); } + isSyntaxError(e: Error): boolean { + return this.getDialect().getErrorHandler().isSyntaxError(e); + } + hasLoginError(): boolean { return this.getDialect().getErrorHandler().hasLoginError(); } @@ -713,4 +729,53 @@ export class PluginServiceImpl implements PluginService, HostListProviderService static clearHostAvailabilityCache(): void { PluginServiceImpl.hostAvailabilityExpiringCache.clear(); } + + getStatus(clazz: any, clusterBound: boolean): T; + getStatus(clazz: any, key: string): T; + getStatus(clazz: any, clusterBound: boolean | string): T { + if (typeof clusterBound === "string") { + return 
PluginServiceImpl.statusesExpiringCache.get(this.getStatusCacheKey(clazz, clusterBound)); + } + let clusterId: string = null; + if (clusterBound) { + try { + clusterId = this._hostListProvider.getClusterId(); + } catch (e) { + // Do nothing + } + } + return this.getStatus(clazz, clusterId); + } + + protected getStatusCacheKey(clazz: T, key: string): string { + return `${!key ? "" : key.trim().toLowerCase()}::${clazz.toString()}`; + } + + setStatus(clazz: any, status: T | null, clusterBound: boolean): void; + setStatus(clazz: any, status: T | null, key: string): void; + setStatus(clazz: any, status: T, clusterBound: boolean | string): void { + if (typeof clusterBound === "string") { + const cacheKey: string = this.getStatusCacheKey(clazz, clusterBound); + if (!status) { + PluginServiceImpl.statusesExpiringCache.delete(cacheKey); + } else { + PluginServiceImpl.statusesExpiringCache.put(cacheKey, status, PluginServiceImpl.DEFAULT_STATUS_CACHE_EXPIRE_NANO); + } + return; + } + + let clusterId: string | null = null; + if (clusterBound) { + try { + clusterId = this._hostListProvider.getClusterId(); + } catch (e) { + // Do nothing + } + } + this.setStatus(clazz, status, clusterId); + } + + isPluginInUse(plugin: any) { + return this.pluginServiceManagerContainer.pluginManager!.isPluginInUse(plugin); + } } diff --git a/common/lib/plugins/bluegreen/blue_green_interim_status.ts b/common/lib/plugins/bluegreen/blue_green_interim_status.ts new file mode 100644 index 00000000..c2c97931 --- /dev/null +++ b/common/lib/plugins/bluegreen/blue_green_interim_status.ts @@ -0,0 +1,144 @@ +/* + Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"). + You may not use this file except in compliance with the License. 
import { BlueGreenPhase } from "./blue_green_phase";
import { HostInfo } from "../../host_info";
import { logTopology } from "../../utils/utils";
import { getValueHash } from "./blue_green_utils";

/**
 * Snapshot of a single monitor's view of a Blue/Green deployment: the phase
 * reported by the server plus the cluster topology and host-to-IP mappings
 * observed at monitoring start and at the current moment, and three summary
 * flags describing how far the topology has drifted.
 */
export class BlueGreenInterimStatus {
  public blueGreenPhase: BlueGreenPhase;
  public version: string;
  public port: number;
  public startTopology: HostInfo[];
  public currentTopology: HostInfo[];
  // NOTE(review): generic parameters reconstructed from usage (keys are host
  // names, values appear to be resolved IP strings) — confirm against source.
  public startIpAddressesByHostMap: Map<string, string>;
  public currentIpAddressesByHostMap: Map<string, string>;
  public hostNames: Set<string>; // all known host names; just host, no port
  public allStartTopologyIpChanged: boolean;
  public allStartTopologyEndpointsRemoved: boolean;
  public allTopologyChanged: boolean;

  constructor(
    blueGreenPhase: BlueGreenPhase,
    version: string,
    port: number,
    startTopology: HostInfo[],
    currentTopology: HostInfo[],
    startIpAddressesByHostMap: Map<string, string>,
    currentIpAddressesByHostMap: Map<string, string>,
    hostNames: Set<string>,
    allStartTopologyIpChanged: boolean,
    allStartTopologyEndpointsRemoved: boolean,
    allTopologyChanged: boolean
  ) {
    this.blueGreenPhase = blueGreenPhase;
    this.version = version;
    this.port = port;
    this.startTopology = startTopology;
    this.currentTopology = currentTopology;
    this.startIpAddressesByHostMap = startIpAddressesByHostMap;
    this.currentIpAddressesByHostMap = currentIpAddressesByHostMap;
    this.hostNames = hostNames;
    this.allStartTopologyIpChanged = allStartTopologyIpChanged;
    this.allStartTopologyEndpointsRemoved = allStartTopologyEndpointsRemoved;
    this.allTopologyChanged = allTopologyChanged;
  }

  /** Multi-line human-readable dump of this snapshot, for logging. */
  public toString(): string {
    const currentIpMap = Array.from(this.currentIpAddressesByHostMap.entries())
      .map(([key, value]) => `${key} -> ${value}`)
      .join("\n\t");

    const startIpMap = Array.from(this.startIpAddressesByHostMap.entries())
      .map(([key, value]) => `${key} -> ${value}`)
      .join("\n\t");

    const allHostNamesStr = Array.from(this.hostNames).join("\n\t");
    const startTopologyStr = logTopology(this.startTopology, "");
    const currentTopologyStr = logTopology(this.currentTopology, "");

    return `${this.constructor.name} [
    phase: ${this.blueGreenPhase?.name ?? ""},
    version: '${this.version}',
    port: ${this.port},
    hostNames:
    ${!allHostNamesStr ? "-" : allHostNamesStr}
    startTopology: ${!startTopologyStr ? "-" : startTopologyStr}
    start IP map:
    ${!startIpMap ? "-" : startIpMap}
    currentTopology: ${!currentTopologyStr ? "-" : currentTopologyStr}
    current IP map:
    ${!currentIpMap ? "-" : currentIpMap}
    allStartTopologyIpChanged: ${this.allStartTopologyIpChanged}
    allStartTopologyEndpointsRemoved: ${this.allStartTopologyEndpointsRemoved}
    allTopologyChanged: ${this.allTopologyChanged}
  ]`;
  }

  /**
   * Order-insensitive content hash of this snapshot (collections are sorted
   * before hashing). Used to detect whether anything changed between two
   * monitoring passes without comparing field-by-field.
   */
  getCustomHashCode(): bigint {
    let result: bigint = getValueHash(1n, this.blueGreenPhase?.name || "");
    result = getValueHash(result, this.version || "");
    result = getValueHash(result, this.port.toString());
    result = getValueHash(result, this.allStartTopologyIpChanged.toString());
    result = getValueHash(result, this.allStartTopologyEndpointsRemoved.toString());
    result = getValueHash(result, this.allTopologyChanged.toString());

    result = getValueHash(result, this.hostNames == null ? "" : Array.from(this.hostNames).sort().join(","));

    result = getValueHash(
      result,
      this.startTopology == null
        ? ""
        : this.startTopology
            .map((x) => x.getHostAndPort() + x.role)
            .sort()
            .join(",")
    );

    result = getValueHash(
      result,
      this.currentTopology == null
        ? ""
        : this.currentTopology
            .map((x) => x.getHostAndPort() + x.role)
            .sort()
            .join(",")
    );

    result = getValueHash(
      result,
      this.startIpAddressesByHostMap == null
        ? ""
        : Array.from(this.startIpAddressesByHostMap.entries())
            .map(([key, value]) => key + value)
            .sort()
            .join(",")
    );

    result = getValueHash(
      result,
      this.currentIpAddressesByHostMap == null
        ? ""
        : Array.from(this.currentIpAddressesByHostMap.entries())
            .map(([key, value]) => key + value)
            .sort()
            .join(",")
    );

    return result;
  }
}

// From blue_green_interval_rate.ts (also contained in this span): how
// aggressively the Blue/Green status monitor polls for status changes.
export enum BlueGreenIntervalRate {
  BASELINE,
  INCREASED,
  HIGH
}
import { AwsWrapperError } from "../../utils/errors";
import { Messages } from "../../utils/messages";

/**
 * Ordered lifecycle phase of a Blue/Green deployment switchover. Instances
 * are the fixed set of static constants below; `phase` gives the ordering and
 * `isActiveSwitchoverOrCompleted` marks phases where traffic routing rules
 * must be applied.
 */
export class BlueGreenPhase {
  static readonly NOT_CREATED: BlueGreenPhase = new BlueGreenPhase("NOT_CREATED", 0, false);
  static readonly CREATED: BlueGreenPhase = new BlueGreenPhase("CREATED", 1, false);
  static readonly PREPARATION: BlueGreenPhase = new BlueGreenPhase("PREPARATION", 2, true);
  static readonly IN_PROGRESS: BlueGreenPhase = new BlueGreenPhase("IN_PROGRESS", 3, true);
  static readonly POST: BlueGreenPhase = new BlueGreenPhase("POST", 4, true);
  static readonly COMPLETED: BlueGreenPhase = new BlueGreenPhase("COMPLETED", 5, true);

  private readonly _name: string;
  private readonly _phase: number;
  private readonly _isActiveSwitchoverOrCompleted: boolean;

  constructor(name: string, phase: number, activeSwitchoverOrCompleted: boolean) {
    this._name = name;
    this._phase = phase;
    this._isActiveSwitchoverOrCompleted = activeSwitchoverOrCompleted;
  }

  // Maps raw server-side status strings to phases (status metadata v1.0).
  private static readonly blueGreenStatusMapping: { [key: string]: BlueGreenPhase } = {
    AVAILABLE: BlueGreenPhase.CREATED,
    SWITCHOVER_INITIATED: BlueGreenPhase.PREPARATION,
    SWITCHOVER_IN_PROGRESS: BlueGreenPhase.IN_PROGRESS,
    SWITCHOVER_IN_POST_PROCESSING: BlueGreenPhase.POST,
    SWITCHOVER_COMPLETED: BlueGreenPhase.COMPLETED
  };

  /**
   * Parses a raw status string into a phase.
   *
   * @param value raw status reported by the server; missing/empty means the
   *              deployment has not been created.
   * @param version status metadata version; reserved for selecting a
   *                version-specific mapping (currently unused).
   * @throws AwsWrapperError if the status string is not recognized.
   */
  public static parsePhase(value?: string, version?: string): BlueGreenPhase {
    if (!value) {
      return BlueGreenPhase.NOT_CREATED;
    }

    // Version parameter may be used to identify a proper mapping.
    // For now lets assume that mapping is always the same.
    const phase = this.blueGreenStatusMapping[value.toUpperCase()];

    if (!phase) {
      throw new AwsWrapperError(Messages.get("Bgd.unknownStatus", value));
    }
    return phase;
  }

  get name(): string {
    return this._name;
  }

  get phase(): number {
    return this._phase;
  }

  get isActiveSwitchoverOrCompleted(): boolean {
    return this._isActiveSwitchoverOrCompleted;
  }
}
+*/ + +import { AbstractConnectionPlugin } from "../../abstract_connection_plugin"; +import { HostInfo } from "../../host_info"; +import { ClientWrapper } from "../../client_wrapper"; +import { PluginService } from "../../plugin_service"; +import { BlueGreenStatusProvider } from "./blue_green_status_provider"; +import { BlueGreenStatus } from "./blue_green_status"; +import { WrapperProperties } from "../../wrapper_property"; +import { getTimeInNanos } from "../../utils/utils"; +import { ConnectRouting } from "./routing/connect_routing"; +import { IamAuthenticationPlugin } from "../../authentication/iam_authentication_plugin"; +import { BlueGreenRole } from "./blue_green_role"; +import { ExecuteRouting, RoutingResultHolder } from "./routing/execute_routing"; +import { CanReleaseResources } from "../../can_release_resources"; + +export interface BlueGreenProviderSupplier { + create(pluginService: PluginService, props: Map, bgdId: string): BlueGreenStatusProvider; +} + +export class BlueGreenPlugin extends AbstractConnectionPlugin implements CanReleaseResources { + private static readonly SUBSCRIBED_METHODS: Set = new Set([ + // We should NOT subscribe to "forceConnect" pipeline since it's used by + // BG monitoring, and we don't want to intercept/block those monitoring connections. 
+ "connect", + "query" + ]); + + private static readonly CLOSED_METHOD_NAMES: Set = new Set(["end", "abort"]); + protected readonly pluginService: PluginService; + protected readonly properties: Map; + protected bgProviderSupplier: BlueGreenProviderSupplier; + protected bgStatus: BlueGreenStatus = null; + + protected bgdId: string = null; + protected isIamInUse: boolean = false; + + protected startTimeNano: bigint = BigInt(0); + protected endTimeNano: bigint = BigInt(0); + private static provider: Map = new Map(); + + constructor(pluginService: PluginService, properties: Map, bgProviderSupplier: BlueGreenProviderSupplier = null) { + super(); + if (!bgProviderSupplier) { + bgProviderSupplier = { + create: (pluginService: PluginService, props: Map, bgdId: string): BlueGreenStatusProvider => { + return new BlueGreenStatusProvider(pluginService, props, bgdId); + } + }; + } + + this.properties = properties; + this.pluginService = pluginService; + this.bgProviderSupplier = bgProviderSupplier; + this.bgdId = WrapperProperties.BGD_ID.get(this.properties).trim().toLowerCase(); + } + + getSubscribedMethods(): Set { + return BlueGreenPlugin.SUBSCRIBED_METHODS; + } + + async connect( + hostInfo: HostInfo, + props: Map, + isInitialConnection: boolean, + connectFunc: () => Promise + ): Promise { + this.resetRoutingTimeNano(); + + try { + this.bgStatus = this.pluginService.getStatus(BlueGreenStatus, this.bgdId); + + if (!this.bgStatus) { + return this.regularOpenConnection(connectFunc, isInitialConnection); + } + + if (isInitialConnection) { + this.isIamInUse = this.pluginService.isPluginInUse(IamAuthenticationPlugin); + } + + const hostRole: BlueGreenRole = this.bgStatus.getRole(hostInfo); + + if (!hostRole) { + // Connection to a host that isn't participating in BG switchover. 
+ return this.regularOpenConnection(connectFunc, isInitialConnection); + } + + let client: ClientWrapper | null = null; + let routing: ConnectRouting | undefined = this.bgStatus.connectRouting.filter((routing: ConnectRouting) => + routing.isMatch(hostInfo, hostRole) + )[0]; + + if (!routing) { + return this.regularOpenConnection(connectFunc, isInitialConnection); + } + + this.startTimeNano = getTimeInNanos(); + while (routing && !client) { + client = await routing.apply(this, hostInfo, props, isInitialConnection, connectFunc, this.pluginService); + if (client) { + break; + } + this.bgStatus = this.pluginService.getStatus(BlueGreenStatus, this.bgdId); + if (!this.bgStatus) { + this.endTimeNano = getTimeInNanos(); + return this.regularOpenConnection(connectFunc, isInitialConnection); + } + routing = this.bgStatus.connectRouting.filter((routing: ConnectRouting) => routing.isMatch(hostInfo, hostRole))[0]; + } + + this.endTimeNano = getTimeInNanos(); + if (!client) { + client = await connectFunc(); + } + + if (isInitialConnection) { + // Provider should be initialized after connection is open and a dialect is properly identified. + this.initProvider(); + } + + return client; + } finally { + if (this.startTimeNano > 0 && this.endTimeNano === BigInt(0)) { + this.endTimeNano = getTimeInNanos(); + } + } + } + + async execute(methodName: string, methodFunc: () => Promise, methodArgs: any[]): Promise { + this.resetRoutingTimeNano(); + + try { + this.initProvider(); + + if (BlueGreenPlugin.CLOSED_METHOD_NAMES.has(methodName)) { + return await methodFunc(); + } + + this.bgStatus = this.pluginService.getStatus(BlueGreenStatus, this.bgdId); + + if (!this.bgStatus) { + this.endTimeNano = getTimeInNanos(); + return await methodFunc(); + } + + const currentHostInfo: HostInfo = this.pluginService.getCurrentHostInfo(); + const hostRole: BlueGreenRole = this.bgStatus.getRole(currentHostInfo); + + if (!hostRole) { + // Connection to a host that isn't participating in BG switchover. 
+ return await methodFunc(); + } + + let result: RoutingResultHolder | null = null; + let routing: ExecuteRouting | undefined = this.bgStatus.executeRouting.filter((routing: ExecuteRouting) => + routing.isMatch(currentHostInfo, hostRole) + )[0]; + + if (!routing) { + return await methodFunc(); + } + + this.startTimeNano = getTimeInNanos(); + + while (routing && result && !result.isPresent()) { + result = await routing.apply(this, methodName, methodFunc, methodArgs, this.properties, this.pluginService); + if (!result?.isPresent()) { + this.bgStatus = this.pluginService.getStatus(BlueGreenStatus, this.bgdId); + if (!this.bgStatus) { + this.endTimeNano = getTimeInNanos(); + return await methodFunc(); + } + routing = this.bgStatus.executeRouting.filter((routing: ExecuteRouting) => routing.isMatch(currentHostInfo, hostRole))[0]; + } + } + + this.endTimeNano = getTimeInNanos(); + + if (result?.isPresent()) { + return result.get(); + } + + return await methodFunc(); + } finally { + if (this.startTimeNano > 0 && this.endTimeNano === BigInt(0)) { + this.endTimeNano = getTimeInNanos(); + } + } + } + + protected async regularOpenConnection(connectFunc: () => Promise, isInitialConnection: boolean): Promise { + const client: ClientWrapper = await connectFunc(); + if (isInitialConnection) { + // Provider should be initialized after connection is open and a dialect is properly identified. + this.initProvider(); + } + + return client; + } + + private initProvider() { + const provider = BlueGreenPlugin.provider.get(this.bgdId); + if (!provider) { + const provider = this.bgProviderSupplier.create(this.pluginService, this.properties, this.bgdId); + BlueGreenPlugin.provider.set(this.bgdId, provider); + } + } + + public getHoldTimeNano(): bigint { + return this.startTimeNano === BigInt(0) + ? BigInt(0) + : this.endTimeNano === BigInt(0) + ? 
getTimeInNanos() - this.startTimeNano + : this.endTimeNano - this.startTimeNano; + } + + private resetRoutingTimeNano() { + this.startTimeNano = BigInt(0); + this.endTimeNano = BigInt(0); + } + + releaseResources(): Promise { + const provider: BlueGreenStatusProvider = BlueGreenPlugin.provider.get(this.bgdId); + provider.clearResources(); + return Promise.resolve(); + } +} diff --git a/common/lib/plugins/bluegreen/blue_green_plugin_factory.ts b/common/lib/plugins/bluegreen/blue_green_plugin_factory.ts new file mode 100644 index 00000000..f5ba48a3 --- /dev/null +++ b/common/lib/plugins/bluegreen/blue_green_plugin_factory.ts @@ -0,0 +1,36 @@ +/* + Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"). + You may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
import { ConnectionPluginFactory } from "../../plugin_factory";
import { PluginService } from "../../plugin_service";
import { ConnectionPlugin } from "../../connection_plugin";
import { AwsWrapperError } from "../../utils/errors";
import { Messages } from "../../utils/messages";

/**
 * Factory registered under the "bg" plugin code. Lazily imports the
 * Blue/Green plugin module on first use and caches it for the process.
 */
export class BlueGreenPluginFactory extends ConnectionPluginFactory {
  // Cached result of the dynamic import of ./blue_green_plugin.
  private static blueGreenPlugin: any;

  /**
   * Instantiates a BlueGreenPlugin.
   * @throws AwsWrapperError if the dynamic import or construction fails.
   */
  async getInstance(pluginService: PluginService, props: Map<string, any>): Promise<ConnectionPlugin> {
    try {
      if (!BlueGreenPluginFactory.blueGreenPlugin) {
        BlueGreenPluginFactory.blueGreenPlugin = await import("./blue_green_plugin");
      }
      return new BlueGreenPluginFactory.blueGreenPlugin.BlueGreenPlugin(pluginService, props);
    } catch (error: any) {
      throw new AwsWrapperError(Messages.get("ConnectionPluginChainBuilder.errorImportingPlugin", error.message, "BlueGreenPluginFactory"));
    }
  }
}
+*/ + +import { AwsWrapperError } from "../../utils/errors"; +import { Messages } from "../../utils/messages"; + +export class BlueGreenRole { + static readonly SOURCE = new BlueGreenRole("SOURCE", 0); + static readonly TARGET = new BlueGreenRole("TARGET", 1); + + private static readonly blueGreenRoleMapping_1_0: Map = new Map() + .set("BLUE_GREEN_DEPLOYMENT_SOURCE", BlueGreenRole.SOURCE) + .set("BLUE_GREEN_DEPLOYMENT_TARGET", BlueGreenRole.TARGET); + + private readonly _name: string; + private readonly _value: number; + + constructor(name: string, value: number) { + this._name = name; + this._value = value; + } + + get name(): string { + return this._name; + } + + get value(): number { + return this._value; + } + + public static parseRole(value: string, version: string): BlueGreenRole { + if (version === "1.0") { + if (!value?.trim()) { + throw new AwsWrapperError(Messages.get("Bgd.unknownRole", value)); + } + + const role = BlueGreenRole.blueGreenRoleMapping_1_0.get(value.toUpperCase()); + + if (role == null) { + throw new AwsWrapperError(Messages.get("Bgd.unknownRole", value)); + } + + return role; + } + + throw new AwsWrapperError(Messages.get("Bgd.unknownVersion", version)); + } +} diff --git a/common/lib/plugins/bluegreen/blue_green_status.ts b/common/lib/plugins/bluegreen/blue_green_status.ts new file mode 100644 index 00000000..1c17463f --- /dev/null +++ b/common/lib/plugins/bluegreen/blue_green_status.ts @@ -0,0 +1,92 @@ +/* + Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"). + You may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +import { BlueGreenPhase } from "./blue_green_phase"; +import { ConnectRouting } from "./routing/connect_routing"; +import { ExecuteRouting } from "./routing/execute_routing"; +import { BlueGreenRole } from "./blue_green_role"; +import { HostInfo } from "../../host_info"; +import { Pair } from "../../utils/utils"; + +export class BlueGreenStatus { + private readonly bgdId: string; + private readonly _currentPhase: BlueGreenPhase; + private readonly _unmodifiableConnectRouting: readonly ConnectRouting[]; + private readonly _unmodifiableExecuteRouting: readonly ExecuteRouting[]; + + private readonly _roleByHost: Map; + private readonly _correspondingHosts: Map>; + + constructor( + bgdId: string, + phase: BlueGreenPhase, + unmodifiableConnectRouting?: ConnectRouting[], + unmodifiableExecuteRouting?: ExecuteRouting[], + roleByHost?: Map, + correspondingHosts?: Map> + ) { + this.bgdId = bgdId; + this._currentPhase = phase; + this._unmodifiableConnectRouting = Object.freeze(unmodifiableConnectRouting ?? []); + this._unmodifiableExecuteRouting = Object.freeze(unmodifiableExecuteRouting ?? []); + this._roleByHost = roleByHost ?? new Map(); + this._correspondingHosts = correspondingHosts ?? 
new Map(); + } + + get currentPhase(): BlueGreenPhase { + return this._currentPhase; + } + + get connectRouting(): readonly ConnectRouting[] { + return this._unmodifiableConnectRouting; + } + + get executeRouting(): readonly ExecuteRouting[] { + return this._unmodifiableExecuteRouting; + } + + get roleByHost(): Map { + return this._roleByHost; + } + + get correspondingHosts(): Map> { + return this._correspondingHosts; + } + + getRole(hostInfo: HostInfo): BlueGreenRole { + return this._roleByHost.get(hostInfo.host.toLowerCase()); + } + + toString(): string { + const roleByHostMap = Array.from(this._roleByHost.entries()) + .map(([key, value]) => `\t${key} -> ${value.name}`) + .join("\n"); + + const connectRoutingStr = this._unmodifiableConnectRouting.map((x) => x.toString()).join("\n"); + const executeRoutingStr = this._unmodifiableExecuteRouting.map((x) => x.toString()).join("\n"); + + return `${this.constructor.name} [ + bgdId: '${this.bgdId}', + phase: ${this.currentPhase.name}, + Connect routing: + ${connectRoutingStr ?? "-"} + Execute routing: + ${executeRoutingStr ?? "-"} + roleByHost: + ${roleByHostMap ?? "-"} + ]`; + } +} diff --git a/common/lib/plugins/bluegreen/blue_green_status_monitor.ts b/common/lib/plugins/bluegreen/blue_green_status_monitor.ts new file mode 100644 index 00000000..f320c84f --- /dev/null +++ b/common/lib/plugins/bluegreen/blue_green_status_monitor.ts @@ -0,0 +1,526 @@ +/* + Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"). + You may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +import { BlueGreenRole } from "./blue_green_role"; +import { BlueGreenInterimStatus } from "./blue_green_interim_status"; +import { BlueGreenDialect, BlueGreenResult } from "../../database_dialect/blue_green_dialect"; +import { PluginService } from "../../plugin_service"; +import { BlueGreenIntervalRate } from "./blue_green_interval_rate"; +import { HostInfo } from "../../host_info"; +import { RdsUtils } from "../../utils/rds_utils"; +import { HostInfoBuilder } from "../../host_info_builder"; +import { SimpleHostAvailabilityStrategy } from "../../host_availability/simple_host_availability_strategy"; +import { HostListProvider } from "../../host_list_provider/host_list_provider"; +import { BlueGreenPhase } from "./blue_green_phase"; +import { ClientWrapper } from "../../client_wrapper"; +import { Messages } from "../../utils/messages"; +import { logger } from "../../../logutils"; +import { convertMsToNanos, getTimeInNanos, sleep } from "../../utils/utils"; +import { lookup } from "dns"; +import { promisify } from "util"; +import { WrapperProperties } from "../../wrapper_property"; +import { HostListProviderService } from "../../host_list_provider_service"; +import { StatusInfo } from "./status_info"; +import { DatabaseDialect } from "../../database_dialect/database_dialect"; +import { AwsWrapperError } from "../../utils/errors"; + +export interface OnBlueGreenStatusChange { + onBlueGreenStatusChanged(role: BlueGreenRole, interimStatus: BlueGreenInterimStatus): void; +} + +export class BlueGreenStatusMonitor { + protected static readonly ONE_MINUTE_IN_MS: number = 60 * 1000; + protected static readonly DEFAULT_CHECK_INTERVAL_MS: number = 5 * this.ONE_MINUTE_IN_MS; + protected static readonly latestKnownVersion: string = "1.0"; + protected static readonly BG_CLUSTER_ID = "941d00a8-8238-4f7d-bf59-771bff783a8e"; + // Add more versions here if needed. 
+ protected static readonly knownVersions: Set = new Set([BlueGreenStatusMonitor.latestKnownVersion]); + + protected readonly blueGreenDialect: BlueGreenDialect; + protected readonly pluginService: PluginService; + protected readonly bgdId: string; + protected readonly props: Map; + protected readonly role: BlueGreenRole; + protected readonly onBlueGreenStatusChangeFunc: OnBlueGreenStatusChange; + + // Status check interval time in millis for each BlueGreenIntervalRate. + protected readonly statusCheckIntervalMap: Map; + + protected readonly initialHostInfo: HostInfo; + + protected readonly rdsUtils: RdsUtils = new RdsUtils(); + + protected readonly hostInfoBuilder: HostInfoBuilder = new HostInfoBuilder({ hostAvailabilityStrategy: new SimpleHostAvailabilityStrategy() }); + protected collectedIpAddresses: boolean = true; + protected collectedTopology: boolean = true; + + protected intervalRate: BlueGreenIntervalRate = BlueGreenIntervalRate.BASELINE; + protected stop: boolean = false; + protected useIpAddress: boolean = false; + + protected hostListProvider: HostListProvider | null = null; + protected startTopology: HostInfo[] = []; + protected currentTopology: HostInfo[] = []; + protected startIpAddressesByHostMap: Map = new Map(); + protected currentIpAddressesByHostMap: Map = new Map(); + + // Track all endpoints in startTopology and check whether all their IP addresses have changed. + protected allStartTopologyIpChanged: boolean = false; + + // Track all endpoints in startTopology and check whether they are removed (i.e. DNS could not be resolved). 
+ protected allStartTopologyEndpointsRemoved: boolean = false; + protected allTopologyChanged: boolean = false; + protected currentPhase: BlueGreenPhase | null = BlueGreenPhase.NOT_CREATED; + protected hostNames: Set | null = new Set(); // No port + + protected version: string = "1.0"; + protected port: number = -1; + + protected clientWrapper: ClientWrapper | null = null; + protected connectionHostInfo: HostInfo | null = null; + protected connectedIpAddress: string | null = null; + protected connectionHostInfoCorrect: boolean = false; + protected panicMode: boolean = true; + + constructor( + role: BlueGreenRole, + bgdId: string, + initialHostInfo: HostInfo, + pluginService: PluginService, + props: Map, + statusCheckIntervalMap: Map, + onBlueGreenStatusChangeFunc: OnBlueGreenStatusChange + ) { + this.role = role; + this.bgdId = bgdId; + this.initialHostInfo = initialHostInfo; + this.pluginService = pluginService; + this.props = props; + this.statusCheckIntervalMap = statusCheckIntervalMap; + this.onBlueGreenStatusChangeFunc = onBlueGreenStatusChangeFunc; + + const dialect: DatabaseDialect = this.pluginService.getDialect(); + if (!BlueGreenStatusMonitor.implementsBlueGreenDialect(dialect)) { + throw new AwsWrapperError(Messages.get("Bgd.unsupportedDialect", bgdId, dialect.getDialectName())); + } + this.blueGreenDialect = (this.pluginService.getDialect()); + + // Intentionally not calling await on this method. 
+ this.runMonitoringLoop(); + } + + private static implementsBlueGreenDialect(dialect: any): dialect is BlueGreenDialect { + return typeof dialect?.isBlueGreenStatusAvailable === "function" && typeof dialect?.getBlueGreenStatus === "function"; + } + + protected async runMonitoringLoop(): Promise { + try { + while (!this.stop) { + const oldPhase: BlueGreenPhase | null = this.currentPhase; + await this.openConnection(); + await this.collectStatus(); + await this.collectTopology(); + await this.collectHostIpAddresses(); + this.updateIpAddressFlags(); + + if (this.currentPhase !== null && (oldPhase === null || oldPhase !== this.currentPhase)) { + logger.debug(Messages.get("Bgd.statusChanged", this.role.name, this.currentPhase.name)); + } + + if (this.onBlueGreenStatusChangeFunc !== null) { + this.onBlueGreenStatusChangeFunc.onBlueGreenStatusChanged( + this.role, + new BlueGreenInterimStatus( + this.currentPhase, + this.version, + this.port, + this.startTopology, + this.currentTopology, + this.startIpAddressesByHostMap, + this.currentIpAddressesByHostMap, + this.hostNames, + this.allStartTopologyIpChanged, + this.allStartTopologyEndpointsRemoved, + this.allTopologyChanged + ) + ); + + const delayMs: number = Number( + this.statusCheckIntervalMap.get( + (this.panicMode ? BlueGreenIntervalRate.HIGH : this.intervalRate) ?? 
BlueGreenStatusMonitor.DEFAULT_CHECK_INTERVAL_MS + ) + ); + + await this.delay(delayMs); + } + } + } catch (e: any) { + logger.debug(Messages.get("Bgd.monitoringUnhandledError", this.role.name, JSON.stringify(e))); + } finally { + await this.closeConnection(); + logger.debug(Messages.get("Bgd.monitoringCompleted", this.role.name)); + } + } + + protected async delay(delayMs: number): Promise { + const start: bigint = getTimeInNanos(); + const end: bigint = start + convertMsToNanos(delayMs); + const currentBlueGreenIntervalRate: BlueGreenIntervalRate = this.intervalRate; + + const currentPanic: boolean = this.panicMode; + const minDelay = Math.min(delayMs, 50); + + // Repeat until the intervalType has changed, the stop flag has changed, the panic mode flag has changed, + // or we have hit the specified delay time. + + do { + await sleep(minDelay); + } while (this.intervalRate === currentBlueGreenIntervalRate && getTimeInNanos() < end && !this.stop && this.panicMode === currentPanic); + } + + setIntervalRate(blueGreenIntervalRate: BlueGreenIntervalRate): void { + this.intervalRate = blueGreenIntervalRate; + } + + setCollectedIpAddresses(collectedIpAddresses: boolean): void { + this.collectedIpAddresses = collectedIpAddresses; + } + + setCollectedTopology(collectedTopology: boolean): void { + this.collectedTopology = collectedTopology; + } + + setUseIpAddress(useIpAddresses: boolean): void { + this.useIpAddress = useIpAddresses; + } + + setStop(stop: boolean): void { + this.stop = stop; + } + + resetCollectedData(): void { + this.startIpAddressesByHostMap.clear(); + this.startTopology = []; + this.hostNames.clear(); + } + + protected async collectHostIpAddresses(): Promise { + this.currentIpAddressesByHostMap.clear(); + + if (this.hostNames === null) { + return; + } + + for (const host of this.hostNames) { + if (this.currentIpAddressesByHostMap.has(host)) { + continue; + } + this.currentIpAddressesByHostMap.set(host, await this.getIpAddress(host)); + } + if 
(this.collectedIpAddresses) { + this.startIpAddressesByHostMap = new Map([...this.currentIpAddressesByHostMap]); + } + } + + protected updateIpAddressFlags(): void { + if (this.collectedIpAddresses) { + this.allStartTopologyIpChanged = false; + this.allStartTopologyEndpointsRemoved = false; + this.allTopologyChanged = false; + return; + } + + if (!this.collectedIpAddresses) { + // Check whether all hosts in startTopology resolve to new IP addresses. + this.allStartTopologyIpChanged = + this.startTopology.length > 0 && + this.startTopology.every((x) => { + const host = x.host; + const startIp = this.startIpAddressesByHostMap.get(host); + const currentIp = this.currentIpAddressesByHostMap.get(host); + + return startIp !== undefined && currentIp !== undefined && startIp !== currentIp; + }); + } + + // Check whether all hosts in startTopology no longer have IP addresses. This indicates that the startTopology + // hosts can no longer be resolved because their DNS entries no longer exist. + this.allStartTopologyEndpointsRemoved = + this.startTopology.length > 0 && + this.startTopology.every((x) => { + const host = x.host; + const startIp = this.startIpAddressesByHostMap.get(host); + const currentIp = this.currentIpAddressesByHostMap.get(host); + + return startIp !== null && !currentIp; + }); + + if (!this.collectedTopology) { + // Check whether all hosts in currentTopology do not exist in startTopology + const startTopologyNodes: Set = !this.startTopology ? 
new Set() : new Set(this.startTopology.map((hostSpec) => hostSpec.host)); + + const currentTopologyCopy = this.currentTopology; + + this.allTopologyChanged = + currentTopologyCopy && + currentTopologyCopy.length > 0 && + startTopologyNodes.size > 0 && + !currentTopologyCopy.some((host) => startTopologyNodes.has(host.host)); + } + } + + protected async getIpAddress(host: string): Promise { + try { + const lookupResult = await promisify(lookup)(host, {}); + return lookupResult.address; + } catch (error) { + return null; + } + } + + protected async collectTopology(): Promise { + if (!this.hostListProvider) { + return; + } + + const client: ClientWrapper = this.clientWrapper; + if (await this.isConnectionClosed(client)) { + return; + } + + this.currentTopology = await this.hostListProvider.forceRefresh(client); + if (this.collectedTopology) { + this.startTopology = this.currentTopology; + } + + // Do not update endpoints when topology is frozen. + const currentTopologyCopy = this.currentTopology; + + if (currentTopologyCopy && this.collectedTopology) { + this.hostNames = new Set(currentTopologyCopy.map((hostSpec) => hostSpec.host)); + } + } + + protected async closeConnection(): Promise { + const client: ClientWrapper = this.clientWrapper; + this.clientWrapper = null; + + try { + if (client && (await this.pluginService.isClientValid(client))) { + await client.end(); + } + } catch (e: any) { + // ignore + } + } + + protected async collectStatus(): Promise { + const client: ClientWrapper = this.clientWrapper; + try { + if (await this.isConnectionClosed(client)) { + this.panicMode = true; + return; + } + + if (!(await this.blueGreenDialect.isBlueGreenStatusAvailable(client))) { + if (await this.pluginService.isClientValid(client)) { + this.currentPhase = BlueGreenPhase.NOT_CREATED; + logger.debug(Messages.get("Bgd.statusNotAvailable", this.role.name, BlueGreenPhase.NOT_CREATED.name)); + } else { + this.clientWrapper = null; + this.currentPhase = null; + this.panicMode = 
true; + } + return; + } + + const statusEntries: StatusInfo[] = []; + const results: BlueGreenResult[] = await this.blueGreenDialect.getBlueGreenStatus(client); + if (results !== null) { + for (const result of results) { + let version = result.version; + if (!BlueGreenStatusMonitor.knownVersions.has(version)) { + const versionCopy: string = version; + version = BlueGreenStatusMonitor.latestKnownVersion; + logger.warn(Messages.get("Bgd.unknownVersion", versionCopy)); + } + const role: BlueGreenRole = BlueGreenRole.parseRole(result.role, version); + const phase: BlueGreenPhase = BlueGreenPhase.parsePhase(result.status, version); + + if (this.role !== role) { + continue; + } + + statusEntries.push(new StatusInfo(version, result.endpoint, result.port, phase, role)); + } + } + + // Check if there's a cluster writer endpoint; + let statusInfo: StatusInfo | undefined = statusEntries.find( + (x) => this.rdsUtils.isWriterClusterDns(x.endpoint) && this.rdsUtils.isNotOldInstance(x.endpoint) + ); + + if (statusInfo !== undefined) { + // Cluster writer endpoint found. + // Add cluster reader endpoint as well. + this.hostNames.add(statusInfo.endpoint.toLowerCase().replace(".cluster-", ".cluster-ro-")); + } + + if (statusInfo === undefined) { + // maybe it's an instance endpoint? + statusInfo = statusEntries.find((x) => this.rdsUtils.isRdsInstance(x.endpoint) && this.rdsUtils.isNotOldInstance(x.endpoint)); + } + + if (statusInfo === undefined) { + if (statusEntries.length === 0) { + // It's normal to expect that the status table has no entries after BGD is completed. + // Old1 cluster/instance has been separated and no longer receives + // updates from related green cluster/instance. 
+ if (this.role !== BlueGreenRole.SOURCE) { + logger.warn(Messages.get("Bgd.noEntriesInStatusTable", this.role.name)); + } + this.currentPhase = null; + } + } else { + this.currentPhase = statusInfo.phase; + this.version = statusInfo.version; + this.port = statusInfo.port; + } + + if (this.collectedTopology) { + statusEntries + .filter((x) => x.endpoint != null && this.rdsUtils.isNotOldInstance(x.endpoint)) + .forEach((x) => this.hostNames.add(x.endpoint.toLowerCase())); + } + + if (!this.connectionHostInfoCorrect && statusInfo !== undefined) { + // We connected to an initialHostInfo that might be not the desired Blue or Green cluster. + // We need to reconnect to a correct one. + + const statusInfoHostIpAddress: string | null = await this.getIpAddress(statusInfo.endpoint); + const connectedIpAddressCopy = this.connectedIpAddress; + + if (connectedIpAddressCopy !== null && connectedIpAddressCopy !== statusInfoHostIpAddress) { + // Found endpoint confirms that we're connected to a different node, and we need to reconnect. + this.connectionHostInfo = this.hostInfoBuilder.withHost(statusInfo.endpoint).withPort(statusInfo.port).build(); + this.connectionHostInfoCorrect = true; + await this.closeConnection(); + this.panicMode = true; + } else { + // We're already connected to a correct node. + this.connectionHostInfoCorrect = true; + this.panicMode = false; + } + } + + if (this.connectionHostInfoCorrect && this.hostListProvider == null) { + // A connection to a correct cluster (blue or green) is established. + // Let's initialize HostListProvider + this.initHostListProvider(); + } + } catch (e: any) { + if (this.pluginService.isSyntaxError(e)) { + this.currentPhase = BlueGreenPhase.NOT_CREATED; + logger.warn(Messages.get("Bgd.error", this.role.name, BlueGreenPhase.NOT_CREATED.name, e.message)); + } + if (!(await this.isConnectionClosed(client))) { + // It's normal to get connection closed during BGD switchover. 
+ // If connection isn't closed but there's an error then let's log it. + logger.debug(Messages.get("Bgd.unhandledError", this.role.name, e.message)); + } + await this.closeConnection(); + this.panicMode = true; + } + } + + protected async isConnectionClosed(client: ClientWrapper): Promise { + return !client || !(await this.pluginService.isClientValid(client)); + } + + protected async openConnection(): Promise { + if (this.clientWrapper != null && !(await this.isConnectionClosed(this.clientWrapper))) { + return; + } + + await this.openConnectionAsync(); + } + + protected async openConnectionAsync(): Promise { + this.clientWrapper = null; + this.panicMode = true; + + if (this.connectionHostInfo === null) { + this.connectionHostInfo = this.initialHostInfo; + this.connectedIpAddress = null; + this.connectionHostInfoCorrect = false; + } + + const connectionHostInfoCopy = this.connectionHostInfo; + let connectedIpAddressCopy = this.connectedIpAddress; + + try { + if (this.useIpAddress && connectedIpAddressCopy !== null) { + const connectionWithIpHostInfo: HostInfo = this.hostInfoBuilder.copyFrom(connectionHostInfoCopy).withHost(connectedIpAddressCopy).build(); + const connectWithIpProperties: Map = new Map(this.props); + + WrapperProperties.IAM_HOST.set(connectWithIpProperties, this.connectionHostInfo.host); + + logger.debug(Messages.get("Bgd.openingConnectionWithIp", this.role.name, connectionWithIpHostInfo.host)); + + this.clientWrapper = await this.pluginService.forceConnect(connectionWithIpHostInfo, connectWithIpProperties); + logger.debug(Messages.get("Bgd.openedConnectionWithIp", this.role.name, connectionWithIpHostInfo.host)); + } else { + const finalConnectionHostInfoCopy: HostInfo = connectionHostInfoCopy; + logger.debug(Messages.get("Bgd.openingConnection", this.role.name, finalConnectionHostInfoCopy.host)); + + connectedIpAddressCopy = await this.getIpAddress(connectionHostInfoCopy.host); + this.clientWrapper = await 
this.pluginService.forceConnect(connectionHostInfoCopy, this.props); + this.connectedIpAddress = connectedIpAddressCopy; + + logger.debug(Messages.get("Bgd.openedConnection", this.role.name, finalConnectionHostInfoCopy.host)); + } + this.panicMode = false; + } catch (error: any) { + this.clientWrapper = null; + this.panicMode = true; + } + } + + protected initHostListProvider(): void { + if (this.hostListProvider || !this.connectionHostInfoCorrect) { + return; + } + + const hostListProperties: Map = new Map(this.props); + + // Need to instantiate a separate HostListProvider with + // a special unique clusterId to avoid interference with other HostListProviders opened for this cluster. + // Blue and Green clusters are expected to have different clusterId. + + WrapperProperties.CLUSTER_ID.set(hostListProperties, `${this.bgdId}::${this.role.name}::${BlueGreenStatusMonitor.BG_CLUSTER_ID}`); + + logger.debug(Messages.get("Bgd.createHostListProvider", `${this.role.name}`, WrapperProperties.CLUSTER_ID.get(hostListProperties))); + + const connectionHostInfoCopy: HostInfo = this.connectionHostInfo; + if (connectionHostInfoCopy) { + this.hostListProvider = this.pluginService + .getDialect() + .getHostListProvider(hostListProperties, connectionHostInfoCopy.host, this.pluginService as unknown as HostListProviderService); + } else { + logger.warn(Messages.get("Bgd.hostInfoNull")); + } + } +} diff --git a/common/lib/plugins/bluegreen/blue_green_status_provider.ts b/common/lib/plugins/bluegreen/blue_green_status_provider.ts new file mode 100644 index 00000000..c503f133 --- /dev/null +++ b/common/lib/plugins/bluegreen/blue_green_status_provider.ts @@ -0,0 +1,920 @@ +/* + Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"). + You may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +import { PluginService } from "../../plugin_service"; +import { HostInfoBuilder } from "../../host_info_builder"; +import { SimpleHostAvailabilityStrategy } from "../../host_availability/simple_host_availability_strategy"; +import { BlueGreenStatusMonitor } from "./blue_green_status_monitor"; +import { BlueGreenInterimStatus } from "./blue_green_interim_status"; +import { HostInfo } from "../../host_info"; +import { convertMsToNanos, getTimeInNanos, Pair } from "../../utils/utils"; +import { BlueGreenRole } from "./blue_green_role"; +import { BlueGreenStatus } from "./blue_green_status"; +import { BlueGreenPhase } from "./blue_green_phase"; +import { BlueGreenIntervalRate } from "./blue_green_interval_rate"; +import { RdsUtils } from "../../utils/rds_utils"; +import { WrapperProperties } from "../../wrapper_property"; +import { DatabaseDialect } from "../../database_dialect/database_dialect"; +import { BlueGreenDialect } from "../../database_dialect/blue_green_dialect"; +import { Messages } from "../../utils/messages"; +import { levels, logger } from "../../../logutils"; +import { HostRole } from "../../host_role"; +import { AwsWrapperError } from "../../utils/errors"; +import { ConnectRouting } from "./routing/connect_routing"; +import { SubstituteConnectRouting } from "./routing/substitute_connect_routing"; +import { SuspendConnectRouting } from "./routing/suspend_connect_routing"; +import { ExecuteRouting } from "./routing/execute_routing"; +import { SuspendExecuteRouting } from "./routing/suspend_execute_routing"; +import { + 
SuspendUntilCorrespondingHostFoundConnectRouting +} from "./routing/suspend_until_corresponding_host_found_connect_routing"; +import { RejectConnectRouting } from "./routing/reject_connect_routing"; +import { getValueHash } from "./blue_green_utils"; + +export class BlueGreenStatusProvider { + static readonly MONITORING_PROPERTY_PREFIX = "blue_green_monitoring_"; + private static readonly DEFAULT_CONNECT_TIMEOUT_MS = 10_000; // 10 seconds + private static readonly DEFAULT_QUERY_TIMEOUT_MS = 10_000; // 10 seconds + + protected readonly hostInfoBuilder: HostInfoBuilder = new HostInfoBuilder({ hostAvailabilityStrategy: new SimpleHostAvailabilityStrategy() }); + protected readonly monitors: BlueGreenStatusMonitor[] = [null, null]; + protected lastContextHash: bigint = 0n; + protected interimStatusHashes: bigint[] = [0n, 0n]; + protected interimStatuses: BlueGreenInterimStatus[] = [null, null]; + protected hostIpAddresses: Map = new Map(); + + // The second parameter of Pair is null when no corresponding host is found. + protected readonly correspondingHosts: Map> = new Map(); + + // all known host names; host with no port + protected readonly roleByHost: Map = new Map(); + protected readonly iamHostSuccessfulConnects: Map> = new Map(); + protected readonly greenHostChangeNameTimes: Map = new Map(); + protected summaryStatus: BlueGreenStatus | null = null; + protected latestStatusPhase: BlueGreenPhase = BlueGreenPhase.NOT_CREATED; + + protected rollback: boolean = false; + protected blueDnsUpdateCompleted: boolean = false; + protected greenDnsRemoved: boolean = false; + protected greenTopologyChanged: boolean = false; + protected allGreenHostsChangedName: boolean = false; + protected postStatusEndTimeNano: bigint = 0n; + + // Status check interval time in millis for each BlueGreenIntervalRate. 
+ protected readonly statusCheckIntervalMap: Map = new Map(); + protected readonly switchoverTimeoutNanos: bigint; + protected readonly suspendNewBlueConnectionsWhenInProgress: boolean; + + protected readonly pluginService: PluginService; + protected readonly properties: Map; + protected readonly bgdId: string; + protected phaseTimeNanos: Map = new Map(); + protected readonly rdsUtils: RdsUtils = new RdsUtils(); + + constructor(pluginService: PluginService, properties: Map, bgdId: string) { + this.pluginService = pluginService; + this.properties = properties; + this.bgdId = bgdId; + + this.statusCheckIntervalMap.set(BlueGreenIntervalRate.BASELINE, BigInt(WrapperProperties.BG_INTERVAL_BASELINE_MS.get(properties))); + this.statusCheckIntervalMap.set(BlueGreenIntervalRate.INCREASED, BigInt(WrapperProperties.BG_INTERVAL_INCREASED_MS.get(properties))); + this.statusCheckIntervalMap.set(BlueGreenIntervalRate.HIGH, BigInt(WrapperProperties.BG_INTERVAL_HIGH_MS.get(properties))); + + this.switchoverTimeoutNanos = convertMsToNanos(WrapperProperties.BG_SWITCHOVER_TIMEOUT_MS.get(properties)); + this.suspendNewBlueConnectionsWhenInProgress = WrapperProperties.BG_SUSPEND_NEW_BLUE_CONNECTIONS.get(properties); + + const dialect: DatabaseDialect = this.pluginService.getDialect(); + if (this.isBlueGreenDialect(dialect)) { + this.initMonitoring(); + } else { + logger.warn(Messages.get("Bgd.unsupportedDialect", this.bgdId, dialect.getDialectName())); + } + } + + protected initMonitoring(): void { + this.monitors[BlueGreenRole.SOURCE.value] = new BlueGreenStatusMonitor( + BlueGreenRole.SOURCE, + this.bgdId, + this.pluginService.getCurrentHostInfo(), + this.pluginService, + this.getMonitoringProperties(), + this.statusCheckIntervalMap, + { onBlueGreenStatusChanged: (role, status) => this.prepareStatus(role, status) } + ); + + this.monitors[BlueGreenRole.TARGET.value] = new BlueGreenStatusMonitor( + BlueGreenRole.TARGET, + this.bgdId, + this.pluginService.getCurrentHostInfo(), + 
this.pluginService, + this.getMonitoringProperties(), + this.statusCheckIntervalMap, + { onBlueGreenStatusChanged: (role, status) => this.prepareStatus(role, status) } + ); + } + + protected getMonitoringProperties(): Map { + const monitoringConnProperties: Map = new Map(this.properties); + + for (const key of monitoringConnProperties.keys()) { + if (!key.startsWith(BlueGreenStatusProvider.MONITORING_PROPERTY_PREFIX)) { + continue; + } + + monitoringConnProperties.delete(key); + } + + if (!monitoringConnProperties.has(WrapperProperties.WRAPPER_CONNECT_TIMEOUT.name)) { + WrapperProperties.WRAPPER_CONNECT_TIMEOUT.set(monitoringConnProperties, BlueGreenStatusProvider.DEFAULT_CONNECT_TIMEOUT_MS); + } + + if (!monitoringConnProperties.has(WrapperProperties.WRAPPER_QUERY_TIMEOUT.name)) { + WrapperProperties.WRAPPER_QUERY_TIMEOUT.set(monitoringConnProperties, BlueGreenStatusProvider.DEFAULT_QUERY_TIMEOUT_MS); + } + + return monitoringConnProperties; + } + + protected isBlueGreenDialect(dialect: any): dialect is BlueGreenDialect { + return dialect; + } + + protected async prepareStatus(role: BlueGreenRole, interimStatus: BlueGreenInterimStatus): Promise { + // Detect changes + const statusHash: bigint = interimStatus.getCustomHashCode(); + const contextHash: bigint = this.getContextHash(); + const storedStatus = this.interimStatusHashes[role.value]; + + if (storedStatus === statusHash && this.lastContextHash === contextHash) { + // no changes detected + } + + // There are some changes detected. Let's update summary status. + logger.debug(Messages.get("Bgd.interimStatus", this.bgdId, role.name, interimStatus.toString())); + + this.updatePhase(role, interimStatus); + + // Store interimStatus and corresponding hash + this.interimStatuses[role.value] = interimStatus; + this.interimStatusHashes[role.value] = statusHash; + this.lastContextHash = contextHash; + + // Update map of IP addresses. 
+ for (const [host, ip] of interimStatus.startIpAddressesByHostMap) { + this.hostIpAddresses.set(host, ip); + } + + // Update roleByHost based on provided host names. + interimStatus.hostNames.forEach((x) => this.roleByHost.set(x.toLowerCase(), role)); + + this.updateCorrespondingHosts(); + this.updateSummaryStatus(role, interimStatus); + await this.updateMonitors(); + this.updateStatusCache(); + this.logCurrentContext(); + + // Log final switchover results. + this.logSwitchoverFinalSummary(); + + this.resetContextWhenCompleted(); + } + + protected updatePhase(role: BlueGreenRole, interimStatus: BlueGreenInterimStatus): void { + const status: BlueGreenInterimStatus = this.interimStatuses[role.value]; + const latestInterimPhase: BlueGreenPhase = !status ? BlueGreenPhase.NOT_CREATED : status.blueGreenPhase; + + if (latestInterimPhase && interimStatus.blueGreenPhase && interimStatus.blueGreenPhase.phase < latestInterimPhase.phase) { + this.rollback = true; + logger.debug(Messages.get("Bgd.rollback", this.bgdId)); + } + + if (!interimStatus.blueGreenPhase) { + return; + } + + // Do not allow status moves backward (unless it's rollback). + // That could be caused by updating blue/green hosts delays. 
+ if (!this.rollback) { + if (interimStatus.blueGreenPhase.phase >= this.latestStatusPhase.phase) { + this.latestStatusPhase = interimStatus.blueGreenPhase; + } + } else { + if (interimStatus.blueGreenPhase.phase < this.latestStatusPhase.phase) { + this.latestStatusPhase = interimStatus.blueGreenPhase; + } + } + } + + protected updateStatusCache(): void { + this.pluginService.setStatus(BlueGreenStatus, this.summaryStatus, this.bgdId); + this.storePhaseTime(this.summaryStatus.currentPhase); + } + + protected updateCorrespondingHosts(): void { + this.correspondingHosts.clear(); + + const sourceInterimStatus: BlueGreenInterimStatus | null = this.interimStatuses[BlueGreenRole.SOURCE.value]; + const targetInterimStatus: BlueGreenInterimStatus | null = this.interimStatuses[BlueGreenRole.TARGET.value]; + if (sourceInterimStatus?.startTopology?.length > 0 && targetInterimStatus?.startTopology?.length > 0) { + const blueWriterHostInfo: HostInfo = this.getWriterHost(BlueGreenRole.SOURCE); + const greenWriterHostInfo: HostInfo = this.getWriterHost(BlueGreenRole.TARGET); + const sortedBlueReaderHostInfos: HostInfo[] | null = this.getReaderHosts(BlueGreenRole.SOURCE); + const sortedGreenReaderHostInfos: HostInfo[] | null = this.getReaderHosts(BlueGreenRole.TARGET); + + if (blueWriterHostInfo) { + // greenWriterHostInfo can be null but that will be handled properly by corresponding routing. 
+ this.correspondingHosts.set(blueWriterHostInfo.host, new Pair(blueWriterHostInfo, greenWriterHostInfo)); + } + + if (sortedBlueReaderHostInfos?.length > 0) { + if (sortedGreenReaderHostInfos?.length > 0) { + let greenIndex: number = 0; + sortedBlueReaderHostInfos.forEach((blueHostInfo) => { + this.correspondingHosts.set(blueHostInfo.host, new Pair(blueHostInfo, sortedGreenReaderHostInfos.at(greenIndex++))); + greenIndex %= sortedGreenReaderHostInfos.length; + }); + } else { + sortedBlueReaderHostInfos.forEach((blueHostInfo) => { + this.correspondingHosts.set(blueHostInfo.host, new Pair(blueHostInfo, greenWriterHostInfo)); + }); + } + } + } + if (sourceInterimStatus?.hostNames?.size > 0 && targetInterimStatus?.hostNames?.size > 0) { + const blueHosts: Set = sourceInterimStatus.hostNames; + const greenHosts: Set = targetInterimStatus.hostNames; + + // Find corresponding cluster hosts + const blueClusterHost: string | null = + Array.from(blueHosts) + .filter((host) => this.rdsUtils.isWriterClusterDns(host)) + .at(0) || null; + + const greenClusterHost: string | null = + Array.from(greenHosts) + .filter((host) => this.rdsUtils.isWriterClusterDns(host)) + .at(0) || null; + + if (blueClusterHost !== null && greenClusterHost !== null) { + if (!this.correspondingHosts.has(blueClusterHost)) { + this.correspondingHosts.set( + blueClusterHost, + new Pair(this.hostInfoBuilder.withHost(blueClusterHost).build(), this.hostInfoBuilder.withHost(greenClusterHost).build()) + ); + } + } + + // Find corresponding cluster reader hosts + const blueClusterReaderHost: string | null = + Array.from(blueHosts) + .filter((host) => this.rdsUtils.isReaderClusterDns(host)) + .at(0) || null; + + const greenClusterReaderHost: string | null = + Array.from(greenHosts) + .filter((host) => this.rdsUtils.isReaderClusterDns(host)) + .at(0) || null; + + if (blueClusterReaderHost !== null && greenClusterReaderHost !== null) { + if (!this.correspondingHosts.has(blueClusterReaderHost)) { + 
this.correspondingHosts.set( + blueClusterReaderHost, + new Pair(this.hostInfoBuilder.withHost(blueClusterReaderHost).build(), this.hostInfoBuilder.withHost(greenClusterReaderHost).build()) + ); + } + } + + Array.from(blueHosts) + .filter((host) => this.rdsUtils.isRdsCustomClusterDns(host)) + .forEach((blueHost) => { + const customClusterName: string | null = this.rdsUtils.getRdsClusterId(blueHost); + if (customClusterName !== null) { + const greenHost: string | undefined = Array.from(greenHosts).find((host) => { + return ( + this.rdsUtils.isRdsCustomClusterDns(host) && + customClusterName === this.rdsUtils.removeGreenInstancePrefix(this.rdsUtils.getRdsClusterId(host)) + ); + }); + if (greenHost) { + if (!this.correspondingHosts.has(blueHost)) { + this.correspondingHosts.set( + blueHost, + new Pair(this.hostInfoBuilder.withHost(blueHost).build(), this.hostInfoBuilder.withHost(greenHost).build()) + ); + } + } + } + }); + } + } + + protected getWriterHost(role: BlueGreenRole): HostInfo | null { + return this.interimStatuses[role.value]?.startTopology.find((x) => x.role === HostRole.WRITER) || null; + } + + protected getReaderHosts(role: BlueGreenRole): HostInfo[] | null { + if (!this.interimStatuses[role.value]) { + return null; + } + return Array.from(this.interimStatuses[role.value].startTopology) + .filter((x) => x.role !== HostRole.WRITER) + .sort(); + } + + protected updateSummaryStatus(role: BlueGreenRole, interimStatus: BlueGreenInterimStatus) { + switch (this.latestStatusPhase) { + case BlueGreenPhase.NOT_CREATED: + this.summaryStatus = new BlueGreenStatus(this.bgdId, BlueGreenPhase.NOT_CREATED); + break; + case BlueGreenPhase.CREATED: + this.updateDnsFlags(role, interimStatus); + this.summaryStatus = this.getStatusOfCreated(); + break; + case BlueGreenPhase.PREPARATION: + this.startSwitchoverTimer(); + this.updateDnsFlags(role, interimStatus); + this.summaryStatus = this.getStatusOfPreparation(); + break; + case BlueGreenPhase.IN_PROGRESS: + 
this.updateDnsFlags(role, interimStatus); + this.summaryStatus = this.getStatusOfInProgress(); + break; + case BlueGreenPhase.POST: + this.updateDnsFlags(role, interimStatus); + this.summaryStatus = this.getStatusOfPost(); + break; + case BlueGreenPhase.COMPLETED: + this.updateDnsFlags(role, interimStatus); + this.summaryStatus = this.getStatusOfCompleted(); + break; + default: + throw new AwsWrapperError(Messages.get("Bgd.unknownPhase", this.bgdId, this.latestStatusPhase.name)); + } + } + + protected async updateMonitors(): Promise { + switch (this.summaryStatus.currentPhase) { + case BlueGreenPhase.NOT_CREATED: + for (const monitor of this.monitors) { + monitor.setIntervalRate(BlueGreenIntervalRate.BASELINE); + monitor.setCollectedIpAddresses(false); + monitor.setCollectedTopology(false); + monitor.setUseIpAddress(false); + } + break; + + case BlueGreenPhase.CREATED: + for (const monitor of this.monitors) { + monitor.setIntervalRate(BlueGreenIntervalRate.INCREASED); + monitor.setCollectedIpAddresses(true); + monitor.setCollectedTopology(true); + monitor.setUseIpAddress(false); + if (this.rollback) { + monitor.resetCollectedData(); + } + } + break; + + case BlueGreenPhase.PREPARATION: + case BlueGreenPhase.IN_PROGRESS: + case BlueGreenPhase.POST: + this.monitors.forEach((monitor) => { + monitor.setIntervalRate(BlueGreenIntervalRate.HIGH); + monitor.setCollectedIpAddresses(false); + monitor.setCollectedTopology(false); + monitor.setUseIpAddress(true); + }); + break; + + case BlueGreenPhase.COMPLETED: + this.monitors.forEach((monitor) => { + monitor.setIntervalRate(BlueGreenIntervalRate.BASELINE); + monitor.setCollectedIpAddresses(false); + monitor.setCollectedTopology(false); + monitor.setUseIpAddress(false); + monitor.resetCollectedData(); + }); + + // Stop monitoring old/source cluster/instance + if (!this.rollback && this.monitors[BlueGreenRole.SOURCE.value]) { + this.monitors[BlueGreenRole.SOURCE.value].setStop(true); + } + break; + + default: + throw new 
AwsWrapperError(Messages.get("Bgd.unknownPhase", this.bgdId, this.summaryStatus.currentPhase.name)); + } + } + + protected updateDnsFlags(role: BlueGreenRole, interimStatus: BlueGreenInterimStatus): void { + if (role === BlueGreenRole.SOURCE && !this.blueDnsUpdateCompleted && interimStatus.allStartTopologyIpChanged) { + logger.debug(Messages.get("Bgd.blueDnsCompleted", this.bgdId)); + this.blueDnsUpdateCompleted = true; + this.storeBlueDnsUpdateTime(); + } + + if (role === BlueGreenRole.TARGET && !this.greenDnsRemoved && interimStatus.allStartTopologyEndpointsRemoved) { + logger.debug(Messages.get("Bgd.greenDnsRemoved", this.bgdId)); + this.greenDnsRemoved = true; + this.storeGreenDnsRemoveTime(); + } + + if (role === BlueGreenRole.TARGET && !this.greenTopologyChanged && interimStatus.allTopologyChanged) { + logger.debug(Messages.get("Bgd.greenTopologyChanged", this.bgdId)); + this.greenTopologyChanged = true; + this.storeGreenTopologyChangeTime(); + } + } + + protected getContextHash(): bigint { + let result = getValueHash(1n, this.allGreenHostsChangedName.toString()); + result = getValueHash(result, this.iamHostSuccessfulConnects.size.toString()); + return result; + } + + protected getHostAndPort(host: string, port: number): string { + if (port > 0) { + return `${host}:${port}`; + } + + return host; + } + + // New connect requests: go to blue or green hosts; default behaviour; no routing + // Existing connections: default behaviour; no action + // Execute JDBC calls: default behaviour; no action + protected getStatusOfCreated(): BlueGreenStatus { + return new BlueGreenStatus(this.bgdId, BlueGreenPhase.CREATED, [], [], this.roleByHost, this.correspondingHosts); + } + + /** + * New connect requests to blue: route to corresponding IP address. 
+ * New connect requests to green: route to corresponding IP address + * New connect requests with IP address: default behaviour; no routing + * Existing connections: default behaviour; no action + * Execute database calls: default behaviour; no action + */ + protected getStatusOfPreparation(): BlueGreenStatus { + if (this.isSwitchoverTimerExpired()) { + logger.debug(Messages.get("Bgd.switchoverTimeout")); + if (this.rollback) { + return this.getStatusOfCreated(); + } + return this.getStatusOfCompleted(); + } + + const connectRouting: ConnectRouting[] = this.addSubstituteBlueWithIpAddressConnectRouting(); + return new BlueGreenStatus(this.bgdId, BlueGreenPhase.PREPARATION, connectRouting, [], this.roleByHost, this.correspondingHosts); + } + + protected addSubstituteBlueWithIpAddressConnectRouting(): ConnectRouting[] { + const connectRouting: ConnectRouting[] = []; + Array.from(this.roleByHost.entries()) + .filter(([host, role]) => role === BlueGreenRole.SOURCE && this.correspondingHosts.has(host)) + .forEach(([host, role]) => { + const hostSpec = this.correspondingHosts.get(host).left; + const blueIp = this.hostIpAddresses.get(hostSpec.host); + const substituteHostSpecWithIp = !blueIp ? hostSpec : this.hostInfoBuilder.copyFrom(hostSpec).withHost(blueIp).build(); + + connectRouting.push(new SubstituteConnectRouting(host, role, substituteHostSpecWithIp, [hostSpec], null)); + + const status = this.interimStatuses[role.value]; + if (status) { + connectRouting.push(new SubstituteConnectRouting(this.getHostAndPort(host, status.port), role, substituteHostSpecWithIp, [hostSpec], null)); + } + }); + + return connectRouting; + } + + /** + * New connect requests to blue: suspend or route to corresponding IP address (depending on settings). 
+ * New connect requests to green: suspend + * New connect requests with IP address: suspend + * Existing connections: default behaviour; no action + * Execute database calls: suspend + */ + protected getStatusOfInProgress(): BlueGreenStatus { + if (this.isSwitchoverTimerExpired()) { + logger.debug(Messages.get("Bgd.switchoverTimeout")); + if (this.rollback) { + return this.getStatusOfCreated(); + } + return this.getStatusOfCompleted(); + } + + let connectRouting: ConnectRouting[]; + if (this.suspendNewBlueConnectionsWhenInProgress) { + connectRouting = []; + connectRouting.push(new SuspendConnectRouting(null, BlueGreenRole.SOURCE, this.bgdId)); + } else { + // If we're not suspending new connections then, at least, we need to use IP addresses. + connectRouting = this.addSubstituteBlueWithIpAddressConnectRouting(); + } + + connectRouting.push(new SuspendConnectRouting(null, BlueGreenRole.TARGET, this.bgdId)); + + // All connect calls with IP address that belongs to blue or green host should be suspended. 
+ Array.from(this.hostIpAddresses.values()) + .filter((opt): opt is NonNullable => opt !== null && opt !== undefined) + .filter((value, index, self) => self.indexOf(value) === index) // distinct + .forEach((ipAddress) => { + let interimStatus: BlueGreenInterimStatus; + + if (this.suspendNewBlueConnectionsWhenInProgress) { + // Try to confirm that the ipAddress belongs to one of the blue hosts + interimStatus = this.interimStatuses[BlueGreenRole.SOURCE.value]; + if (interimStatus != null) { + const hasMatchingBlueIp = Array.from(interimStatus.startIpAddressesByHostMap.values()).some((x) => x && x === ipAddress); + + if (hasMatchingBlueIp) { + connectRouting.push(new SuspendConnectRouting(ipAddress, null, this.bgdId)); + connectRouting.push(new SuspendConnectRouting(this.getHostAndPort(ipAddress, interimStatus.port), null, this.bgdId)); + + return; + } + } + } + + // Try to confirm that the ipAddress belongs to one of the green hosts + interimStatus = this.interimStatuses[BlueGreenRole.TARGET.value]; + if (interimStatus != null) { + const hasMatchingGreenIp = Array.from(interimStatus.startIpAddressesByHostMap.values()).some((x) => x != null && x === ipAddress); + + if (hasMatchingGreenIp) { + connectRouting.push(new SuspendConnectRouting(ipAddress, null, this.bgdId)); + connectRouting.push(new SuspendConnectRouting(this.getHostAndPort(ipAddress, interimStatus.port), null, this.bgdId)); + + return; + } + } + }); + + // All blue and green traffic should be on hold. + const executeRouting: ExecuteRouting[] = []; + executeRouting.push(new SuspendExecuteRouting(null, BlueGreenRole.SOURCE, this.bgdId)); + executeRouting.push(new SuspendExecuteRouting(null, BlueGreenRole.TARGET, this.bgdId)); + + // All traffic through connections with IP addresses that belong to blue or green hosts should be on hold. 
+ Array.from(this.hostIpAddresses.values()) + .filter((opt) => opt != null) + .filter((value, index, self) => self.indexOf(value) === index) // distinct + .forEach((ipAddress) => { + // Try to confirm that the ipAddress belongs to one of the blue hosts + let interimStatus = this.interimStatuses[BlueGreenRole.SOURCE.value]; + if (interimStatus != null) { + const hasMatchingBlueIp = Array.from(interimStatus.startIpAddressesByHostMap.values()).some((x) => x != null && x === ipAddress); + + if (hasMatchingBlueIp) { + executeRouting.push(new SuspendExecuteRouting(ipAddress, null, this.bgdId)); + executeRouting.push(new SuspendExecuteRouting(this.getHostAndPort(ipAddress, interimStatus.port), null, this.bgdId)); + + return; + } + } + + // Try to confirm that the ipAddress belongs to one of the green hosts + interimStatus = this.interimStatuses[BlueGreenRole.TARGET.value]; + if (interimStatus != null) { + const hasMatchingGreenIp = Array.from(interimStatus.startIpAddressesByHostMap.values()).some((x) => x != null && x === ipAddress); + + if (hasMatchingGreenIp) { + executeRouting.push(new SuspendExecuteRouting(ipAddress, null, this.bgdId)); + executeRouting.push(new SuspendExecuteRouting(this.getHostAndPort(ipAddress, interimStatus.port), null, this.bgdId)); + + return; + } + } + + executeRouting.push(new SuspendExecuteRouting(ipAddress, null, this.bgdId)); + }); + return new BlueGreenStatus(this.bgdId, BlueGreenPhase.IN_PROGRESS, connectRouting, executeRouting, this.roleByHost, this.correspondingHosts); + } + + protected getStatusOfPost(): BlueGreenStatus { + if (this.isSwitchoverTimerExpired()) { + logger.debug(Messages.get("Bgd.switchoverTimeout")); + if (this.rollback) { + return this.getStatusOfCreated(); + } + return this.getStatusOfCompleted(); + } + + const connectRouting: ConnectRouting[] = []; + const executeRouting: ExecuteRouting[] = []; + + this.createPostRouting(connectRouting); + + return new BlueGreenStatus(this.bgdId, BlueGreenPhase.POST, connectRouting, 
executeRouting, this.roleByHost, this.correspondingHosts); + } + + protected createPostRouting(connectRouting: ConnectRouting[]): void { + // New connect calls to blue hosts should be routed to green hosts. + Array.from(this.roleByHost.entries()) + .filter(([host, role]) => role === BlueGreenRole.SOURCE) + .filter(([host, role]) => this.correspondingHosts.has(host)) + .forEach(([host, role]) => { + const blueHost: string = host; + const isBlueHostInstance: boolean = this.rdsUtils.isRdsInstance(blueHost); + const pair: Pair | undefined = this.correspondingHosts?.get(host); + const blueHostInfo: HostInfo | undefined = pair?.left; + const greenHostInfo: HostInfo | undefined = pair?.right; + + if (!greenHostInfo) { + // A corresponding host is not found. We need to suspend this call. + connectRouting.push(new SuspendUntilCorrespondingHostFoundConnectRouting(blueHost, role, this.bgdId)); + const status: BlueGreenInterimStatus = this.interimStatuses[role.value]; + if (status) { + connectRouting.push(new SuspendUntilCorrespondingHostFoundConnectRouting(this.getHostAndPort(blueHost, status.port), role, this.bgdId)); + } + } else { + const greenHost: string = greenHostInfo.host; + const greenIp = this.hostIpAddresses.get(greenHostInfo.host); + const greenHostInfoWithIp = !greenIp ? greenHostInfo : this.hostInfoBuilder.copyFrom(greenHostInfo).withHost(greenIp).build(); + + // Check whether green host has already been connected with blue (no-prefixes) IAM host name. + let iamHosts: HostInfo[]; + if (this.isAlreadySuccessfullyConnected(greenHost, blueHost)) { + // Green host has already changed its name, and it's not a new blue host (no prefixes). + iamHosts = blueHostInfo == null ? null : [blueHostInfo]; + } else { + // Green host isn't yet changed its name, so we need to try both possible IAM host options. + iamHosts = blueHostInfo == null ? 
[greenHostInfo] : [greenHostInfo, blueHostInfo]; + } + + connectRouting.push( + new SubstituteConnectRouting( + blueHost, + role, + greenHostInfoWithIp, + iamHosts, + isBlueHostInstance ? { notify: (iamHost: string) => this.registerIamHost(greenHost, iamHost) } : null + ) + ); + + const interimStatus: BlueGreenInterimStatus = this.interimStatuses[role.value]; + if (interimStatus != null) { + connectRouting.push( + new SubstituteConnectRouting( + this.getHostAndPort(blueHost, interimStatus.port), + role, + greenHostInfoWithIp, + iamHosts, + isBlueHostInstance ? { notify: (iamHost: string) => this.registerIamHost(greenHost, iamHost) } : null + ) + ); + } + } + }); + + if (!this.greenDnsRemoved) { + // New connect calls to green endpoints should be rejected. + connectRouting.push(new RejectConnectRouting(null, BlueGreenRole.TARGET)); + } + } + + protected getStatusOfCompleted(): BlueGreenStatus { + if (this.isSwitchoverTimerExpired()) { + logger.debug(Messages.get("Bgd.switchoverTimeout")); + if (this.rollback) { + return this.getStatusOfCreated(); + } + return new BlueGreenStatus(this.bgdId, BlueGreenPhase.COMPLETED, [], [], this.roleByHost, this.correspondingHosts); + } + + // BGD reports that it's completed but DNS hasn't yet updated completely. + // Pretend that status isn't (yet) completed. 
+ if (!this.blueDnsUpdateCompleted || !this.greenDnsRemoved) { + return this.getStatusOfPost(); + } + + return new BlueGreenStatus(this.bgdId, BlueGreenPhase.COMPLETED, [], [], this.roleByHost, new Map()); + } + + protected registerIamHost(connectHost: string, iamHost: string): void { + const differentHostNames = connectHost != null && connectHost !== iamHost; + if (differentHostNames) { + if (!this.isAlreadySuccessfullyConnected(connectHost, iamHost)) { + this.greenHostChangeNameTimes.set(connectHost, BigInt(Date.now())); + logger.debug(Messages.get("Bgd.greenHostChangedName", connectHost, iamHost)); + } + } + + if (!this.iamHostSuccessfulConnects.has(connectHost)) { + this.iamHostSuccessfulConnects.set(connectHost, new Set()); + } + this.iamHostSuccessfulConnects.get(connectHost)!.add(iamHost); + + if (differentHostNames) { + // Check all IAM host changed their names + const allHostChangedNames = Array.from(this.iamHostSuccessfulConnects.entries()) + .filter(([_, value]) => value.size > 0) + .every(([key, value]) => Array.from(value).some((y) => key !== y)); + + if (allHostChangedNames && !this.allGreenHostsChangedName) { + logger.debug("allGreenHostsChangedName: true"); + this.allGreenHostsChangedName = true; + this.storeGreenHostChangeNameTime(); + } + } + } + + protected isAlreadySuccessfullyConnected(connectHost: string, iamHost: string): boolean { + if (!this.iamHostSuccessfulConnects.has(connectHost)) { + this.iamHostSuccessfulConnects.set(connectHost, new Set()); + } + + return this.iamHostSuccessfulConnects.get(connectHost)!.has(iamHost); + } + + protected storePhaseTime(phase: BlueGreenPhase) { + if (phase == null) { + return; + } + + const key = `${phase.name}${this.rollback ? " (rollback)" : ""}`; + if (!this.phaseTimeNanos.has(key)) { + this.phaseTimeNanos.set(key, new PhaseTimeInfo(new Date(), getTimeInNanos(), phase)); + } + } + + protected storeBlueDnsUpdateTime(): void { + const key = `Blue DNS updated${this.rollback ? 
" (rollback)" : ""}`; + if (!this.phaseTimeNanos.has(key)) { + this.phaseTimeNanos.set(key, new PhaseTimeInfo(new Date(), getTimeInNanos(), null)); + } + } + + protected storeGreenDnsRemoveTime(): void { + const key = `Green DNS removed${this.rollback ? " (rollback)" : ""}`; + if (!this.phaseTimeNanos.has(key)) { + this.phaseTimeNanos.set(key, new PhaseTimeInfo(new Date(), getTimeInNanos(), null)); + } + } + + protected storeGreenHostChangeNameTime(): void { + const key = `Green host certificates changed${this.rollback ? " (rollback)" : ""}`; + if (!this.phaseTimeNanos.has(key)) { + this.phaseTimeNanos.set(key, new PhaseTimeInfo(new Date(), getTimeInNanos(), null)); + } + } + + protected storeGreenTopologyChangeTime(): void { + const key = `Green topology changed${this.rollback ? " (rollback)" : ""}`; + if (!this.phaseTimeNanos.has(key)) { + this.phaseTimeNanos.set(key, new PhaseTimeInfo(new Date(), getTimeInNanos(), null)); + } + } + + protected logSwitchoverFinalSummary() { + const switchoverCompleted = + (!this.rollback && this.summaryStatus?.currentPhase === BlueGreenPhase.COMPLETED) || + (this.rollback && this.summaryStatus?.currentPhase === BlueGreenPhase.CREATED); + + const hasActiveSwitchoverPhases = Array.from(this.phaseTimeNanos.entries()).some( + ([_, value]) => value.phase != null && value.phase.isActiveSwitchoverOrCompleted + ); + + if (!switchoverCompleted || !hasActiveSwitchoverPhases) { + return; + } + + const timeZeroPhase: BlueGreenPhase = this.rollback ? BlueGreenPhase.PREPARATION : BlueGreenPhase.IN_PROGRESS; + const timeZeroKey: string = `${timeZeroPhase.name}${this.rollback ? 
" (rollback)" : ""}`; + const timeZero = this.phaseTimeNanos.get(timeZeroKey); + const divider = "----------------------------------------------------------------------------------\n"; + + const logMessage = + `[bgdId: '${this.bgdId}']` + + "\n" + + divider + + `${"timestamp".padEnd(28)} ${"time offset (ms)".padStart(21)} ${"event".padStart(31)}\n` + + divider + + Array.from(this.phaseTimeNanos.entries()) + .sort((a, b) => Number(a[1].timestampNano - b[1].timestampNano)) + .map( + ([key, value]) => + `${value.timestamp.toISOString().padStart(28)} ${ + timeZero ? (Number(value.timestampNano - timeZero.timestampNano) / 1_000_000 + " ms").padStart(18) : "".padStart(18) + } ${key.padStart(31)}` + ) + .join("\n") + + "\n" + + divider; + logger.info(logMessage); + } + + protected resetContextWhenCompleted(): void { + const switchoverCompleted = + (!this.rollback && this.summaryStatus?.currentPhase === BlueGreenPhase.COMPLETED) || + (this.rollback && this.summaryStatus?.currentPhase === BlueGreenPhase.CREATED); + + const hasActiveSwitchoverPhases = Array.from(this.phaseTimeNanos.entries()).some( + ([_, value]) => value.phase != null && value.phase.isActiveSwitchoverOrCompleted + ); + + if (switchoverCompleted && hasActiveSwitchoverPhases) { + logger.debug(Messages.get("Bgd.resetContext")); + this.rollback = false; + this.summaryStatus = null; + this.latestStatusPhase = BlueGreenPhase.NOT_CREATED; + this.phaseTimeNanos.clear(); + this.blueDnsUpdateCompleted = false; + this.greenDnsRemoved = false; + this.greenTopologyChanged = false; + this.allGreenHostsChangedName = false; + this.postStatusEndTimeNano = 0n; + this.lastContextHash = 0n; + this.interimStatuses = [null, null]; + this.interimStatusHashes = [0n, 0n]; + this.hostIpAddresses.clear(); + this.correspondingHosts.clear(); + this.roleByHost.clear(); + this.iamHostSuccessfulConnects.clear(); + this.greenHostChangeNameTimes.clear(); + } + } + + protected startSwitchoverTimer(): void { + if (this.postStatusEndTimeNano 
=== 0n) {
+      this.postStatusEndTimeNano = getTimeInNanos() + this.switchoverTimeoutNanos;
+    }
+  }
+
+  protected isSwitchoverTimerExpired(): boolean {
+    return this.postStatusEndTimeNano > 0n && getTimeInNanos() >= this.postStatusEndTimeNano;
+  }
+
+  protected logCurrentContext(): void {
+    if (levels[logger.level] > levels.debug) {
+      // We can skip this log message if debug level is in effect
+      // and more detailed message is going to be printed few lines below.
+      logger.info(
+        `[bgdId: '${this.bgdId}'] BG status: ${
+          this.summaryStatus == null || this.summaryStatus.currentPhase == null ? "" : this.summaryStatus.currentPhase.name
+        }`
+      );
+    }
+
+    logger.debug(`[bgdId: '${this.bgdId}'] Summary status:\n${this.summaryStatus == null ? "" : this.summaryStatus.toString()}`);
+
+    logger.debug(
+      "Corresponding hosts:\n" +
+        Array.from(this.correspondingHosts.entries())
+          .map(([key, value]) => ` ${key} -> ${value.right == null ? "" : value.right.getHostAndPort()}`)
+          .join("\n")
+    );
+
+    logger.debug(
+      "Phase times:\n" +
+        Array.from(this.phaseTimeNanos.entries())
+          .map(([key, value]) => ` ${key} -> ${value.timestamp}`)
+          .join("\n")
+    );
+
+    logger.debug(
+      "Green host certificate change times:\n" +
+        Array.from(this.greenHostChangeNameTimes.entries())
+          .map(([key, value]) => ` ${key} -> ${value}`)
+          .join("\n")
+    );
+
+    logger.debug(`
+    latestStatusPhase: ${this.latestStatusPhase.name}
+    blueDnsUpdateCompleted: ${this.blueDnsUpdateCompleted}
+    greenDnsRemoved: ${this.greenDnsRemoved}
+    greenHostChangedName: ${this.allGreenHostsChangedName}
+    greenTopologyChanged: ${this.greenTopologyChanged}`);
+  }
+
+  clearResources() {
+    this.monitors.forEach((monitor) => {
+      monitor.setStop(true);
+    });
+  }
+}
+
+class PhaseTimeInfo {
+  readonly timestamp: Date;
+  readonly timestampNano: bigint;
+  phase: BlueGreenPhase | null;
+
+  constructor(timestamp: Date, timestampNano: bigint, phase: BlueGreenPhase | null) {
+    this.timestamp = timestamp;
+    this.timestampNano =
timestampNano; + this.phase = phase; + } +} diff --git a/common/lib/plugins/bluegreen/blue_green_utils.ts b/common/lib/plugins/bluegreen/blue_green_utils.ts new file mode 100644 index 00000000..e20f12d0 --- /dev/null +++ b/common/lib/plugins/bluegreen/blue_green_utils.ts @@ -0,0 +1,33 @@ +/* + Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"). + You may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +export function getValueHash(currentHash: bigint, val: string): bigint { + return currentHash * 31n + getHashCode(val); +} + +export function getHashCode(str: string): bigint { + let hash = 0n; + if (str.length === 0) { + return hash; + } + + for (let i = 0; i < str.length; i++) { + const char = BigInt(str.charCodeAt(i)); + hash = 31n * hash + char; + } + + return hash; +} diff --git a/common/lib/plugins/bluegreen/routing/base_connect_routing.ts b/common/lib/plugins/bluegreen/routing/base_connect_routing.ts new file mode 100644 index 00000000..5dfdd25c --- /dev/null +++ b/common/lib/plugins/bluegreen/routing/base_connect_routing.ts @@ -0,0 +1,41 @@ +/* + Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"). + You may not use this file except in compliance with the License. 
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+*/
+
+import { ConnectRouting } from "./connect_routing";
+import { BaseRouting } from "./base_routing";
+import { BlueGreenRole } from "../blue_green_role";
+import { ConnectionPlugin } from "../../../connection_plugin";
+import { HostInfo } from "../../../host_info";
+import { ClientWrapper } from "../../../client_wrapper";
+import { PluginService } from "../../../plugin_service";
+
+export abstract class BaseConnectRouting extends BaseRouting implements ConnectRouting {
+  isMatch(hostInfo: HostInfo, hostRole: BlueGreenRole): boolean {
+    return (
+      (this.hostAndPort === null || this.hostAndPort === hostInfo?.getHostAndPort().toLowerCase()) &&
+      (this.role === null || this.role === hostRole)
+    );
+  }
+
+  abstract apply(
+    plugin: ConnectionPlugin,
+    hostInfo: HostInfo,
+    properties: Map<string, any>,
+    isInitialConnection: boolean,
+    connectFunc: () => Promise<ClientWrapper>,
+    pluginService: PluginService
+  ): Promise<ClientWrapper>;
+}
diff --git a/common/lib/plugins/bluegreen/routing/base_execute_routing.ts b/common/lib/plugins/bluegreen/routing/base_execute_routing.ts
new file mode 100644
index 00000000..80d7d50e
--- /dev/null
+++ b/common/lib/plugins/bluegreen/routing/base_execute_routing.ts
@@ -0,0 +1,40 @@
+/*
+  Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+
+  Licensed under the Apache License, Version 2.0 (the "License").
+  You may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+*/
+
+import { BaseRouting } from "./base_routing";
+import { BlueGreenRole } from "../blue_green_role";
+import { HostInfo } from "../../../host_info";
+import { PluginService } from "../../../plugin_service";
+import { ExecuteRouting, RoutingResultHolder } from "./execute_routing";
+import { ConnectionPlugin } from "../../../connection_plugin";
+
+export abstract class BaseExecuteRouting extends BaseRouting implements ExecuteRouting {
+  isMatch(hostInfo: HostInfo, hostRole: BlueGreenRole): boolean {
+    return (
+      (this.hostAndPort === null || this.hostAndPort === hostInfo?.getHostAndPort().toLowerCase()) &&
+      (this.role === null || this.role === hostRole)
+    );
+  }
+
+  abstract apply<T>(
+    plugin: ConnectionPlugin,
+    methodName: string,
+    methodFunc: () => Promise<T>,
+    methodArgs: any,
+    properties: Map<string, any>,
+    pluginService: PluginService
+  ): Promise<RoutingResultHolder<T>>;
+}
diff --git a/common/lib/plugins/bluegreen/routing/base_routing.ts b/common/lib/plugins/bluegreen/routing/base_routing.ts
new file mode 100644
index 00000000..42f970b8
--- /dev/null
+++ b/common/lib/plugins/bluegreen/routing/base_routing.ts
@@ -0,0 +1,49 @@
+/*
+  Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+
+  Licensed under the Apache License, Version 2.0 (the "License").
+  You may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+*/
+
+import { PluginService } from "../../../plugin_service";
+import { BlueGreenStatus } from "../blue_green_status";
+import { convertMsToNanos, getTimeInNanos, sleep } from "../../../utils/utils";
+import { BlueGreenRole } from "../blue_green_role";
+
+export abstract class BaseRouting {
+  protected static readonly SLEEP_CHUNK: number = 50;
+  protected readonly hostAndPort: string;
+  protected readonly role: BlueGreenRole;
+
+  constructor(hostAndPort: string, role: BlueGreenRole) {
+    this.hostAndPort = hostAndPort;
+    this.role = role;
+  }
+
+  protected async delay(delayMs: number, bgStatus: BlueGreenStatus, pluginService: PluginService, bgdId: string): Promise<void> {
+    const start: bigint = getTimeInNanos();
+    const end: bigint = start + convertMsToNanos(delayMs);
+    const minDelay: number = Math.min(delayMs, BaseRouting.SLEEP_CHUNK);
+
+    if (!bgStatus) {
+      await sleep(delayMs);
+    } else {
+      do {
+        await sleep(minDelay);
+      } while (bgStatus === pluginService.getStatus(BlueGreenStatus, bgdId) && getTimeInNanos() < end);
+    }
+  }
+
+  toString(): string {
+    return `${this.constructor.name} [${this.hostAndPort ?? ""}, ${this.role?.name ?? ""}]`;
+  }
+}
diff --git a/common/lib/plugins/bluegreen/routing/connect_routing.ts b/common/lib/plugins/bluegreen/routing/connect_routing.ts
new file mode 100644
index 00000000..30197744
--- /dev/null
+++ b/common/lib/plugins/bluegreen/routing/connect_routing.ts
@@ -0,0 +1,36 @@
+/*
+  Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+
+  Licensed under the Apache License, Version 2.0 (the "License").
+ You may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +import { PluginService } from "../../../plugin_service"; +import { ConnectionPlugin } from "../../../connection_plugin"; +import { HostInfo } from "../../../host_info"; +import { BlueGreenRole } from "../blue_green_role"; +import { ClientWrapper } from "../../../client_wrapper"; + +export interface ConnectRouting { + isMatch(hostInfo: HostInfo, hostRole: BlueGreenRole): boolean; + + apply( + plugin: ConnectionPlugin, + hostInfo: HostInfo, + properties: Map, + isInitialConnection: boolean, + connectFunc: () => Promise, + pluginService: PluginService + ): Promise; + + toString(): string; +} diff --git a/common/lib/plugins/bluegreen/routing/execute_routing.ts b/common/lib/plugins/bluegreen/routing/execute_routing.ts new file mode 100644 index 00000000..69849516 --- /dev/null +++ b/common/lib/plugins/bluegreen/routing/execute_routing.ts @@ -0,0 +1,59 @@ +/* + Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"). + You may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +import { PluginService } from "../../../plugin_service"; +import { HostInfo } from "../../../host_info"; +import { BlueGreenRole } from "../blue_green_role"; +import { ConnectionPlugin } from "../../../connection_plugin"; + +class EmptyResult {} + +export class RoutingResultHolder { + private static EMPTY_VAL: EmptyResult = {}; + private static EMPTY = new RoutingResultHolder(RoutingResultHolder.EMPTY_VAL); + + private readonly val: T; + + constructor(val: T) { + this.val = val; + } + + get(): T { + return this.val; + } + + isPresent(): boolean { + return this.val !== RoutingResultHolder.EMPTY_VAL; + } + + static empty() { + return RoutingResultHolder.EMPTY; + } +} + +export interface ExecuteRouting { + isMatch(hostInfo: HostInfo, hostRole: BlueGreenRole): boolean; + apply( + plugin: ConnectionPlugin, + methodName: string, + methodFunc: () => Promise, + methodArgs: any, + properties: Map, + pluginService: PluginService + ): Promise>; + + toString(): string; +} diff --git a/common/lib/plugins/bluegreen/routing/pass_through_connect_routing.ts b/common/lib/plugins/bluegreen/routing/pass_through_connect_routing.ts new file mode 100644 index 00000000..d5bd2a4c --- /dev/null +++ b/common/lib/plugins/bluegreen/routing/pass_through_connect_routing.ts @@ -0,0 +1,35 @@ +/* + Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"). + You may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +import { BaseConnectRouting } from "./base_connect_routing"; +import { ConnectionPlugin } from "../../../connection_plugin"; +import { HostInfo } from "../../../host_info"; +import { ClientWrapper } from "../../../client_wrapper"; +import { PluginService } from "../../../plugin_service"; + +// Pass through connect call without additional routing. +export class PassThroughConnectRouting extends BaseConnectRouting { + apply( + plugin: ConnectionPlugin, + hostInfo: HostInfo, + properties: Map, + isInitialConnection: boolean, + connectFunc: () => Promise, + pluginService: PluginService + ): Promise { + return connectFunc(); + } +} diff --git a/common/lib/plugins/bluegreen/routing/pass_through_execute_routing.ts b/common/lib/plugins/bluegreen/routing/pass_through_execute_routing.ts new file mode 100644 index 00000000..2c947bd3 --- /dev/null +++ b/common/lib/plugins/bluegreen/routing/pass_through_execute_routing.ts @@ -0,0 +1,34 @@ +/* + Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"). + You may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +import { PluginService } from "../../../plugin_service"; +import { BaseExecuteRouting } from "./base_execute_routing"; +import { ConnectionPlugin } from "../../../connection_plugin"; +import { RoutingResultHolder } from "./execute_routing"; + +// Normally execute database call. 
+export class PassThroughExecuteRouting extends BaseExecuteRouting { + async apply( + plugin: ConnectionPlugin, + methodName: string, + methodFunc: () => Promise, + methodArgs: any, + properties: Map, + pluginService: PluginService + ): Promise> { + return new RoutingResultHolder(await methodFunc()); + } +} diff --git a/common/lib/plugins/bluegreen/routing/reject_connect_routing.ts b/common/lib/plugins/bluegreen/routing/reject_connect_routing.ts new file mode 100644 index 00000000..0ef111ec --- /dev/null +++ b/common/lib/plugins/bluegreen/routing/reject_connect_routing.ts @@ -0,0 +1,39 @@ +/* + Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"). + You may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +import { BaseConnectRouting } from "./base_connect_routing"; +import { ConnectionPlugin } from "../../../connection_plugin"; +import { HostInfo } from "../../../host_info"; +import { ClientWrapper } from "../../../client_wrapper"; +import { PluginService } from "../../../plugin_service"; +import { logger } from "../../../../logutils"; +import { Messages } from "../../../utils/messages"; +import { AwsWrapperError } from "../../../utils/errors"; + +// Reject an attempt to open a new connection. 
+export class RejectConnectRouting extends BaseConnectRouting { + apply( + plugin: ConnectionPlugin, + hostInfo: HostInfo, + properties: Map, + isInitialConnection: boolean, + connectFunc: () => Promise, + pluginService: PluginService + ): Promise { + logger.debug(Messages.get("Bgd.inProgressCantConnect")); + throw new AwsWrapperError(Messages.get("Bgd.inProgressCantConnect")); + } +} diff --git a/common/lib/plugins/bluegreen/routing/substitute_connect_routing.ts b/common/lib/plugins/bluegreen/routing/substitute_connect_routing.ts new file mode 100644 index 00000000..1d453e46 --- /dev/null +++ b/common/lib/plugins/bluegreen/routing/substitute_connect_routing.ts @@ -0,0 +1,110 @@ +/* + Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"). + You may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +import { BaseConnectRouting } from "./base_connect_routing"; +import { ConnectionPlugin } from "../../../connection_plugin"; +import { HostInfo } from "../../../host_info"; +import { ClientWrapper } from "../../../client_wrapper"; +import { PluginService } from "../../../plugin_service"; +import { RdsUtils } from "../../../utils/rds_utils"; +import { BlueGreenRole } from "../blue_green_role"; +import { AwsWrapperError } from "../../../utils/errors"; +import { Messages } from "../../../utils/messages"; +import { HostAvailability } from "../../../host_availability/host_availability"; +import { WrapperProperties } from "../../../wrapper_property"; + +export interface IamSuccessfulConnectFunc { + notify(iamHost: string): void; +} + +export class SubstituteConnectRouting extends BaseConnectRouting { + protected readonly rdsUtils: RdsUtils; + protected readonly substituteHost: HostInfo; + protected readonly iamHosts: HostInfo[]; + protected readonly iamSuccessfulConnectNotify: IamSuccessfulConnectFunc; + + constructor( + hostAndPort: string, + role: BlueGreenRole, + substituteHost: HostInfo, + iamHosts: HostInfo[], + iamSuccessfulConnectNotify: IamSuccessfulConnectFunc + ) { + super(hostAndPort, role); + this.substituteHost = substituteHost; + this.iamHosts = iamHosts; + this.iamSuccessfulConnectNotify = iamSuccessfulConnectNotify; + this.rdsUtils = new RdsUtils(); + } + + async apply( + plugin: ConnectionPlugin, + hostInfo: HostInfo, + properties: Map, + isInitialConnection: boolean, + connectFunc: () => Promise, + pluginService: PluginService + ): Promise { + if (!this.rdsUtils.isIP(this.substituteHost.host)) { + return pluginService.connect(this.substituteHost, properties, plugin); + } + + if (!this.iamHosts || this.iamHosts.length === 0) { + throw new AwsWrapperError(Messages.get("Bgd.requireIamHost")); + } + + for (const iamHost of this.iamHosts) { + const reroutedHostInfo: HostInfo = pluginService + .getHostInfoBuilder() + .copyFrom(this.substituteHost) + 
.withHostId(iamHost.hostId) + .withAvailability(HostAvailability.AVAILABLE) + .build(); + reroutedHostInfo.addAlias(iamHost.host); + + const reroutedProperties: Map = new Map(properties); + reroutedProperties.set(WrapperProperties.HOST.name, iamHost.host); + if (iamHost.isPortSpecified()) { + reroutedProperties.set(WrapperProperties.IAM_DEFAULT_PORT.name, iamHost.port); + } + + try { + const conn: ClientWrapper = await pluginService.connect(reroutedHostInfo, reroutedProperties); + if (!this.iamSuccessfulConnectNotify) { + try { + this.iamSuccessfulConnectNotify.notify(iamHost.host); + } catch (e: any) { + // do nothing + } + } + return conn; + } catch (e: any) { + if (!pluginService.isLoginError(e)) { + throw e; + } + // do nothing + // try with another IAM host + } + } + throw new AwsWrapperError(Messages.get("Bgd.inProgressCantOpenConnection", this.substituteHost.getHostAndPort())); + } + + toString(): string { + return `${this.constructor.name} [${this.hostAndPort ?? ""}, ${this.role?.name ?? ""}, substitute: ${this.substituteHost?.getHostAndPort() ?? ""}, iamHosts: ${ + this.iamHosts?.map((host) => host.getHostAndPort()).join(", ") ?? "" + }]`; + } +} diff --git a/common/lib/plugins/bluegreen/routing/suspend_connect_routing.ts b/common/lib/plugins/bluegreen/routing/suspend_connect_routing.ts new file mode 100644 index 00000000..525e6fd8 --- /dev/null +++ b/common/lib/plugins/bluegreen/routing/suspend_connect_routing.ts @@ -0,0 +1,81 @@ +/* + Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"). + You may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +import { BaseConnectRouting } from "./base_connect_routing"; +import { ConnectionPlugin } from "../../../connection_plugin"; +import { HostInfo } from "../../../host_info"; +import { ClientWrapper } from "../../../client_wrapper"; +import { PluginService } from "../../../plugin_service"; +import { logger } from "../../../../logutils"; +import { Messages } from "../../../utils/messages"; +import { TelemetryFactory } from "../../../utils/telemetry/telemetry_factory"; +import { TelemetryContext } from "../../../utils/telemetry/telemetry_context"; +import { TelemetryTraceLevel } from "../../../utils/telemetry/telemetry_trace_level"; +import { BlueGreenStatus } from "../blue_green_status"; +import { convertMsToNanos, convertNanosToMs, getTimeInNanos } from "../../../utils/utils"; +import { WrapperProperties } from "../../../wrapper_property"; +import { BlueGreenPhase } from "../blue_green_phase"; +import { TimeoutError } from "@opentelemetry/sdk-metrics"; +import { BlueGreenRole } from "../blue_green_role"; + +export class SuspendConnectRouting extends BaseConnectRouting { + private static readonly TELEMETRY_SWITCHOVER = "Blue/Green switchover"; + private static readonly SLEEP_TIME_MS: number = 100; + + protected bgdId: string; + + constructor(hostAndPort: string, role: BlueGreenRole, bgdId: string) { + super(hostAndPort, role); + this.bgdId = bgdId; + } + + async apply( + plugin: ConnectionPlugin, + hostInfo: HostInfo, + properties: Map, + isInitialConnection: boolean, + connectFunc: () => Promise, + pluginService: PluginService + ): Promise { + logger.debug(Messages.get("Bgd.inProgressSuspendConnect")); + + const telemetryFactory: TelemetryFactory = pluginService.getTelemetryFactory(); + const telemetryContext: TelemetryContext = telemetryFactory.openTelemetryContext( + SuspendConnectRouting.TELEMETRY_SWITCHOVER, + TelemetryTraceLevel.NESTED + ); + + return 
await telemetryContext.start(async () => { + let bgStatus: BlueGreenStatus = pluginService.getStatus(BlueGreenStatus, this.bgdId); + const timeoutNanos: bigint = convertMsToNanos(WrapperProperties.BG_CONNECT_TIMEOUT_MS.get(properties)); + const suspendStartTime: bigint = getTimeInNanos(); + const endTime: bigint = getTimeInNanos() + timeoutNanos; + + while (getTimeInNanos() <= endTime && bgStatus != null && bgStatus.currentPhase === BlueGreenPhase.IN_PROGRESS) { + await this.delay(SuspendConnectRouting.SLEEP_TIME_MS, bgStatus, pluginService, this.bgdId); + + bgStatus = pluginService.getStatus(BlueGreenStatus, this.bgdId); + } + + if (bgStatus != null && bgStatus.currentPhase === BlueGreenPhase.IN_PROGRESS) { + throw new TimeoutError(Messages.get("Bgd.inProgressTryConnectLater", `${WrapperProperties.BG_CONNECT_TIMEOUT_MS.get(properties)}`)); + } + + logger.debug(Messages.get("Bgd.switchoverCompleteContinueWithConnect", `${convertNanosToMs(getTimeInNanos() - suspendStartTime)}`)); + return Promise.resolve(); + }); + } +} diff --git a/common/lib/plugins/bluegreen/routing/suspend_execute_routing.ts b/common/lib/plugins/bluegreen/routing/suspend_execute_routing.ts new file mode 100644 index 00000000..10c9b398 --- /dev/null +++ b/common/lib/plugins/bluegreen/routing/suspend_execute_routing.ts @@ -0,0 +1,81 @@ +/* + Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"). + You may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +import { PluginService } from "../../../plugin_service"; +import { BaseExecuteRouting } from "./base_execute_routing"; +import { BlueGreenRole } from "../blue_green_role"; +import { Messages } from "../../../utils/messages"; +import { logger } from "../../../../logutils"; +import { TelemetryFactory } from "../../../utils/telemetry/telemetry_factory"; +import { TelemetryContext } from "../../../utils/telemetry/telemetry_context"; +import { TelemetryTraceLevel } from "../../../utils/telemetry/telemetry_trace_level"; +import { BlueGreenStatus } from "../blue_green_status"; +import { convertMsToNanos, convertNanosToMs, getTimeInNanos } from "../../../utils/utils"; +import { WrapperProperties } from "../../../wrapper_property"; +import { BlueGreenPhase } from "../blue_green_phase"; +import { TimeoutError } from "@opentelemetry/sdk-metrics"; +import { ConnectionPlugin } from "../../../connection_plugin"; +import { RoutingResultHolder } from "./execute_routing"; + +export class SuspendExecuteRouting extends BaseExecuteRouting { + protected static readonly TELEMETRY_SWITCHOVER: string = "Blue/Green switchover"; + private static readonly SLEEP_TIME_MS: number = 100; + + protected bgdId: string; + + constructor(hostAndPort: string, role: BlueGreenRole, bgdId: string) { + super(hostAndPort, role); + this.bgdId = bgdId; + } + + async apply( + plugin: ConnectionPlugin, + methodName: string, + methodFunc: () => Promise, + methodArgs: any, + properties: Map, + pluginService: PluginService + ): Promise> { + logger.debug(Messages.get("Bgd.inProgressSuspendMethod", methodName)); + + const telemetryFactory: TelemetryFactory = pluginService.getTelemetryFactory(); + const telemetryContext: TelemetryContext = telemetryFactory.openTelemetryContext( + SuspendExecuteRouting.TELEMETRY_SWITCHOVER, + TelemetryTraceLevel.NESTED + ); + + return await telemetryContext.start(async () => { + let bgStatus: BlueGreenStatus = pluginService.getStatus(BlueGreenStatus, this.bgdId); + const 
timeoutNanos: bigint = convertMsToNanos(WrapperProperties.BG_CONNECT_TIMEOUT_MS.get(properties)); + const suspendStartTime: bigint = getTimeInNanos(); + const endTime: bigint = getTimeInNanos() + timeoutNanos; + + while (getTimeInNanos() <= endTime && bgStatus != null && bgStatus.currentPhase === BlueGreenPhase.IN_PROGRESS) { + await this.delay(SuspendExecuteRouting.SLEEP_TIME_MS, bgStatus, pluginService, this.bgdId); + + bgStatus = pluginService.getStatus(BlueGreenStatus, this.bgdId); + } + + if (bgStatus != null && bgStatus.currentPhase === BlueGreenPhase.IN_PROGRESS) { + throw new TimeoutError(Messages.get("Bgd.stillInProgressTryMethodLater", `${WrapperProperties.BG_CONNECT_TIMEOUT_MS.get(properties)}`)); + } + + logger.debug(Messages.get("Bgd.switchoverCompletedContinueWithMethod", methodName, `${convertNanosToMs(getTimeInNanos() - suspendStartTime)}`)); + + return RoutingResultHolder.empty(); + }); + } +} diff --git a/common/lib/plugins/bluegreen/routing/suspend_until_corresponding_host_found_connect_routing.ts b/common/lib/plugins/bluegreen/routing/suspend_until_corresponding_host_found_connect_routing.ts new file mode 100644 index 00000000..65d9aa87 --- /dev/null +++ b/common/lib/plugins/bluegreen/routing/suspend_until_corresponding_host_found_connect_routing.ts @@ -0,0 +1,96 @@ +/* + Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"). + You may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +import { BaseConnectRouting } from "./base_connect_routing"; +import { ConnectionPlugin } from "../../../connection_plugin"; +import { HostInfo } from "../../../host_info"; +import { ClientWrapper } from "../../../client_wrapper"; +import { PluginService } from "../../../plugin_service"; +import { BlueGreenRole } from "../blue_green_role"; +import { Messages } from "../../../utils/messages"; +import { logger } from "../../../../logutils"; +import { TelemetryFactory } from "../../../utils/telemetry/telemetry_factory"; +import { TelemetryContext } from "../../../utils/telemetry/telemetry_context"; +import { TelemetryTraceLevel } from "../../../utils/telemetry/telemetry_trace_level"; +import { BlueGreenStatus } from "../blue_green_status"; +import { convertMsToNanos, convertNanosToMs, getTimeInNanos, Pair } from "../../../utils/utils"; +import { WrapperProperties } from "../../../wrapper_property"; +import { BlueGreenPhase } from "../blue_green_phase"; +import { TimeoutError } from "@opentelemetry/sdk-metrics"; + +export class SuspendUntilCorrespondingHostFoundConnectRouting extends BaseConnectRouting { + protected static readonly TELEMETRY_SWITCHOVER: string = "Blue/Green switchover"; + private static readonly SLEEP_TIME_MS: number = 100; + + protected bgdId: string; + + constructor(hostAndPort: string, role: BlueGreenRole, bgdId: string) { + super(hostAndPort, role); + this.bgdId = bgdId; + } + + async apply( + plugin: ConnectionPlugin, + hostInfo: HostInfo, + properties: Map, + isInitialConnection: boolean, + connectFunc: () => Promise, + pluginService: PluginService + ): Promise { + logger.debug(Messages.get("Bgd.waitConnectUntilCorrespondingHostFound", hostInfo.host)); + + const telemetryFactory: TelemetryFactory = pluginService.getTelemetryFactory(); + const telemetryContext: TelemetryContext = telemetryFactory.openTelemetryContext( + SuspendUntilCorrespondingHostFoundConnectRouting.TELEMETRY_SWITCHOVER, + TelemetryTraceLevel.NESTED + ); + + return await 
telemetryContext.start(async () => { + let bgStatus: BlueGreenStatus = pluginService.getStatus(BlueGreenStatus, this.bgdId); + let correspondingPair: Pair = bgStatus?.correspondingHosts.get(hostInfo.host); + + const timeoutNanos: bigint = convertMsToNanos(WrapperProperties.BG_CONNECT_TIMEOUT_MS.get(properties)); + const suspendStartTime: bigint = getTimeInNanos(); + const endTime: bigint = getTimeInNanos() + timeoutNanos; + + while ( + getTimeInNanos() <= endTime && + bgStatus != null && + bgStatus.currentPhase !== BlueGreenPhase.COMPLETED && + (!correspondingPair || !correspondingPair.right) + ) { + await this.delay(SuspendUntilCorrespondingHostFoundConnectRouting.SLEEP_TIME_MS, bgStatus, pluginService, this.bgdId); + + bgStatus = pluginService.getStatus(BlueGreenStatus, this.bgdId); + correspondingPair = bgStatus?.correspondingHosts.get(hostInfo.host); + } + + if (!bgStatus || bgStatus.currentPhase === BlueGreenPhase.COMPLETED) { + logger.debug(Messages.get("Bgd.completedContinueWithConnect", `${convertNanosToMs(getTimeInNanos() - suspendStartTime)}`)); + } else if (getTimeInNanos() > endTime) { + throw new TimeoutError( + Messages.get("Bgd.correspondingHostNotFoundTryConnectLater", hostInfo.host, `${WrapperProperties.BG_CONNECT_TIMEOUT_MS.get(properties)}`) + ); + } + + logger.debug( + Messages.get("Bgd.correspondingHostFoundContinueWithConnect", hostInfo.host, `${convertNanosToMs(getTimeInNanos() - suspendStartTime)}`) + ); + // returning no connection so the next routing can handle it + return Promise.resolve(); + }); + } +} diff --git a/common/lib/plugins/bluegreen/status_info.ts b/common/lib/plugins/bluegreen/status_info.ts new file mode 100644 index 00000000..5e374421 --- /dev/null +++ b/common/lib/plugins/bluegreen/status_info.ts @@ -0,0 +1,34 @@ +/* + Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"). + You may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +import { BlueGreenPhase } from "./blue_green_phase"; +import { BlueGreenRole } from "./blue_green_role"; + +export class StatusInfo { + version: string; + endpoint: string; + port: number; + phase: BlueGreenPhase; + role: BlueGreenRole; + + constructor(version: string, endpoint: string, port: number, phase: BlueGreenPhase, role: BlueGreenRole) { + this.version = version; + this.endpoint = endpoint; + this.port = port; + this.phase = phase; + this.role = role; + } +} diff --git a/common/lib/plugins/custom_endpoint/custom_endpoint_plugin.ts b/common/lib/plugins/custom_endpoint/custom_endpoint_plugin.ts index 39b12c26..99e5bfb4 100644 --- a/common/lib/plugins/custom_endpoint/custom_endpoint_plugin.ts +++ b/common/lib/plugins/custom_endpoint/custom_endpoint_plugin.ts @@ -101,7 +101,7 @@ export class CustomEndpointPlugin extends AbstractConnectionPlugin implements Ca this.customEndpointHostInfo = hostInfo; logger.debug(Messages.get("CustomEndpointPlugin.connectionRequestToCustomEndpoint", hostInfo.host)); - this.customEndpointId = CustomEndpointPlugin.rdsUtils.getRdsInstanceId(hostInfo.host); + this.customEndpointId = CustomEndpointPlugin.rdsUtils.getRdsClusterId(hostInfo.host); if (!this.customEndpointId) { throw new AwsWrapperError(Messages.get("CustomEndpointPlugin.errorParsingEndpointIdentifier", this.customEndpointHostInfo.host)); } diff --git a/common/lib/plugins/default_plugin.ts b/common/lib/plugins/default_plugin.ts index 7a02c08a..dca6c7f5 100644 --- a/common/lib/plugins/default_plugin.ts +++ b/common/lib/plugins/default_plugin.ts @@ -126,10 +126,6 @@ 
export class DefaultPlugin extends AbstractConnectionPlugin { throw new AwsWrapperError(Messages.get("DefaultConnectionPlugin.noHostsAvailable")); } - return this.connectionProviderManager.getHostInfoByStrategy( - hosts, - role, - strategy, - this.pluginService.getProperties()); + return this.connectionProviderManager.getHostInfoByStrategy(hosts, role, strategy, this.pluginService.getProperties()); } } diff --git a/common/lib/plugins/federated_auth/credentials_provider_factory.ts b/common/lib/plugins/federated_auth/credentials_provider_factory.ts index 4c0014e3..e879fd8d 100644 --- a/common/lib/plugins/federated_auth/credentials_provider_factory.ts +++ b/common/lib/plugins/federated_auth/credentials_provider_factory.ts @@ -14,10 +14,7 @@ limitations under the License. */ -import { - AwsCredentialIdentity, - AwsCredentialIdentityProvider -} from "@smithy/types/dist-types/identity/awsCredentialIdentity"; +import { AwsCredentialIdentity, AwsCredentialIdentityProvider } from "@smithy/types/dist-types/identity/awsCredentialIdentity"; export interface CredentialsProviderFactory { getAwsCredentialsProvider(host: string, region: string, props: Map): Promise; diff --git a/common/lib/plugins/federated_auth/saml_credentials_provider_factory.ts b/common/lib/plugins/federated_auth/saml_credentials_provider_factory.ts index 02afa7e6..bfdfe7f6 100644 --- a/common/lib/plugins/federated_auth/saml_credentials_provider_factory.ts +++ b/common/lib/plugins/federated_auth/saml_credentials_provider_factory.ts @@ -19,10 +19,7 @@ import { AssumeRoleWithSAMLCommand, STSClient } from "@aws-sdk/client-sts"; import { WrapperProperties } from "../../wrapper_property"; import { AwsWrapperError } from "../../utils/errors"; -import { - AwsCredentialIdentity, - AwsCredentialIdentityProvider -} from "@smithy/types/dist-types/identity/awsCredentialIdentity"; +import { AwsCredentialIdentity, AwsCredentialIdentityProvider } from "@smithy/types/dist-types/identity/awsCredentialIdentity"; import { decode 
} from "entities"; export abstract class SamlCredentialsProviderFactory implements CredentialsProviderFactory { diff --git a/common/lib/profile/driver_configuration_profiles.ts b/common/lib/profile/driver_configuration_profiles.ts index ef5864bd..ee387b82 100644 --- a/common/lib/profile/driver_configuration_profiles.ts +++ b/common/lib/profile/driver_configuration_profiles.ts @@ -19,9 +19,7 @@ import { ConfigurationProfilePresetCodes } from "./configuration_profile_codes"; import { WrapperProperties } from "../wrapper_property"; import { HostMonitoringPluginFactory } from "../plugins/efm/host_monitoring_plugin_factory"; import { AuroraInitialConnectionStrategyFactory } from "../plugins/aurora_initial_connection_strategy_plugin_factory"; -import { - AuroraConnectionTrackerPluginFactory -} from "../plugins/connection_tracker/aurora_connection_tracker_plugin_factory"; +import { AuroraConnectionTrackerPluginFactory } from "../plugins/connection_tracker/aurora_connection_tracker_plugin_factory"; import { ReadWriteSplittingPluginFactory } from "../plugins/read_write_splitting_plugin_factory"; import { FailoverPluginFactory } from "../plugins/failover/failover_plugin_factory"; import { InternalPooledConnectionProvider } from "../internal_pooled_connection_provider"; diff --git a/common/lib/utils/iam_auth_utils.ts b/common/lib/utils/iam_auth_utils.ts index 8b4ce417..42525ddf 100644 --- a/common/lib/utils/iam_auth_utils.ts +++ b/common/lib/utils/iam_auth_utils.ts @@ -21,10 +21,7 @@ import { AwsWrapperError } from "./errors"; import { Messages } from "./messages"; import { RdsUtils } from "./rds_utils"; import { Signer } from "@aws-sdk/rds-signer"; -import { - AwsCredentialIdentity, - AwsCredentialIdentityProvider -} from "@smithy/types/dist-types/identity/awsCredentialIdentity"; +import { AwsCredentialIdentity, AwsCredentialIdentityProvider } from "@smithy/types/dist-types/identity/awsCredentialIdentity"; import { PluginService } from "../plugin_service"; import { 
TelemetryTraceLevel } from "./telemetry/telemetry_trace_level"; diff --git a/common/lib/utils/locales/en.json b/common/lib/utils/locales/en.json index e9661fe7..b8637886 100644 --- a/common/lib/utils/locales/en.json +++ b/common/lib/utils/locales/en.json @@ -221,7 +221,7 @@ "ClusterTopologyMonitoring.ignoringNewTopologyRequest": "Previous failover has just completed, ignoring new topology request.", "ClusterTopologyMonitoring.timeoutSetToZero": "A topology refresh was requested, but the given timeout for the request was 0 ms. Returning cached hosts: ", "ClusterTopologyMonitor.startingHostMonitors": "Starting host monitoring tasks.", - "ClusterTopologyMonitor.writerPickedUpFromHostMonitors": "The writer host detected by the node monitors was picked up by the topology monitor: '%s'.", + "ClusterTopologyMonitor.writerPickedUpFromHostMonitors": "The writer host detected by the host monitors was picked up by the topology monitor: '%s'.", "ClusterTopologyMonitor.writerMonitoringConnection": "The monitoring connection is connected to a writer: '%s'.", "ClusterTopologyMonitor.invalidWriterQuery": "An error occurred while attempting to obtain the writer id because the query was invalid. Please ensure you are connecting to an Aurora or RDS DB cluster. Error: '%s'", "ClusterTopologyMonitor.unableToConnect": "Could not connect to initial host: '%s'.", @@ -253,5 +253,48 @@ "CustomEndpointMonitorImpl.stoppedMonitor": "Stopped custom endpoint monitor for '%s'.", "CustomEndpointMonitorImpl.stoppingMonitor": "Stopping custom endpoint monitor for '%s'.", "CustomEndpointMonitorImpl.noEndpoints": "Unable to find any custom endpoints. When connecting with a custom endpoint, at least one custom endpoint should be detected.", - "AwsSdk.unsupportedRegion": "Unsupported AWS region '%s'. For supported regions please read https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html" + "AwsSdk.unsupportedRegion": "Unsupported AWS region '%s'. 
For supported regions please read https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html", + "Bgd.inProgressConnectionClosed": "Connection has been closed since Blue/Green switchover is in progress.", + "Bgd.inProgressSuspendConnect": "Blue/Green Deployment switchover is in progress. The 'connect' call will be delayed until switchover is completed.", + "Bgd.inProgressTryConnectLater": "Blue/Green Deployment switchover is still in progress after %s ms. Try to connect again later.", + "Bgd.switchoverCompleteContinueWithConnect": "Blue/Green Deployment switchover is completed. Continue with connect call. The call was suspended for %s ms.", + "Bgd.inProgressSuspendMethod": "Blue/Green Deployment switchover is in progress. Suspend '%s' call until switchover is completed.", + "Bgd.stillInProgressTryMethodLater": "Blue/Green Deployment switchover is still in progress after %s ms. Try '%s' again later.", + "Bgd.switchoverCompletedContinueWithMethod": "Blue/Green Deployment switchover is completed. Continue with '%s' call. The call was suspended for %s ms.", + "Bgd.inProgressCantConnect": "Blue/Green Deployment switchover is in progress. New connection can't be opened.", + "Bgd.requireIamHost": "Connecting with IP address when IAM authentication is enabled requires an 'iamHost' parameter.", + "Bgd.inProgressCantOpenConnection": "Blue/Green Deployment switchover is in progress. 
Can't establish connection to '%s'.",
host '%s' has changed names, using IAM host '%s'.", + "Bgd.resetContext": "Blue Green Status Provider resetting context.", + "Bgd.hostInfoNull": "Unable to initialize HostListProvider since connection host information is null.", + "Bgd.waitConnectUntilCorrespondingHostFound": "Blue/Green Deployment switchover is in progress and a corresponding host for '%s' is not found. The 'connect' call will be delayed.", + "Bgd.correspondingHostNotFoundTryConnectLater": "Blue/Green Deployment switchover is still in progress and a corresponding host for '%s' is not found after %s ms. Try to connect again later.", + "Bgd.correspondingHostFoundContinueWithConnect": "A corresponding host for '%s' is found. Continue with connect call. The call was suspended for %s ms.", + "Bgd.completedContinueWithConnect": "Blue/Green Deployment status is completed. Continue with 'connect' call. The call was suspended for %s ms." } diff --git a/common/lib/utils/rds_utils.ts b/common/lib/utils/rds_utils.ts index ad2b5e10..866dfaf9 100644 --- a/common/lib/utils/rds_utils.ts +++ b/common/lib/utils/rds_utils.ts @@ -108,6 +108,7 @@ export class RdsUtils { private static readonly IP_V6 = /^[0-9a-fA-F]{1,4}(:[0-9a-fA-F]{1,4}){7}$/i; private static readonly IP_V6_COMPRESSED = /^(([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4}){0,5})?)::(([0-9A-Fa-f]{1,4}(:[0-9A-Fa-f]{1,4}){0,5})?)$/i; private static readonly BG_GREEN_HOST_PATTERN = /.*(?-green-[0-9a-z]{6})\..*/i; + private static readonly BG_OLD_HOST_PATTERN = /.*(?-old1)\..*/i; static readonly DNS_GROUP = "dns"; static readonly INSTANCE_GROUP = "instance"; @@ -153,6 +154,22 @@ export class RdsUtils { return dnsGroup && dnsGroup.startsWith("proxy-"); } + getRdsClusterId(host: string): string | null { + const matcher = this.cacheMatcher( + host, + RdsUtils.AURORA_DNS_PATTERN, + RdsUtils.AURORA_CHINA_DNS_PATTERN, + RdsUtils.AURORA_OLD_CHINA_DNS_PATTERN, + RdsUtils.AURORA_GOV_DNS_PATTERN + ); + + if (this.getRegexGroup(matcher, RdsUtils.DNS_GROUP) !== null) { + return 
this.getRegexGroup(matcher, RdsUtils.INSTANCE_GROUP); + } + + return null; + } + public getRdsInstanceId(host: string): string | null { if (!host) { return null; @@ -165,7 +182,7 @@ export class RdsUtils { RdsUtils.AURORA_OLD_CHINA_DNS_PATTERN, RdsUtils.AURORA_GOV_DNS_PATTERN ); - if (this.getRegexGroup(matcher, RdsUtils.DNS_GROUP) !== null) { + if (this.getRegexGroup(matcher, RdsUtils.DNS_GROUP) === null) { return this.getRegexGroup(matcher, RdsUtils.INSTANCE_GROUP); } @@ -260,6 +277,10 @@ export class RdsUtils { return null; } + public isIP(ip: string) { + return this.isIPv4(ip) || this.isIPv6(ip); + } + public isIPv4(ip: string) { return ip.match(RdsUtils.IP_V4); } @@ -298,7 +319,23 @@ export class RdsUtils { } public isGreenInstance(host: string) { - return host && host.match(RdsUtils.BG_GREEN_HOST_PATTERN); + return host && RdsUtils.BG_GREEN_HOST_PATTERN.test(host); + } + + public isOldInstance(host: string): boolean { + return !!host && RdsUtils.BG_OLD_HOST_PATTERN.test(host); + } + + public isNotOldInstance(host: string): boolean { + if (!host) { + return true; + } + return !RdsUtils.BG_OLD_HOST_PATTERN.test(host); + } + + // Verify that provided host is a blue host name and contains neither green prefix nor old prefix. 
+ public isNotGreenAndOldPrefixInstance(host: string): boolean { + return !!host && !RdsUtils.BG_GREEN_HOST_PATTERN.test(host) && !RdsUtils.BG_OLD_HOST_PATTERN.test(host); } public removeGreenInstancePrefix(host: string): string { diff --git a/common/lib/utils/utils.ts b/common/lib/utils/utils.ts index bd9939c7..30993055 100644 --- a/common/lib/utils/utils.ts +++ b/common/lib/utils/utils.ts @@ -73,6 +73,10 @@ export function convertNanosToMs(nanos: bigint) { return Number(nanos) / 1000000; } +export function convertMsToNanos(millis: number): bigint { + return BigInt(millis * 1000000); +} + export function convertNanosToMinutes(nanos: bigint) { return Number(nanos) / 60_000_000_000; } @@ -108,3 +112,21 @@ export function equalsIgnoreCase(value1: string | null, value2: string | null): export function isDialectTopologyAware(dialect: any): dialect is TopologyAwareDatabaseDialect { return dialect; } + +export class Pair { + private readonly _left: K; + private readonly _right: V; + + constructor(value1: K, value2: V) { + this._left = value1; + this._right = value2; + } + + get left(): K { + return this._left; + } + + get right(): V { + return this._right; + } +} diff --git a/common/lib/wrapper_property.ts b/common/lib/wrapper_property.ts index 87ae5f9a..7a678deb 100644 --- a/common/lib/wrapper_property.ts +++ b/common/lib/wrapper_property.ts @@ -16,8 +16,8 @@ import { ConnectionProvider } from "./connection_provider"; import { DatabaseDialect } from "./database_dialect/database_dialect"; -import { Failover2Plugin } from "./plugins/failover2/failover2_plugin"; import { ClusterTopologyMonitorImpl } from "./host_list_provider/monitoring/cluster_topology_monitor"; +import { BlueGreenStatusProvider } from "./plugins/bluegreen/blue_green_status_provider"; export class WrapperProperty { name: string; @@ -420,6 +420,60 @@ export class WrapperProperties { null ); + static readonly BG_CONNECT_TIMEOUT_MS = new WrapperProperty( + "bgConnectTimeoutMs", + "Connect timeout in 
milliseconds during Blue/Green Deployment switchover.", + 30000 + ); + + static readonly BGD_ID = new WrapperProperty("bgdId", "Blue/Green Deployment ID", "1"); + + static readonly BG_INTERVAL_BASELINE_MS = new WrapperProperty( + "bgBaselineMs", + "Baseline Blue/Green Deployment status checking interval in milliseconds.", + 60000 + ); + + static readonly BG_INTERVAL_INCREASED_MS = new WrapperProperty( + "bgIncreasedMs", + "Increased Blue/Green Deployment status checking interval in milliseconds.", + 1000 + ); + + static readonly BG_INTERVAL_HIGH_MS = new WrapperProperty( + "bgHighMs", + "High Blue/Green Deployment status checking interval in milliseconds.", + 100 + ); + + static readonly BG_SWITCHOVER_TIMEOUT_MS = new WrapperProperty( + "bgSwitchoverTimeoutMs", + "Blue/Green Deployment switchover timeout in milliseconds.", + 180000 // 3min + ); + + static readonly BG_SUSPEND_NEW_BLUE_CONNECTIONS = new WrapperProperty( + "bgSuspendNewBlueConnections", + "Enables Blue/Green Deployment switchover to suspend new blue connection requests while the switchover process is in progress.", + false + ); + + private static readonly PREFIXES = [ + WrapperProperties.MONITORING_PROPERTY_PREFIX, + ClusterTopologyMonitorImpl.MONITORING_PROPERTY_PREFIX, + BlueGreenStatusProvider.MONITORING_PROPERTY_PREFIX + ]; + + private static startsWithPrefix(key: string) { + for (const prefix in WrapperProperties.PREFIXES) { + if (key.startsWith(prefix)) { + return true; + } + } + + return false; + } + static removeWrapperProperties(props: Map): Map { const persistingProperties = [ WrapperProperties.USER.name, @@ -432,10 +486,7 @@ export class WrapperProperties { const copy = new Map(props); for (const key of props.keys()) { - if ( - !key.startsWith(WrapperProperties.MONITORING_PROPERTY_PREFIX) && - !key.startsWith(ClusterTopologyMonitorImpl.MONITORING_PROPERTY_PREFIX) - ) { + if (!WrapperProperties.startsWithPrefix(key)) { continue; } diff --git a/common/logutils.ts b/common/logutils.ts index 
18145739..a5cd8373 100644 --- a/common/logutils.ts +++ b/common/logutils.ts @@ -23,7 +23,7 @@ dotenv.config(); const logLevel = process.env.LOG_LEVEL; -enum levels { +export enum levels { error, warn, help, diff --git a/docs/Documentation.md b/docs/Documentation.md index 961bb25c..21ddc7b5 100644 --- a/docs/Documentation.md +++ b/docs/Documentation.md @@ -2,7 +2,7 @@ - [Getting Started](./GettingStarted.md) - [Using the AWS Advanced NodeJS Wrapper](./using-the-nodejs-wrapper/UsingTheNodejsWrapper.md) - - [Logging](/docs/using-the-nodejs-wrapper/UsingTheNodejsWrapper.md#logging) + - [Logging](../docs/using-the-nodejs-wrapper/UsingTheNodejsWrapper.md#logging) - [Telemetry](../docs/using-the-nodejs-wrapper/Telemetry.md) - [Database Dialects](../docs/using-the-nodejs-wrapper/DatabaseDialects.md) - [Plugins](./using-the-nodejs-wrapper/UsingTheNodejsWrapper.md#plugins) @@ -16,6 +16,7 @@ - [Aurora Initial Connection Strategy Plugin](./using-the-nodejs-wrapper/using-plugins/UsingTheAuroraInitialConnectionStrategyPlugin.md) - [AWS Secrets Manager Plugin](./using-the-nodejs-wrapper/using-plugins/UsingTheAwsSecretsManagerPlugin.md) - [Aurora Connection Tracker Plugin](./using-the-nodejs-wrapper/using-plugins/UsingTheAuroraConnectionTrackerPlugin.md) + - [Blue/Green Deployment Plugin](./using-the-nodejs-wrapper/using-plugins/UsingTheBlueGreenPlugin.md) - [Host Availability Strategy](./using-the-nodejs-wrapper/HostAvailabilityStrategy.md) - [Reader Selection Strategies](./using-the-nodejs-wrapper/ReaderSelectionStrategies.md) - Examples diff --git a/docs/development-guide/IntegrationTests.md b/docs/development-guide/IntegrationTests.md index 973cd41c..4b247336 100644 --- a/docs/development-guide/IntegrationTests.md +++ b/docs/development-guide/IntegrationTests.md @@ -16,10 +16,15 @@ Before running the integration tests for the AWS Advanced NodeJS Wrapper, you mu - RDS permissions. 
- EC2 permissions so integration tests can add the current IP address in the Aurora cluster's EC2 security group. - - For more information, see: [Setting Up for Amazon RDS User Guide](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SettingUp.html). + - For more information, + see: [Setting Up for Amazon RDS User Guide](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SettingUp.html). -- An available Aurora PostgreSQL or MySQL DB cluster is required if you're running the tests against an existing DB cluster. The `REUSE_RDS_CLUSTER` [environment variable](#environment-variables-for-running-against-an-existing-aurora-cluster) is required to run tests against an existing cluster. -- An IAM user or role with permissions to AWS X-Ray and Amazon CloudWatch is required to visualize the telemetry data in the AWS Console. For more details, see: [Telemetry](../using-the-nodejs-wrapper/Telemetry.md). +- An available Aurora PostgreSQL or MySQL DB cluster is required if you're running the tests against an existing DB + cluster. The + `REUSE_RDS_DB` [environment variable](#environment-variables-for-running-against-an-existing-aurora-cluster) is + required to run tests against an existing cluster. +- An IAM user or role with permissions to AWS X-Ray and Amazon CloudWatch is required to visualize the telemetry data in + the AWS Console. For more details, see: [Telemetry](../using-the-nodejs-wrapper/Telemetry.md). ### Aurora Integration Tests @@ -33,14 +38,24 @@ Both approaches will incur costs. PostgreSQL and MySQL tests are currently supported. > [!TIP] -> If you are not running against an existing cluster (`REUSE_RDS_CLUSTER` is `false`), the test will automatically create and delete the test resources. However, if the tests fail, the test resources may not be fully cleaned up. After running the integration tests, ensure all test resources are cleaned up. 
+> If you are not running against an existing cluster (`REUSE_RDS_DB` is `false`), the test will automatically create and +> delete the test resources. However, if the tests fail, the test resources may not be fully cleaned up. After running the +> integration tests, ensure all test resources are cleaned up. #### Environment Variables -If the environment variable `REUSE_RDS_CLUSTER` is set to true, the integration tests will use the existing cluster defined by your environment variables. Otherwise, the integration tests will create a new Aurora cluster and then delete it automatically when the tests are done. Note that you will need a valid Docker environment to run any of the integration tests because they are run using a Docker environment as a host. The appropriate Docker containers will be created automatically when you run the tests, so you will not need to execute any Docker commands manually. If an environment variable listed in the tables below is not provided by the user, it may use a default value. +If the environment variable `REUSE_RDS_DB` is set to true, the integration tests will use the existing cluster defined +by your environment variables. Otherwise, the integration tests will create a new Aurora cluster and then delete it +automatically when the tests are done. Note that you will need a valid Docker environment to run any of the integration +tests because they are run using a Docker environment as a host. The appropriate Docker containers will be created +automatically when you run the tests, so you will not need to execute any Docker commands manually. If an environment +variable listed in the tables below is not provided by the user, it may use a default value. > [!NOTE] -> If you are running tests against an existing cluster, the tests will only run against the Aurora database engine of that cluster. 
For example, if you specify a MySQL cluster using the environment variables, only the MySQL tests will be run even if you pick test-all-aurora as the task. To run against Postgres instead, you will need to change your environment variables. +> If you are running tests against an existing cluster, the tests will only run against the Aurora database engine of +> that cluster. For example, if you specify a MySQL cluster using the environment variables, only the MySQL tests will be +> run even if you pick test-all-aurora as the task. To run against Postgres instead, you will need to change your +> environment variables. ##### Environment Variables for Running Against a New Aurora Cluster @@ -77,7 +92,7 @@ If the environment variable `REUSE_RDS_CLUSTER` is set to true, the integration | `AWS_ACCESS_KEY_ID` | An AWS access key associated with an IAM user or role with RDS permissions. | `ASIAIOSFODNN7EXAMPLE` | | `AWS_SECRET_ACCESS_KEY` | The secret key associated with the provided AWS_ACCESS_KEY_ID. | `wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY` | | `AWS_SESSION_TOKEN` | AWS Session Token for CLI, SDK, & API access. This value is only required when using MFA credentials. See: [temporary AWS credentials](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html). | `AQoDYXdzEJr...` | -| `REUSE_RDS_CLUSTER` | Must be set to true to use a specified existing cluster for tests. If you would like to have the tests create a cluster, see [here](#environment-variables-for-running-against-a-new-aurora-cluster). | `true` | `false` | +| `REUSE_RDS_DB` | Must be set to true to use a specified existing cluster for tests. If you would like to have the tests create a cluster, see [here](#environment-variables-for-running-against-a-new-aurora-cluster). | `true` | `false` | | `RDS_DB_REGION` | The database region. 
| `us-east-1` | `us-east-1` | ###### (Optional) Additional Environment Variables @@ -93,7 +108,8 @@ PostgreSQL and MySQL tests are currently supported. ### Available Integration Test Tasks -The following are the currently available integration test tasks. Each task may run a different subset of integration tests: +The following are the currently available integration test tasks. Each task may run a different subset of integration +tests: #### Standard Integration Test Tasks @@ -116,9 +132,11 @@ The following are the currently available integration test tasks. Each task may ### Running the Integration Tests 1. Ensure all [prerequisites](#prerequisites) have been installed. Docker Desktop must be running. -2. If you are running any Aurora integration tests, ensure the [Aurora Test Requirements](#aurora-test-requirements) have been met. +2. If you are running any Aurora integration tests, ensure the [Aurora Test Requirements](#aurora-test-requirements) + have been met. 3. Set up [environment variables](#environment-variables). -4. Run one of the available [integration test tasks](#available-integration-test-tasks). For example, to run all integration tests, you can use the following commands: +4. Run one of the available [integration test tasks](#available-integration-test-tasks). For example, to run all + integration tests, you can use the following commands: macOS: @@ -140,4 +158,9 @@ Linux: Test results can be found in `tests/integration/host/build/test-results/test-all-environments/`. -[^1]: The cluster domain suffix can be determined by checking the endpoint of an existing cluster in the desired region, or by temporarily creating a database to check the endpoint. For example, given the database endpoint `db-identifier.cluster-XYZ.us-east-2.rds.amazonaws.com`, the domain suffix would be `XYZ.us-east-2.rds.amazonaws.com`. 
See [here](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Endpoints.Cluster.html) for more information on Amazon Aurora cluster endpoints. +[^1]: + The cluster domain suffix can be determined by checking the endpoint of an existing cluster in the desired region, + or by temporarily creating a database to check the endpoint. For example, given the database endpoint + `db-identifier.cluster-XYZ.us-east-2.rds.amazonaws.com`, the domain suffix would be `XYZ.us-east-2.rds.amazonaws.com`. + See [here](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Endpoints.Cluster.html) for more + information on Amazon Aurora cluster endpoints. diff --git a/docs/using-the-nodejs-wrapper/using-plugins/UsingTheBlueGreenPlugin.md b/docs/using-the-nodejs-wrapper/using-plugins/UsingTheBlueGreenPlugin.md new file mode 100644 index 00000000..01c92007 --- /dev/null +++ b/docs/using-the-nodejs-wrapper/using-plugins/UsingTheBlueGreenPlugin.md @@ -0,0 +1,121 @@ +# Blue/Green Deployment Plugin + +## What is Blue/Green Deployment? + +The [Blue/Green Deployment](https://docs.aws.amazon.com/whitepapers/latest/blue-green-deployments/introduction.html) technique enables organizations to release applications by seamlessly shifting traffic between two identical environments running different versions of the application. This strategy effectively mitigates common risks associated with software deployment, such as downtime and limited rollback capability. + +The AWS Advanced NodeJS Wrapper leverages the Blue/Green Deployment approach by intelligently managing traffic distribution between blue and green nodes, minimizing the impact of stale DNS data and connectivity disruptions on user applications. 
+ +## Prerequisites + +> [!WARNING]\ +> Currently Supported Database Deployments: +> +> - Aurora MySQL and PostgreSQL clusters +> - RDS MySQL and PostgreSQL instances +> +> Unsupported Database Deployments and Configurations: +> +> - RDS MySQL and PostgreSQL Multi-AZ clusters +> - Aurora Global Database for MySQL and PostgreSQL +> +> Additional Requirements: +> +> - AWS cluster and instance endpoints must be directly accessible from the client side +> - Connecting to database nodes using CNAME aliases is not supported +> +> **Blue/Green Support Behaviour and Version Compatibility:** +> +> The AWS Advanced NodeJS Wrapper now includes enhanced full support for Blue/Green Deployments. This support requires a minimum database version that includes a specific metadata table. This constraint **does not** apply to RDS MySQL. +> +> If your database version does **not** support this table, the driver will automatically detect its absence and fallback to its previous behaviour. In this fallback mode, Blue/Green handling is subject to the same limitations listed above. +> +> **No action is required** if your database does not include the new metadata table -- the driver will continue to operate as before. If you have questions or encounter issues, please open an issue in this repository. +> +> Supported RDS PostgreSQL Versions: `rds_tools v1.7 (17.1, 16.5, 15.9, 14.14, 13.17, 12.21)` and above.
+> Supported Aurora PostgreSQL Versions: Engine Release `17.5, 16.9, 15.13, 14.18, 13.21` and above.
+> Supported Aurora MySQL Versions: Engine Release `3.07` and above. + +## What is Blue/Green Deployment Plugin? + +During a [Blue/Green switchover](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/blue-green-deployments-switching.html), several significant changes occur to your database configuration: + +- Connections to blue hosts terminate at a specific point during the transition +- Host connectivity may be temporarily impacted due to reconfigurations and potential node restarts +- Cluster and instance endpoints are redirected to different database hosts +- Internal database host names undergo changes +- Internal security certificates are regenerated to accommodate the new host names + +All factors mentioned above may cause application disruption. The AWS Advanced NodeJS Wrapper aims to minimize the application disruption during Blue/Green switchover by performing the following actions: + +- Actively monitors Blue/Green switchover status and implements appropriate measures to suspend, pass-through, or re-route database traffic +- Prior to Blue/Green switchover initiation, compiles a comprehensive inventory of cluster and instance endpoints for both blue and green nodes along with their corresponding IP addresses +- During the active switchover phase, temporarily suspends execution of database calls to blue nodes, which helps unload database nodes and reduces transaction lag for green nodes, thereby enhancing overall switchover performance +- Substitutes provided hostnames with corresponding IP addresses when establishing new blue connections, effectively eliminating stale DNS data and ensuring connections to current blue nodes +- During the brief post-switchover period, continuously monitors DNS entries, confirms that blue endpoints have been reconfigured, and discontinues hostname-to-IP address substitution as it becomes unnecessary +- Automatically rejects new connection requests to green nodes when the switchover is completed but DNS entries for green 
nodes remain temporarily available +- Intelligently detects switchover failures and rollbacks to the original state, implementing appropriate connection handling measures to maintain application stability + +## How do I use Blue/Green Deployment Plugin with the AWS Advanced NodeJS Wrapper? + +To enable the Blue/Green Deployment functionality, add the plugin code `bg` to the [`plugins`](../UsingTheNodejsWrapper.md#connection-plugin-manager-parameters) parameter value. +The Blue/Green Deployment Plugin supports the following configuration parameters: + +| Parameter | Value | Required | Description | Example Value | Default Value | +| ----------------------------- | :-----: | :----------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------ | ------------- | +| `bgdId` | String | If using multiple Blue/Green Deployments, yes; otherwise, no | This parameter is optional and defaults to `1`. When supporting multiple Blue/Green Deployments (BGDs), this parameter becomes mandatory. Each connection string must include the `bgdId` parameter with a value that can be any number or string. However, all connection strings associated with the same Blue/Green Deployment must use identical `bgdId` values, while connection strings belonging to different BGDs must specify distinct values. | `1234`, `abc-1`, `abc-2` | `1` | +| `bgConnectTimeoutMs` | Integer | No | Maximum waiting time (in milliseconds) for establishing new connections during a Blue/Green switchover when blue and green traffic is temporarily suspended. 
| `30000` | `30000` | +| `bgBaselineMs` | Integer | No | The baseline interval (ms) for checking Blue/Green Deployment status. It's highly recommended to keep this parameter below 900000ms (15 minutes). | `60000` | `60000` | +| `bgIncreasedMs` | Integer | No | The increased interval (ms) for checking Blue/Green Deployment status. Configure this parameter within the range of 500-2000 milliseconds. | `1000` | `1000` | +| `bgHighMs` | Integer | No | The high-frequency interval (ms) for checking Blue/Green Deployment status. Configure this parameter within the range of 50-500 milliseconds. | `100` | `100` | +| `bgSwitchoverTimeoutMs` | Integer | No | Maximum duration (in milliseconds) allowed for switchover completion. If the switchover process stalls or exceeds this timeframe, the driver will automatically assume completion and resume normal operations. | `180000` | `180000` | +| `bgSuspendNewBlueConnections` | Boolean | No | Enables Blue/Green Deployment switchover to suspend new blue connection requests while the switchover process is in progress. | `false` | `false` | + +The plugin establishes dedicated monitoring connections to track Blue/Green Deployment status. To apply specific configurations to these monitoring connections, add the `blue-green-monitoring-` prefix to any configuration parameter, as shown in the following example: + +```typescript +params = { + plugins: "bg", + wrapperConnectTimeout: 60000, + blue_green_monitoring_wrapperConnectTimeout: 10000 +}; + +const client = new AwsPGClient(params); +await client.connect(); +``` + +> [!WARNING]\ +> **Always ensure you provide a non-zero network timeout value to the Blue/Green Deployment Plugin** + +## Plan your Blue/Green switchover in advance + +To optimize Blue/Green switchover support with the AWS Advanced NodeJS Wrapper, advance planning is essential. Please follow these recommended steps: + +1. Create a Blue/Green Deployment for your database. +2. 
Configure your application by incorporating the `bg` plugin along with any additional parameters of your choice, then deploy your application to the corresponding environment. +3. The order of steps 1 and 2 is flexible and can be performed in either sequence. +4. Allow sufficient time for the deployed application with the active Blue/Green plugin to collect deployment status information. This process typically requires several minutes. +5. Initiate the Blue/Green Deployment switchover through the AWS Console, CLI, or RDS API. +6. Monitor the process until the switchover completes successfully or rolls back. This may take several minutes. +7. Review the switchover summary in the application logs. This requires setting the log level to `debug`. For more information, see [Logging](/docs/using-the-nodejs-wrapper/UsingTheNodejsWrapper.md#logging). +8. Update your application by deactivating the `bg` plugin through its removal from your application configuration. Redeploy your application afterward. Note that an active Blue/Green plugin produces no adverse effects once the switchover has been completed. +9. Delete the Blue/Green Deployment through the appropriate AWS interface. +10. The sequence of steps 8 and 9 is flexible and can be executed in either order based on your preference. + +Here's an example of a switchover summary. Time zero corresponds to the beginning of the active switchover phase. Time offsets indicate the start time of each specific switchover phase. 
+ +``` +---------------------------------------------------------------------------- +timestamp time offset (ms) event +---------------------------------------------------------------------------- +2025-04-23T17:39:23.529507Z -46468 ms NOT_CREATED +2025-04-23T17:39:23.795213Z -46202 ms CREATED +2025-04-23T17:40:07.411020Z -2585 ms PREPARATION +2025-04-23T17:40:09.996344Z 0 ms IN_PROGRESS +2025-04-23T17:40:17.429581Z 7434 ms POST +2025-04-23T17:40:35.853160Z 25857 ms Green topology changed +2025-04-23T17:40:48.537135Z 38543 ms Blue DNS updated +2025-04-23T17:42:23.163572Z 133174 ms Green DNS removed +2025-04-23T17:42:26.536226Z 136547 ms COMPLETED +---------------------------------------------------------------------------- +``` diff --git a/jest.integration.config.json b/jest.integration.config.json index 287f1b8b..75258d22 100644 --- a/jest.integration.config.json +++ b/jest.integration.config.json @@ -1,4 +1,5 @@ { + "testTimeout": 3600000, "moduleFileExtensions": ["ts", "js", "json"], "testMatch": ["/tests/integration/container/tests/*.(spec|test).ts|tsx"], "transform": { diff --git a/mysql/lib/dialect/aurora_mysql_database_dialect.ts b/mysql/lib/dialect/aurora_mysql_database_dialect.ts index 2954d550..c2843edc 100644 --- a/mysql/lib/dialect/aurora_mysql_database_dialect.ts +++ b/mysql/lib/dialect/aurora_mysql_database_dialect.ts @@ -24,12 +24,11 @@ import { HostRole } from "../../../common/lib/host_role"; import { ClientWrapper } from "../../../common/lib/client_wrapper"; import { DatabaseDialectCodes } from "../../../common/lib/database_dialect/database_dialect_codes"; import { WrapperProperties } from "../../../common/lib/wrapper_property"; -import { - MonitoringRdsHostListProvider -} from "../../../common/lib/host_list_provider/monitoring/monitoring_host_list_provider"; +import { MonitoringRdsHostListProvider } from "../../../common/lib/host_list_provider/monitoring/monitoring_host_list_provider"; import { PluginService } from 
"../../../common/lib/plugin_service"; +import { BlueGreenDialect, BlueGreenResult } from "../../../common/lib/database_dialect/blue_green_dialect"; -export class AuroraMySQLDatabaseDialect extends MySQLDatabaseDialect implements TopologyAwareDatabaseDialect { +export class AuroraMySQLDatabaseDialect extends MySQLDatabaseDialect implements TopologyAwareDatabaseDialect, BlueGreenDialect { private static readonly TOPOLOGY_QUERY: string = "SELECT server_id, CASE WHEN SESSION_ID = 'MASTER_SESSION_ID' THEN TRUE ELSE FALSE END as is_writer, " + "cpu, REPLICA_LAG_IN_MILLISECONDS as 'lag', LAST_UPDATE_TIMESTAMP as last_update_timestamp " + @@ -44,6 +43,10 @@ export class AuroraMySQLDatabaseDialect extends MySQLDatabaseDialect implements "WHERE SESSION_ID = 'MASTER_SESSION_ID' AND SERVER_ID = @@aurora_server_id"; private static readonly AURORA_VERSION_QUERY = "SHOW VARIABLES LIKE 'aurora_version'"; + private static readonly BG_STATUS_QUERY: string = "SELECT * FROM mysql.rds_topology"; + private static readonly TOPOLOGY_TABLE_EXIST_QUERY: string = + "SELECT 1 AS tmp FROM information_schema.tables WHERE table_schema = 'mysql' AND table_name = 'rds_topology'"; + getHostListProvider(props: Map, originalUrl: string, hostListProviderService: HostListProviderService): HostListProvider { if (WrapperProperties.PLUGINS.get(props).includes("failover2")) { return new MonitoringRdsHostListProvider(props, originalUrl, hostListProviderService, (hostListProviderService)); @@ -111,4 +114,22 @@ export class AuroraMySQLDatabaseDialect extends MySQLDatabaseDialect implements getDialectUpdateCandidates(): string[] { return [DatabaseDialectCodes.RDS_MULTI_AZ_MYSQL]; } + + async isBlueGreenStatusAvailable(clientWrapper: ClientWrapper): Promise { + try { + const [rows] = await clientWrapper.query(AuroraMySQLDatabaseDialect.TOPOLOGY_TABLE_EXIST_QUERY); + return !!rows[0]; + } catch { + return false; + } + } + + async getBlueGreenStatus(clientWrapper: ClientWrapper): Promise { + const results: 
BlueGreenResult[] = []; + const [rows] = await clientWrapper.query(AuroraMySQLDatabaseDialect.BG_STATUS_QUERY); + for (const row of rows) { + results.push(new BlueGreenResult(row.version, row.endpoint, row.port, row.role, row.status)); + } + return results.length > 0 ? results : null; + } } diff --git a/mysql/lib/dialect/mysql_database_dialect.ts b/mysql/lib/dialect/mysql_database_dialect.ts index 83d551fd..084ea6b2 100644 --- a/mysql/lib/dialect/mysql_database_dialect.ts +++ b/mysql/lib/dialect/mysql_database_dialect.ts @@ -17,9 +17,7 @@ import { DatabaseDialect, DatabaseType } from "../../../common/lib/database_dialect/database_dialect"; import { HostListProviderService } from "../../../common/lib/host_list_provider_service"; import { HostListProvider } from "../../../common/lib/host_list_provider/host_list_provider"; -import { - ConnectionStringHostListProvider -} from "../../../common/lib/host_list_provider/connection_string_host_list_provider"; +import { ConnectionStringHostListProvider } from "../../../common/lib/host_list_provider/connection_string_host_list_provider"; import { AwsWrapperError, UnsupportedMethodError } from "../../../common/lib/utils/errors"; import { DatabaseDialectCodes } from "../../../common/lib/database_dialect/database_dialect_codes"; import { TransactionIsolationLevel } from "../../../common/lib/utils/transaction_isolation_level"; diff --git a/mysql/lib/dialect/rds_multi_az_mysql_database_dialect.ts b/mysql/lib/dialect/rds_multi_az_mysql_database_dialect.ts index 98fa75e7..73b65381 100644 --- a/mysql/lib/dialect/rds_multi_az_mysql_database_dialect.ts +++ b/mysql/lib/dialect/rds_multi_az_mysql_database_dialect.ts @@ -28,9 +28,7 @@ import { RdsHostListProvider } from "../../../common/lib/host_list_provider/rds_ import { FailoverRestriction } from "../../../common/lib/plugins/failover/failover_restriction"; import { WrapperProperties } from "../../../common/lib/wrapper_property"; import { PluginService } from 
"../../../common/lib/plugin_service"; -import { - MonitoringRdsHostListProvider -} from "../../../common/lib/host_list_provider/monitoring/monitoring_host_list_provider"; +import { MonitoringRdsHostListProvider } from "../../../common/lib/host_list_provider/monitoring/monitoring_host_list_provider"; export class RdsMultiAZMySQLDatabaseDialect extends MySQLDatabaseDialect implements TopologyAwareDatabaseDialect { private static readonly TOPOLOGY_QUERY: string = "SELECT id, endpoint, port FROM mysql.rds_topology"; @@ -45,13 +43,31 @@ export class RdsMultiAZMySQLDatabaseDialect extends MySQLDatabaseDialect impleme private static readonly IS_READER_QUERY_COLUMN_NAME: string = "is_reader"; async isDialect(targetClient: ClientWrapper): Promise { - const res = await targetClient.query(RdsMultiAZMySQLDatabaseDialect.TOPOLOGY_TABLE_EXIST_QUERY).catch(() => false); + let res = await targetClient.query(RdsMultiAZMySQLDatabaseDialect.TOPOLOGY_TABLE_EXIST_QUERY).catch(() => false); if (!res) { return false; } - return !!(await targetClient.query(RdsMultiAZMySQLDatabaseDialect.TOPOLOGY_QUERY).catch(() => false)); + res = await targetClient.query(RdsMultiAZMySQLDatabaseDialect.TOPOLOGY_QUERY).catch(() => false); + if (!res) { + return false; + } + + return await targetClient + .query("SHOW VARIABLES LIKE 'report_host'") + .then((res) => { + // | Variable\_name | Value | + // | :--- | :--- | + // | report\_host | 0.0.0.0 | + + if (!res) { + return false; + } + + return !!res[0][0]["Value"]; + }) + .catch(() => false); } getHostListProvider(props: Map, originalUrl: string, hostListProviderService: HostListProviderService): HostListProvider { diff --git a/mysql/lib/dialect/rds_mysql_database_dialect.ts b/mysql/lib/dialect/rds_mysql_database_dialect.ts index eb7a5e73..9cfd297d 100644 --- a/mysql/lib/dialect/rds_mysql_database_dialect.ts +++ b/mysql/lib/dialect/rds_mysql_database_dialect.ts @@ -17,10 +17,16 @@ import { MySQLDatabaseDialect } from "./mysql_database_dialect"; import { 
DatabaseDialectCodes } from "../../../common/lib/database_dialect/database_dialect_codes"; import { ClientWrapper } from "../../../common/lib/client_wrapper"; +import { BlueGreenDialect, BlueGreenResult } from "../../../common/lib/database_dialect/blue_green_dialect"; + +export class RdsMySQLDatabaseDialect extends MySQLDatabaseDialect implements BlueGreenDialect { + private static readonly BG_STATUS_QUERY: string = "SELECT * FROM mysql.rds_topology"; + + private static readonly TOPOLOGY_TABLE_EXIST_QUERY: string = + "SELECT 1 AS tmp FROM information_schema.tables WHERE" + " table_schema = 'mysql' AND table_name = 'rds_topology'"; -export class RdsMySQLDatabaseDialect extends MySQLDatabaseDialect { getDialectUpdateCandidates(): string[] { - return [DatabaseDialectCodes.RDS_MULTI_AZ_MYSQL, DatabaseDialectCodes.AURORA_MYSQL]; + return [DatabaseDialectCodes.AURORA_MYSQL, DatabaseDialectCodes.RDS_MULTI_AZ_MYSQL]; } async isDialect(targetClient: ClientWrapper): Promise { @@ -51,4 +57,22 @@ export class RdsMySQLDatabaseDialect extends MySQLDatabaseDialect { getDialectName(): string { return this.dialectName; } + + async isBlueGreenStatusAvailable(clientWrapper: ClientWrapper): Promise { + try { + const [rows] = await clientWrapper.query(RdsMySQLDatabaseDialect.TOPOLOGY_TABLE_EXIST_QUERY); + return !!rows[0]; + } catch { + return false; + } + } + + async getBlueGreenStatus(clientWrapper: ClientWrapper): Promise { + const results: BlueGreenResult[] = []; + const [rows] = await clientWrapper.query(RdsMySQLDatabaseDialect.BG_STATUS_QUERY); + for (const row of rows) { + results.push(new BlueGreenResult(row.version, row.endpoint, row.port, row.role, row.status)); + } + return results.length > 0 ? 
results : null; + } } diff --git a/mysql/lib/mysql_error_handler.ts b/mysql/lib/mysql_error_handler.ts index 6c6ed7fe..9674c252 100644 --- a/mysql/lib/mysql_error_handler.ts +++ b/mysql/lib/mysql_error_handler.ts @@ -22,6 +22,8 @@ import { ClientWrapper } from "../../common/lib/client_wrapper"; export class MySQLErrorHandler implements ErrorHandler { private static readonly SQLSTATE_ACCESS_ERROR = "28000"; private unexpectedError: Error | null = null; + protected static readonly SYNTAX_ERROR_CODES = ["42000", "42S02"]; + protected static readonly SYNTAX_ERROR_MESSAGE = "You have an error in your SQL syntax"; protected noOpListener(error: any) { // Ignore the received error. @@ -53,6 +55,18 @@ export class MySQLErrorHandler implements ErrorHandler { ); } + isSyntaxError(e: Error): boolean { + if (Object.prototype.hasOwnProperty.call(e, "code")) { + // @ts-ignore + for (const code of MySQLErrorHandler.SYNTAX_ERROR_CODES) { + if (e["code"] === code) { + return true; + } + } + } + return e.message.includes(MySQLErrorHandler.SYNTAX_ERROR_MESSAGE); + } + hasLoginError(): boolean { return this.unexpectedError !== null && this.isLoginError(this.unexpectedError); } diff --git a/package-lock.json b/package-lock.json index 484fe5e2..14e1859e 100644 --- a/package-lock.json +++ b/package-lock.json @@ -14,10 +14,12 @@ ], "dependencies": { "@types/i18n": "^0.13.12", + "ascii-table": "^0.0.9", "async-mutex": "^0.5.0", "dotenv": "^16.4.5", "globals": "^16.1.0", "i18n": "^0.15.1", + "lodash": "^4.17.21", "winston": "3.17.0" }, "devDependencies": { @@ -46,6 +48,7 @@ "@opentelemetry/semantic-conventions": "^1.27.0", "@types/i18n": "^0.13.12", "@types/jest": "^29.5.13", + "@types/lodash": "^4.17.20", "@types/node": "^22.10.7", "@types/node-fetch": "^2.6.11", "@types/pg": "^8.15.2", @@ -8239,6 +8242,12 @@ "dev": true, "license": "MIT" }, + "node_modules/@types/lodash": { + "version": "4.17.20", + "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.17.20.tgz", + "integrity": 
"sha512-H3MHACvFUEiujabxhaI/ImO6gUrd8oOurg7LQtS7mbwIXA/cUqWrvBsaeJ23aZEPk1TAYkurjfMbSELfoCXlGA==", + "dev": true + }, "node_modules/@types/mime": { "version": "1.3.5", "resolved": "https://registry.npmjs.org/@types/mime/-/mime-1.3.5.tgz", @@ -9073,6 +9082,11 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/ascii-table": { + "version": "0.0.9", + "resolved": "https://registry.npmjs.org/ascii-table/-/ascii-table-0.0.9.tgz", + "integrity": "sha512-xpkr6sCDIYTPqzvjG8M3ncw1YOTaloWZOyrUmicoEifBEKzQzt+ooUpRpQ/AbOoJfO/p2ZKiyp79qHThzJDulQ==" + }, "node_modules/astral-regex": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-2.0.0.tgz", @@ -14653,8 +14667,7 @@ "node_modules/lodash": { "version": "4.17.21", "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", - "dev": true + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" }, "node_modules/lodash.camelcase": { "version": "4.3.0", diff --git a/package.json b/package.json index a03106f4..180215c6 100644 --- a/package.json +++ b/package.json @@ -59,6 +59,7 @@ "@opentelemetry/semantic-conventions": "^1.27.0", "@types/i18n": "^0.13.12", "@types/jest": "^29.5.13", + "@types/lodash": "^4.17.20", "@types/node": "^22.10.7", "@types/node-fetch": "^2.6.11", "@types/pg": "^8.15.2", @@ -96,10 +97,12 @@ }, "dependencies": { "@types/i18n": "^0.13.12", + "ascii-table": "^0.0.9", "async-mutex": "^0.5.0", "dotenv": "^16.4.5", "globals": "^16.1.0", "i18n": "^0.15.1", + "lodash": "^4.17.21", "winston": "3.17.0" }, "overrides": { diff --git a/pg/lib/abstract_pg_error_handler.ts b/pg/lib/abstract_pg_error_handler.ts index 59d8b2c0..2f92e568 100644 --- a/pg/lib/abstract_pg_error_handler.ts +++ b/pg/lib/abstract_pg_error_handler.ts @@ -18,9 +18,12 @@ import { ErrorHandler } from 
"../../common/lib/error_handler"; import { ClientWrapper } from "../../common/lib/client_wrapper"; import { logger } from "../../common/logutils"; import { Messages } from "../../common/lib/utils/messages"; +import { error } from "winston"; export abstract class AbstractPgErrorHandler implements ErrorHandler { protected unexpectedError: Error | null = null; + protected static readonly SYNTAX_ERROR_CODE = "42601"; + protected static readonly SYNTAX_ERROR_MESSAGE = "syntax error"; abstract getNetworkErrors(): string[]; @@ -60,6 +63,14 @@ export abstract class AbstractPgErrorHandler implements ErrorHandler { return false; } + isSyntaxError(e: Error): boolean { + if (Object.prototype.hasOwnProperty.call(e, "code")) { + // @ts-ignore + return AbstractPgErrorHandler.SYNTAX_ERROR_CODE === e["code"]; + } + return e.message.includes(AbstractPgErrorHandler.SYNTAX_ERROR_MESSAGE); + } + hasLoginError(): boolean { return this.unexpectedError !== null && this.isLoginError(this.unexpectedError); } diff --git a/pg/lib/dialect/aurora_pg_database_dialect.ts b/pg/lib/dialect/aurora_pg_database_dialect.ts index 4fbc4e35..b8fd3e93 100644 --- a/pg/lib/dialect/aurora_pg_database_dialect.ts +++ b/pg/lib/dialect/aurora_pg_database_dialect.ts @@ -25,12 +25,12 @@ import { ClientWrapper } from "../../../common/lib/client_wrapper"; import { DatabaseDialectCodes } from "../../../common/lib/database_dialect/database_dialect_codes"; import { LimitlessDatabaseDialect } from "../../../common/lib/database_dialect/limitless_database_dialect"; import { WrapperProperties } from "../../../common/lib/wrapper_property"; -import { - MonitoringRdsHostListProvider -} from "../../../common/lib/host_list_provider/monitoring/monitoring_host_list_provider"; +import { MonitoringRdsHostListProvider } from "../../../common/lib/host_list_provider/monitoring/monitoring_host_list_provider"; import { PluginService } from "../../../common/lib/plugin_service"; +import { BlueGreenDialect, BlueGreenResult } from 
"../../../common/lib/database_dialect/blue_green_dialect"; -export class AuroraPgDatabaseDialect extends PgDatabaseDialect implements TopologyAwareDatabaseDialect, LimitlessDatabaseDialect { +export class AuroraPgDatabaseDialect extends PgDatabaseDialect implements TopologyAwareDatabaseDialect, LimitlessDatabaseDialect, BlueGreenDialect { + private static readonly VERSION = process.env.npm_package_version; private static readonly TOPOLOGY_QUERY: string = "SELECT server_id, CASE WHEN SESSION_ID = 'MASTER_SESSION_ID' THEN TRUE ELSE FALSE END AS is_writer, " + "CPU, COALESCE(REPLICA_LAG_IN_MSEC, 0) AS lag, LAST_UPDATE_TIMESTAMP " + @@ -45,6 +45,10 @@ export class AuroraPgDatabaseDialect extends PgDatabaseDialect implements Topolo private static readonly IS_WRITER_QUERY: string = "SELECT server_id " + "FROM aurora_replica_status() " + "WHERE SESSION_ID = 'MASTER_SESSION_ID' AND SERVER_ID = aurora_db_instance_identifier()"; + private static readonly BG_STATUS_QUERY: string = `SELECT * FROM get_blue_green_fast_switchover_metadata('aws_advanced_nodejs_wrapper-${AuroraPgDatabaseDialect.VERSION}')`; + + private static readonly TOPOLOGY_TABLE_EXIST_QUERY: string = "SELECT 'get_blue_green_fast_switchover_metadata'::regproc"; + getHostListProvider(props: Map, originalUrl: string, hostListProviderService: HostListProviderService): HostListProvider { if (WrapperProperties.PLUGINS.get(props).includes("failover2")) { return new MonitoringRdsHostListProvider(props, originalUrl, hostListProviderService, (hostListProviderService)); @@ -120,4 +124,22 @@ export class AuroraPgDatabaseDialect extends PgDatabaseDialect implements Topolo getLimitlessRoutersQuery(): string { return "select router_endpoint, load from aurora_limitless_router_endpoints()"; } + + async isBlueGreenStatusAvailable(clientWrapper: ClientWrapper): Promise { + try { + const result = await clientWrapper.query(AuroraPgDatabaseDialect.TOPOLOGY_TABLE_EXIST_QUERY); + return !!result.rows[0]; + } catch { + return false; + 
} + } + + async getBlueGreenStatus(clientWrapper: ClientWrapper): Promise { + const results: BlueGreenResult[] = []; + const result = await clientWrapper.query(AuroraPgDatabaseDialect.BG_STATUS_QUERY); + for (const row of result.rows) { + results.push(new BlueGreenResult(row.version, row.endpoint, row.port, row.role, row.status)); + } + return results.length > 0 ? results : null; + } } diff --git a/pg/lib/dialect/pg_database_dialect.ts b/pg/lib/dialect/pg_database_dialect.ts index 2b0cc9f7..da5c006d 100644 --- a/pg/lib/dialect/pg_database_dialect.ts +++ b/pg/lib/dialect/pg_database_dialect.ts @@ -17,9 +17,7 @@ import { DatabaseDialect, DatabaseType } from "../../../common/lib/database_dialect/database_dialect"; import { HostListProviderService } from "../../../common/lib/host_list_provider_service"; import { HostListProvider } from "../../../common/lib/host_list_provider/host_list_provider"; -import { - ConnectionStringHostListProvider -} from "../../../common/lib/host_list_provider/connection_string_host_list_provider"; +import { ConnectionStringHostListProvider } from "../../../common/lib/host_list_provider/connection_string_host_list_provider"; import { AwsWrapperError, UnsupportedMethodError } from "../../../common/lib/utils/errors"; import { DatabaseDialectCodes } from "../../../common/lib/database_dialect/database_dialect_codes"; import { TransactionIsolationLevel } from "../../../common/lib/utils/transaction_isolation_level"; diff --git a/pg/lib/dialect/rds_multi_az_pg_database_dialect.ts b/pg/lib/dialect/rds_multi_az_pg_database_dialect.ts index 45f2536f..ff46faa6 100644 --- a/pg/lib/dialect/rds_multi_az_pg_database_dialect.ts +++ b/pg/lib/dialect/rds_multi_az_pg_database_dialect.ts @@ -29,9 +29,7 @@ import { ErrorHandler } from "../../../common/lib/error_handler"; import { MultiAzPgErrorHandler } from "../multi_az_pg_error_handler"; import { WrapperProperties } from "../../../common/lib/wrapper_property"; import { PluginService } from 
"../../../common/lib/plugin_service"; -import { - MonitoringRdsHostListProvider -} from "../../../common/lib/host_list_provider/monitoring/monitoring_host_list_provider"; +import { MonitoringRdsHostListProvider } from "../../../common/lib/host_list_provider/monitoring/monitoring_host_list_provider"; export class RdsMultiAZPgDatabaseDialect extends PgDatabaseDialect implements TopologyAwareDatabaseDialect { constructor() { diff --git a/pg/lib/dialect/rds_pg_database_dialect.ts b/pg/lib/dialect/rds_pg_database_dialect.ts index 32bb58fd..6ac940cc 100644 --- a/pg/lib/dialect/rds_pg_database_dialect.ts +++ b/pg/lib/dialect/rds_pg_database_dialect.ts @@ -17,12 +17,19 @@ import { PgDatabaseDialect } from "./pg_database_dialect"; import { DatabaseDialectCodes } from "../../../common/lib/database_dialect/database_dialect_codes"; import { ClientWrapper } from "../../../common/lib/client_wrapper"; +import { BlueGreenDialect, BlueGreenResult } from "../../../common/lib/database_dialect/blue_green_dialect"; + +export class RdsPgDatabaseDialect extends PgDatabaseDialect implements BlueGreenDialect { + private static readonly VERSION = process.env.npm_package_version; -export class RdsPgDatabaseDialect extends PgDatabaseDialect { private static readonly EXTENSIONS_SQL: string = "SELECT (setting LIKE '%rds_tools%') AS rds_tools, (setting LIKE '%aurora_stat_utils%') AS aurora_stat_utils " + "FROM pg_settings WHERE name='rds.extensions'"; + private static readonly BG_STATUS_QUERY: string = `SELECT * FROM rds_tools.show_topology('aws_advanced_nodejs_wrapper-${RdsPgDatabaseDialect.VERSION}')`; + + private static readonly TOPOLOGY_TABLE_EXIST_QUERY: string = "SELECT 'rds_tools.show_topology'::regproc"; + getDialectUpdateCandidates(): string[] { return [DatabaseDialectCodes.RDS_MULTI_AZ_PG, DatabaseDialectCodes.AURORA_PG]; } @@ -47,4 +54,22 @@ export class RdsPgDatabaseDialect extends PgDatabaseDialect { getDialectName(): string { return this.dialectName; } + + async 
isBlueGreenStatusAvailable(clientWrapper: ClientWrapper): Promise { + try { + const result = await clientWrapper.query(RdsPgDatabaseDialect.TOPOLOGY_TABLE_EXIST_QUERY); + return !!result.rows[0]; + } catch { + return false; + } + } + + async getBlueGreenStatus(clientWrapper: ClientWrapper): Promise { + const results: BlueGreenResult[] = []; + const result = await clientWrapper.query(RdsPgDatabaseDialect.BG_STATUS_QUERY); + for (const row of result.rows) { + results.push(new BlueGreenResult(row.version, row.endpoint, row.port, row.role, row.status)); + } + return results.length > 0 ? results : null; + } } diff --git a/tests/integration/container/tests/blue_green_deployment.test.ts b/tests/integration/container/tests/blue_green_deployment.test.ts new file mode 100644 index 00000000..30d2c960 --- /dev/null +++ b/tests/integration/container/tests/blue_green_deployment.test.ts @@ -0,0 +1,1177 @@ +/* + Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"). + You may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +import { features } from "./config"; +import { TestEnvironmentFeatures } from "./utils/test_environment_features"; +import { AuroraTestUtility } from "./utils/aurora_test_utility"; +import { TestEnvironment } from "./utils/test_environment"; +import { logger } from "../../../../common/logutils"; +import { DriverHelper } from "./utils/driver_helper"; +import { ProxyHelper } from "./utils/proxy_helper"; +import { PluginManager } from "../../../../common/lib"; +import { TestInstanceInfo } from "./utils/test_instance_info"; +import { TestDatabaseInfo } from "./utils/test_database_info"; +import { RdsUtils } from "../../../../common/lib/utils/rds_utils"; +import { BlueGreenDeployment, DBCluster } from "@aws-sdk/client-rds"; +import { DatabaseEngineDeployment } from "./utils/database_engine_deployment"; +import { DBInstance } from "@aws-sdk/client-rds/dist-types/models/models_0"; +import { DatabaseEngine } from "./utils/database_engine"; +import { getTimeInNanos, sleep } from "../../../../common/lib/utils/utils"; +import { BlueGreenRole } from "../../../../common/lib/plugins/bluegreen/blue_green_role"; +import { AwsClient } from "../../../../common/lib/aws_client"; +import { DatabaseDialectCodes } from "../../../../common/lib/database_dialect/database_dialect_codes"; +import { promisify } from "util"; +import { lookup } from "dns"; +import AsciiTable from "ascii-table"; +import { TestEnvironmentRequest } from "./utils/test_environment_request"; +import { Signer } from "@aws-sdk/rds-signer"; +import { fromNodeProviderChain } from "@aws-sdk/credential-providers"; +import { BlueGreenPlugin } from "../../../../common/lib/plugins/bluegreen/blue_green_plugin"; + +const itIf = features.includes(TestEnvironmentFeatures.BLUE_GREEN_DEPLOYMENT) ? 
it : it.skip; + +const INCLUDE_CLUSTER_ENDPOINTS = false; +const INCLUDE_WRITER_AND_READER_ONLY = false; +const VERSION = process.env.npm_package_version; +const rdsUtil = new RdsUtils(); +let auroraUtil = new AuroraTestUtility(); + +const MYSQL_BG_STATUS_QUERY = + "SELECT id, SUBSTRING_INDEX(endpoint, '.', 1) as hostId, endpoint, port, role, status, version" + " FROM mysql.rds_topology"; + +const PG_AURORA_BG_STATUS_QUERY = + "SELECT id, SPLIT_PART(endpoint, '.', 1) as hostId, endpoint, port, role, status, version" + + " FROM get_blue_green_fast_switchover_metadata('aws_advanced_nodejs_wrapper')"; + +const PG_RDS_BG_STATUS_QUERY = `SELECT * + FROM rds_tools.show_topology('aws_advanced_nodejs_wrapper-${VERSION}')`; + +const TEST_CLUSTER_ID = "test-cluster-id"; + +let env: TestEnvironment; +let request: TestEnvironmentRequest; +let info: TestDatabaseInfo; +let instances: TestInstanceInfo[]; +let driver; +let client: any; +let secondaryClient: any; +let initClientFunc: (props: any) => any; + +const results: Map = new Map(); +let unhandledErrors: Error[] = []; + +class BooleanContainer { + private value: boolean; + + constructor(initialValue: boolean = false) { + this.value = initialValue; + } + + get(): boolean { + return this.value; + } + + set(value: boolean): void { + this.value = value; + } +} + +class TimeHolder { + startTime: bigint; + endTime: bigint; + error: string; + holdNano: bigint; + + constructor(startTime: bigint, endTime: bigint, holdNano?: bigint, error?: string) { + this.startTime = startTime; + this.endTime = endTime; + this.error = error; + this.holdNano = holdNano; + } +} + +class QueryResult { + queryRole: string; + queryVersion: string; + queryNewStatus: string; + + constructor(queryRole: string, queryVersion: string, queryNewStatus: string) { + this.queryRole = queryRole; + this.queryVersion = queryVersion; + this.queryNewStatus = queryNewStatus; + } +} + +class BlueGreenResults { + startTime: bigint; + promiseSyncTime: bigint; + 
bgTriggerTime: bigint; + directBlueLostConnectionTime: bigint; + directBlueIdleLostConnectionTime: bigint; + wrapperBlueIdleLostConnectionTime: bigint; + wrapperGreenLostConnectionTime: bigint; + dnsBlueChangedTime: bigint; + dnsBlueError: string = null; + dnsGreenRemovedTime: bigint; + greenHostChangeNameTime: bigint; + blueStatusTime: Map = new Map(); + greenStatusTime: Map = new Map(); + blueWrapperConnectTimes: TimeHolder[] = []; + blueWrapperExecuteTimes: TimeHolder[] = []; + greenWrapperExecuteTimes: TimeHolder[] = []; + greenDirectIamIpWithBlueHostConnectTimes: TimeHolder[] = []; + greenDirectIamIpWithGreenHostConnectTimes: TimeHolder[] = []; +} + +async function getBlueGreenEndpoints(blueGreenId: string): Promise { + const blueGreenDeployment: BlueGreenDeployment | null = await auroraUtil.getBlueGreenDeployment(blueGreenId); + if (blueGreenDeployment === null) { + throw new Error(`BG not found: ` + blueGreenId); + } + switch (request.deployment) { + case DatabaseEngineDeployment.RDS_MULTI_AZ_INSTANCE: { + const blueInstance: DBInstance = await auroraUtil.getRdsInstanceInfoByArn(blueGreenDeployment.Source); + if (blueInstance === undefined) { + throw new Error("Blue instance not found."); + } + const greenInstance: DBInstance = await auroraUtil.getRdsInstanceInfoByArn(blueGreenDeployment.Target); + if (greenInstance === undefined) { + throw new Error("Green instance not found."); + } + + return [blueInstance.Endpoint.Address, greenInstance.Endpoint.Address]; + } + case DatabaseEngineDeployment.AURORA: { + const endpoints: string[] = []; + const blueCluster: DBCluster = await auroraUtil.getClusterByArn(blueGreenDeployment.Source); + if (blueCluster === undefined) { + throw new Error("Blue cluster not found."); + } + + if (INCLUDE_CLUSTER_ENDPOINTS) { + endpoints.push(info.clusterEndpoint); + } + + if (INCLUDE_WRITER_AND_READER_ONLY) { + endpoints.push(instances[0].host); + if (instances.length > 1) { + endpoints.push(instances[1].host); + } + } else { + 
instances.forEach((info) => endpoints.push(info.host)); + } + + const greenCluster: DBCluster = await auroraUtil.getClusterByArn(blueGreenDeployment.Target); + if (greenCluster === undefined) { + throw new Error("Green cluster not found."); + } + + if (INCLUDE_CLUSTER_ENDPOINTS) { + endpoints.push(greenCluster.Endpoint); + } + + const instanceIdClient = await openConnectionWithRetry(initDefaultConfig(info.clusterEndpoint, info.clusterEndpointPort, info.defaultDbName)); + const instanceIds: string[] = await auroraUtil.getRdsInstanceIds(request.engine, request.deployment, instanceIdClient); + if (instanceIds.length < 1) { + throw new Error("Can't find green cluster instances."); + } + + const instancePattern: string = rdsUtil.getRdsInstanceHostPattern(greenCluster.Endpoint); + if (INCLUDE_WRITER_AND_READER_ONLY) { + endpoints.push(instancePattern.replace("?", instanceIds[0])); + if (instanceIds.length > 1) { + endpoints.push(instancePattern.replace("?", instanceIds[1])); + } + } else { + instanceIds.forEach((instanceId) => endpoints.push(instancePattern.replace("?", instanceId))); + } + + return endpoints; + } + } +} + +function initDefaultConfig(host: string, port: number, dbName: string) { + let config: any = { + user: env.databaseInfo.username, + host: host, + database: dbName, + password: env.databaseInfo.password, + port: port, + enableTelemetry: true, + telemetryTracesBackend: "OTLP", + telemetryMetricsBackend: "OTLP", + plugins: "" + }; + config = DriverHelper.addDriverSpecificConfiguration(config, env.engine); + return config; +} + +function initWrapperConfig(host: string, port: number, dbName: string) { + const config = initDefaultConfig(host, port, dbName); + config["clusterId"] = TEST_CLUSTER_ID; + + const databaseEngine: DatabaseEngine = env.info.request.engine; + switch (env.info.request.deployment) { + case DatabaseEngineDeployment.AURORA: + switch (databaseEngine) { + case DatabaseEngine.MYSQL: + config["dialect"] = DatabaseDialectCodes.AURORA_MYSQL; + 
break; + case DatabaseEngine.PG: + config["dialect"] = DatabaseDialectCodes.AURORA_PG; + break; + default: + // do nothing + } + break; + case DatabaseEngineDeployment.RDS_MULTI_AZ_INSTANCE: + switch (databaseEngine) { + case DatabaseEngine.MYSQL: + config["dialect"] = DatabaseDialectCodes.RDS_MYSQL; + break; + case DatabaseEngine.PG: + config["dialect"] = DatabaseDialectCodes.RDS_PG; + break; + default: + // do nothing + } + break; + default: + // do nothing + } + + if (env.info.request.features.includes(TestEnvironmentFeatures.IAM)) { + config["iamRegion"] = env.region; + config["user"] = env.info.iamUserName; + config["plugins"] = "bg,iam"; + } else { + config["plugins"] = "bg"; + } + return config; +} + +function processResult(result: any): QueryResult[] { + const results: QueryResult[] = []; + switch (env.info.request.engine) { + case DatabaseEngine.MYSQL: + for (const row of result[0]) { + results.push(new QueryResult(row.role, row.version, row.status)); + } + break; + case DatabaseEngine.PG: + for (const row of result.rows) { + results.push(new QueryResult(row.role, row.version, row.status)); + } + break; + default: + throw new Error(`Unsupported engine type: ${env.info.request.engine}`); + } + + return results; +} + +describe("blue green", () => { + beforeEach(async () => { + logger.info(`Test started: ${expect.getState().currentTestName}`); + env = await TestEnvironment.getCurrent(); + request = env.info.request; + info = env.info.databaseInfo; + instances = info.instances; + auroraUtil = new AuroraTestUtility(env.region, env.rdsEndpoint); + driver = DriverHelper.getDriverForDatabaseEngine(env.engine); + initClientFunc = DriverHelper.getClient(driver); + await ProxyHelper.enableAllConnectivity(); + await TestEnvironment.verifyClusterStatus(auroraUtil); + + client = null; + secondaryClient = null; + }); + + afterEach(async () => { + if (client !== null) { + try { + await client.end(); + } catch (error) { + // pass + } + } + + if (secondaryClient !== null) { 
+ try { + await secondaryClient.end(); + } catch (error) { + // pass + } + } + await PluginManager.releaseResources(); + logger.info(`Test finished: ${expect.getState().currentTestName}`); + }); + + itIf("switchover", async () => { + results.clear(); + unhandledErrors = []; + + const iamEnabled: boolean = env.info.request.features.includes(TestEnvironmentFeatures.IAM); + + const startTimeNano: bigint = process.hrtime.bigint(); + + const stop = new BooleanContainer(false); + const promises: Promise[] = []; + + const instance: TestInstanceInfo = instances[0]; + const dbName: string = info.defaultDbName; + + const topologyInstances: string[] = await getBlueGreenEndpoints(info.blueGreenDeploymentId); + logger.debug(`topologyInstances: \n${topologyInstances.join("\n")}`); + + for (const host of topologyInstances) { + const hostId: string = host.split(".")[0]; + results.set(hostId, new BlueGreenResults()); + + if (rdsUtil.isNotGreenAndOldPrefixInstance(host)) { + // Direct topology monitoring + promises.push(getDirectTopologyMonitoringPromise(hostId, host, instance.port, dbName, stop, results.get(hostId))); + + // Direct blue connectivity monitoring + promises.push(getDirectBlueConnectivityMonitoringPromise(hostId, host, instance.port, dbName, stop, results.get(hostId))); + + // Direct blue idle connectivity monitoring + promises.push(getDirectBlueIdleConnectivityMonitoringPromise(hostId, host, instance.port, dbName, stop, results.get(hostId))); + + // Wrapper blue idle connectivity monitoring + promises.push(getWrapperBlueIdleConnectivityMonitoringPromise(hostId, host, instance.port, dbName, stop, results.get(hostId))); + + // Wrapper blue executing connectivity monitoring + promises.push(getWrapperBlueExecutingConnectivityMonitoringPromise(hostId, host, instance.port, dbName, stop, results.get(hostId))); + + // Wrapper blue new connection monitoring + promises.push(getWrapperBlueNewConnectionMonitoringPromise(hostId, host, instance.port, dbName, stop, 
results.get(hostId))); + + // Blue DNS monitoring + promises.push(getBlueDnsMonitoringPromise(hostId, host, stop, results.get(hostId))); + } + + if (rdsUtil.isGreenInstance(host)) { + // Direct topology monitoring + promises.push(getDirectTopologyMonitoringPromise(hostId, host, instance.port, dbName, stop, results.get(hostId))); + + // Wrapper green connectivity monitoring + promises.push(getWrapperGreenConnectivityMonitoringPromise(hostId, host, instance.port, dbName, stop, results.get(hostId))); + + // Green DNS monitoring + promises.push(getGreenDnsMonitoringPromise(hostId, host, stop, results.get(hostId))); + + if (iamEnabled) { + promises.push( + getGreenIamConnectivityMonitoringPromise( + hostId, + "BlueHostToken", + rdsUtil.removeGreenInstancePrefix(host), + host, + instance.port, + dbName, + stop, + results.get(hostId), + results.get(hostId).greenDirectIamIpWithBlueHostConnectTimes, + false, + true + ) + ); + + promises.push( + getGreenIamConnectivityMonitoringPromise( + hostId, + "GreenHostToken", + host, + host, + instance.port, + dbName, + stop, + results.get(hostId), + results.get(hostId).greenDirectIamIpWithGreenHostConnectTimes, + true, + false + ) + ); + } + } + } + + promises.push(getBlueGreenSwitchoverTriggerPromise(info.blueGreenDeploymentId, results)); + + results.forEach((value, key) => (value.startTime = startTimeNano)); + + await sleep(1_200_000); + + logger.debug(`Stopping all promises`); + stop.set(true); + await sleep(5000); + printMetrics(); + + if (unhandledErrors.length > 0) { + logUnhandledErrors(); + await PluginManager.releaseResources(); + fail("There are unhandled errors."); + } + + stop.set(true); + await PluginManager.releaseResources(); + }); +}); + +async function getDirectTopologyMonitoringPromise( + hostId: string, + host: string, + port: number, + dbName: string, + stop: BooleanContainer, + results: BlueGreenResults +) { + let query: string; + switch (env.info.request.engine) { + case DatabaseEngine.MYSQL: + query = 
MYSQL_BG_STATUS_QUERY; + break; + case DatabaseEngine.PG: + switch (env.info.request.deployment) { + case DatabaseEngineDeployment.AURORA: + query = PG_AURORA_BG_STATUS_QUERY; + break; + case DatabaseEngineDeployment.RDS_MULTI_AZ_INSTANCE: + query = PG_RDS_BG_STATUS_QUERY; + break; + default: + throw new Error(`Unsupported deployment ${env.info.request.deployment}`); + } + break; + default: + throw new Error(`Unsupported engine ${env.info.request.engine}`); + } + const dbConfig = await initDefaultConfig(host, port, dbName); + + try { + client = await openConnectionWithRetry(dbConfig); + + logger.debug(`[DirectTopology @ ${hostId}] connection opened.`); + + await sleep(1000); + + logger.debug(`[DirectTopology @ ${hostId}] Starting BG statuses monitoring.`); + + const endTime: bigint = process.hrtime.bigint() + BigInt(900_000_000_000); // 15 minutes + + while (!stop.get() && process.hrtime.bigint() < endTime) { + try { + if (client == null) { + client = await openConnectionWithRetry(dbConfig); + logger.debug(`[DirectTopology @ ${hostId} connection re-opened.`); + } + + const res = await client.query(query); + const queryResults: QueryResult[] = processResult(res); + + for (const queryResult of queryResults) { + const newStatus: string = queryResult.queryNewStatus; + + const isGreen: boolean = BlueGreenRole.parseRole(queryResult.queryRole, queryResult.queryVersion) === BlueGreenRole.TARGET; + + if (isGreen) { + const hasStatus = results.greenStatusTime.has(newStatus); + if (!hasStatus) { + logger.debug(`[DirectTopology @ ${hostId} status changed to: ${newStatus}`); + results.greenStatusTime.set(newStatus, process.hrtime.bigint()); + } + } else { + const hasStatus = results.blueStatusTime.has(newStatus); + if (!hasStatus) { + logger.debug(`[DirectTopology @ ${hostId} status changed to: ${newStatus}`); + results.blueStatusTime.set(newStatus, process.hrtime.bigint()); + } + } + } + + await sleep(100); + } catch (e: any) { + logger.debug(`[DirectTopology @ ${hostId} 
error: ${e.message}`); + await closeConnection(client); + client = null; + } + } + } catch (e: any) { + unhandledErrors.push(e); + logger.debug(`[DirectTopology @ ${hostId}] unhandled error: ${e.message}`); + } finally { + await closeConnection(client); + logger.debug(`[DirectTopology @ ${hostId}] promise is completed.`); + } +} + +async function closeConnection(client: AwsClient) { + try { + if (client != null && (await client.isValid())) { + await client.end(); + } + } catch (e: any) { + // do nothing + } +} + +// Blue host +// Checking: connectivity, SELECT 1 +async function getDirectBlueConnectivityMonitoringPromise( + hostId: string, + host: string, + port: number, + dbName: string, + stop: BooleanContainer, + results: BlueGreenResults +) { + const dbConfig = await initDefaultConfig(host, port, dbName); + + try { + client = await openConnectionWithRetry(dbConfig); + + logger.debug(`[DirectBlueConnectivity @ ${hostId}] connection opened.`); + + await sleep(300_000); + + logger.debug(`[DirectBlueConnectivity @ ${hostId}] Starting connectivity monitoring.`); + + while (!stop.get()) { + try { + await client.query("SELECT 1"); + await sleep(1000); + } catch (e: any) { + logger.debug(`[DirectBlueConnectivity @ ${hostId} error: ${e.message}`); + results.directBlueLostConnectionTime = process.hrtime.bigint(); + break; + } + } + } catch (e: any) { + unhandledErrors.push(e); + logger.debug(`[DirectBlueConnectivity @ ${hostId}] unhandled error: ${e.message}`); + } finally { + await closeConnection(client); + logger.debug(`[DirectBlueConnectivity @ ${hostId}] promise is completed.`); + } +} + +// Blue host +// Check: connectivity, isClosed() +async function getDirectBlueIdleConnectivityMonitoringPromise( + hostId: string, + host: string, + port: number, + dbName: string, + stop: BooleanContainer, + results: BlueGreenResults +) { + const dbConfig = await initDefaultConfig(host, port, dbName); + + try { + client = await openConnectionWithRetry(dbConfig); + + 
 logger.debug(`[DirectBlueIdleConnectivity @ ${hostId}] connection opened.`); + + await sleep(300_000); + + logger.debug(`[DirectBlueIdleConnectivity @ ${hostId}] Starting connectivity monitoring.`); + + while (!stop.get()) { + try { + await client.query("SELECT 1"); + await sleep(1000); + } catch (e: any) { + logger.debug(`[DirectBlueIdleConnectivity @ ${hostId} error: ${e.message}`); + results.directBlueIdleLostConnectionTime = process.hrtime.bigint(); + break; + } + } + } catch (e: any) { + unhandledErrors.push(e); + logger.debug(`[DirectBlueIdleConnectivity @ ${hostId}] unhandled error: ${e.message}`); + } finally { + await closeConnection(client); + logger.debug(`[DirectBlueIdleConnectivity @ ${hostId}] promise is completed.`); + } +} + +// Blue host +// Check: connectivity, isClosed() +async function getWrapperBlueIdleConnectivityMonitoringPromise( + hostId: string, + host: string, + port: number, + dbName: string, + stop: BooleanContainer, + results: BlueGreenResults +) { + const dbConfig = await initWrapperConfig(host, port, dbName); + + try { + client = await openConnectionWithRetry(dbConfig); + + logger.debug(`[WrapperBlueIdle @ ${hostId}] connection opened.`); + + await sleep(300_000); + + logger.debug(`[WrapperBlueIdle @ ${hostId}] Starting connectivity monitoring.`); + + while (!stop.get()) { + try { + if (!(await client.isValid())) { + results.wrapperBlueIdleLostConnectionTime = process.hrtime.bigint(); + break; + } + await sleep(1000); + } catch (e: any) { + logger.debug(`[WrapperBlueIdle @ ${hostId} error: ${e.message}`); + results.wrapperBlueIdleLostConnectionTime = process.hrtime.bigint(); + break; + } + } + } catch (e: any) { + unhandledErrors.push(e); + logger.debug(`[WrapperBlueIdle @ ${hostId}] unhandled error: ${e.message}`); + } finally { + await closeConnection(client); + logger.debug(`[WrapperBlueIdle @ ${hostId}] promise is completed.`); + } +} + +// Blue host +// Check: connectivity, SELECT sleep(5) +// Expect: long execution time (longer 
than 5s) during active phase of switchover +async function getWrapperBlueExecutingConnectivityMonitoringPromise( + hostId: string, + host: string, + port: number, + dbName: string, + stop: BooleanContainer, + results: BlueGreenResults +) { + const dbConfig = await initWrapperConfig(host, port, dbName); + let query; + switch (env.info.request.engine) { + case DatabaseEngine.PG: + query = "SELECT PG_SLEEP(5)"; + break; + case DatabaseEngine.MYSQL: + query = "SELECT SLEEP(5)"; + break; + default: + throw new Error(`Unsupported database engine: ${env.info.request.engine}`); + } + + try { + client = initClientFunc(dbConfig); + await client.connect(); + + logger.debug(`[WrapperBlueExecute @ ${hostId}] connection opened.`); + + await sleep(300_000); + + logger.debug(`[WrapperBlueExecute @ ${hostId}] Starting connectivity monitoring.`); + + const bgPlugin: BlueGreenPlugin = client.unwrapPlugin(BlueGreenPlugin); + + while (!stop.get()) { + const startTime = process.hrtime.bigint(); + let endTime: bigint; + try { + await client.query(query); + endTime = process.hrtime.bigint(); + + results.blueWrapperConnectTimes.push(new TimeHolder(startTime, endTime, bgPlugin.getHoldTimeNano())); + logger.debug(`[WrapperBlueExecute @ ${hostId}] results: ${JSON.stringify(results)}.`); + } catch (e: any) { + endTime = process.hrtime.bigint(); + results.blueWrapperConnectTimes.push(new TimeHolder(startTime, endTime, bgPlugin.getHoldTimeNano(), e.message)); + if (!(await client.isValid())) { + break; + } + } + await sleep(1000); + } + } catch (e: any) { + unhandledErrors.push(e); + logger.debug(`[WrapperBlueExecute @ ${hostId}] unhandled error: ${e.message}`); + } finally { + await closeConnection(client); + logger.debug(`[WrapperBlueExecute @ ${hostId}] promise is completed.`); + } +} + +// Blue host +// Check: connectivity, opening a new connection +// Expect: longer opening connection time during active phase of switchover +async function getWrapperBlueNewConnectionMonitoringPromise( + 
hostId: string, + host: string, + port: number, + dbName: string, + stop: BooleanContainer, + results: BlueGreenResults +) { + const dbConfig = await initWrapperConfig(host, port, dbName); + try { + await sleep(300_000); + + logger.debug(`[WrapperBlueNewConnection @ ${hostId}] Starting connectivity monitoring.`); + + const bgPlugin: BlueGreenPlugin = client.unwrapPlugin(BlueGreenPlugin); + + while (!stop.get()) { + const startTime = process.hrtime.bigint(); + let endTime; + try { + client = initClientFunc(dbConfig); + await client.connect(); + endTime = process.hrtime.bigint(); + results.blueWrapperExecuteTimes.push(new TimeHolder(startTime, endTime, bgPlugin.getHoldTimeNano())); + } catch (e: any) { + endTime = process.hrtime.bigint(); + results.blueWrapperExecuteTimes.push(new TimeHolder(startTime, endTime, bgPlugin.getHoldTimeNano(), e.message)); + if (!(await client.isValid())) { + break; + } + } + await sleep(1000); + } + } catch (e: any) { + unhandledErrors.push(e); + logger.debug(`[WrapperBlueNewConnection @ ${hostId}] unhandled error: ${e.message}`); + } finally { + await closeConnection(client); + logger.debug(`[WrapperBlueNewConnection @ ${hostId}] promise is completed.`); + } +} + +async function getBlueDnsMonitoringPromise(hostId: string, host: string, stop: BooleanContainer, results: BlueGreenResults) { + await sleep(300_000); + + try { + const ip: string = (await promisify(lookup)(host, {})).address; + logger.debug(`[BlueDNS @ ${hostId}] ${host} -> ${ip}`); + + while (!stop.get()) { + await sleep(1000); + try { + const temp: string = (await promisify(lookup)(host, {})).address; + } catch (e: any) { + results.dnsGreenRemovedTime = process.hrtime.bigint(); + break; + } + } + } catch (e: any) { + logger.debug(`[BlueDNS @ ${hostId}] unhandled error: ${e.message}`); + results.dnsGreenRemovedTime = process.hrtime.bigint(); + } finally { + logger.debug(`[BlueDNS @ ${hostId}] promise is complete.`); + } +} + +async function 
getWrapperGreenConnectivityMonitoringPromise( + hostId: string, + host: string, + port: number, + dbName: string, + stop: BooleanContainer, + results: BlueGreenResults +) { + const dbConfig = await initWrapperConfig(host, port, dbName); + + let startTime; + let endTime; + + try { + client = await openConnectionWithRetry(dbConfig); + + await sleep(300_000); + + logger.debug(`[WrapperGreenConnectivity @ ${hostId}] Starting connectivity monitoring.`); + + const bgPlugin: BlueGreenPlugin = client.unwrapPlugin(BlueGreenPlugin); + + while (!stop.get()) { + try { + startTime = process.hrtime.bigint(); + await client.query("SELECT 1"); + endTime = process.hrtime.bigint(); + results.greenWrapperExecuteTimes.push(new TimeHolder(startTime, endTime, bgPlugin.getHoldTimeNano())); + await sleep(1000); + } catch (e: any) { + results.greenWrapperExecuteTimes.push(new TimeHolder(startTime, endTime, bgPlugin.getHoldTimeNano(), e.message)); + if (!(await client.isValid())) { + results.wrapperGreenLostConnectionTime = getTimeInNanos(); + break; + } + logger.debug(`[WrapperGreenConnectivity @ ${hostId} error: ${e.message}`); + break; + } + } + } catch (e: any) { + unhandledErrors.push(e); + logger.debug(`[WrapperGreenConnectivity @ ${hostId}] unhandled error: ${e.message}`); + } finally { + await closeConnection(client); + logger.debug(`[WrapperGreenConnectivity @ ${hostId}] promise is completed.`); + } +} + +async function getGreenDnsMonitoringPromise(hostId: string, host: string, stop: BooleanContainer, results: BlueGreenResults) { + const ip: string = (await promisify(lookup)(host, {})).address; + logger.debug(`[GreenDNS @ ${hostId}] ${host} -> ${ip}`); + try { + while (!stop.get()) { + await sleep(1000); + try { + const temp: string = (await promisify(lookup)(host, {})).address; + } catch (e: any) { + results.dnsGreenRemovedTime = process.hrtime.bigint(); + break; + } + } + } catch (e: any) { + logger.debug(`[GreenDNS @ ${hostId}] unhandled error: ${e.message}`); + 
results.dnsGreenRemovedTime = process.hrtime.bigint(); + } finally { + logger.debug(`[GreenDNS @ ${hostId}] promise is complete.`); + } +} + +// Green host +// Check: connectivity (opening a new connection) with IAM when using host IP address +// Expect: lose connectivity after green host changes its name (green prefix to no-prefix) +async function getGreenIamConnectivityMonitoringPromise( + hostId: string, + prefix: string, + iamTokenHost: string, + connectHost: string, + port: number, + dbName: string, + stop: BooleanContainer, + results: BlueGreenResults, + timeHolders: TimeHolder[], + notifyOnFirstError: boolean, + exitOnFirstSuccess: boolean +) { + await sleep(300_000); + + try { + const greenHostConnectIp: string = (await promisify(lookup)(connectHost, {})).address; + + logger.debug(`[DirectGreenIamIp${prefix} @ ${hostId}] Starting connectivity monitoring ${iamTokenHost}.`); + + while (!stop.get()) { + const signer = new Signer({ + hostname: iamTokenHost, + port: port, + region: env.region, + credentials: fromNodeProviderChain(), + username: env.info.iamUserName + }); + + const token: string = await signer.getAuthToken(); + + let endTime: bigint; + logger.warn(`greenHostConnectIp: ${greenHostConnectIp}`); + let config: any = { + user: env.info.iamUserName, + host: greenHostConnectIp, + database: dbName, + password: token, + port: port, + plugins: "" + }; + config = DriverHelper.addDriverSpecificConfiguration(config, env.engine); + + const startTime: bigint = process.hrtime.bigint(); + try { + const client = initClientFunc(config); + await client.connect(); + endTime = process.hrtime.bigint(); + timeHolders.push(new TimeHolder(startTime, endTime)); + if (exitOnFirstSuccess) { + if (results.greenHostChangeNameTime === BigInt(0)) { + results.greenHostChangeNameTime = process.hrtime.bigint(); + } + logger.debug(`[DirectGreenIamIp${prefix} @ ${hostId}] Successfully connected. 
 Exiting.`); + return; + } + } catch (error: any) { + logger.debug(`[DirectGreenIamIp${prefix} @ ${hostId}] error: ${error.message}`); + endTime = process.hrtime.bigint(); + timeHolders.push(new TimeHolder(startTime, endTime)); + if (notifyOnFirstError && (error.message.includes("Access denied") || error.message.includes("PAM"))) { + if (results.greenHostChangeNameTime === BigInt(0)) { + results.greenHostChangeNameTime = process.hrtime.bigint(); + } + logger.debug(`[DirectGreenIamIp${prefix} @ ${hostId}] The first login error. Exiting thread.`); + return; + } + } + + await closeConnection(client); + client = null; + await sleep(1000); + } + } catch (e: any) { + unhandledErrors.push(e); + logger.debug(`[DirectGreenIamIp${prefix} @ ${hostId}] unhandled error: ${e.message}`); + } finally { + await closeConnection(client); + logger.debug(`[DirectGreenIamIp${prefix} @ ${hostId}] promise is completed.`); + } +} + +async function getBlueGreenSwitchoverTriggerPromise(blueGreenId: string, results: Map) { + await sleep(300_000); + const threadsSyncTime: bigint = process.hrtime.bigint(); + results.forEach((value, key) => (value.promiseSyncTime = threadsSyncTime)); + await sleep(30000); + + await auroraUtil.switchoverBlueGreenDeployment(blueGreenId); + const bgTriggerTime: bigint = process.hrtime.bigint(); + results.forEach((value, key) => { + logger.warn(`bgTriggerTime: ${bgTriggerTime}`); + value.bgTriggerTime = bgTriggerTime; + }); +} + +async function openConnectionWithRetry(config: any) { + const client = initClientFunc(config); + let tries = 0; + while (tries < 10) { + try { + await client.connect(); + return client; + } catch (error: any) { + // do nothing + logger.error(error.message); + } + tries++; + } + throw new Error("Can't open connection"); +} + +function printMetrics(): void { + const bgTriggerTime: bigint = + Array.from(results.values()) + .map((blueGreenResults) => blueGreenResults.bgTriggerTime) + .find(Boolean) || + (() => { + throw new Error("Can't get 
bgTriggerTime"); + })(); + + const metricsTable = new AsciiTable().setBorder("|", "-", "+", "+"); + + metricsTable.setHeading( + "Instance/endpoint", + "startTime", + "promisesSync", + "direct Blue conn dropped (idle)", + "direct Blue conn dropped (SELECT 1)", + "wrapper Blue conn dropped (idle)", + "wrapper Green conn dropped (SELECT 1)", + "Blue DNS updated", + "Green DNS removed", + "Green host certificate change" + ); + + // Sort entries by green instance first, then by name + const sortedEntries = Array.from(results.entries()).sort((a, b) => { + // First sort by green/blue + const greenCompare = (rdsUtil.isGreenInstance(a[0] + ".") ? 1 : 0) - (rdsUtil.isGreenInstance(b[0] + ".") ? 1 : 0); + if (greenCompare !== 0) return greenCompare; + + // Then sort by name + return rdsUtil.removeGreenInstancePrefix(a[0]).toLowerCase().localeCompare(rdsUtil.removeGreenInstancePrefix(b[0]).toLowerCase()); + }); + + if (sortedEntries.length === 0) { + metricsTable.addRow("No entries"); + } + + for (const [key, value] of sortedEntries) { + const startTime = Number(value.startTime - bgTriggerTime) / 1000000; + const promisesSyncTime = Number(value.promiseSyncTime - bgTriggerTime) / 1000000; + const directBlueIdleLostConnectionTime = getFormattedNanoTime(value.directBlueIdleLostConnectionTime, bgTriggerTime); + const directBlueLostConnectionTime = getFormattedNanoTime(value.directBlueLostConnectionTime, bgTriggerTime); + const wrapperBlueIdleLostConnectionTime = getFormattedNanoTime(value.wrapperBlueIdleLostConnectionTime, bgTriggerTime); + const wrapperGreenLostConnectionTime = getFormattedNanoTime(value.wrapperGreenLostConnectionTime, bgTriggerTime); + const dnsBlueChangedTime = getFormattedNanoTime(value.dnsBlueChangedTime, bgTriggerTime); + const dnsGreenRemovedTime = getFormattedNanoTime(value.dnsGreenRemovedTime, bgTriggerTime); + const greenHostChangeNameTime = getFormattedNanoTime(value.greenHostChangeNameTime, bgTriggerTime); + + metricsTable.addRow( + key, + startTime, 
+ promisesSyncTime, + directBlueIdleLostConnectionTime, + directBlueLostConnectionTime, + wrapperBlueIdleLostConnectionTime, + wrapperGreenLostConnectionTime, + dnsBlueChangedTime, + dnsGreenRemovedTime, + greenHostChangeNameTime + ); + } + + logger.debug("\n" + renderTable(metricsTable, true)); + + // Print host status times + for (const [key, value] of sortedEntries) { + if (value.blueStatusTime.size === 0 && value.greenStatusTime.size === 0) { + continue; + } + printHostStatusTimes(key, value, bgTriggerTime); + } + + // Print wrapper connection times to Blue + for (const [key, value] of sortedEntries) { + if (value.blueWrapperConnectTimes.length === 0) { + continue; + } + printDurationTimes(key, "Wrapper connection time (ms) to Blue", value.blueWrapperConnectTimes, bgTriggerTime); + } + + // Print wrapper IAM connection times to Green + for (const [key, value] of sortedEntries) { + if (value.greenDirectIamIpWithGreenHostConnectTimes.length === 0) { + continue; + } + printDurationTimes( + key, + "Wrapper IAM (green token) connection time (ms) to Green", + value.greenDirectIamIpWithGreenHostConnectTimes, + bgTriggerTime + ); + } + + // Print wrapper execution times to Blue + for (const [key, value] of sortedEntries) { + if (value.blueWrapperExecuteTimes.length === 0) { + continue; + } + printDurationTimes(key, "Wrapper execution time (ms) to Blue", value.blueWrapperExecuteTimes, bgTriggerTime); + } + + // Print wrapper execution times to Green + for (const [key, value] of sortedEntries) { + if (value.greenWrapperExecuteTimes.length === 0) { + continue; + } + printDurationTimes(key, "Wrapper execution time (ms) to Green", value.greenWrapperExecuteTimes, bgTriggerTime); + } +} + +function getFormattedNanoTime(timeNano: bigint, timeZeroNano: bigint): string { + return !timeNano ? 
"-" : `${Number(timeNano - timeZeroNano) / 1000000} ms`; +} + +function printHostStatusTimes(host: string, results: BlueGreenResults, timeZeroNano: bigint): void { + const statusMap = new Map(); + + // Combine blue and green status times + results.blueStatusTime.forEach((value, key) => statusMap.set(key, value)); + results.greenStatusTime.forEach((value, key) => statusMap.set(key, value)); + + const metricsTable = new AsciiTable().setBorder("|", "-", "+", "+"); + + metricsTable.setHeading("Status", "SOURCE", "TARGET"); + + // Sort status names by their values + const sortedStatusNames = Array.from(statusMap.entries()) + .sort((a, b) => Number(a[1] - b[1])) + .map((entry) => entry[0]); + + for (const status of sortedStatusNames) { + const sourceTime = results.blueStatusTime.has(status) ? `${Number(results.blueStatusTime.get(status) - timeZeroNano) / 1000000} ms` : ""; + const targetTime = results.greenStatusTime.has(status) ? `${Number(results.greenStatusTime.get(status) - timeZeroNano) / 1000000} ms` : ""; + + metricsTable.addRow(status, sourceTime, targetTime); + } + + logger.debug(`\n${host}:\n${renderTable(metricsTable, true)}`); +} + +function printDurationTimes(host: string, title: string, times: TimeHolder[], timeZeroNano: bigint): void { + const metricsTable = new AsciiTable().setBorder("|", "-", "+", "+"); + + metricsTable.setHeading("Connect at (ms)", "Connect time/duration (ms)", "Error"); + + // Calculate p99 + const p99nano = getPercentile( + times.map((x) => x.endTime - x.startTime), + 99.0 + ); + const p99 = Number(p99nano) / 1000000; + + metricsTable.addRow("p99", p99, ""); + + const firstConnect = times[0]; + metricsTable.addRow( + Number(firstConnect.startTime - timeZeroNano) / 1000000, + Number(firstConnect.endTime - firstConnect.startTime) / 1000000, + firstConnect.error == null ? "" : firstConnect.error.substring(0, Math.min(firstConnect.error.length, 100)).replace("\n", " ") + "..." 
+ ); + + // Add rows for times exceeding p99 + for (const timeHolder of times) { + if (Number(timeHolder.endTime - timeHolder.startTime) / 1000000 > p99) { + metricsTable.addRow( + Number(timeHolder.startTime - timeZeroNano) / 1000000, + Number(timeHolder.endTime - timeHolder.startTime) / 1000000, + timeHolder.error == null ? "" : timeHolder.error.substring(0, Math.min(timeHolder.error.length, 100)).replace("\n", " ") + "..." + ); + } + } + + const lastConnect = times[times.length - 1]; + metricsTable.addRow( + Number(lastConnect.startTime - timeZeroNano) / 1000000, + Number(lastConnect.endTime - lastConnect.startTime) / 1000000, + lastConnect.error == null ? "" : lastConnect.error.substring(0, Math.min(lastConnect.error.length, 100)).replace("\n", " ") + "..." + ); + + logger.debug(`\n${host}: ${title}\n${renderTable(metricsTable, false)}`); +} + +function getPercentile(input: bigint[], percentile: number): bigint { + if (!input || input.length === 0) { + return 0n; + } + + const sortedList = [...input].sort((a, b) => Number(a - b)); + const rank = percentile === 0 ? 
1 : Math.ceil((percentile / 100.0) * input.length); + return sortedList[rank - 1]; +} + +function renderTable(table: AsciiTable, leftAlignForColumn0: boolean): string { + if (leftAlignForColumn0) { + table.setAlignLeft(0); + } + + return table.toString(); +} + +function logUnhandledErrors(): void { + for (const error of unhandledErrors) { + logger.debug(`Unhandled exception: ${error.message}`); + } +} diff --git a/tests/integration/container/tests/custom_endpoint.test.ts b/tests/integration/container/tests/custom_endpoint.test.ts index 9ddda06a..13c2de21 100644 --- a/tests/integration/container/tests/custom_endpoint.test.ts +++ b/tests/integration/container/tests/custom_endpoint.test.ts @@ -209,7 +209,7 @@ async function deleteEndpoint(rdsClient: RDSClient, endpointId: string): Promise describeIf("custom endpoint", () => { beforeAll(async () => { env = await TestEnvironment.getCurrent(); - const clusterId = env.auroraClusterName; + const clusterId = env.rdsDbName; const region = env.region; rdsClient = new RDSClient({ region: region }); @@ -239,7 +239,7 @@ describeIf("custom endpoint", () => { beforeEach(async () => { await TestEnvironment.verifyClusterStatus(); - currentWriter = await auroraTestUtility.getClusterWriterInstanceId(env.info.auroraClusterName); + currentWriter = await auroraTestUtility.getClusterWriterInstanceId(env.info.rdsDbName); logger.info(`Test started: ${expect.getState().currentTestName}`); }, 1000000); @@ -259,7 +259,7 @@ describeIf("custom endpoint", () => { "test custom endpoint failover - strict reader", async (usingFailover1: boolean) => { endpointId3 = `test-endpoint-3-${randomUUID()}`; - await createEndpoint(env.auroraClusterName, env.instances.slice(0, 2), endpointId3, "READER"); + await createEndpoint(env.rdsDbName, env.instances.slice(0, 2), endpointId3, "READER"); endpointInfo3 = await waitUntilEndpointAvailable(endpointId3); const config = await initDefaultConfig(endpointInfo3.Endpoint, env.databaseInfo.instanceEndpointPort, false, 
"strict-reader", usingFailover1); @@ -275,7 +275,7 @@ describeIf("custom endpoint", () => { // Use failover API to break connection. await auroraTestUtility.failoverClusterAndWaitUntilWriterChanged( currentWriter, - env.info.auroraClusterName, + env.info.rdsDbName, instanceId === instance1 ? instance1 : instance2 ); @@ -287,7 +287,7 @@ describeIf("custom endpoint", () => { const newInstanceId: string = await auroraTestUtility.queryInstanceId(client); expect(newEndpointMembers.includes(newInstanceId)).toBeTruthy(); - const newWriter = await auroraTestUtility.getClusterWriterInstanceId(env.info.auroraClusterName); + const newWriter = await auroraTestUtility.getClusterWriterInstanceId(env.info.rdsDbName); expect(newInstanceId).not.toBe(newWriter); await deleteEndpoint(rdsClient, endpointId3); @@ -399,7 +399,7 @@ describeIf("custom endpoint", () => { } // Use failover API to break connection. - await auroraTestUtility.failoverClusterAndWaitUntilWriterChanged(currentWriter, env.info.auroraClusterName, nextWriter); + await auroraTestUtility.failoverClusterAndWaitUntilWriterChanged(currentWriter, env.info.rdsDbName, nextWriter); await expect(auroraTestUtility.queryInstanceId(client)).rejects.toThrow(FailoverSuccessError); @@ -409,7 +409,7 @@ describeIf("custom endpoint", () => { const newInstanceId: string = await auroraTestUtility.queryInstanceId(client); expect(newEndpointMembers.includes(newInstanceId)).toBeTruthy(); - const newWriter = await auroraTestUtility.getClusterWriterInstanceId(env.info.auroraClusterName); + const newWriter = await auroraTestUtility.getClusterWriterInstanceId(env.info.rdsDbName); expect(newInstanceId).toBe(newWriter); }, 1000000 @@ -441,7 +441,7 @@ describeIf("custom endpoint", () => { } else { nextWriter = instanceId === instance1 ? 
instance1 : instance2; } - await auroraTestUtility.failoverClusterAndWaitUntilWriterChanged(currentWriter, env.info.auroraClusterName, nextWriter); + await auroraTestUtility.failoverClusterAndWaitUntilWriterChanged(currentWriter, env.info.rdsDbName, nextWriter); await expect(auroraTestUtility.queryInstanceId(client)).rejects.toThrow(FailoverSuccessError); diff --git a/tests/integration/container/tests/read_write_splitting.test.ts b/tests/integration/container/tests/read_write_splitting.test.ts index 9c03c069..4cb0e4d9 100644 --- a/tests/integration/container/tests/read_write_splitting.test.ts +++ b/tests/integration/container/tests/read_write_splitting.test.ts @@ -17,12 +17,7 @@ import { TestEnvironment } from "./utils/test_environment"; import { DriverHelper } from "./utils/driver_helper"; import { AuroraTestUtility } from "./utils/aurora_test_utility"; -import { - AwsWrapperError, - FailoverFailedError, - FailoverSuccessError, - TransactionResolutionUnknownError -} from "../../../../common/lib/utils/errors"; +import { AwsWrapperError, FailoverFailedError, FailoverSuccessError, TransactionResolutionUnknownError } from "../../../../common/lib/utils/errors"; import { DatabaseEngine } from "./utils/database_engine"; import { QueryResult } from "pg"; import { ProxyHelper } from "./utils/proxy_helper"; diff --git a/tests/integration/container/tests/secrets_manager.test.ts b/tests/integration/container/tests/secrets_manager.test.ts index fdbf8a93..b8c0df89 100644 --- a/tests/integration/container/tests/secrets_manager.test.ts +++ b/tests/integration/container/tests/secrets_manager.test.ts @@ -23,12 +23,7 @@ import { logger } from "../../../../common/logutils"; import { TestEnvironmentFeatures } from "./utils/test_environment_features"; import { features, instanceCount } from "./config"; import { PluginManager } from "../../../../common/lib"; -import { - CreateSecretCommand, - CreateSecretCommandOutput, - DeleteSecretCommand, - SecretsManagerClient -} from 
"@aws-sdk/client-secrets-manager"; +import { CreateSecretCommand, CreateSecretCommandOutput, DeleteSecretCommand, SecretsManagerClient } from "@aws-sdk/client-secrets-manager"; import { RDSClient } from "@aws-sdk/client-rds"; import { AuroraTestUtility } from "./utils/aurora_test_utility"; import { ProxyHelper } from "./utils/proxy_helper"; diff --git a/tests/integration/container/tests/utils/aurora_test_utility.ts b/tests/integration/container/tests/utils/aurora_test_utility.ts index 5735e620..af1014e1 100644 --- a/tests/integration/container/tests/utils/aurora_test_utility.ts +++ b/tests/integration/container/tests/utils/aurora_test_utility.ts @@ -15,16 +15,21 @@ */ import { + BlueGreenDeployment, CreateDBInstanceCommand, + DBCluster, DBInstanceAlreadyExistsFault, DBInstanceNotFoundFault, DeleteDBInstanceCommand, + DescribeBlueGreenDeploymentsCommand, DescribeDBClustersCommand, DescribeDBInstancesCommand, FailoverDBClusterCommand, InvalidDBInstanceStateFault, RDSClient, - RebootDBInstanceCommand + RebootDBInstanceCommand, + SwitchoverBlueGreenDeploymentCommand, + SwitchoverBlueGreenDeploymentResponse } from "@aws-sdk/client-rds"; import { TestEnvironment } from "./test_environment"; import * as dns from "dns"; @@ -35,14 +40,22 @@ import { sleep } from "../../../../../common/lib/utils/utils"; import { logger } from "../../../../../common/logutils"; import { TestInstanceInfo } from "./test_instance_info"; import { TestEnvironmentInfo } from "./test_environment_info"; +import { DatabaseEngine } from "./database_engine"; +import { DatabaseEngineDeployment } from "./database_engine_deployment"; const instanceClass: string = "db.r5.large"; export class AuroraTestUtility { private client: RDSClient; - constructor(region: string = "us-east-1") { - this.client = new RDSClient({ region: region }); + constructor(region: string = "us-east-1", endpoint: string = null) { + this.client = + endpoint === null + ? 
new RDSClient({ region: region }) + : new RDSClient({ + region: region, + endpoint: endpoint + }); } async getDbInstance(instanceId: string): Promise { @@ -92,6 +105,86 @@ export class AuroraTestUtility { logger.info(`Instance ${instanceId} status: ${status.toLowerCase()}`); } + async getRdsInstanceIds(engine: DatabaseEngine, deployment: DatabaseEngineDeployment, client: any) { + let retrieveTopologySql: string; + switch (deployment) { + case DatabaseEngineDeployment.AURORA: + switch (engine) { + case DatabaseEngine.MYSQL: + retrieveTopologySql = + "SELECT SERVER_ID, SESSION_ID FROM information_schema.replica_host_status ORDER BY IF(SESSION_ID = 'MASTER_SESSION_ID', 0, 1)"; + break; + case DatabaseEngine.PG: + retrieveTopologySql = + "SELECT SERVER_ID, SESSION_ID FROM aurora_replica_status() ORDER BY CASE WHEN SESSION_ID = 'MASTER_SESSION_ID' THEN 0 ELSE 1 END"; + break; + default: + throw new Error(`Unsupported database engine: ${engine}`); + } + break; + case DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER: + switch (engine) { + case DatabaseEngine.MYSQL: { + const replicaWriterId: string = await this.getMultiAzMysqlReplicaWriterInstanceId(client); + retrieveTopologySql = `SELECT SUBSTRING_INDEX(endpoint, '.', 1) as SERVER_ID + FROM mysql.rds_topology + ORDER BY CASE WHEN id = ${replicaWriterId == null ? "@@server_id" : `'${replicaWriterId}'`} THEN 0 ELSE 1 END, + SUBSTRING_INDEX(endpoint, '.', 1)`; + break; + } + case DatabaseEngine.PG: + retrieveTopologySql = + "SELECT SUBSTRING(endpoint FROM 0 FOR POSITION('.' 
IN endpoint)) as SERVER_ID FROM rds_tools.show_topology() ORDER BY CASE WHEN id = (SELECT MAX(multi_az_db_cluster_source_dbi_resource_id) FROM rds_tools.multi_az_db_cluster_source_dbi_resource_id()) THEN 0 ELSE 1 END, endpoint"; + + break; + default: + throw new Error(`Unsupported database engine: ${engine}`); + } + break; + case DatabaseEngineDeployment.RDS_MULTI_AZ_INSTANCE: + switch (engine) { + case DatabaseEngine.MYSQL: + retrieveTopologySql = "SELECT SUBSTRING_INDEX(endpoint, '.', 1) as SERVER_ID FROM mysql.rds_topology"; + break; + case DatabaseEngine.PG: + retrieveTopologySql = "SELECT SUBSTRING(endpoint FROM 0 FOR POSITION('.' IN endpoint)) as SERVER_ID" + " FROM rds_tools.show_topology()"; + + break; + default: + throw new Error(`Unsupported database engine: ${engine}`); + } + break; + default: + throw new Error(`Unsupported database engine deployment: ${deployment}`); + } + + const auroraInstances: string[] = []; + const result = await client.query(retrieveTopologySql); + switch (engine) { + case DatabaseEngine.MYSQL: + for (const row of result[0]) { + auroraInstances.push(row.SERVER_ID); + } + break; + case DatabaseEngine.PG: + for (const row of result.rows) { + auroraInstances.push(row.server_id); + } + break; + default: + throw new Error(`Unsupported database engine: ${engine}`); + } + return auroraInstances; + } + + private async getMultiAzMysqlReplicaWriterInstanceId(client: any): Promise { + const result = await client.query("SHOW REPLICA STATUS"); + if (result.length > 0) { + return result[0].Source_Server_id; + } + return null; + } + async waitUntilClusterHasDesiredStatus(clusterId: string, desiredStatus: string = "available") { let clusterInfo = await this.getDbCluster(clusterId); if (clusterInfo === null) { @@ -136,7 +229,7 @@ export class AuroraTestUtility { async failoverClusterAndWaitUntilWriterChanged(initialWriter?: string, clusterId?: string, targetWriterId?: string) { if (this.isNullOrUndefined(clusterId)) { - clusterId = (await 
TestEnvironment.getCurrent()).info.auroraClusterName; + clusterId = (await TestEnvironment.getCurrent()).info.rdsDbName; } if (this.isNullOrUndefined(initialWriter)) { @@ -169,7 +262,7 @@ export class AuroraTestUtility { async failoverClusterToTarget(clusterId?: string, targetInstanceId?: string): Promise { const info = (await TestEnvironment.getCurrent()).info; if (clusterId == null) { - clusterId = info.auroraClusterName; + clusterId = info.rdsDbName; } await this.waitUntilClusterHasDesiredStatus(clusterId); @@ -227,7 +320,7 @@ export class AuroraTestUtility { async isDbInstanceWriter(instanceId: string, clusterId?: string) { if (clusterId === undefined) { - clusterId = (await TestEnvironment.getCurrent()).info.auroraClusterName; + clusterId = (await TestEnvironment.getCurrent()).info.rdsDbName; } const clusterInfo = await this.getDbCluster(clusterId); if (clusterInfo === null || clusterInfo.DBClusterMembers === undefined) { @@ -245,7 +338,7 @@ export class AuroraTestUtility { async getClusterWriterInstanceId(clusterId?: string) { if (clusterId === undefined) { - clusterId = (await TestEnvironment.getCurrent()).info.auroraClusterName; + clusterId = (await TestEnvironment.getCurrent()).info.rdsDbName; } const clusterInfo = await this.getDbCluster(clusterId); @@ -270,7 +363,7 @@ export class AuroraTestUtility { const info: TestEnvironmentInfo = (await TestEnvironment.getCurrent()).info; const command = new CreateDBInstanceCommand({ DBInstanceIdentifier: instanceId, - DBClusterIdentifier: info.auroraClusterName, + DBClusterIdentifier: info.rdsDbName, DBInstanceClass: instanceClass, PubliclyAccessible: true, Engine: info.databaseEngine, @@ -351,4 +444,52 @@ export class AuroraTestUtility { } return instances.length; } + + async getClusterByArn(clusterArn: string): Promise { + const command = new DescribeDBClustersCommand({ + DBClusterIdentifier: clusterArn + }); + const clusters: DBCluster[] | undefined = (await this.client.send(command)).DBClusters; + if (!clusters) 
{ + return null; + } + return clusters[0]; + } + + async getRdsInstanceInfoByArn(instanceArn: string): Promise { + const command = new DescribeDBInstancesCommand({ + DBInstanceIdentifier: instanceArn + }); + const instances: DBInstance[] | undefined = (await this.client.send(command)).DBInstances; + if (!instances) { + return null; + } + return instances[0]; + } + + async getBlueGreenDeployment(blueGreenId: string): Promise { + const command = new DescribeBlueGreenDeploymentsCommand({ + BlueGreenDeploymentIdentifier: blueGreenId + }); + try { + const blueGreenDeployments = (await this.client.send(command)).BlueGreenDeployments; + if (blueGreenDeployments === undefined || blueGreenDeployments.length === 0) { + return null; + } + + return blueGreenDeployments[0]; + } catch { + return null; + } + } + + async switchoverBlueGreenDeployment(blueGreenId: string) { + const command = new SwitchoverBlueGreenDeploymentCommand({ + BlueGreenDeploymentIdentifier: blueGreenId + }); + const response: SwitchoverBlueGreenDeploymentResponse = await this.client.send(command); + if (response.BlueGreenDeployment !== undefined) { + logger.debug("switchoverBlueGreenDeployment request is sent."); + } + } } diff --git a/tests/integration/container/tests/utils/database_engine_deployment.ts b/tests/integration/container/tests/utils/database_engine_deployment.ts index 2dfc1b7b..3f42540c 100644 --- a/tests/integration/container/tests/utils/database_engine_deployment.ts +++ b/tests/integration/container/tests/utils/database_engine_deployment.ts @@ -18,5 +18,6 @@ export enum DatabaseEngineDeployment { DOCKER = "DOCKER", RDS = "RDS", RDS_MULTI_AZ_CLUSTER = "RDS_MULTI_AZ_CLUSTER", + RDS_MULTI_AZ_INSTANCE = "RDS_MULTI_AZ_INSTANCE", AURORA = "AURORA" } diff --git a/tests/integration/container/tests/utils/driver_helper.ts b/tests/integration/container/tests/utils/driver_helper.ts index 3b733da1..8c0fc6af 100644 --- a/tests/integration/container/tests/utils/driver_helper.ts +++ 
b/tests/integration/container/tests/utils/driver_helper.ts @@ -20,6 +20,7 @@ import { AwsPGClient } from "../../../../../pg/lib"; import { DatabaseEngine } from "./database_engine"; import { AwsClient } from "../../../../../common/lib/aws_client"; import { DatabaseEngineDeployment } from "./database_engine_deployment"; +import { readFileSync } from "fs"; export class DriverHelper { static getClient(driver: TestDriver) { @@ -141,15 +142,15 @@ export class DriverHelper { } static addDriverSpecificConfiguration(props: any, engine: DatabaseEngine, performance: boolean = false) { - if (engine === DatabaseEngine.PG && !performance) { - props["ssl"] = { rejectUnauthorized: false }; - } - props["wrapperConnectTimeout"] = 3000; props["wrapperQueryTimeout"] = 120000; props["monitoring_wrapperQueryTimeout"] = 3000; props["monitoring_wrapperConnectTimeout"] = 3000; props["failureDetectionTime"] = 1000; + props["ssl"] = { + rejectUnauthorized: false, + ca: readFileSync("/app/global-bundle.pem").toString() + }; return props; } } diff --git a/tests/integration/container/tests/utils/test_database_info.ts b/tests/integration/container/tests/utils/test_database_info.ts index ab39ee4e..a2f7832a 100644 --- a/tests/integration/container/tests/utils/test_database_info.ts +++ b/tests/integration/container/tests/utils/test_database_info.ts @@ -16,6 +16,8 @@ import { TestInstanceInfo } from "./test_instance_info"; import { DBInstance } from "@aws-sdk/client-rds/dist-types/models/models_0"; +import { instance } from "ts-mockito"; +import { logger } from "../../../../../common/logutils"; export class TestDatabaseInfo { private readonly _username: string; @@ -27,6 +29,8 @@ export class TestDatabaseInfo { private readonly _clusterReadOnlyEndpointPort: number; private readonly _instanceEndpointSuffix: string; private readonly _instanceEndpointPort: number; + private readonly _blueGreenDeploymentId: string; + private readonly _clusterParameterGroupName: string; private readonly _instances: 
TestInstanceInfo[] = []; constructor(databaseInfo: { [s: string]: any }) { @@ -39,6 +43,8 @@ export class TestDatabaseInfo { this._clusterReadOnlyEndpointPort = Number(databaseInfo["clusterReadOnlyEndpointPort"]); this._instanceEndpointSuffix = String(databaseInfo["instanceEndpointSuffix"]); this._instanceEndpointPort = Number(databaseInfo["instanceEndpointPort"]); + this._blueGreenDeploymentId = String(databaseInfo["blueGreenDeploymentId"]); + this._clusterParameterGroupName = String(databaseInfo["clusterParameterGroupName"]); this._instances = Array.from(databaseInfo["instances"], (x: DBInstance) => { return new TestInstanceInfo(x); @@ -101,6 +107,14 @@ export class TestDatabaseInfo { return this._instances; } + get blueGreenDeploymentId(): string { + return this._blueGreenDeploymentId; + } + + get clusterParameterGroupName(): string { + return this._clusterParameterGroupName; + } + getInstance(instanceName: string): TestInstanceInfo { const instance = this._instances.find((instance) => instance.instanceId === instanceName); if (instance === undefined) { diff --git a/tests/integration/container/tests/utils/test_environment.ts b/tests/integration/container/tests/utils/test_environment.ts index c07ff0a0..8ce9a60f 100644 --- a/tests/integration/container/tests/utils/test_environment.ts +++ b/tests/integration/container/tests/utils/test_environment.ts @@ -59,7 +59,7 @@ export class TestEnvironment { return TestEnvironment.env; } - static async verifyClusterStatus() { + static async verifyClusterStatus(auroraUtility?: AuroraTestUtility) { const info = TestEnvironment.env?.info; if (info?.request.deployment === DatabaseEngineDeployment.AURORA || info?.request.deployment === DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER) { let remainingTries = 3; @@ -67,10 +67,12 @@ export class TestEnvironment { while (remainingTries-- > 0 && !success) { try { - const auroraUtility = new AuroraTestUtility(info.region); - await 
auroraUtility.waitUntilClusterHasDesiredStatus(info.auroraClusterName); - info.databaseInfo.moveInstanceFirst(await auroraUtility.getClusterWriterInstanceId(info.auroraClusterName)); - info.proxyDatabaseInfo.moveInstanceFirst(await auroraUtility.getClusterWriterInstanceId(info.auroraClusterName)); + if (auroraUtility === undefined) { + auroraUtility = new AuroraTestUtility(info.region); + } + await auroraUtility.waitUntilClusterHasDesiredStatus(info.rdsDbName); + info.databaseInfo.moveInstanceFirst(await auroraUtility.getClusterWriterInstanceId(info.rdsDbName)); + info.proxyDatabaseInfo.moveInstanceFirst(await auroraUtility.getClusterWriterInstanceId(info.rdsDbName)); success = true; } catch (error: any) { switch (info?.request.deployment) { @@ -87,7 +89,7 @@ export class TestEnvironment { } if (!success) { - fail(`Cluster ${info.auroraClusterName} is not healthy`); + fail(`Cluster ${info.rdsDbName} is not healthy`); } } } @@ -169,7 +171,7 @@ export class TestEnvironment { static async verifyAllInstancesHasRightState(...allowedStatuses: string[]) { const info = TestEnvironment.env?.info; const auroraUtility = new AuroraTestUtility(info?.region); - if (!info?.auroraClusterName) { + if (!info?.rdsDbName) { fail(`Invalid cluster`); } const instanceIds: (string | undefined)[] | undefined = info?.databaseInfo.instances.map((instance) => instance.instanceId); @@ -181,16 +183,16 @@ export class TestEnvironment { static async rebootAllClusterInstances() { const info = TestEnvironment.env?.info; const auroraUtility = new AuroraTestUtility(info?.region); - if (!info?.auroraClusterName) { + if (!info?.rdsDbName) { fail(`Invalid cluster`); } - await auroraUtility.waitUntilClusterHasDesiredStatus(info.auroraClusterName!); + await auroraUtility.waitUntilClusterHasDesiredStatus(info.rdsDbName!); const instanceIds: (string | undefined)[] | undefined = info?.databaseInfo.instances.map((instance) => instance.instanceId); for (const instance of instanceIds) { await 
auroraUtility.rebootInstance(instance); } - await auroraUtility.waitUntilClusterHasDesiredStatus(info.auroraClusterName!); + await auroraUtility.waitUntilClusterHasDesiredStatus(info.rdsDbName!); for (const instance of instanceIds) { await auroraUtility.waitUntilInstanceHasRightState(instance, "available"); } @@ -199,10 +201,10 @@ export class TestEnvironment { static async rebootCluster() { const info = TestEnvironment.env?.info; const auroraUtility = new AuroraTestUtility(info?.region); - if (!info?.auroraClusterName) { + if (!info?.rdsDbName) { fail(`Invalid cluster`); } - await auroraUtility.waitUntilClusterHasDesiredStatus(info.auroraClusterName!); + await auroraUtility.waitUntilClusterHasDesiredStatus(info.rdsDbName!); const instanceIds: (string | undefined)[] | undefined = info?.databaseInfo.instances.map((instance) => instance.instanceId); for (const instance of instanceIds) { @@ -302,7 +304,7 @@ export class TestEnvironment { environment.proxies[instance.instanceId] = new ProxyInfo(proxies[environment.instances[i].url], host, proxyControlPort); } - if (environment.proxyDatabaseInfo.clusterEndpoint !== undefined) { + if (environment.proxyDatabaseInfo.clusterEndpoint != null && environment.proxyDatabaseInfo.clusterEndpoint !== "null") { const client = new Toxiproxy(TestEnvironment.createProxyUrl(environment.proxyDatabaseInfo.clusterEndpoint, proxyControlPort)); const proxy = await client.get(`${environment.databaseInfo.clusterEndpoint}:${environment.databaseInfo.clusterEndpointPort}`); @@ -315,7 +317,7 @@ export class TestEnvironment { } } - if (environment.proxyDatabaseInfo.clusterReadOnlyEndpoint !== undefined) { + if (environment.proxyDatabaseInfo.clusterReadOnlyEndpoint != null && environment.proxyDatabaseInfo.clusterReadOnlyEndpoint !== "null") { const client = new Toxiproxy(TestEnvironment.createProxyUrl(environment.proxyDatabaseInfo.clusterReadOnlyEndpoint, proxyControlPort)); const proxy = await 
client.get(`${environment.databaseInfo.clusterReadOnlyEndpoint}:${environment.databaseInfo.clusterReadOnlyEndpointPort}`); @@ -384,6 +386,10 @@ export class TestEnvironment { return this.info.region; } + get rdsEndpoint(): string { + return this.info.rdsEndpoint; + } + get engine(): DatabaseEngine { return this.info.request.engine; } @@ -392,8 +398,8 @@ export class TestEnvironment { return this.info.request.deployment; } - get auroraClusterName(): string { - return this.info.auroraClusterName; + get rdsDbName(): string { + return this.info.rdsDbName; } private static createProxyUrl(host: string, port: number) { diff --git a/tests/integration/container/tests/utils/test_environment_features.ts b/tests/integration/container/tests/utils/test_environment_features.ts index 5cd2708f..14c5150c 100644 --- a/tests/integration/container/tests/utils/test_environment_features.ts +++ b/tests/integration/container/tests/utils/test_environment_features.ts @@ -25,5 +25,6 @@ export enum TestEnvironmentFeatures { RUN_AUTOSCALING_TESTS_ONLY = "RUN_AUTOSCALING_TESTS_ONLY", SKIP_MYSQL_DRIVER_TESTS = "SKIP_MYSQL_DRIVER_TESTS", SKIP_PG_DRIVER_TESTS = "SKIP_PG_DRIVER_TESTS", - RDS_MULTI_AZ_CLUSTER_SUPPORTED = "RDS_MULTI_AZ_CLUSTER_SUPPORTED" + RDS_MULTI_AZ_CLUSTER_SUPPORTED = "RDS_MULTI_AZ_CLUSTER_SUPPORTED", + BLUE_GREEN_DEPLOYMENT = "BLUE_GREEN_DEPLOYMENT" } diff --git a/tests/integration/container/tests/utils/test_environment_info.ts b/tests/integration/container/tests/utils/test_environment_info.ts index 6a7389d8..b8854168 100644 --- a/tests/integration/container/tests/utils/test_environment_info.ts +++ b/tests/integration/container/tests/utils/test_environment_info.ts @@ -25,7 +25,7 @@ export class TestEnvironmentInfo { private readonly _awsSecretAccessKey: string; private readonly _awsSessionToken: string; private readonly _region: string; - private readonly _auroraClusterName: string; + private readonly _rdsDbName: string; private readonly _iamUserName: string; private readonly 
_databaseInfo: TestDatabaseInfo; private readonly _proxyDatabaseInfo: TestProxyDatabaseInfo; @@ -33,6 +33,7 @@ export class TestEnvironmentInfo { private readonly _metricsTelemetryInfo: TestTelemetryInfo; private readonly _databaseEngine: string; private readonly _databaseEngineVersion: string; + private readonly _rdsEndpoint: string; constructor(testInfo: { [s: string]: any }) { this._request = new TestEnvironmentRequest(testInfo["request"]); @@ -40,7 +41,8 @@ export class TestEnvironmentInfo { this._awsSecretAccessKey = String(testInfo["awsSecretAccessKey"]); this._awsSessionToken = String(testInfo["awsSessionToken"]); this._region = String(testInfo["region"]); - this._auroraClusterName = String(testInfo["auroraClusterName"]); + this._rdsEndpoint = String(testInfo["rdsEndpoint"]); + this._rdsDbName = String(testInfo["rdsDbName"]); this._iamUserName = String(testInfo["iamUsername"]); this._databaseInfo = new TestDatabaseInfo(testInfo["databaseInfo"]); @@ -73,8 +75,12 @@ export class TestEnvironmentInfo { return this._region; } - get auroraClusterName(): string { - return this._auroraClusterName; + get rdsEndpoint(): string { + return this._rdsEndpoint; + } + + get rdsDbName(): string { + return this._rdsDbName; } get databaseEngineVersion(): string { diff --git a/tests/integration/host/build.gradle.kts b/tests/integration/host/build.gradle.kts index 3ce5acbb..bb63213b 100644 --- a/tests/integration/host/build.gradle.kts +++ b/tests/integration/host/build.gradle.kts @@ -14,33 +14,37 @@ repositories { dependencies { testImplementation("org.checkerframework:checker-qual:3.26.0") - testImplementation("org.junit.platform:junit-platform-commons:1.9.0") - testImplementation("org.junit.platform:junit-platform-engine:1.9.0") - testImplementation("org.junit.platform:junit-platform-launcher:1.9.0") - testImplementation("org.junit.platform:junit-platform-suite-engine:1.9.0") - testImplementation("org.junit.jupiter:junit-jupiter-api:5.9.1") - 
testImplementation("org.junit.jupiter:junit-jupiter-params:5.9.1") + testImplementation("org.junit.platform:junit-platform-commons:1.11.3") + testImplementation("org.junit.platform:junit-platform-engine:1.11.3") + testImplementation("org.junit.platform:junit-platform-launcher:1.11.3") + testImplementation("org.junit.platform:junit-platform-suite-engine:1.11.3") + testImplementation("org.junit.jupiter:junit-jupiter-api:5.11.3") + testImplementation("org.junit.jupiter:junit-jupiter-params:5.11.3") testRuntimeOnly("org.junit.jupiter:junit-jupiter-engine") - testImplementation("org.apache.commons:commons-dbcp2:2.9.0") - testImplementation("org.postgresql:postgresql:42.5.0") - testImplementation("mysql:mysql-connector-java:8.0.30") - testImplementation("org.mockito:mockito-inline:4.8.0") - testImplementation("software.amazon.awssdk:rds:2.20.49") - testImplementation("software.amazon.awssdk:ec2:2.20.61") - testImplementation("software.amazon.awssdk:secretsmanager:2.20.49") - testImplementation("org.testcontainers:testcontainers:1.17.4") - testImplementation("org.testcontainers:postgresql:1.17.5") - testImplementation("org.testcontainers:mysql:1.17.+") - testImplementation("org.testcontainers:junit-jupiter:1.17.4") - testImplementation("org.testcontainers:toxiproxy:1.20.3") - testImplementation("org.apache.poi:poi-ooxml:5.2.2") - testImplementation("org.slf4j:slf4j-simple:2.0.3") - testImplementation("com.fasterxml.jackson.core:jackson-databind:2.14.2") - testImplementation("com.amazonaws:aws-xray-recorder-sdk-core:2.14.0") - testImplementation("io.opentelemetry:opentelemetry-sdk:1.29.0") - testImplementation("io.opentelemetry:opentelemetry-sdk-metrics:1.29.0") - testImplementation("io.opentelemetry:opentelemetry-exporter-otlp:1.29.0") + testImplementation("org.apache.commons:commons-dbcp2:2.12.0") + testImplementation("org.postgresql:postgresql:42.7.4") + testImplementation("com.mysql:mysql-connector-j:9.1.0") + testImplementation("org.mockito:mockito-inline:4.11.0") // 
4.11.0 is the last version compatible with Java 8 + testImplementation("software.amazon.awssdk:ec2:2.29.34") + testImplementation("software.amazon.awssdk:rds:2.29.34") + testImplementation("software.amazon.awssdk:sts:2.29.34") + + // Note: all org.testcontainers dependencies should have the same version + testImplementation("org.testcontainers:testcontainers:1.20.4") + testImplementation("org.testcontainers:mysql:1.20.4") + testImplementation("org.testcontainers:postgresql:1.20.4") + testImplementation("org.testcontainers:mariadb:1.20.4") + testImplementation("org.testcontainers:junit-jupiter:1.20.4") + testImplementation("org.testcontainers:toxiproxy:1.20.4") + testImplementation("org.apache.poi:poi-ooxml:5.3.0") + testImplementation("org.slf4j:slf4j-simple:2.0.13") + testImplementation("com.fasterxml.jackson.core:jackson-databind:2.17.1") + testImplementation("com.amazonaws:aws-xray-recorder-sdk-core:2.18.2") + testImplementation("io.opentelemetry:opentelemetry-sdk:1.44.1") + testImplementation("io.opentelemetry:opentelemetry-sdk-metrics:1.44.1") + testImplementation("io.opentelemetry:opentelemetry-exporter-otlp:1.44.1") + testImplementation("de.vandermeer:asciitable:0.3.2") } tasks.test { @@ -87,7 +91,8 @@ tasks.register("test-aurora") { doFirst { systemProperty("exclude-docker", "true") systemProperty("exclude-performance", "true") - systemProperty("exclude-multi-az", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") } } @@ -99,7 +104,9 @@ tasks.register("test-aurora-postgres") { systemProperty("exclude-performance", "true") systemProperty("exclude-mysql-driver", "true") systemProperty("exclude-mysql-engine", "true") - systemProperty("exclude-multi-az", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-bg", "true") } } @@ -112,7 +119,9 @@ tasks.register("test-aurora-mysql") { 
systemProperty("exclude-performance", "true") systemProperty("exclude-pg-driver", "true") systemProperty("exclude-pg-engine", "true") - systemProperty("exclude-multi-az", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-bg", "true") } } @@ -131,7 +140,9 @@ tasks.register("test-aurora-pg-performance") { systemProperty("exclude-docker", "true") systemProperty("exclude-mysql-driver", "true") systemProperty("exclude-mysql-engine", "true") - systemProperty("exclude-multi-az", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-bg", "true") } } @@ -142,7 +153,9 @@ tasks.register("test-aurora-mysql-performance") { systemProperty("exclude-docker", "true") systemProperty("exclude-pg-driver", "true") systemProperty("exclude-pg-engine", "true") - systemProperty("exclude-multi-az", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-bg", "true") } } @@ -156,6 +169,7 @@ tasks.register("test-multi-az-postgres") { systemProperty("exclude-mysql-driver", "true") systemProperty("exclude-mysql-engine", "true") systemProperty("exclude-aurora", "true") + systemProperty("exclude-bg", "true") } } @@ -168,6 +182,7 @@ tasks.register("test-multi-az-mysql") { systemProperty("exclude-pg-driver", "true") systemProperty("exclude-pg-engine", "true") systemProperty("exclude-aurora", "true") + systemProperty("exclude-bg", "true") } } @@ -177,7 +192,9 @@ tasks.register("test-autoscaling") { doFirst { systemProperty("exclude-docker", "true") systemProperty("exclude-performance", "true") - systemProperty("exclude-multi-az", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-bg", "true") systemProperty("test-autoscaling", "true") } } @@ -188,7 
+205,9 @@ tasks.register("test-autoscaling-mysql") { doFirst { systemProperty("exclude-docker", "true") systemProperty("exclude-performance", "true") - systemProperty("exclude-multi-az", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-bg", "true") systemProperty("exclude-pg-driver", "true") systemProperty("exclude-pg-engine", "true") systemProperty("test-autoscaling", "true") @@ -201,13 +220,69 @@ tasks.register("test-autoscaling-postgres") { doFirst { systemProperty("exclude-docker", "true") systemProperty("exclude-performance", "true") - systemProperty("exclude-multi-az", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-bg", "true") systemProperty("exclude-mysql-driver", "true") systemProperty("exclude-mysql-engine", "true") systemProperty("test-autoscaling", "true") } } +tasks.register("test-bgd-mysql-aurora") { + group = "verification" + filter.includeTestsMatching("integration.host.TestRunner.runTests") + doFirst { + systemProperty("exclude-docker", "true") + systemProperty("exclude-pg-driver", "true") + systemProperty("exclude-pg-engine", "true") + systemProperty("exclude-performance", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + } +} + +tasks.register("test-bgd-mysql-multiaz") { + group = "verification" + filter.includeTestsMatching("integration.host.TestRunner.runTests") + doFirst { + systemProperty("exclude-docker", "true") + systemProperty("exclude-pg-driver", "true") + systemProperty("exclude-pg-engine", "true") + systemProperty("exclude-performance", "true") + systemProperty("exclude-aurora", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "false") + } +} + +tasks.register("test-bgd-pg-aurora") { + group = "verification" + 
filter.includeTestsMatching("integration.host.TestRunner.runTests") + doFirst { + systemProperty("exclude-docker", "true") + systemProperty("exclude-mysql-driver", "true") + systemProperty("exclude-mysql-engine", "true") + systemProperty("exclude-performance", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + } +} + +tasks.register("test-bgd-pg-multiaz") { + group = "verification" + filter.includeTestsMatching("integration.host.TestRunner.runTests") + doFirst { + systemProperty("exclude-docker", "true") + systemProperty("exclude-mysql-driver", "true") + systemProperty("exclude-mysql-engine", "true") + systemProperty("exclude-performance", "true") + systemProperty("exclude-aurora", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "false") + } +} + // Debug tasks.register("debug-all-environments") { @@ -224,7 +299,9 @@ tasks.register("debug-docker") { doFirst { systemProperty("exclude-aurora", "true") systemProperty("exclude-performance", "true") - systemProperty("exclude-multi-az", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-bg", "true") } } @@ -234,7 +311,9 @@ tasks.register("debug-aurora") { doFirst { systemProperty("exclude-docker", "true") systemProperty("exclude-performance", "true") - systemProperty("exclude-multi-az", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-bg", "true") } } @@ -246,7 +325,9 @@ tasks.register("debug-aurora-pg") { systemProperty("exclude-performance", "true") systemProperty("exclude-mysql-driver", "true") systemProperty("exclude-mysql-engine", "true") - systemProperty("exclude-multi-az", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + 
systemProperty("exclude-bg", "true") } } @@ -258,7 +339,9 @@ tasks.register("debug-aurora-mysql") { systemProperty("exclude-performance", "true") systemProperty("exclude-pg-driver", "true") systemProperty("exclude-pg-engine", "true") - systemProperty("exclude-multi-az", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-bg", "true") } } @@ -269,7 +352,9 @@ tasks.register("debug-aurora-pg-performance") { systemProperty("exclude-docker", "true") systemProperty("exclude-mysql-driver", "true") systemProperty("exclude-mysql-engine", "true") - systemProperty("exclude-multi-az", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-bg", "true") } } @@ -280,7 +365,9 @@ tasks.register("debug-aurora-mysql-performance") { systemProperty("exclude-docker", "true") systemProperty("exclude-pg-driver", "true") systemProperty("exclude-pg-engine", "true") - systemProperty("exclude-multi-az", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + systemProperty("exclude-bg", "true") } } @@ -293,6 +380,7 @@ tasks.register("debug-multi-az-mysql") { systemProperty("exclude-pg-driver", "true") systemProperty("exclude-pg-engine", "true") systemProperty("exclude-aurora", "true") + systemProperty("exclude-bg", "true") } } @@ -305,6 +393,7 @@ tasks.register("debug-multi-az-postgres") { systemProperty("exclude-mysql-driver", "true") systemProperty("exclude-mysql-engine", "true") systemProperty("exclude-aurora", "true") + systemProperty("exclude-bg", "true") } } @@ -314,7 +403,23 @@ tasks.register("debug-autoscaling") { doFirst { systemProperty("exclude-docker", "true") systemProperty("exclude-performance", "true") - systemProperty("exclude-multi-az", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", 
"true") + systemProperty("exclude-bg", "true") systemProperty("test-autoscaling", "true") } } + +tasks.register("debug-bgd-mysql-aurora") { + group = "verification" + filter.includeTestsMatching("integration.host.TestRunner.debugTests") + doFirst { + systemProperty("exclude-docker", "true") + systemProperty("exclude-pg-driver", "true") + systemProperty("exclude-pg-engine", "true") + systemProperty("exclude-performance", "true") + systemProperty("exclude-multi-az-cluster", "true") + systemProperty("exclude-multi-az-instance", "true") + } +} + diff --git a/tests/integration/host/src/test/java/integration/host/DatabaseEngineDeployment.java b/tests/integration/host/src/test/java/integration/host/DatabaseEngineDeployment.java index 4d21fa6a..4b47e591 100644 --- a/tests/integration/host/src/test/java/integration/host/DatabaseEngineDeployment.java +++ b/tests/integration/host/src/test/java/integration/host/DatabaseEngineDeployment.java @@ -20,5 +20,6 @@ public enum DatabaseEngineDeployment { DOCKER, RDS, RDS_MULTI_AZ_CLUSTER, + RDS_MULTI_AZ_INSTANCE, AURORA } diff --git a/tests/integration/host/src/test/java/integration/host/TestDatabaseInfo.java b/tests/integration/host/src/test/java/integration/host/TestDatabaseInfo.java index 01dbd3ce..c955e68c 100644 --- a/tests/integration/host/src/test/java/integration/host/TestDatabaseInfo.java +++ b/tests/integration/host/src/test/java/integration/host/TestDatabaseInfo.java @@ -34,6 +34,10 @@ public class TestDatabaseInfo { private String instanceEndpointSuffix; // "XYZ.us-west-2.rds.amazonaws.com" private int instanceEndpointPort; + private String blueGreenDeploymentId; + + private String clusterParameterGroupName = null; + private final ArrayList instances = new ArrayList<>(); public TestDatabaseInfo() {} @@ -116,16 +120,19 @@ public TestInstanceInfo getInstance(String instanceName) { throw new RuntimeException("Instance " + instanceName + " not found."); } - public void moveInstanceFirst(String instanceName) { - for (int i = 0; 
i < this.instances.size(); i++) { - TestInstanceInfo currentInstance = this.instances.get(i); - if (instanceName != null && instanceName.equals(currentInstance.getInstanceId())) { - // move this instance to position 0 - this.instances.remove(i); - this.instances.add(0, currentInstance); - return; - } - } - throw new RuntimeException("Instance " + instanceName + " not found."); + public String getBlueGreenDeploymentId() { + return this.blueGreenDeploymentId; + } + + public void setBlueGreenDeploymentId(final String blueGreenDeploymentId) { + this.blueGreenDeploymentId = blueGreenDeploymentId; + } + + public String getClusterParameterGroupName() { + return this.clusterParameterGroupName; + } + + public void setClusterParameterGroupName(String clusterParameterGroupName) { + this.clusterParameterGroupName = clusterParameterGroupName; } } diff --git a/tests/integration/host/src/test/java/integration/host/TestEnvironmentConfig.java b/tests/integration/host/src/test/java/integration/host/TestEnvironmentConfig.java index f4ae43db..ad7ddfca 100644 --- a/tests/integration/host/src/test/java/integration/host/TestEnvironmentConfig.java +++ b/tests/integration/host/src/test/java/integration/host/TestEnvironmentConfig.java @@ -2,23 +2,27 @@ import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; +import integration.host.util.AuroraTestUtility; +import integration.host.util.ContainerHelper; +import integration.host.util.StringUtils; +import org.testcontainers.containers.GenericContainer; +import org.testcontainers.containers.Network; +import org.testcontainers.containers.ToxiproxyContainer; +import org.testcontainers.shaded.org.apache.commons.lang3.NotImplementedException; +import software.amazon.awssdk.services.rds.model.BlueGreenDeployment; +import software.amazon.awssdk.services.rds.model.DBCluster; +import software.amazon.awssdk.services.rds.model.DBInstance; import java.io.IOException; import java.net.URISyntaxException; 
import java.net.UnknownHostException; import java.sql.SQLException; import java.util.ArrayList; +import java.util.List; +import java.util.Random; +import java.util.concurrent.atomic.AtomicInteger; import java.util.logging.Logger; -import org.testcontainers.containers.GenericContainer; -import org.testcontainers.containers.Network; -import org.testcontainers.containers.ToxiproxyContainer; -import org.testcontainers.shaded.org.apache.commons.lang3.NotImplementedException; -import integration.host.util.AuroraTestUtility; -import integration.host.util.ContainerHelper; -import integration.host.util.StringUtils; -import software.amazon.awssdk.services.rds.model.DBCluster; - public class TestEnvironmentConfig implements AutoCloseable { private static final Logger LOGGER = Logger.getLogger(TestEnvironmentConfig.class.getName()); @@ -33,13 +37,17 @@ public class TestEnvironmentConfig implements AutoCloseable { private final TestEnvironmentInfo info = new TestEnvironmentInfo(); // only this info is passed to test container + private static final AtomicInteger ipAddressUsageRefCount = new AtomicInteger(0); + // The following variables are local to host portion of test environment. They are not shared with a // test container. 
private int numOfInstances; - private boolean reuseAuroraDbCluster; - private String auroraClusterName; // "cluster-mysql" - private String auroraClusterDomain; // "XYZ.us-west-2.rds.amazonaws.com" + private boolean reuseDb; + private String rdsDbName; // "cluster-mysql", "instance-name", "rds-multi-az-cluster-name" + private String rdsDbDomain; // "XYZ.us-west-2.rds.amazonaws.com" + private String rdsEndpoint; // "https://rds-int.amazon.com" + public String rdsDbRegion; // Expected values: "latest", "default", or engine version, for example, "15.4" // If left as empty, will use default version @@ -74,7 +82,8 @@ private TestEnvironmentConfig(TestEnvironmentRequest request) { public static TestEnvironmentConfig build(TestEnvironmentRequest request) throws URISyntaxException, SQLException { TestEnvironmentConfig env = new TestEnvironmentConfig(request); - switch (request.getDatabaseEngineDeployment()) { + final DatabaseEngineDeployment deployment = request.getDatabaseEngineDeployment(); + switch (deployment) { case DOCKER: initDatabaseParams(env); createDatabaseContainers(env); @@ -88,18 +97,19 @@ public static TestEnvironmentConfig build(TestEnvironmentRequest request) throws TestEnvironmentFeatures.FAILOVER_SUPPORTED.toString()); } + if (request.getFeatures().contains(TestEnvironmentFeatures.BLUE_GREEN_DEPLOYMENT)) { + throw new UnsupportedOperationException( + TestEnvironmentFeatures.BLUE_GREEN_DEPLOYMENT.toString()); + } + break; case AURORA: case RDS_MULTI_AZ_CLUSTER: - initDatabaseParams(env); - createDbCluster(env); - request.getFeatures().add(TestEnvironmentFeatures.RDS_MULTI_AZ_CLUSTER_SUPPORTED); + case RDS_MULTI_AZ_INSTANCE: + createAuroraOrMultiAzEnvironment(env); - if (request.getFeatures().contains(TestEnvironmentFeatures.IAM)) { - if (request.getDatabaseEngineDeployment() == DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER) { - throw new RuntimeException("IAM isn't supported by " + DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER); - } - 
configureIamAccess(env); + if (request.getFeatures().contains(TestEnvironmentFeatures.BLUE_GREEN_DEPLOYMENT)) { + createBlueGreenDeployment(env); } break; @@ -127,6 +137,169 @@ public static TestEnvironmentConfig build(TestEnvironmentRequest request) throws return env; } + private static void authorizeRunnerIpAddress(TestEnvironmentConfig env) { + DatabaseEngineDeployment deployment = env.info.getRequest().getDatabaseEngineDeployment(); + if (deployment == DatabaseEngineDeployment.AURORA + || deployment == DatabaseEngineDeployment.RDS + || deployment == DatabaseEngineDeployment.RDS_MULTI_AZ_INSTANCE + || deployment == DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER) { + // These environment require creating external database cluster that should be publicly available. + // Corresponding AWS Security Groups should be configured and the test task runner IP address + // should be whitelisted. + + if (env.info.getRequest().getFeatures().contains(TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED)) { + if (ipAddressUsageRefCount.incrementAndGet() == 1) { + authorizeIP(env); + } else { + LOGGER.finest("IP usage count: " + ipAddressUsageRefCount.get()); + } + } + } + } + + private static void createAuroraOrMultiAzEnvironment(TestEnvironmentConfig env) { + initRandomBase(env); + initDatabaseParams(env); + initAwsCredentials(env); + + final TestEnvironmentRequest request = env.info.getRequest(); + switch (request.getDatabaseEngineDeployment()) { + case RDS_MULTI_AZ_INSTANCE: + initEnv(env); + authorizeRunnerIpAddress(env); + createMultiAzInstance(env); + configureIamAccess(env); + break; + case RDS_MULTI_AZ_CLUSTER: + initEnv(env); + authorizeRunnerIpAddress(env); + createDbCluster(env); + configureIamAccess(env); + break; + case AURORA: + initEnv(env); + authorizeRunnerIpAddress(env); + + if (!env.reuseDb + && env.info.getRequest().getFeatures().contains(TestEnvironmentFeatures.BLUE_GREEN_DEPLOYMENT)) { + createCustomClusterParameterGroup(env); + } + createDbCluster(env); + 
configureIamAccess(env); + break; + default: + throw new NotImplementedException(request.getDatabaseEngineDeployment().toString()); + } + + } + + private static void createBlueGreenDeployment(TestEnvironmentConfig env) { + + if (env.info.getRequest().getDatabaseEngineDeployment() == DatabaseEngineDeployment.AURORA) { + DBCluster clusterInfo = env.auroraUtil.getClusterInfo(env.rdsDbName); + if (env.reuseDb) { + BlueGreenDeployment bgDeployment = env.auroraUtil.getBlueGreenDeploymentBySource(clusterInfo.dbClusterArn()); + if (bgDeployment != null) { + env.info.getDatabaseInfo().setBlueGreenDeploymentId(bgDeployment.blueGreenDeploymentIdentifier()); + waitForBlueGreenClustersHaveRightState(env, bgDeployment); + return; + } + } + + // otherwise, create a new BG deployment + final String blueGreenId = env.auroraUtil.createBlueGreenDeployment( + env.rdsDbName, clusterInfo.dbClusterArn()); + env.info.getDatabaseInfo().setBlueGreenDeploymentId(blueGreenId); + + BlueGreenDeployment bgDeployment = env.auroraUtil.getBlueGreenDeployment(blueGreenId); + if (bgDeployment != null) { + waitForBlueGreenClustersHaveRightState(env, bgDeployment); + } + + } else if (env.info.getRequest().getDatabaseEngineDeployment() == DatabaseEngineDeployment.RDS_MULTI_AZ_INSTANCE) { + DBInstance instanceInfo = env.auroraUtil.getRdsInstanceInfo(env.rdsDbName); + if (env.reuseDb) { + BlueGreenDeployment bgDeployment = env.auroraUtil.getBlueGreenDeploymentBySource(instanceInfo.dbInstanceArn()); + if (bgDeployment != null) { + env.info.getDatabaseInfo().setBlueGreenDeploymentId(bgDeployment.blueGreenDeploymentIdentifier()); + waitForBlueGreenInstancesHaveRightState(env, bgDeployment); + return; + } + } + + // otherwise, create a new BG deployment + final String blueGreenId = env.auroraUtil.createBlueGreenDeployment( + env.rdsDbName, instanceInfo.dbInstanceArn()); + env.info.getDatabaseInfo().setBlueGreenDeploymentId(blueGreenId); + + BlueGreenDeployment bgDeployment = 
env.auroraUtil.getBlueGreenDeployment(blueGreenId); + if (bgDeployment != null) { + waitForBlueGreenInstancesHaveRightState(env, bgDeployment); + } + + } else { + LOGGER.warning("BG Deployments are supported for RDS MultiAz Instances and Aurora clusters only." + + " Proceed without creating BG Deployment."); + } + } + + private static void waitForBlueGreenClustersHaveRightState(TestEnvironmentConfig env, BlueGreenDeployment bgDeployment) { + + DBCluster blueClusterInfo = env.auroraUtil.getClusterByArn(bgDeployment.source()); + if (blueClusterInfo != null) { + try { + env.auroraUtil.waitUntilClusterHasRightState(blueClusterInfo.dbClusterIdentifier()); + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + throw new RuntimeException(ex); + } + } + + DBCluster greenClusterInfo = env.auroraUtil.getClusterByArn(bgDeployment.target()); + if (greenClusterInfo != null) { + try { + env.auroraUtil.waitUntilClusterHasRightState(greenClusterInfo.dbClusterIdentifier()); + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + throw new RuntimeException(ex); + } + } + } + + private static void waitForBlueGreenInstancesHaveRightState(TestEnvironmentConfig env, BlueGreenDeployment bgDeployment) { + + DBInstance blueInstanceInfo = env.auroraUtil.getRdsInstanceInfoByArn(bgDeployment.source()); + if (blueInstanceInfo != null) { + try { + env.auroraUtil.waitUntilInstanceHasRightState( + blueInstanceInfo.dbInstanceIdentifier(), "available"); + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + throw new RuntimeException(ex); + } + } + + DBInstance greenInstanceInfo = env.auroraUtil.getRdsInstanceInfoByArn(bgDeployment.target()); + if (greenInstanceInfo != null) { + try { + env.auroraUtil.waitUntilInstanceHasRightState( + greenInstanceInfo.dbInstanceIdentifier(), "available"); + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + throw new RuntimeException(ex); + } + } + } + + private static 
void createCustomClusterParameterGroup(TestEnvironmentConfig env) { + String groupName = String.format("test-cpg-%s", env.info.getRandomBase()); + String engine = getDbEngine(env.info.getRequest()); + String engineVersion = getDbEngineVersion(env); + env.auroraUtil.createCustomClusterParameterGroup( + groupName, engine, engineVersion, env.info.getRequest().getDatabaseEngine()); + env.info.getDatabaseInfo().setClusterParameterGroupName(groupName); + } + private static void createDatabaseContainers(TestEnvironmentConfig env) { ContainerHelper containerHelper = new ContainerHelper(); @@ -197,7 +370,35 @@ private static void createDatabaseContainers(TestEnvironmentConfig env) { } } - private static void createDbCluster(TestEnvironmentConfig env) throws URISyntaxException, SQLException { + private static void initEnv(TestEnvironmentConfig env) { + env.rdsDbRegion = System.getenv("RDS_DB_REGION"); + env.info.setRegion( + !StringUtils.isNullOrEmpty(env.rdsDbRegion) + ? env.rdsDbRegion + : "us-east-2"); + + env.reuseDb = Boolean.parseBoolean(System.getenv("REUSE_RDS_DB")); + env.rdsDbName = System.getenv("RDS_DB_NAME"); // "cluster-mysql", "instance-name", "cluster-multi-az-name" + env.rdsDbDomain = System.getenv("RDS_DB_DOMAIN"); // "XYZ.us-west-2.rds.amazonaws.com" + env.rdsEndpoint = System.getenv("RDS_ENDPOINT"); // "https://rds-int.amazon.com" + + env.auroraMySqlDbEngineVersion = System.getenv("MYSQL_VERSION"); + env.auroraPgDbEngineVersion = System.getenv("PG_VERSION"); + env.rdsMySqlDbEngineVersion = System.getenv("MYSQL_VERSION"); + env.rdsPgDbEngineVersion = System.getenv("PG_VERSION"); + + env.info.setRdsEndpoint(env.rdsEndpoint); + + env.auroraUtil = + new AuroraTestUtility( + env.info.getRegion(), + env.rdsEndpoint, + env.awsAccessKeyId, + env.awsSecretAccessKey, + env.awsSessionToken); + } + + private static void createDbCluster(TestEnvironmentConfig env) { switch (env.info.getRequest().getDatabaseInstances()) { case SINGLE_INSTANCE: @@ -209,11 +410,21 @@ 
private static void createDbCluster(TestEnvironmentConfig env) throws URISyntaxE initAwsCredentials(env); env.numOfInstances = env.info.getRequest().getNumOfInstances(); - if (env.numOfInstances < 1 || env.numOfInstances > 15) { - LOGGER.warning( - env.numOfInstances + " instances were requested but the requested number must be " - + "between 1 and 15. 5 instances will be used as a default."); - env.numOfInstances = 5; + if (env.info.getRequest().getDatabaseEngineDeployment() == DatabaseEngineDeployment.AURORA) { + if (env.numOfInstances < 1 || env.numOfInstances > 15) { + LOGGER.warning( + env.numOfInstances + " instances were requested but the requested number must be " + + "between 1 and 15. 5 instances will be used as a default."); + env.numOfInstances = 5; + } + } + if (env.info.getRequest().getDatabaseEngineDeployment() == DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER) { + if (env.numOfInstances != 3) { + LOGGER.warning( + env.numOfInstances + " instances were requested but the requested number must be 3. " + + "3 instances will be used as a default."); + env.numOfInstances = 3; + } } createDbCluster(env, env.numOfInstances); @@ -223,51 +434,27 @@ private static void createDbCluster(TestEnvironmentConfig env) throws URISyntaxE } } - private static void createDbCluster(TestEnvironmentConfig env, int numOfInstances) throws URISyntaxException, SQLException { - - env.info.setRegion( - !StringUtils.isNullOrEmpty(System.getenv("RDS_DB_REGION")) - ? 
System.getenv("RDS_DB_REGION") - : "us-east-1"); - - env.reuseAuroraDbCluster = - !StringUtils.isNullOrEmpty(System.getenv("REUSE_RDS_CLUSTER")) - && Boolean.parseBoolean(System.getenv("REUSE_RDS_CLUSTER")); - env.auroraClusterName = System.getenv("RDS_CLUSTER_NAME"); // "cluster-mysql" - env.auroraClusterDomain = - System.getenv("RDS_CLUSTER_DOMAIN"); // "XYZ.us-west-2.rds.amazonaws.com" - env.auroraMySqlDbEngineVersion = - System.getenv("AURORA_MYSQL_DB_ENGINE_VERSION"); // "latest", "default" - env.auroraPgDbEngineVersion = - System.getenv("AURORA_PG_DB_ENGINE_VERSION"); - env.rdsMySqlDbEngineVersion = System.getenv("RDS_MYSQL_DB_ENGINE_VERSION"); // "latest", "default" - env.rdsPgDbEngineVersion = System.getenv("RDS_PG_DB_ENGINE_VERSION"); - - env.auroraUtil = - new AuroraTestUtility( - env.info.getRegion(), - env.info.getRdsEndpoint(), - env.awsAccessKeyId, - env.awsSecretAccessKey, - env.awsSessionToken); - - ArrayList instances = new ArrayList<>(); + private static void createDbCluster(TestEnvironmentConfig env, int numOfInstances) { - if (env.reuseAuroraDbCluster) { - if (StringUtils.isNullOrEmpty(env.auroraClusterDomain)) { - throw new RuntimeException("Environment variable RDS_CLUSTER_DOMAIN is required when testing against an existing Aurora DB cluster."); + if (env.reuseDb) { + if (StringUtils.isNullOrEmpty(env.rdsDbDomain)) { + throw new RuntimeException("Environment variable RDS_DB_DOMAIN is required."); } - if (!env.auroraUtil.doesClusterExist(env.auroraClusterName)) { + if (StringUtils.isNullOrEmpty(env.rdsDbName)) { + throw new RuntimeException("Environment variable RDS_DB_NAME is required."); + } + + if (!env.auroraUtil.doesClusterExist(env.rdsDbName)) { throw new RuntimeException( "It's requested to reuse existing DB cluster but it doesn't exist: " - + env.auroraClusterName - + "." 
- + env.auroraClusterDomain); + + env.rdsDbName + + ".cluster-" + + env.rdsDbDomain); } LOGGER.finer( - "Reuse existing cluster " + env.auroraClusterName + ".cluster-" + env.auroraClusterDomain); + "Reuse existing cluster " + env.rdsDbName + ".cluster-" + env.rdsDbDomain); - DBCluster clusterInfo = env.auroraUtil.getClusterInfo(env.auroraClusterName); + DBCluster clusterInfo = env.auroraUtil.getClusterInfo(env.rdsDbName); DatabaseEngine existingClusterDatabaseEngine = env.auroraUtil.getClusterEngine(clusterInfo); if (existingClusterDatabaseEngine != env.info.getRequest().getDatabaseEngine()) { @@ -281,83 +468,105 @@ private static void createDbCluster(TestEnvironmentConfig env, int numOfInstance env.info.setDatabaseEngine(clusterInfo.engine()); env.info.setDatabaseEngineVersion(clusterInfo.engineVersion()); - instances.addAll(env.auroraUtil.getClusterInstanceIds(env.auroraClusterName)); - } else { - if (StringUtils.isNullOrEmpty(env.auroraClusterName)) { - env.auroraClusterName = getRandomName(env.info.getRequest()); - LOGGER.finer("Cluster to create: " + env.auroraClusterName); + if (StringUtils.isNullOrEmpty(env.rdsDbName)) { + int remainingTries = 5; + boolean clusterExists = false; + while (remainingTries-- > 0) { + env.rdsDbName = getRandomName(env.info.getRequest()); + if (env.auroraUtil.doesClusterExist(env.rdsDbName)) { + clusterExists = true; + env.info.setRandomBase(null); + initRandomBase(env); + LOGGER.finest("Cluster " + env.rdsDbName + " already exists. 
Pick up another name."); + } else { + clusterExists = false; + LOGGER.finer("Cluster to create: " + env.rdsDbName); + break; + } + } + if (clusterExists) { + throw new RuntimeException("Can't pick up a cluster name."); + } } try { - final TestEnvironmentRequest request = env.info.getRequest(); - String engine = getDbEngine(request); + String engine = getDbEngine(env.info.getRequest()); String engineVersion = getDbEngineVersion(env); if (StringUtils.isNullOrEmpty(engineVersion)) { throw new RuntimeException("Failed to get engine version."); } + String instanceClass = env.auroraUtil.getDbInstanceClass(env.info.getRequest()); LOGGER.finer("Using " + engine + " " + engineVersion); - String instanceClass = getDbInstanceClass(env.info.getRequest()); + env.auroraUtil.createCluster( + env.info.getDatabaseInfo().getUsername(), + env.info.getDatabaseInfo().getPassword(), + env.info.getDatabaseInfo().getDefaultDbName(), + env.rdsDbName, + env.info.getRequest().getDatabaseEngineDeployment(), + env.info.getRegion(), + engine, + instanceClass, + engineVersion, + env.info.getDatabaseInfo().getClusterParameterGroupName(), + numOfInstances); + + List dbInstances = env.auroraUtil.getDBInstances(env.rdsDbName); + if (dbInstances.isEmpty()) { + throw new RuntimeException("Failed to get instance information for cluster " + env.rdsDbName); + } - env.auroraClusterDomain = - env.auroraUtil.createCluster( - env.info.getDatabaseInfo().getUsername(), - env.info.getDatabaseInfo().getPassword(), - env.info.getDatabaseInfo().getDefaultDbName(), - env.auroraClusterName, - env.info.getRequest().getDatabaseEngineDeployment(), - engine, - instanceClass, - engineVersion, - numOfInstances, - instances); + final String instanceEndpoint = dbInstances.get(0).endpoint().address(); + env.rdsDbDomain = instanceEndpoint.substring(instanceEndpoint.indexOf(".") + 1); env.info.setDatabaseEngine(engine); env.info.setDatabaseEngineVersion(engineVersion); LOGGER.finer( - "Created a new cluster " + 
env.auroraClusterName + ".cluster-" + env.auroraClusterDomain); + "Created a new cluster " + env.rdsDbName + ".cluster-" + env.rdsDbDomain); } catch (Exception e) { - LOGGER.finer("Error creating a cluster " + env.auroraClusterName + ". " + e.getMessage()); + LOGGER.finer("Error creating a cluster " + env.rdsDbName + ". " + e.getMessage()); // remove cluster and instances - LOGGER.finer("Deleting cluster " + env.auroraClusterName); - env.auroraUtil.deleteCluster(env.auroraClusterName); - LOGGER.finer("Deleted cluster " + env.auroraClusterName); + LOGGER.finer("Deleting cluster " + env.rdsDbName); + env.auroraUtil.deleteCluster(env.rdsDbName, env.info.getRequest().getDatabaseEngineDeployment(), false); + LOGGER.finer("Deleted cluster " + env.rdsDbName); throw new RuntimeException(e); } } - env.info.setAuroraClusterName(env.auroraClusterName); + env.info.setRdsDbName(env.rdsDbName); int port = getPort(env.info.getRequest()); env.info .getDatabaseInfo() - .setClusterEndpoint(env.auroraClusterName + ".cluster-" + env.auroraClusterDomain, port); + .setClusterEndpoint(env.rdsDbName + ".cluster-" + env.rdsDbDomain, port); env.info .getDatabaseInfo() .setClusterReadOnlyEndpoint( - env.auroraClusterName + ".cluster-ro-" + env.auroraClusterDomain, port); - env.info.getDatabaseInfo().setInstanceEndpointSuffix(env.auroraClusterDomain, port); + env.rdsDbName + ".cluster-ro-" + env.rdsDbDomain, port); + env.info.getDatabaseInfo().setInstanceEndpointSuffix(env.rdsDbDomain, port); + List instances = env.auroraUtil.getTestInstancesInfo(env.rdsDbName); env.info.getDatabaseInfo().getInstances().clear(); env.info.getDatabaseInfo().getInstances().addAll(instances); + // Make sure the cluster is available and accessible. 
try { - env.runnerIP = env.auroraUtil.getPublicIPAddress(); - } catch (UnknownHostException e) { - throw new RuntimeException(e); + env.auroraUtil.waitUntilClusterHasRightState(env.rdsDbName); + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + throw new RuntimeException(ex); } - env.auroraUtil.ec2AuthorizeIP(env.runnerIP); final DatabaseEngineDeployment deployment = env.info.getRequest().getDatabaseEngineDeployment(); final DatabaseEngine engine = env.info.getRequest().getDatabaseEngine(); final TestDatabaseInfo info = env.info.getDatabaseInfo(); - - if (DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER.equals(deployment) && DatabaseEngine.PG.equals(engine)) { + if (DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER.equals(deployment) + || DatabaseEngineDeployment.RDS_MULTI_AZ_INSTANCE.equals(deployment)) { final String url = String.format( "%s%s:%d/%s", @@ -374,6 +583,192 @@ private static void createDbCluster(TestEnvironmentConfig env, int numOfInstance } } + private static void createMultiAzInstance(TestEnvironmentConfig env) { + env.auroraUtil = + new AuroraTestUtility( + env.info.getRegion(), + env.rdsEndpoint, + env.awsAccessKeyId, + env.awsSecretAccessKey, + env.awsSessionToken); + + ArrayList instances = new ArrayList<>(); + + if (env.reuseDb) { + if (StringUtils.isNullOrEmpty(env.rdsDbDomain)) { + throw new RuntimeException("Environment variable RDS_DB_DOMAIN is required."); + } + if (StringUtils.isNullOrEmpty(env.rdsDbName)) { + throw new RuntimeException("Environment variable RDS_DB_NAME is required."); + } + + if (!env.auroraUtil.doesInstanceExist(env.rdsDbName)) { + throw new RuntimeException( + "It's requested to reuse existing RDS instance but it doesn't exist: " + + env.rdsDbName + + "." + + env.rdsDbDomain); + } + LOGGER.finer( + "Reuse existing RDS Instance " + env.rdsDbName + "." 
+ env.rdsDbDomain); + + DBInstance instanceInfo = env.auroraUtil.getRdsInstanceInfo(env.rdsDbName); + + DatabaseEngine existingRdsInstanceDatabaseEngine = env.auroraUtil.getRdsInstanceEngine(instanceInfo); + if (existingRdsInstanceDatabaseEngine != env.info.getRequest().getDatabaseEngine()) { + throw new RuntimeException( + "Existing RDS Instance is " + + existingRdsInstanceDatabaseEngine + + " instance. " + + env.info.getRequest().getDatabaseEngine() + + " is expected."); + } + + env.info.setDatabaseEngine(instanceInfo.engine()); + env.info.setDatabaseEngineVersion(instanceInfo.engineVersion()); + instances.add(new TestInstanceInfo( + instanceInfo.dbInstanceIdentifier(), + instanceInfo.endpoint().address(), + instanceInfo.endpoint().port())); + + } else { + if (StringUtils.isNullOrEmpty(env.rdsDbName)) { + env.rdsDbName = getRandomName(env.info.getRequest()); + LOGGER.finer("RDS Instance to create: " + env.rdsDbName); + } + + try { + String engine = getDbEngine(env.info.getRequest()); + String engineVersion = getDbEngineVersion(env); + if (StringUtils.isNullOrEmpty(engineVersion)) { + throw new RuntimeException("Failed to get engine version."); + } + String instanceClass = env.auroraUtil.getDbInstanceClass(env.info.getRequest()); + + LOGGER.finer("Using " + engine + " " + engineVersion); + + env.rdsDbDomain = + env.auroraUtil.createMultiAzInstance( + env.info.getDatabaseInfo().getUsername(), + env.info.getDatabaseInfo().getPassword(), + env.info.getDatabaseInfo().getDefaultDbName(), + env.rdsDbName, + env.info.getRequest().getDatabaseEngineDeployment(), + engine, + instanceClass, + engineVersion, + instances); + + env.info.setDatabaseEngine(engine); + env.info.setDatabaseEngineVersion(engineVersion); + LOGGER.finer( + "Created a new RDS Instance " + env.rdsDbName + "." + env.rdsDbDomain); + } catch (Exception e) { + + LOGGER.finer("Error creating a RDS Instance " + env.rdsDbName + ". 
" + e); + + // remove RDS instance + LOGGER.finer("Deleting RDS Instance " + env.rdsDbName); + env.auroraUtil.deleteMultiAzInstance(env.rdsDbName, false); + LOGGER.finer("Deleted RDS Instance " + env.rdsDbName); + + throw new RuntimeException(e); + } + } + + int port = getPort(env.info.getRequest()); + env.info.getDatabaseInfo().setInstanceEndpointSuffix(env.rdsDbDomain, port); + + env.info.getDatabaseInfo().getInstances().clear(); + env.info.getDatabaseInfo().getInstances().addAll(instances); + + final DatabaseEngineDeployment deployment = env.info.getRequest().getDatabaseEngineDeployment(); + final DatabaseEngine engine = env.info.getRequest().getDatabaseEngine(); + final TestDatabaseInfo info = env.info.getDatabaseInfo(); + String url; + switch (deployment) { + case RDS_MULTI_AZ_INSTANCE: + url = + String.format( + "%s%s:%d/%s", + DriverHelper.getDriverProtocol(engine), + instances.get(0).getHost(), + port, + info.getDefaultDbName()); + + if (engine == DatabaseEngine.PG) { + env.auroraUtil.createRdsExtension( + engine, + url, + info.getUsername(), + info.getPassword()); + } + + break; + case RDS_MULTI_AZ_CLUSTER: + url = + String.format( + "%s%s:%d/%s", + DriverHelper.getDriverProtocol(engine), + info.getClusterEndpoint(), + port, + info.getDefaultDbName()); + + if (engine == DatabaseEngine.PG) { + env.auroraUtil.createRdsExtension( + engine, + url, + info.getUsername(), + info.getPassword()); + } + + break; + default: + throw new UnsupportedOperationException(deployment.toString()); + } + } + + private static void authorizeIP(TestEnvironmentConfig env) { + try { + env.runnerIP = env.auroraUtil.getPublicIPAddress(); + LOGGER.finest("Test runner IP: " + env.runnerIP); + } catch (UnknownHostException e) { + throw new RuntimeException(e); + } + env.auroraUtil.ec2AuthorizeIP(env.runnerIP); + LOGGER.finest(String.format("Test runner IP %s authorized. 
Usage count: %d", + env.runnerIP, ipAddressUsageRefCount.get())); + } + + private static void deAuthorizeIP(TestEnvironmentConfig env) { + if (ipAddressUsageRefCount.decrementAndGet() == 0) { + if (env.runnerIP == null) { + try { + env.runnerIP = env.auroraUtil.getPublicIPAddress(); + } catch (UnknownHostException e) { + throw new RuntimeException(e); + } + } + if (!env.reuseDb) { + env.auroraUtil.ec2DeauthorizesIP(env.runnerIP); + LOGGER.finest(String.format("Test runner IP %s de-authorized. Usage count: %d", + env.runnerIP, ipAddressUsageRefCount.get())); + } else { + LOGGER.finest("The IP address usage count hit 0, but the REUSE_RDS_DB was set to true, so IP " + + "de-authorization was skipped."); + } + } else { + LOGGER.finest("IP usage count: " + ipAddressUsageRefCount.get()); + } + } + + private static void initRandomBase(TestEnvironmentConfig env) { + String randomBase = env.info.getRandomBase(); + if (StringUtils.isNullOrEmpty(randomBase)) { + env.info.setRandomBase(generateRandom()); + } + } + private static String getRandomName(TestEnvironmentRequest request) { switch (request.getDatabaseEngine()) { case MYSQL: @@ -385,11 +780,26 @@ private static String getRandomName(TestEnvironmentRequest request) { } } + private static String generateRandom() { + String alphabet = "0123456789abcdefghijklmnopqrstuvwxyz"; + + int n = alphabet.length(); + StringBuilder result = new StringBuilder(); + Random r = new Random(); + + for (int i = 0; i < 10; i++) { + result.append(alphabet.charAt(r.nextInt(n))); + } + + return result.toString(); + } + private static String getDbEngine(TestEnvironmentRequest request) { switch (request.getDatabaseEngineDeployment()) { case AURORA: return getAuroraEngine(request); case RDS: + case RDS_MULTI_AZ_INSTANCE: case RDS_MULTI_AZ_CLUSTER: return getEngine(request); default: @@ -403,6 +813,7 @@ private static String getDbEngineVersion(TestEnvironmentConfig env) { case AURORA: return getAuroraDbEngineVersion(env); case RDS: + case 
RDS_MULTI_AZ_INSTANCE: case RDS_MULTI_AZ_CLUSTER: return getRdsEngineVersion(env); default: @@ -426,7 +837,7 @@ private static String getRdsEngineVersion(TestEnvironmentConfig env) { default: throw new NotImplementedException(request.getDatabaseEngine().toString()); } - return findDbEngineVersion(env, engineName, systemPropertyVersion.toLowerCase()); + return findDbEngineVersion(env, engineName, systemPropertyVersion); } private static String getAuroraDbEngineVersion(TestEnvironmentConfig env) { @@ -445,14 +856,14 @@ private static String getAuroraDbEngineVersion(TestEnvironmentConfig env) { default: throw new NotImplementedException(request.getDatabaseEngine().toString()); } - return findDbEngineVersion(env, engineName, systemPropertyVersion.toLowerCase()); + return findDbEngineVersion(env, engineName, systemPropertyVersion); } private static String findDbEngineVersion(TestEnvironmentConfig env, String engineName, String systemPropertyVersion) { if (systemPropertyVersion == null) { return env.auroraUtil.getDefaultVersion(engineName); } - switch (systemPropertyVersion) { + switch (systemPropertyVersion.toLowerCase()) { case "default": return env.auroraUtil.getDefaultVersion(engineName); case "latest": @@ -484,18 +895,6 @@ private static String getEngine(TestEnvironmentRequest request) { } } - private static String getDbInstanceClass(TestEnvironmentRequest request) { - switch (request.getDatabaseEngineDeployment()) { - case AURORA: - return "db.r5.large"; - case RDS: - case RDS_MULTI_AZ_CLUSTER: - return "db.m5d.large"; - default: - throw new NotImplementedException(request.getDatabaseEngine().toString()); - } - } - private static int getPort(TestEnvironmentRequest request) { switch (request.getDatabaseEngine()) { case MYSQL: @@ -728,33 +1127,63 @@ private static String getContainerBaseImageName(TestEnvironmentRequest request) } private static void configureIamAccess(TestEnvironmentConfig env) { - - if (env.info.getRequest().getDatabaseEngineDeployment() != 
DatabaseEngineDeployment.AURORA) { - throw new UnsupportedOperationException( - env.info.getRequest().getDatabaseEngineDeployment().toString()); + if (!env.info.getRequest().getFeatures().contains(TestEnvironmentFeatures.IAM)) { + return; } + final DatabaseEngineDeployment deployment = env.info.getRequest().getDatabaseEngineDeployment(); + env.info.setIamUsername( !StringUtils.isNullOrEmpty(System.getenv("IAM_USER")) ? System.getenv("IAM_USER") : "jane_doe"); - if (!env.reuseAuroraDbCluster) { - final String url = - String.format( + + if (!env.reuseDb) { + try { + Class.forName(DriverHelper.getDriverClassname(env.info.getRequest().getDatabaseEngine())); + } catch (ClassNotFoundException e) { + throw new RuntimeException( + "Driver not found: " + + DriverHelper.getDriverClassname(env.info.getRequest().getDatabaseEngine()), + e); + } + + String url; + switch (deployment) { + case AURORA: + case RDS_MULTI_AZ_CLUSTER: + url = String.format( "%s%s:%d/%s", DriverHelper.getDriverProtocol(env.info.getRequest().getDatabaseEngine()), env.info.getDatabaseInfo().getClusterEndpoint(), env.info.getDatabaseInfo().getClusterEndpointPort(), env.info.getDatabaseInfo().getDefaultDbName()); + break; + case RDS_MULTI_AZ_INSTANCE: + url = String.format( + "%s%s:%d/%s", + DriverHelper.getDriverProtocol(env.info.getRequest().getDatabaseEngine()), + env.info.getDatabaseInfo().getInstances().get(0).getHost(), + env.info.getDatabaseInfo().getInstances().get(0).getPort(), + env.info.getDatabaseInfo().getDefaultDbName()); + break; + default: + throw new UnsupportedOperationException(deployment.toString()); + } try { + final boolean useRdsTools = env.info.getRequest().getFeatures() + .contains(TestEnvironmentFeatures.BLUE_GREEN_DEPLOYMENT) + && env.info.getRequest().getDatabaseEngine() == DatabaseEngine.PG + && env.info.getRequest().getDatabaseEngineDeployment() == DatabaseEngineDeployment.RDS_MULTI_AZ_INSTANCE; env.auroraUtil.addAuroraAwsIamUser( env.info.getRequest().getDatabaseEngine(), url, 
env.info.getDatabaseInfo().getUsername(), env.info.getDatabaseInfo().getPassword(), env.info.getIamUsername(), - env.info.getDatabaseInfo().getDefaultDbName()); + env.info.getDatabaseInfo().getDefaultDbName(), + useRdsTools); } catch (SQLException e) { throw new RuntimeException("Error configuring IAM access.", e); @@ -818,7 +1247,15 @@ public void close() throws Exception { switch (this.info.getRequest().getDatabaseEngineDeployment()) { case AURORA: - deleteAuroraDbCluster(); + if (this.info.getRequest().getFeatures().contains(TestEnvironmentFeatures.BLUE_GREEN_DEPLOYMENT) + && !StringUtils.isNullOrEmpty(this.info.getDatabaseInfo().getBlueGreenDeploymentId())) { + deleteBlueGreenDeployment(); + deleteDbCluster(true); + deleteCustomClusterParameterGroup(this.info.getDatabaseInfo().getClusterParameterGroupName()); + } else { + deleteDbCluster(false); + } + deAuthorizeIP(this); break; case RDS: throw new NotImplementedException(this.info.getRequest().getDatabaseEngineDeployment().toString()); @@ -827,15 +1264,116 @@ public void close() throws Exception { } } - private void deleteAuroraDbCluster() { - if (!this.reuseAuroraDbCluster && !StringUtils.isNullOrEmpty(this.runnerIP)) { - auroraUtil.ec2DeauthorizesIP(runnerIP); + private void deleteDbCluster(boolean waitForCompletion) { + if (!this.reuseDb) { + LOGGER.finest("Deleting cluster " + this.rdsDbName + ".cluster-" + this.rdsDbDomain); + auroraUtil.deleteCluster( + this.rdsDbName, this.info.getRequest().getDatabaseEngineDeployment(), waitForCompletion); + LOGGER.finest("Deleted cluster " + this.rdsDbName + ".cluster-" + this.rdsDbDomain); } + } + + private void deleteBlueGreenDeployment() throws InterruptedException { - if (!this.reuseAuroraDbCluster) { - LOGGER.finest("Deleting cluster " + this.auroraClusterName + ".cluster-" + this.auroraClusterDomain); - auroraUtil.deleteCluster(this.auroraClusterName); - LOGGER.finest("Deleted cluster " + this.auroraClusterName + ".cluster-" + this.auroraClusterDomain); + 
BlueGreenDeployment blueGreenDeployment; + + switch (this.info.getRequest().getDatabaseEngineDeployment()) { + case AURORA: + if (this.reuseDb) { + break; + } + + blueGreenDeployment = auroraUtil.getBlueGreenDeployment(this.info.getDatabaseInfo().getBlueGreenDeploymentId()); + + if (blueGreenDeployment == null) { + return; + } + + auroraUtil.deleteBlueGreenDeployment(this.info.getDatabaseInfo().getBlueGreenDeploymentId(), true); + + // Remove extra DB cluster + + // For BGD in AVAILABLE status: source = blue, target = green + // For BGD in SWITCHOVER_COMPLETED: source = old1, target = blue + LOGGER.finest("BG source: " + blueGreenDeployment.source()); + LOGGER.finest("BG target: " + blueGreenDeployment.target()); + + if ("SWITCHOVER_COMPLETED".equals(blueGreenDeployment.status())) { + // Delete old1 cluster + DBCluster old1ClusterInfo = auroraUtil.getClusterByArn(blueGreenDeployment.source()); + if (old1ClusterInfo != null) { + auroraUtil.waitUntilClusterHasRightState(old1ClusterInfo.dbClusterIdentifier(), "available"); + LOGGER.finest("Deleting Aurora cluster " + old1ClusterInfo.dbClusterIdentifier()); + auroraUtil.deleteCluster( + old1ClusterInfo.dbClusterIdentifier(), + this.info.getRequest().getDatabaseEngineDeployment(), + true); + LOGGER.finest("Deleted Aurora cluster " + old1ClusterInfo.dbClusterIdentifier()); + } + } else { + // Delete green cluster + DBCluster greenClusterInfo = auroraUtil.getClusterByArn(blueGreenDeployment.target()); + if (greenClusterInfo != null) { + auroraUtil.promoteClusterToStandalone(blueGreenDeployment.target()); + LOGGER.finest("Deleting Aurora cluster " + greenClusterInfo.dbClusterIdentifier()); + auroraUtil.deleteCluster( + greenClusterInfo.dbClusterIdentifier(), + this.info.getRequest().getDatabaseEngineDeployment(), + true); + LOGGER.finest("Deleted Aurora cluster " + greenClusterInfo.dbClusterIdentifier()); + } + } + break; + case RDS_MULTI_AZ_INSTANCE: + if (this.reuseDb) { + break; + } + + blueGreenDeployment = 
auroraUtil.getBlueGreenDeployment(this.info.getDatabaseInfo().getBlueGreenDeploymentId()); + + if (blueGreenDeployment == null) { + return; + } + + auroraUtil.deleteBlueGreenDeployment(this.info.getDatabaseInfo().getBlueGreenDeploymentId(), true); + + // For BGD in AVAILABLE status: source = blue, target = green + // For BGD in SWITCHOVER_COMPLETED: source = old1, target = blue + LOGGER.finest("BG source: " + blueGreenDeployment.source()); + LOGGER.finest("BG target: " + blueGreenDeployment.target()); + + if ("SWITCHOVER_COMPLETED".equals(blueGreenDeployment.status())) { + // Delete old1 cluster + DBInstance old1InstanceInfo = auroraUtil.getRdsInstanceInfoByArn(blueGreenDeployment.source()); + if (old1InstanceInfo != null) { + LOGGER.finest("Deleting MultiAz Instance " + old1InstanceInfo.dbInstanceIdentifier()); + auroraUtil.deleteMultiAzInstance(old1InstanceInfo.dbInstanceIdentifier(), true); + LOGGER.finest("Deleted MultiAz Instance " + old1InstanceInfo.dbInstanceIdentifier()); + } + } else { + // Delete green cluster + DBInstance greenInstanceInfo = auroraUtil.getRdsInstanceInfoByArn(blueGreenDeployment.target()); + if (greenInstanceInfo != null) { + auroraUtil.promoteInstanceToStandalone(blueGreenDeployment.target()); + LOGGER.finest("Deleting MultiAz Instance " + greenInstanceInfo.dbInstanceIdentifier()); + auroraUtil.deleteMultiAzInstance(greenInstanceInfo.dbInstanceIdentifier(), true); + LOGGER.finest("Deleted MultiAz Instance " + greenInstanceInfo.dbInstanceIdentifier()); + } + } + break; + default: + throw new RuntimeException("Unsupported " + this.info.getRequest().getDatabaseEngineDeployment()); + } + } + + private void deleteCustomClusterParameterGroup(String groupName) { + if (this.reuseDb) { + return; + } + try { + this.auroraUtil.deleteCustomClusterParameterGroup(groupName); + } catch (Exception ex) { + LOGGER.finest(String.format("Error deleting cluster parameter group %s. 
%s", groupName, ex)); } } } diff --git a/tests/integration/host/src/test/java/integration/host/TestEnvironmentFeatures.java b/tests/integration/host/src/test/java/integration/host/TestEnvironmentFeatures.java index 7ddbc5ad..2e863f32 100644 --- a/tests/integration/host/src/test/java/integration/host/TestEnvironmentFeatures.java +++ b/tests/integration/host/src/test/java/integration/host/TestEnvironmentFeatures.java @@ -16,6 +16,8 @@ package integration.host; +import com.sun.glass.ui.Accessible; + public enum TestEnvironmentFeatures { IAM, SECRETS_MANAGER, @@ -29,5 +31,6 @@ public enum TestEnvironmentFeatures { SKIP_PG_DRIVER_TESTS, TELEMETRY_TRACES_ENABLED, TELEMETRY_METRICS_ENABLED, - RDS_MULTI_AZ_CLUSTER_SUPPORTED + RDS_MULTI_AZ_CLUSTER_SUPPORTED, + BLUE_GREEN_DEPLOYMENT; } diff --git a/tests/integration/host/src/test/java/integration/host/TestEnvironmentInfo.java b/tests/integration/host/src/test/java/integration/host/TestEnvironmentInfo.java index e57b7631..37665a8b 100644 --- a/tests/integration/host/src/test/java/integration/host/TestEnvironmentInfo.java +++ b/tests/integration/host/src/test/java/integration/host/TestEnvironmentInfo.java @@ -26,7 +26,7 @@ public class TestEnvironmentInfo { private String region; private String rdsEndpoint; - private String auroraClusterName; + private String rdsDbName; private String iamUsername; private TestDatabaseInfo databaseInfo; @@ -36,6 +36,13 @@ public class TestEnvironmentInfo { private TestTelemetryInfo tracesTelemetryInfo; private TestTelemetryInfo metricsTelemetryInfo; + private String blueGreenDeploymentId; + + private String clusterParameterGroupName = null; + + // Random alphanumeric combination that is used to form a test cluster name or an instance name. 
+ private String randomBase = null; + public TestDatabaseInfo getDatabaseInfo() { return this.databaseInfo; } @@ -80,12 +87,12 @@ public String getRegion() { return this.region; } - public String getRdsEndpoint() { - return this.rdsEndpoint; + public String getRdsDbName() { + return rdsDbName; } - public String getAuroraClusterName() { - return this.auroraClusterName; + public String getRdsEndpoint() { + return this.rdsEndpoint; } public String getIamUsername() { @@ -100,12 +107,12 @@ public void setRegion(String region) { this.region = region; } - public void setRdsEndpoint(String rdsEndpoint) { - this.rdsEndpoint = rdsEndpoint; + public void setRdsDbName(String dbName) { + this.rdsDbName = dbName; } - public void setAuroraClusterName(String auroraClusterName) { - this.auroraClusterName = auroraClusterName; + public void setRdsEndpoint(String rdsEndpoint) { + this.rdsEndpoint = rdsEndpoint; } public void setDatabaseInfo(TestDatabaseInfo databaseInfo) { @@ -147,4 +154,12 @@ public void setAwsSessionToken(String awsSessionToken) { public void setIamUsername(String iamUsername) { this.iamUsername = iamUsername; } + + public String getRandomBase() { + return this.randomBase; + } + + public void setRandomBase(String randomBase) { + this.randomBase = randomBase; + } } diff --git a/tests/integration/host/src/test/java/integration/host/TestEnvironmentProvider.java b/tests/integration/host/src/test/java/integration/host/TestEnvironmentProvider.java index 06e3b619..341ea7e0 100644 --- a/tests/integration/host/src/test/java/integration/host/TestEnvironmentProvider.java +++ b/tests/integration/host/src/test/java/integration/host/TestEnvironmentProvider.java @@ -10,6 +10,7 @@ import java.util.Set; import java.util.logging.Logger; import java.util.stream.Stream; + import org.junit.jupiter.api.extension.Extension; import org.junit.jupiter.api.extension.ExtensionContext; import org.junit.jupiter.api.extension.TestTemplateInvocationContext; @@ -45,7 +46,9 @@ public Stream 
provideTestTemplateInvocationContex final boolean excludeDocker = Boolean.parseBoolean(System.getProperty("exclude-docker", "false")); final boolean excludeAurora = Boolean.parseBoolean(System.getProperty("exclude-aurora", "false")); - final boolean excludeMultiAZ = Boolean.parseBoolean(System.getProperty("exclude-multi-az", "false")); + final boolean excludeMultiAZCluster = Boolean.parseBoolean(System.getProperty("exclude-multi-az-cluster", "false")); + final boolean excludeMultiAZInstance = Boolean.parseBoolean(System.getProperty("exclude-multi-az-instance", "false")); + final boolean excludeBg = Boolean.parseBoolean(System.getProperty("exclude-bg", "false")); final boolean excludePerformance = Boolean.parseBoolean(System.getProperty("exclude-performance", "false")); final boolean excludeMysqlEngine = @@ -76,7 +79,10 @@ public Stream provideTestTemplateInvocationContex // Not in use. continue; } - if (deployment == DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER && excludeMultiAZ) { + if (deployment == DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER && excludeMultiAZCluster) { + continue; + } + if (deployment == DatabaseEngineDeployment.RDS_MULTI_AZ_INSTANCE && excludeMultiAZInstance) { continue; } for (DatabaseEngine engine : DatabaseEngine.values()) { @@ -113,32 +119,33 @@ public Stream provideTestTemplateInvocationContex } resultContextList.add( - getEnvironment( - new TestEnvironmentRequest( - engine, - instances, - instances == DatabaseInstances.SINGLE_INSTANCE ? 1 : numOfInstances, - deployment, - TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED, - TestEnvironmentFeatures.ABORT_CONNECTION_SUPPORTED, - deployment == DatabaseEngineDeployment.DOCKER ? null : TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED, - deployment == DatabaseEngineDeployment.DOCKER || excludeFailover - ? null - : TestEnvironmentFeatures.FAILOVER_SUPPORTED, - deployment == DatabaseEngineDeployment.DOCKER - || deployment == DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER - || excludeIam - ? 
null - : TestEnvironmentFeatures.IAM, - excludeSecretsManager ? null : TestEnvironmentFeatures.SECRETS_MANAGER, - excludePerformance ? null : TestEnvironmentFeatures.PERFORMANCE, - excludeMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null, - excludePgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null, - testAutoscalingOnly ? TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY : null, - excludeTracesTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_TRACES_ENABLED, - excludeMetricsTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_METRICS_ENABLED, - // AWS credentials are required for XRay telemetry - excludeTracesTelemetry && excludeMetricsTelemetry ? null : TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED))); + getEnvironment( + new TestEnvironmentRequest( + engine, + instances, + instances == DatabaseInstances.SINGLE_INSTANCE ? 1 : numOfInstances, + deployment, + TestEnvironmentFeatures.NETWORK_OUTAGES_ENABLED, + TestEnvironmentFeatures.ABORT_CONNECTION_SUPPORTED, + deployment == DatabaseEngineDeployment.DOCKER ? null : TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED, + deployment == DatabaseEngineDeployment.DOCKER || excludeFailover + ? null + : TestEnvironmentFeatures.FAILOVER_SUPPORTED, + deployment == DatabaseEngineDeployment.DOCKER + || deployment == DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER + || excludeIam + ? null + : TestEnvironmentFeatures.IAM, + excludeSecretsManager ? null : TestEnvironmentFeatures.SECRETS_MANAGER, + excludePerformance ? null : TestEnvironmentFeatures.PERFORMANCE, + excludeMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null, + excludePgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null, + testAutoscalingOnly ? TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY : null, + excludeBg ? null : TestEnvironmentFeatures.BLUE_GREEN_DEPLOYMENT, + excludeTracesTelemetry ? null : TestEnvironmentFeatures.TELEMETRY_TRACES_ENABLED, + excludeMetricsTelemetry ? 
null : TestEnvironmentFeatures.TELEMETRY_METRICS_ENABLED, + // AWS credentials are required for XRay telemetry + excludeTracesTelemetry && excludeMetricsTelemetry ? null : TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED))); } } } diff --git a/tests/integration/host/src/test/java/integration/host/util/AuroraTestUtility.java b/tests/integration/host/src/test/java/integration/host/util/AuroraTestUtility.java index e6d03fda..89a5d8d3 100644 --- a/tests/integration/host/src/test/java/integration/host/util/AuroraTestUtility.java +++ b/tests/integration/host/src/test/java/integration/host/util/AuroraTestUtility.java @@ -16,17 +16,11 @@ package integration.host.util; -import integration.host.DatabaseEngine; -import integration.host.DatabaseEngineDeployment; -import integration.host.DriverHelper; -import integration.host.TestEnvironment; -import integration.host.TestEnvironmentInfo; -import integration.host.TestInstanceInfo; -import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; -import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; -import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; -import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; -import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import integration.host.*; +import org.checkerframework.checker.nullness.qual.Nullable; +import org.testcontainers.shaded.org.apache.commons.lang3.NotImplementedException; +import software.amazon.awssdk.auth.credentials.*; +import software.amazon.awssdk.core.exception.SdkClientException; import software.amazon.awssdk.core.waiters.WaiterResponse; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.ec2.Ec2Client; @@ -49,13 +43,16 @@ import java.sql.Statement; import java.time.Duration; import java.time.Instant; -import java.util.ArrayList; -import java.util.Comparator; -import java.util.List; -import java.util.Optional; -import java.util.Random; +import 
java.time.ZoneId; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; +import java.util.*; import java.util.concurrent.TimeUnit; import java.util.logging.Logger; +import java.util.stream.Collectors; + +import static integration.host.DatabaseEngineDeployment.RDS_MULTI_AZ_INSTANCE; +import static org.junit.jupiter.api.Assertions.fail; /** * Creates and destroys AWS RDS Clusters and Instances. To use this functionality the following environment variables @@ -64,6 +61,7 @@ public class AuroraTestUtility { private static final Logger LOGGER = Logger.getLogger(AuroraTestUtility.class.getName()); + private static final int MULTI_AZ_SIZE = 3; // Default values private String dbUsername = "my_test_username"; @@ -74,11 +72,12 @@ public class AuroraTestUtility { private String dbEngine = "aurora-postgresql"; private String dbEngineVersion = "13.9"; private String dbInstanceClass = "db.r5.large"; - private String storageType = "io1"; - private int allocatedStorage = 100; - private int iops = 1000; private final Region dbRegion; private final String dbSecGroup = "default"; + private @Nullable String clusterParameterGroupName; + private static final String DEFAULT_STORAGE_TYPE = "gp3"; + private static final int DEFAULT_IOPS = 64000; + private static final int DEFAULT_ALLOCATED_STORAGE = 400; private int numOfInstances = 5; private List instances = new ArrayList<>(); @@ -93,8 +92,7 @@ public AuroraTestUtility(String region, String endpoint) throws URISyntaxExcepti } public AuroraTestUtility( - String region, String rdsEndpoint, String awsAccessKeyId, String awsSecretAccessKey, String awsSessionToken) - throws URISyntaxException { + String region, String rdsEndpoint, String awsAccessKeyId, String awsSecretAccessKey, String awsSessionToken) { this( getRegionInternal(region), @@ -114,15 +112,18 @@ public AuroraTestUtility( * Availability Zones, and Local Zones * @param credentialsProvider Specific AWS credential provider */ - public AuroraTestUtility(Region 
region, String rdsEndpoint, AwsCredentialsProvider credentialsProvider) - throws URISyntaxException { + public AuroraTestUtility(Region region, String rdsEndpoint, AwsCredentialsProvider credentialsProvider) { dbRegion = region; final RdsClientBuilder rdsClientBuilder = RdsClient.builder() .region(dbRegion) .credentialsProvider(credentialsProvider); if (!StringUtils.isNullOrEmpty(rdsEndpoint)) { - rdsClientBuilder.endpointOverride(new URI(rdsEndpoint)); + try { + rdsClientBuilder.endpointOverride(new URI(rdsEndpoint)); + } catch (URISyntaxException e) { + throw new RuntimeException(e); + } } rdsClient = rdsClientBuilder.build(); @@ -143,91 +144,186 @@ protected static Region getRegionInternal(String rdsRegion) { } /** - * Creates RDS Cluster/Instances and waits until they are up, and proper IP whitelisting for databases. + * Creates an RDS cluster based on the passed in details. After the cluster is created, this method will wait + * until it is available, adds the current IP address to the default security group, and create a database with the + * given name within the cluster. 
* - * @param username Master username for access to database - * @param password Master password for access to database - * @param dbName Database name - * @param identifier Database cluster identifier - * @param engine Database engine to use, refer to - * https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Welcome.html - * @param instanceClass instance class, refer to - * https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html + * @param username the master username for access to the database + * @param password the master password for access to the database + * @param dbName the database to create within the cluster + * @param identifier the cluster identifier + * @param deployment the engine deployment to use + * @param region the region that the cluster should be created in + * @param engine the engine to use, refer to + * CreateDbClusterRequest.engine + * @param instanceClass the instance class, refer to + * Supported instance classes * @param version the database engine's version - * @return An endpoint for one of the instances + * @param numInstances the number of instances to create for the cluster * @throws InterruptedException when clusters have not started after 30 minutes */ - public String createCluster( + public void createCluster( String username, String password, String dbName, String identifier, DatabaseEngineDeployment deployment, + String region, String engine, String instanceClass, String version, - int numOfInstances, - ArrayList instances) + @Nullable String clusterParameterGroupName, + int numInstances) throws InterruptedException { - this.dbUsername = username; - this.dbPassword = password; - this.dbName = dbName; - this.dbIdentifier = identifier; - this.dbEngineDeployment = deployment; - this.dbEngine = engine; - this.dbInstanceClass = instanceClass; - this.dbEngineVersion = version; - this.numOfInstances = numOfInstances; - this.instances = instances; - - switch (this.dbEngineDeployment) { + + switch 
(deployment) { case AURORA: - return createAuroraCluster(); + createAuroraCluster( + username, password, dbName, identifier, region, engine, instanceClass, + version, clusterParameterGroupName, numInstances); + break; case RDS_MULTI_AZ_CLUSTER: - return createMultiAzCluster(); + if (numInstances != MULTI_AZ_SIZE) { + throw new RuntimeException( + "A multi-az cluster with " + numInstances + " instances was requested, but multi-az clusters must have " + + MULTI_AZ_SIZE + " instances."); + } + createMultiAzCluster( + username, password, dbName, identifier, region, engine, instanceClass, version); + break; default: - throw new UnsupportedOperationException(this.dbEngineDeployment.toString()); + throw new UnsupportedOperationException(deployment.toString()); } } + public String createMultiAzInstance( + String username, + String password, + String dbName, + String identifier, + DatabaseEngineDeployment deployment, + String engine, + String instanceClass, + String version, + ArrayList instances) { + + if (deployment != RDS_MULTI_AZ_INSTANCE) { + throw new UnsupportedOperationException(deployment.toString()); + } + + CreateDbInstanceResponse response = rdsClient.createDBInstance(CreateDbInstanceRequest.builder() + .dbInstanceIdentifier(identifier) + .publiclyAccessible(true) + .dbName(dbName) + .masterUsername(username) + .masterUserPassword(password) + .enableIAMDatabaseAuthentication(true) + .multiAZ(true) + .engine(engine) + .engineVersion(version) + .dbInstanceClass(instanceClass) + .enablePerformanceInsights(false) + .backupRetentionPeriod(1) + .storageEncrypted(true) + .storageType(DEFAULT_STORAGE_TYPE) + .allocatedStorage(DEFAULT_ALLOCATED_STORAGE) + .iops(DEFAULT_IOPS) + .tags(this.getTag()) + .build()); + + // Wait for all instances to be up + final RdsWaiter waiter = rdsClient.waiter(); + WaiterResponse waiterResponse = + waiter.waitUntilDBInstanceAvailable( + (requestBuilder) -> + requestBuilder.filters( + 
Filter.builder().name("db-instance-id").values(identifier).build()), + (configurationBuilder) -> configurationBuilder.maxAttempts(240).waitTimeout(Duration.ofMinutes(240))); + + if (waiterResponse.matched().exception().isPresent()) { + deleteMultiAzInstance(identifier, false); + throw new RuntimeException( + "Unable to start AWS RDS Instance after waiting for 240 minutes"); + } + + DescribeDbInstancesResponse dbInstancesResult = waiterResponse.matched().response().orElse(null); + if (dbInstancesResult == null) { + throw new RuntimeException("Unable to get instance details."); + } + + final String endpoint = dbInstancesResult.dbInstances().get(0).endpoint().address(); + final String rdsDomainPrefix = endpoint.substring(endpoint.indexOf('.') + 1); + + for (DBInstance instance : dbInstancesResult.dbInstances()) { + instances.add( + new TestInstanceInfo( + instance.dbInstanceIdentifier(), + instance.endpoint().address(), + instance.endpoint().port())); + } + + return rdsDomainPrefix; + } + /** - * Creates RDS Cluster/Instances and waits until they are up, and proper IP whitelisting for databases. + * Creates an RDS Aurora cluster based on the passed in details. After the cluster is created, this method will wait + * until it is available, adds the current IP address to the default security group, and create a database with the + * given name within the cluster. 
* - * @return An endpoint for one of the instances + * @param username the master username for access to the database + * @param password the master password for access to the database + * @param dbName the database to create within the cluster + * @param identifier the cluster identifier + * @param region the region that the cluster should be created in + * @param engine the engine to use, refer to + * CreateDbClusterRequest.engine + * @param instanceClass the instance class, refer to + * Supported instance classes + * @param version the database engine's version + * @param numInstances the number of instances to create for the cluster * @throws InterruptedException when clusters have not started after 30 minutes */ - public String createAuroraCluster() throws InterruptedException { - // Create Cluster - final Tag testRunnerTag = Tag.builder().key("env").value("test-runner").build(); - + public void createAuroraCluster( + String username, + String password, + String dbName, + String identifier, + String region, + String engine, + String instanceClass, + String version, + @Nullable String clusterParameterGroupName, + int numInstances) + throws InterruptedException { final CreateDbClusterRequest dbClusterRequest = CreateDbClusterRequest.builder() - .dbClusterIdentifier(dbIdentifier) + .dbClusterIdentifier(identifier) .databaseName(dbName) - .masterUsername(dbUsername) - .masterUserPassword(dbPassword) - .sourceRegion(dbRegion.id()) + .masterUsername(username) + .masterUserPassword(password) + .sourceRegion(region) .enableIAMDatabaseAuthentication(true) - .engine(dbEngine) - .engineVersion(dbEngineVersion) + .engine(engine) + .engineVersion(version) .storageEncrypted(true) - .tags(testRunnerTag) + .tags(this.getTag()) + .dbClusterParameterGroupName(clusterParameterGroupName) .build(); rdsClient.createDBCluster(dbClusterRequest); // Create Instances - for (int i = 1; i <= numOfInstances; i++) { - final String instanceName = dbIdentifier + "-" + i; + for (int i = 1; i 
<= numInstances; i++) { + final String instanceName = identifier + "-" + i; rdsClient.createDBInstance( CreateDbInstanceRequest.builder() - .dbClusterIdentifier(dbIdentifier) + .dbClusterIdentifier(identifier) .dbInstanceIdentifier(instanceName) - .dbInstanceClass(dbInstanceClass) - .engine(dbEngine) - .engineVersion(dbEngineVersion) + .dbInstanceClass(instanceClass) + .engine(engine) + .engineVersion(version) .publiclyAccessible(true) - .tags(testRunnerTag) + .tags(this.getTag()) .build()); } @@ -237,122 +333,99 @@ public String createAuroraCluster() throws InterruptedException { waiter.waitUntilDBInstanceAvailable( (requestBuilder) -> requestBuilder.filters( - Filter.builder().name("db-cluster-id").values(dbIdentifier).build()), - (configurationBuilder) -> configurationBuilder.waitTimeout(Duration.ofMinutes(30))); + Filter.builder().name("db-cluster-id").values(identifier).build()), + (configurationBuilder) -> configurationBuilder.maxAttempts(480).waitTimeout(Duration.ofMinutes(240))); if (waiterResponse.matched().exception().isPresent()) { - deleteCluster(); + deleteCluster(identifier, DatabaseEngineDeployment.AURORA, false); throw new InterruptedException( "Unable to start AWS RDS Cluster & Instances after waiting for 30 minutes"); } - - final DescribeDbInstancesResponse dbInstancesResult = - rdsClient.describeDBInstances( - (builder) -> - builder.filters( - Filter.builder().name("db-cluster-id").values(dbIdentifier).build())); - final String endpoint = dbInstancesResult.dbInstances().get(0).endpoint().address(); - final String clusterDomainSuffix = endpoint.substring(endpoint.indexOf('.') + 1); - - for (DBInstance instance : dbInstancesResult.dbInstances()) { - this.instances.add( - new TestInstanceInfo( - instance.dbInstanceIdentifier(), - instance.endpoint().address(), - instance.endpoint().port())); - } - - return clusterDomainSuffix; } /** - * Creates RDS Cluster/Instances and waits until they are up, and proper IP whitelisting for databases. 
+ * Creates an RDS multi-az cluster based on the passed in details. After the cluster is created, this method will wait + * until it is available, adds the current IP address to the default security group, and create a database with the + * given name within the cluster. * - * @return An endpoint for one of the instances + * @param username the master username for access to the database + * @param password the master password for access to the database + * @param dbName the database to create within the cluster + * @param identifier the cluster identifier + * @param region the region that the cluster should be created in + * @param engine the engine to use, refer to + * CreateDbClusterRequest.engine + * @param instanceClass the instance class, refer to + * Supported instance classes + * @param version the database engine's version * @throws InterruptedException when clusters have not started after 30 minutes */ - public String createMultiAzCluster() throws InterruptedException { - // Create Cluster - final Tag testRunnerTag = Tag.builder().key("env").value("test-runner").build(); + public void createMultiAzCluster(String username, + String password, + String dbName, + String identifier, + String region, + String engine, + String instanceClass, + String version) + throws InterruptedException { CreateDbClusterRequest.Builder clusterBuilder = CreateDbClusterRequest.builder() - .dbClusterIdentifier(dbIdentifier) + .dbClusterIdentifier(identifier) .publiclyAccessible(true) .databaseName(dbName) - .masterUsername(dbUsername) - .masterUserPassword(dbPassword) - .sourceRegion(dbRegion.id()) - .engine(dbEngine) - .engineVersion(dbEngineVersion) + .masterUsername(username) + .masterUserPassword(password) + .sourceRegion(region) + .engine(engine) + .engineVersion(version) .enablePerformanceInsights(false) .backupRetentionPeriod(1) .storageEncrypted(true) - .tags(testRunnerTag); - - clusterBuilder = - clusterBuilder.allocatedStorage(allocatedStorage) - 
.dbClusterInstanceClass(dbInstanceClass) - .storageType(storageType) - .iops(iops); + .tags(this.getTag()) + .allocatedStorage(DEFAULT_ALLOCATED_STORAGE) + .dbClusterInstanceClass(instanceClass) + .storageType(DEFAULT_STORAGE_TYPE) + .iops(DEFAULT_IOPS); rdsClient.createDBCluster(clusterBuilder.build()); - // For multi-AZ deployments, the cluster instances are created automatically. - - // Wait for all instances to be up + // For multi-AZ deployments, the cluster instances are created automatically. Wait for all instances to be up. final RdsWaiter waiter = rdsClient.waiter(); WaiterResponse waiterResponse = waiter.waitUntilDBInstanceAvailable( (requestBuilder) -> requestBuilder.filters( - Filter.builder().name("db-cluster-id").values(dbIdentifier).build()), + Filter.builder().name("db-cluster-id").values(identifier).build()), (configurationBuilder) -> configurationBuilder.waitTimeout(Duration.ofMinutes(30))); if (waiterResponse.matched().exception().isPresent()) { - deleteCluster(); + deleteCluster(identifier, DatabaseEngineDeployment.RDS_MULTI_AZ_CLUSTER, false); throw new InterruptedException( "Unable to start AWS RDS Cluster & Instances after waiting for 30 minutes"); } - - final DescribeDbInstancesResponse dbInstancesResult = - rdsClient.describeDBInstances( - (builder) -> - builder.filters( - Filter.builder().name("db-cluster-id").values(dbIdentifier).build())); - final String endpoint = dbInstancesResult.dbInstances().get(0).endpoint().address(); - final String clusterDomainSuffix = endpoint.substring(endpoint.indexOf('.') + 1); - - for (DBInstance instance : dbInstancesResult.dbInstances()) { - this.instances.add( - new TestInstanceInfo( - instance.dbInstanceIdentifier(), - instance.endpoint().address(), - instance.endpoint().port())); - } - - return clusterDomainSuffix; } /** * Creates an RDS instance under the current cluster and waits until it is up. 
* - * @param instanceId the desired instance ID of the new instance - * @return the instance info of the new instance + * @param instanceClass the desired instance class of the new instance + * @param instanceId the desired instance ID of the new instance + * @return the instance info for the new instance * @throws InterruptedException if the new instance is not available within 5 minutes */ - public TestInstanceInfo createInstance(String instanceId) throws InterruptedException { - final Tag testRunnerTag = Tag.builder().key("env").value("test-runner").build(); + public TestInstanceInfo createInstance(String instanceClass, String instanceId) throws InterruptedException { final TestEnvironmentInfo info = TestEnvironment.getCurrent().getInfo(); rdsClient.createDBInstance( CreateDbInstanceRequest.builder() - .dbClusterIdentifier(info.getAuroraClusterName()) + .dbClusterIdentifier(info.getRdsDbName()) .dbInstanceIdentifier(instanceId) - .dbInstanceClass(dbInstanceClass) + .dbInstanceClass(instanceClass) .engine(info.getDatabaseEngine()) .engineVersion(info.getDatabaseEngineVersion()) .publiclyAccessible(true) - .tags(testRunnerTag) + .tags(this.getTag()) .build()); // Wait for the instance to become available @@ -384,12 +457,18 @@ public TestInstanceInfo createInstance(String instanceId) throws InterruptedExce } DBInstance instance = dbInstancesResult.dbInstances().get(0); - TestInstanceInfo instanceInfo = new TestInstanceInfo( + return new TestInstanceInfo( instance.dbInstanceIdentifier(), instance.endpoint().address(), instance.endpoint().port()); - this.instances.add(instanceInfo); - return instanceInfo; + } + + public List getDBInstances(String clusterId) { + final DescribeDbInstancesResponse dbInstancesResult = + rdsClient.describeDBInstances( + (builder) -> + builder.filters(Filter.builder().name("db-cluster-id").values(clusterId).build())); + return dbInstancesResult.dbInstances(); } /** @@ -404,8 +483,6 @@ public void deleteInstance(TestInstanceInfo 
instanceToDelete) throws Interrupted .dbInstanceIdentifier(instanceToDelete.getInstanceId()) .skipFinalSnapshot(true) .build()); - this.instances.remove(instanceToDelete); - final RdsWaiter waiter = rdsClient.waiter(); WaiterResponse waiterResponse = waiter.waitUntilDBInstanceDeleted( (requestBuilder) -> requestBuilder.filters( @@ -420,6 +497,60 @@ public void deleteInstance(TestInstanceInfo instanceToDelete) throws Interrupted } } + public void createCustomClusterParameterGroup( + String groupName, String engine, String engineVersion, DatabaseEngine databaseEngine) { + CreateDbClusterParameterGroupResponse response = rdsClient.createDBClusterParameterGroup( + CreateDbClusterParameterGroupRequest.builder() + .dbClusterParameterGroupName(groupName) + .description("Test custom cluster parameter group for BGD.") + .dbParameterGroupFamily(this.getAuroraParameterGroupFamily(engine, engineVersion)) + .build()); + + if (!response.sdkHttpResponse().isSuccessful()) { + throw new RuntimeException("Error creating custom cluster parameter group. 
" + response.sdkHttpResponse()); + } + + ModifyDbClusterParameterGroupResponse response2; + switch (databaseEngine) { + case MYSQL: + response2 = rdsClient.modifyDBClusterParameterGroup( + ModifyDbClusterParameterGroupRequest.builder() + .dbClusterParameterGroupName(groupName) + .parameters(Parameter.builder() + .parameterName("binlog_format") + .parameterValue("ROW") + .applyMethod(ApplyMethod.PENDING_REBOOT) + .build()) + .build()); + break; + case PG: + response2 = rdsClient.modifyDBClusterParameterGroup( + ModifyDbClusterParameterGroupRequest.builder() + .dbClusterParameterGroupName(groupName) + .parameters(Parameter.builder() + .parameterName("rds.logical_replication") + .parameterValue("true") + .applyMethod(ApplyMethod.PENDING_REBOOT) + .build()) + .build()); + break; + default: + throw new UnsupportedOperationException(databaseEngine.toString()); + } + + if (!response2.sdkHttpResponse().isSuccessful()) { + throw new RuntimeException("Error updating parameter. " + response2.sdkHttpResponse()); + } + } + + public void deleteCustomClusterParameterGroup(String groupName) { + rdsClient.deleteDBClusterParameterGroup( + DeleteDbClusterParameterGroupRequest.builder() + .dbClusterParameterGroupName(groupName) + .build() + ); + } + /** * Gets public IP. * @@ -507,46 +638,49 @@ public void ec2DeauthorizesIP(String ipAddress) { } /** - * Destroys all instances and clusters. Removes IP from EC2 whitelist. + * Deletes the specified cluster and removes the current IP address from the default security group. * - * @param identifier database identifier to delete - */ - public void deleteCluster(String identifier) { - dbIdentifier = identifier; - deleteCluster(); - } - - /** - * Destroys all instances and clusters. Removes IP from EC2 whitelist. 
+ * @param identifier the cluster identifier for the cluster to delete + * @param deployment the engine deployment for the cluster to delete + * @param waitForCompletion if true, wait for cluster completely deleted */ - public void deleteCluster() { - - switch (this.dbEngineDeployment) { + public void deleteCluster(String identifier, DatabaseEngineDeployment deployment, boolean waitForCompletion) { + switch (deployment) { case AURORA: - this.deleteAuroraCluster(); + this.deleteAuroraCluster(identifier, waitForCompletion); break; case RDS_MULTI_AZ_CLUSTER: - this.deleteMultiAzCluster(); + this.deleteMultiAzCluster(identifier, waitForCompletion); break; default: - throw new UnsupportedOperationException(this.dbEngineDeployment.toString()); + throw new UnsupportedOperationException(deployment.toString()); } } /** - * Destroys all instances and clusters. Removes IP from EC2 whitelist. + * Deletes the specified Aurora cluster and removes the current IP address from the default security group. + * + * @param identifier the cluster identifier for the cluster to delete + * @param waitForCompletion if true, wait for cluster completely deleted */ - public void deleteAuroraCluster() { + public void deleteAuroraCluster(String identifier, boolean waitForCompletion) { + DBCluster dbCluster = getDBCluster(identifier); + if (dbCluster == null) { + return; + } + List members = dbCluster.dbClusterMembers(); + // Tear down instances - for (int i = 1; i <= numOfInstances; i++) { + for (DBClusterMember member : members) { try { rdsClient.deleteDBInstance( DeleteDbInstanceRequest.builder() - .dbInstanceIdentifier(dbIdentifier + "-" + i) + .dbInstanceIdentifier(member.dbInstanceIdentifier()) .skipFinalSnapshot(true) .build()); } catch (Exception ex) { - LOGGER.finest("Error deleting instance " + dbIdentifier + "-" + i + ". 
" + ex.getMessage()); + LOGGER.finest("Error deleting instance '" + + member.dbInstanceIdentifier() + "' of Aurora cluster: " + ex.getMessage()); // Ignore this error and continue with other instances } } @@ -556,7 +690,7 @@ public void deleteAuroraCluster() { while (--remainingAttempts > 0) { try { DeleteDbClusterResponse response = rdsClient.deleteDBCluster( - (builder -> builder.skipFinalSnapshot(true).dbClusterIdentifier(dbIdentifier))); + (builder -> builder.skipFinalSnapshot(true).dbClusterIdentifier(identifier))); if (response.sdkHttpResponse().isSuccessful()) { break; } @@ -564,23 +698,44 @@ public void deleteAuroraCluster() { } catch (DbClusterNotFoundException ex) { // ignore + return; + } catch (InvalidDbClusterStateException ex) { + throw new RuntimeException("Error deleting db cluster " + identifier, ex); } catch (Exception ex) { - LOGGER.warning("Error deleting db cluster " + dbIdentifier + ": " + ex); + LOGGER.warning("Error deleting db cluster " + identifier + ": " + ex); + return; + } + } + + if (waitForCompletion) { + final RdsWaiter waiter = rdsClient.waiter(); + WaiterResponse waiterResponse = + waiter.waitUntilDBClusterDeleted( + (requestBuilder) -> + requestBuilder.filters( + Filter.builder().name("db-cluster-id").values(identifier).build()), + (configurationBuilder) -> configurationBuilder.waitTimeout(Duration.ofMinutes(60))); + + if (waiterResponse.matched().exception().isPresent()) { + throw new RuntimeException( + "Unable to delete AWS Aurora Cluster after waiting for 60 minutes"); } } } /** - * Destroys all instances and clusters. + * Deletes the specified multi-az cluster and removes the current IP address from the default security group. + * + * @param identifier the cluster identifier for the cluster to delete + * @param waitForCompletion if true, wait for cluster completely deleted */ - public void deleteMultiAzCluster() { - // deleteDBinstance requests are not necessary to delete a multi-az cluster. 
+ public void deleteMultiAzCluster(String identifier, boolean waitForCompletion) { // Tear down cluster int remainingAttempts = 5; while (--remainingAttempts > 0) { try { DeleteDbClusterResponse response = rdsClient.deleteDBCluster( - (builder -> builder.skipFinalSnapshot(true).dbClusterIdentifier(dbIdentifier))); + (builder -> builder.skipFinalSnapshot(true).dbClusterIdentifier(identifier))); if (response.sdkHttpResponse().isSuccessful()) { break; } @@ -588,12 +743,114 @@ public void deleteMultiAzCluster() { } catch (DbClusterNotFoundException ex) { // ignore + return; + } catch (Exception ex) { + LOGGER.warning("Error deleting db cluster " + identifier + ": " + ex); + return; + } + } + + if (waitForCompletion) { + final RdsWaiter waiter = rdsClient.waiter(); + WaiterResponse waiterResponse = + waiter.waitUntilDBClusterDeleted( + (requestBuilder) -> + requestBuilder.filters( + Filter.builder().name("db-cluster-id").values(identifier).build()), + (configurationBuilder) -> configurationBuilder.waitTimeout(Duration.ofMinutes(60))); + + if (waiterResponse.matched().exception().isPresent()) { + throw new RuntimeException( + "Unable to delete RDS MultiAz Cluster after waiting for 60 minutes"); + } + } + } + + public void deleteMultiAzInstance(final String identifier, boolean waitForCompletion) { + // Tear down MultiAz Instance + int remainingAttempts = 5; + while (--remainingAttempts > 0) { + try { + DeleteDbInstanceResponse response = rdsClient.deleteDBInstance( + builder -> builder.skipFinalSnapshot(true).dbInstanceIdentifier(identifier).build()); + if (response.sdkHttpResponse().isSuccessful()) { + break; + } + TimeUnit.SECONDS.sleep(30); + + } catch (InvalidDbInstanceStateException invalidDbInstanceStateException) { + // Instance is already being deleted. + // ignore it + LOGGER.finest("MultiAz Instance " + identifier + " is already being deleted. 
" + + invalidDbInstanceStateException); + break; + } catch (DbInstanceNotFoundException ex) { + // ignore + LOGGER.warning("Error deleting db MultiAz Instance " + identifier + ". Instance not found: " + ex); + break; } catch (Exception ex) { - LOGGER.warning("Error deleting db cluster " + dbIdentifier + ": " + ex); + LOGGER.warning("Error deleting db MultiAz Instance " + identifier + ": " + ex); + } + } + + if (waitForCompletion) { + final RdsWaiter waiter = rdsClient.waiter(); + WaiterResponse waiterResponse = + waiter.waitUntilDBInstanceDeleted( + (requestBuilder) -> + requestBuilder.filters( + Filter.builder().name("db-instance-id").values(identifier).build()), + (configurationBuilder) -> configurationBuilder.waitTimeout(Duration.ofMinutes(60))); + + if (waiterResponse.matched().exception().isPresent()) { + throw new RuntimeException( + "Unable to delete RDS MultiAz Instance after waiting for 60 minutes"); } } } + public void promoteClusterToStandalone(String clusterArn) { + if (StringUtils.isNullOrEmpty(clusterArn)) { + return; + } + + DBCluster clusterInfo = getClusterByArn(clusterArn); + + if (clusterInfo == null || StringUtils.isNullOrEmpty(clusterInfo.replicationSourceIdentifier())) { + return; + } + + PromoteReadReplicaDbClusterResponse response = rdsClient.promoteReadReplicaDBCluster( + PromoteReadReplicaDbClusterRequest.builder().dbClusterIdentifier(clusterInfo.dbClusterIdentifier()).build()); + if (!response.sdkHttpResponse().isSuccessful()) { + LOGGER.warning("Error promoting DB cluster to standalone cluster: " + + response.sdkHttpResponse().statusCode() + + " " + + response.sdkHttpResponse().statusText().orElse("")); + } + } + + public void promoteInstanceToStandalone(String instanceArn) { + if (StringUtils.isNullOrEmpty(instanceArn)) { + return; + } + + DBInstance instanceInfo = getRdsInstanceInfoByArn(instanceArn); + + if (instanceInfo == null || StringUtils.isNullOrEmpty(instanceInfo.readReplicaSourceDBInstanceIdentifier())) { + return; + } + + 
PromoteReadReplicaResponse response = rdsClient.promoteReadReplica( + PromoteReadReplicaRequest.builder().dbInstanceIdentifier(instanceInfo.dbInstanceIdentifier()).build()); + if (!response.sdkHttpResponse().isSuccessful()) { + LOGGER.warning("Error promoting DB instance to standalone instance: " + + response.sdkHttpResponse().statusCode() + + " " + + response.sdkHttpResponse().statusText().orElse("")); + } + } + public boolean doesClusterExist(final String clusterId) { final DescribeDbClustersRequest request = DescribeDbClustersRequest.builder().dbClusterIdentifier(clusterId).build(); @@ -605,6 +862,17 @@ public boolean doesClusterExist(final String clusterId) { return true; } + public boolean doesInstanceExist(final String instanceId) { + final DescribeDbInstancesRequest request = + DescribeDbInstancesRequest.builder().dbInstanceIdentifier(instanceId).build(); + try { + DescribeDbInstancesResponse response = rdsClient.describeDBInstances(request); + return response.sdkHttpResponse().isSuccessful(); + } catch (DbInstanceNotFoundException ex) { + return false; + } + } + public DBCluster getClusterInfo(final String clusterId) { final DescribeDbClustersRequest request = DescribeDbClustersRequest.builder().dbClusterIdentifier(clusterId).build(); @@ -616,6 +884,43 @@ public DBCluster getClusterInfo(final String clusterId) { return response.dbClusters().get(0); } + public DBCluster getClusterByArn(final String clusterArn) { + final DescribeDbClustersRequest request = + DescribeDbClustersRequest.builder() + .filters(Filter.builder().name("db-cluster-id").values(clusterArn).build()) + .build(); + final DescribeDbClustersResponse response = rdsClient.describeDBClusters(request); + if (!response.hasDbClusters()) { + return null; + } + + return response.dbClusters().get(0); + } + + public DBInstance getRdsInstanceInfo(final String instanceId) { + final DescribeDbInstancesRequest request = + DescribeDbInstancesRequest.builder().dbInstanceIdentifier(instanceId).build(); + 
final DescribeDbInstancesResponse response = rdsClient.describeDBInstances(request); + if (!response.hasDbInstances()) { + throw new RuntimeException("RDS Instance " + instanceId + " not found."); + } + + return response.dbInstances().get(0); + } + + public DBInstance getRdsInstanceInfoByArn(final String instanceArn) { + final DescribeDbInstancesRequest request = + DescribeDbInstancesRequest.builder().filters( + Filter.builder().name("db-instance-id").values(instanceArn).build()) + .build(); + final DescribeDbInstancesResponse response = rdsClient.describeDBInstances(request); + if (!response.hasDbInstances()) { + return null; + } + + return response.dbInstances().get(0); + } + public DatabaseEngine getClusterEngine(final DBCluster cluster) { switch (cluster.engine()) { case "aurora-postgresql": @@ -629,6 +934,152 @@ public DatabaseEngine getClusterEngine(final DBCluster cluster) { } } + public static String getDbInstanceClass(TestEnvironmentRequest request) { + switch (request.getDatabaseEngineDeployment()) { + case AURORA: + return request.getFeatures().contains(TestEnvironmentFeatures.BLUE_GREEN_DEPLOYMENT) + ? 
"db.r7g.2xlarge" + : "db.r5.large"; + case RDS: + case RDS_MULTI_AZ_INSTANCE: + case RDS_MULTI_AZ_CLUSTER: + return "db.m5d.large"; + default: + throw new NotImplementedException(request.getDatabaseEngineDeployment().toString()); + } + } + + public DatabaseEngine getRdsInstanceEngine(final DBInstance instance) { + switch (instance.engine()) { + case "postgres": + return DatabaseEngine.PG; + case "mysql": + return DatabaseEngine.MYSQL; + default: + throw new UnsupportedOperationException(instance.engine()); + } + } + + public String getAuroraParameterGroupFamily(String engine, String engineVersion) { + switch (engine) { + case "aurora-postgresql": + return "aurora-postgresql16"; + case "aurora-mysql": + return "aurora-mysql8.0"; + default: + throw new UnsupportedOperationException(engine); + } + } + + public List getTestInstancesInfo(final String clusterId) { + List dbInstances = getDBInstances(clusterId); + List instancesInfo = new ArrayList<>(); + for (DBInstance dbInstance : dbInstances) { + instancesInfo.add( + new TestInstanceInfo( + dbInstance.dbInstanceIdentifier(), + dbInstance.endpoint().address(), + dbInstance.endpoint().port())); + } + + return instancesInfo; + } + + public void waitUntilClusterHasRightState(String clusterId) throws InterruptedException { + waitUntilClusterHasRightState(clusterId, "available"); + } + + public void waitUntilClusterHasRightState(String clusterId, String... 
allowedStatuses) throws InterruptedException { + String status = getDBCluster(clusterId).status(); + LOGGER.finest("Cluster status: " + status + ", waiting for status: " + String.join(", ", allowedStatuses)); + final Set allowedStatusSet = Arrays.stream(allowedStatuses) + .map(String::toLowerCase) + .collect(Collectors.toSet()); + final long waitTillNanoTime = System.nanoTime() + TimeUnit.MINUTES.toNanos(15); + while (!allowedStatusSet.contains(status.toLowerCase()) && waitTillNanoTime > System.nanoTime()) { + TimeUnit.MILLISECONDS.sleep(1000); + String tmpStatus = getDBCluster(clusterId).status(); + if (!tmpStatus.equalsIgnoreCase(status)) { + LOGGER.finest("Cluster status (waiting): " + tmpStatus); + } + status = tmpStatus; + } + LOGGER.finest("Cluster status (after wait): " + status); + } + + public DBCluster getDBCluster(String clusterId) { + DescribeDbClustersResponse dbClustersResult = null; + int remainingTries = 5; + while (remainingTries-- > 0) { + try { + dbClustersResult = rdsClient.describeDBClusters((builder) -> builder.dbClusterIdentifier(clusterId)); + break; + } catch (DbClusterNotFoundException ex) { + return null; + } catch (SdkClientException sdkClientException) { + if (remainingTries == 0) { + throw sdkClientException; + } + } + } + + if (dbClustersResult == null) { + fail("Unable to get DB cluster info for cluster with ID " + clusterId); + } + + final List dbClusterList = dbClustersResult.dbClusters(); + return dbClusterList.get(0); + } + + public DBInstance getDBInstance(String instanceId) { + DescribeDbInstancesResponse dbInstanceResult = null; + int remainingTries = 5; + while (remainingTries-- > 0) { + try { + dbInstanceResult = rdsClient.describeDBInstances((builder) -> builder.dbInstanceIdentifier(instanceId)); + break; + } catch (SdkClientException sdkClientException) { + if (remainingTries == 0) { + throw sdkClientException; + } + + try { + TimeUnit.SECONDS.sleep(30); + } catch (InterruptedException ex) { + 
Thread.currentThread().interrupt(); + throw new RuntimeException(ex); + } + } + } + + if (dbInstanceResult == null) { + fail("Unable to get DB instance info for instance with ID " + instanceId); + } + + final List dbClusterList = dbInstanceResult.dbInstances(); + return dbClusterList.get(0); + } + + public void waitUntilInstanceHasRightState(String instanceId, String... allowedStatuses) throws InterruptedException { + + String status = getDBInstance(instanceId).dbInstanceStatus(); + LOGGER.finest("Instance " + instanceId + " status: " + status + + ", waiting for status: " + String.join(", ", allowedStatuses)); + final Set allowedStatusSet = Arrays.stream(allowedStatuses) + .map(String::toLowerCase) + .collect(Collectors.toSet()); + final long waitTillNanoTime = System.nanoTime() + TimeUnit.MINUTES.toNanos(15); + while (!allowedStatusSet.contains(status.toLowerCase()) && waitTillNanoTime > System.nanoTime()) { + TimeUnit.MILLISECONDS.sleep(1000); + String tmpStatus = getDBInstance(instanceId).dbInstanceStatus(); + if (!tmpStatus.equalsIgnoreCase(status)) { + LOGGER.finest("Instance " + instanceId + " status (waiting): " + tmpStatus); + } + status = tmpStatus; + } + LOGGER.finest("Instance " + instanceId + " status (after wait): " + status); + } + public List getClusterInstanceIds(final String clusterId) { final DescribeDbInstancesResponse dbInstancesResult = rdsClient.describeDBInstances( @@ -663,9 +1114,9 @@ public void addAuroraAwsIamUser( String userName, String password, String dbUser, - String databaseName) + String databaseName, + boolean useRdsTools) throws SQLException { - AuroraTestUtility.registerDriver(databaseEngine); try (final Connection conn = DriverManager.getConnection(connectionUrl, userName, password); final Statement stmt = conn.createStatement()) { @@ -675,13 +1126,28 @@ public void addAuroraAwsIamUser( stmt.execute("DROP USER IF EXISTS " + dbUser + ";"); stmt.execute( "CREATE USER " + dbUser + " IDENTIFIED WITH AWSAuthenticationPlugin AS 
'RDS';"); - stmt.execute("GRANT ALL PRIVILEGES ON " + databaseName + ".* TO '" + dbUser + "'@'%';"); + if (!StringUtils.isNullOrEmpty(databaseName)) { + stmt.execute("GRANT ALL PRIVILEGES ON " + databaseName + ".* TO '" + dbUser + "'@'%';"); + } else { + stmt.execute("GRANT ALL PRIVILEGES ON `%`.* TO '" + dbUser + "'@'%';"); + } + + // BG switchover status needs it. + stmt.execute("GRANT SELECT ON mysql.* TO '" + dbUser + "'@'%';"); break; case PG: stmt.execute("DROP USER IF EXISTS " + dbUser + ";"); stmt.execute("CREATE USER " + dbUser + ";"); stmt.execute("GRANT rds_iam TO " + dbUser + ";"); - stmt.execute("GRANT ALL PRIVILEGES ON DATABASE " + databaseName + " TO " + dbUser + ";"); + if (!StringUtils.isNullOrEmpty(databaseName)) { + stmt.execute("GRANT ALL PRIVILEGES ON DATABASE " + databaseName + " TO " + dbUser + ";"); + } + + if (useRdsTools) { + // BG switchover status needs it. + stmt.execute("GRANT USAGE ON SCHEMA rds_tools TO " + dbUser + ";"); + stmt.execute("GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA rds_tools TO " + dbUser + ";"); + } break; default: throw new UnsupportedOperationException(databaseEngine.toString()); @@ -693,13 +1159,14 @@ public void createRdsExtension( DatabaseEngine databaseEngine, String connectionUrl, String userName, - String password) - throws SQLException { + String password) { AuroraTestUtility.registerDriver(databaseEngine); try (final Connection conn = DriverManager.getConnection(connectionUrl, userName, password); final Statement stmt = conn.createStatement()) { stmt.execute("CREATE EXTENSION IF NOT EXISTS rds_tools"); + } catch (SQLException e) { + throw new RuntimeException("An exception occurred while creating the rds_tools extension.", e); } } @@ -737,4 +1204,198 @@ private List getEngineVersions(String engine) { } return res; } + + public String createBlueGreenDeployment(String name, String sourceArn) { + + final String blueGreenName = "bgd-" + name; + + CreateBlueGreenDeploymentResponse response = null; + int count = 10; 
+ while (response == null && count-- > 0) { + try { + response = rdsClient.createBlueGreenDeployment( + CreateBlueGreenDeploymentRequest.builder() + .blueGreenDeploymentName(blueGreenName) + .source(sourceArn) + .tags(this.getTag()) + .build()); + } catch (RdsException ex) { + if (ex.statusCode() != 500 || count == 0) { + throw ex; + } + + LOGGER.finest("Can't send createBlueGreenDeployment request. Wait 1min and try again."); + + try { + TimeUnit.MINUTES.sleep(1); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new RuntimeException(e); + } + } + } + + if (response == null) { + throw new RuntimeException("Can't send createBlueGreenDeployment request."); + } + + if (!response.sdkHttpResponse().isSuccessful()) { + LOGGER.finest(String.format("createBlueGreenDeployment response: %d, %s", + response.sdkHttpResponse().statusCode(), + response.sdkHttpResponse().statusText())); + throw new RuntimeException(response.sdkHttpResponse().statusText().orElse("Unspecified error.")); + } else { + LOGGER.finest("createBlueGreenDeployment request is sent"); + } + + String blueGreenId = response.blueGreenDeployment().blueGreenDeploymentIdentifier(); + + BlueGreenDeployment blueGreenDeployment = getBlueGreenDeployment(blueGreenId); + long end = System.nanoTime() + TimeUnit.MINUTES.toNanos(240); + while ((blueGreenDeployment == null || !blueGreenDeployment.status().equalsIgnoreCase("available")) + && System.nanoTime() < end) { + try { + TimeUnit.SECONDS.sleep(60); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + blueGreenDeployment = getBlueGreenDeployment(blueGreenId); + } + + if (blueGreenDeployment == null || !blueGreenDeployment.status().equalsIgnoreCase("available")) { + throw new RuntimeException("BlueGreen Deployment " + blueGreenId + " isn't available."); + } + + return blueGreenId; + } + + public void waitUntilBlueGreenDeploymentHasRightState(String blueGreenId, String... 
allowedStatuses) { + + String status = getBlueGreenDeployment(blueGreenId).status(); + LOGGER.finest("BGD status: " + status + ", waiting for status: " + String.join(", ", allowedStatuses)); + final Set allowedStatusSet = Arrays.stream(allowedStatuses) + .map(String::toLowerCase) + .collect(Collectors.toSet()); + final long waitTillNanoTime = System.nanoTime() + TimeUnit.MINUTES.toNanos(15); + while (!allowedStatusSet.contains(status.toLowerCase()) && waitTillNanoTime > System.nanoTime()) { + try { + TimeUnit.MILLISECONDS.sleep(1000); + } catch (InterruptedException ex) { + throw new RuntimeException(ex); + } + String tmpStatus = getBlueGreenDeployment(blueGreenId).status(); + if (!tmpStatus.equalsIgnoreCase(status)) { + LOGGER.finest("BGD status (waiting): " + tmpStatus); + } + status = tmpStatus; + } + LOGGER.finest("BGD status (after wait): " + status); + + if (!allowedStatusSet.contains(status.toLowerCase())) { + throw new RuntimeException("BlueGreen Deployment " + blueGreenId + " has wrong status."); + } + } + + public void switchoverBlueGreenDeployment(String blueGreenId) { + SwitchoverBlueGreenDeploymentResponse response = rdsClient.switchoverBlueGreenDeployment( + SwitchoverBlueGreenDeploymentRequest.builder() + .blueGreenDeploymentIdentifier(blueGreenId) + .build()); + + if (!response.sdkHttpResponse().isSuccessful()) { + LOGGER.finest(String.format("switchoverBlueGreenDeployment response: %d, %s", + response.sdkHttpResponse().statusCode(), + response.sdkHttpResponse().statusText())); + throw new RuntimeException(response.sdkHttpResponse().statusText().orElse("Unspecified error.")); + } else { + LOGGER.finest("switchoverBlueGreenDeployment request is sent"); + } + } + + public boolean doesBlueGreenDeploymentExist(String blueGreenId) { + try { + DescribeBlueGreenDeploymentsResponse response = rdsClient.describeBlueGreenDeployments( + builder -> builder.blueGreenDeploymentIdentifier(blueGreenId)); + return response.blueGreenDeployments() != null && 
!response.blueGreenDeployments().isEmpty(); + } catch (BlueGreenDeploymentNotFoundException ex) { + LOGGER.finest("blueGreenDeployments not found"); + return false; + } + } + + public BlueGreenDeployment getBlueGreenDeployment(String blueGreenId) { + try { + DescribeBlueGreenDeploymentsResponse response = rdsClient.describeBlueGreenDeployments( + builder -> builder.blueGreenDeploymentIdentifier(blueGreenId)); + if (response.hasBlueGreenDeployments()) { + return response.blueGreenDeployments().get(0); + } + return null; + } catch (BlueGreenDeploymentNotFoundException ex) { + return null; + } + } + + public BlueGreenDeployment getBlueGreenDeploymentBySource(String sourceArn) { + try { + DescribeBlueGreenDeploymentsResponse response = rdsClient.describeBlueGreenDeployments( + builder -> builder.filters(f -> f.name("source").values(sourceArn))); + if (!response.blueGreenDeployments().isEmpty()) { + return response.blueGreenDeployments().get(0); + } + return null; + } catch (BlueGreenDeploymentNotFoundException ex) { + return null; + } + } + + public void deleteBlueGreenDeployment(String blueGreenId, boolean waitForCompletion) { + + if (!doesBlueGreenDeploymentExist(blueGreenId)) { + return; + } + + waitUntilBlueGreenDeploymentHasRightState(blueGreenId, "available", "switchover_completed"); + + DeleteBlueGreenDeploymentResponse response = rdsClient.deleteBlueGreenDeployment( + DeleteBlueGreenDeploymentRequest.builder() + .blueGreenDeploymentIdentifier(blueGreenId) + .build()); + + if (!response.sdkHttpResponse().isSuccessful()) { + LOGGER.finest(String.format("deleteBlueGreenDeployment response: %d, %s", + response.sdkHttpResponse().statusCode(), + response.sdkHttpResponse().statusText())); + throw new RuntimeException(response.sdkHttpResponse().statusText().orElse("Unspecified error.")); + } else { + LOGGER.finest("deleteBlueGreenDeployment request is sent"); + } + + if (waitForCompletion) { + long endTimeNano = System.nanoTime() + TimeUnit.MINUTES.toNanos(120); + 
while (doesBlueGreenDeploymentExist(blueGreenId) && endTimeNano > System.nanoTime()) { + try { + TimeUnit.MINUTES.sleep(1); + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + return; + } + } + + if (doesBlueGreenDeploymentExist(blueGreenId)) { + throw new RuntimeException( + "Unable to delete Blue/Green Deployment after waiting for 120 minutes"); + } + } + } + + private Tag getTag() { + ZoneId zoneId = ZoneId.of("America/Los_Angeles"); + ZonedDateTime zdt = Instant.now().atZone(zoneId); + String timeStr = zdt.format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss zzz")); + return Tag.builder() + .key("env").value("test-runner") + .key("created").value(timeStr) + .build(); + } } diff --git a/tests/unit/adfs_credentials_provider.test.ts b/tests/unit/adfs_credentials_provider.test.ts index d3c5a221..a591c3de 100644 --- a/tests/unit/adfs_credentials_provider.test.ts +++ b/tests/unit/adfs_credentials_provider.test.ts @@ -17,9 +17,7 @@ import { WrapperProperties } from "../../common/lib/wrapper_property"; import { readFileSync } from "fs"; import { anything, instance, mock, spy, when } from "ts-mockito"; -import { - AdfsCredentialsProviderFactory -} from "../../common/lib/plugins/federated_auth/adfs_credentials_provider_factory"; +import { AdfsCredentialsProviderFactory } from "../../common/lib/plugins/federated_auth/adfs_credentials_provider_factory"; import { PluginServiceImpl } from "../../common/lib/plugin_service"; import { NullTelemetryFactory } from "../../common/lib/utils/telemetry/null_telemetry_factory"; diff --git a/tests/unit/aurora_connection_tracker.test.ts b/tests/unit/aurora_connection_tracker.test.ts index 86890e46..3f0f4888 100644 --- a/tests/unit/aurora_connection_tracker.test.ts +++ b/tests/unit/aurora_connection_tracker.test.ts @@ -20,9 +20,7 @@ import { HostRole } from "../../common/lib/host_role"; import { SimpleHostAvailabilityStrategy } from "../../common/lib/host_availability/simple_host_availability_strategy"; import 
{ PluginServiceImpl } from "../../common/lib/plugin_service"; import { anything, instance, mock, reset, verify, when } from "ts-mockito"; -import { - AuroraConnectionTrackerPlugin -} from "../../common/lib/plugins/connection_tracker/aurora_connection_tracker_plugin"; +import { AuroraConnectionTrackerPlugin } from "../../common/lib/plugins/connection_tracker/aurora_connection_tracker_plugin"; import { OpenedConnectionTracker } from "../../common/lib/plugins/connection_tracker/opened_connection_tracker"; import { RdsUtils } from "../../common/lib/utils/rds_utils"; import { RdsUrlType } from "../../common/lib/utils/rds_url_type"; diff --git a/tests/unit/aurora_initial_connection_strategy_plugin.test.ts b/tests/unit/aurora_initial_connection_strategy_plugin.test.ts index f4c75e9a..cf48fb2e 100644 --- a/tests/unit/aurora_initial_connection_strategy_plugin.test.ts +++ b/tests/unit/aurora_initial_connection_strategy_plugin.test.ts @@ -14,9 +14,7 @@ limitations under the License. */ -import { - AuroraInitialConnectionStrategyPlugin -} from "../../common/lib/plugins/aurora_initial_connection_strategy_plugin"; +import { AuroraInitialConnectionStrategyPlugin } from "../../common/lib/plugins/aurora_initial_connection_strategy_plugin"; import { PluginServiceImpl } from "../../common/lib/plugin_service"; import { anything, instance, mock, reset, spy, verify, when } from "ts-mockito"; import { WrapperProperties } from "../../common/lib/wrapper_property"; diff --git a/tests/unit/aws_secrets_manager_plugin.test.ts b/tests/unit/aws_secrets_manager_plugin.test.ts index f2c11930..3bb166cc 100644 --- a/tests/unit/aws_secrets_manager_plugin.test.ts +++ b/tests/unit/aws_secrets_manager_plugin.test.ts @@ -15,11 +15,7 @@ */ import { SecretsManagerClient, SecretsManagerServiceException } from "@aws-sdk/client-secrets-manager"; -import { - AwsSecretsManagerPlugin, - Secret, - SecretCacheKey -} from "../../common/lib/authentication/aws_secrets_manager_plugin"; +import { 
AwsSecretsManagerPlugin, Secret, SecretCacheKey } from "../../common/lib/authentication/aws_secrets_manager_plugin"; import { AwsClient } from "../../common/lib/aws_client"; import { SimpleHostAvailabilityStrategy } from "../../common/lib/host_availability/simple_host_availability_strategy"; import { HostInfo } from "../../common/lib/host_info"; diff --git a/tests/unit/database_dialect.test.ts b/tests/unit/database_dialect.test.ts index 55ece622..6b2d0875 100644 --- a/tests/unit/database_dialect.test.ts +++ b/tests/unit/database_dialect.test.ts @@ -58,8 +58,9 @@ const MYSQL_QUERY = "SHOW VARIABLES LIKE 'version_comment'"; const RDS_MYSQL_QUERY = "SHOW VARIABLES LIKE 'version_comment'"; const AURORA_MYSQL_QUERY = "SHOW VARIABLES LIKE 'aurora_version'"; const TAZ_MYSQL_QUERIES: string[] = [ - "SELECT 1 AS tmp FROM information_schema.tables WHERE" + " table_schema = 'mysql' AND table_name = 'rds_topology'", - "SELECT id, endpoint, port FROM mysql.rds_topology" + "SELECT 1 AS tmp FROM information_schema.tables WHERE table_schema = 'mysql' AND table_name = 'rds_topology'", + "SELECT id, endpoint, port FROM mysql.rds_topology", + "SHOW VARIABLES LIKE 'report_host'" ]; const PG_QUERY = "SELECT 1 FROM pg_proc LIMIT 1"; const RDS_PG_QUERY = @@ -68,7 +69,8 @@ const RDS_PG_QUERY = const AURORA_PG_QUERY = "SELECT (setting LIKE '%aurora_stat_utils%') AS aurora_stat_utils FROM pg_settings WHERE name='rds.extensions'"; const TAZ_PG_QUERIES: string[] = [ "SELECT 1 AS tmp FROM information_schema.routines WHERE routine_schema='rds_tools' AND routine_name='multi_az_db_cluster_source_dbi_resource_id'", - "SELECT multi_az_db_cluster_source_dbi_resource_id FROM rds_tools.multi_az_db_cluster_source_dbi_resource_id()" + "SELECT multi_az_db_cluster_source_dbi_resource_id FROM rds_tools.multi_az_db_cluster_source_dbi_resource_id()", + "SELECT 'rds_tools.show_topology'::regproc" ]; const mysqlResult = [ diff --git a/tests/unit/exponential_backoff_host_availability_strategy.test.ts 
b/tests/unit/exponential_backoff_host_availability_strategy.test.ts index 78e06a7c..f13eafd2 100644 --- a/tests/unit/exponential_backoff_host_availability_strategy.test.ts +++ b/tests/unit/exponential_backoff_host_availability_strategy.test.ts @@ -14,9 +14,7 @@ limitations under the License. */ -import { - ExponentialBackoffHostAvailabilityStrategy -} from "../../common/lib/host_availability/exponential_backoff_host_availability_strategy"; +import { ExponentialBackoffHostAvailabilityStrategy } from "../../common/lib/host_availability/exponential_backoff_host_availability_strategy"; import { HostAvailability } from "../../common/lib/host_availability/host_availability"; import { sleep } from "../../common/lib/utils/utils"; import { WrapperProperties } from "../../common/lib/wrapper_property"; diff --git a/tests/unit/failover2_plugin.test.ts b/tests/unit/failover2_plugin.test.ts index b89961da..a19ad77b 100644 --- a/tests/unit/failover2_plugin.test.ts +++ b/tests/unit/failover2_plugin.test.ts @@ -23,12 +23,7 @@ import { RdsHostListProvider } from "../../common/lib/host_list_provider/rds_hos import { HostRole } from "../../common/lib/host_role"; import { PluginService, PluginServiceImpl } from "../../common/lib/plugin_service"; import { FailoverMode } from "../../common/lib/plugins/failover/failover_mode"; -import { - AwsWrapperError, - FailoverFailedError, - FailoverSuccessError, - TransactionResolutionUnknownError -} from "../../common/lib/utils/errors"; +import { AwsWrapperError, FailoverFailedError, FailoverSuccessError, TransactionResolutionUnknownError } from "../../common/lib/utils/errors"; import { RdsUrlType } from "../../common/lib/utils/rds_url_type"; import { RdsUtils } from "../../common/lib/utils/rds_utils"; import { WrapperProperties } from "../../common/lib/wrapper_property"; diff --git a/tests/unit/failover_plugin.test.ts b/tests/unit/failover_plugin.test.ts index 949f09f8..cc7a49fa 100644 --- a/tests/unit/failover_plugin.test.ts +++ 
b/tests/unit/failover_plugin.test.ts @@ -28,12 +28,7 @@ import { ClusterAwareReaderFailoverHandler } from "../../common/lib/plugins/fail import { ReaderFailoverResult } from "../../common/lib/plugins/failover/reader_failover_result"; import { ClusterAwareWriterFailoverHandler } from "../../common/lib/plugins/failover/writer_failover_handler"; import { WriterFailoverResult } from "../../common/lib/plugins/failover/writer_failover_result"; -import { - AwsWrapperError, - FailoverFailedError, - FailoverSuccessError, - TransactionResolutionUnknownError -} from "../../common/lib/utils/errors"; +import { AwsWrapperError, FailoverFailedError, FailoverSuccessError, TransactionResolutionUnknownError } from "../../common/lib/utils/errors"; import { RdsUrlType } from "../../common/lib/utils/rds_url_type"; import { RdsUtils } from "../../common/lib/utils/rds_utils"; import { WrapperProperties } from "../../common/lib/wrapper_property"; diff --git a/tests/unit/host_availability_strategy_factory.test.ts b/tests/unit/host_availability_strategy_factory.test.ts index 3d4f68cd..ae5a7c70 100644 --- a/tests/unit/host_availability_strategy_factory.test.ts +++ b/tests/unit/host_availability_strategy_factory.test.ts @@ -16,9 +16,7 @@ import { HostAvailabilityStrategyFactory } from "../../common/lib/host_availability/host_availability_strategy_factory"; import { SimpleHostAvailabilityStrategy } from "../../common/lib/host_availability/simple_host_availability_strategy"; -import { - ExponentialBackoffHostAvailabilityStrategy -} from "../../common/lib/host_availability/exponential_backoff_host_availability_strategy"; +import { ExponentialBackoffHostAvailabilityStrategy } from "../../common/lib/host_availability/exponential_backoff_host_availability_strategy"; import { WrapperProperties } from "../../common/lib/wrapper_property"; describe("hostAvailabilityStrategyFactoryTests", () => { diff --git a/tests/unit/internal_pool_connection_provider.test.ts 
b/tests/unit/internal_pool_connection_provider.test.ts index a35d8021..59f4a857 100644 --- a/tests/unit/internal_pool_connection_provider.test.ts +++ b/tests/unit/internal_pool_connection_provider.test.ts @@ -34,9 +34,7 @@ import { AwsMySQLClient } from "../../mysql/lib"; import { MySQLDatabaseDialect } from "../../mysql/lib/dialect/mysql_database_dialect"; import { MySQL2DriverDialect } from "../../mysql/lib/dialect/mysql2_driver_dialect"; import { PoolClientWrapper } from "../../common/lib/pool_client_wrapper"; -import { - SlidingExpirationCacheWithCleanupTask -} from "../../common/lib/utils/sliding_expiration_cache_with_cleanup_task"; +import { SlidingExpirationCacheWithCleanupTask } from "../../common/lib/utils/sliding_expiration_cache_with_cleanup_task"; const user1 = "user1"; const user2 = "user2"; diff --git a/tests/unit/okta_credentials_provider.test.ts b/tests/unit/okta_credentials_provider.test.ts index fe71c551..9c7a2015 100644 --- a/tests/unit/okta_credentials_provider.test.ts +++ b/tests/unit/okta_credentials_provider.test.ts @@ -17,9 +17,7 @@ import { instance, mock, spy, when } from "ts-mockito"; import { WrapperProperties } from "../../common/lib/wrapper_property"; import { readFileSync } from "fs"; -import { - OktaCredentialsProviderFactory -} from "../../common/lib/plugins/federated_auth/okta_credentials_provider_factory"; +import { OktaCredentialsProviderFactory } from "../../common/lib/plugins/federated_auth/okta_credentials_provider_factory"; import { PluginServiceImpl } from "../../common/lib/plugin_service"; import { NullTelemetryFactory } from "../../common/lib/utils/telemetry/null_telemetry_factory"; import { jest } from "@jest/globals"; diff --git a/tests/unit/sliding_expiration_cache.test.ts b/tests/unit/sliding_expiration_cache.test.ts index bedba777..490e772f 100644 --- a/tests/unit/sliding_expiration_cache.test.ts +++ b/tests/unit/sliding_expiration_cache.test.ts @@ -16,9 +16,7 @@ import { SlidingExpirationCache } from 
"../../common/lib/utils/sliding_expiration_cache"; import { convertNanosToMs, sleep } from "../../common/lib/utils/utils"; -import { - SlidingExpirationCacheWithCleanupTask -} from "../../common/lib/utils/sliding_expiration_cache_with_cleanup_task"; +import { SlidingExpirationCacheWithCleanupTask } from "../../common/lib/utils/sliding_expiration_cache_with_cleanup_task"; class DisposableItem { shouldDispose: boolean;