diff --git a/CHANGELOG.md b/CHANGELOG.md index 30d391669..5631d8ea7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -304,7 +304,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### :magic_wand: Added - Developer plugin to help test various scenarios including events like network outages and database cluster failover. This plugin is NOT intended to be used in production environments and is only for testing ([PR #531](https://github.com/awslabs/aws-advanced-jdbc-wrapper/pull/531)). - Documentation: - - Developer plugin. See [UsingTheJdbcDriver](https://github.com/awslabs/aws-advanced-jdbc-wrapper/blob/main/docs/using-the-jdbc-driver/UsingTheJdbcDriver.md#list-of-available-plugins) and [UsingTheDeveloperPlugin](https://github.com/awslabs/aws-advanced-jdbc-wrapper/blob/main/docs/using-the-jdbc-driver/using-plugins/UsingTheDeveloperPlugin.md). + - Developer plugin. See [UsingTheJdbcDriver](https://github.com/aws/aws-advanced-jdbc-wrapper/blob/main/docs/using-the-jdbc-driver/UsingTheJdbcDriver.md#list-of-available-plugins) and [UsingTheDeveloperPlugin](https://github.com/awslabs/aws-advanced-jdbc-wrapper/blob/main/docs/using-the-jdbc-driver/using-plugins/UsingTheDeveloperPlugin.md). - MySQL code samples ([PR #532](https://github.com/awslabs/aws-advanced-jdbc-wrapper/pull/532)). - Add a Table of Contents section for the sample codes on README.md. See [README.md](https://github.com/awslabs/aws-advanced-jdbc-wrapper/blob/main/README.md#examples). - Sample tutorial and code example for Vert.x. See the [tutorial](https://github.com/awslabs/aws-advanced-jdbc-wrapper/blob/main/examples/VertxExample/README.md) and [code example](https://github.com/awslabs/aws-advanced-jdbc-wrapper/blob/main/examples/VertxExample/src/main/java/com/example/starter/MainVerticle.java). 
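Note: for readers configuring the host monitoring parameters renamed in the README change below (MonitorServiceImpl -> HostMonitorServiceImpl), here is a minimal connection sketch. The endpoint and credential values are placeholders and the "efm2" plugin selection is only an example; the property names match the wrapper documentation and the README table that follows.

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Properties;

    public class MonitoringConnectExample {
      public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.setProperty("user", "exampleUser");           // placeholder credentials
        props.setProperty("password", "examplePassword");
        props.setProperty("wrapperPlugins", "efm2");         // enable enhanced failure monitoring (illustrative choice)
        props.setProperty("failureDetectionTime", "30000");  // ms before monitoring probes start
        props.setProperty("monitorDisposalTime", "600000");  // ms an idle monitor is retained (HostMonitorServiceImpl.MONITOR_DISPOSAL_TIME_MS)
        try (Connection conn = DriverManager.getConnection(
            "jdbc:aws-wrapper:postgresql://example-cluster.cluster-xyz.us-east-1.rds.amazonaws.com:5432/postgres",
            props)) {
          System.out.println(conn.isValid(5));
        }
      }
    }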
diff --git a/README.md b/README.md index de0eb2b18..81569bb73 100644 --- a/README.md +++ b/README.md @@ -86,7 +86,7 @@ You can find our driver by searching in The Central Repository with GroupId and | `failureDetectionEnabled` | `HostMonitoringConnectionPlugin.FAILURE_DETECTION_ENABLED` | [HostMonitoringPlugin](./docs/using-the-jdbc-driver/using-plugins/UsingTheHostMonitoringPlugin.md) | | `failureDetectionInterval` | `HostMonitoringConnectionPlugin.FAILURE_DETECTION_INTERVAL` | [HostMonitoringPlugin](./docs/using-the-jdbc-driver/using-plugins/UsingTheHostMonitoringPlugin.md) | | `failureDetectionTime` | `HostMonitoringConnectionPlugin.FAILURE_DETECTION_TIME` | [HostMonitoringPlugin](./docs/using-the-jdbc-driver/using-plugins/UsingTheHostMonitoringPlugin.md) | -| `monitorDisposalTime` | `MonitorServiceImpl.MONITOR_DISPOSAL_TIME_MS` | [HostMonitoringPlugin](./docs/using-the-jdbc-driver/using-plugins/UsingTheHostMonitoringPlugin.md) | +| `monitorDisposalTime` | `HostMonitorServiceImpl.MONITOR_DISPOSAL_TIME_MS` | [HostMonitoringPlugin](./docs/using-the-jdbc-driver/using-plugins/UsingTheHostMonitoringPlugin.md) | | `iamDefaultPort` | `IamAuthConnectionPlugin.IAM_DEFAULT_PORT` | [IamAuthenticationPlugin](./docs/using-the-jdbc-driver/using-plugins/UsingTheIamAuthenticationPlugin.md) | | `iamHost` | `IamAuthConnectionPlugin.IAM_HOST` | [IamAuthenticationPlugin](./docs/using-the-jdbc-driver/using-plugins/UsingTheIamAuthenticationPlugin.md) | | `iamRegion` | `IamAuthConnectionPlugin.IAM_REGION` | [IamAuthenticationPlugin](./docs/using-the-jdbc-driver/using-plugins/UsingTheIamAuthenticationPlugin.md) | diff --git a/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/ConnectionPluginManagerBenchmarks.java b/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/ConnectionPluginManagerBenchmarks.java index 76f488112..6cb455805 100644 --- a/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/ConnectionPluginManagerBenchmarks.java +++ b/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/ConnectionPluginManagerBenchmarks.java @@ -65,15 +65,16 @@ import software.amazon.jdbc.benchmarks.testplugin.BenchmarkPluginFactory; import software.amazon.jdbc.dialect.Dialect; import software.amazon.jdbc.hostavailability.SimpleHostAvailabilityStrategy; +import software.amazon.jdbc.profile.ConfigurationProfile; +import software.amazon.jdbc.profile.ConfigurationProfileBuilder; +import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; +import software.amazon.jdbc.util.FullServicesContainer; import software.amazon.jdbc.util.telemetry.DefaultTelemetryFactory; import software.amazon.jdbc.util.telemetry.GaugeCallable; import software.amazon.jdbc.util.telemetry.TelemetryContext; import software.amazon.jdbc.util.telemetry.TelemetryCounter; import software.amazon.jdbc.util.telemetry.TelemetryFactory; import software.amazon.jdbc.util.telemetry.TelemetryGauge; -import software.amazon.jdbc.profile.ConfigurationProfile; -import software.amazon.jdbc.profile.ConfigurationProfileBuilder; -import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; import software.amazon.jdbc.wrapper.ConnectionWrapper; @State(Scope.Benchmark) @@ -94,6 +95,7 @@ public class ConnectionPluginManagerBenchmarks { @Mock ConnectionProvider mockConnectionProvider; @Mock ConnectionWrapper mockConnectionWrapper; + @Mock FullServicesContainer mockServicesContainer; @Mock PluginService mockPluginService; @Mock PluginManagerService mockPluginManagerService; @Mock TelemetryFactory mockTelemetryFactory; @@ -138,6 +140,7 @@ public 
void setUpIteration() throws Exception { when(mockResultSet.getString(eq(FIELD_SESSION_ID))).thenReturn(WRITER_SESSION_ID); when(mockResultSet.getString(eq(FIELD_SERVER_ID))) .thenReturn("myInstance1.domain.com", "myInstance2.domain.com", "myInstance3.domain.com"); + when(mockServicesContainer.getPluginService()).thenReturn(mockPluginService); when(mockPluginService.getCurrentConnection()).thenReturn(mockConnection); when(mockPluginService.getTelemetryFactory()).thenReturn(mockTelemetryFactory); @@ -163,11 +166,11 @@ public void setUpIteration() throws Exception { null, mockConnectionWrapper, telemetryFactory); - pluginManager.init(mockPluginService, propertiesWithPlugins, mockPluginManagerService, configurationProfile); + pluginManager.init(mockServicesContainer, propertiesWithPlugins, mockPluginManagerService, configurationProfile); pluginManagerWithNoPlugins = new ConnectionPluginManager(mockConnectionProvider, null, mockConnectionWrapper, telemetryFactory); - pluginManagerWithNoPlugins.init(mockPluginService, propertiesWithoutPlugins, mockPluginManagerService, null); + pluginManagerWithNoPlugins.init(mockServicesContainer, propertiesWithoutPlugins, mockPluginManagerService, null); } @TearDown(Level.Iteration) @@ -179,7 +182,7 @@ public void tearDownIteration() throws Exception { public ConnectionPluginManager initConnectionPluginManagerWithNoPlugins() throws SQLException { final ConnectionPluginManager manager = new ConnectionPluginManager(mockConnectionProvider, null, mockConnectionWrapper, mockTelemetryFactory); - manager.init(mockPluginService, propertiesWithoutPlugins, mockPluginManagerService, configurationProfile); + manager.init(mockServicesContainer, propertiesWithoutPlugins, mockPluginManagerService, configurationProfile); return manager; } @@ -187,7 +190,7 @@ public ConnectionPluginManager initConnectionPluginManagerWithNoPlugins() throws public ConnectionPluginManager initConnectionPluginManagerWithPlugins() throws SQLException { final ConnectionPluginManager manager = new ConnectionPluginManager(mockConnectionProvider, null, mockConnectionWrapper, mockTelemetryFactory); - manager.init(mockPluginService, propertiesWithPlugins, mockPluginManagerService, configurationProfile); + manager.init(mockServicesContainer, propertiesWithPlugins, mockPluginManagerService, configurationProfile); return manager; } diff --git a/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/PluginBenchmarks.java b/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/PluginBenchmarks.java index 4b464a079..22148f312 100644 --- a/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/PluginBenchmarks.java +++ b/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/PluginBenchmarks.java @@ -63,6 +63,9 @@ import software.amazon.jdbc.dialect.Dialect; import software.amazon.jdbc.hostavailability.SimpleHostAvailabilityStrategy; import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; +import software.amazon.jdbc.util.connection.ConnectionService; +import software.amazon.jdbc.util.monitoring.MonitorService; +import software.amazon.jdbc.util.storage.StorageService; import software.amazon.jdbc.util.telemetry.GaugeCallable; import software.amazon.jdbc.util.telemetry.TelemetryContext; import software.amazon.jdbc.util.telemetry.TelemetryCounter; @@ -89,7 +92,11 @@ public class PluginBenchmarks { private final HostSpec writerHostSpec = new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) .host(TEST_HOST).port(TEST_PORT).build(); + @Mock private StorageService mockStorageService; + @Mock 
private MonitorService mockMonitorService; + @Mock private ConnectionService mockConnectionService; @Mock private PluginService mockPluginService; + @Mock private TargetDriverDialect mockTargetDriverDialect; @Mock private Dialect mockDialect; @Mock private ConnectionPluginManager mockConnectionPluginManager; @Mock private TelemetryFactory mockTelemetryFactory; @@ -158,29 +165,31 @@ public void initAndReleaseBaseLine() { @Benchmark public ConnectionWrapper initAndReleaseWithExecutionTimePlugin() throws SQLException { - try (ConnectionWrapper wrapper = new TestConnectionWrapper( - useExecutionTimePlugin(), - CONNECTION_STRING, - mockConnectionPluginManager, - mockTelemetryFactory, - mockPluginService, - mockHostListProviderService, - mockPluginManagerService)) { + try (ConnectionWrapper wrapper = getConnectionWrapper(useExecutionTimePlugin(), CONNECTION_STRING)) { wrapper.releaseResources(); return wrapper; } } - @Benchmark - public ConnectionWrapper initAndReleaseWithAuroraHostListPlugin() throws SQLException { - try (ConnectionWrapper wrapper = new TestConnectionWrapper( - useAuroraHostListPlugin(), - CONNECTION_STRING, + private ConnectionWrapper getConnectionWrapper(Properties props, String connString) throws SQLException { + return new TestConnectionWrapper( + props, + connString, + mockConnectionProvider, + mockTargetDriverDialect, mockConnectionPluginManager, mockTelemetryFactory, mockPluginService, mockHostListProviderService, - mockPluginManagerService)) { + mockPluginManagerService, + mockStorageService, + mockMonitorService, + mockConnectionService); + } + + @Benchmark + public ConnectionWrapper initAndReleaseWithAuroraHostListPlugin() throws SQLException { + try (ConnectionWrapper wrapper = getConnectionWrapper(useAuroraHostListPlugin(), CONNECTION_STRING)) { wrapper.releaseResources(); return wrapper; } @@ -188,14 +197,8 @@ public ConnectionWrapper initAndReleaseWithAuroraHostListPlugin() throws SQLExce @Benchmark public ConnectionWrapper initAndReleaseWithExecutionTimeAndAuroraHostListPlugins() throws SQLException { - try (ConnectionWrapper wrapper = new TestConnectionWrapper( - useExecutionTimeAndAuroraHostListPlugins(), - CONNECTION_STRING, - mockConnectionPluginManager, - mockTelemetryFactory, - mockPluginService, - mockHostListProviderService, - mockPluginManagerService)) { + try (ConnectionWrapper wrapper = + getConnectionWrapper(useExecutionTimeAndAuroraHostListPlugins(), CONNECTION_STRING)) { wrapper.releaseResources(); return wrapper; } @@ -203,14 +206,7 @@ public ConnectionWrapper initAndReleaseWithExecutionTimeAndAuroraHostListPlugins @Benchmark public ConnectionWrapper initAndReleaseWithReadWriteSplittingPlugin() throws SQLException { - try (ConnectionWrapper wrapper = new TestConnectionWrapper( - useReadWriteSplittingPlugin(), - CONNECTION_STRING, - mockConnectionPluginManager, - mockTelemetryFactory, - mockPluginService, - mockHostListProviderService, - mockPluginManagerService)) { + try (ConnectionWrapper wrapper = getConnectionWrapper(useReadWriteSplittingPlugin(), CONNECTION_STRING)) { wrapper.releaseResources(); return wrapper; } @@ -219,14 +215,8 @@ public ConnectionWrapper initAndReleaseWithReadWriteSplittingPlugin() throws SQL @Benchmark public ConnectionWrapper initAndReleaseWithAuroraHostListAndReadWriteSplittingPlugin() throws SQLException { - try (ConnectionWrapper wrapper = new TestConnectionWrapper( - useAuroraHostListAndReadWriteSplittingPlugin(), - PG_CONNECTION_STRING, - mockConnectionPluginManager, - mockTelemetryFactory, - mockPluginService, - 
mockHostListProviderService, - mockPluginManagerService)) { + try (ConnectionWrapper wrapper = + getConnectionWrapper(useAuroraHostListAndReadWriteSplittingPlugin(), PG_CONNECTION_STRING)) { wrapper.releaseResources(); return wrapper; } @@ -237,14 +227,7 @@ public ConnectionWrapper initAndReleaseWithReadWriteSplittingPlugin_internalConn HikariPooledConnectionProvider provider = new HikariPooledConnectionProvider((hostSpec, props) -> new HikariConfig()); Driver.setCustomConnectionProvider(provider); - try (ConnectionWrapper wrapper = new TestConnectionWrapper( - useReadWriteSplittingPlugin(), - CONNECTION_STRING, - mockConnectionPluginManager, - mockTelemetryFactory, - mockPluginService, - mockHostListProviderService, - mockPluginManagerService)) { + try (ConnectionWrapper wrapper = getConnectionWrapper(useReadWriteSplittingPlugin(), CONNECTION_STRING)) { wrapper.releaseResources(); ConnectionProviderManager.releaseResources(); Driver.resetCustomConnectionProvider(); @@ -258,14 +241,8 @@ public ConnectionWrapper initAndReleaseWithAuroraHostListAndReadWriteSplittingPl HikariPooledConnectionProvider provider = new HikariPooledConnectionProvider((hostSpec, props) -> new HikariConfig()); Driver.setCustomConnectionProvider(provider); - try (ConnectionWrapper wrapper = new TestConnectionWrapper( - useAuroraHostListAndReadWriteSplittingPlugin(), - PG_CONNECTION_STRING, - mockConnectionPluginManager, - mockTelemetryFactory, - mockPluginService, - mockHostListProviderService, - mockPluginManagerService)) { + try (ConnectionWrapper wrapper = getConnectionWrapper( + useAuroraHostListAndReadWriteSplittingPlugin(), PG_CONNECTION_STRING)) { wrapper.releaseResources(); ConnectionProviderManager.releaseResources(); Driver.resetCustomConnectionProvider(); @@ -275,14 +252,7 @@ public ConnectionWrapper initAndReleaseWithAuroraHostListAndReadWriteSplittingPl @Benchmark public Statement executeStatementBaseline() throws SQLException { - try (ConnectionWrapper wrapper = new TestConnectionWrapper( - useExecutionTimePlugin(), - CONNECTION_STRING, - mockConnectionPluginManager, - mockTelemetryFactory, - mockPluginService, - mockHostListProviderService, - mockPluginManagerService); + try (ConnectionWrapper wrapper = getConnectionWrapper(useExecutionTimePlugin(), CONNECTION_STRING); Statement statement = wrapper.createStatement()) { return statement; } @@ -291,14 +261,7 @@ public Statement executeStatementBaseline() throws SQLException { @Benchmark public ResultSet executeStatementWithExecutionTimePlugin() throws SQLException { try ( - ConnectionWrapper wrapper = new TestConnectionWrapper( - useExecutionTimePlugin(), - CONNECTION_STRING, - mockConnectionPluginManager, - mockTelemetryFactory, - mockPluginService, - mockHostListProviderService, - mockPluginManagerService); + ConnectionWrapper wrapper = getConnectionWrapper(useExecutionTimePlugin(), CONNECTION_STRING); Statement statement = wrapper.createStatement(); ResultSet resultSet = statement.executeQuery("some sql")) { return resultSet; @@ -308,14 +271,7 @@ public ResultSet executeStatementWithExecutionTimePlugin() throws SQLException { @Benchmark public ResultSet executeStatementWithTelemetryDisabled() throws SQLException { try ( - ConnectionWrapper wrapper = new TestConnectionWrapper( - disabledTelemetry(), - CONNECTION_STRING, - mockConnectionPluginManager, - mockTelemetryFactory, - mockPluginService, - mockHostListProviderService, - mockPluginManagerService); + ConnectionWrapper wrapper = getConnectionWrapper(disabledTelemetry(), CONNECTION_STRING); Statement 
statement = wrapper.createStatement(); ResultSet resultSet = statement.executeQuery("some sql")) { return resultSet; @@ -325,14 +281,7 @@ public ResultSet executeStatementWithTelemetryDisabled() throws SQLException { @Benchmark public ResultSet executeStatementWithTelemetry() throws SQLException { try ( - ConnectionWrapper wrapper = new TestConnectionWrapper( - useTelemetry(), - CONNECTION_STRING, - mockConnectionPluginManager, - mockTelemetryFactory, - mockPluginService, - mockHostListProviderService, - mockPluginManagerService); + ConnectionWrapper wrapper = getConnectionWrapper(useTelemetry(), CONNECTION_STRING); Statement statement = wrapper.createStatement(); ResultSet resultSet = statement.executeQuery("some sql")) { return resultSet; diff --git a/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/testplugin/TestConnectionWrapper.java b/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/testplugin/TestConnectionWrapper.java index 66cfbd28f..d0ec5c063 100644 --- a/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/testplugin/TestConnectionWrapper.java +++ b/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/testplugin/TestConnectionWrapper.java @@ -20,24 +20,44 @@ import java.util.Properties; import org.checkerframework.checker.nullness.qual.NonNull; import software.amazon.jdbc.ConnectionPluginManager; +import software.amazon.jdbc.ConnectionProvider; import software.amazon.jdbc.HostListProviderService; import software.amazon.jdbc.PluginManagerService; import software.amazon.jdbc.PluginService; +import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; +import software.amazon.jdbc.util.connection.ConnectionService; +import software.amazon.jdbc.util.monitoring.MonitorService; +import software.amazon.jdbc.util.storage.StorageService; import software.amazon.jdbc.util.telemetry.TelemetryFactory; import software.amazon.jdbc.wrapper.ConnectionWrapper; // Test class allowing for mocks to be used with ConnectionWrapper logic public class TestConnectionWrapper extends ConnectionWrapper { - public TestConnectionWrapper(@NonNull Properties props, - @NonNull String url, - @NonNull ConnectionPluginManager connectionPluginManager, + public TestConnectionWrapper( + @NonNull final Properties props, + @NonNull final String url, + @NonNull final ConnectionProvider defaultConnectionProvider, + @NonNull final TargetDriverDialect driverDialect, + @NonNull final ConnectionPluginManager connectionPluginManager, @NonNull final TelemetryFactory telemetryFactory, - @NonNull PluginService pluginService, - @NonNull HostListProviderService hostListProviderService, - @NonNull PluginManagerService pluginManagerService) + @NonNull final PluginService pluginService, + @NonNull final HostListProviderService hostListProviderService, + @NonNull final PluginManagerService pluginManagerService, + @NonNull final StorageService storageService, + @NonNull final MonitorService monitorService, + @NonNull final ConnectionService connectionService) throws SQLException { - super(props, url, connectionPluginManager, telemetryFactory, pluginService, hostListProviderService, - pluginManagerService); + super( + props, + url, + defaultConnectionProvider, + driverDialect, + connectionPluginManager, + telemetryFactory, + pluginService, + hostListProviderService, + pluginManagerService, + storageService, monitorService, connectionService); } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/C3P0PooledConnectionProvider.java 
b/wrapper/src/main/java/software/amazon/jdbc/C3P0PooledConnectionProvider.java index c7c5afff9..07b8570fd 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/C3P0PooledConnectionProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/C3P0PooledConnectionProvider.java @@ -35,7 +35,7 @@ import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; import software.amazon.jdbc.util.Messages; import software.amazon.jdbc.util.PropertyUtils; -import software.amazon.jdbc.util.SlidingExpirationCache; +import software.amazon.jdbc.util.storage.SlidingExpirationCache; public class C3P0PooledConnectionProvider implements PooledConnectionProvider, CanReleaseResources { diff --git a/wrapper/src/main/java/software/amazon/jdbc/ConnectionPluginChainBuilder.java b/wrapper/src/main/java/software/amazon/jdbc/ConnectionPluginChainBuilder.java index aed8fef87..411e40cd8 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/ConnectionPluginChainBuilder.java +++ b/wrapper/src/main/java/software/amazon/jdbc/ConnectionPluginChainBuilder.java @@ -51,6 +51,7 @@ import software.amazon.jdbc.plugin.staledns.AuroraStaleDnsPluginFactory; import software.amazon.jdbc.plugin.strategy.fastestresponse.FastestResponseStrategyPluginFactory; import software.amazon.jdbc.profile.ConfigurationProfile; +import software.amazon.jdbc.util.FullServicesContainer; import software.amazon.jdbc.util.Messages; import software.amazon.jdbc.util.SqlState; import software.amazon.jdbc.util.StringUtils; @@ -142,7 +143,7 @@ public PluginFactoryInfo(final ConnectionPluginFactory factory, final int weight } public List getPlugins( - final PluginService pluginService, + final FullServicesContainer servicesContainer, final ConnectionProvider defaultConnProvider, final ConnectionProvider effectiveConnProvider, final PluginManagerService pluginManagerService, @@ -218,16 +219,20 @@ public List getPlugins( // make a chain of connection plugins plugins = new ArrayList<>(pluginFactories.size() + 1); for (final ConnectionPluginFactory factory : pluginFactories) { - plugins.add(factory.getInstance(pluginService, props)); + if (factory instanceof ServicesContainerPluginFactory) { + ServicesContainerPluginFactory servicesContainerPluginFactory = (ServicesContainerPluginFactory) factory; + plugins.add(servicesContainerPluginFactory.getInstance(servicesContainer, props)); + } else { + plugins.add(factory.getInstance(servicesContainer.getPluginService(), props)); + } } - } else { plugins = new ArrayList<>(1); // one spot for default connection plugin } // add default connection plugin to the tail final ConnectionPlugin defaultPlugin = new DefaultConnectionPlugin( - pluginService, + servicesContainer.getPluginService(), defaultConnProvider, effectiveConnProvider, pluginManagerService); diff --git a/wrapper/src/main/java/software/amazon/jdbc/ConnectionPluginFactory.java b/wrapper/src/main/java/software/amazon/jdbc/ConnectionPluginFactory.java index 5f186bdde..8471cdbd4 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/ConnectionPluginFactory.java +++ b/wrapper/src/main/java/software/amazon/jdbc/ConnectionPluginFactory.java @@ -17,10 +17,14 @@ package software.amazon.jdbc; import java.util.Properties; +import software.amazon.jdbc.util.FullServicesContainer; /** * Interface for connection plugin factories. This class implements ways to initialize a connection * plugin. + * + * @apiNote Consider using {@link ServicesContainerPluginFactory} for new implementations as it provides access to all + * services in the {@link FullServicesContainer}. 
*/ public interface ConnectionPluginFactory { diff --git a/wrapper/src/main/java/software/amazon/jdbc/ConnectionPluginManager.java b/wrapper/src/main/java/software/amazon/jdbc/ConnectionPluginManager.java index cc7390ade..8757e2c0a 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/ConnectionPluginManager.java +++ b/wrapper/src/main/java/software/amazon/jdbc/ConnectionPluginManager.java @@ -29,6 +29,7 @@ import java.util.logging.Logger; import org.checkerframework.checker.nullness.qual.NonNull; import org.checkerframework.checker.nullness.qual.Nullable; +import org.jetbrains.annotations.NotNull; import software.amazon.jdbc.cleanup.CanReleaseResources; import software.amazon.jdbc.plugin.AuroraConnectionTrackerPlugin; import software.amazon.jdbc.plugin.AuroraInitialConnectionStrategyPlugin; @@ -48,6 +49,7 @@ import software.amazon.jdbc.plugin.staledns.AuroraStaleDnsPlugin; import software.amazon.jdbc.plugin.strategy.fastestresponse.FastestResponseStrategyPlugin; import software.amazon.jdbc.profile.ConfigurationProfile; +import software.amazon.jdbc.util.FullServicesContainer; import software.amazon.jdbc.util.Messages; import software.amazon.jdbc.util.Utils; import software.amazon.jdbc.util.WrapperUtils; @@ -98,6 +100,7 @@ public class ConnectionPluginManager implements CanReleaseResources, Wrapper { protected final @NonNull ConnectionProvider defaultConnProvider; protected final @Nullable ConnectionProvider effectiveConnProvider; protected final ConnectionWrapper connectionWrapper; + protected FullServicesContainer servicesContainer; protected PluginService pluginService; protected TelemetryFactory telemetryFactory; protected boolean isTelemetryInUse; @@ -108,7 +111,7 @@ public class ConnectionPluginManager implements CanReleaseResources, Wrapper { public ConnectionPluginManager( final @NonNull ConnectionProvider defaultConnProvider, final @Nullable ConnectionProvider effectiveConnProvider, - final @NonNull ConnectionWrapper connectionWrapper, + final @Nullable ConnectionWrapper connectionWrapper, final @NonNull TelemetryFactory telemetryFactory) { this.defaultConnProvider = defaultConnProvider; this.effectiveConnProvider = effectiveConnProvider; @@ -167,27 +170,28 @@ public void unlock() { *

The {@link DefaultConnectionPlugin} will always be initialized and attached as the last * connection plugin in the chain. * - * @param pluginService a reference to a plugin service that plugin can use + * @param servicesContainer the service container for the services required by this class. * @param props the configuration of the connection * @param pluginManagerService a reference to a plugin manager service * @param configurationProfile a profile configuration defined by the user * @throws SQLException if errors occurred during the execution */ public void init( - final PluginService pluginService, + final FullServicesContainer servicesContainer, final Properties props, final PluginManagerService pluginManagerService, @Nullable ConfigurationProfile configurationProfile) throws SQLException { this.props = props; - this.pluginService = pluginService; - this.telemetryFactory = pluginService.getTelemetryFactory(); + this.servicesContainer = servicesContainer; + this.pluginService = servicesContainer.getPluginService(); + this.telemetryFactory = servicesContainer.getTelemetryFactory(); this.isTelemetryInUse = telemetryFactory.inUse(); ConnectionPluginChainBuilder pluginChainBuilder = new ConnectionPluginChainBuilder(); this.plugins = pluginChainBuilder.getPlugins( - this.pluginService, + this.servicesContainer, this.defaultConnProvider, this.effectiveConnProvider, pluginManagerService, diff --git a/wrapper/src/main/java/software/amazon/jdbc/Driver.java b/wrapper/src/main/java/software/amazon/jdbc/Driver.java index 4ecd53f0a..4ef71adb7 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/Driver.java +++ b/wrapper/src/main/java/software/amazon/jdbc/Driver.java @@ -20,7 +20,6 @@ import java.sql.DriverManager; import java.sql.DriverPropertyInfo; import java.sql.SQLException; -import java.sql.SQLFeatureNotSupportedException; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -44,8 +43,8 @@ import software.amazon.jdbc.plugin.DataCacheConnectionPlugin; import software.amazon.jdbc.plugin.OpenedConnectionTracker; import software.amazon.jdbc.plugin.customendpoint.CustomEndpointMonitorImpl; -import software.amazon.jdbc.plugin.customendpoint.CustomEndpointPlugin; -import software.amazon.jdbc.plugin.efm.MonitorThreadContainer; +import software.amazon.jdbc.plugin.efm.HostMonitorThreadContainer; +import software.amazon.jdbc.plugin.efm2.HostMonitorServiceImpl; import software.amazon.jdbc.plugin.federatedauth.FederatedAuthCacheHolder; import software.amazon.jdbc.plugin.federatedauth.OktaAuthCacheHolder; import software.amazon.jdbc.plugin.iam.IamAuthCacheHolder; @@ -59,11 +58,16 @@ import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; import software.amazon.jdbc.targetdriverdialect.TargetDriverDialectManager; import software.amazon.jdbc.util.ConnectionUrlParser; +import software.amazon.jdbc.util.CoreServicesContainer; import software.amazon.jdbc.util.DriverInfo; +import software.amazon.jdbc.util.FullServicesContainer; +import software.amazon.jdbc.util.FullServicesContainerImpl; import software.amazon.jdbc.util.Messages; import software.amazon.jdbc.util.PropertyUtils; import software.amazon.jdbc.util.RdsUtils; import software.amazon.jdbc.util.StringUtils; +import software.amazon.jdbc.util.monitoring.MonitorService; +import software.amazon.jdbc.util.storage.StorageService; import software.amazon.jdbc.util.telemetry.DefaultTelemetryFactory; import software.amazon.jdbc.util.telemetry.TelemetryContext; import software.amazon.jdbc.util.telemetry.TelemetryFactory; 
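To illustrate the init() signature change above, here is a minimal test-style sketch of the new wiring. It mirrors the ConnectionPluginManagerBenchmarks changes earlier in this diff; the Mockito mocks and the empty plugin list are illustrative only, and a real test would stub additional collaborators exactly as the benchmark does.

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    import java.sql.SQLException;
    import java.util.Properties;
    import software.amazon.jdbc.ConnectionPluginManager;
    import software.amazon.jdbc.ConnectionProvider;
    import software.amazon.jdbc.PluginManagerService;
    import software.amazon.jdbc.PluginService;
    import software.amazon.jdbc.util.FullServicesContainer;
    import software.amazon.jdbc.util.telemetry.TelemetryFactory;
    import software.amazon.jdbc.wrapper.ConnectionWrapper;

    class PluginManagerInitSketch {
      ConnectionPluginManager initWithServicesContainer() throws SQLException {
        // The manager now resolves the PluginService and TelemetryFactory through the container.
        FullServicesContainer servicesContainer = mock(FullServicesContainer.class);
        PluginService pluginService = mock(PluginService.class);
        TelemetryFactory telemetryFactory = mock(TelemetryFactory.class);
        when(servicesContainer.getPluginService()).thenReturn(pluginService);
        when(servicesContainer.getTelemetryFactory()).thenReturn(telemetryFactory);
        when(pluginService.getTelemetryFactory()).thenReturn(telemetryFactory);

        Properties propsWithoutPlugins = new Properties();
        propsWithoutPlugins.setProperty("wrapperPlugins", "");

        ConnectionPluginManager manager = new ConnectionPluginManager(
            mock(ConnectionProvider.class), null, mock(ConnectionWrapper.class), telemetryFactory);
        // Previously: manager.init(pluginService, props, pluginManagerService, configurationProfile);
        manager.init(servicesContainer, propsWithoutPlugins, mock(PluginManagerService.class), null);
        return manager;
      }
    }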
@@ -105,6 +109,18 @@ public class Driver implements java.sql.Driver { } } + private final StorageService storageService; + private final MonitorService monitorService; + + public Driver() { + this(CoreServicesContainer.getInstance()); + } + + public Driver(CoreServicesContainer coreServicesContainer) { + this.storageService = coreServicesContainer.getStorageService(); + this.monitorService = coreServicesContainer.getMonitorService(); + } + public static void register() throws SQLException { if (isRegistered()) { throw new IllegalStateException( @@ -216,14 +232,17 @@ public Connection connect(final String url, final Properties info) throws SQLExc effectiveConnectionProvider = configurationProfile.getConnectionProvider(); } + FullServicesContainer + servicesContainer = new FullServicesContainerImpl(storageService, monitorService, telemetryFactory); + return new ConnectionWrapper( + servicesContainer, props, driverUrl, defaultConnectionProvider, effectiveConnectionProvider, targetDriverDialect, - configurationProfile, - telemetryFactory); + configurationProfile); } catch (Exception ex) { if (context != null) { @@ -248,7 +267,7 @@ public boolean acceptsURL(final String url) throws SQLException { } @Override - public DriverPropertyInfo[] getPropertyInfo(final String url, final Properties info) throws SQLException { + public DriverPropertyInfo[] getPropertyInfo(final String url, final Properties info) { final Properties copy = new Properties(info); final String databaseName = ConnectionUrlParser.parseDatabaseFromUrl(url); if (!StringUtils.isNullOrEmpty(databaseName)) { @@ -282,7 +301,7 @@ public boolean jdbcCompliant() { } @Override - public Logger getParentLogger() throws SQLFeatureNotSupportedException { + public Logger getParentLogger() { return PARENT_LOGGER; } @@ -392,6 +411,7 @@ public static void resetConnectionInitFunc() { } public static void clearCaches() { + CoreServicesContainer.getInstance().getStorageService().clearAll(); RdsUtils.clearCache(); RdsHostListProvider.clearAll(); PluginServiceImpl.clearCache(); @@ -410,13 +430,12 @@ public static void clearCaches() { } public static void releaseResources() { - software.amazon.jdbc.plugin.efm2.MonitorServiceImpl.closeAllMonitors(); - MonitorThreadContainer.releaseInstance(); + CoreServicesContainer.getInstance().getMonitorService().stopAndRemoveAll(); + HostMonitorServiceImpl.closeAllMonitors(); + HostMonitorThreadContainer.releaseInstance(); ConnectionProviderManager.releaseResources(); - CustomEndpointPlugin.closeMonitors(); HikariPoolsHolder.closeAllPools(); HostResponseTimeServiceImpl.closeAllMonitors(); - MonitoringRdsHostListProvider.closeAllMonitors(); clearCaches(); } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java index a0aca078a..47e1968cc 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java @@ -43,7 +43,7 @@ import software.amazon.jdbc.util.PropertyUtils; import software.amazon.jdbc.util.RdsUrlType; import software.amazon.jdbc.util.RdsUtils; -import software.amazon.jdbc.util.SlidingExpirationCache; +import software.amazon.jdbc.util.storage.SlidingExpirationCache; public class HikariPooledConnectionProvider implements PooledConnectionProvider, CanReleaseResources { diff --git a/wrapper/src/main/java/software/amazon/jdbc/HikariPoolsHolder.java 
b/wrapper/src/main/java/software/amazon/jdbc/HikariPoolsHolder.java index 7bec68a49..7eb2389e3 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/HikariPoolsHolder.java +++ b/wrapper/src/main/java/software/amazon/jdbc/HikariPoolsHolder.java @@ -17,7 +17,7 @@ package software.amazon.jdbc; import software.amazon.jdbc.util.Pair; -import software.amazon.jdbc.util.SlidingExpirationCache; +import software.amazon.jdbc.util.storage.SlidingExpirationCache; public class HikariPoolsHolder { static SlidingExpirationCache databasePools = diff --git a/wrapper/src/main/java/software/amazon/jdbc/LeastConnectionsHostSelector.java b/wrapper/src/main/java/software/amazon/jdbc/LeastConnectionsHostSelector.java index 2ada66281..88b90624c 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/LeastConnectionsHostSelector.java +++ b/wrapper/src/main/java/software/amazon/jdbc/LeastConnectionsHostSelector.java @@ -27,7 +27,7 @@ import software.amazon.jdbc.hostavailability.HostAvailability; import software.amazon.jdbc.util.Messages; import software.amazon.jdbc.util.Pair; -import software.amazon.jdbc.util.SlidingExpirationCache; +import software.amazon.jdbc.util.storage.SlidingExpirationCache; public class LeastConnectionsHostSelector implements HostSelector { public static final String STRATEGY_LEAST_CONNECTIONS = "leastConnections"; diff --git a/wrapper/src/main/java/software/amazon/jdbc/PartialPluginService.java b/wrapper/src/main/java/software/amazon/jdbc/PartialPluginService.java new file mode 100644 index 000000000..67027195b --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/PartialPluginService.java @@ -0,0 +1,700 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package software.amazon.jdbc; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.logging.Logger; +import java.util.stream.Collectors; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; +import software.amazon.jdbc.cleanup.CanReleaseResources; +import software.amazon.jdbc.dialect.Dialect; +import software.amazon.jdbc.exceptions.ExceptionHandler; +import software.amazon.jdbc.exceptions.ExceptionManager; +import software.amazon.jdbc.hostavailability.HostAvailability; +import software.amazon.jdbc.hostavailability.HostAvailabilityStrategyFactory; +import software.amazon.jdbc.hostlistprovider.StaticHostListProvider; +import software.amazon.jdbc.profile.ConfigurationProfile; +import software.amazon.jdbc.states.SessionStateService; +import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; +import software.amazon.jdbc.util.FullServicesContainer; +import software.amazon.jdbc.util.Messages; +import software.amazon.jdbc.util.Utils; +import software.amazon.jdbc.util.storage.CacheMap; +import software.amazon.jdbc.util.telemetry.TelemetryFactory; + +/** + * A {@link PluginService} containing some methods that are not intended to be called. This class is intended to be used + * by monitors, which require a {@link PluginService}, but are not expected to need or use some of the methods defined + * by the {@link PluginService} interface. The methods that are not expected to be called will log a warning or throw an + * {@link UnsupportedOperationException} when called. 
+ */ +public class PartialPluginService implements PluginService, CanReleaseResources, HostListProviderService, + PluginManagerService { + + private static final Logger LOGGER = Logger.getLogger(PluginServiceImpl.class.getName()); + protected static final long DEFAULT_HOST_AVAILABILITY_CACHE_EXPIRE_NANO = TimeUnit.MINUTES.toNanos(5); + + protected static final CacheMap<String, HostAvailability> hostAvailabilityExpiringCache = new CacheMap<>(); + protected final FullServicesContainer servicesContainer; + protected final ConnectionPluginManager pluginManager; + protected final Properties props; + protected volatile HostListProvider hostListProvider; + protected List<HostSpec> allHosts = new ArrayList<>(); + protected HostSpec currentHostSpec; + protected HostSpec initialConnectionHostSpec; + protected boolean isInTransaction; + protected final ExceptionManager exceptionManager; + protected final @Nullable ExceptionHandler exceptionHandler; + protected final String originalUrl; + protected final String driverProtocol; + protected TargetDriverDialect targetDriverDialect; + protected Dialect dbDialect; + protected @Nullable final ConfigurationProfile configurationProfile; + protected final ConnectionProviderManager connectionProviderManager; + + public PartialPluginService( + @NonNull final FullServicesContainer servicesContainer, + @NonNull final Properties props, + @NonNull final String originalUrl, + @NonNull final String targetDriverProtocol, + @NonNull final TargetDriverDialect targetDriverDialect, + @NonNull final Dialect dbDialect) { + this( + servicesContainer, + new ExceptionManager(), + props, + originalUrl, + targetDriverProtocol, + targetDriverDialect, + dbDialect, + null); + } + + public PartialPluginService( + @NonNull final FullServicesContainer servicesContainer, + @NonNull final ExceptionManager exceptionManager, + @NonNull final Properties props, + @NonNull final String originalUrl, + @NonNull final String targetDriverProtocol, + @NonNull final TargetDriverDialect targetDriverDialect, + @NonNull final Dialect dbDialect, + @Nullable final ConfigurationProfile configurationProfile) { + this.servicesContainer = servicesContainer; + this.pluginManager = servicesContainer.getConnectionPluginManager(); + this.props = props; + this.originalUrl = originalUrl; + this.driverProtocol = targetDriverProtocol; + this.targetDriverDialect = targetDriverDialect; + this.dbDialect = dbDialect; + this.configurationProfile = configurationProfile; + this.exceptionManager = exceptionManager; + this.connectionProviderManager = new ConnectionProviderManager( + this.pluginManager.getDefaultConnProvider(), + this.pluginManager.getEffectiveConnProvider()); + + this.exceptionHandler = this.configurationProfile != null && this.configurationProfile.getExceptionHandler() != null + ? this.configurationProfile.getExceptionHandler() + : null; + } + + @Override + public Connection getCurrentConnection() { + throw new UnsupportedOperationException( + Messages.get("PartialPluginService.unexpectedMethodCall", new Object[] {"getCurrentConnection"})); + } + + @Override + public HostSpec getCurrentHostSpec() { + if (this.currentHostSpec == null) { + this.currentHostSpec = this.initialConnectionHostSpec; + + if (this.currentHostSpec == null) { + if (this.getAllHosts().isEmpty()) { + throw new RuntimeException(Messages.get("PluginServiceImpl.hostListEmpty")); + } + + this.currentHostSpec = this.getWriter(this.getAllHosts()); + final List<HostSpec> allowedHosts = this.getHosts(); + if (!Utils.containsUrl(allowedHosts, this.currentHostSpec.getUrl())) { + throw new RuntimeException( + Messages.get("PluginServiceImpl.currentHostNotAllowed", + new Object[] { + currentHostSpec == null ? "<null>" : currentHostSpec.getUrl(), + Utils.logTopology(allowedHosts, "")}) + ); + } + + if (this.currentHostSpec == null) { + this.currentHostSpec = this.getHosts().get(0); + } + } + if (this.currentHostSpec == null) { + throw new RuntimeException("Current host is undefined."); + } + LOGGER.finest(() -> "Set current host to " + this.currentHostSpec); + } + return this.currentHostSpec; + } + + public void setInitialConnectionHostSpec(final @NonNull HostSpec initialConnectionHostSpec) { + this.initialConnectionHostSpec = initialConnectionHostSpec; + } + + @Override + public HostSpec getInitialConnectionHostSpec() { + return this.initialConnectionHostSpec; + } + + @Override + public String getOriginalUrl() { + return this.originalUrl; + } + + @Override + public void setAllowedAndBlockedHosts(AllowedAndBlockedHosts allowedAndBlockedHosts) { + throw new UnsupportedOperationException( + Messages.get("PartialPluginService.unexpectedMethodCall", new Object[] {"setAllowedAndBlockedHosts"})); + } + + @Override + public boolean acceptsStrategy(HostRole role, String strategy) throws SQLException { + throw new UnsupportedOperationException( + Messages.get("PartialPluginService.unexpectedMethodCall", new Object[] {"acceptsStrategy"})); + } + + @Override + public HostSpec getHostSpecByStrategy(HostRole role, String strategy) throws SQLException { + throw new UnsupportedOperationException( + Messages.get("PartialPluginService.unexpectedMethodCall", new Object[] {"getHostSpecByStrategy"})); + } + + @Override + public HostSpec getHostSpecByStrategy(List<HostSpec> hosts, HostRole role, String strategy) throws SQLException { + throw new UnsupportedOperationException( + Messages.get("PartialPluginService.unexpectedMethodCall", new Object[] {"getHostSpecByStrategy"})); + } + + @Override + public HostRole getHostRole(Connection conn) throws SQLException { + return this.hostListProvider.getHostRole(conn); + } + + private HostSpec getWriter(final @NonNull List<HostSpec> hosts) { + for (final HostSpec hostSpec : hosts) { + if (hostSpec.getRole() == HostRole.WRITER) { + return hostSpec; + } + } + return null; + } + + @Override + @Deprecated + public ConnectionProvider getConnectionProvider() { + return this.pluginManager.defaultConnProvider; + } + + public boolean isPooledConnectionProvider(HostSpec host, Properties props) { + final ConnectionProvider connectionProvider = + this.connectionProviderManager.getConnectionProvider(this.driverProtocol, host, props); + return (connectionProvider instanceof PooledConnectionProvider); + } + + @Override + public String getDriverProtocol() { + return this.driverProtocol; + } + + @Override + public void setCurrentConnection( + final @NonNull Connection connection, final @NonNull HostSpec hostSpec) throws SQLException { + throw new UnsupportedOperationException( + Messages.get("PartialPluginService.unexpectedMethodCall", new Object[] {"setCurrentConnection"})); + } + + @Override + public EnumSet<NodeChangeOptions> setCurrentConnection( + final @NonNull Connection connection, + final @NonNull HostSpec hostSpec, + @Nullable final ConnectionPlugin skipNotificationForThisPlugin) + throws SQLException { + throw new UnsupportedOperationException( + Messages.get("PartialPluginService.unexpectedMethodCall", new Object[] {"setCurrentConnection"})); + } + + protected EnumSet<NodeChangeOptions> compare( + final @NonNull HostSpec hostSpecA, + final @NonNull HostSpec hostSpecB) { + + final EnumSet<NodeChangeOptions> changes = EnumSet.noneOf(NodeChangeOptions.class); + + if (!hostSpecA.getHost().equals(hostSpecB.getHost()) + || hostSpecA.getPort() != hostSpecB.getPort()) { + changes.add(NodeChangeOptions.HOSTNAME); + } + + if (hostSpecA.getRole() != hostSpecB.getRole()) { + if (hostSpecB.getRole() == HostRole.WRITER) { + changes.add(NodeChangeOptions.PROMOTED_TO_WRITER); + } else if (hostSpecB.getRole() == HostRole.READER) { + changes.add(NodeChangeOptions.PROMOTED_TO_READER); + } + } + + if (hostSpecA.getAvailability() != hostSpecB.getAvailability()) { + if (hostSpecB.getAvailability() == HostAvailability.AVAILABLE) { + changes.add(NodeChangeOptions.WENT_UP); + } else if (hostSpecB.getAvailability() == HostAvailability.NOT_AVAILABLE) { + changes.add(NodeChangeOptions.WENT_DOWN); + } + } + + if (!changes.isEmpty()) { + changes.add(NodeChangeOptions.NODE_CHANGED); + } + + return changes; + } + + @Override + public List<HostSpec> getAllHosts() { + return this.allHosts; + } + + @Override + public List<HostSpec> getHosts() { + AllowedAndBlockedHosts hostPermissions = this.servicesContainer.getStorageService().get( + AllowedAndBlockedHosts.class, this.initialConnectionHostSpec.getUrl()); + if (hostPermissions == null) { + return this.allHosts; + } + + List<HostSpec> hosts = this.allHosts; + Set<String> allowedHostIds = hostPermissions.getAllowedHostIds(); + Set<String> blockedHostIds = hostPermissions.getBlockedHostIds(); + + if (!Utils.isNullOrEmpty(allowedHostIds)) { + hosts = hosts.stream() + .filter((hostSpec -> allowedHostIds.contains(hostSpec.getHostId()))) + .collect(Collectors.toList()); + } + + if (!Utils.isNullOrEmpty(blockedHostIds)) { + hosts = hosts.stream() + .filter((hostSpec -> !blockedHostIds.contains(hostSpec.getHostId()))) + .collect(Collectors.toList()); + } + + return hosts; + } + + @Override + public void setAvailability(final @NonNull Set<String> hostAliases, final @NonNull HostAvailability availability) { + + if (hostAliases.isEmpty()) { + return; + } + + final List<HostSpec> hostsToChange = this.getAllHosts().stream() + .filter((host) -> hostAliases.contains(host.asAlias()) + || host.getAliases().stream().anyMatch(hostAliases::contains)) + .distinct() + .collect(Collectors.toList()); + + if (hostsToChange.isEmpty()) { + LOGGER.finest(() -> Messages.get("PluginServiceImpl.hostsChangelistEmpty")); + return; + } + + final Map<String, EnumSet<NodeChangeOptions>> changes = new HashMap<>(); + for (final HostSpec host : hostsToChange) { + final HostAvailability currentAvailability = host.getAvailability(); + host.setAvailability(availability); + hostAvailabilityExpiringCache.put(host.getUrl(), availability, + DEFAULT_HOST_AVAILABILITY_CACHE_EXPIRE_NANO); + if (currentAvailability != availability) { + final EnumSet<NodeChangeOptions> hostChanges; + if (availability == HostAvailability.AVAILABLE) { + hostChanges = EnumSet.of(NodeChangeOptions.WENT_UP, NodeChangeOptions.NODE_CHANGED); + } else { + hostChanges = EnumSet.of(NodeChangeOptions.WENT_DOWN, NodeChangeOptions.NODE_CHANGED); + } + changes.put(host.getUrl(), hostChanges); + } + } + + if (!changes.isEmpty()) { + this.pluginManager.notifyNodeListChanged(changes); + } + } + + @Override + public boolean isInTransaction() { + throw new UnsupportedOperationException( + Messages.get("PartialPluginService.unexpectedMethodCall", new Object[] {"isInTransaction"})); + } + + @Override + public void setInTransaction(final boolean inTransaction) { + this.isInTransaction = inTransaction; + } + + @Override + public HostListProvider getHostListProvider() { + return this.hostListProvider; + } + + @Override + public void refreshHostList() throws SQLException { + final List<HostSpec> updatedHostList = this.getHostListProvider().refresh(); + if (!Objects.equals(updatedHostList, this.allHosts)) { + updateHostAvailability(updatedHostList); + setNodeList(this.allHosts, updatedHostList); + } + } + + @Override + public void refreshHostList(final Connection connection) throws SQLException { + final List<HostSpec> updatedHostList = this.getHostListProvider().refresh(connection); + if (!Objects.equals(updatedHostList, this.allHosts)) { + updateHostAvailability(updatedHostList); + setNodeList(this.allHosts, updatedHostList); + } + } + + @Override + public void forceRefreshHostList() throws SQLException { + final List<HostSpec> updatedHostList = this.getHostListProvider().forceRefresh(); + if (updatedHostList != null) { + updateHostAvailability(updatedHostList); + setNodeList(this.allHosts, updatedHostList); + } + } + + @Override + public void forceRefreshHostList(final Connection connection) throws SQLException { + final List<HostSpec> updatedHostList = this.getHostListProvider().forceRefresh(connection); + if (updatedHostList != null) { + updateHostAvailability(updatedHostList); + setNodeList(this.allHosts, updatedHostList); + } + } + + @Override + public boolean forceRefreshHostList(final boolean shouldVerifyWriter, final long timeoutMs) + throws SQLException { + + final HostListProvider hostListProvider = this.getHostListProvider(); + if (!(hostListProvider instanceof BlockingHostListProvider)) { + throw new UnsupportedOperationException( + Messages.get("PluginServiceImpl.requiredBlockingHostListProvider", + new Object[] {hostListProvider.getClass().getName()})); + } + + try { + final List<HostSpec> updatedHostList = + ((BlockingHostListProvider) hostListProvider).forceRefresh(shouldVerifyWriter, timeoutMs); + if (updatedHostList != null) { + updateHostAvailability(updatedHostList); + setNodeList(this.allHosts, updatedHostList); + return true; + } + } catch (TimeoutException ex) { + // do nothing. + LOGGER.finest(Messages.get("PluginServiceImpl.forceRefreshTimeout", new Object[] {timeoutMs})); + } + return false; + } + + void setNodeList(@Nullable final List<HostSpec> oldHosts, + @Nullable final List<HostSpec> newHosts) { + + final Map<String, HostSpec> oldHostMap = oldHosts == null + ? new HashMap<>() + : oldHosts.stream().collect(Collectors.toMap(HostSpec::getUrl, (value) -> value)); + + final Map<String, HostSpec> newHostMap = newHosts == null + ? new HashMap<>() + : newHosts.stream().collect(Collectors.toMap(HostSpec::getUrl, (value) -> value)); + + final Map<String, EnumSet<NodeChangeOptions>> changes = new HashMap<>(); + + for (final Map.Entry<String, HostSpec> entry : oldHostMap.entrySet()) { + final HostSpec correspondingNewHost = newHostMap.get(entry.getKey()); + if (correspondingNewHost == null) { + // host deleted + changes.put(entry.getKey(), EnumSet.of(NodeChangeOptions.NODE_DELETED)); + } else { + // host maybe changed + final EnumSet<NodeChangeOptions> hostChanges = compare(entry.getValue(), correspondingNewHost); + if (!hostChanges.isEmpty()) { + changes.put(entry.getKey(), hostChanges); + } + } + } + + for (final Map.Entry<String, HostSpec> entry : newHostMap.entrySet()) { + if (!oldHostMap.containsKey(entry.getKey())) { + // host added + changes.put(entry.getKey(), EnumSet.of(NodeChangeOptions.NODE_ADDED)); + } + } + + if (!changes.isEmpty()) { + this.allHosts = newHosts != null ? newHosts : new ArrayList<>(); + this.pluginManager.notifyNodeListChanged(changes); + } + } + + @Override + public boolean isStaticHostListProvider() { + return this.getHostListProvider() instanceof StaticHostListProvider; + } + + @Override + public void setHostListProvider(final HostListProvider hostListProvider) { + this.hostListProvider = hostListProvider; + } + + @Override + public Connection connect(final HostSpec hostSpec, final Properties props) throws SQLException { + throw new UnsupportedOperationException( + Messages.get("PartialPluginService.unexpectedMethodCall", new Object[] {"connect"})); + } + + @Override + public Connection connect( + final HostSpec hostSpec, + final Properties props, + final @Nullable ConnectionPlugin pluginToSkip) + throws SQLException { + throw new UnsupportedOperationException( + Messages.get("PartialPluginService.unexpectedMethodCall", new Object[] {"connect"})); + } + + @Override + public Connection forceConnect( + final HostSpec hostSpec, + final Properties props) + throws SQLException { + throw new UnsupportedOperationException( + Messages.get("PartialPluginService.unexpectedMethodCall", new Object[] {"forceConnect"})); + } + + @Override + public Connection forceConnect( + final HostSpec hostSpec, + final Properties props, + final @Nullable ConnectionPlugin pluginToSkip) + throws SQLException { + throw new UnsupportedOperationException( + Messages.get("PartialPluginService.unexpectedMethodCall", new Object[] {"forceConnect"})); + } + + private void updateHostAvailability(final List<HostSpec> hosts) { + for (final HostSpec host : hosts) { + final HostAvailability availability = hostAvailabilityExpiringCache.get(host.getUrl()); + if (availability != null) { + host.setAvailability(availability); + } + } + } + + @Override + public void releaseResources() { + throw new UnsupportedOperationException( + Messages.get("PartialPluginService.unexpectedMethodCall", new Object[] {"releaseResources"})); + } + + @Override + public boolean isNetworkException(Throwable throwable) { + return this.isNetworkException(throwable, this.targetDriverDialect); + } + + @Override + public boolean isNetworkException(final Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect) { + if (this.exceptionHandler != null) { + return this.exceptionHandler.isNetworkException(throwable, targetDriverDialect); + } + return this.exceptionManager.isNetworkException(this.dbDialect, throwable, targetDriverDialect); + } + + @Override + public boolean isNetworkException(final String sqlState) { + if (this.exceptionHandler != null) { + return this.exceptionHandler.isNetworkException(sqlState); + } + return
this.exceptionManager.isNetworkException(this.dbDialect, sqlState); + } + + @Override + public boolean isLoginException(Throwable throwable) { + return this.isLoginException(throwable, this.targetDriverDialect); + } + + @Override + public boolean isLoginException(final Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect) { + if (this.exceptionHandler != null) { + return this.exceptionHandler.isLoginException(throwable, targetDriverDialect); + } + return this.exceptionManager.isLoginException(this.dbDialect, throwable, targetDriverDialect); + } + + @Override + public boolean isLoginException(final String sqlState) { + if (this.exceptionHandler != null) { + return this.exceptionHandler.isLoginException(sqlState); + } + return this.exceptionManager.isLoginException(this.dbDialect, sqlState); + } + + @Override + public Dialect getDialect() { + return this.dbDialect; + } + + @Override + public TargetDriverDialect getTargetDriverDialect() { + return this.targetDriverDialect; + } + + @Override + public void updateDialect(final @NonNull Connection connection) { + // do nothing. This method is called after connecting in DefaultConnectionPlugin but the dialect passed to the + // constructor should already be updated and verified. + } + + @Override + public HostSpec identifyConnection(Connection connection) throws SQLException { + return this.getHostListProvider().identifyConnection(connection); + } + + @Override + public void fillAliases(Connection connection, HostSpec hostSpec) throws SQLException { + if (hostSpec == null) { + return; + } + + if (!hostSpec.getAliases().isEmpty()) { + LOGGER.finest(() -> Messages.get("PluginServiceImpl.nonEmptyAliases", new Object[] {hostSpec.getAliases()})); + return; + } + + hostSpec.addAlias(hostSpec.asAlias()); + + // Add the host name and port, this host name is usually the internal IP address. + try (final Statement stmt = connection.createStatement()) { + try (final ResultSet rs = stmt.executeQuery(this.getDialect().getHostAliasQuery())) { + while (rs.next()) { + hostSpec.addAlias(rs.getString(1)); + } + } + } catch (final SQLException sqlException) { + // log and ignore + LOGGER.finest(() -> Messages.get("PluginServiceImpl.failedToRetrieveHostPort")); + } + + // Add the instance endpoint if the current connection is associated with a topology aware database cluster. 
+ final HostSpec host = this.identifyConnection(connection); + if (host != null) { + hostSpec.addAlias(host.asAliases().toArray(new String[] {})); + } + } + + @Override + public HostSpecBuilder getHostSpecBuilder() { + return new HostSpecBuilder(new HostAvailabilityStrategyFactory().create(this.props)); + } + + @Override + public Properties getProperties() { + return this.props; + } + + public TelemetryFactory getTelemetryFactory() { + return this.pluginManager.getTelemetryFactory(); + } + + public String getTargetName() { + return this.pluginManager.getDefaultConnProvider().getTargetName(); + } + + @Override + public @NonNull SessionStateService getSessionStateService() { + throw new UnsupportedOperationException( + Messages.get("PartialPluginService.unexpectedMethodCall", new Object[] {"getSessionStateService"})); + } + + public <T> T getPlugin(final Class<T> pluginClazz) { + for (ConnectionPlugin p : this.pluginManager.plugins) { + if (pluginClazz.isAssignableFrom(p.getClass())) { + return pluginClazz.cast(p); + } + } + return null; + } + + @Override + public <T> void setStatus(Class<T> clazz, @Nullable T status, boolean clusterBound) { + throw new UnsupportedOperationException( + Messages.get("PartialPluginService.unexpectedMethodCall", new Object[] {"setStatus"})); + } + + @Override + public <T> void setStatus(Class<T> clazz, @Nullable T status, String key) { + throw new UnsupportedOperationException( + Messages.get("PartialPluginService.unexpectedMethodCall", new Object[] {"setStatus"})); + } + + @Override + public <T> T getStatus(@NonNull Class<T> clazz, boolean clusterBound) { + throw new UnsupportedOperationException( + Messages.get("PartialPluginService.unexpectedMethodCall", new Object[] {"getStatus"})); + } + + @Override + public <T> T getStatus(@NonNull Class<T> clazz, String key) { + throw new UnsupportedOperationException( + Messages.get("PartialPluginService.unexpectedMethodCall", new Object[] {"getStatus"})); + } + + public boolean isPluginInUse(final Class<? extends ConnectionPlugin> pluginClazz) { + try { + return this.pluginManager.isWrapperFor(pluginClazz); + } catch (SQLException e) { + return false; + } + } + + public static void clearCache() { + hostAvailabilityExpiringCache.clear(); + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/PluginService.java b/wrapper/src/main/java/software/amazon/jdbc/PluginService.java index 77fd3c976..fe4dc8cc6 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/PluginService.java +++ b/wrapper/src/main/java/software/amazon/jdbc/PluginService.java @@ -87,7 +87,9 @@ EnumSet<NodeChangeOptions> setCurrentConnection( * Set the collection of hosts that should be allowed and/or blocked for connections. * * @param allowedAndBlockedHosts An object defining the allowed and blocked sets of hosts. + * @deprecated use StorageService#set(key, allowedAndBlockedHosts) instead. */ + @Deprecated void setAllowedAndBlockedHosts(AllowedAndBlockedHosts allowedAndBlockedHosts); /** diff --git a/wrapper/src/main/java/software/amazon/jdbc/PluginServiceImpl.java b/wrapper/src/main/java/software/amazon/jdbc/PluginServiceImpl.java index f53e56042..38695c144 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/PluginServiceImpl.java +++ b/wrapper/src/main/java/software/amazon/jdbc/PluginServiceImpl.java @@ -31,7 +31,6 @@ import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.ReentrantLock; import java.util.logging.Logger; import java.util.stream.Collectors; @@ -51,9 +50,10 @@ import software.amazon.jdbc.states.SessionStateService; import software.amazon.jdbc.states.SessionStateServiceImpl; import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; -import software.amazon.jdbc.util.CacheMap; +import software.amazon.jdbc.util.FullServicesContainer; import software.amazon.jdbc.util.Messages; import software.amazon.jdbc.util.Utils; +import software.amazon.jdbc.util.storage.CacheMap; import software.amazon.jdbc.util.telemetry.TelemetryFactory; public class PluginServiceImpl implements PluginService, CanReleaseResources, @@ -63,6 +63,7 @@ public class PluginServiceImpl implements PluginService, CanReleaseResources, protected static final long DEFAULT_HOST_AVAILABILITY_CACHE_EXPIRE_NANO = TimeUnit.MINUTES.toNanos(5); protected static final CacheMap<String, HostAvailability> hostAvailabilityExpiringCache = new CacheMap<>(); + protected final FullServicesContainer servicesContainer; protected static final CacheMap<String, Object> statusesExpiringCache = new CacheMap<>(); protected static final long DEFAULT_STATUS_CACHE_EXPIRE_NANO = TimeUnit.MINUTES.toNanos(60); @@ -73,7 +74,6 @@ public class PluginServiceImpl implements PluginService, CanReleaseResources, private final String driverProtocol; protected volatile HostListProvider hostListProvider; protected List<HostSpec> allHosts = new ArrayList<>(); - protected AtomicReference<AllowedAndBlockedHosts> allowedAndBlockedHosts = new AtomicReference<>(); protected Connection currentConnection; protected HostSpec currentHostSpec; protected HostSpec initialConnectionHostSpec; @@ -91,14 +91,15 @@ public class PluginServiceImpl implements PluginService, CanReleaseResources, protected final ReentrantLock connectionSwitchLock = new ReentrantLock(); public PluginServiceImpl( - @NonNull final ConnectionPluginManager pluginManager, + @NonNull final FullServicesContainer servicesContainer, @NonNull final Properties props, @NonNull final String originalUrl, @NonNull final String targetDriverProtocol, @NonNull final TargetDriverDialect targetDriverDialect) throws SQLException { - this(pluginManager, + this( + servicesContainer, new ExceptionManager(), props, originalUrl, @@ -110,13 +111,14 @@ public PluginServiceImpl( } public PluginServiceImpl( - @NonNull final ConnectionPluginManager pluginManager, + @NonNull final FullServicesContainer servicesContainer, @NonNull final Properties props, @NonNull final String originalUrl, @NonNull final String targetDriverProtocol, @NonNull final TargetDriverDialect targetDriverDialect, @Nullable final ConfigurationProfile configurationProfile) throws SQLException { - this(pluginManager, + this( + servicesContainer, new ExceptionManager(), props, originalUrl, @@ -128,7 +130,7 @@ public PluginServiceImpl( } public PluginServiceImpl( - @NonNull final ConnectionPluginManager pluginManager, + @NonNull final FullServicesContainer
servicesContainer, @NonNull final ExceptionManager exceptionManager, @NonNull final Properties props, @NonNull final String originalUrl, @@ -137,7 +139,8 @@ public PluginServiceImpl( @NonNull final TargetDriverDialect targetDriverDialect, @Nullable final ConfigurationProfile configurationProfile, @Nullable final SessionStateService sessionStateService) throws SQLException { - this.pluginManager = pluginManager; + this.servicesContainer = servicesContainer; + this.pluginManager = servicesContainer.getConnectionPluginManager(); this.props = props; this.originalUrl = originalUrl; this.driverProtocol = targetDriverProtocol; @@ -215,8 +218,9 @@ public String getOriginalUrl() { } @Override + @Deprecated public void setAllowedAndBlockedHosts(AllowedAndBlockedHosts allowedAndBlockedHosts) { - this.allowedAndBlockedHosts.set(allowedAndBlockedHosts); + this.servicesContainer.getStorageService().set(this.initialConnectionHostSpec.getHost(), allowedAndBlockedHosts); } @Override @@ -410,7 +414,8 @@ public List getAllHosts() { @Override public List getHosts() { - AllowedAndBlockedHosts hostPermissions = this.allowedAndBlockedHosts.get(); + AllowedAndBlockedHosts hostPermissions = this.servicesContainer.getStorageService() + .get(AllowedAndBlockedHosts.class, this.initialConnectionHostSpec.getUrl()); if (hostPermissions == null) { return this.allHosts; } @@ -726,7 +731,7 @@ public void updateDialect(final @NonNull Connection connection) throws SQLExcept } final HostListProviderSupplier supplier = this.dialect.getHostListProvider(); - this.setHostListProvider(supplier.getProvider(props, this.originalUrl, this, this)); + this.setHostListProvider(supplier.getProvider(this.props, this.originalUrl, this.servicesContainer)); this.refreshHostList(connection); } @@ -803,6 +808,7 @@ public static void clearCache() { hostAvailabilityExpiringCache.clear(); } + @Deprecated // Use StorageService#set instead. public void setStatus(final Class clazz, final @Nullable T status, final boolean clusterBound) { String clusterId = null; if (clusterBound) { @@ -815,6 +821,7 @@ public void setStatus(final Class clazz, final @Nullable T status, final this.setStatus(clazz, status, clusterId); } + @Deprecated // Use StorageService#set instead. public void setStatus(final Class clazz, final @Nullable T status, final String key) { final String cacheKey = this.getStatusCacheKey(clazz, key); if (status == null) { @@ -824,6 +831,7 @@ public void setStatus(final Class clazz, final @Nullable T status, final } } + @Deprecated // Use StorageService#get instead. public T getStatus(final @NonNull Class clazz, final boolean clusterBound) { String clusterId = null; if (clusterBound) { @@ -836,6 +844,7 @@ public T getStatus(final @NonNull Class clazz, final boolean clusterBound return this.getStatus(clazz, clusterId); } + @Deprecated // Use StorageService#get instead. 
public T getStatus(final @NonNull Class clazz, String key) { return clazz.cast(statusesExpiringCache.get(this.getStatusCacheKey(clazz, key))); } diff --git a/wrapper/src/main/java/software/amazon/jdbc/RoundRobinHostSelector.java b/wrapper/src/main/java/software/amazon/jdbc/RoundRobinHostSelector.java index b458677d6..205af0574 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/RoundRobinHostSelector.java +++ b/wrapper/src/main/java/software/amazon/jdbc/RoundRobinHostSelector.java @@ -30,9 +30,9 @@ import org.checkerframework.checker.nullness.qual.NonNull; import org.checkerframework.checker.nullness.qual.Nullable; import software.amazon.jdbc.hostavailability.HostAvailability; -import software.amazon.jdbc.util.CacheMap; import software.amazon.jdbc.util.Messages; import software.amazon.jdbc.util.StringUtils; +import software.amazon.jdbc.util.storage.CacheMap; public class RoundRobinHostSelector implements HostSelector { public static final AwsWrapperProperty ROUND_ROBIN_HOST_WEIGHT_PAIRS = new AwsWrapperProperty( diff --git a/wrapper/src/main/java/software/amazon/jdbc/ServicesContainerPluginFactory.java b/wrapper/src/main/java/software/amazon/jdbc/ServicesContainerPluginFactory.java new file mode 100644 index 000000000..54be6db78 --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/ServicesContainerPluginFactory.java @@ -0,0 +1,39 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc; + +import java.util.Properties; +import software.amazon.jdbc.util.FullServicesContainer; + +/** + * A factory for plugins that utilizes a {@link FullServicesContainer}. This interface extends + * {@link ConnectionPluginFactory} to provide additional flexibility in plugin instantiation while maintaining backward + * compatibility. + * + *

Implementations of this interface can access all services in the {@link FullServicesContainer} when creating + * connection plugins, rather than being limited to just the {@link PluginService}.
+ */ +public interface ServicesContainerPluginFactory extends ConnectionPluginFactory { + /** + * Get an instance of a {@link ConnectionPlugin}. + * + * @param servicesContainer the service container containing the services to be used by the {@link ConnectionPlugin}. + * @param props to be used by the {@link ConnectionPlugin}. + * @return an instance of a {@link ConnectionPlugin}. + */ + ConnectionPlugin getInstance(FullServicesContainer servicesContainer, Properties props); +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraMysqlDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraMysqlDialect.java index db96a78be..e64352899 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraMysqlDialect.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraMysqlDialect.java @@ -22,6 +22,7 @@ import java.sql.Statement; import java.util.Collections; import java.util.List; +import software.amazon.jdbc.PluginService; import software.amazon.jdbc.hostlistprovider.AuroraHostListProvider; import software.amazon.jdbc.hostlistprovider.monitoring.MonitoringRdsHostListProvider; import software.amazon.jdbc.plugin.failover2.FailoverConnectionPlugin; @@ -88,23 +89,22 @@ public boolean isDialect(final Connection connection) { @Override public HostListProviderSupplier getHostListProvider() { - return (properties, initialUrl, hostListProviderService, pluginService) -> { - + return (properties, initialUrl, servicesContainer) -> { + final PluginService pluginService = servicesContainer.getPluginService(); if (pluginService.isPluginInUse(FailoverConnectionPlugin.class)) { return new MonitoringRdsHostListProvider( properties, initialUrl, - hostListProviderService, + servicesContainer, TOPOLOGY_QUERY, NODE_ID_QUERY, IS_READER_QUERY, - IS_WRITER_QUERY, - pluginService); + IS_WRITER_QUERY); } return new AuroraHostListProvider( properties, initialUrl, - hostListProviderService, + servicesContainer, TOPOLOGY_QUERY, NODE_ID_QUERY, IS_READER_QUERY); diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraPgDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraPgDialect.java index 612b8aebc..ad21fb9f9 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraPgDialect.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/AuroraPgDialect.java @@ -21,6 +21,7 @@ import java.sql.SQLException; import java.sql.Statement; import java.util.logging.Logger; +import software.amazon.jdbc.PluginService; import software.amazon.jdbc.hostlistprovider.AuroraHostListProvider; import software.amazon.jdbc.hostlistprovider.monitoring.MonitoringRdsHostListProvider; import software.amazon.jdbc.plugin.failover2.FailoverConnectionPlugin; @@ -134,23 +135,22 @@ public boolean isDialect(final Connection connection) { @Override public HostListProviderSupplier getHostListProvider() { - return (properties, initialUrl, hostListProviderService, pluginService) -> { - + return (properties, initialUrl, servicesContainer) -> { + final PluginService pluginService = servicesContainer.getPluginService(); if (pluginService.isPluginInUse(FailoverConnectionPlugin.class)) { return new MonitoringRdsHostListProvider( properties, initialUrl, - hostListProviderService, + servicesContainer, TOPOLOGY_QUERY, NODE_ID_QUERY, IS_READER_QUERY, - IS_WRITER_QUERY, - pluginService); + IS_WRITER_QUERY); } return new AuroraHostListProvider( properties, initialUrl, - hostListProviderService, + servicesContainer, TOPOLOGY_QUERY, NODE_ID_QUERY, IS_READER_QUERY); diff 
--git a/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectManager.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectManager.java index 8dd6e262a..d29a1b3dd 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectManager.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/DialectManager.java @@ -30,13 +30,13 @@ import software.amazon.jdbc.HostSpec; import software.amazon.jdbc.PluginService; import software.amazon.jdbc.PropertyDefinition; -import software.amazon.jdbc.util.CacheMap; import software.amazon.jdbc.util.ConnectionUrlParser; import software.amazon.jdbc.util.Messages; import software.amazon.jdbc.util.RdsUrlType; import software.amazon.jdbc.util.RdsUtils; import software.amazon.jdbc.util.StringUtils; import software.amazon.jdbc.util.Utils; +import software.amazon.jdbc.util.storage.CacheMap; public class DialectManager implements DialectProvider { diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/HostListProviderSupplier.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/HostListProviderSupplier.java index ae10b389b..0dfe44dc5 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/HostListProviderSupplier.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/HostListProviderSupplier.java @@ -19,14 +19,12 @@ import java.util.Properties; import org.checkerframework.checker.nullness.qual.NonNull; import software.amazon.jdbc.HostListProvider; -import software.amazon.jdbc.HostListProviderService; -import software.amazon.jdbc.PluginService; +import software.amazon.jdbc.util.FullServicesContainer; @FunctionalInterface public interface HostListProviderSupplier { @NonNull HostListProvider getProvider( final @NonNull Properties properties, final String initialUrl, - final @NonNull HostListProviderService hostListProviderService, - final @NonNull PluginService pluginService); + final @NonNull FullServicesContainer servicesContainer); } diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/MariaDbDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/MariaDbDialect.java index 9c27ff08f..3b368a8a1 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/MariaDbDialect.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/MariaDbDialect.java @@ -104,8 +104,8 @@ public List getDialectUpdateCandidates() { } public HostListProviderSupplier getHostListProvider() { - return (properties, initialUrl, hostListProviderService, pluginService) -> - new ConnectionStringHostListProvider(properties, initialUrl, hostListProviderService); + return (properties, initialUrl, servicesContainer) -> + new ConnectionStringHostListProvider(properties, initialUrl, servicesContainer.getHostListProviderService()); } @Override diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/MysqlDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/MysqlDialect.java index 64dcf24bd..de9f181d3 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/MysqlDialect.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/MysqlDialect.java @@ -105,8 +105,8 @@ public List getDialectUpdateCandidates() { } public HostListProviderSupplier getHostListProvider() { - return (properties, initialUrl, hostListProviderService, pluginService) -> - new ConnectionStringHostListProvider(properties, initialUrl, hostListProviderService); + return (properties, initialUrl, servicesContainer) -> + new ConnectionStringHostListProvider(properties, initialUrl, servicesContainer.getHostListProviderService()); } 
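Taken together, the new ServicesContainerPluginFactory interface and the reworked HostListProviderSupplier signature above shift plugin and host-list-provider construction onto the FullServicesContainer. The sketch below is illustrative only and is not part of this change set: the class names are invented, and the constructor and method signatures are assumed from the surrounding hunks (including the BlueGreenConnectionPlugin constructor changed later in this diff).

```java
// Minimal, hypothetical sketch: class names are invented; the wrapper types and
// signatures are taken from the hunks in this diff.
import java.util.Properties;
import software.amazon.jdbc.ConnectionPlugin;
import software.amazon.jdbc.ServicesContainerPluginFactory;
import software.amazon.jdbc.dialect.HostListProviderSupplier;
import software.amazon.jdbc.hostlistprovider.ConnectionStringHostListProvider;
import software.amazon.jdbc.plugin.bluegreen.BlueGreenConnectionPlugin;
import software.amazon.jdbc.util.FullServicesContainer;

// A plugin factory that builds its plugin from the full services container, as the
// ServicesContainerPluginFactory javadoc describes. Declared abstract so the sketch
// does not need to restate the pre-existing ConnectionPluginFactory methods.
abstract class ContainerAwarePluginFactory implements ServicesContainerPluginFactory {
  @Override
  public ConnectionPlugin getInstance(final FullServicesContainer servicesContainer, final Properties props) {
    // Every service on the container (storage, monitoring, plugin service, ...) is
    // reachable here; this example forwards the container to a plugin whose
    // constructor now accepts it.
    return new BlueGreenConnectionPlugin(servicesContainer, props);
  }
}

// The reworked HostListProviderSupplier: the lambda receives a single
// FullServicesContainer and pulls out the services it needs, instead of taking
// HostListProviderService and PluginService as separate parameters.
class ContainerAwareDialectSnippet {
  public HostListProviderSupplier getHostListProvider() {
    return (properties, initialUrl, servicesContainer) ->
        new ConnectionStringHostListProvider(
            properties, initialUrl, servicesContainer.getHostListProviderService());
  }
}
```

Passing the whole container keeps these signatures stable if further shared services are added later, at the cost of exposing a broader dependency surface to implementers.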
@Override diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/PgDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/PgDialect.java index b3b89af41..87c4c705c 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/PgDialect.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/PgDialect.java @@ -106,8 +106,8 @@ public List getDialectUpdateCandidates() { @Override public HostListProviderSupplier getHostListProvider() { - return (properties, initialUrl, hostListProviderService, pluginService) -> - new ConnectionStringHostListProvider(properties, initialUrl, hostListProviderService); + return (properties, initialUrl, servicesContainer) -> + new ConnectionStringHostListProvider(properties, initialUrl, servicesContainer.getHostListProviderService()); } @Override diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMultiAzDbClusterMysqlDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMultiAzDbClusterMysqlDialect.java index bf3e773b8..930cf1631 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMultiAzDbClusterMysqlDialect.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMultiAzDbClusterMysqlDialect.java @@ -25,6 +25,7 @@ import java.util.Properties; import org.checkerframework.checker.nullness.qual.NonNull; import software.amazon.jdbc.HostSpec; +import software.amazon.jdbc.PluginService; import software.amazon.jdbc.hostlistprovider.RdsMultiAzDbClusterListProvider; import software.amazon.jdbc.hostlistprovider.monitoring.MonitoringRdsMultiAzHostListProvider; import software.amazon.jdbc.plugin.failover.FailoverRestriction; @@ -93,17 +94,16 @@ public boolean isDialect(final Connection connection) { @Override public HostListProviderSupplier getHostListProvider() { - return (properties, initialUrl, hostListProviderService, pluginService) -> { - + return (properties, initialUrl, servicesContainer) -> { + final PluginService pluginService = servicesContainer.getPluginService(); if (pluginService.isPluginInUse(FailoverConnectionPlugin.class)) { return new MonitoringRdsMultiAzHostListProvider( properties, initialUrl, - hostListProviderService, + servicesContainer, TOPOLOGY_QUERY, NODE_ID_QUERY, IS_READER_QUERY, - pluginService, FETCH_WRITER_NODE_QUERY, FETCH_WRITER_NODE_QUERY_COLUMN_NAME); @@ -111,7 +111,7 @@ public HostListProviderSupplier getHostListProvider() { return new RdsMultiAzDbClusterListProvider( properties, initialUrl, - hostListProviderService, + servicesContainer, TOPOLOGY_QUERY, NODE_ID_QUERY, IS_READER_QUERY, diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMultiAzDbClusterPgDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMultiAzDbClusterPgDialect.java index d6c7f7ab0..2466bd277 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMultiAzDbClusterPgDialect.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/RdsMultiAzDbClusterPgDialect.java @@ -22,6 +22,7 @@ import java.sql.Statement; import java.util.List; import java.util.logging.Logger; +import software.amazon.jdbc.PluginService; import software.amazon.jdbc.exceptions.ExceptionHandler; import software.amazon.jdbc.exceptions.MultiAzDbClusterPgExceptionHandler; import software.amazon.jdbc.hostlistprovider.RdsMultiAzDbClusterListProvider; @@ -112,17 +113,16 @@ public boolean isDialect(final Connection connection) { @Override public HostListProviderSupplier getHostListProvider() { - return (properties, initialUrl, hostListProviderService, pluginService) -> { - + return 
(properties, initialUrl, servicesContainer) -> { + final PluginService pluginService = servicesContainer.getPluginService(); if (pluginService.isPluginInUse(FailoverConnectionPlugin.class)) { return new MonitoringRdsMultiAzHostListProvider( properties, initialUrl, - hostListProviderService, + servicesContainer, TOPOLOGY_QUERY, NODE_ID_QUERY, IS_READER_QUERY, - pluginService, FETCH_WRITER_NODE_QUERY, FETCH_WRITER_NODE_QUERY_COLUMN_NAME); @@ -131,7 +131,7 @@ public HostListProviderSupplier getHostListProvider() { return new RdsMultiAzDbClusterListProvider( properties, initialUrl, - hostListProviderService, + servicesContainer, TOPOLOGY_QUERY, NODE_ID_QUERY, IS_READER_QUERY, diff --git a/wrapper/src/main/java/software/amazon/jdbc/dialect/UnknownDialect.java b/wrapper/src/main/java/software/amazon/jdbc/dialect/UnknownDialect.java index cb5482936..65b9eb544 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/dialect/UnknownDialect.java +++ b/wrapper/src/main/java/software/amazon/jdbc/dialect/UnknownDialect.java @@ -81,8 +81,8 @@ public List getDialectUpdateCandidates() { @Override public HostListProviderSupplier getHostListProvider() { - return (properties, initialUrl, hostListProviderService, pluginService) -> - new ConnectionStringHostListProvider(properties, initialUrl, hostListProviderService); + return (properties, initialUrl, servicesContainer) -> + new ConnectionStringHostListProvider(properties, initialUrl, servicesContainer.getHostListProviderService()); } @Override diff --git a/wrapper/src/main/java/software/amazon/jdbc/ds/AwsWrapperDataSource.java b/wrapper/src/main/java/software/amazon/jdbc/ds/AwsWrapperDataSource.java index b7eb2ab04..626fdcba2 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/ds/AwsWrapperDataSource.java +++ b/wrapper/src/main/java/software/amazon/jdbc/ds/AwsWrapperDataSource.java @@ -47,11 +47,16 @@ import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; import software.amazon.jdbc.targetdriverdialect.TargetDriverDialectManager; import software.amazon.jdbc.util.ConnectionUrlParser; +import software.amazon.jdbc.util.CoreServicesContainer; +import software.amazon.jdbc.util.FullServicesContainer; +import software.amazon.jdbc.util.FullServicesContainerImpl; import software.amazon.jdbc.util.Messages; import software.amazon.jdbc.util.PropertyUtils; import software.amazon.jdbc.util.SqlState; import software.amazon.jdbc.util.StringUtils; import software.amazon.jdbc.util.WrapperUtils; +import software.amazon.jdbc.util.monitoring.MonitorService; +import software.amazon.jdbc.util.storage.StorageService; import software.amazon.jdbc.util.telemetry.DefaultTelemetryFactory; import software.amazon.jdbc.util.telemetry.TelemetryContext; import software.amazon.jdbc.util.telemetry.TelemetryFactory; @@ -67,6 +72,9 @@ public class AwsWrapperDataSource implements DataSource, Referenceable, Serializ private static final String SERVER_NAME = "serverName"; private static final String SERVER_PORT = "serverPort"; + private final StorageService storageService; + private final MonitorService monitorService; + static { try { if (!Driver.isRegistered()) { @@ -89,6 +97,15 @@ public class AwsWrapperDataSource implements DataSource, Referenceable, Serializ protected @Nullable String database; private int loginTimeout = 0; + public AwsWrapperDataSource() { + this(CoreServicesContainer.getInstance()); + } + + public AwsWrapperDataSource(CoreServicesContainer coreServicesContainer) { + this.storageService = coreServicesContainer.getStorageService(); + this.monitorService = 
coreServicesContainer.getMonitorService(); + } + @Override public Connection getConnection() throws SQLException { setCredentialPropertiesFromUrl(this.jdbcUrl); @@ -256,14 +273,16 @@ ConnectionWrapper createConnectionWrapper( final @NonNull TargetDriverDialect targetDriverDialect, final @Nullable ConfigurationProfile configurationProfile, final TelemetryFactory telemetryFactory) throws SQLException { + FullServicesContainer + servicesContainer = new FullServicesContainerImpl(storageService, monitorService, telemetryFactory); return new ConnectionWrapper( + servicesContainer, props, url, defaultProvider, effectiveProvider, targetDriverDialect, - configurationProfile, - telemetryFactory); + configurationProfile); } public void setTargetDataSourceClassName(@Nullable final String dataSourceClassName) { diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/AuroraHostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/AuroraHostListProvider.java index 1d872c026..fc53f9e1d 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/AuroraHostListProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/AuroraHostListProvider.java @@ -19,7 +19,7 @@ import java.util.Properties; import java.util.logging.Logger; -import software.amazon.jdbc.HostListProviderService; +import software.amazon.jdbc.util.FullServicesContainer; public class AuroraHostListProvider extends RdsHostListProvider { @@ -29,13 +29,13 @@ public class AuroraHostListProvider extends RdsHostListProvider { public AuroraHostListProvider( final Properties properties, final String originalUrl, - final HostListProviderService hostListProviderService, + final FullServicesContainer servicesContainer, final String topologyQuery, final String nodeIdQuery, final String isReaderQuery) { super(properties, originalUrl, - hostListProviderService, + servicesContainer, topologyQuery, nodeIdQuery, isReaderQuery); diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsHostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsHostListProvider.java index 0d59b7c71..e1ef01aec 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsHostListProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsHostListProvider.java @@ -29,6 +29,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Map.Entry; import java.util.Objects; import java.util.Properties; @@ -48,14 +49,15 @@ import software.amazon.jdbc.HostSpecBuilder; import software.amazon.jdbc.PropertyDefinition; import software.amazon.jdbc.hostavailability.HostAvailability; -import software.amazon.jdbc.util.CacheMap; import software.amazon.jdbc.util.ConnectionUrlParser; +import software.amazon.jdbc.util.FullServicesContainer; import software.amazon.jdbc.util.Messages; import software.amazon.jdbc.util.RdsUrlType; import software.amazon.jdbc.util.RdsUtils; import software.amazon.jdbc.util.StringUtils; import software.amazon.jdbc.util.SynchronousExecutor; import software.amazon.jdbc.util.Utils; +import software.amazon.jdbc.util.storage.CacheMap; public class RdsHostListProvider implements DynamicHostListProvider { @@ -89,10 +91,10 @@ public class RdsHostListProvider implements DynamicHostListProvider { protected static final ConnectionUrlParser connectionUrlParser = new ConnectionUrlParser(); protected static final int defaultTopologyQueryTimeoutMs = 5000; protected static 
final long suggestedClusterIdRefreshRateNano = TimeUnit.MINUTES.toNanos(10); - protected static final CacheMap> topologyCache = new CacheMap<>(); protected static final CacheMap suggestedPrimaryClusterIdCache = new CacheMap<>(); protected static final CacheMap primaryClusterIdCache = new CacheMap<>(); + protected final FullServicesContainer servicesContainer; protected final HostListProviderService hostListProviderService; protected final String originalUrl; protected final String topologyQuery; @@ -125,13 +127,14 @@ public class RdsHostListProvider implements DynamicHostListProvider { public RdsHostListProvider( final Properties properties, final String originalUrl, - final HostListProviderService hostListProviderService, + final FullServicesContainer servicesContainer, final String topologyQuery, final String nodeIdQuery, final String isReaderQuery) { - this.hostListProviderService = hostListProviderService; this.properties = properties; this.originalUrl = originalUrl; + this.servicesContainer = servicesContainer; + this.hostListProviderService = servicesContainer.getHostListProviderService(); this.topologyQuery = topologyQuery; this.nodeIdQuery = nodeIdQuery; this.isReaderQuery = isReaderQuery; @@ -226,7 +229,7 @@ protected void init() throws SQLException { * Returns an empty list if isn't available or is invalid (doesn't contain a writer). * @throws SQLException if errors occurred while retrieving the topology. */ - public FetchTopologyResult getTopology(final Connection conn, final boolean forceUpdate) throws SQLException { + protected FetchTopologyResult getTopology(final Connection conn, final boolean forceUpdate) throws SQLException { init(); final String suggestedPrimaryClusterId = suggestedPrimaryClusterIdCache.get(this.clusterId); @@ -241,14 +244,14 @@ public FetchTopologyResult getTopology(final Connection conn, final boolean forc this.clusterIdChanged(oldClusterId); } - final List cachedHosts = topologyCache.get(this.clusterId); + final List storedHosts = this.getStoredTopology(); // This clusterId is a primary one and is about to create a new entry in the cache. // When a primary entry is created it needs to be suggested for other (non-primary) entries. // Remember a flag to do suggestion after cache is updated. 
- final boolean needToSuggest = cachedHosts == null && this.isPrimaryClusterId; + final boolean needToSuggest = storedHosts == null && this.isPrimaryClusterId; - if (cachedHosts == null || forceUpdate) { + if (storedHosts == null || forceUpdate) { // need to re-fetch topology @@ -262,7 +265,7 @@ public FetchTopologyResult getTopology(final Connection conn, final boolean forc final List hosts = queryForTopology(conn); if (!Utils.isNullOrEmpty(hosts)) { - topologyCache.put(this.clusterId, hosts, this.refreshRateNano); + this.servicesContainer.getStorageService().set(this.clusterId, new Topology(hosts)); if (needToSuggest) { this.suggestPrimaryCluster(hosts); } @@ -270,22 +273,27 @@ public FetchTopologyResult getTopology(final Connection conn, final boolean forc } } - if (cachedHosts == null) { + if (storedHosts == null) { return new FetchTopologyResult(false, this.initialHostList); } else { // use cached data - return new FetchTopologyResult(true, cachedHosts); + return new FetchTopologyResult(true, storedHosts); } } - protected void clusterIdChanged(final String oldClusterId) { + protected void clusterIdChanged(final String oldClusterId) throws SQLException { // do nothing } protected ClusterSuggestedResult getSuggestedClusterId(final String url) { - for (final Entry> entry : topologyCache.getEntries().entrySet()) { + Map entries = this.servicesContainer.getStorageService().getEntries(Topology.class); + if (entries == null) { + return null; + } + + for (final Entry entry : entries.entrySet()) { final String key = entry.getKey(); // clusterId - final List hosts = entry.getValue(); + final List hosts = entry.getValue().getHosts(); final boolean isPrimaryCluster = primaryClusterIdCache.get(key, false, suggestedClusterIdRefreshRateNano); if (key.equals(url)) { @@ -315,9 +323,14 @@ protected void suggestPrimaryCluster(final @NonNull List primaryCluste primaryClusterHostUrls.add(hostSpec.getUrl()); } - for (final Entry> entry : topologyCache.getEntries().entrySet()) { + Map entries = this.servicesContainer.getStorageService().getEntries(Topology.class); + if (entries == null) { + return; + } + + for (final Entry entry : entries.entrySet()) { final String clusterId = entry.getKey(); - final List clusterHosts = entry.getValue(); + final List clusterHosts = entry.getValue().getHosts(); final boolean isPrimaryCluster = primaryClusterIdCache.get(clusterId, false, suggestedClusterIdRefreshRateNano); final String suggestedPrimaryClusterId = suggestedPrimaryClusterIdCache.get(clusterId); @@ -492,15 +505,15 @@ protected String getHostEndpoint(final String nodeName) { * @return list of hosts that represents topology. If there's no topology in the cache or the * cached topology is outdated, it returns null. */ - public @Nullable List getCachedTopology() { - return topologyCache.get(this.clusterId); + public @Nullable List getStoredTopology() { + Topology topology = this.servicesContainer.getStorageService().get(Topology.class, this.clusterId); + return topology == null ? null : topology.getHosts(); } /** * Clear topology cache for all clusters. */ public static void clearAll() { - topologyCache.clear(); primaryClusterIdCache.clear(); suggestedPrimaryClusterIdCache.clear(); } @@ -509,7 +522,7 @@ public static void clearAll() { * Clear topology cache for the current cluster. 
*/ public void clear() { - topologyCache.remove(this.clusterId); + this.servicesContainer.getStorageService().remove(Topology.class, this.clusterId); } @Override @@ -555,7 +568,7 @@ public RdsUrlType getRdsUrlType() throws SQLException { } private void validateHostPatternSetting(final String hostPattern) { - if (!this.rdsHelper.isDnsPatternValid(hostPattern)) { + if (!rdsHelper.isDnsPatternValid(hostPattern)) { // "Invalid value for the 'clusterInstanceHostPattern' configuration setting - the host // pattern must contain a '?' // character as a placeholder for the DB instance identifiers of the instances in the cluster" @@ -564,7 +577,7 @@ private void validateHostPatternSetting(final String hostPattern) { throw new RuntimeException(message); } - final RdsUrlType rdsUrlType = this.rdsHelper.identifyRdsType(hostPattern); + final RdsUrlType rdsUrlType = rdsHelper.identifyRdsType(hostPattern); if (rdsUrlType == RdsUrlType.RDS_PROXY) { // "An RDS Proxy url can't be used as the 'clusterInstanceHostPattern' configuration setting." final String message = @@ -583,44 +596,7 @@ private void validateHostPatternSetting(final String hostPattern) { } } - public static void logCache() { - LOGGER.finest(() -> { - final StringBuilder sb = new StringBuilder(); - final Set>> cacheEntries = topologyCache.getEntries().entrySet(); - - if (cacheEntries.isEmpty()) { - sb.append("Cache is empty."); - return sb.toString(); - } - - for (final Entry> entry : cacheEntries) { - final List hosts = entry.getValue(); - final Boolean isPrimaryCluster = primaryClusterIdCache.get(entry.getKey()); - final String suggestedPrimaryClusterId = suggestedPrimaryClusterIdCache.get(entry.getKey()); - - if (sb.length() > 0) { - sb.append("\n"); - } - sb.append("[").append(entry.getKey()).append("]:\n") - .append("\tisPrimaryCluster: ") - .append(isPrimaryCluster != null && isPrimaryCluster).append("\n") - .append("\tsuggestedPrimaryCluster: ") - .append(suggestedPrimaryClusterId).append("\n") - .append("\tHosts: "); - - if (hosts == null) { - sb.append(""); - } else { - for (final HostSpec h : hosts) { - sb.append("\n\t").append(h); - } - } - } - return sb.toString(); - }); - } - - static class FetchTopologyResult { + protected static class FetchTopologyResult { public List hosts; public boolean isCachedData; diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsMultiAzDbClusterListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsMultiAzDbClusterListProvider.java index 02cd79d88..f6b5106a6 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsMultiAzDbClusterListProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/RdsMultiAzDbClusterListProvider.java @@ -32,6 +32,7 @@ import software.amazon.jdbc.HostRole; import software.amazon.jdbc.HostSpec; import software.amazon.jdbc.hostavailability.HostAvailability; +import software.amazon.jdbc.util.FullServicesContainer; import software.amazon.jdbc.util.Messages; public class RdsMultiAzDbClusterListProvider extends RdsHostListProvider { @@ -42,7 +43,7 @@ public class RdsMultiAzDbClusterListProvider extends RdsHostListProvider { public RdsMultiAzDbClusterListProvider( final Properties properties, final String originalUrl, - final HostListProviderService hostListProviderService, + final FullServicesContainer servicesContainer, final String topologyQuery, final String nodeIdQuery, final String isReaderQuery, @@ -51,7 +52,7 @@ public RdsMultiAzDbClusterListProvider( ) { super(properties, originalUrl, 
- hostListProviderService, + servicesContainer, topologyQuery, nodeIdQuery, isReaderQuery); diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/Topology.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/Topology.java new file mode 100644 index 000000000..c72d8f7aa --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/Topology.java @@ -0,0 +1,53 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc.hostlistprovider; + +import java.util.List; +import java.util.Objects; +import org.checkerframework.checker.nullness.qual.NonNull; +import software.amazon.jdbc.HostSpec; + +public class Topology { + private final @NonNull List hosts; + + public Topology(@NonNull List hosts) { + this.hosts = hosts; + } + + public @NonNull List getHosts() { + return hosts; + } + + @Override + public int hashCode() { + return Objects.hash(hosts); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + Topology other = (Topology) obj; + return Objects.equals(hosts, other.hosts); + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitor.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitor.java index 686dd5049..73ea62399 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitor.java +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitor.java @@ -22,8 +22,9 @@ import java.util.concurrent.TimeoutException; import org.checkerframework.checker.nullness.qual.Nullable; import software.amazon.jdbc.HostSpec; +import software.amazon.jdbc.util.monitoring.Monitor; -public interface ClusterTopologyMonitor extends AutoCloseable, Runnable { +public interface ClusterTopologyMonitor extends Monitor { boolean canDispose(); diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitorImpl.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitorImpl.java index 578dd9453..08630097d 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitorImpl.java +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/ClusterTopologyMonitorImpl.java @@ -45,10 +45,9 @@ import software.amazon.jdbc.HostListProviderService; import software.amazon.jdbc.HostRole; import software.amazon.jdbc.HostSpec; -import software.amazon.jdbc.PluginService; import software.amazon.jdbc.PropertyDefinition; import software.amazon.jdbc.hostavailability.HostAvailability; -import software.amazon.jdbc.util.CacheMap; +import software.amazon.jdbc.hostlistprovider.Topology; import software.amazon.jdbc.util.ExecutorFactory; import 
software.amazon.jdbc.util.Messages; import software.amazon.jdbc.util.PropertyUtils; @@ -56,15 +55,18 @@ import software.amazon.jdbc.util.StringUtils; import software.amazon.jdbc.util.SynchronousExecutor; import software.amazon.jdbc.util.Utils; +import software.amazon.jdbc.util.connection.ConnectionService; +import software.amazon.jdbc.util.monitoring.AbstractMonitor; +import software.amazon.jdbc.util.storage.StorageService; -public class ClusterTopologyMonitorImpl implements ClusterTopologyMonitor { +public class ClusterTopologyMonitorImpl extends AbstractMonitor implements ClusterTopologyMonitor { private static final Logger LOGGER = Logger.getLogger(ClusterTopologyMonitorImpl.class.getName()); protected static final String MONITORING_PROPERTY_PREFIX = "topology-monitoring-"; protected static final Executor networkTimeoutExecutor = new SynchronousExecutor(); protected static final RdsUtils rdsHelper = new RdsUtils(); - + protected static final long monitorTerminationTimeoutSec = 30; protected static final int defaultTopologyQueryTimeoutMs = 1000; protected static final int closeConnectionNetworkTimeoutMs = 500; @@ -78,12 +80,11 @@ public class ClusterTopologyMonitorImpl implements ClusterTopologyMonitor { protected final long refreshRateNano; protected final long highRefreshRateNano; - protected final long topologyCacheExpirationNano; protected final Properties properties; protected final Properties monitoringProperties; - protected final PluginService pluginService; protected final HostSpec initialHostSpec; - protected final CacheMap> topologyMap; + protected final StorageService storageService; + protected final ConnectionService connectionService; protected final String topologyQuery; protected final String nodeIdQuery; protected final String writerTopologyQuery; @@ -94,7 +95,6 @@ public class ClusterTopologyMonitorImpl implements ClusterTopologyMonitor { protected final AtomicReference writerHostSpec = new AtomicReference<>(null); protected final AtomicReference monitoringConnection = new AtomicReference<>(null); protected boolean isVerifiedWriterConnection = false; - protected final AtomicBoolean stop = new AtomicBoolean(false); protected long highRefreshRateEndTimeNano = 0; protected final Object topologyUpdated = new Object(); protected final AtomicBoolean requestToUpdateTopology = new AtomicBoolean(false); @@ -108,35 +108,30 @@ public class ClusterTopologyMonitorImpl implements ClusterTopologyMonitor { protected final AtomicReference nodeThreadsReaderConnection = new AtomicReference<>(null); protected final AtomicReference> nodeThreadsLatestTopology = new AtomicReference<>(null); - - protected final ExecutorService monitorExecutor = - ExecutorFactory.newSingleThreadExecutor("monitor"); - public ClusterTopologyMonitorImpl( final String clusterId, - final CacheMap> topologyMap, + final StorageService storageService, + final ConnectionService connectionService, final HostSpec initialHostSpec, final Properties properties, - final PluginService pluginService, final HostListProviderService hostListProviderService, final HostSpec clusterInstanceTemplate, final long refreshRateNano, final long highRefreshRateNano, - final long topologyCacheExpirationNano, final String topologyQuery, final String writerTopologyQuery, final String nodeIdQuery) { + super(monitorTerminationTimeoutSec); this.clusterId = clusterId; - this.topologyMap = topologyMap; - this.initialHostSpec = initialHostSpec; - this.pluginService = pluginService; + this.storageService = storageService; + this.connectionService = 
connectionService; this.hostListProviderService = hostListProviderService; + this.initialHostSpec = initialHostSpec; this.clusterInstanceTemplate = clusterInstanceTemplate; this.properties = properties; this.refreshRateNano = refreshRateNano; this.highRefreshRateNano = highRefreshRateNano; - this.topologyCacheExpirationNano = topologyCacheExpirationNano; this.topologyQuery = topologyQuery; this.writerTopologyQuery = writerTopologyQuery; this.nodeIdQuery = nodeIdQuery; @@ -161,9 +156,6 @@ public ClusterTopologyMonitorImpl( PropertyDefinition.CONNECT_TIMEOUT.set( this.monitoringProperties, String.valueOf(defaultConnectionTimeoutMs)); } - - this.monitorExecutor.submit(this); - this.monitorExecutor.shutdown(); // No more tasks are accepted by the pool. } @Override @@ -184,7 +176,7 @@ public List forceRefresh(final boolean shouldVerifyWriter, final long && System.nanoTime() < this.ignoreNewTopologyRequestsEndTimeNano.get()) { // Previous failover has just completed. We can use results of it without triggering a new topology update. - List currentHosts = this.topologyMap.get(this.clusterId); + List currentHosts = getStoredHosts(); LOGGER.finest( Utils.logTopology(currentHosts, Messages.get("ClusterTopologyMonitorImpl.ignoringTopologyRequest"))); if (currentHosts != null) { @@ -196,7 +188,7 @@ public List forceRefresh(final boolean shouldVerifyWriter, final long final Connection monitoringConnection = this.monitoringConnection.get(); this.monitoringConnection.set(null); this.isVerifiedWriterConnection = false; - this.closeConnection(monitoringConnection, true); + this.closeConnection(monitoringConnection); } return this.waitTillTopologyGetsUpdated(timeoutMs); @@ -216,8 +208,7 @@ public List forceRefresh(@Nullable Connection connection, final long t } protected List waitTillTopologyGetsUpdated(final long timeoutMs) throws TimeoutException { - - List currentHosts = this.topologyMap.get(this.clusterId); + List currentHosts = getStoredHosts(); List latestHosts; synchronized (this.requestToUpdateTopology) { @@ -237,7 +228,7 @@ protected List waitTillTopologyGetsUpdated(final long timeoutMs) throw // Note that we are checking reference equality instead of value equality here. We will break out of the loop if // there is a new entry in the topology map, even if the value of the hosts in latestHosts is the same as // currentHosts. - while (currentHosts == (latestHosts = this.topologyMap.get(this.clusterId)) + while (currentHosts == (latestHosts = getStoredHosts()) && System.nanoTime() < end) { try { synchronized (this.topologyUpdated) { @@ -259,9 +250,13 @@ protected List waitTillTopologyGetsUpdated(final long timeoutMs) throw return latestHosts; } + private List getStoredHosts() { + Topology topology = storageService.get(Topology.class, this.clusterId); + return topology == null ? null : topology.getHosts(); + } + @Override - public void close() throws Exception { - this.stop.set(true); + public void stop() { this.nodeThreadsStop.set(true); this.shutdownNodeExecutorService(); @@ -271,20 +266,25 @@ public void close() throws Exception { this.requestToUpdateTopology.notifyAll(); } - // Waiting for 30s gives a thread enough time to exit monitoring loop and close database connection. 
- if (!this.monitorExecutor.awaitTermination(30, TimeUnit.SECONDS)) { - this.monitorExecutor.shutdownNow(); - } + super.stop(); } @Override - public void run() { + public void close() { + this.closeConnection(this.monitoringConnection.get()); + this.closeConnection(this.nodeThreadsWriterConnection.get()); + this.closeConnection(this.nodeThreadsReaderConnection.get()); + } + + @Override + public void monitor() { try { LOGGER.finest(() -> Messages.get( "ClusterTopologyMonitorImpl.startMonitoringThread", new Object[]{this.initialHostSpec.getHost()})); - while (!this.stop.get()) { + while (!this.stop.get() && !Thread.currentThread().isInterrupted()) { + this.lastActivityTimestampNanos.set(System.nanoTime()); if (this.isInPanicMode()) { @@ -298,7 +298,7 @@ public void run() { this.nodeThreadsWriterHostSpec.set(null); this.nodeThreadsLatestTopology.set(null); - List hosts = this.topologyMap.get(this.clusterId); + List hosts = getStoredHosts(); if (hosts == null) { // need any connection to get topology hosts = this.openAnyConnectionAndUpdateTopology(); @@ -394,7 +394,7 @@ public void run() { // Do not log topology while in high refresh rate. It's noisy! if (this.highRefreshRateEndTimeNano == 0) { - LOGGER.finest(Utils.logTopology(this.topologyMap.get(this.clusterId))); + LOGGER.finest(Utils.logTopology(getStoredHosts())); } this.delay(false); @@ -408,7 +408,6 @@ public void run() { } catch (final InterruptedException intEx) { Thread.currentThread().interrupt(); - } catch (final Exception ex) { // this should not be reached; log and exit thread if (LOGGER.isLoggable(Level.FINEST)) { @@ -490,7 +489,7 @@ protected List openAnyConnectionAndUpdateTopology() { // open a new connection try { - conn = this.pluginService.forceConnect(this.initialHostSpec, this.monitoringProperties); + conn = this.connectionService.open(this.initialHostSpec, this.monitoringProperties); } catch (SQLException ex) { // can't connect return null; @@ -571,18 +570,12 @@ protected String getNodeId(final Connection connection) { } protected void closeConnection(final @Nullable Connection connection) { - this.closeConnection(connection, true); - } - - protected void closeConnection(final @Nullable Connection connection, final boolean unstableConnection) { try { if (connection != null && !connection.isClosed()) { - if (unstableConnection) { - try { - connection.setNetworkTimeout(networkTimeoutExecutor, closeConnectionNetworkTimeoutMs); - } catch (SQLException ex) { - // do nothing - } + try { + connection.setNetworkTimeout(networkTimeoutExecutor, closeConnectionNetworkTimeoutMs); + } catch (SQLException ex) { + // do nothing } connection.close(); } @@ -629,7 +622,7 @@ protected void delay(boolean useHighRefreshRate) throws InterruptedException { protected void updateTopologyCache(final @NonNull List hosts) { synchronized (this.requestToUpdateTopology) { - this.topologyMap.put(this.clusterId, hosts, this.topologyCacheExpirationNano); + storageService.set(this.clusterId, new Topology(hosts)); synchronized (this.topologyUpdated) { this.requestToUpdateTopology.set(false); @@ -822,14 +815,12 @@ public void run() { if (connection == null) { try { - connection = this.monitor.pluginService.forceConnect( + connection = this.monitor.connectionService.open( hostSpec, this.monitor.monitoringProperties); - this.monitor.pluginService.setAvailability( - hostSpec.asAliases(), HostAvailability.AVAILABLE); } catch (SQLException ex) { - // connect issues - this.monitor.pluginService.setAvailability( - hostSpec.asAliases(), 
HostAvailability.NOT_AVAILABLE); + // A problem occurred while connecting. We will try again on the next iteration. + TimeUnit.MILLISECONDS.sleep(100); + continue; } } @@ -864,8 +855,7 @@ public void run() { this.monitor.fetchTopologyAndUpdateCache(connection); this.monitor.nodeThreadsWriterHostSpec.set(hostSpec); this.monitor.nodeThreadsStop.set(true); - LOGGER.fine(Utils.logTopology( - this.monitor.topologyMap.get(this.monitor.clusterId))); + LOGGER.fine(Utils.logTopology(this.monitor.getStoredHosts())); } // Setting the connection to null here prevents the final block diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringRdsHostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringRdsHostListProvider.java index 398f9b605..29aae6ddd 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringRdsHostListProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringRdsHostListProvider.java @@ -25,13 +25,16 @@ import java.util.logging.Logger; import software.amazon.jdbc.AwsWrapperProperty; import software.amazon.jdbc.BlockingHostListProvider; -import software.amazon.jdbc.HostListProviderService; import software.amazon.jdbc.HostSpec; import software.amazon.jdbc.PluginService; import software.amazon.jdbc.PropertyDefinition; import software.amazon.jdbc.cleanup.CanReleaseResources; import software.amazon.jdbc.hostlistprovider.RdsHostListProvider; -import software.amazon.jdbc.util.SlidingExpirationCacheWithCleanupThread; +import software.amazon.jdbc.hostlistprovider.Topology; +import software.amazon.jdbc.util.FullServicesContainer; +import software.amazon.jdbc.util.connection.ConnectionService; +import software.amazon.jdbc.util.monitoring.MonitorService; +import software.amazon.jdbc.util.storage.StorageService; public class MonitoringRdsHostListProvider extends RdsHostListProvider implements BlockingHostListProvider, CanReleaseResources { @@ -44,27 +47,11 @@ public class MonitoringRdsHostListProvider extends RdsHostListProvider "100", "Cluster topology high refresh rate in millis."); - protected static final long CACHE_CLEANUP_NANO = TimeUnit.MINUTES.toNanos(1); - protected static final long MONITOR_EXPIRATION_NANO = TimeUnit.MINUTES.toNanos(15); - - protected static final long TOPOLOGY_CACHE_EXPIRATION_NANO = TimeUnit.MINUTES.toNanos(5); - - protected static final SlidingExpirationCacheWithCleanupThread monitors = - new SlidingExpirationCacheWithCleanupThread<>( - ClusterTopologyMonitor::canDispose, - (monitor) -> { - try { - monitor.close(); - } catch (Exception ex) { - // ignore - } - }, - CACHE_CLEANUP_NANO); - static { PropertyDefinition.registerPluginProperties(MonitoringRdsHostListProvider.class); } + protected final FullServicesContainer servicesContainer; protected final PluginService pluginService; protected final long highRefreshRateNano; protected final String writerTopologyQuery; @@ -72,14 +59,14 @@ public class MonitoringRdsHostListProvider extends RdsHostListProvider public MonitoringRdsHostListProvider( final Properties properties, final String originalUrl, - final HostListProviderService hostListProviderService, + final FullServicesContainer servicesContainer, final String topologyQuery, final String nodeIdQuery, final String isReaderQuery, - final String writerTopologyQuery, - final PluginService pluginService) { - super(properties, originalUrl, hostListProviderService, topologyQuery, nodeIdQuery, isReaderQuery); - 
this.pluginService = pluginService; + final String writerTopologyQuery) { + super(properties, originalUrl, servicesContainer, topologyQuery, nodeIdQuery, isReaderQuery); + this.servicesContainer = servicesContainer; + this.pluginService = servicesContainer.getPluginService(); this.writerTopologyQuery = writerTopologyQuery; this.highRefreshRateNano = TimeUnit.MILLISECONDS.toNanos( CLUSTER_TOPOLOGY_HIGH_REFRESH_RATE_MS.getLong(this.properties)); @@ -89,41 +76,45 @@ public static void clearCache() { clearAll(); } - public static void closeAllMonitors() { - monitors.getEntries().values().forEach(monitor -> { - try { - monitor.close(); - } catch (Exception ex) { - // ignore - } - }); - monitors.clear(); - clearCache(); - } - @Override protected void init() throws SQLException { super.init(); } - protected ClusterTopologyMonitor initMonitor() { - return monitors.computeIfAbsent(this.clusterId, - (key) -> new ClusterTopologyMonitorImpl( - key, topologyCache, this.initialHostSpec, this.properties, this.pluginService, - this.hostListProviderService, this.clusterInstanceTemplate, - this.refreshRateNano, this.highRefreshRateNano, TOPOLOGY_CACHE_EXPIRATION_NANO, + protected ClusterTopologyMonitor initMonitor() throws SQLException { + return this.servicesContainer.getMonitorService().runIfAbsent( + ClusterTopologyMonitorImpl.class, + this.clusterId, + this.servicesContainer.getStorageService(), + this.pluginService.getTelemetryFactory(), + this.originalUrl, + this.pluginService.getDriverProtocol(), + this.pluginService.getTargetDriverDialect(), + this.pluginService.getDialect(), + this.properties, + (ConnectionService connectionService, PluginService monitorPluginService) -> new ClusterTopologyMonitorImpl( + this.clusterId, + this.servicesContainer.getStorageService(), + connectionService, + this.initialHostSpec, + this.properties, + this.servicesContainer.getHostListProviderService(), + this.clusterInstanceTemplate, + this.refreshRateNano, + this.highRefreshRateNano, this.topologyQuery, this.writerTopologyQuery, - this.nodeIdQuery), - MONITOR_EXPIRATION_NANO); + this.nodeIdQuery)); } @Override protected List queryForTopology(final Connection conn) throws SQLException { - ClusterTopologyMonitor monitor = monitors.get(this.clusterId, MONITOR_EXPIRATION_NANO); + ClusterTopologyMonitor monitor = this.servicesContainer.getMonitorService() + .get(ClusterTopologyMonitorImpl.class, this.clusterId); if (monitor == null) { monitor = this.initMonitor(); } + try { return monitor.forceRefresh(conn, defaultTopologyQueryTimeoutMs); } catch (TimeoutException ex) { @@ -132,18 +123,32 @@ protected List queryForTopology(final Connection conn) throws SQLExcep } @Override - protected void clusterIdChanged(final String oldClusterId) { - final ClusterTopologyMonitor existingMonitor = monitors.get(oldClusterId, MONITOR_EXPIRATION_NANO); + protected void clusterIdChanged(final String oldClusterId) throws SQLException { + MonitorService monitorService = this.servicesContainer.getMonitorService(); + final ClusterTopologyMonitorImpl existingMonitor = + monitorService.get(ClusterTopologyMonitorImpl.class, oldClusterId); if (existingMonitor != null) { - monitors.computeIfAbsent(this.clusterId, (key) -> existingMonitor, MONITOR_EXPIRATION_NANO); - assert monitors.get(this.clusterId, MONITOR_EXPIRATION_NANO) == existingMonitor; + this.servicesContainer.getMonitorService().runIfAbsent( + ClusterTopologyMonitorImpl.class, + this.clusterId, + this.servicesContainer.getStorageService(), + this.pluginService.getTelemetryFactory(), + 
this.originalUrl, + this.pluginService.getDriverProtocol(), + this.pluginService.getTargetDriverDialect(), + this.pluginService.getDialect(), + this.properties, + (connectionService, pluginService) -> existingMonitor); + assert monitorService.get(ClusterTopologyMonitorImpl.class, this.clusterId) == existingMonitor; existingMonitor.setClusterId(this.clusterId); - monitors.remove(oldClusterId); + monitorService.remove(ClusterTopologyMonitorImpl.class, oldClusterId); } - final List existingHosts = topologyCache.get(oldClusterId); + final StorageService storageService = this.servicesContainer.getStorageService(); + final Topology existingTopology = storageService.get(Topology.class, oldClusterId); + final List existingHosts = existingTopology == null ? null : existingTopology.getHosts(); if (existingHosts != null) { - topologyCache.put(this.clusterId, existingHosts, TOPOLOGY_CACHE_EXPIRATION_NANO); + storageService.set(this.clusterId, new Topology(existingHosts)); } } @@ -151,7 +156,8 @@ protected void clusterIdChanged(final String oldClusterId) { public List forceRefresh(final boolean shouldVerifyWriter, final long timeoutMs) throws SQLException, TimeoutException { - ClusterTopologyMonitor monitor = monitors.get(this.clusterId, MONITOR_EXPIRATION_NANO); + ClusterTopologyMonitor monitor = + this.servicesContainer.getMonitorService().get(ClusterTopologyMonitorImpl.class, this.clusterId); if (monitor == null) { monitor = this.initMonitor(); } diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringRdsMultiAzHostListProvider.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringRdsMultiAzHostListProvider.java index f08448a80..a1b314a75 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringRdsMultiAzHostListProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MonitoringRdsMultiAzHostListProvider.java @@ -16,10 +16,10 @@ package software.amazon.jdbc.hostlistprovider.monitoring; +import java.sql.SQLException; import java.util.Properties; import java.util.logging.Logger; -import software.amazon.jdbc.HostListProviderService; -import software.amazon.jdbc.PluginService; +import software.amazon.jdbc.util.FullServicesContainer; public class MonitoringRdsMultiAzHostListProvider extends MonitoringRdsHostListProvider { @@ -31,32 +31,50 @@ public class MonitoringRdsMultiAzHostListProvider extends MonitoringRdsHostListP public MonitoringRdsMultiAzHostListProvider( final Properties properties, final String originalUrl, - final HostListProviderService hostListProviderService, + final FullServicesContainer servicesContainer, final String topologyQuery, final String nodeIdQuery, final String isReaderQuery, - final PluginService pluginService, final String fetchWriterNodeQuery, final String fetchWriterNodeColumnName) { - super(properties, originalUrl, hostListProviderService, topologyQuery, nodeIdQuery, isReaderQuery, - "", pluginService); + super( + properties, + originalUrl, + servicesContainer, + topologyQuery, + nodeIdQuery, + isReaderQuery, + ""); this.fetchWriterNodeQuery = fetchWriterNodeQuery; this.fetchWriterNodeColumnName = fetchWriterNodeColumnName; } @Override - protected ClusterTopologyMonitor initMonitor() { - return monitors.computeIfAbsent(this.clusterId, - (key) -> new MultiAzClusterTopologyMonitorImpl( - key, topologyCache, this.initialHostSpec, this.properties, this.pluginService, - this.hostListProviderService, this.clusterInstanceTemplate, - 
this.refreshRateNano, this.highRefreshRateNano, TOPOLOGY_CACHE_EXPIRATION_NANO, + protected ClusterTopologyMonitor initMonitor() throws SQLException { + return this.servicesContainer.getMonitorService().runIfAbsent(MultiAzClusterTopologyMonitorImpl.class, + this.clusterId, + this.servicesContainer.getStorageService(), + this.pluginService.getTelemetryFactory(), + this.originalUrl, + this.pluginService.getDriverProtocol(), + this.pluginService.getTargetDriverDialect(), + this.pluginService.getDialect(), + this.properties, + (connectionService, pluginService) -> new MultiAzClusterTopologyMonitorImpl( + this.clusterId, + this.servicesContainer.getStorageService(), + connectionService, + this.initialHostSpec, + this.properties, + this.hostListProviderService, + this.clusterInstanceTemplate, + this.refreshRateNano, + this.highRefreshRateNano, this.topologyQuery, this.writerTopologyQuery, this.nodeIdQuery, this.fetchWriterNodeQuery, - this.fetchWriterNodeColumnName), - MONITOR_EXPIRATION_NANO); + this.fetchWriterNodeColumnName)); } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MultiAzClusterTopologyMonitorImpl.java b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MultiAzClusterTopologyMonitorImpl.java index c313481e3..6e3c3b388 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MultiAzClusterTopologyMonitorImpl.java +++ b/wrapper/src/main/java/software/amazon/jdbc/hostlistprovider/monitoring/MultiAzClusterTopologyMonitorImpl.java @@ -22,14 +22,13 @@ import java.sql.Statement; import java.sql.Timestamp; import java.time.Instant; -import java.util.List; import java.util.Properties; import java.util.logging.Logger; import software.amazon.jdbc.HostListProviderService; import software.amazon.jdbc.HostSpec; -import software.amazon.jdbc.PluginService; -import software.amazon.jdbc.util.CacheMap; import software.amazon.jdbc.util.StringUtils; +import software.amazon.jdbc.util.connection.ConnectionService; +import software.amazon.jdbc.util.storage.StorageService; public class MultiAzClusterTopologyMonitorImpl extends ClusterTopologyMonitorImpl { @@ -40,23 +39,32 @@ public class MultiAzClusterTopologyMonitorImpl extends ClusterTopologyMonitorImp public MultiAzClusterTopologyMonitorImpl( final String clusterId, - final CacheMap> topologyMap, + final StorageService storageService, + final ConnectionService connectionService, final HostSpec initialHostSpec, final Properties properties, - final PluginService pluginService, final HostListProviderService hostListProviderService, final HostSpec clusterInstanceTemplate, final long refreshRateNano, final long highRefreshRateNano, - final long topologyCacheExpirationNano, final String topologyQuery, final String writerTopologyQuery, final String nodeIdQuery, final String fetchWriterNodeQuery, final String fetchWriterNodeColumnName) { - super(clusterId, topologyMap, initialHostSpec, properties, pluginService, hostListProviderService, - clusterInstanceTemplate, refreshRateNano, highRefreshRateNano, topologyCacheExpirationNano, - topologyQuery, writerTopologyQuery, nodeIdQuery); + super( + clusterId, + storageService, + connectionService, + initialHostSpec, + properties, + hostListProviderService, + clusterInstanceTemplate, + refreshRateNano, + highRefreshRateNano, + topologyQuery, + writerTopologyQuery, + nodeIdQuery); this.fetchWriterNodeQuery = fetchWriterNodeQuery; this.fetchWriterNodeColumnName = fetchWriterNodeColumnName; } diff --git 
a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenConnectionPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenConnectionPlugin.java index 570366a39..7bfe67b35 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenConnectionPlugin.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenConnectionPlugin.java @@ -40,7 +40,9 @@ import software.amazon.jdbc.plugin.bluegreen.routing.ConnectRouting; import software.amazon.jdbc.plugin.bluegreen.routing.ExecuteRouting; import software.amazon.jdbc.plugin.iam.IamAuthConnectionPlugin; +import software.amazon.jdbc.util.FullServicesContainer; import software.amazon.jdbc.util.RdsUtils; +import software.amazon.jdbc.util.storage.StorageService; import software.amazon.jdbc.util.telemetry.TelemetryFactory; public class BlueGreenConnectionPlugin extends AbstractConnectionPlugin { @@ -71,6 +73,8 @@ public class BlueGreenConnectionPlugin extends AbstractConnectionPlugin { PropertyDefinition.registerPluginProperties(BlueGreenConnectionPlugin.class); } + protected final FullServicesContainer servicesContainer; + protected final StorageService storageService; protected final PluginService pluginService; protected final Properties props; protected BlueGreenProviderSupplier providerSupplier; @@ -89,17 +93,19 @@ public class BlueGreenConnectionPlugin extends AbstractConnectionPlugin { protected final Set subscribedMethods; public BlueGreenConnectionPlugin( - final @NonNull PluginService pluginService, + final @NonNull FullServicesContainer servicesContainer, final @NonNull Properties props) { - this(pluginService, props, BlueGreenStatusProvider::new); + this(servicesContainer, props, BlueGreenStatusProvider::new); } public BlueGreenConnectionPlugin( - final @NonNull PluginService pluginService, + final @NonNull FullServicesContainer servicesContainer, final @NonNull Properties props, final @NonNull BlueGreenProviderSupplier providerSupplier) { - this.pluginService = pluginService; + this.servicesContainer = servicesContainer; + this.storageService = servicesContainer.getStorageService(); + this.pluginService = servicesContainer.getPluginService(); this.props = props; this.telemetryFactory = pluginService.getTelemetryFactory(); this.providerSupplier = providerSupplier; @@ -131,7 +137,7 @@ public Connection connect( try { - this.bgStatus = this.pluginService.getStatus(BlueGreenStatus.class, this.bgdId); + this.bgStatus = this.storageService.get(BlueGreenStatus.class, this.bgdId); if (this.bgStatus == null) { return regularOpenConnection(connectFunc, isInitialConnection); @@ -167,10 +173,11 @@ public Connection connect( props, isInitialConnection, connectFunc, + this.storageService, this.pluginService); if (conn == null) { - this.bgStatus = this.pluginService.getStatus(BlueGreenStatus.class, this.bgdId); + this.bgStatus = this.storageService.get(BlueGreenStatus.class, this.bgdId); if (this.bgStatus == null) { this.endTimeNano.set(this.getNanoTime()); return regularOpenConnection(connectFunc, isInitialConnection); @@ -236,7 +243,7 @@ public T execute( return jdbcMethodFunc.call(); } - this.bgStatus = this.pluginService.getStatus(BlueGreenStatus.class, this.bgdId); + this.bgStatus = this.storageService.get(BlueGreenStatus.class, this.bgdId); if (this.bgStatus == null) { return jdbcMethodFunc.call(); @@ -271,12 +278,13 @@ public T execute( methodName, jdbcMethodFunc, jdbcMethodArgs, + this.storageService, this.pluginService, this.props); if (!result.isPresent()) { - 
this.bgStatus = this.pluginService.getStatus(BlueGreenStatus.class, this.bgdId); + this.bgStatus = this.storageService.get(BlueGreenStatus.class, this.bgdId); if (this.bgStatus == null) { this.endTimeNano.set(this.getNanoTime()); return jdbcMethodFunc.call(); @@ -306,7 +314,7 @@ public T execute( protected void initProvider() { provider.computeIfAbsent(this.bgdId, - (key) -> this.providerSupplier.create(this.pluginService, this.props, this.bgdId)); + (key) -> this.providerSupplier.create(this.servicesContainer, this.props, this.bgdId)); } // For testing purposes diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenConnectionPluginFactory.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenConnectionPluginFactory.java index 1667a39c9..e743d2f9a 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenConnectionPluginFactory.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenConnectionPluginFactory.java @@ -18,14 +18,21 @@ import java.util.Properties; import software.amazon.jdbc.ConnectionPlugin; -import software.amazon.jdbc.ConnectionPluginFactory; import software.amazon.jdbc.PluginService; -import software.amazon.jdbc.plugin.iam.IamAuthConnectionPlugin; - -public class BlueGreenConnectionPluginFactory implements ConnectionPluginFactory { +import software.amazon.jdbc.ServicesContainerPluginFactory; +import software.amazon.jdbc.util.FullServicesContainer; +import software.amazon.jdbc.util.Messages; +public class BlueGreenConnectionPluginFactory implements ServicesContainerPluginFactory { @Override public ConnectionPlugin getInstance(final PluginService pluginService, final Properties props) { - return new BlueGreenConnectionPlugin(pluginService, props); + throw new UnsupportedOperationException( + Messages.get( + "ServicesContainerPluginFactory.servicesContainerRequired", new Object[] {"BlueGreenConnectionPlugin"})); + } + + @Override + public ConnectionPlugin getInstance(final FullServicesContainer servicesContainer, final Properties props) { + return new BlueGreenConnectionPlugin(servicesContainer, props); } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenProviderSupplier.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenProviderSupplier.java index 7404b8a0c..3cc3bcfa6 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenProviderSupplier.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenProviderSupplier.java @@ -17,10 +17,10 @@ package software.amazon.jdbc.plugin.bluegreen; import java.util.Properties; -import software.amazon.jdbc.PluginService; +import software.amazon.jdbc.util.FullServicesContainer; @FunctionalInterface public interface BlueGreenProviderSupplier { - BlueGreenStatusProvider create(PluginService pluginService, Properties props, String bgdId); + BlueGreenStatusProvider create(FullServicesContainer servicesContainer, Properties props, String bgdId); } diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenStatusMonitor.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenStatusMonitor.java index 290fbac18..a3a717638 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenStatusMonitor.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenStatusMonitor.java @@ -44,7 +44,6 @@ import org.checkerframework.checker.nullness.qual.NonNull; import 
org.checkerframework.checker.nullness.qual.Nullable; import software.amazon.jdbc.HostListProvider; -import software.amazon.jdbc.HostListProviderService; import software.amazon.jdbc.HostSpec; import software.amazon.jdbc.HostSpecBuilder; import software.amazon.jdbc.PluginService; @@ -54,6 +53,7 @@ import software.amazon.jdbc.plugin.iam.IamAuthConnectionPlugin; import software.amazon.jdbc.util.ConnectionUrlParser; import software.amazon.jdbc.util.ExecutorFactory; +import software.amazon.jdbc.util.FullServicesContainer; import software.amazon.jdbc.util.Messages; import software.amazon.jdbc.util.PropertyUtils; import software.amazon.jdbc.util.RdsUtils; @@ -69,6 +69,7 @@ public class BlueGreenStatusMonitor { // Add more versions here if needed. protected static final Set knownVersions = new HashSet<>(Collections.singletonList(latestKnownVersion)); protected final BlueGreenDialect blueGreenDialect; + protected final FullServicesContainer servicesContainer; protected final PluginService pluginService; protected final String bgdId; protected final Properties props; @@ -125,7 +126,7 @@ public BlueGreenStatusMonitor( final @NonNull BlueGreenRole role, final @NonNull String bgdId, final @NonNull HostSpec initialHostSpec, - final @NonNull PluginService pluginService, + final @NonNull FullServicesContainer servicesContainer, final @NonNull Properties props, final @NonNull Map statusCheckIntervalMap, final @Nullable OnBlueGreenStatusChange onBlueGreenStatusChangeFunc) { @@ -133,7 +134,8 @@ public BlueGreenStatusMonitor( this.role = role; this.bgdId = bgdId; this.initialHostSpec = initialHostSpec; - this.pluginService = pluginService; + this.servicesContainer = servicesContainer; + this.pluginService = servicesContainer.getPluginService(); this.props = props; this.statusCheckIntervalMap = statusCheckIntervalMap; this.onBlueGreenStatusChangeFunc = onBlueGreenStatusChangeFunc; @@ -617,8 +619,7 @@ protected void initHostListProvider() { .getProvider( hostListProperties, hostListProviderUrl, - (HostListProviderService) this.pluginService, - this.pluginService); + this.servicesContainer); } else { LOGGER.warning(() -> Messages.get("bgd.hostSpecNull")); } diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenStatusProvider.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenStatusProvider.java index b6ec7dca8..09fe7cfbe 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenStatusProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/BlueGreenStatusProvider.java @@ -52,12 +52,14 @@ import software.amazon.jdbc.plugin.bluegreen.routing.SuspendConnectRouting; import software.amazon.jdbc.plugin.bluegreen.routing.SuspendExecuteRouting; import software.amazon.jdbc.plugin.bluegreen.routing.SuspendUntilCorrespondingNodeFoundConnectRouting; +import software.amazon.jdbc.util.FullServicesContainer; import software.amazon.jdbc.util.Messages; import software.amazon.jdbc.util.Pair; import software.amazon.jdbc.util.PropertyUtils; import software.amazon.jdbc.util.RdsUtils; import software.amazon.jdbc.util.StringUtils; import software.amazon.jdbc.util.Utils; +import software.amazon.jdbc.util.storage.StorageService; public class BlueGreenStatusProvider { @@ -119,6 +121,8 @@ public class BlueGreenStatusProvider { protected final long switchoverTimeoutNano; protected final boolean suspendNewBlueConnectionsWhenInProgress; + protected final FullServicesContainer servicesContainer; + protected final StorageService storageService; 
protected final PluginService pluginService; protected final Properties props; protected final String bgdId; @@ -126,11 +130,13 @@ public class BlueGreenStatusProvider { protected final RdsUtils rdsUtils = new RdsUtils(); public BlueGreenStatusProvider( - final @NonNull PluginService pluginService, + final @NonNull FullServicesContainer servicesContainer, final @NonNull Properties props, final @NonNull String bgdId) { - this.pluginService = pluginService; + this.servicesContainer = servicesContainer; + this.storageService = servicesContainer.getStorageService(); + this.pluginService = servicesContainer.getPluginService(); this.props = props; this.bgdId = bgdId; @@ -156,7 +162,7 @@ protected void initMonitoring() { BlueGreenRole.SOURCE, this.bgdId, this.pluginService.getCurrentHostSpec(), - this.pluginService, + this.servicesContainer, this.getMonitoringProperties(), statusCheckIntervalMap, this::prepareStatus); @@ -165,7 +171,7 @@ protected void initMonitoring() { BlueGreenRole.TARGET, this.bgdId, this.pluginService.getCurrentHostSpec(), - this.pluginService, + this.servicesContainer, this.getMonitoringProperties(), statusCheckIntervalMap, this::prepareStatus); @@ -268,8 +274,8 @@ protected void updatePhase(BlueGreenRole role, BlueGreenInterimStatus interimSta } protected void updateStatusCache() { - final BlueGreenStatus latestStatus = this.pluginService.getStatus(BlueGreenStatus.class, this.bgdId); - this.pluginService.setStatus(BlueGreenStatus.class, this.summaryStatus, this.bgdId); + final BlueGreenStatus latestStatus = this.storageService.get(BlueGreenStatus.class, this.bgdId); + this.storageService.set(this.bgdId, this.summaryStatus); this.storePhaseTime(this.summaryStatus.getCurrentPhase()); // Notify all waiting threads that status is updated. 
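Note on the hunk above and the routing classes that follow: Blue/Green status is no longer read through PluginService.getStatus/setStatus; each component now resolves the current BlueGreenStatus snapshot from the shared StorageService, keyed by the deployment id (bgdId), and re-fetches it whenever it needs to observe an update. Below is a minimal sketch of that polling pattern; it assumes only the calls visible in this patch (StorageService.get(Class, key), BlueGreenStatus.getCurrentPhase(), BlueGreenPhase.IN_PROGRESS), and the class and method names are invented for illustration rather than part of the driver.

// Illustrative sketch only -- not part of this patch. The package of BlueGreenPhase is assumed
// to match the other bluegreen classes; StorageService.get(Class, key) is taken from the hunks here.
import java.util.concurrent.TimeUnit;

import software.amazon.jdbc.plugin.bluegreen.BlueGreenPhase;
import software.amazon.jdbc.plugin.bluegreen.BlueGreenStatus;
import software.amazon.jdbc.util.storage.StorageService;

public class BlueGreenStatusPollingExample {

  /**
   * Re-reads the shared BlueGreenStatus from the storage service until the switchover is no
   * longer IN_PROGRESS or the timeout elapses. Returns the last status seen (possibly null).
   */
  public static BlueGreenStatus waitWhileInProgress(
      final StorageService storageService, final String bgdId, final long timeoutMs)
      throws InterruptedException {
    final long deadlineNano = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMs);
    BlueGreenStatus status = storageService.get(BlueGreenStatus.class, bgdId);
    while (status != null
        && status.getCurrentPhase() == BlueGreenPhase.IN_PROGRESS
        && System.nanoTime() < deadlineNano
        && !Thread.currentThread().isInterrupted()) {
      TimeUnit.MILLISECONDS.sleep(100);
      // Status objects are replaced, not mutated, so the reference must be re-fetched each pass.
      status = storageService.get(BlueGreenStatus.class, bgdId);
    }
    return status;
  }
}

The SuspendConnectRouting and SuspendExecuteRouting hunks later in this patch implement essentially this loop, adding telemetry contexts and timeout messages around it.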
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/BaseConnectRouting.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/BaseConnectRouting.java index 1d29c21f6..276cac529 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/BaseConnectRouting.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/BaseConnectRouting.java @@ -25,8 +25,8 @@ import software.amazon.jdbc.HostSpec; import software.amazon.jdbc.JdbcCallable; import software.amazon.jdbc.PluginService; -import software.amazon.jdbc.plugin.bluegreen.BlueGreenConnectionPlugin; import software.amazon.jdbc.plugin.bluegreen.BlueGreenRole; +import software.amazon.jdbc.util.storage.StorageService; public abstract class BaseConnectRouting extends BaseRouting implements ConnectRouting { @@ -54,6 +54,7 @@ public abstract Connection apply( Properties props, boolean isInitialConnection, JdbcCallable connectFunc, + StorageService storageService, PluginService pluginService) throws SQLException; diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/BaseExecuteRouting.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/BaseExecuteRouting.java index d498378db..62ac29e3c 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/BaseExecuteRouting.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/BaseExecuteRouting.java @@ -25,6 +25,7 @@ import software.amazon.jdbc.JdbcCallable; import software.amazon.jdbc.PluginService; import software.amazon.jdbc.plugin.bluegreen.BlueGreenRole; +import software.amazon.jdbc.util.storage.StorageService; public abstract class BaseExecuteRouting extends BaseRouting implements ExecuteRouting { @@ -52,6 +53,7 @@ public boolean isMatch(HostSpec hostSpec, BlueGreenRole hostRole) { final String methodName, final JdbcCallable jdbcMethodFunc, final Object[] jdbcMethodArgs, + final StorageService storageService, final PluginService pluginService, final Properties props) throws E; diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/BaseRouting.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/BaseRouting.java index b36c18404..3032b2fbc 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/BaseRouting.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/BaseRouting.java @@ -17,8 +17,8 @@ package software.amazon.jdbc.plugin.bluegreen.routing; import java.util.concurrent.TimeUnit; -import software.amazon.jdbc.PluginService; import software.amazon.jdbc.plugin.bluegreen.BlueGreenStatus; +import software.amazon.jdbc.util.storage.StorageService; public abstract class BaseRouting { @@ -29,7 +29,7 @@ protected long getNanoTime() { } @SuppressWarnings("SynchronizationOnLocalVariableOrMethodParameter") - protected void delay(long delayMs, BlueGreenStatus bgStatus, PluginService pluginService, String bgdId) + protected void delay(long delayMs, BlueGreenStatus bgStatus, StorageService storageService, String bgdId) throws InterruptedException { long start = System.nanoTime(); @@ -46,7 +46,7 @@ protected void delay(long delayMs, BlueGreenStatus bgStatus, PluginService plugi } } while ( // check if status reference is changed - bgStatus == pluginService.getStatus(BlueGreenStatus.class, bgdId) + bgStatus == storageService.get(BlueGreenStatus.class, bgdId) && System.nanoTime() < end && !Thread.currentThread().isInterrupted()); } diff 
--git a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/CloseConnectionExecuteRouting.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/CloseConnectionExecuteRouting.java index 70b236e1b..04e1055a7 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/CloseConnectionExecuteRouting.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/CloseConnectionExecuteRouting.java @@ -27,6 +27,7 @@ import software.amazon.jdbc.plugin.bluegreen.BlueGreenRole; import software.amazon.jdbc.util.Messages; import software.amazon.jdbc.util.WrapperUtils; +import software.amazon.jdbc.util.storage.StorageService; // Close current connection. public class CloseConnectionExecuteRouting extends BaseExecuteRouting { @@ -46,6 +47,7 @@ public Optional apply( final String methodName, final JdbcCallable jdbcMethodFunc, final Object[] jdbcMethodArgs, + final StorageService storageService, final PluginService pluginService, final Properties props) throws E { diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/ConnectRouting.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/ConnectRouting.java index 53c788916..7125453bb 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/ConnectRouting.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/ConnectRouting.java @@ -24,6 +24,7 @@ import software.amazon.jdbc.JdbcCallable; import software.amazon.jdbc.PluginService; import software.amazon.jdbc.plugin.bluegreen.BlueGreenRole; +import software.amazon.jdbc.util.storage.StorageService; public interface ConnectRouting { boolean isMatch(HostSpec hostSpec, BlueGreenRole hostRole); @@ -34,5 +35,6 @@ Connection apply( Properties props, boolean isInitialConnection, JdbcCallable connectFunc, + StorageService storageService, PluginService pluginService) throws SQLException; } diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/ExecuteRouting.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/ExecuteRouting.java index 54ea20b5c..39bedfb72 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/ExecuteRouting.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/ExecuteRouting.java @@ -24,6 +24,7 @@ import software.amazon.jdbc.JdbcCallable; import software.amazon.jdbc.PluginService; import software.amazon.jdbc.plugin.bluegreen.BlueGreenRole; +import software.amazon.jdbc.util.storage.StorageService; public interface ExecuteRouting { boolean isMatch(HostSpec hostSpec, BlueGreenRole hostRole); @@ -36,6 +37,7 @@ public interface ExecuteRouting { final String methodName, final JdbcCallable jdbcMethodFunc, final Object[] jdbcMethodArgs, + final StorageService storageService, final PluginService pluginService, final Properties props) throws E; } diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/PassThroughConnectRouting.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/PassThroughConnectRouting.java index 97e6470b5..361898056 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/PassThroughConnectRouting.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/PassThroughConnectRouting.java @@ -26,6 +26,7 @@ import software.amazon.jdbc.JdbcCallable; import software.amazon.jdbc.PluginService; import 
software.amazon.jdbc.plugin.bluegreen.BlueGreenRole; +import software.amazon.jdbc.util.storage.StorageService; public class PassThroughConnectRouting extends BaseConnectRouting { @@ -37,9 +38,8 @@ public PassThroughConnectRouting(@Nullable String hostAndPort, @Nullable BlueGre @Override public Connection apply(ConnectionPlugin plugin, HostSpec hostSpec, Properties props, boolean isInitialConnection, - JdbcCallable connectFunc, PluginService pluginService) - throws SQLException { - + JdbcCallable connectFunc, StorageService storageService, + PluginService pluginService) throws SQLException { return connectFunc.call(); } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/PassThroughExecuteRouting.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/PassThroughExecuteRouting.java index b0fd5147e..ab8b90c86 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/PassThroughExecuteRouting.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/PassThroughExecuteRouting.java @@ -25,6 +25,7 @@ import software.amazon.jdbc.JdbcCallable; import software.amazon.jdbc.PluginService; import software.amazon.jdbc.plugin.bluegreen.BlueGreenRole; +import software.amazon.jdbc.util.storage.StorageService; // Normally execute JDBC call. public class PassThroughExecuteRouting extends BaseExecuteRouting { @@ -44,6 +45,7 @@ public PassThroughExecuteRouting(@Nullable String hostAndPort, @Nullable BlueGre final String methodName, final JdbcCallable jdbcMethodFunc, final Object[] jdbcMethodArgs, + final StorageService storageService, final PluginService pluginService, final Properties props) throws E { diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/RejectConnectRouting.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/RejectConnectRouting.java index cd89667b2..87720e128 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/RejectConnectRouting.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/RejectConnectRouting.java @@ -27,6 +27,7 @@ import software.amazon.jdbc.PluginService; import software.amazon.jdbc.plugin.bluegreen.BlueGreenRole; import software.amazon.jdbc.util.Messages; +import software.amazon.jdbc.util.storage.StorageService; // Reject an attempt to open a new connection. 
public class RejectConnectRouting extends BaseConnectRouting { @@ -39,7 +40,8 @@ public RejectConnectRouting(@Nullable String hostAndPort, @Nullable BlueGreenRol @Override public Connection apply(ConnectionPlugin plugin, HostSpec hostSpec, Properties props, boolean isInitialConnection, - JdbcCallable connectFunc, PluginService pluginService) throws SQLException { + JdbcCallable connectFunc, StorageService storageService, + PluginService pluginService) throws SQLException { LOGGER.finest(() -> Messages.get("bgd.inProgressCantConnect")); throw new SQLException(Messages.get("bgd.inProgressCantConnect")); diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/SubstituteConnectRouting.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/SubstituteConnectRouting.java index ddbe7ba74..e06a649fc 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/SubstituteConnectRouting.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/SubstituteConnectRouting.java @@ -35,6 +35,7 @@ import software.amazon.jdbc.util.PropertyUtils; import software.amazon.jdbc.util.RdsUtils; import software.amazon.jdbc.util.Utils; +import software.amazon.jdbc.util.storage.StorageService; /** * Open a new connection to a provided substitute host. @@ -60,7 +61,8 @@ public SubstituteConnectRouting(@Nullable String hostAndPort, @Nullable BlueGree @Override public Connection apply(ConnectionPlugin plugin, HostSpec hostSpec, Properties props, boolean isInitialConnection, - JdbcCallable connectFunc, PluginService pluginService) throws SQLException { + JdbcCallable connectFunc, StorageService storageService, PluginService pluginService) + throws SQLException { if (!RDS_UTILS.isIP(this.substituteHostSpec.getHost())) { return pluginService.connect(this.substituteHostSpec, props, plugin); diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/SuspendConnectRouting.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/SuspendConnectRouting.java index 6087330ed..4991e1125 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/SuspendConnectRouting.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/SuspendConnectRouting.java @@ -33,6 +33,7 @@ import software.amazon.jdbc.plugin.bluegreen.BlueGreenRole; import software.amazon.jdbc.plugin.bluegreen.BlueGreenStatus; import software.amazon.jdbc.util.Messages; +import software.amazon.jdbc.util.storage.StorageService; import software.amazon.jdbc.util.telemetry.TelemetryContext; import software.amazon.jdbc.util.telemetry.TelemetryFactory; import software.amazon.jdbc.util.telemetry.TelemetryTraceLevel; @@ -59,16 +60,16 @@ public Connection apply( Properties props, boolean isInitialConnection, JdbcCallable connectFunc, + StorageService storageService, PluginService pluginService) throws SQLException { LOGGER.finest(() -> Messages.get("bgd.inProgressHoldConnect")); - TelemetryFactory telemetryFactory = pluginService.getTelemetryFactory(); TelemetryContext telemetryContext = telemetryFactory.openTelemetryContext(TELEMETRY_SWITCHOVER, TelemetryTraceLevel.NESTED); - BlueGreenStatus bgStatus = pluginService.getStatus(BlueGreenStatus.class, this.bgdId); + BlueGreenStatus bgStatus = storageService.get(BlueGreenStatus.class, this.bgdId); long timeoutNano = TimeUnit.MILLISECONDS.toNanos(BG_CONNECT_TIMEOUT.getLong(props)); long holdStartTime = this.getNanoTime(); @@ -80,13 +81,13 @@ public Connection apply( && 
bgStatus.getCurrentPhase() == BlueGreenPhase.IN_PROGRESS) { try { - this.delay(SLEEP_TIME_MS, bgStatus, pluginService, this.bgdId); + this.delay(SLEEP_TIME_MS, bgStatus, storageService, this.bgdId); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException(e); } - bgStatus = pluginService.getStatus(BlueGreenStatus.class, this.bgdId); + bgStatus = storageService.get(BlueGreenStatus.class, this.bgdId); } if (bgStatus != null && bgStatus.getCurrentPhase() == BlueGreenPhase.IN_PROGRESS) { diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/SuspendExecuteRouting.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/SuspendExecuteRouting.java index cdeb2db0d..18a4b74bd 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/SuspendExecuteRouting.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/SuspendExecuteRouting.java @@ -33,6 +33,7 @@ import software.amazon.jdbc.plugin.bluegreen.BlueGreenStatus; import software.amazon.jdbc.util.Messages; import software.amazon.jdbc.util.WrapperUtils; +import software.amazon.jdbc.util.storage.StorageService; import software.amazon.jdbc.util.telemetry.TelemetryContext; import software.amazon.jdbc.util.telemetry.TelemetryFactory; import software.amazon.jdbc.util.telemetry.TelemetryTraceLevel; @@ -61,16 +62,16 @@ public SuspendExecuteRouting(@Nullable String hostAndPort, @Nullable BlueGreenRo final String methodName, final JdbcCallable jdbcMethodFunc, final Object[] jdbcMethodArgs, + final StorageService storageService, final PluginService pluginService, final Properties props) throws E { LOGGER.finest(Messages.get("bgd.inProgressSuspendMethod", new Object[] {methodName})); - TelemetryFactory telemetryFactory = pluginService.getTelemetryFactory(); TelemetryContext telemetryContext = telemetryFactory.openTelemetryContext(TELEMETRY_SWITCHOVER, TelemetryTraceLevel.NESTED); - BlueGreenStatus bgStatus = pluginService.getStatus(BlueGreenStatus.class, this.bgdId); + BlueGreenStatus bgStatus = storageService.get(BlueGreenStatus.class, this.bgdId); long timeoutNano = TimeUnit.MILLISECONDS.toNanos(BG_CONNECT_TIMEOUT.getLong(props)); long holdStartTime = this.getNanoTime(); @@ -83,13 +84,13 @@ public SuspendExecuteRouting(@Nullable String hostAndPort, @Nullable BlueGreenRo && bgStatus.getCurrentPhase() == BlueGreenPhase.IN_PROGRESS) { try { - this.delay(SLEEP_TIME_MS, bgStatus, pluginService, this.bgdId); + this.delay(SLEEP_TIME_MS, bgStatus, storageService, this.bgdId); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException(e); } - bgStatus = pluginService.getStatus(BlueGreenStatus.class, this.bgdId); + bgStatus = storageService.get(BlueGreenStatus.class, this.bgdId); } if (bgStatus != null && bgStatus.getCurrentPhase() == BlueGreenPhase.IN_PROGRESS) { diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/SuspendUntilCorrespondingNodeFoundConnectRouting.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/SuspendUntilCorrespondingNodeFoundConnectRouting.java index 27bf00ccf..bb384b73d 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/SuspendUntilCorrespondingNodeFoundConnectRouting.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/bluegreen/routing/SuspendUntilCorrespondingNodeFoundConnectRouting.java @@ -34,6 +34,7 @@ import software.amazon.jdbc.plugin.bluegreen.BlueGreenStatus; import 
software.amazon.jdbc.util.Messages; import software.amazon.jdbc.util.Pair; +import software.amazon.jdbc.util.storage.StorageService; import software.amazon.jdbc.util.telemetry.TelemetryContext; import software.amazon.jdbc.util.telemetry.TelemetryFactory; import software.amazon.jdbc.util.telemetry.TelemetryTraceLevel; @@ -62,17 +63,16 @@ public Connection apply( Properties props, boolean isInitialConnection, JdbcCallable connectFunc, - PluginService pluginService) - throws SQLException { + StorageService storageService, + PluginService pluginService) throws SQLException { LOGGER.finest(() -> Messages.get("bgd.waitConnectUntilCorrespondingNodeFound", new Object[] {hostSpec.getHost()})); - TelemetryFactory telemetryFactory = pluginService.getTelemetryFactory(); TelemetryContext telemetryContext = telemetryFactory.openTelemetryContext(TELEMETRY_SWITCHOVER, TelemetryTraceLevel.NESTED); - BlueGreenStatus bgStatus = pluginService.getStatus(BlueGreenStatus.class, this.bgdId); + BlueGreenStatus bgStatus = storageService.get(BlueGreenStatus.class, this.bgdId); Pair correspondingPair = bgStatus == null ? null : bgStatus.getCorrespondingNodes().get(hostSpec.getHost()); @@ -89,13 +89,13 @@ public Connection apply( && (correspondingPair == null || correspondingPair.getValue2() == null)) { try { - this.delay(SLEEP_TIME_MS, bgStatus, pluginService, this.bgdId); + this.delay(SLEEP_TIME_MS, bgStatus, storageService, this.bgdId); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException(e); } - bgStatus = pluginService.getStatus(BlueGreenStatus.class, this.bgdId); + bgStatus = storageService.get(BlueGreenStatus.class, this.bgdId); correspondingPair = bgStatus == null ? null : bgStatus.getCorrespondingNodes().get(hostSpec.getHost()); diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointMonitor.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointMonitor.java index fd8e82148..0e50d5844 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointMonitor.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointMonitor.java @@ -16,18 +16,13 @@ package software.amazon.jdbc.plugin.customendpoint; +import software.amazon.jdbc.util.monitoring.Monitor; + /** * Interface for custom endpoint monitors. Custom endpoint monitors analyze a given custom endpoint for custom endpoint * information and future changes to the endpoint. */ -public interface CustomEndpointMonitor extends AutoCloseable, Runnable { - - /** - * Evaluates whether the monitor should be disposed. - * - * @return true if the monitor should be disposed, otherwise return false. - */ - boolean shouldDispose(); +public interface CustomEndpointMonitor extends Monitor { /** * Indicates whether the monitor has info about the custom endpoint or not. 
This will be false if the monitor is new diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointMonitorImpl.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointMonitorImpl.java index c38fc3e15..02b6a7bad 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointMonitorImpl.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointMonitorImpl.java @@ -19,9 +19,7 @@ import static software.amazon.jdbc.plugin.customendpoint.MemberListType.STATIC_LIST; import java.util.List; -import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiFunction; import java.util.logging.Level; import java.util.logging.Logger; @@ -33,10 +31,10 @@ import software.amazon.awssdk.services.rds.model.Filter; import software.amazon.jdbc.AllowedAndBlockedHosts; import software.amazon.jdbc.HostSpec; -import software.amazon.jdbc.PluginService; -import software.amazon.jdbc.util.CacheMap; -import software.amazon.jdbc.util.ExecutorFactory; import software.amazon.jdbc.util.Messages; +import software.amazon.jdbc.util.monitoring.AbstractMonitor; +import software.amazon.jdbc.util.storage.CacheMap; +import software.amazon.jdbc.util.storage.StorageService; import software.amazon.jdbc.util.telemetry.TelemetryCounter; import software.amazon.jdbc.util.telemetry.TelemetryFactory; @@ -44,34 +42,32 @@ * The default custom endpoint monitor implementation. This class uses a background thread to monitor a given custom * endpoint for custom endpoint information and future changes to the custom endpoint. */ -public class CustomEndpointMonitorImpl implements CustomEndpointMonitor { +public class CustomEndpointMonitorImpl extends AbstractMonitor implements CustomEndpointMonitor { private static final Logger LOGGER = Logger.getLogger(CustomEndpointPlugin.class.getName()); private static final String TELEMETRY_ENDPOINT_INFO_CHANGED = "customEndpoint.infoChanged.counter"; // Keys are custom endpoint URLs, values are information objects for the associated custom endpoint. protected static final CacheMap customEndpointInfoCache = new CacheMap<>(); protected static final long CUSTOM_ENDPOINT_INFO_EXPIRATION_NANO = TimeUnit.MINUTES.toNanos(5); + protected static final long MONITOR_TERMINATION_TIMEOUT_SEC = 30; - protected final AtomicBoolean stop = new AtomicBoolean(false); protected final RdsClient rdsClient; protected final HostSpec customEndpointHostSpec; protected final String endpointIdentifier; protected final Region region; protected final long refreshRateNano; - - protected final PluginService pluginService; - protected final ExecutorService monitorExecutor = - ExecutorFactory.newSingleThreadExecutor("monitor"); + protected final StorageService storageService; private final TelemetryCounter infoChangedCounter; /** * Constructs a CustomEndpointMonitorImpl instance for the host specified by {@code customEndpointHostSpec}. * - * @param pluginService The plugin service to use to update the set of allowed/blocked hosts according to - * the custom endpoint info. + * @param storageService The storage service used to store the set of allowed/blocked hosts according to the + * custom endpoint info. + * @param telemetryFactory The telemetry factory used to create telemetry data. * @param customEndpointHostSpec The host information for the custom endpoint to be monitored. 
- * @param endpointIdentifier An endpoint identifier. + * @param endpointIdentifier An endpoint identifier. * @param region The region of the custom endpoint to be monitored. * @param refreshRateNano Controls how often the custom endpoint information should be fetched and analyzed for * changes. The value specified should be in nanoseconds. @@ -79,40 +75,39 @@ public class CustomEndpointMonitorImpl implements CustomEndpointMonitor { * information. */ public CustomEndpointMonitorImpl( - PluginService pluginService, + StorageService storageService, + TelemetryFactory telemetryFactory, HostSpec customEndpointHostSpec, String endpointIdentifier, Region region, long refreshRateNano, BiFunction rdsClientFunc) { - this.pluginService = pluginService; + super(MONITOR_TERMINATION_TIMEOUT_SEC); + this.storageService = storageService; this.customEndpointHostSpec = customEndpointHostSpec; this.endpointIdentifier = endpointIdentifier; this.region = region; this.refreshRateNano = refreshRateNano; this.rdsClient = rdsClientFunc.apply(customEndpointHostSpec, this.region); - TelemetryFactory telemetryFactory = this.pluginService.getTelemetryFactory(); this.infoChangedCounter = telemetryFactory.createCounter(TELEMETRY_ENDPOINT_INFO_CHANGED); - - this.monitorExecutor.submit(this); - this.monitorExecutor.shutdown(); } /** * Analyzes a given custom endpoint for changes to custom endpoint information. */ @Override - public void run() { + public void monitor() { LOGGER.fine( Messages.get( "CustomEndpointMonitorImpl.startingMonitor", - new Object[] { this.customEndpointHostSpec.getHost() })); + new Object[] {this.customEndpointHostSpec.getUrl()})); try { while (!this.stop.get() && !Thread.currentThread().isInterrupted()) { try { long start = System.nanoTime(); + this.lastActivityTimestampNanos.set(System.nanoTime()); final Filter customEndpointFilter = Filter.builder().name("db-cluster-endpoint-type").values("custom").build(); @@ -140,7 +135,7 @@ public void run() { } CustomEndpointInfo endpointInfo = CustomEndpointInfo.fromDBClusterEndpoint(endpoints.get(0)); - CustomEndpointInfo cachedEndpointInfo = customEndpointInfoCache.get(this.customEndpointHostSpec.getHost()); + CustomEndpointInfo cachedEndpointInfo = customEndpointInfoCache.get(this.customEndpointHostSpec.getUrl()); if (cachedEndpointInfo != null && cachedEndpointInfo.equals(endpointInfo)) { long elapsedTime = System.nanoTime() - start; long sleepDuration = Math.max(0, this.refreshRateNano - elapsedTime); @@ -151,7 +146,7 @@ public void run() { LOGGER.fine( Messages.get( "CustomEndpointMonitorImpl.detectedChangeInCustomEndpointInfo", - new Object[] {this.customEndpointHostSpec.getHost(), endpointInfo})); + new Object[] {this.customEndpointHostSpec.getUrl(), endpointInfo})); // The custom endpoint info has changed, so we need to update the set of allowed/blocked hosts. 
AllowedAndBlockedHosts allowedAndBlockedHosts; @@ -161,9 +156,9 @@ public void run() { allowedAndBlockedHosts = new AllowedAndBlockedHosts(null, endpointInfo.getExcludedMembers()); } - this.pluginService.setAllowedAndBlockedHosts(allowedAndBlockedHosts); + this.storageService.set(this.customEndpointHostSpec.getUrl(), allowedAndBlockedHosts); customEndpointInfoCache.put( - this.customEndpointHostSpec.getHost(), endpointInfo, CUSTOM_ENDPOINT_INFO_EXPIRATION_NANO); + this.customEndpointHostSpec.getUrl(), endpointInfo, CUSTOM_ENDPOINT_INFO_EXPIRATION_NANO); if (this.infoChangedCounter != null) { this.infoChangedCounter.inc(); } @@ -178,68 +173,33 @@ public void run() { LOGGER.log(Level.SEVERE, Messages.get( "CustomEndpointMonitorImpl.exception", - new Object[]{this.customEndpointHostSpec.getHost()}), e); + new Object[] {this.customEndpointHostSpec.getUrl()}), e); } } } catch (InterruptedException e) { LOGGER.fine( Messages.get( "CustomEndpointMonitorImpl.interrupted", - new Object[]{ this.customEndpointHostSpec.getHost() })); + new Object[] {this.customEndpointHostSpec.getUrl()})); Thread.currentThread().interrupt(); } finally { - customEndpointInfoCache.remove(this.customEndpointHostSpec.getHost()); + customEndpointInfoCache.remove(this.customEndpointHostSpec.getUrl()); this.rdsClient.close(); LOGGER.fine( Messages.get( "CustomEndpointMonitorImpl.stoppedMonitor", - new Object[]{ this.customEndpointHostSpec.getHost() })); + new Object[] {this.customEndpointHostSpec.getUrl()})); } } public boolean hasCustomEndpointInfo() { - return customEndpointInfoCache.get(this.customEndpointHostSpec.getHost()) != null; - } - - @Override - public boolean shouldDispose() { - return true; + return customEndpointInfoCache.get(this.customEndpointHostSpec.getUrl()) != null; } - /** - * Stops the custom endpoint monitor. 
- */ @Override public void close() { - LOGGER.fine( - Messages.get( - "CustomEndpointMonitorImpl.stoppingMonitor", - new Object[]{ this.customEndpointHostSpec.getHost() })); - - this.stop.set(true); - - try { - int terminationTimeoutSec = 5; - if (!this.monitorExecutor.awaitTermination(terminationTimeoutSec, TimeUnit.SECONDS)) { - LOGGER.info( - Messages.get( - "CustomEndpointMonitorImpl.monitorTerminationTimeout", - new Object[]{ terminationTimeoutSec, this.customEndpointHostSpec.getHost() })); - - this.monitorExecutor.shutdownNow(); - } - } catch (InterruptedException e) { - LOGGER.info( - Messages.get( - "CustomEndpointMonitorImpl.interruptedWhileTerminating", - new Object[]{ this.customEndpointHostSpec.getHost() })); - - Thread.currentThread().interrupt(); - this.monitorExecutor.shutdownNow(); - } finally { - customEndpointInfoCache.remove(this.customEndpointHostSpec.getHost()); - this.rdsClient.close(); - } + customEndpointInfoCache.remove(this.customEndpointHostSpec.getUrl()); + this.rdsClient.close(); } /** diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointPlugin.java index c8140cd7b..9844bc2d6 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointPlugin.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointPlugin.java @@ -35,12 +35,13 @@ import software.amazon.jdbc.PropertyDefinition; import software.amazon.jdbc.authentication.AwsCredentialsManager; import software.amazon.jdbc.plugin.AbstractConnectionPlugin; +import software.amazon.jdbc.util.FullServicesContainer; import software.amazon.jdbc.util.Messages; import software.amazon.jdbc.util.RdsUtils; import software.amazon.jdbc.util.RegionUtils; -import software.amazon.jdbc.util.SlidingExpirationCacheWithCleanupThread; import software.amazon.jdbc.util.StringUtils; import software.amazon.jdbc.util.WrapperUtils; +import software.amazon.jdbc.util.monitoring.MonitorErrorResponse; import software.amazon.jdbc.util.telemetry.TelemetryCounter; import software.amazon.jdbc.util.telemetry.TelemetryFactory; @@ -50,23 +51,10 @@ */ public class CustomEndpointPlugin extends AbstractConnectionPlugin { private static final Logger LOGGER = Logger.getLogger(CustomEndpointPlugin.class.getName()); - private static final String TELEMETRY_WAIT_FOR_INFO_COUNTER = "customEndpoint.waitForInfo.counter"; - - protected static final long CACHE_CLEANUP_RATE_NANO = TimeUnit.MINUTES.toNanos(1); + protected static final String TELEMETRY_WAIT_FOR_INFO_COUNTER = "customEndpoint.waitForInfo.counter"; protected static final RegionUtils regionUtils = new RegionUtils(); - protected static final SlidingExpirationCacheWithCleanupThread monitors = - new SlidingExpirationCacheWithCleanupThread<>( - CustomEndpointMonitor::shouldDispose, - (monitor) -> { - try { - monitor.close(); - } catch (Exception ex) { - // ignore - } - }, - CACHE_CLEANUP_RATE_NANO); - - private final Set subscribedMethods; + protected static final Set monitorErrorResponses = + new HashSet<>(Collections.singletonList(MonitorErrorResponse.RECREATE)); public static final AwsWrapperProperty CUSTOM_ENDPOINT_INFO_REFRESH_RATE_MS = new AwsWrapperProperty( "customEndpointInfoRefreshRateMs", "30000", @@ -96,11 +84,14 @@ public class CustomEndpointPlugin extends AbstractConnectionPlugin { PropertyDefinition.registerPluginProperties(CustomEndpointPlugin.class); } + protected final FullServicesContainer 
servicesContainer; protected final PluginService pluginService; + protected final TelemetryFactory telemetryFactory; protected final Properties props; protected final RdsUtils rdsUtils = new RdsUtils(); protected final BiFunction rdsClientFunc; + protected final Set subscribedMethods; protected final TelemetryCounter waitForInfoCounter; protected final boolean shouldWaitForInfo; protected final int waitOnCachedInfoDurationMs; @@ -112,12 +103,12 @@ public class CustomEndpointPlugin extends AbstractConnectionPlugin { /** * Constructs a new CustomEndpointPlugin instance. * - * @param pluginService The plugin service that the custom endpoint plugin should use. - * @param props The properties that the custom endpoint plugin should use. + * @param servicesContainer The service container for the services required by this class. + * @param props The properties that the custom endpoint plugin should use. */ - public CustomEndpointPlugin(final PluginService pluginService, final Properties props) { + public CustomEndpointPlugin(final FullServicesContainer servicesContainer, final Properties props) { this( - pluginService, + servicesContainer, props, (hostSpec, region) -> RdsClient.builder() @@ -129,15 +120,18 @@ public CustomEndpointPlugin(final PluginService pluginService, final Properties /** * Constructs a new CustomEndpointPlugin instance. * - * @param pluginService The plugin service that the custom endpoint plugin should use. - * @param props The properties that the custom endpoint plugin should use. - * @param rdsClientFunc The function to call to obtain an {@link RdsClient} instance. + * @param servicesContainer The service container for the services required by this class. + * @param props The properties that the custom endpoint plugin should use. + * @param rdsClientFunc The function to call to obtain an {@link RdsClient} instance. 
*/ public CustomEndpointPlugin( - final PluginService pluginService, + final FullServicesContainer servicesContainer, final Properties props, final BiFunction rdsClientFunc) { - this.pluginService = pluginService; + this.servicesContainer = servicesContainer; + this.pluginService = servicesContainer.getPluginService(); + this.telemetryFactory = servicesContainer.getTelemetryFactory(); + this.props = props; this.rdsClientFunc = rdsClientFunc; @@ -145,13 +139,21 @@ public CustomEndpointPlugin( this.waitOnCachedInfoDurationMs = WAIT_FOR_CUSTOM_ENDPOINT_INFO_TIMEOUT_MS.getInteger(this.props); this.idleMonitorExpirationMs = CUSTOM_ENDPOINT_MONITOR_IDLE_EXPIRATION_MS.getInteger(this.props); - TelemetryFactory telemetryFactory = pluginService.getTelemetryFactory(); + TelemetryFactory telemetryFactory = servicesContainer.getTelemetryFactory(); this.waitForInfoCounter = telemetryFactory.createCounter(TELEMETRY_WAIT_FOR_INFO_COUNTER); final HashSet methods = new HashSet<>(); methods.add(JdbcMethod.CONNECT.methodName); methods.addAll(this.pluginService.getTargetDriverDialect().getNetworkBoundMethodNames(this.props)); this.subscribedMethods = Collections.unmodifiableSet(methods); + + this.servicesContainer.getMonitorService().registerMonitorTypeIfAbsent( + CustomEndpointMonitorImpl.class, + TimeUnit.MILLISECONDS.toNanos(this.idleMonitorExpirationMs), + TimeUnit.MINUTES.toNanos(1), + monitorErrorResponses, + CustomEndpointInfo.class + ); } @Override @@ -174,7 +176,7 @@ public Connection connect( this.customEndpointHostSpec = hostSpec; LOGGER.finest( Messages.get( - "CustomEndpointPlugin.connectionRequestToCustomEndpoint", new Object[]{ hostSpec.getHost() })); + "CustomEndpointPlugin.connectionRequestToCustomEndpoint", new Object[] {hostSpec.getUrl()})); this.customEndpointId = this.rdsUtils.getRdsClusterId(customEndpointHostSpec.getHost()); if (StringUtils.isNullOrEmpty(customEndpointId)) { @@ -208,23 +210,29 @@ public Connection connect( * @param props The connection properties. * @return {@link CustomEndpointMonitor} */ - protected CustomEndpointMonitor createMonitorIfAbsent(Properties props) { - return monitors.computeIfAbsent( - this.customEndpointHostSpec.getHost(), - (customEndpoint) -> new CustomEndpointMonitorImpl( - this.pluginService, + protected CustomEndpointMonitor createMonitorIfAbsent(Properties props) throws SQLException { + return this.servicesContainer.getMonitorService().runIfAbsent( + CustomEndpointMonitorImpl.class, + this.customEndpointHostSpec.getUrl(), + this.servicesContainer.getStorageService(), + this.pluginService.getTelemetryFactory(), + this.pluginService.getOriginalUrl(), + this.pluginService.getDriverProtocol(), + this.pluginService.getTargetDriverDialect(), + this.pluginService.getDialect(), + this.props, + (connectionService, pluginService) -> new CustomEndpointMonitorImpl( + this.servicesContainer.getStorageService(), + this.servicesContainer.getTelemetryFactory(), this.customEndpointHostSpec, this.customEndpointId, this.region, TimeUnit.MILLISECONDS.toNanos(CUSTOM_ENDPOINT_INFO_REFRESH_RATE_MS.getLong(props)), this.rdsClientFunc - ), - TimeUnit.MILLISECONDS.toNanos(this.idleMonitorExpirationMs) - ); + )); } - /** * If custom endpoint info does not exist for the current custom endpoint, waits a short time for the info to be * made available by the custom endpoint monitor. 
This is necessary so that other plugins can rely on accurate custom @@ -245,7 +253,7 @@ protected void waitForCustomEndpointInfo(CustomEndpointMonitor monitor) throws S LOGGER.fine( Messages.get( "CustomEndpointPlugin.waitingForCustomEndpointInfo", - new Object[]{ this.customEndpointHostSpec.getHost(), this.waitOnCachedInfoDurationMs })); + new Object[] {this.customEndpointHostSpec.getUrl(), this.waitOnCachedInfoDurationMs})); long waitForEndpointInfoTimeoutNano = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(this.waitOnCachedInfoDurationMs); @@ -259,13 +267,13 @@ protected void waitForCustomEndpointInfo(CustomEndpointMonitor monitor) throws S throw new SQLException( Messages.get( "CustomEndpointPlugin.interruptedThread", - new Object[]{ this.customEndpointHostSpec.getHost() })); + new Object[] {this.customEndpointHostSpec.getUrl()})); } if (!hasCustomEndpointInfo) { throw new SQLException( Messages.get("CustomEndpointPlugin.timedOutWaitingForCustomEndpointInfo", - new Object[]{this.waitOnCachedInfoDurationMs, this.customEndpointHostSpec.getHost()})); + new Object[] {this.waitOnCachedInfoDurationMs, this.customEndpointHostSpec.getUrl()})); } } } @@ -311,13 +319,4 @@ public T execute( return jdbcMethodFunc.call(); } - - /** - * Closes all active custom endpoint monitors. - */ - public static void closeMonitors() { - LOGGER.info(Messages.get("CustomEndpointPlugin.closeMonitors")); - // The clear call automatically calls close() on all monitors. - monitors.clear(); - } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointPluginFactory.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointPluginFactory.java index 6687b9395..db79591b5 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointPluginFactory.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointPluginFactory.java @@ -19,19 +19,27 @@ import java.util.Properties; import software.amazon.jdbc.ConnectionPlugin; -import software.amazon.jdbc.ConnectionPluginFactory; import software.amazon.jdbc.PluginService; +import software.amazon.jdbc.ServicesContainerPluginFactory; +import software.amazon.jdbc.util.FullServicesContainer; import software.amazon.jdbc.util.Messages; -public class CustomEndpointPluginFactory implements ConnectionPluginFactory { +public class CustomEndpointPluginFactory implements ServicesContainerPluginFactory { @Override public ConnectionPlugin getInstance(final PluginService pluginService, final Properties props) { + throw new UnsupportedOperationException( + Messages.get( + "ServicesContainerPluginFactory.servicesContainerRequired", new Object[] {"CustomEndpointPlugin"})); + } + + @Override + public ConnectionPlugin getInstance(final FullServicesContainer servicesContainer, final Properties props) { try { Class.forName("software.amazon.awssdk.services.rds.RdsClient"); } catch (final ClassNotFoundException e) { throw new RuntimeException(Messages.get("CustomEndpointPluginFactory.awsSdkNotInClasspath")); } - return new CustomEndpointPlugin(pluginService, props); + return new CustomEndpointPlugin(servicesContainer, props); } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/customendpoint/MemberListType.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/customendpoint/MemberListType.java index 298c28069..8a30bbecf 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/customendpoint/MemberListType.java +++ 
b/wrapper/src/main/java/software/amazon/jdbc/plugin/customendpoint/MemberListType.java @@ -17,7 +17,7 @@ package software.amazon.jdbc.plugin.customendpoint; /** - * Enum representing the member list type of a custom endpoint. This information can be used together with a member list + * Enum representing the member list type of custom endpoint. This information can be used together with a member list * to determine which instances are included or excluded from a custom endpoint. */ public enum MemberListType { diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/ExecutorServiceInitializer.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/ExecutorServiceInitializer.java index ef9cda6e1..b35d6c459 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/ExecutorServiceInitializer.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/ExecutorServiceInitializer.java @@ -20,7 +20,7 @@ /** * Interface for passing a specific {@link ExecutorService} to use by the {@link - * MonitorThreadContainer}. + * HostMonitorThreadContainer}. */ @FunctionalInterface public interface ExecutorServiceInitializer { diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/Monitor.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/HostMonitor.java similarity index 73% rename from wrapper/src/main/java/software/amazon/jdbc/plugin/efm/Monitor.java rename to wrapper/src/main/java/software/amazon/jdbc/plugin/efm/HostMonitor.java index f9e3eb1ca..c95b5a8cf 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/Monitor.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/HostMonitor.java @@ -20,17 +20,17 @@ * Interface for monitors. This class uses background threads to monitor servers with one or more * connections for more efficient failure detection during method execution. */ -public interface Monitor extends Runnable { +public interface HostMonitor extends Runnable { - void startMonitoring(MonitorConnectionContext context); + void startMonitoring(HostMonitorConnectionContext context); - void stopMonitoring(MonitorConnectionContext context); + void stopMonitoring(HostMonitorConnectionContext context); - /** Clear all {@link MonitorConnectionContext} associated with this {@link Monitor} instance. */ + /** Clear all {@link HostMonitorConnectionContext} associated with this {@link HostMonitor} instance. */ void clearContexts(); /** - * Whether this {@link Monitor} has stopped monitoring a particular server. + * Whether this {@link HostMonitor} has stopped monitoring a particular server. * * @return true if the monitoring has stopped; false otherwise. */ diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/MonitorConnectionContext.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/HostMonitorConnectionContext.java similarity index 93% rename from wrapper/src/main/java/software/amazon/jdbc/plugin/efm/MonitorConnectionContext.java rename to wrapper/src/main/java/software/amazon/jdbc/plugin/efm/HostMonitorConnectionContext.java index 59c11e5ab..5244d469e 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/MonitorConnectionContext.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/HostMonitorConnectionContext.java @@ -30,9 +30,9 @@ * Monitoring context for each connection. This contains each connection's criteria for whether a * server should be considered unhealthy. The context is shared between the main thread and the monitor thread. 
*/ -public class MonitorConnectionContext { +public class HostMonitorConnectionContext { - private static final Logger LOGGER = Logger.getLogger(MonitorConnectionContext.class.getName()); + private static final Logger LOGGER = Logger.getLogger(HostMonitorConnectionContext.class.getName()); private static final Executor ABORT_EXECUTOR = ExecutorFactory.newSingleThreadExecutor("abort"); @@ -42,7 +42,7 @@ public class MonitorConnectionContext { private final long failureDetectionTimeMillis; private final long failureDetectionCount; private final Connection connectionToAbort; - private final Monitor monitor; + private final HostMonitor monitor; private volatile boolean activeContext = true; private volatile boolean nodeUnhealthy = false; @@ -65,8 +65,8 @@ public class MonitorConnectionContext { * node as unhealthy. * @param abortedConnectionsCounter Aborted connection telemetry counter. */ - public MonitorConnectionContext( - final Monitor monitor, + public HostMonitorConnectionContext( + final HostMonitor monitor, final Connection connectionToAbort, final long failureDetectionTimeMillis, final long failureDetectionIntervalMillis, @@ -102,7 +102,7 @@ public long getExpectedActiveMonitoringStartTimeNano() { return this.expectedActiveMonitoringStartTimeNano; } - public Monitor getMonitor() { + public HostMonitor getMonitor() { return this.monitor; } @@ -157,7 +157,7 @@ void abortConnection() { // ignore LOGGER.finest( () -> Messages.get( - "MonitorConnectionContext.exceptionAbortingConnection", + "HostMonitorConnectionContext.exceptionAbortingConnection", new Object[] {sqlEx.getMessage()})); } } @@ -224,7 +224,7 @@ void setConnectionValid( * Math.max(0, this.getFailureDetectionCount()); if (invalidNodeDurationNano >= TimeUnit.MILLISECONDS.toNanos(maxInvalidNodeDurationMillis)) { - LOGGER.fine(() -> Messages.get("MonitorConnectionContext.hostDead", new Object[] {hostName})); + LOGGER.fine(() -> Messages.get("HostMonitorConnectionContext.hostDead", new Object[] {hostName})); this.setNodeUnhealthy(true); this.abortConnection(); return; @@ -232,7 +232,7 @@ void setConnectionValid( LOGGER.finest( () -> Messages.get( - "MonitorConnectionContext.hostNotResponding", + "HostMonitorConnectionContext.hostNotResponding", new Object[] {hostName, this.getFailureCount()})); return; } @@ -242,7 +242,7 @@ void setConnectionValid( this.setNodeUnhealthy(false); LOGGER.finest( - () -> Messages.get("MonitorConnectionContext.hostAlive", + () -> Messages.get("HostMonitorConnectionContext.hostAlive", new Object[] {hostName})); } diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/MonitorImpl.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/HostMonitorImpl.java similarity index 89% rename from wrapper/src/main/java/software/amazon/jdbc/plugin/efm/MonitorImpl.java rename to wrapper/src/main/java/software/amazon/jdbc/plugin/efm/HostMonitorImpl.java index c87bb1f51..023847636 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/MonitorImpl.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/HostMonitorImpl.java @@ -40,7 +40,7 @@ * This class uses a background thread to monitor a particular server with one or more active {@link * Connection}. 
*/ -public class MonitorImpl implements Monitor { +public class HostMonitorImpl implements HostMonitor { static class ConnectionStatus { @@ -53,18 +53,18 @@ static class ConnectionStatus { } } - private static final Logger LOGGER = Logger.getLogger(MonitorImpl.class.getName()); + private static final Logger LOGGER = Logger.getLogger(HostMonitorImpl.class.getName()); private static final long THREAD_SLEEP_WHEN_INACTIVE_MILLIS = 100; private static final long MIN_CONNECTION_CHECK_TIMEOUT_MILLIS = 100; private static final String MONITORING_PROPERTY_PREFIX = "monitoring-"; - final Queue activeContexts = new ConcurrentLinkedQueue<>(); - private final Queue newContexts = new ConcurrentLinkedQueue<>(); + final Queue activeContexts = new ConcurrentLinkedQueue<>(); + private final Queue newContexts = new ConcurrentLinkedQueue<>(); private final PluginService pluginService; private final TelemetryFactory telemetryFactory; private final Properties properties; private final HostSpec hostSpec; - private final MonitorThreadContainer threadContainer; + private final HostMonitorThreadContainer threadContainer; private final long monitorDisposalTimeMillis; private volatile long contextLastUsedTimestampNano; private volatile boolean stopped = false; @@ -78,22 +78,22 @@ static class ConnectionStatus { * Store the monitoring configuration for a connection. * * @param pluginService A service for creating new connections. - * @param hostSpec The {@link HostSpec} of the server this {@link MonitorImpl} + * @param hostSpec The {@link HostSpec} of the server this {@link HostMonitorImpl} * instance is monitoring. * @param properties The {@link Properties} containing additional monitoring * configuration. * @param monitorDisposalTimeMillis Time in milliseconds before stopping the monitoring thread * where there are no active connection to the server this - * {@link MonitorImpl} instance is monitoring. - * @param threadContainer A reference to the {@link MonitorThreadContainer} implementation + * {@link HostMonitorImpl} instance is monitoring. + * @param threadContainer A reference to the {@link HostMonitorThreadContainer} implementation * that initialized this class. 
*/ - public MonitorImpl( + public HostMonitorImpl( final @NonNull PluginService pluginService, @NonNull final HostSpec hostSpec, @NonNull final Properties properties, final long monitorDisposalTimeMillis, - @NonNull final MonitorThreadContainer threadContainer) { + @NonNull final HostMonitorThreadContainer threadContainer) { this.pluginService = pluginService; this.telemetryFactory = pluginService.getTelemetryFactory(); this.hostSpec = hostSpec; @@ -112,9 +112,9 @@ public MonitorImpl( } @Override - public void startMonitoring(final MonitorConnectionContext context) { + public void startMonitoring(final HostMonitorConnectionContext context) { if (this.stopped) { - LOGGER.warning(() -> Messages.get("MonitorImpl.monitorIsStopped", new Object[] {this.hostSpec.getHost()})); + LOGGER.warning(() -> Messages.get("HostMonitorImpl.monitorIsStopped", new Object[] {this.hostSpec.getHost()})); } final long currentTimeNano = this.getCurrentTimeNano(); context.setStartMonitorTimeNano(currentTimeNano); @@ -123,9 +123,9 @@ public void startMonitoring(final MonitorConnectionContext context) { } @Override - public void stopMonitoring(final MonitorConnectionContext context) { + public void stopMonitoring(final HostMonitorConnectionContext context) { if (context == null) { - LOGGER.warning(() -> Messages.get("MonitorImpl.contextNullWarning")); + LOGGER.warning(() -> Messages.get("HostMonitorImpl.contextNullWarning")); return; } @@ -142,7 +142,7 @@ public void clearContexts() { public void run() { LOGGER.finest(() -> Messages.get( - "MonitorImpl.startMonitoringThread", + "HostMonitorImpl.startMonitoringThread", new Object[]{this.hostSpec.getHost()})); try { @@ -151,8 +151,8 @@ public void run() { try { // process new contexts - MonitorConnectionContext newMonitorContext; - MonitorConnectionContext firstAddedNewMonitorContext = null; + HostMonitorConnectionContext newMonitorContext; + HostMonitorConnectionContext firstAddedNewMonitorContext = null; final long currentTimeNano = this.getCurrentTimeNano(); while ((newMonitorContext = this.newContexts.poll()) != null) { @@ -187,8 +187,8 @@ public void run() { final ConnectionStatus status = checkConnectionStatus(this.nodeCheckTimeoutMillis); long delayMillis = -1; - MonitorConnectionContext monitorContext; - MonitorConnectionContext firstAddedMonitorContext = null; + HostMonitorConnectionContext monitorContext; + HostMonitorConnectionContext firstAddedMonitorContext = null; while ((monitorContext = this.activeContexts.poll()) != null) { @@ -261,7 +261,7 @@ public void run() { LOGGER.log( Level.FINEST, Messages.get( - "MonitorImpl.exceptionDuringMonitoringContinue", + "HostMonitorImpl.exceptionDuringMonitoringContinue", new Object[]{this.hostSpec.getHost()}), ex); // We want to print full trace stack of the exception. } @@ -271,7 +271,7 @@ public void run() { // exit thread LOGGER.warning( () -> Messages.get( - "MonitorImpl.interruptedExceptionDuringMonitoring", + "HostMonitorImpl.interruptedExceptionDuringMonitoring", new Object[] {this.hostSpec.getHost()})); } catch (final Exception ex) { // this should not be reached; log and exit thread @@ -279,7 +279,7 @@ public void run() { LOGGER.log( Level.FINEST, Messages.get( - "MonitorImpl.exceptionDuringMonitoringStop", + "HostMonitorImpl.exceptionDuringMonitoringStop", new Object[]{this.hostSpec.getHost()}), ex); // We want to print full trace stack of the exception. 
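The `monitoring-` prefix behind the `MONITORING_PROPERTY_PREFIX` constant in the hunks above lets callers tune the monitor's own connection without touching the main connection's settings. The sketch below is a simplified, hypothetical illustration of that prefix-stripping idea, not the plugin's actual `checkConnectionStatus` code; the property names are placeholders.

```java
import java.util.Properties;

public class MonitoringPropsSketch {
  private static final String MONITORING_PROPERTY_PREFIX = "monitoring-";

  // Copy user properties, letting any "monitoring-" prefixed entry override its unprefixed form.
  static Properties deriveMonitoringProps(Properties userProps) {
    Properties monitoringProps = new Properties();
    for (String name : userProps.stringPropertyNames()) {
      if (name.startsWith(MONITORING_PROPERTY_PREFIX)) {
        monitoringProps.setProperty(
            name.substring(MONITORING_PROPERTY_PREFIX.length()), userProps.getProperty(name));
      } else if (!monitoringProps.containsKey(name)) {
        monitoringProps.setProperty(name, userProps.getProperty(name));
      }
    }
    return monitoringProps;
  }

  public static void main(String[] args) {
    Properties userProps = new Properties();
    userProps.setProperty("socketTimeout", "30000");           // main connection
    userProps.setProperty("monitoring-socketTimeout", "3000");  // monitoring connection only
    System.out.println(deriveMonitoringProps(userProps));       // prints {socketTimeout=3000}
  }
}
```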
} @@ -296,7 +296,7 @@ public void run() { } LOGGER.finest(() -> Messages.get( - "MonitorImpl.stopMonitoringThread", + "HostMonitorImpl.stopMonitoringThread", new Object[]{this.hostSpec.getHost()})); } @@ -335,6 +335,7 @@ ConnectionStatus checkConnectionStatus(final long shortestFailureDetectionInterv LOGGER.finest(() -> "Opening a monitoring connection to " + this.hostSpec.getUrl()); startNano = this.getCurrentTimeNano(); + // TODO: replace with ConnectionService#open this.monitoringConn = this.pluginService.forceConnect(this.hostSpec, monitoringConnProperties); LOGGER.finest(() -> "Opened monitoring connection: " + this.monitoringConn); return new ConnectionStatus(true, this.getCurrentTimeNano() - startNano); diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/MonitorInitializer.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/HostMonitorInitializer.java similarity index 77% rename from wrapper/src/main/java/software/amazon/jdbc/plugin/efm/MonitorInitializer.java rename to wrapper/src/main/java/software/amazon/jdbc/plugin/efm/HostMonitorInitializer.java index 5e25a66af..e920e69ba 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/MonitorInitializer.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/HostMonitorInitializer.java @@ -19,8 +19,8 @@ import java.util.Properties; import software.amazon.jdbc.HostSpec; -/** Interface for initialize a new {@link MonitorImpl}. */ +/** Interface for initialize a new {@link HostMonitorImpl}. */ @FunctionalInterface -public interface MonitorInitializer { - Monitor createMonitor(HostSpec hostSpec, Properties properties, MonitorThreadContainer threadContainer); +public interface HostMonitorInitializer { + HostMonitor createMonitor(HostSpec hostSpec, Properties properties, HostMonitorThreadContainer threadContainer); } diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/MonitorService.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/HostMonitorService.java similarity index 81% rename from wrapper/src/main/java/software/amazon/jdbc/plugin/efm/MonitorService.java rename to wrapper/src/main/java/software/amazon/jdbc/plugin/efm/HostMonitorService.java index 6f034fd75..21d52fc67 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/MonitorService.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/HostMonitorService.java @@ -25,9 +25,9 @@ * Interface for monitor services. This class implements ways to start and stop monitoring servers * when connections are created. */ -public interface MonitorService { +public interface HostMonitorService { - MonitorConnectionContext startMonitoring( + HostMonitorConnectionContext startMonitoring( Connection connectionToAbort, Set nodeKeys, HostSpec hostSpec, @@ -37,12 +37,12 @@ MonitorConnectionContext startMonitoring( int failureDetectionCount); /** - * Stop monitoring for a connection represented by the given {@link MonitorConnectionContext}. - * Removes the context from the {@link MonitorImpl}. + * Stop monitoring for a connection represented by the given {@link HostMonitorConnectionContext}. + * Removes the context from the {@link HostMonitorImpl}. * - * @param context The {@link MonitorConnectionContext} representing a connection. + * @param context The {@link HostMonitorConnectionContext} representing a connection. 
*/ - void stopMonitoring(MonitorConnectionContext context); + void stopMonitoring(HostMonitorConnectionContext context); /** * Stop monitoring the node for all connections represented by the given set of node keys. diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/MonitorServiceImpl.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/HostMonitorServiceImpl.java similarity index 76% rename from wrapper/src/main/java/software/amazon/jdbc/plugin/efm/MonitorServiceImpl.java rename to wrapper/src/main/java/software/amazon/jdbc/plugin/efm/HostMonitorServiceImpl.java index 04ba07fd7..883179fc8 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/MonitorServiceImpl.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/HostMonitorServiceImpl.java @@ -35,9 +35,9 @@ * This class handles the creation and clean up of monitoring threads to servers with one or more * active connections. */ -public class MonitorServiceImpl implements MonitorService { +public class HostMonitorServiceImpl implements HostMonitorService { - private static final Logger LOGGER = Logger.getLogger(MonitorServiceImpl.class.getName()); + private static final Logger LOGGER = Logger.getLogger(HostMonitorServiceImpl.class.getName()); public static final AwsWrapperProperty MONITOR_DISPOSAL_TIME_MS = new AwsWrapperProperty( @@ -46,19 +46,19 @@ public class MonitorServiceImpl implements MonitorService { "Interval in milliseconds for a monitor to be considered inactive and to be disposed."); private final PluginService pluginService; - private MonitorThreadContainer threadContainer; + private HostMonitorThreadContainer threadContainer; - final MonitorInitializer monitorInitializer; + final HostMonitorInitializer monitorInitializer; private Set cachedMonitorNodeKeys = null; - private WeakReference cachedMonitor = null; + private WeakReference cachedMonitor = null; final TelemetryFactory telemetryFactory; final TelemetryCounter abortedConnectionsCounter; - public MonitorServiceImpl(final @NonNull PluginService pluginService) { + public HostMonitorServiceImpl(final @NonNull PluginService pluginService) { this( pluginService, (hostSpec, properties, monitorService) -> - new MonitorImpl( + new HostMonitorImpl( pluginService, hostSpec, properties, @@ -68,19 +68,19 @@ public MonitorServiceImpl(final @NonNull PluginService pluginService) { ExecutorFactory.newCachedThreadPool("monitor")); } - MonitorServiceImpl( + HostMonitorServiceImpl( final PluginService pluginService, - final MonitorInitializer monitorInitializer, + final HostMonitorInitializer monitorInitializer, final ExecutorServiceInitializer executorServiceInitializer) { this.pluginService = pluginService; this.telemetryFactory = pluginService.getTelemetryFactory(); this.abortedConnectionsCounter = telemetryFactory.createCounter("efm.connections.aborted"); this.monitorInitializer = monitorInitializer; - this.threadContainer = MonitorThreadContainer.getInstance(executorServiceInitializer); + this.threadContainer = HostMonitorThreadContainer.getInstance(executorServiceInitializer); } @Override - public MonitorConnectionContext startMonitoring( + public HostMonitorConnectionContext startMonitoring( final Connection connectionToAbort, final Set nodeKeys, final HostSpec hostSpec, @@ -91,11 +91,11 @@ public MonitorConnectionContext startMonitoring( if (nodeKeys.isEmpty()) { throw new IllegalArgumentException(Messages.get( - "MonitorServiceImpl.emptyAliasSet", + "HostMonitorServiceImpl.emptyAliasSet", new Object[] {hostSpec})); } - Monitor 
monitor = this.cachedMonitor == null ? null : this.cachedMonitor.get(); + HostMonitor monitor = this.cachedMonitor == null ? null : this.cachedMonitor.get(); if (monitor == null || monitor.isStopped() || this.cachedMonitorNodeKeys == null @@ -106,8 +106,8 @@ public MonitorConnectionContext startMonitoring( this.cachedMonitorNodeKeys = Collections.unmodifiableSet(nodeKeys); } - final MonitorConnectionContext context = - new MonitorConnectionContext( + final HostMonitorConnectionContext context = + new HostMonitorConnectionContext( monitor, connectionToAbort, failureDetectionTimeMillis, @@ -121,14 +121,14 @@ public MonitorConnectionContext startMonitoring( } @Override - public void stopMonitoring(@NonNull final MonitorConnectionContext context) { - final Monitor monitor = context.getMonitor(); + public void stopMonitoring(@NonNull final HostMonitorConnectionContext context) { + final HostMonitor monitor = context.getMonitor(); monitor.stopMonitoring(context); } @Override public void stopMonitoringForAllConnections(@NonNull final Set nodeKeys) { - Monitor monitor; + HostMonitor monitor; for (final String nodeKey : nodeKeys) { monitor = this.threadContainer.getMonitor(nodeKey); if (monitor != null) { @@ -144,19 +144,19 @@ public void releaseResources() { } /** - * Get or create a {@link MonitorImpl} for a server. + * Get or create a {@link HostMonitorImpl} for a server. * * @param nodeKeys All references to the server requiring monitoring. * @param hostSpec Information such as hostname of the server. * @param properties The user configuration for the current connection. - * @return A {@link MonitorImpl} object associated with a specific server. + * @return A {@link HostMonitorImpl} object associated with a specific server. */ - protected Monitor getMonitor(final Set nodeKeys, final HostSpec hostSpec, final Properties properties) { + protected HostMonitor getMonitor(final Set nodeKeys, final HostSpec hostSpec, final Properties properties) { return this.threadContainer.getOrCreateMonitor( nodeKeys, () -> monitorInitializer.createMonitor(hostSpec, properties, this.threadContainer)); } - MonitorThreadContainer getThreadContainer() { + HostMonitorThreadContainer getThreadContainer() { return this.threadContainer; } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/MonitorThreadContainer.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/HostMonitorThreadContainer.java similarity index 67% rename from wrapper/src/main/java/software/amazon/jdbc/plugin/efm/MonitorThreadContainer.java rename to wrapper/src/main/java/software/amazon/jdbc/plugin/efm/HostMonitorThreadContainer.java index 3dbaea346..266e64630 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/MonitorThreadContainer.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/HostMonitorThreadContainer.java @@ -32,26 +32,27 @@ * This singleton class keeps track of all the monitoring threads and handles the creation and clean * up of each monitoring thread. 
*/ -public class MonitorThreadContainer { +public class HostMonitorThreadContainer { - private static MonitorThreadContainer singleton = null; - private final Map monitorMap = new ConcurrentHashMap<>(); - private final Map> tasksMap = new ConcurrentHashMap<>(); + private static HostMonitorThreadContainer singleton = null; + private final Map> tasksMap = new ConcurrentHashMap<>(); + // TODO: remove monitorMap and threadPool and submit monitors to MonitorService instead + private final Map monitorMap = new ConcurrentHashMap<>(); private final ExecutorService threadPool; private static final ReentrantLock LOCK_OBJECT = new ReentrantLock(); private static final ReentrantLock MONITOR_LOCK_OBJECT = new ReentrantLock(); /** - * Create an instance of the {@link MonitorThreadContainer}. + * Create an instance of the {@link HostMonitorThreadContainer}. * - * @return a singleton instance of the {@link MonitorThreadContainer}. + * @return a singleton instance of the {@link HostMonitorThreadContainer}. */ - public static MonitorThreadContainer getInstance() { + public static HostMonitorThreadContainer getInstance() { return getInstance(Executors::newCachedThreadPool); } - static MonitorThreadContainer getInstance(final ExecutorServiceInitializer executorServiceInitializer) { - MonitorThreadContainer singletonToReturn = singleton; + static HostMonitorThreadContainer getInstance(final ExecutorServiceInitializer executorServiceInitializer) { + HostMonitorThreadContainer singletonToReturn = singleton; if (singletonToReturn != null) { return singletonToReturn; @@ -60,7 +61,7 @@ static MonitorThreadContainer getInstance(final ExecutorServiceInitializer execu LOCK_OBJECT.lock(); try { if (singleton == null) { - singleton = new MonitorThreadContainer(executorServiceInitializer); + singleton = new HostMonitorThreadContainer(executorServiceInitializer); } singletonToReturn = singleton; } finally { @@ -70,7 +71,7 @@ static MonitorThreadContainer getInstance(final ExecutorServiceInitializer execu } /** - * Release resources held in the {@link MonitorThreadContainer} and clear references to the + * Release resources held in the {@link HostMonitorThreadContainer} and clear references to the * container. 
*/ public static void releaseInstance() { @@ -88,31 +89,31 @@ public static void releaseInstance() { } } - private MonitorThreadContainer(final ExecutorServiceInitializer executorServiceInitializer) { + private HostMonitorThreadContainer(final ExecutorServiceInitializer executorServiceInitializer) { this.threadPool = executorServiceInitializer.createExecutorService(); } - public Map getMonitorMap() { + public Map getMonitorMap() { return monitorMap; } - public Map> getTasksMap() { + public Map> getTasksMap() { return tasksMap; } - Monitor getMonitor(final String node) { + HostMonitor getMonitor(final String node) { return monitorMap.get(node); } - Monitor getOrCreateMonitor(final Set nodeKeys, final Supplier monitorSupplier) { + HostMonitor getOrCreateMonitor(final Set nodeKeys, final Supplier monitorSupplier) { if (nodeKeys.isEmpty()) { - throw new IllegalArgumentException(Messages.get("MonitorThreadContainer.emptyNodeKeys")); + throw new IllegalArgumentException(Messages.get("HostMonitorThreadContainer.emptyNodeKeys")); } MONITOR_LOCK_OBJECT.lock(); try { - Monitor monitor = null; + HostMonitor monitor = null; String anyNodeKey = null; for (final String nodeKey : nodeKeys) { monitor = monitorMap.get(nodeKey); @@ -126,7 +127,7 @@ Monitor getOrCreateMonitor(final Set nodeKeys, final Supplier m monitor = monitorMap.computeIfAbsent( anyNodeKey, k -> { - final Monitor newMonitor = monitorSupplier.get(); + final HostMonitor newMonitor = monitorSupplier.get(); addTask(newMonitor); return newMonitor; }); @@ -139,28 +140,28 @@ Monitor getOrCreateMonitor(final Set nodeKeys, final Supplier m } } - private void populateMonitorMap(final Set nodeKeys, final Monitor monitor) { + private void populateMonitorMap(final Set nodeKeys, final HostMonitor monitor) { for (final String nodeKey : nodeKeys) { monitorMap.putIfAbsent(nodeKey, monitor); } } - void addTask(final Monitor monitor) { + void addTask(final HostMonitor monitor) { tasksMap.computeIfAbsent(monitor, k -> threadPool.submit(monitor)); } /** - * Remove references to the given {@link MonitorImpl} object and stop the background monitoring + * Remove references to the given {@link HostMonitorImpl} object and stop the background monitoring * thread. * - * @param monitor The {@link MonitorImpl} representing a monitoring thread. + * @param monitor The {@link HostMonitorImpl} representing a monitoring thread. 
*/ - public void releaseResource(final Monitor monitor) { + public void releaseResource(final HostMonitor monitor) { if (monitor == null) { return; } - final List monitorList = Collections.singletonList(monitor); + final List monitorList = Collections.singletonList(monitor); MONITOR_LOCK_OBJECT.lock(); try { diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/HostMonitoringConnectionPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/HostMonitoringConnectionPlugin.java index 011996acf..be9f56746 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/HostMonitoringConnectionPlugin.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm/HostMonitoringConnectionPlugin.java @@ -78,9 +78,9 @@ public class HostMonitoringConnectionPlugin extends AbstractConnectionPlugin protected final Set subscribedMethods; protected @NonNull Properties properties; - private final @NonNull Supplier monitorServiceSupplier; + private final @NonNull Supplier monitorServiceSupplier; private final @NonNull PluginService pluginService; - private MonitorService monitorService; + private HostMonitorService monitorService; private final RdsUtils rdsHelper; private HostSpec monitoringHostSpec; @@ -103,23 +103,14 @@ public class HostMonitoringConnectionPlugin extends AbstractConnectionPlugin */ public HostMonitoringConnectionPlugin( final @NonNull PluginService pluginService, final @NonNull Properties properties) { - this(pluginService, properties, () -> new MonitorServiceImpl(pluginService), new RdsUtils()); + this(pluginService, properties, () -> new HostMonitorServiceImpl(pluginService), new RdsUtils()); } HostMonitoringConnectionPlugin( final @NonNull PluginService pluginService, final @NonNull Properties properties, - final @NonNull Supplier monitorServiceSupplier, + final @NonNull Supplier monitorServiceSupplier, final RdsUtils rdsHelper) { - if (pluginService == null) { - throw new IllegalArgumentException("pluginService"); - } - if (properties == null) { - throw new IllegalArgumentException("properties"); - } - if (monitorServiceSupplier == null) { - throw new IllegalArgumentException("monitorServiceSupplier"); - } this.pluginService = pluginService; this.properties = properties; this.monitorServiceSupplier = monitorServiceSupplier; @@ -144,7 +135,7 @@ public Set getSubscribedMethods() { } /** - * Executes the given SQL function with {@link MonitorImpl} if connection monitoring is enabled. + * Executes the given SQL function with {@link HostMonitorImpl} if connection monitoring is enabled. * Otherwise, executes the SQL function directly. */ @Override @@ -164,7 +155,7 @@ public T execute( initMonitorService(); T result; - MonitorConnectionContext monitorContext = null; + HostMonitorConnectionContext monitorContext = null; try { LOGGER.finest( diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/Monitor.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/HostMonitor.java similarity index 87% rename from wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/Monitor.java rename to wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/HostMonitor.java index 5689db2ce..8c0300ccd 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/Monitor.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/HostMonitor.java @@ -20,9 +20,9 @@ * Interface for monitors. This class uses background threads to monitor servers with one or more * connections for more efficient failure detection during method execution. 
*/ -public interface Monitor extends AutoCloseable, Runnable { +public interface HostMonitor extends AutoCloseable, Runnable { - void startMonitoring(MonitorConnectionContext context); + void startMonitoring(HostMonitorConnectionContext context); boolean canDispose(); } diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/MonitorConnectionContext.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/HostMonitorConnectionContext.java similarity index 94% rename from wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/MonitorConnectionContext.java rename to wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/HostMonitorConnectionContext.java index 806047147..c8c25da8d 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/MonitorConnectionContext.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/HostMonitorConnectionContext.java @@ -25,7 +25,7 @@ * Monitoring context for each connection. This contains each connection's criteria for whether a * server should be considered unhealthy. The context is shared between the main thread and the monitor thread. */ -public class MonitorConnectionContext { +public class HostMonitorConnectionContext { private final AtomicReference> connectionToAbortRef; private final AtomicBoolean nodeUnhealthy = new AtomicBoolean(false); @@ -35,7 +35,7 @@ public class MonitorConnectionContext { * * @param connectionToAbort A reference to the connection associated with this context that will be aborted. */ - public MonitorConnectionContext(final Connection connectionToAbort) { + public HostMonitorConnectionContext(final Connection connectionToAbort) { this.connectionToAbortRef = new AtomicReference<>(new WeakReference<>(connectionToAbort)); } diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/MonitorImpl.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/HostMonitorImpl.java similarity index 78% rename from wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/MonitorImpl.java rename to wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/HostMonitorImpl.java index 44db2dda9..14baa6669 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/MonitorImpl.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/HostMonitorImpl.java @@ -35,32 +35,29 @@ import org.checkerframework.checker.nullness.qual.NonNull; import software.amazon.jdbc.HostSpec; import software.amazon.jdbc.PluginService; -import software.amazon.jdbc.hostavailability.HostAvailability; import software.amazon.jdbc.util.ExecutorFactory; import software.amazon.jdbc.util.Messages; import software.amazon.jdbc.util.PropertyUtils; -import software.amazon.jdbc.util.StringUtils; import software.amazon.jdbc.util.telemetry.TelemetryContext; import software.amazon.jdbc.util.telemetry.TelemetryCounter; import software.amazon.jdbc.util.telemetry.TelemetryFactory; -import software.amazon.jdbc.util.telemetry.TelemetryGauge; import software.amazon.jdbc.util.telemetry.TelemetryTraceLevel; /** * This class uses a background thread to monitor a particular server with one or more active {@link * Connection}. 
*/ -public class MonitorImpl implements Monitor { +public class HostMonitorImpl implements HostMonitor { - private static final Logger LOGGER = Logger.getLogger(MonitorImpl.class.getName()); + private static final Logger LOGGER = Logger.getLogger(HostMonitorImpl.class.getName()); private static final long THREAD_SLEEP_NANO = TimeUnit.MILLISECONDS.toNanos(100); private static final String MONITORING_PROPERTY_PREFIX = "monitoring-"; protected static final Executor ABORT_EXECUTOR = ExecutorFactory.newSingleThreadExecutor("abort"); - private final Queue> activeContexts = new ConcurrentLinkedQueue<>(); - private final Map>> newContexts = + private final Queue> activeContexts = new ConcurrentLinkedQueue<>(); + private final Map>> newContexts = new ConcurrentHashMap<>(); private final PluginService pluginService; private final TelemetryFactory telemetryFactory; @@ -68,6 +65,7 @@ public class MonitorImpl implements Monitor { private final HostSpec hostSpec; private final AtomicBoolean stopped = new AtomicBoolean(false); private Connection monitoringConn = null; + // TODO: remove and submit monitors to MonitorService instead private final ExecutorService threadPool = ExecutorFactory.newFixedThreadPool(2, "threadPool"); @@ -79,17 +77,13 @@ public class MonitorImpl implements Monitor { private long failureCount; private boolean nodeUnhealthy = false; - - private final TelemetryGauge newContextsSizeGauge; - private final TelemetryGauge activeContextsSizeGauge; - private final TelemetryGauge nodeHealtyGauge; private final TelemetryCounter abortedConnectionsCounter; /** * Store the monitoring configuration for a connection. * * @param pluginService A service for creating new connections. - * @param hostSpec The {@link HostSpec} of the server this {@link MonitorImpl} + * @param hostSpec The {@link HostSpec} of the server this {@link HostMonitorImpl} * instance is monitoring. * @param properties The {@link Properties} containing additional monitoring * configuration. @@ -98,7 +92,7 @@ public class MonitorImpl implements Monitor { * @param failureDetectionCount A failure detection count. * @param abortedConnectionsCounter Aborted connection telemetry counter. */ - public MonitorImpl( + public HostMonitorImpl( final @NonNull PluginService pluginService, final @NonNull HostSpec hostSpec, final @NonNull Properties properties, @@ -116,22 +110,6 @@ public MonitorImpl( this.failureDetectionCount = failureDetectionCount; this.abortedConnectionsCounter = abortedConnectionsCounter; - final String hostId = StringUtils.isNullOrEmpty(this.hostSpec.getHostId()) - ? this.hostSpec.getHost() - : this.hostSpec.getHostId(); - - this.newContextsSizeGauge = telemetryFactory.createGauge( - String.format("efm2.newContexts.size.%s", hostId), - this::getActiveContextSize); - - this.activeContextsSizeGauge = telemetryFactory.createGauge( - String.format("efm2.activeContexts.size.%s", hostId), - () -> (long) this.activeContexts.size()); - - this.nodeHealtyGauge = telemetryFactory.createGauge( - String.format("efm2.nodeHealthy.%s", hostId), - () -> this.nodeUnhealthy ? 0L : 1L); - this.threadPool.submit(this::newContextRun); // task to handle new contexts this.threadPool.submit(this); // task to handle active monitoring contexts this.threadPool.shutdown(); // No more tasks are accepted by pool. 
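For context, the failure-detection parameters threaded through this constructor map to driver-level connection properties (`failureDetectionTime`, `failureDetectionInterval`, `failureDetectionCount`, plus `monitorDisposalTime` for monitor disposal). A minimal usage sketch follows; the endpoint, credentials, and chosen values are placeholders, and it is assumed that `wrapperPlugins` includes the `efm2` plugin code.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

public class EfmConfigExample {
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.setProperty("user", "dbuser");          // placeholder credentials
    props.setProperty("password", "dbpassword");
    props.setProperty("wrapperPlugins", "efm2");  // enable enhanced failure monitoring (v2)

    // Grace period before probing starts, probe interval, and probes-to-fail threshold.
    props.setProperty("failureDetectionTime", "30000");
    props.setProperty("failureDetectionInterval", "5000");
    props.setProperty("failureDetectionCount", "3");
    // Idle time before a monitor is disposed (HostMonitorServiceImpl.MONITOR_DISPOSAL_TIME_MS).
    props.setProperty("monitorDisposalTime", "600000");

    String url = "jdbc:aws-wrapper:postgresql://example-cluster.cluster-xyz.us-east-1"
        + ".rds.amazonaws.com:5432/postgres";  // placeholder endpoint
    try (Connection conn = DriverManager.getConnection(url, props)) {
      // Statements executed here are watched by the background host monitor.
    }
  }
}
```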
@@ -151,25 +129,21 @@ public void close() throws Exception { this.threadPool.shutdownNow(); } LOGGER.finest(() -> Messages.get( - "MonitorImpl.stopped", + "HostMonitorImpl.stopped", new Object[] {this.hostSpec.getHost()})); } - protected long getActiveContextSize() { - return this.newContexts.values().stream().mapToLong(java.util.Collection::size).sum(); - } - @Override - public void startMonitoring(final MonitorConnectionContext context) { + public void startMonitoring(final HostMonitorConnectionContext context) { if (this.stopped.get()) { - LOGGER.warning(() -> Messages.get("MonitorImpl.monitorIsStopped", new Object[] {this.hostSpec.getHost()})); + LOGGER.warning(() -> Messages.get("HostMonitorImpl.monitorIsStopped", new Object[] {this.hostSpec.getHost()})); } final long currentTimeNano = this.getCurrentTimeNano(); long startMonitoringTimeNano = this.truncateNanoToSeconds( currentTimeNano + this.failureDetectionTimeNano); - Queue> queue = + Queue> queue = this.newContexts.computeIfAbsent( startMonitoringTimeNano, (key) -> new ConcurrentLinkedQueue<>()); @@ -180,11 +154,6 @@ private long truncateNanoToSeconds(final long timeNano) { return TimeUnit.SECONDS.toNanos(TimeUnit.NANOSECONDS.toSeconds(timeNano)); } - public void clearContexts() { - this.newContexts.clear(); - this.activeContexts.clear(); - } - // This method helps to organize unit tests. long getCurrentTimeNano() { return System.nanoTime(); @@ -193,8 +162,8 @@ long getCurrentTimeNano() { public void newContextRun() { LOGGER.finest(() -> Messages.get( - "MonitorImpl.startMonitoringThreadNewContext", - new Object[]{this.hostSpec.getHost()})); + "HostMonitorImpl.startMonitoringThreadNewContext", + new Object[] {this.hostSpec.getHost()})); try { while (!this.stopped.get()) { @@ -206,14 +175,14 @@ public void newContextRun() { // Get entries with key (that is a time in nanos) less or equal than current time. .filter(entry -> entry.getKey() < currentTimeNano) .forEach(entry -> { - final Queue> queue = entry.getValue(); + final Queue> queue = entry.getValue(); processedKeys.add(entry.getKey()); // Each value of found entry is a queue of monitoring contexts awaiting active monitoring. // Add all contexts to an active monitoring contexts queue. // Ignore disposed contexts. - WeakReference contextWeakRef; + WeakReference contextWeakRef; while ((contextWeakRef = queue.poll()) != null) { - MonitorConnectionContext context = contextWeakRef.get(); + HostMonitorConnectionContext context = contextWeakRef.get(); if (context != null && context.isActive()) { this.activeContexts.add(contextWeakRef); } @@ -231,23 +200,23 @@ public void newContextRun() { LOGGER.log( Level.FINEST, Messages.get( - "MonitorImpl.exceptionDuringMonitoringStop", - new Object[]{this.hostSpec.getHost()}), + "HostMonitorImpl.exceptionDuringMonitoringStop", + new Object[] {this.hostSpec.getHost()}), ex); // We want to print full trace stack of the exception. 
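The `startMonitoring` hunk above defers each new context until roughly `currentTime + failureDetectionTime` by truncating that instant to whole seconds and using the result as a map key, so contexts that become due within the same second share one queue. Below is a standalone sketch of that bucketing idea under assumed names; it is not the plugin's actual code.

```java
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.TimeUnit;

public class SecondBucketSketch {
  // Contexts keyed by the second (expressed in nanos) at which active monitoring should begin.
  private final Map<Long, Queue<String>> newContexts = new ConcurrentHashMap<>();

  private static long truncateNanoToSeconds(long timeNano) {
    return TimeUnit.SECONDS.toNanos(TimeUnit.NANOSECONDS.toSeconds(timeNano));
  }

  void schedule(String contextId, long failureDetectionTimeNano) {
    long dueNano = truncateNanoToSeconds(System.nanoTime() + failureDetectionTimeNano);
    newContexts.computeIfAbsent(dueNano, key -> new ConcurrentLinkedQueue<>()).add(contextId);
  }

  // A background task would periodically drain every bucket whose key is due.
  void drainDueBuckets() {
    long now = System.nanoTime();
    newContexts.entrySet().removeIf(entry -> {
      if (entry.getKey() <= now) {
        entry.getValue().forEach(id -> System.out.println("activating " + id));
        return true;
      }
      return false;
    });
  }

  public static void main(String[] args) {
    SecondBucketSketch sketch = new SecondBucketSketch();
    sketch.schedule("ctx-1", TimeUnit.MILLISECONDS.toNanos(0));
    sketch.drainDueBuckets();
  }
}
```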
} } LOGGER.finest(() -> Messages.get( - "MonitorImpl.stopMonitoringThreadNewContext", - new Object[]{this.hostSpec.getHost()})); + "HostMonitorImpl.stopMonitoringThreadNewContext", + new Object[] {this.hostSpec.getHost()})); } @Override public void run() { LOGGER.finest(() -> Messages.get( - "MonitorImpl.startMonitoringThread", - new Object[]{this.hostSpec.getHost()})); + "HostMonitorImpl.startMonitoringThread", + new Object[] {this.hostSpec.getHost()})); try { while (!this.stopped.get()) { @@ -263,19 +232,15 @@ public void run() { this.updateNodeHealthStatus(isValid, statusCheckStartTimeNano, statusCheckEndTimeNano); - if (this.nodeUnhealthy) { - this.pluginService.setAvailability(this.hostSpec.asAliases(), HostAvailability.NOT_AVAILABLE); - } - - final List> tmpActiveContexts = new ArrayList<>(); - WeakReference monitorContextWeakRef; + final List> tmpActiveContexts = new ArrayList<>(); + WeakReference monitorContextWeakRef; while ((monitorContextWeakRef = this.activeContexts.poll()) != null) { if (this.stopped.get()) { break; } - MonitorConnectionContext monitorContext = monitorContextWeakRef.get(); + HostMonitorConnectionContext monitorContext = monitorContextWeakRef.get(); if (monitorContext == null) { continue; } @@ -314,8 +279,8 @@ public void run() { LOGGER.log( Level.FINEST, Messages.get( - "MonitorImpl.exceptionDuringMonitoringStop", - new Object[]{this.hostSpec.getHost()}), + "HostMonitorImpl.exceptionDuringMonitoringStop", + new Object[] {this.hostSpec.getHost()}), ex); // We want to print full trace stack of the exception. } } finally { @@ -330,8 +295,8 @@ public void run() { } LOGGER.finest(() -> Messages.get( - "MonitorImpl.stopMonitoringThread", - new Object[]{this.hostSpec.getHost()})); + "HostMonitorImpl.stopMonitoringThread", + new Object[] {this.hostSpec.getHost()})); } /** @@ -363,6 +328,7 @@ boolean checkConnectionStatus() { }); LOGGER.finest(() -> "Opening a monitoring connection to " + this.hostSpec.getUrl()); + // TODO: replace with ConnectionService#open this.monitoringConn = this.pluginService.forceConnect(this.hostSpec, monitoringConnProperties); LOGGER.finest(() -> "Opened monitoring connection: " + this.monitoringConn); return true; @@ -371,12 +337,9 @@ boolean checkConnectionStatus() { // Some drivers, like MySQL Connector/J, execute isValid() in a double of specified timeout time. 
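The hunks around this point also cover the validation timeout handed to `Connection.isValid` (halved because some drivers, such as MySQL Connector/J, may take up to twice the requested time) and the window a node may stay unresponsive before it is declared dead. A rough worked example of both calculations, using assumed values of a 5000 ms interval, a 100 ms thread sleep, and a failure detection count of 3:

```java
import java.util.concurrent.TimeUnit;

public class FailureDetectionMath {
  public static void main(String[] args) {
    long failureDetectionIntervalNano = TimeUnit.MILLISECONDS.toNanos(5000); // assumed interval
    long threadSleepNano = TimeUnit.MILLISECONDS.toNanos(100);
    int failureDetectionCount = 3;                                           // assumed count

    // Same shape as the efm2 checkConnectionStatus hunk: truncate to seconds, then halve.
    int validTimeout = (int) TimeUnit.NANOSECONDS.toSeconds(
        failureDetectionIntervalNano - threadSleepNano) / 2;
    System.out.println("isValid timeout: " + validTimeout + " s");           // 4 s / 2 = 2 s

    // Same shape as updateNodeHealthStatus: interval * max(0, count - 1).
    long maxInvalidNodeDurationNano =
        failureDetectionIntervalNano * Math.max(0, failureDetectionCount - 1);
    System.out.println("declared dead after: "
        + TimeUnit.NANOSECONDS.toMillis(maxInvalidNodeDurationNano) + " ms"); // 10000 ms
  }
}
```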
final int validTimeout = (int) TimeUnit.NANOSECONDS.toSeconds( this.failureDetectionIntervalNano - THREAD_SLEEP_NANO) / 2; - final boolean isValid = this.monitoringConn.isValid(validTimeout); - return isValid; - + return this.monitoringConn.isValid(validTimeout); } catch (final SQLException sqlEx) { return false; - } finally { if (connectContext != null) { connectContext.closeContext(); @@ -401,14 +364,15 @@ private void updateNodeHealthStatus( this.failureDetectionIntervalNano * Math.max(0, this.failureDetectionCount - 1); if (invalidNodeDurationNano >= maxInvalidNodeDurationNano) { - LOGGER.fine(() -> Messages.get("MonitorConnectionContext.hostDead", new Object[] {this.hostSpec.getHost()})); + LOGGER.fine(() -> + Messages.get("HostMonitorConnectionContext.hostDead", new Object[] {this.hostSpec.getHost()})); this.nodeUnhealthy = true; return; } LOGGER.finest( () -> Messages.get( - "MonitorConnectionContext.hostNotResponding", + "HostMonitorConnectionContext.hostNotResponding", new Object[] {this.hostSpec.getHost(), this.failureCount})); return; } @@ -416,7 +380,7 @@ private void updateNodeHealthStatus( if (this.failureCount > 0) { // Node is back alive LOGGER.finest( - () -> Messages.get("MonitorConnectionContext.hostAlive", + () -> Messages.get("HostMonitorConnectionContext.hostAlive", new Object[] {this.hostSpec.getHost()})); } @@ -433,7 +397,7 @@ private void abortConnection(final @NonNull Connection connectionToAbort) { // ignore LOGGER.finest( () -> Messages.get( - "MonitorConnectionContext.exceptionAbortingConnection", + "HostMonitorConnectionContext.exceptionAbortingConnection", new Object[] {sqlEx.getMessage()})); } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/MonitorInitializer.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/HostMonitorInitializer.java similarity index 88% rename from wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/MonitorInitializer.java rename to wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/HostMonitorInitializer.java index 9027ccc5a..f38d004c3 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/MonitorInitializer.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/HostMonitorInitializer.java @@ -20,10 +20,10 @@ import software.amazon.jdbc.HostSpec; import software.amazon.jdbc.util.telemetry.TelemetryCounter; -/** Interface for initialize a new {@link MonitorImpl}. */ +/** Interface for initialize a new {@link HostMonitorImpl}. */ @FunctionalInterface -public interface MonitorInitializer { - Monitor createMonitor( +public interface HostMonitorInitializer { + HostMonitor createMonitor( HostSpec hostSpec, Properties properties, final int failureDetectionTimeMillis, diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/MonitorService.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/HostMonitorService.java similarity index 78% rename from wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/MonitorService.java rename to wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/HostMonitorService.java index 20512dc4c..35a934057 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/MonitorService.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/HostMonitorService.java @@ -24,9 +24,9 @@ * Interface for monitor services. This class implements ways to start and stop monitoring servers * when connections are created. 
*/ -public interface MonitorService { +public interface HostMonitorService { - MonitorConnectionContext startMonitoring( + HostMonitorConnectionContext startMonitoring( Connection connectionToAbort, HostSpec hostSpec, Properties properties, @@ -35,13 +35,13 @@ MonitorConnectionContext startMonitoring( int failureDetectionCount); /** - * Stop monitoring for a connection represented by the given {@link MonitorConnectionContext}. - * Removes the context from the {@link MonitorImpl}. + * Stop monitoring for a connection represented by the given {@link HostMonitorConnectionContext}. + * Removes the context from the {@link HostMonitorImpl}. * - * @param context The {@link MonitorConnectionContext} representing a connection. + * @param context The {@link HostMonitorConnectionContext} representing a connection. * @param connectionToAbort A connection to abort. */ - void stopMonitoring(MonitorConnectionContext context, Connection connectionToAbort); + void stopMonitoring(HostMonitorConnectionContext context, Connection connectionToAbort); void releaseResources(); } diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/MonitorServiceImpl.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/HostMonitorServiceImpl.java similarity index 83% rename from wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/MonitorServiceImpl.java rename to wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/HostMonitorServiceImpl.java index de3f0781c..8b6b92f22 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/MonitorServiceImpl.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/HostMonitorServiceImpl.java @@ -28,7 +28,7 @@ import software.amazon.jdbc.PluginService; import software.amazon.jdbc.util.ExecutorFactory; import software.amazon.jdbc.util.Messages; -import software.amazon.jdbc.util.SlidingExpirationCacheWithCleanupThread; +import software.amazon.jdbc.util.storage.SlidingExpirationCacheWithCleanupThread; import software.amazon.jdbc.util.telemetry.TelemetryCounter; import software.amazon.jdbc.util.telemetry.TelemetryFactory; @@ -36,9 +36,9 @@ * This class handles the creation and clean up of monitoring threads to servers with one or more * active connections. 
*/ -public class MonitorServiceImpl implements MonitorService { +public class HostMonitorServiceImpl implements HostMonitorService { - private static final Logger LOGGER = Logger.getLogger(MonitorServiceImpl.class.getName()); + private static final Logger LOGGER = Logger.getLogger(HostMonitorServiceImpl.class.getName()); public static final AwsWrapperProperty MONITOR_DISPOSAL_TIME_MS = new AwsWrapperProperty( "monitorDisposalTime", @@ -49,10 +49,10 @@ public class MonitorServiceImpl implements MonitorService { protected static final Executor ABORT_EXECUTOR = ExecutorFactory.newSingleThreadExecutor("abort"); - - protected static final SlidingExpirationCacheWithCleanupThread monitors = + // TODO: remove and submit monitors to MonitorService instead + protected static final SlidingExpirationCacheWithCleanupThread monitors = new SlidingExpirationCacheWithCleanupThread<>( - Monitor::canDispose, + HostMonitor::canDispose, (monitor) -> { try { monitor.close(); @@ -63,11 +63,11 @@ public class MonitorServiceImpl implements MonitorService { CACHE_CLEANUP_NANO); protected final PluginService pluginService; - protected final MonitorInitializer monitorInitializer; + protected final HostMonitorInitializer monitorInitializer; protected final TelemetryFactory telemetryFactory; protected final TelemetryCounter abortedConnectionsCounter; - public MonitorServiceImpl(final @NonNull PluginService pluginService) { + public HostMonitorServiceImpl(final @NonNull PluginService pluginService) { this( pluginService, (hostSpec, @@ -76,7 +76,7 @@ public MonitorServiceImpl(final @NonNull PluginService pluginService) { failureDetectionIntervalMillis, failureDetectionCount, abortedConnectionsCounter) -> - new MonitorImpl( + new HostMonitorImpl( pluginService, hostSpec, properties, @@ -86,9 +86,9 @@ public MonitorServiceImpl(final @NonNull PluginService pluginService) { abortedConnectionsCounter)); } - MonitorServiceImpl( + HostMonitorServiceImpl( final @NonNull PluginService pluginService, - final @NonNull MonitorInitializer monitorInitializer) { + final @NonNull HostMonitorInitializer monitorInitializer) { this.pluginService = pluginService; this.telemetryFactory = pluginService.getTelemetryFactory(); this.abortedConnectionsCounter = telemetryFactory.createCounter("efm2.connections.aborted"); @@ -107,7 +107,7 @@ public static void closeAllMonitors() { } @Override - public MonitorConnectionContext startMonitoring( + public HostMonitorConnectionContext startMonitoring( final Connection connectionToAbort, final HostSpec hostSpec, final Properties properties, @@ -115,14 +115,14 @@ public MonitorConnectionContext startMonitoring( final int failureDetectionIntervalMillis, final int failureDetectionCount) { - final Monitor monitor = this.getMonitor( + final HostMonitor monitor = this.getMonitor( hostSpec, properties, failureDetectionTimeMillis, failureDetectionIntervalMillis, failureDetectionCount); - final MonitorConnectionContext context = new MonitorConnectionContext(connectionToAbort); + final HostMonitorConnectionContext context = new HostMonitorConnectionContext(connectionToAbort); monitor.startMonitoring(context); return context; @@ -130,7 +130,7 @@ public MonitorConnectionContext startMonitoring( @Override public void stopMonitoring( - @NonNull final MonitorConnectionContext context, + @NonNull final HostMonitorConnectionContext context, @NonNull Connection connectionToAbort) { if (context.shouldAbort()) { @@ -145,7 +145,7 @@ public void stopMonitoring( // ignore LOGGER.finest( () -> Messages.get( - 
"MonitorConnectionContext.exceptionAbortingConnection", + "HostMonitorConnectionContext.exceptionAbortingConnection", new Object[] {sqlEx.getMessage()})); } } else { @@ -159,16 +159,16 @@ public void releaseResources() { } /** - * Get or create a {@link MonitorImpl} for a server. + * Get or create a {@link HostMonitorImpl} for a server. * * @param hostSpec Information such as hostname of the server. * @param properties The user configuration for the current connection. * @param failureDetectionTimeMillis A failure detection time in millis. * @param failureDetectionIntervalMillis A failure detection interval in millis. * @param failureDetectionCount A failure detection count. - * @return A {@link MonitorImpl} object associated with a specific server. + * @return A {@link HostMonitorImpl} object associated with a specific server. */ - protected Monitor getMonitor( + protected HostMonitor getMonitor( final HostSpec hostSpec, final Properties properties, final int failureDetectionTimeMillis, diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/HostMonitoringConnectionPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/HostMonitoringConnectionPlugin.java index 29bb4fa0f..69da581b9 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/HostMonitoringConnectionPlugin.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/efm2/HostMonitoringConnectionPlugin.java @@ -77,9 +77,9 @@ public class HostMonitoringConnectionPlugin extends AbstractConnectionPlugin protected final Set subscribedMethods; protected @NonNull Properties properties; - private final @NonNull Supplier monitorServiceSupplier; + private final @NonNull Supplier monitorServiceSupplier; private final @NonNull PluginService pluginService; - private MonitorService monitorService; + private HostMonitorService monitorService; private final RdsUtils rdsHelper; private HostSpec monitoringHostSpec; protected final boolean isEnabled; @@ -98,23 +98,14 @@ public class HostMonitoringConnectionPlugin extends AbstractConnectionPlugin */ public HostMonitoringConnectionPlugin( final @NonNull PluginService pluginService, final @NonNull Properties properties) { - this(pluginService, properties, () -> new MonitorServiceImpl(pluginService), new RdsUtils()); + this(pluginService, properties, () -> new HostMonitorServiceImpl(pluginService), new RdsUtils()); } HostMonitoringConnectionPlugin( final @NonNull PluginService pluginService, final @NonNull Properties properties, - final @NonNull Supplier monitorServiceSupplier, + final @NonNull Supplier monitorServiceSupplier, final RdsUtils rdsHelper) { - if (pluginService == null) { - throw new IllegalArgumentException("pluginService"); - } - if (properties == null) { - throw new IllegalArgumentException("properties"); - } - if (monitorServiceSupplier == null) { - throw new IllegalArgumentException("monitorServiceSupplier"); - } this.pluginService = pluginService; this.properties = properties; this.monitorServiceSupplier = monitorServiceSupplier; @@ -135,7 +126,7 @@ public Set getSubscribedMethods() { } /** - * Executes the given SQL function with {@link MonitorImpl} if connection monitoring is enabled. + * Executes the given SQL function with {@link HostMonitorImpl} if connection monitoring is enabled. * Otherwise, executes the SQL function directly. 
*/ @Override @@ -160,7 +151,7 @@ public T execute( initMonitorService(); T result; - MonitorConnectionContext monitorContext = null; + HostMonitorConnectionContext monitorContext = null; try { LOGGER.finest( diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/ClusterAwareReaderFailoverHandler.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/ClusterAwareReaderFailoverHandler.java index 9f5899062..f97a2ed4d 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/ClusterAwareReaderFailoverHandler.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/ClusterAwareReaderFailoverHandler.java @@ -277,9 +277,14 @@ public List getReaderHostsByPriority(final List hosts) { hostsByPriority.addAll(downHostList); final int numOfReaders = activeReaders.size() + downHostList.size(); - if (writerHost != null && (numOfReaders == 0 - || this.pluginService.getDialect().getFailoverRestrictions() - .contains(FailoverRestriction.ENABLE_WRITER_IN_TASK_B))) { + if (writerHost == null) { + return hostsByPriority; + } + + boolean shouldIncludeWriter = numOfReaders == 0 + || this.pluginService.getDialect().getFailoverRestrictions() + .contains(FailoverRestriction.ENABLE_WRITER_IN_TASK_B); + if (shouldIncludeWriter) { hostsByPriority.add(writerHost); } @@ -391,6 +396,8 @@ public ReaderFailoverResult call() { final Properties copy = new Properties(); copy.putAll(initialConnectionProps); + // TODO: assess whether multi-threaded access to the plugin service is safe. The same plugin service is used by + // both the ConnectionWrapper and this ConnectionAttemptTask in separate threads. final Connection conn = pluginService.forceConnect(this.newHost, copy); pluginService.setAvailability(this.newHost.asAliases(), HostAvailability.AVAILABLE); @@ -402,7 +409,7 @@ public ReaderFailoverResult call() { LOGGER.fine( Messages.get( "ClusterAwareReaderFailoverHandler.readerRequired", - new Object[]{ this.newHost.getUrl(), role })); + new Object[] {this.newHost.getUrl(), role})); try { conn.close(); @@ -413,7 +420,7 @@ public ReaderFailoverResult call() { return FAILED_READER_FAILOVER_RESULT; } } catch (SQLException e) { - LOGGER.fine(Messages.get("ClusterAwareReaderFailoverHandler.errorGettingHostRole", new Object[]{e})); + LOGGER.fine(Messages.get("ClusterAwareReaderFailoverHandler.errorGettingHostRole", new Object[] {e})); try { conn.close(); diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/ClusterAwareWriterFailoverHandler.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/ClusterAwareWriterFailoverHandler.java index 1224a2ad6..47f741b5e 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/ClusterAwareWriterFailoverHandler.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/failover/ClusterAwareWriterFailoverHandler.java @@ -258,6 +258,8 @@ public WriterFailoverResult call() { conn.close(); } + // TODO: assess whether multi-threaded access to the plugin service is safe. The same plugin service is used + // by both the ConnectionWrapper and this ReconnectToWriterHandler in separate threads. conn = pluginService.forceConnect(this.originalWriterHost, initialConnectionProps); pluginService.forceRefreshHostList(conn); latestTopology = pluginService.getAllHosts(); @@ -465,6 +467,8 @@ private boolean connectToWriter(final HostSpec writerCandidate) { new Object[] {writerCandidate.getUrl()})); try { // connect to the new writer + // TODO: assess whether multi-threaded access to the plugin service is safe. 
The same plugin service is used + // by both the ConnectionWrapper and this WaitForNewWriterHandler in separate threads. this.currentConnection = pluginService.forceConnect(writerCandidate, initialConnectionProps); pluginService.setAvailability(writerCandidate.asAliases(), HostAvailability.AVAILABLE); return true; diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessConnectionPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessConnectionPlugin.java index 1da725f15..23a1aa8be 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessConnectionPlugin.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessConnectionPlugin.java @@ -34,7 +34,6 @@ import software.amazon.jdbc.dialect.Dialect; import software.amazon.jdbc.plugin.AbstractConnectionPlugin; import software.amazon.jdbc.util.Messages; -import software.amazon.jdbc.util.PropertyUtils; public class LimitlessConnectionPlugin extends AbstractConnectionPlugin { diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterMonitor.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterMonitor.java index a4f63bada..9a18b9132 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterMonitor.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterMonitor.java @@ -31,8 +31,8 @@ import software.amazon.jdbc.util.ExecutorFactory; import software.amazon.jdbc.util.Messages; import software.amazon.jdbc.util.PropertyUtils; -import software.amazon.jdbc.util.SlidingExpirationCacheWithCleanupThread; import software.amazon.jdbc.util.Utils; +import software.amazon.jdbc.util.storage.SlidingExpirationCacheWithCleanupThread; import software.amazon.jdbc.util.telemetry.TelemetryContext; import software.amazon.jdbc.util.telemetry.TelemetryFactory; import software.amazon.jdbc.util.telemetry.TelemetryTraceLevel; @@ -53,6 +53,7 @@ public class LimitlessRouterMonitor implements AutoCloseable, Runnable { protected final TelemetryFactory telemetryFactory; protected Connection monitoringConn = null; + // TODO: remove and submit monitors to MonitorService instead private final ExecutorService threadPool = ExecutorFactory.newFixedThreadPool(1, "threadPool"); private final AtomicBoolean stopped = new AtomicBoolean(false); @@ -194,6 +195,7 @@ private void openConnection() throws SQLException { LOGGER.finest(() -> Messages.get( "LimitlessRouterMonitor.openingConnection", new Object[] {this.hostSpec.getUrl()})); + // TODO: replace with ConnectionService#open this.monitoringConn = this.pluginService.forceConnect(this.hostSpec, this.props); LOGGER.finest(() -> Messages.get( "LimitlessRouterMonitor.openedConnection", diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterMonitorInitializer.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterMonitorInitializer.java index d036f2d42..03680ffa1 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterMonitorInitializer.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterMonitorInitializer.java @@ -18,11 +18,8 @@ import java.util.List; import java.util.Properties; -import java.util.concurrent.atomic.AtomicReference; -import org.checkerframework.checker.nullness.qual.NonNull; import software.amazon.jdbc.HostSpec; -import software.amazon.jdbc.PluginService; -import 
software.amazon.jdbc.util.SlidingExpirationCacheWithCleanupThread; +import software.amazon.jdbc.util.storage.SlidingExpirationCacheWithCleanupThread; @FunctionalInterface public interface LimitlessRouterMonitorInitializer { diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterServiceImpl.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterServiceImpl.java index d40131295..1c5feee1c 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterServiceImpl.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/limitless/LimitlessRouterServiceImpl.java @@ -34,8 +34,8 @@ import software.amazon.jdbc.RoundRobinHostSelector; import software.amazon.jdbc.hostavailability.HostAvailability; import software.amazon.jdbc.util.Messages; -import software.amazon.jdbc.util.SlidingExpirationCacheWithCleanupThread; import software.amazon.jdbc.util.Utils; +import software.amazon.jdbc.util.storage.SlidingExpirationCacheWithCleanupThread; public class LimitlessRouterServiceImpl implements LimitlessRouterService { private static final Logger LOGGER = @@ -50,7 +50,7 @@ public class LimitlessRouterServiceImpl implements LimitlessRouterService { protected final PluginService pluginService; protected final LimitlessQueryHelper queryHelper; protected final LimitlessRouterMonitorInitializer limitlessRouterMonitorInitializer; - + // TODO: remove and submit monitors to MonitorService instead protected static final SlidingExpirationCacheWithCleanupThread limitlessRouterMonitors = new SlidingExpirationCacheWithCleanupThread<>( limitlessRouterMonitor -> true, diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/strategy/fastestresponse/FastestResponseStrategyPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/strategy/fastestresponse/FastestResponseStrategyPlugin.java index 2b317fbd4..6e7f39312 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/strategy/fastestresponse/FastestResponseStrategyPlugin.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/strategy/fastestresponse/FastestResponseStrategyPlugin.java @@ -39,7 +39,7 @@ import software.amazon.jdbc.PropertyDefinition; import software.amazon.jdbc.RandomHostSelector; import software.amazon.jdbc.plugin.AbstractConnectionPlugin; -import software.amazon.jdbc.util.CacheMap; +import software.amazon.jdbc.util.storage.CacheMap; public class FastestResponseStrategyPlugin extends AbstractConnectionPlugin { diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/strategy/fastestresponse/HostResponseTimeServiceImpl.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/strategy/fastestresponse/HostResponseTimeServiceImpl.java index 7908455a4..4a7567c81 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/strategy/fastestresponse/HostResponseTimeServiceImpl.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/strategy/fastestresponse/HostResponseTimeServiceImpl.java @@ -27,7 +27,7 @@ import org.checkerframework.checker.nullness.qual.NonNull; import software.amazon.jdbc.HostSpec; import software.amazon.jdbc.PluginService; -import software.amazon.jdbc.util.SlidingExpirationCacheWithCleanupThread; +import software.amazon.jdbc.util.storage.SlidingExpirationCacheWithCleanupThread; import software.amazon.jdbc.util.telemetry.TelemetryFactory; import software.amazon.jdbc.util.telemetry.TelemetryGauge; @@ -39,6 +39,7 @@ public class HostResponseTimeServiceImpl implements HostResponseTimeService { protected static final long 
CACHE_EXPIRATION_NANO = TimeUnit.MINUTES.toNanos(10); protected static final long CACHE_CLEANUP_NANO = TimeUnit.MINUTES.toNanos(1); + // TODO: remove and submit monitors to MonitorService instead protected static final SlidingExpirationCacheWithCleanupThread monitoringNodes = new SlidingExpirationCacheWithCleanupThread<>( (monitor) -> true, diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/strategy/fastestresponse/NodeResponseTimeMonitor.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/strategy/fastestresponse/NodeResponseTimeMonitor.java index 2bb95e96e..a1758f7f0 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/strategy/fastestresponse/NodeResponseTimeMonitor.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/strategy/fastestresponse/NodeResponseTimeMonitor.java @@ -63,6 +63,7 @@ public class NodeResponseTimeMonitor implements AutoCloseable, Runnable { private Connection monitoringConn = null; + // TODO: remove and submit monitors to MonitorService instead private final ExecutorService threadPool = ExecutorFactory.newFixedThreadPool(1, "threadPool"); @@ -215,6 +216,7 @@ private void openConnection() { LOGGER.finest(() -> Messages.get( "NodeResponseTimeMonitor.openingConnection", new Object[] {this.hostSpec.getUrl()})); + // TODO: replace with ConnectionService#open this.monitoringConn = this.pluginService.forceConnect(this.hostSpec, monitoringConnProperties); LOGGER.finest(() -> Messages.get( "NodeResponseTimeMonitor.openedConnection", diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/CoreServicesContainer.java b/wrapper/src/main/java/software/amazon/jdbc/util/CoreServicesContainer.java new file mode 100644 index 000000000..3ac3e0a1b --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/util/CoreServicesContainer.java @@ -0,0 +1,56 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc.util; + +import software.amazon.jdbc.util.events.BatchingEventPublisher; +import software.amazon.jdbc.util.events.EventPublisher; +import software.amazon.jdbc.util.monitoring.MonitorService; +import software.amazon.jdbc.util.monitoring.MonitorServiceImpl; +import software.amazon.jdbc.util.storage.StorageService; +import software.amazon.jdbc.util.storage.StorageServiceImpl; + +/** + * A singleton container object used to instantiate and access core universal services. This class should be used + * instead of directly instantiating core services so that only one instance of each service is instantiated. + * + * @see FullServicesContainer for a container that holds both connection-specific services and core universal + * services. 
+ */ +public class CoreServicesContainer { + private static final CoreServicesContainer INSTANCE = new CoreServicesContainer(); + + private final StorageService storageService; + private final MonitorService monitorService; + + private CoreServicesContainer() { + EventPublisher eventPublisher = new BatchingEventPublisher(); + this.storageService = new StorageServiceImpl(eventPublisher); + this.monitorService = new MonitorServiceImpl(eventPublisher); + } + + public static CoreServicesContainer getInstance() { + return INSTANCE; + } + + public StorageService getStorageService() { + return storageService; + } + + public MonitorService getMonitorService() { + return monitorService; + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/ExecutorFactory.java b/wrapper/src/main/java/software/amazon/jdbc/util/ExecutorFactory.java index a211fbdb4..26d6bb234 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/util/ExecutorFactory.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/ExecutorFactory.java @@ -19,6 +19,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ThreadFactory; import java.util.concurrent.atomic.AtomicLong; @@ -37,6 +38,10 @@ public static ExecutorService newFixedThreadPool(int threadCount, String threadN return Executors.newFixedThreadPool(threadCount, getThreadFactory(threadName)); } + public static ScheduledExecutorService newSingleThreadScheduledThreadExecutor(String threadName) { + return Executors.newSingleThreadScheduledExecutor(getThreadFactory(threadName)); + } + private static ThreadFactory getThreadFactory(String threadName) { return THREAD_FACTORY_MAP.computeIfAbsent(threadName, ExecutorFactory::createThreadFactory); } diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/FullServicesContainer.java b/wrapper/src/main/java/software/amazon/jdbc/util/FullServicesContainer.java new file mode 100644 index 000000000..e276b4503 --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/util/FullServicesContainer.java @@ -0,0 +1,62 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc.util; + +import software.amazon.jdbc.ConnectionPluginManager; +import software.amazon.jdbc.HostListProviderService; +import software.amazon.jdbc.PluginManagerService; +import software.amazon.jdbc.PluginService; +import software.amazon.jdbc.util.monitoring.MonitorService; +import software.amazon.jdbc.util.storage.StorageService; +import software.amazon.jdbc.util.telemetry.TelemetryFactory; + +/** + * A container object used to hold and access the various services required by the driver. This class provides access to + * both connection-specific services required by plugins and monitors as well as core universal services such + * as {@link MonitorService} and {@link StorageService}. 
+ * + * @see CoreServicesContainer + */ +public interface FullServicesContainer { + StorageService getStorageService(); + + MonitorService getMonitorService(); + + TelemetryFactory getTelemetryFactory(); + + ConnectionPluginManager getConnectionPluginManager(); + + HostListProviderService getHostListProviderService(); + + PluginService getPluginService(); + + PluginManagerService getPluginManagerService(); + + void setMonitorService(MonitorService monitorService); + + void setStorageService(StorageService storageService); + + void setTelemetryFactory(TelemetryFactory telemetryFactory); + + void setConnectionPluginManager(ConnectionPluginManager connectionPluginManager); + + void setHostListProviderService(HostListProviderService hostListProviderService); + + void setPluginService(PluginService pluginService); + + void setPluginManagerService(PluginManagerService pluginManagerService); +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/FullServicesContainerImpl.java b/wrapper/src/main/java/software/amazon/jdbc/util/FullServicesContainerImpl.java new file mode 100644 index 000000000..ef0a0fc53 --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/util/FullServicesContainerImpl.java @@ -0,0 +1,129 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
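[Editor's note: a minimal usage sketch, not part of this diff, showing how the new service containers introduced above are intended to be consumed. The class name `ServiceLookupSketch` is illustrative; only the types added in this change are assumed.]

```java
import software.amazon.jdbc.PluginService;
import software.amazon.jdbc.util.CoreServicesContainer;
import software.amazon.jdbc.util.FullServicesContainer;
import software.amazon.jdbc.util.monitoring.MonitorService;
import software.amazon.jdbc.util.storage.StorageService;

public class ServiceLookupSketch {
  // Driver-wide services come from the singleton container, so every caller shares the
  // same StorageService and MonitorService instances.
  static StorageService sharedStorage() {
    return CoreServicesContainer.getInstance().getStorageService();
  }

  static MonitorService sharedMonitors() {
    return CoreServicesContainer.getInstance().getMonitorService();
  }

  // Connection-scoped code is handed a FullServicesContainer and pulls both the shared
  // services and the connection-specific ones (e.g. PluginService) from it.
  static PluginService pluginServiceOf(FullServicesContainer services) {
    return services.getPluginService();
  }
}
```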
+ */ + +package software.amazon.jdbc.util; + +import software.amazon.jdbc.ConnectionPluginManager; +import software.amazon.jdbc.HostListProviderService; +import software.amazon.jdbc.PluginManagerService; +import software.amazon.jdbc.PluginService; +import software.amazon.jdbc.util.monitoring.MonitorService; +import software.amazon.jdbc.util.storage.StorageService; +import software.amazon.jdbc.util.telemetry.TelemetryFactory; + +public class FullServicesContainerImpl implements FullServicesContainer { + private StorageService storageService; + private MonitorService monitorService; + private TelemetryFactory telemetryFactory; + private ConnectionPluginManager connectionPluginManager; + private HostListProviderService hostListProviderService; + private PluginService pluginService; + private PluginManagerService pluginManagerService; + + public FullServicesContainerImpl( + StorageService storageService, + MonitorService monitorService, + TelemetryFactory telemetryFactory, + ConnectionPluginManager connectionPluginManager, + HostListProviderService hostListProviderService, + PluginService pluginService, + PluginManagerService pluginManagerService) { + this(storageService, monitorService, telemetryFactory); + this.connectionPluginManager = connectionPluginManager; + this.hostListProviderService = hostListProviderService; + this.pluginService = pluginService; + this.pluginManagerService = pluginManagerService; + } + + public FullServicesContainerImpl( + StorageService storageService, + MonitorService monitorService, + TelemetryFactory telemetryFactory) { + this.storageService = storageService; + this.monitorService = monitorService; + this.telemetryFactory = telemetryFactory; + } + + @Override + public StorageService getStorageService() { + return this.storageService; + } + + @Override + public MonitorService getMonitorService() { + return this.monitorService; + } + + @Override + public TelemetryFactory getTelemetryFactory() { + return this.telemetryFactory; + } + + @Override + public ConnectionPluginManager getConnectionPluginManager() { + return this.connectionPluginManager; + } + + @Override + public HostListProviderService getHostListProviderService() { + return this.hostListProviderService; + } + + @Override + public PluginService getPluginService() { + return this.pluginService; + } + + @Override + public PluginManagerService getPluginManagerService() { + return this.pluginManagerService; + } + + @Override + public void setMonitorService(MonitorService monitorService) { + this.monitorService = monitorService; + } + + @Override + public void setStorageService(StorageService storageService) { + this.storageService = storageService; + } + + @Override + public void setTelemetryFactory(TelemetryFactory telemetryFactory) { + this.telemetryFactory = telemetryFactory; + } + + @Override + public void setConnectionPluginManager(ConnectionPluginManager connectionPluginManager) { + this.connectionPluginManager = connectionPluginManager; + } + + @Override + public void setHostListProviderService(HostListProviderService hostListProviderService) { + this.hostListProviderService = hostListProviderService; + } + + @Override + public void setPluginService(PluginService pluginService) { + this.pluginService = pluginService; + } + + @Override + public void setPluginManagerService(PluginManagerService pluginManagerService) { + this.pluginManagerService = pluginManagerService; + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/connection/ConnectionService.java 
b/wrapper/src/main/java/software/amazon/jdbc/util/connection/ConnectionService.java new file mode 100644 index 000000000..2bdd8161d --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/util/connection/ConnectionService.java @@ -0,0 +1,37 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc.util.connection; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.Properties; +import software.amazon.jdbc.HostSpec; +import software.amazon.jdbc.PluginService; + +public interface ConnectionService { + /** + * Creates an auxiliary connection. Auxiliary connections are driver-internal connections that accomplish various + * specific tasks such as monitoring a host's availability, checking the topology information for a cluster, etc. + * + * @param hostSpec the hostSpec containing the host information for the auxiliary connection. + * @param props the properties for the auxiliary connection. + * @return a new connection to the given host using the given props. + */ + Connection open(HostSpec hostSpec, Properties props) throws SQLException; + + PluginService getPluginService(); +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/connection/ConnectionServiceImpl.java b/wrapper/src/main/java/software/amazon/jdbc/util/connection/ConnectionServiceImpl.java new file mode 100644 index 000000000..73a68fdcb --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/util/connection/ConnectionServiceImpl.java @@ -0,0 +1,87 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
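[Editor's note: a sketch, not part of this diff, of wiring a connection-scoped container on top of the shared core services. `ServicesWiringSketch` is an illustrative name, and the TelemetryFactory is taken as a parameter because its construction is connection-specific and not shown here.]

```java
import software.amazon.jdbc.util.CoreServicesContainer;
import software.amazon.jdbc.util.FullServicesContainer;
import software.amazon.jdbc.util.FullServicesContainerImpl;
import software.amazon.jdbc.util.telemetry.TelemetryFactory;

public class ServicesWiringSketch {
  // Builds a connection-scoped container from the shared core services. The remaining
  // connection-specific services (plugin manager, plugin service, host list provider) are
  // attached later through the setters, mirroring what ConnectionServiceImpl below does.
  static FullServicesContainer newContainer(TelemetryFactory telemetryFactory) {
    CoreServicesContainer core = CoreServicesContainer.getInstance();
    return new FullServicesContainerImpl(
        core.getStorageService(),
        core.getMonitorService(),
        telemetryFactory);
  }
}
```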
+ */ + +package software.amazon.jdbc.util.connection; + +import java.sql.Connection; +import java.sql.SQLException; +import java.util.Properties; +import software.amazon.jdbc.ConnectionPluginManager; +import software.amazon.jdbc.ConnectionProvider; +import software.amazon.jdbc.HostSpec; +import software.amazon.jdbc.PartialPluginService; +import software.amazon.jdbc.PluginService; +import software.amazon.jdbc.dialect.Dialect; +import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; +import software.amazon.jdbc.util.FullServicesContainer; +import software.amazon.jdbc.util.FullServicesContainerImpl; +import software.amazon.jdbc.util.monitoring.MonitorService; +import software.amazon.jdbc.util.storage.StorageService; +import software.amazon.jdbc.util.telemetry.TelemetryFactory; + +public class ConnectionServiceImpl implements ConnectionService { + protected final String targetDriverProtocol; + protected final ConnectionPluginManager pluginManager; + protected final PluginService pluginService; + + public ConnectionServiceImpl( + StorageService storageService, + MonitorService monitorService, + TelemetryFactory telemetryFactory, + ConnectionProvider connectionProvider, + String originalUrl, + String targetDriverProtocol, + TargetDriverDialect driverDialect, + Dialect dbDialect, + Properties props) throws SQLException { + this.targetDriverProtocol = targetDriverProtocol; + + FullServicesContainer + servicesContainer = new FullServicesContainerImpl(storageService, monitorService, telemetryFactory); + this.pluginManager = new ConnectionPluginManager( + connectionProvider, + null, + null, + telemetryFactory); + servicesContainer.setConnectionPluginManager(this.pluginManager); + + PartialPluginService partialPluginService = new PartialPluginService( + servicesContainer, + props, + originalUrl, + this.targetDriverProtocol, + driverDialect, + dbDialect + ); + + this.pluginService = partialPluginService; + servicesContainer.setHostListProviderService(partialPluginService); + servicesContainer.setPluginService(partialPluginService); + servicesContainer.setPluginManagerService(partialPluginService); + + this.pluginManager.init(servicesContainer, props, partialPluginService, null); + } + + @Override + public Connection open(HostSpec hostSpec, Properties props) throws SQLException { + return this.pluginManager.forceConnect(this.targetDriverProtocol, hostSpec, props, true, null); + } + + @Override + public PluginService getPluginService() { + return this.pluginService; + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/events/BatchingEventPublisher.java b/wrapper/src/main/java/software/amazon/jdbc/util/events/BatchingEventPublisher.java new file mode 100644 index 000000000..a7f80832a --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/util/events/BatchingEventPublisher.java @@ -0,0 +1,100 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
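[Editor's note: a sketch, not part of this diff, of how a monitor could use ConnectionService#open for its auxiliary connection once the "replace with ConnectionService#open" TODOs above are addressed. The class and method names are illustrative.]

```java
import java.sql.Connection;
import java.sql.SQLException;
import java.util.Properties;
import software.amazon.jdbc.HostSpec;
import software.amazon.jdbc.util.connection.ConnectionService;

public class AuxiliaryConnectionSketch {
  // Opens a short-lived auxiliary connection through the dedicated connection pipeline
  // instead of calling forceConnect on a PluginService shared with the ConnectionWrapper.
  static boolean canReach(ConnectionService connectionService, HostSpec hostSpec, Properties props) {
    try (Connection conn = connectionService.open(hostSpec, props)) {
      return conn.isValid(5);
    } catch (SQLException e) {
      return false;
    }
  }
}
```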
+ */ + +package software.amazon.jdbc.util.events; + +import java.util.Collections; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; +import java.util.WeakHashMap; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import software.amazon.jdbc.util.ExecutorFactory; + +/** + * An event publisher that periodically publishes a batch of all unique events encountered during the latest time + * interval. Batches do not contain duplicate events; if the current batch receives a duplicate, it will not be + * added to the batch and the original event will only be published once, when the entire batch is published. + */ +public class BatchingEventPublisher implements EventPublisher { + protected static final long DEFAULT_MESSAGE_INTERVAL_NANOS = TimeUnit.SECONDS.toNanos(30); + protected final Map, Set> subscribersMap = new ConcurrentHashMap<>(); + // ConcurrentHashMap.newKeySet() is the recommended way to get a concurrent set. A set is used to prevent duplicate + // event messages from being sent in the same message batch. + protected final Set eventMessages = ConcurrentHashMap.newKeySet(); + protected static final ScheduledExecutorService publishingExecutor = + ExecutorFactory.newSingleThreadScheduledThreadExecutor("bep"); + + public BatchingEventPublisher() { + this(DEFAULT_MESSAGE_INTERVAL_NANOS); + } + + /** + * Constructs a PeriodicEventPublisher instance and submits a thread to periodically send message batches. + * + * @param messageIntervalNanos the rate at which messages batches should be sent, in nanoseconds. + */ + public BatchingEventPublisher(long messageIntervalNanos) { + initPublishingThread(messageIntervalNanos); + } + + protected void initPublishingThread(long messageIntervalNanos) { + publishingExecutor.scheduleAtFixedRate( + this::sendMessages, messageIntervalNanos, messageIntervalNanos, TimeUnit.NANOSECONDS); + } + + protected void sendMessages() { + Iterator iterator = eventMessages.iterator(); + while (iterator.hasNext()) { + Event event = iterator.next(); + iterator.remove(); + Set subscribers = subscribersMap.get(event.getClass()); + if (subscribers == null) { + continue; + } + + for (EventSubscriber subscriber : subscribers) { + subscriber.processEvent(event); + } + } + } + + @Override + public void subscribe(EventSubscriber subscriber, Set> eventClasses) { + for (Class eventClass : eventClasses) { + // The subscriber collection is a weakly referenced set so that we avoid garbage collection issues. + subscribersMap.computeIfAbsent( + eventClass, (k) -> Collections.newSetFromMap(new WeakHashMap<>())).add(subscriber); + } + } + + @Override + public void unsubscribe(EventSubscriber subscriber, Set> eventClasses) { + for (Class eventClass : eventClasses) { + subscribersMap.computeIfPresent(eventClass, (k, v) -> { + v.remove(subscriber); + return v.isEmpty() ? null : v; + }); + } + } + + @Override + public void publish(Event event) { + eventMessages.add(event); + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/events/DataAccessEvent.java b/wrapper/src/main/java/software/amazon/jdbc/util/events/DataAccessEvent.java new file mode 100644 index 000000000..a47bd0842 --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/util/events/DataAccessEvent.java @@ -0,0 +1,76 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). 
+ * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc.util.events; + +import java.util.Objects; +import org.checkerframework.checker.nullness.qual.NonNull; + +/** + * A class defining a data access event. The class specifies the class of the data that was accessed and the key for the + * data. + */ +public class DataAccessEvent implements Event { + protected @NonNull Class dataClass; + protected @NonNull Object key; + + /** + * Constructor for a DataAccessEvent. + * + * @param dataClass the class of the data that was accessed. + * @param key the key for the data that was accessed. + */ + public DataAccessEvent(@NonNull Class dataClass, @NonNull Object key) { + this.dataClass = dataClass; + this.key = key; + } + + public @NonNull Class getDataClass() { + return dataClass; + } + + public @NonNull Object getKey() { + return key; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null) { + return false; + } + + if (getClass() != obj.getClass()) { + return false; + } + + DataAccessEvent event = (DataAccessEvent) obj; + return Objects.equals(this.dataClass, event.dataClass) + && Objects.equals(this.key, event.key); + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + this.dataClass.hashCode(); + result = prime * result + this.key.hashCode(); + return result; + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/events/Event.java b/wrapper/src/main/java/software/amazon/jdbc/util/events/Event.java new file mode 100644 index 000000000..2714f6786 --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/util/events/Event.java @@ -0,0 +1,23 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc.util.events; + +/** + * A marker interface for events that need to be communicated between different components. + */ +public interface Event { +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/events/EventPublisher.java b/wrapper/src/main/java/software/amazon/jdbc/util/events/EventPublisher.java new file mode 100644 index 000000000..21e460a6e --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/util/events/EventPublisher.java @@ -0,0 +1,48 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
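[Editor's note: a sketch, not part of this diff, of the batching and de-duplication behaviour described in the BatchingEventPublisher Javadoc above. The EventSubscriber interface it relies on appears a little further down in this diff; the generic parameter of subscribe was flattened in this rendering and is assumed here. The class name, key, interval, and sleep are illustrative.]

```java
import java.util.Collections;
import java.util.HashSet;
import java.util.concurrent.TimeUnit;
import software.amazon.jdbc.util.events.BatchingEventPublisher;
import software.amazon.jdbc.util.events.DataAccessEvent;
import software.amazon.jdbc.util.events.EventSubscriber;

public class EventBatchingSketch {
  public static void main(String[] args) throws InterruptedException {
    // Publish batches every 100 ms instead of the 30 s default so the example finishes quickly.
    BatchingEventPublisher publisher =
        new BatchingEventPublisher(TimeUnit.MILLISECONDS.toNanos(100));

    // Keep a strong reference to the subscriber: the publisher stores subscribers in a
    // weakly-referenced set, so an otherwise unreferenced lambda could be garbage collected.
    EventSubscriber subscriber = event -> System.out.println("received: " + event);
    publisher.subscribe(subscriber, new HashSet<>(Collections.singletonList(DataAccessEvent.class)));

    // The two events below are equal, so the current batch holds a single entry and the
    // subscriber is notified once when the batch is flushed.
    publisher.publish(new DataAccessEvent(String.class, "example-key"));
    publisher.publish(new DataAccessEvent(String.class, "example-key"));

    TimeUnit.MILLISECONDS.sleep(300);
  }
}
```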
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc.util.events; + +import java.util.Set; + +/** + * An event publisher that publishes events to subscribers. Subscribers can specify which types of events they would + * like to receive. + */ +public interface EventPublisher { + /** + * Registers the given subscriber for the given event classes. + * + * @param subscriber the subscriber to be notified when the given event classes occur. + * @param eventClasses the classes of events that the subscriber should be notified of. + */ + void subscribe(EventSubscriber subscriber, Set> eventClasses); + + /** + * Unsubscribes the given subscriber from the given event classes. + * + * @param subscriber the subscriber to unsubscribe from the given event classes. + * @param eventClasses the classes of events that the subscriber wants to unsubscribe from. + */ + void unsubscribe(EventSubscriber subscriber, Set> eventClasses); + + /** + * Publishes an event. All subscribers to the given event class will be notified of the event. + * + * @param event the event to publish. + */ + void publish(Event event); +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/events/EventSubscriber.java b/wrapper/src/main/java/software/amazon/jdbc/util/events/EventSubscriber.java new file mode 100644 index 000000000..34e63b452 --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/util/events/EventSubscriber.java @@ -0,0 +1,37 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc.util.events; + +import java.util.Set; + +/** + * An event subscriber. Subscribers can subscribe to a publisher's events using + * {@link EventPublisher#subscribe(EventSubscriber, Set)}. Subscribers will typically be stored in a + * {@link java.util.HashSet} to prevent duplicate subscriptions, so classes implementing this interface should consider + * whether they need to override {@link Object#equals(Object)} and {@link Object#hashCode()}. + * + * @see EventPublisher + */ +public interface EventSubscriber { + /** + * Processes an event. This method will only be called on this subscriber if it has subscribed to the event class via + * {@link EventPublisher#subscribe}. + * + * @param event the event to process. 
+ */ + void processEvent(Event event); +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/AbstractMonitor.java b/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/AbstractMonitor.java new file mode 100644 index 000000000..ca7abcec9 --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/AbstractMonitor.java @@ -0,0 +1,127 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc.util.monitoring; + +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import java.util.logging.Logger; +import software.amazon.jdbc.plugin.customendpoint.CustomEndpointMonitorImpl; +import software.amazon.jdbc.util.ExecutorFactory; +import software.amazon.jdbc.util.Messages; + +/** + * An AbstractMonitor that implements common monitor logic. + */ +public abstract class AbstractMonitor implements Monitor, Runnable { + private static final Logger LOGGER = Logger.getLogger(AbstractMonitor.class.getName()); + protected final AtomicBoolean stop = new AtomicBoolean(false); + protected final ExecutorService monitorExecutor; + protected final AtomicLong terminationTimeoutSec = new AtomicLong(); + protected final AtomicLong lastActivityTimestampNanos = new AtomicLong(); + protected final AtomicReference state = new AtomicReference<>(); + + + protected AbstractMonitor(long terminationTimeoutSec) { + this.terminationTimeoutSec.set(terminationTimeoutSec); + this.monitorExecutor = ExecutorFactory.newSingleThreadExecutor(getMonitorNameSuffix()); + this.lastActivityTimestampNanos.set(System.nanoTime()); + } + + protected AbstractMonitor(long terminationTimeoutSec, ExecutorService monitorExecutor) { + this.terminationTimeoutSec.set(terminationTimeoutSec); + this.monitorExecutor = monitorExecutor; + this.lastActivityTimestampNanos.set(System.nanoTime()); + } + + @Override + public void start() { + this.monitorExecutor.submit(this); + this.monitorExecutor.shutdown(); + } + + /** + * Starts the monitor workflow, making sure to set the initial state of the monitor. The monitor's workflow is wrapped + * in a try-catch so that unexpected exceptions are reported to the monitor service and the monitor's state is updated + * to {@link MonitorState#ERROR}. 
+ */ + @Override + public void run() { + try { + LOGGER.finest(Messages.get("AbstractMonitor.startingMonitor", new Object[] {this})); + this.state.set(MonitorState.RUNNING); + this.lastActivityTimestampNanos.set(System.nanoTime()); + monitor(); + } catch (Exception e) { + LOGGER.fine(Messages.get("AbstractMonitor.unexpectedError", new Object[] {this, e})); + this.state.set(MonitorState.ERROR); + } + } + + @Override + public void stop() { + LOGGER.fine(Messages.get("AbstractMonitor.stoppingMonitor", new Object[] {this})); + this.stop.set(true); + + try { + if (!this.monitorExecutor.awaitTermination(this.terminationTimeoutSec.get(), TimeUnit.SECONDS)) { + LOGGER.info(Messages.get( + "AbstractMonitor.monitorTerminationTimeout", new Object[] {terminationTimeoutSec, this})); + this.monitorExecutor.shutdownNow(); + } + } catch (InterruptedException e) { + LOGGER.info(Messages.get("AbstractMonitor.interruptedWhileTerminating", new Object[] {this})); + Thread.currentThread().interrupt(); + this.monitorExecutor.shutdownNow(); + } finally { + close(); + this.state.set(MonitorState.STOPPED); + } + } + + @Override + public void close() { + // do nothing. Classes that extend this class should override this method if they open resources that need closing. + } + + @Override + public long getLastActivityTimestampNanos() { + return this.lastActivityTimestampNanos.get(); + } + + @Override + public MonitorState getState() { + return this.state.get(); + } + + @Override + public boolean canDispose() { + return true; + } + + /** + * Forms the suffix for the monitor thread name by abbreviating the concrete class name. For example, a + * {@link CustomEndpointMonitorImpl} will have a suffix of "cemi". + * + * @return the suffix for the monitor thread name. + */ + private String getMonitorNameSuffix() { + return this.getClass().getSimpleName().replaceAll("[a-z]", "").toLowerCase(); + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/Monitor.java b/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/Monitor.java new file mode 100644 index 000000000..d4d89dc4c --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/Monitor.java @@ -0,0 +1,62 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc.util.monitoring; + +public interface Monitor { + /** + * Submits this monitor in a separate thread to begin its monitoring tasks. + */ + void start(); + + /** + * Executes the monitoring loop for this monitor. This method should be called in the run() method of the thread + * submitted during the call to {@link #start()}. Additionally, the monitoring loop should regularly update the last + * activity timestamp so that the {@link MonitorService} can detect whether the monitor is stuck or not. + */ + void monitor(); + + /** + * Stops the monitoring tasks for this monitor and closes all resources. 
+ */ + void stop(); + + /** + * Closes all resources used by this monitor. This method will be called as part of {@link #stop()}. + */ + void close(); + + /** + * Gets the timestamp for the last action performed by this monitor, in nanoseconds. + * + * @return the timestamp for the last action performed by this monitor, in nanoseconds. + */ + long getLastActivityTimestampNanos(); + + /** + * Gets the current state of this monitor. + * + * @return the current state of this monitor. + */ + MonitorState getState(); + + /** + * Defines whether this monitor can be disposed. + * + * @return true if this monitor can be disposed, otherwise returns false. + */ + boolean canDispose(); +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/MonitorErrorResponse.java b/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/MonitorErrorResponse.java new file mode 100644 index 000000000..e4fb99bc8 --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/MonitorErrorResponse.java @@ -0,0 +1,25 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc.util.monitoring; + +/** + * An enum defining which action to perform if a {@link Monitor} enters an error state or is discovered to be stuck. + */ +public enum MonitorErrorResponse { + NO_ACTION, + RECREATE +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/MonitorInitializer.java b/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/MonitorInitializer.java new file mode 100644 index 000000000..c4f13e6f9 --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/MonitorInitializer.java @@ -0,0 +1,25 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
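[Editor's note: a sketch, not part of this diff, of a minimal concrete monitor built on the AbstractMonitor/Monitor contract shown above: loop until the stop flag is set and refresh the activity timestamp each iteration so the monitor service does not treat the monitor as stuck. `HeartbeatMonitor` and its timings are illustrative.]

```java
import java.util.concurrent.TimeUnit;
import software.amazon.jdbc.util.monitoring.AbstractMonitor;

public class HeartbeatMonitor extends AbstractMonitor {
  public HeartbeatMonitor() {
    super(30); // termination timeout, in seconds, used by stop()
  }

  @Override
  public void monitor() {
    while (!this.stop.get()) {
      // ... one round of monitoring work would go here ...
      this.lastActivityTimestampNanos.set(System.nanoTime());
      try {
        TimeUnit.SECONDS.sleep(1);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return;
      }
    }
  }
}
```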
+ */ + +package software.amazon.jdbc.util.monitoring; + +import software.amazon.jdbc.PluginService; +import software.amazon.jdbc.util.connection.ConnectionService; + +public interface MonitorInitializer { + + Monitor createMonitor(ConnectionService connectionService, PluginService pluginService); +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/MonitorService.java b/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/MonitorService.java new file mode 100644 index 000000000..41df2c567 --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/MonitorService.java @@ -0,0 +1,128 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc.util.monitoring; + +import java.sql.SQLException; +import java.util.Properties; +import java.util.Set; +import org.checkerframework.checker.nullness.qual.Nullable; +import software.amazon.jdbc.dialect.Dialect; +import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; +import software.amazon.jdbc.util.storage.StorageService; +import software.amazon.jdbc.util.telemetry.TelemetryFactory; + +public interface MonitorService { + /** + * Registers a new monitor type with the monitor service. This method needs to be called before adding new types of + * monitors to the monitor service, so that the monitor service knows when to dispose of a monitor. Expected monitor + * types will be added automatically during driver initialization, but this method can be called by users if they want + * to add a new monitor type. + * + * @param monitorClass the class of the monitor, eg `CustomEndpointMonitorImpl.class`. + * @param expirationTimeoutNanos how long a monitor should be stored without use before being considered expired, in + * nanoseconds. Expired monitors may be removed and stopped. + * @param heartbeatTimeoutNanos a duration in nanoseconds defining the maximum amount of time that a monitor should + * take between updating its last-updated timestamp. If a monitor has not updated its + * last-updated timestamp within this duration it will be considered stuck. + * @param errorResponses a {@link Set} defining actions to take if the monitor is stuck or in an error state. + * @param producedDataClass the class of data produced by the monitor. + */ + void registerMonitorTypeIfAbsent( + Class monitorClass, + long expirationTimeoutNanos, + long heartbeatTimeoutNanos, + Set errorResponses, + @Nullable Class producedDataClass); + + /** + * Creates and starts the given monitor if it does not already exist and stores it under the given monitor type and + * key. If the monitor already exists, its expiration time will be renewed, even if it was already expired. + * + * @param monitorClass the concrete class of the monitor, eg `CustomEndpointMonitorImpl.class`. + * @param key the key for the monitor, eg + * "custom-endpoint.cluster-custom-XYZ.us-east-2.rds.amazonaws.com:5432". 
+ * @param storageService the storage service for the monitor to use. + * @param telemetryFactory the telemetry factory for creating telemetry data. + * @param originalUrl the URL of the original database connection. + * @param driverProtocol the protocol for the underlying target driver. + * @param driverDialect the target driver dialect. + * @param dbDialect the database dialect. + * @param originalProps the properties of the original database connection. + * @param initializer an initializer function to use to create the monitor if it does not already exist. + * @return the new or existing monitor. + */ + T runIfAbsent( + Class monitorClass, + Object key, + StorageService storageService, + TelemetryFactory telemetryFactory, + String originalUrl, + String driverProtocol, + TargetDriverDialect driverDialect, + Dialect dbDialect, + Properties originalProps, + MonitorInitializer initializer) throws SQLException; + + /** + * Gets the monitor stored at the given key. + * + * @param monitorClass the expected class of the monitor. + * @param key the key for the monitor. + * @return the monitor stored at the given key. + */ + @Nullable + T get(Class monitorClass, Object key); + + /** + * Removes the monitor stored at the given key. If the expected monitor class does not match the actual monitor class + * no action will be performed. + * + * @param monitorClass the expected class of the monitor. + * @param key the key for the monitor. + * @return the monitor that was removed. Returns null if there was no monitor at the given key or the expected monitor + * class did not match the actual monitor class. + */ + @Nullable + T remove(Class monitorClass, Object key); + + /** + * Stops the given monitor and removes it from the monitor service. + * + * @param monitorClass the class of the monitor, eg `CustomEndpointMonitorImpl.class`. + * @param key the key for the monitor, eg + * "custom-endpoint.cluster-custom-XYZ.us-east-2.rds.amazonaws.com:5432". + */ + void stopAndRemove(Class monitorClass, Object key); + + /** + * Stops all monitors for the given type and removes them from the monitor service. + * + * @param monitorClass the class of the monitor, eg `CustomEndpointMonitorImpl.class`. + */ + void stopAndRemoveMonitors(Class monitorClass); + + /** + * Stops all monitors and removes them from the monitor service. + */ + void stopAndRemoveAll(); + + /** + * Releases any resources opened by the monitor service, stops all monitors, and removes all monitors from the monitor + * service. + */ + void releaseResources(); +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/MonitorServiceImpl.java b/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/MonitorServiceImpl.java new file mode 100644 index 000000000..b6804f9a1 --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/MonitorServiceImpl.java @@ -0,0 +1,393 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
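[Editor's note: a sketch, not part of this diff, of registering a monitor type and starting a monitor through the MonitorService interface above. It reuses the illustrative `HeartbeatMonitor` from the earlier sketch; the generic parameters of registerMonitorTypeIfAbsent/runIfAbsent were flattened in this rendering and are assumed, and the key, timeouts, and contextual parameters are illustrative placeholders supplied by the caller.]

```java
import java.sql.SQLException;
import java.util.Collections;
import java.util.HashSet;
import java.util.Properties;
import java.util.concurrent.TimeUnit;
import software.amazon.jdbc.dialect.Dialect;
import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect;
import software.amazon.jdbc.util.monitoring.MonitorErrorResponse;
import software.amazon.jdbc.util.monitoring.MonitorService;
import software.amazon.jdbc.util.storage.StorageService;
import software.amazon.jdbc.util.telemetry.TelemetryFactory;

public class MonitorServiceUsageSketch {
  static HeartbeatMonitor startHeartbeatMonitor(
      MonitorService monitorService,
      StorageService storageService,
      TelemetryFactory telemetryFactory,
      String originalUrl,
      String driverProtocol,
      TargetDriverDialect driverDialect,
      Dialect dbDialect,
      Properties props) throws SQLException {

    // Register the monitor type once so the service knows when to expire or recreate it.
    monitorService.registerMonitorTypeIfAbsent(
        HeartbeatMonitor.class,
        TimeUnit.MINUTES.toNanos(15),  // expiration timeout
        TimeUnit.MINUTES.toNanos(3),   // heartbeat (inactive) timeout
        new HashSet<>(Collections.singletonList(MonitorErrorResponse.RECREATE)),
        null);                         // this monitor produces no stored data

    // Create and start the monitor only if one is not already running for this key.
    return monitorService.runIfAbsent(
        HeartbeatMonitor.class,
        "heartbeat:" + originalUrl,
        storageService,
        telemetryFactory,
        originalUrl,
        driverProtocol,
        driverDialect,
        dbDialect,
        props,
        (connectionService, pluginService) -> new HeartbeatMonitor());
  }
}
```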
+ */ + +package software.amazon.jdbc.util.monitoring; + +import java.sql.SQLException; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; +import java.util.logging.Logger; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; +import software.amazon.jdbc.ConnectionProvider; +import software.amazon.jdbc.DriverConnectionProvider; +import software.amazon.jdbc.TargetDriverHelper; +import software.amazon.jdbc.dialect.Dialect; +import software.amazon.jdbc.hostlistprovider.Topology; +import software.amazon.jdbc.hostlistprovider.monitoring.ClusterTopologyMonitorImpl; +import software.amazon.jdbc.hostlistprovider.monitoring.MultiAzClusterTopologyMonitorImpl; +import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; +import software.amazon.jdbc.util.ExecutorFactory; +import software.amazon.jdbc.util.Messages; +import software.amazon.jdbc.util.PropertyUtils; +import software.amazon.jdbc.util.connection.ConnectionServiceImpl; +import software.amazon.jdbc.util.events.DataAccessEvent; +import software.amazon.jdbc.util.events.Event; +import software.amazon.jdbc.util.events.EventPublisher; +import software.amazon.jdbc.util.events.EventSubscriber; +import software.amazon.jdbc.util.storage.ExternallyManagedCache; +import software.amazon.jdbc.util.storage.StorageService; +import software.amazon.jdbc.util.telemetry.TelemetryFactory; + +public class MonitorServiceImpl implements MonitorService, EventSubscriber { + private static final Logger LOGGER = Logger.getLogger(MonitorServiceImpl.class.getName()); + protected static final long DEFAULT_CLEANUP_INTERVAL_NANOS = TimeUnit.MINUTES.toNanos(1); + protected static final Map, Supplier> defaultSuppliers; + + static { + Map, Supplier> suppliers = new HashMap<>(); + Set recreateOnError = + new HashSet<>(Collections.singletonList(MonitorErrorResponse.RECREATE)); + MonitorSettings defaultSettings = new MonitorSettings( + TimeUnit.MINUTES.toNanos(15), TimeUnit.MINUTES.toNanos(3), recreateOnError); + + suppliers.put(ClusterTopologyMonitorImpl.class, () -> new CacheContainer(defaultSettings, Topology.class)); + suppliers.put(MultiAzClusterTopologyMonitorImpl.class, () -> new CacheContainer(defaultSettings, Topology.class)); + defaultSuppliers = Collections.unmodifiableMap(suppliers); + } + + protected final EventPublisher publisher; + protected final Map, CacheContainer> monitorCaches = new ConcurrentHashMap<>(); + protected final ScheduledExecutorService cleanupExecutor = + ExecutorFactory.newSingleThreadScheduledThreadExecutor("msi"); + + + public MonitorServiceImpl(EventPublisher publisher) { + this(DEFAULT_CLEANUP_INTERVAL_NANOS, publisher); + } + + /** + * Constructs a MonitorServiceImpl instance, subscribes to the given publisher's data access events, and submits a + * cleanup thread to supervise submitted monitors. + * + * @param cleanupIntervalNanos the interval at which the cleanup thread should check on submitted monitors, in + * nanoseconds. + * @param publisher the publisher to subscribe to for data access events. 
+ */ + public MonitorServiceImpl(long cleanupIntervalNanos, EventPublisher publisher) { + this.publisher = publisher; + this.publisher.subscribe(this, new HashSet<>(Collections.singletonList(DataAccessEvent.class))); + initCleanupThread(cleanupIntervalNanos); + } + + protected void initCleanupThread(long cleanupIntervalNanos) { + cleanupExecutor.scheduleAtFixedRate( + this::checkMonitors, cleanupIntervalNanos, cleanupIntervalNanos, TimeUnit.NANOSECONDS); + } + + protected void checkMonitors() { + LOGGER.finest(Messages.get("MonitorServiceImpl.checkingMonitors")); + for (CacheContainer container : monitorCaches.values()) { + ExternallyManagedCache cache = container.getCache(); + // Note: the map returned by getEntries is a copy of the ExternallyManagedCache map + for (Map.Entry entry : cache.getEntries().entrySet()) { + Object key = entry.getKey(); + MonitorItem removedItem = cache.removeIf(key, mi -> mi.getMonitor().getState() == MonitorState.STOPPED); + if (removedItem != null) { + removedItem.getMonitor().stop(); + continue; + } + + MonitorSettings monitorSettings = container.getSettings(); + removedItem = cache.removeIf(key, mi -> mi.getMonitor().getState() == MonitorState.ERROR); + if (removedItem != null) { + LOGGER.finest( + Messages.get("MonitorServiceImpl.removedErrorMonitor", new Object[] {removedItem.getMonitor()})); + handleMonitorError(container, key, removedItem); + continue; + } + + long inactiveTimeoutNanos = monitorSettings.getInactiveTimeoutNanos(); + removedItem = cache.removeIf( + key, mi -> System.nanoTime() - mi.getMonitor().getLastActivityTimestampNanos() > inactiveTimeoutNanos); + if (removedItem != null) { + // Monitor has been inactive for longer than the inactive timeout and is considered stuck. + LOGGER.fine( + Messages.get("MonitorServiceImpl.monitorStuck", + new Object[] {removedItem.getMonitor(), TimeUnit.NANOSECONDS.toMillis(inactiveTimeoutNanos)})); + handleMonitorError(container, key, removedItem); + continue; + } + + removedItem = cache.removeExpiredIf(key, mi -> mi.getMonitor().canDispose()); + if (removedItem != null) { + LOGGER.fine( + Messages.get("MonitorServiceImpl.removedExpiredMonitor", new Object[] {removedItem.getMonitor()})); + removedItem.getMonitor().stop(); + } + } + } + } + + protected void handleMonitorError( + CacheContainer cacheContainer, + Object key, + MonitorItem errorMonitorItem) { + Monitor monitor = errorMonitorItem.getMonitor(); + monitor.stop(); + + Set errorResponses = cacheContainer.getSettings().getErrorResponses(); + if (errorResponses != null && errorResponses.contains(MonitorErrorResponse.RECREATE)) { + cacheContainer.getCache().computeIfAbsent(key, k -> { + LOGGER.fine(Messages.get("MonitorServiceImpl.recreatingMonitor", new Object[] {monitor})); + MonitorItem newMonitorItem = new MonitorItem(errorMonitorItem.getMonitorSupplier()); + newMonitorItem.getMonitor().start(); + return newMonitorItem; + }); + } + } + + @Override + public void registerMonitorTypeIfAbsent( + Class monitorClass, + long expirationTimeoutNanos, + long heartbeatTimeoutNanos, + Set errorResponses, + @Nullable Class producedDataClass) { + monitorCaches.computeIfAbsent( + monitorClass, + mc -> new CacheContainer( + new MonitorSettings(expirationTimeoutNanos, heartbeatTimeoutNanos, errorResponses), + producedDataClass)); + } + + @Override + public T runIfAbsent( + Class monitorClass, + Object key, + StorageService storageService, + TelemetryFactory telemetryFactory, + String originalUrl, + String driverProtocol, + TargetDriverDialect driverDialect, + 
Dialect dbDialect, + Properties originalProps, + MonitorInitializer initializer) throws SQLException { + CacheContainer cacheContainer = monitorCaches.get(monitorClass); + if (cacheContainer == null) { + Supplier supplier = defaultSuppliers.get(monitorClass); + if (supplier == null) { + throw new IllegalStateException( + Messages.get("MonitorServiceImpl.monitorTypeNotRegistered", new Object[] {monitorClass})); + } + + cacheContainer = monitorCaches.computeIfAbsent(monitorClass, k -> supplier.get()); + } + + TargetDriverHelper helper = new TargetDriverHelper(); + java.sql.Driver driver = helper.getTargetDriver(originalUrl, originalProps); + final ConnectionProvider defaultConnectionProvider = new DriverConnectionProvider(driver); + final Properties propsCopy = PropertyUtils.copyProperties(originalProps); + final ConnectionServiceImpl connectionService = new ConnectionServiceImpl( + storageService, + this, + telemetryFactory, + defaultConnectionProvider, + originalUrl, + driverProtocol, + driverDialect, + dbDialect, + propsCopy); + + Monitor monitor = cacheContainer.getCache().computeIfAbsent(key, k -> { + MonitorItem monitorItem = new MonitorItem(() -> initializer.createMonitor( + connectionService, + connectionService.getPluginService())); + monitorItem.getMonitor().start(); + return monitorItem; + }).getMonitor(); + + if (monitorClass.isInstance(monitor)) { + return monitorClass.cast(monitor); + } + + throw new IllegalStateException( + Messages.get("MonitorServiceImpl.unexpectedMonitorClass", new Object[] {monitorClass, monitor})); + } + + @Override + public @Nullable T get(Class monitorClass, Object key) { + CacheContainer cacheContainer = monitorCaches.get(monitorClass); + if (cacheContainer == null) { + return null; + } + + MonitorItem item = cacheContainer.getCache().get(key); + if (item == null) { + return null; + } + + Monitor monitor = item.getMonitor(); + if (monitorClass.isInstance(monitor)) { + return monitorClass.cast(monitor); + } + + LOGGER.fine( + Messages.get( + "MonitorServiceImpl.monitorClassMismatch", + new Object[]{key, monitorClass, monitor, monitor.getClass()})); + return null; + } + + @Override + public T remove(Class monitorClass, Object key) { + CacheContainer cacheContainer = monitorCaches.get(monitorClass); + if (cacheContainer == null) { + return null; + } + + MonitorItem item = cacheContainer.getCache().removeIf( + key, monitorItem -> monitorClass.isInstance(monitorItem.getMonitor())); + if (item == null) { + return null; + } + + return monitorClass.cast(item.getMonitor()); + } + + @Override + public void stopAndRemove(Class monitorClass, Object key) { + CacheContainer cacheContainer = monitorCaches.get(monitorClass); + if (cacheContainer == null) { + LOGGER.fine(Messages.get("MonitorServiceImpl.stopAndRemoveMissingMonitorType", new Object[] {monitorClass, key})); + return; + } + + MonitorItem monitorItem = cacheContainer.getCache().remove(key); + if (monitorItem != null) { + monitorItem.getMonitor().stop(); + } + } + + @Override + public void stopAndRemoveMonitors(Class monitorClass) { + CacheContainer cacheContainer = monitorCaches.get(monitorClass); + if (cacheContainer == null) { + LOGGER.fine(Messages.get("MonitorServiceImpl.stopAndRemoveMonitorsMissingType", new Object[] {monitorClass})); + return; + } + + ExternallyManagedCache cache = cacheContainer.getCache(); + for (Map.Entry entry : cache.getEntries().entrySet()) { + MonitorItem monitorItem = cache.remove(entry.getKey()); + if (monitorItem != null) { + monitorItem.getMonitor().stop(); + } + } + } + + 
@Override + public void stopAndRemoveAll() { + for (Class monitorClass : monitorCaches.keySet()) { + stopAndRemoveMonitors(monitorClass); + } + } + + @Override + public void releaseResources() { + cleanupExecutor.shutdownNow(); + stopAndRemoveAll(); + } + + @Override + public void processEvent(Event event) { + if (!(event instanceof DataAccessEvent)) { + return; + } + + DataAccessEvent accessEvent = (DataAccessEvent) event; + for (CacheContainer container : monitorCaches.values()) { + if (container.getProducedDataClass() == null + || !accessEvent.getDataClass().equals(container.getProducedDataClass())) { + continue; + } + + // The data produced by the monitor in this cache with this key has been accessed recently, so we extend the + // monitor's expiration. + container.getCache().extendExpiration(accessEvent.getKey()); + } + } + + /** + * A container that holds a cache of monitors of a given type with the related settings and info for that type. + */ + protected static class CacheContainer { + private @NonNull final MonitorSettings settings; + private @NonNull final ExternallyManagedCache cache; + private @Nullable final Class producedDataClass; + + /** + * Constructs a CacheContainer instance. As part of the constructor, a new cache will be created based on the given + * settings. + * + * @param settings the settings for the cache and monitor type. + * @param producedDataClass the class of the data produced by the monitor type, if it produces any data. + */ + public CacheContainer(@NonNull final MonitorSettings settings, @Nullable Class producedDataClass) { + this.settings = settings; + this.cache = new ExternallyManagedCache<>(settings.getExpirationTimeoutNanos()); + this.producedDataClass = producedDataClass; + } + + public @NonNull MonitorSettings getSettings() { + return settings; + } + + public @NonNull ExternallyManagedCache getCache() { + return cache; + } + + public @Nullable Class getProducedDataClass() { + return producedDataClass; + } + } + + /** + * A container object that holds a monitor together with the supplier used to generate the monitor. The supplier can + * be used to recreate the monitor if it encounters an error or becomes stuck. + */ + protected static class MonitorItem { + private @NonNull final Supplier monitorSupplier; + private @NonNull final Monitor monitor; + + /** + * Constructs a MonitorItem instance. As part of the constructor, a new monitor will be created using the given + * monitor supplier. + * + * @param monitorSupplier a supplier lambda that produces a monitor. + */ + protected MonitorItem(@NonNull Supplier monitorSupplier) { + this.monitorSupplier = monitorSupplier; + this.monitor = monitorSupplier.get(); + } + + public @NonNull Supplier getMonitorSupplier() { + return monitorSupplier; + } + + public @NonNull Monitor getMonitor() { + return monitor; + } + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/MonitorSettings.java b/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/MonitorSettings.java new file mode 100644 index 000000000..6774058e8 --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/MonitorSettings.java @@ -0,0 +1,60 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc.util.monitoring; + +import java.util.Set; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; + +/** + * A class defining settings for a monitor or monitor type. + */ +public class MonitorSettings { + private final long expirationTimeoutNanos; + private final long inactiveTimeoutNanos; + private @Nullable final Set errorResponses; + + /** + * Constructs a MonitorSettings instance. + * + * @param expirationTimeoutNanos the amount of time that a monitor should sit in a cache before being considered + * expired. + * @param inactiveTimeoutNanos a duration in nanoseconds defining the maximum amount of time that a monitor should + * take between updating its last-updated timestamp. If a monitor has not updated its + * last-updated timestamp within this duration it will be considered stuck. + * @param errorResponses a {@link Set} defining actions to take if the monitor is in an error state. If null, + * no action will be performed. + */ + public MonitorSettings( + long expirationTimeoutNanos, long inactiveTimeoutNanos, @NonNull Set errorResponses) { + this.expirationTimeoutNanos = expirationTimeoutNanos; + this.inactiveTimeoutNanos = inactiveTimeoutNanos; + this.errorResponses = errorResponses; + } + + public long getExpirationTimeoutNanos() { + return expirationTimeoutNanos; + } + + public long getInactiveTimeoutNanos() { + return inactiveTimeoutNanos; + } + + public @Nullable Set getErrorResponses() { + return errorResponses; + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/MonitorState.java b/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/MonitorState.java new file mode 100644 index 000000000..a0f2474a7 --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/util/monitoring/MonitorState.java @@ -0,0 +1,37 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc.util.monitoring; + +/** + * Represents the state of a monitor. + */ +public enum MonitorState { + /** + * The monitor is running. + */ + RUNNING, + + /** + * The monitor has stopped running in an expected manner. + */ + STOPPED, + + /** + * The monitor has stopped due to an unexpected error. 
+  */
+  ERROR
+}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/storage/CacheItem.java b/wrapper/src/main/java/software/amazon/jdbc/util/storage/CacheItem.java
new file mode 100644
index 000000000..e6d38e4ae
--- /dev/null
+++ b/wrapper/src/main/java/software/amazon/jdbc/util/storage/CacheItem.java
@@ -0,0 +1,112 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.util.storage;
+
+import java.util.Objects;
+import org.checkerframework.checker.nullness.qual.NonNull;
+import org.checkerframework.checker.nullness.qual.Nullable;
+
+/**
+ * A container class that holds a cache value together with the time at which the value should be considered expired.
+ */
+public class CacheItem<V> {
+  protected final @NonNull V item;
+  protected long expirationTimeNanos;
+  protected final @Nullable ShouldDisposeFunc<V> shouldDisposeFunc;
+
+  /**
+   * Constructs a CacheItem.
+   *
+   * @param item                the item value.
+   * @param expirationTimeNanos the time at which the CacheItem should be considered expired.
+   */
+  protected CacheItem(final @NonNull V item, final long expirationTimeNanos) {
+    this.item = item;
+    this.expirationTimeNanos = expirationTimeNanos;
+    this.shouldDisposeFunc = null;
+  }
+
+  /**
+   * Constructs a CacheItem.
+   *
+   * @param item                the item value.
+   * @param expirationTimeNanos the time at which the CacheItem should be considered expired.
+   * @param shouldDisposeFunc   a function defining whether an expired item should be disposed. If null, items will
+   *                            always be disposed when expired.
+   */
+  protected CacheItem(
+      final @NonNull V item, final long expirationTimeNanos, @Nullable final ShouldDisposeFunc<V> shouldDisposeFunc) {
+    this.item = item;
+    this.expirationTimeNanos = expirationTimeNanos;
+    this.shouldDisposeFunc = shouldDisposeFunc;
+  }
+
+  /**
+   * Indicates whether this item is expired.
+   *
+   * @return true if this item is expired, otherwise returns false.
+   */
+  protected boolean isExpired() {
+    return System.nanoTime() > expirationTimeNanos;
+  }
+
+  /**
+   * Renews a cache item's expiration time.
+   */
+  protected void extendExpiration(long timeToLiveNanos) {
+    this.expirationTimeNanos = System.nanoTime() + timeToLiveNanos;
+  }
+
+  /**
+   * Determines if a cache item should be cleaned up. An item should be cleaned up if it has passed its expiration time
+   * and the {@link ShouldDisposeFunc} (if defined) indicates that it should be cleaned up.
+   *
+   * @return true if the cache item should be cleaned up. Otherwise, returns false.
+ */ + protected boolean shouldCleanup() { + final boolean isExpired = this.expirationTimeNanos != 0 && System.nanoTime() > this.expirationTimeNanos; + if (shouldDisposeFunc != null) { + return isExpired && shouldDisposeFunc.shouldDispose(this.item); + } + return isExpired; + } + + @Override + public int hashCode() { + return Objects.hashCode(this.item); + } + + @Override + public boolean equals(final Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + final CacheItem other = (CacheItem) obj; + return Objects.equals(item, other.item); + } + + @Override + public String toString() { + return "CacheItem [item=" + item + ", expirationTimeNanos=" + expirationTimeNanos + "]"; + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/CacheMap.java b/wrapper/src/main/java/software/amazon/jdbc/util/storage/CacheMap.java similarity index 73% rename from wrapper/src/main/java/software/amazon/jdbc/util/CacheMap.java rename to wrapper/src/main/java/software/amazon/jdbc/util/storage/CacheMap.java index 7d2e2dbbb..2bad26df2 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/util/CacheMap.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/storage/CacheMap.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package software.amazon.jdbc.util; +package software.amazon.jdbc.util.storage; import java.util.HashMap; import java.util.Map; @@ -93,50 +93,4 @@ protected void cleanUp() { }); } } - - static class CacheItem { - final V item; - final long expirationTime; - - public CacheItem(final V item, final long expirationTime) { - this.item = item; - this.expirationTime = expirationTime; - } - - boolean isExpired() { - return System.nanoTime() > expirationTime; - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + ((item == null) ? 0 : item.hashCode()); - return result; - } - - @Override - public boolean equals(final Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - final CacheItem other = (CacheItem) obj; - if (item == null) { - return other.item == null; - } else { - return item.equals(other.item); - } - } - - @Override - public String toString() { - return "CacheItem [item=" + item + ", expirationTime=" + expirationTime + "]"; - } - } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/storage/ExpirationCache.java b/wrapper/src/main/java/software/amazon/jdbc/util/storage/ExpirationCache.java new file mode 100644 index 000000000..f9c2a7ead --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/util/storage/ExpirationCache.java @@ -0,0 +1,284 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package software.amazon.jdbc.util.storage; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; +import java.util.logging.Logger; +import org.checkerframework.checker.nullness.qual.Nullable; +import software.amazon.jdbc.util.Messages; + +/** + * A cache that can be used to store values that expire after a configured period of time. Entries are disposed when + * removed from the cache if an {@link ItemDisposalFunc} is defined. + * + * @param the type of the keys in the cache. + * @param the type of the values in the cache. + */ +public class ExpirationCache { + private static final Logger LOGGER = Logger.getLogger(ExpirationCache.class.getName()); + protected static final long DEFAULT_TIME_TO_LIVE_NANOS = TimeUnit.MINUTES.toNanos(5); + protected final Map> cache = new ConcurrentHashMap<>(); + protected final boolean isRenewableExpiration; + protected final long timeToLiveNanos; + protected final ShouldDisposeFunc shouldDisposeFunc; + protected final ItemDisposalFunc itemDisposalFunc; + + public ExpirationCache() { + this(false, DEFAULT_TIME_TO_LIVE_NANOS, null, null); + } + + /** + * Constructs an ExpirationCache instance. + * + * @param isRenewableExpiration controls whether an item's expiration should be renewed when retrieved. If the item is + * expired when it is retrieved and isRenewableExpiration is true, the item's expiration + * will be renewed and the item will be returned. + * @param timeToLiveNanos the duration that the item should sit in the cache before being considered expired, in + * nanoseconds. + * @param shouldDisposeFunc a function defining the conditions under which an expired entry should be cleaned up. + * If null is passed, the entry will always be cleaned up if it is expired. + * @param itemDisposalFunc a function defining how to dispose of an item when it is removed. If null is passed, + * the item will be removed without performing any additional operations. + */ + public ExpirationCache( + final boolean isRenewableExpiration, + final long timeToLiveNanos, + final @Nullable ShouldDisposeFunc shouldDisposeFunc, + final @Nullable ItemDisposalFunc itemDisposalFunc) { + this.isRenewableExpiration = isRenewableExpiration; + this.timeToLiveNanos = timeToLiveNanos; + this.shouldDisposeFunc = shouldDisposeFunc; + this.itemDisposalFunc = itemDisposalFunc; + } + + /** + * Stores the given value at the given key. + * + * @param key the key at which the value should be stored. + * @param value the value to store. + * @return the previous value stored at the given key, or null if there was no previous value. If there was a previous + * value it will also be disposed. + */ + public @Nullable V put( + final K key, + final V value) { + final CacheItem cacheItem = + cache.put(key, new CacheItem<>( + value, System.nanoTime() + this.timeToLiveNanos, this.shouldDisposeFunc)); + if (cacheItem == null) { + return null; + } + + // cacheItem is the previous value associated with the key. + if (this.itemDisposalFunc != null) { + this.itemDisposalFunc.dispose(cacheItem.item); + } + + return cacheItem.item; + } + + /** + * If a value does not exist for the given key or the existing value is expired and non-renewable, stores the value + * returned by the given mapping function, unless the function returns null, in which case the key will be removed. + * + * @param key the key for the new or existing value. 
+ * @param mappingFunction the function to call to compute a new value. + * @return the current (existing or computed) value associated with the specified key, or null if the computed value + * is null. + */ + public @Nullable V computeIfAbsent( + final K key, + Function mappingFunction) { + // A list is used to store the cached item for later disposal since lambdas require references to outer variables + // to be final. This allows us to dispose of the item after it has been removed and the cache has been unlocked, + // which is important because the disposal function may be long-running. + final List toDisposeList = new ArrayList<>(1); + final CacheItem cacheItem = cache.compute( + key, + (k, valueItem) -> { + if (valueItem == null) { + // The key is absent; compute and store the new value. + return new CacheItem<>( + mappingFunction.apply(k), + System.nanoTime() + this.timeToLiveNanos, + this.shouldDisposeFunc); + } + + if (valueItem.shouldCleanup() && !this.isRenewableExpiration) { + // The existing value is expired and non-renewable. Mark it for disposal and store the new value. + toDisposeList.add(valueItem.item); + return new CacheItem<>( + mappingFunction.apply(k), + System.nanoTime() + this.timeToLiveNanos, + this.shouldDisposeFunc); + } + + // The existing value is non-expired or renewable. Keep the existing value. + + if (this.isRenewableExpiration) { + valueItem.extendExpiration(this.timeToLiveNanos); + } + + return valueItem; + }); + + if (this.itemDisposalFunc != null && !toDisposeList.isEmpty()) { + this.itemDisposalFunc.dispose(toDisposeList.get(0)); + } + + return cacheItem.item; + } + + /** + * Retrieves the value stored at the given key. + * + * @param key the key for the value. + * @return the value stored at the given key, or null if there is no existing value or the existing value is expired. + */ + public @Nullable V get(final K key) { + final CacheItem cacheItem = cache.get(key); + if (cacheItem == null) { + return null; + } + + if (this.isRenewableExpiration) { + cacheItem.extendExpiration(this.timeToLiveNanos); + } else if (cacheItem.shouldCleanup()) { + return null; + } + + return cacheItem.item; + } + + /** + * Indicates whether a non-expired value is stored at the given key. + * + * @param key the key for the value. + * @return true if there is a non-expired value stored at the given key, otherwise returns false. + */ + public boolean exists(final K key) { + final CacheItem cacheItem = cache.get(key); + return cacheItem != null && !cacheItem.shouldCleanup(); + } + + /** + * Removes and disposes of the value stored at the given key. + * + * @param key the key associated with the value to be removed and disposed. + * @return the value removed from the cache, or null if the key does not exist in the cache. If the value was expired, + * it will still be returned. + */ + public @Nullable V remove(final K key) { + return removeAndDispose(key); + } + + protected @Nullable V removeAndDispose(K key) { + final CacheItem cacheItem = cache.remove(key); + if (cacheItem == null) { + return null; + } + + if (itemDisposalFunc != null) { + itemDisposalFunc.dispose(cacheItem.item); + } + + return cacheItem.item; + } + + /** + * Removes and disposes of all expired entries in the cache. 
+ */ + public void removeExpiredEntries() { + cache.forEach((key, value) -> { + try { + removeIfExpired(key); + } catch (Exception ex) { + LOGGER.fine(Messages.get("ExpirationCache.exceptionWhileRemovingEntry", new Object[] {key, value, ex})); + } + }); + } + + /** + * Removes and disposes of the item stored at the given key if it is expired and the {@link ShouldDisposeFunc} + * (if defined) returns true for the item. Otherwise, does nothing. + * + * @param key the key for the value to check for removal. + */ + public void removeIfExpired(K key) { + // A list is used to store the cached item for later disposal since lambdas require references to outer variables + // to be final. This allows us to dispose of the item after it has been removed and the cache has been unlocked, + // which is important because the disposal function may be long-running. + final List itemList = new ArrayList<>(1); + cache.computeIfPresent(key, (k, cacheItem) -> { + if (cacheItem.shouldCleanup()) { + itemList.add(cacheItem.item); + // Removes the item from the cache map. + return null; + } + + return cacheItem; + }); + + if (itemList.isEmpty()) { + return; + } + + V item = itemList.get(0); + if (item != null && itemDisposalFunc != null) { + itemDisposalFunc.dispose(item); + } + } + + /** + * Removes and disposes of all entries in the cache. + */ + public void clear() { + for (K key : cache.keySet()) { + removeAndDispose(key); + } + cache.clear(); + } + + /** + * Gets a map copy of all entries in the cache, including expired entries. + * + * @return a map copy of all entries in the cache, including expired entries. + */ + public Map getEntries() { + final Map entries = new HashMap<>(); + for (final Map.Entry> entry : this.cache.entrySet()) { + entries.put(entry.getKey(), entry.getValue().item); + } + + return entries; + } + + /** + * Gets the current size of the cache, including expired entries. + * + * @return the current size of the cache, including expired entries. + */ + public int size() { + return this.cache.size(); + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/storage/ExternallyManagedCache.java b/wrapper/src/main/java/software/amazon/jdbc/util/storage/ExternallyManagedCache.java new file mode 100644 index 000000000..fb2fbe53d --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/util/storage/ExternallyManagedCache.java @@ -0,0 +1,220 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package software.amazon.jdbc.util.storage; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.logging.Logger; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; +import software.amazon.jdbc.util.Messages; + +/** + * A cache with expiration functionality that does not automatically remove expired entries. The removal of expired + * entries is instead handled by an external class. This class is similar to {@link ExpirationCache}, but allows users + * to manually renew item expiration and provides more control over the conditions in which items are removed. Disposal + * of removed items should be handled outside of this class. + * + * @param the type of the keys in the cache. + * @param the type of the values in the cache. + */ +public class ExternallyManagedCache { + private static final Logger LOGGER = Logger.getLogger(ExpirationCache.class.getName()); + protected final Map> cache = new ConcurrentHashMap<>(); + protected final long timeToLiveNanos; + + /** + * Constructs an externally managed cache. + * + * @param timeToLiveNanos the duration that the item should sit in the cache before being considered expired, in + * nanoseconds. + */ + public ExternallyManagedCache(long timeToLiveNanos) { + this.timeToLiveNanos = timeToLiveNanos; + } + + /** + * Stores the given value in the cache at the given key. + * + * @param key the key for the value. + * @param value the value to store. + * @return the previous value stored at the key, or null if there was no value stored at the key. + */ + public @Nullable V put(@NonNull K key, @NonNull V value) { + CacheItem cacheItem = this.cache.put(key, new CacheItem<>(value, System.nanoTime() + timeToLiveNanos)); + if (cacheItem == null) { + return null; + } + + return cacheItem.item; + } + + /** + * Get the value stored at the given key. If the value is expired, null will be returned. + * + * @param key the key for the value. + * @return the value stored at the given key, or null if the key does not exist or the value is expired. + */ + public @Nullable V get(@NonNull K key) { + CacheItem cacheItem = this.cache.get(key); + if (cacheItem == null) { + return null; + } + + if (cacheItem.isExpired()) { + return null; + } + + return cacheItem.item; + } + + /** + * If a value does not exist for the given key, stores the value returned by the given mapping function, unless the + * function returns null, in which case the key will be removed. + * + * @param key the key for the new or existing value. + * @param mappingFunction the function to call to compute a new value. + * @return the current (existing or computed) value associated with the specified key, or null if the computed value + * is null. + */ + public @NonNull V computeIfAbsent(K key, Function mappingFunction) { + final CacheItem cacheItem = cache.compute( + key, + (k, valueItem) -> { + if (valueItem == null) { + // The key is absent; compute and store the new value. + return new CacheItem<>( + mappingFunction.apply(k), + System.nanoTime() + this.timeToLiveNanos); + } + + valueItem.extendExpiration(this.timeToLiveNanos); + return valueItem; + }); + + return cacheItem.item; + } + + /** + * Extends the expiration of the item stored at the given key, if it exists. + * + * @param key the key for the value whose expiration should be extended. 
+ */ + public void extendExpiration(K key) { + final CacheItem cacheItem = cache.get(key); + if (cacheItem != null) { + cacheItem.extendExpiration(this.timeToLiveNanos); + } else { + LOGGER.finest(Messages.get("ExternallyManagedCache.extendExpirationOnNonExistingKey", new Object[] {key})); + } + } + + /** + * Removes the value stored at the given key from the cache. + * + * @param key the key for the value to be removed. + * @return the value that was removed, or null if the key did not exist. + */ + public @Nullable V remove(K key) { + CacheItem cacheItem = cache.remove(key); + if (cacheItem == null) { + return null; + } + + return cacheItem.item; + } + + /** + * Removes the value stored at the given key if the given predicate returns true for the value. Otherwise, does + * nothing. + * + * @param key the key for the value to assess for removal. + * @param predicate a predicate lambda that defines the condition under which the value should be removed. + * @return the removed value, or null if no value was removed. + */ + public @Nullable V removeIf(K key, Predicate predicate) { + // The function only returns a value if it was removed. A list is used to store the removed item since lambdas + // require references to outer variables to be final. + final List removedItemList = new ArrayList<>(1); + cache.computeIfPresent( + key, + (k, valueItem) -> { + if (predicate.test(valueItem.item)) { + removedItemList.add(valueItem.item); + return null; + } + + return valueItem; + }); + + if (removedItemList.isEmpty()) { + return null; + } else { + return removedItemList.get(0); + } + } + + /** + * Removes the value stored at the given key if it is expired and the given predicate returns true for the value. + * Otherwise, does nothing. + * + * @param key the key for the value to assess for removal. + * @param predicate a predicate lambda that defines the condition under which the value should be removed if it is + * also expired. + * @return the removed value, or null if no value was removed. + */ + public @Nullable V removeExpiredIf(K key, Predicate predicate) { + // The function only returns a value if it was removed. A list is used to store the removed item since lambdas + // require references to outer variables to be final. + final List removedItemList = new ArrayList<>(1); + cache.computeIfPresent( + key, + (k, valueItem) -> { + if (valueItem.isExpired() && predicate.test(valueItem.item)) { + removedItemList.add(valueItem.item); + return null; + } + + return valueItem; + }); + + if (removedItemList.isEmpty()) { + return null; + } else { + return removedItemList.get(0); + } + } + + /** + * Gets a map copy of all entries in the cache, including expired entries. + * + * @return a map copy of all entries in the cache, including expired entries. + */ + public Map getEntries() { + final Map entries = new HashMap<>(); + for (final Map.Entry> entry : this.cache.entrySet()) { + entries.put(entry.getKey(), entry.getValue().item); + } + + return entries; + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/storage/ItemDisposalFunc.java b/wrapper/src/main/java/software/amazon/jdbc/util/storage/ItemDisposalFunc.java new file mode 100644 index 000000000..7bf7b3f4f --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/util/storage/ItemDisposalFunc.java @@ -0,0 +1,26 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.util.storage;
+
+/**
+ * An optional function defining extra cleanup steps to take when a cache item is cleaned up.
+ *
+ * @param <V> the type of object being disposed
+ */
+public interface ItemDisposalFunc<V> {
+  void dispose(V item);
+}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/storage/ShouldDisposeFunc.java b/wrapper/src/main/java/software/amazon/jdbc/util/storage/ShouldDisposeFunc.java
new file mode 100644
index 000000000..eb9683f84
--- /dev/null
+++ b/wrapper/src/main/java/software/amazon/jdbc/util/storage/ShouldDisposeFunc.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.util.storage;
+
+/**
+ * An optional function defining the conditions under which an expired entry should be cleaned up at cleanup time.
+ *
+ * @param <V> the type of object being analyzed for disposal
+ */
+public interface ShouldDisposeFunc<V> {
+  boolean shouldDispose(V item);
+}
diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/SlidingExpirationCache.java b/wrapper/src/main/java/software/amazon/jdbc/util/storage/SlidingExpirationCache.java
similarity index 67%
rename from wrapper/src/main/java/software/amazon/jdbc/util/SlidingExpirationCache.java
rename to wrapper/src/main/java/software/amazon/jdbc/util/storage/SlidingExpirationCache.java
index 4023bd55b..7f670866e 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/util/SlidingExpirationCache.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/util/storage/SlidingExpirationCache.java
@@ -14,7 +14,7 @@
  * limitations under the License.
*/ -package software.amazon.jdbc.util; +package software.amazon.jdbc.util.storage; import java.util.ArrayList; import java.util.HashMap; @@ -29,7 +29,7 @@ public class SlidingExpirationCache { - protected final Map cache = new ConcurrentHashMap<>(); + protected final Map> cache = new ConcurrentHashMap<>(); protected long cleanupIntervalNanos = TimeUnit.MINUTES.toNanos(10); protected final AtomicLong cleanupTimeNanos = new AtomicLong(System.nanoTime() + cleanupIntervalNanos); protected final AtomicReference> shouldDisposeFunc = new AtomicReference<>(null); @@ -93,12 +93,14 @@ public V computeIfAbsent( final long itemExpirationNano) { cleanUp(); - final CacheItem cacheItem = cache.computeIfAbsent( + final CacheItem cacheItem = cache.computeIfAbsent( key, - k -> new CacheItem( + k -> new CacheItem<>( mappingFunction.apply(k), - System.nanoTime() + itemExpirationNano)); - return cacheItem.withExtendExpiration(itemExpirationNano).item; + System.nanoTime() + itemExpirationNano, + this.shouldDisposeFunc.get())); + cacheItem.extendExpiration(itemExpirationNano); + return cacheItem.item; } public V put( @@ -106,17 +108,25 @@ public V put( final V value, final long itemExpirationNano) { cleanUp(); - final CacheItem cacheItem = cache.put(key, new CacheItem(value, System.nanoTime() + itemExpirationNano)); + final CacheItem cacheItem = cache.put( + key, new CacheItem<>(value, System.nanoTime() + itemExpirationNano)); if (cacheItem == null) { return null; } - return cacheItem.withExtendExpiration(itemExpirationNano).item; + + cacheItem.extendExpiration(itemExpirationNano); + return cacheItem.item; } public V get(final K key, final long itemExpirationNano) { cleanUp(); - final CacheItem cacheItem = cache.get(key); - return cacheItem == null ? null : cacheItem.withExtendExpiration(itemExpirationNano).item; + final CacheItem cacheItem = cache.get(key); + if (cacheItem == null) { + return null; + } + + cacheItem.extendExpiration(itemExpirationNano); + return cacheItem.item; } /** @@ -131,7 +141,7 @@ public void remove(final K key) { } protected void removeAndDispose(K key) { - final CacheItem cacheItem = cache.remove(key); + final CacheItem cacheItem = cache.remove(key); if (cacheItem != null && itemDisposalFunc != null) { itemDisposalFunc.dispose(cacheItem.item); } @@ -179,7 +189,7 @@ public void clear() { */ public Map getEntries() { final Map entries = new HashMap<>(); - for (final Map.Entry entry : this.cache.entrySet()) { + for (final Map.Entry> entry : this.cache.entrySet()) { entries.put(entry.getKey(), entry.getValue().item); } return entries; @@ -215,97 +225,8 @@ public void setCleanupIntervalNanos(long cleanupIntervalNanos) { this.cleanupTimeNanos.set(System.nanoTime() + cleanupIntervalNanos); } - /** - * An optional function defining the conditions under which an expired entry should be cleaned up - * at cleanup time. - * - * @param the type of object being analyzed for disposal - */ - public interface ShouldDisposeFunc { - boolean shouldDispose(V item); - } - - /** - * An optional function defining extra cleanup steps to take when a cache item is cleaned up. - * - * @param the type of object being disposed - */ - public interface ItemDisposalFunc { - void dispose(V item); - } - // For testing purposes only - Map getCache() { + Map> getCache() { return cache; } - - class CacheItem { - private final V item; - private long expirationTimeNano; - - /** - * CacheItem constructor. 
- * - * @param item the item value - * @param expirationTimeNano the amount of time before a CacheItem should be marked as expired. - */ - public CacheItem(final V item, final long expirationTimeNano) { - this.item = item; - this.expirationTimeNano = expirationTimeNano; - } - - /** - * Determines if a cache item should be cleaned up. An item should be cleaned up if it has past - * its expiration time and {@link ShouldDisposeFunc} (if defined) indicates that it should be - * cleaned up. - * - * @return true if the cache item should be cleaned up at cleanup time. Otherwise, returns - * false. - */ - boolean shouldCleanup() { - final ShouldDisposeFunc tempShouldDisposeFunc = shouldDisposeFunc.get(); - if (tempShouldDisposeFunc != null) { - return System.nanoTime() > expirationTimeNano && tempShouldDisposeFunc.shouldDispose(this.item); - } - return System.nanoTime() > expirationTimeNano; - } - - /** - * Renew a cache item's expiration time and return the value. - * - * @param itemExpirationNano the new expiration duration for the item - * @return the item value - */ - public CacheItem withExtendExpiration(final long itemExpirationNano) { - this.expirationTimeNano = System.nanoTime() + itemExpirationNano; - return this; - } - - @Override - public int hashCode() { - return Objects.hashCode(item); - } - - @Override - public boolean equals(final Object obj) { - if (this == obj) { - return true; - } - if (obj == null) { - return false; - } - if (getClass() != obj.getClass()) { - return false; - } - - @SuppressWarnings({"unchecked", "noinspection"}) - final CacheItem other = (CacheItem) obj; - return Objects.equals(item, other.item); - } - - @Override - public String toString() { - return "CacheItem [item=" + item + ", expirationTime=" + expirationTimeNano + "]"; - } - } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/SlidingExpirationCacheWithCleanupThread.java b/wrapper/src/main/java/software/amazon/jdbc/util/storage/SlidingExpirationCacheWithCleanupThread.java similarity index 96% rename from wrapper/src/main/java/software/amazon/jdbc/util/SlidingExpirationCacheWithCleanupThread.java rename to wrapper/src/main/java/software/amazon/jdbc/util/storage/SlidingExpirationCacheWithCleanupThread.java index 5c16130d1..36c0fbfaa 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/util/SlidingExpirationCacheWithCleanupThread.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/storage/SlidingExpirationCacheWithCleanupThread.java @@ -14,13 +14,13 @@ * limitations under the License. */ -package software.amazon.jdbc.util; +package software.amazon.jdbc.util.storage; import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.ReentrantLock; import java.util.logging.Logger; +import software.amazon.jdbc.util.ExecutorFactory; public class SlidingExpirationCacheWithCleanupThread extends SlidingExpirationCache { diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/storage/StorageService.java b/wrapper/src/main/java/software/amazon/jdbc/util/storage/StorageService.java new file mode 100644 index 000000000..35770f691 --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/util/storage/StorageService.java @@ -0,0 +1,99 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc.util.storage; + +import java.util.Map; +import org.checkerframework.checker.nullness.qual.Nullable; + +public interface StorageService { + /** + * Registers a new item class with the storage service. This method needs to be called before adding new classes of + * items to the service, so that the service knows when and how to dispose of the item. Expected item classes will be + * added automatically during driver initialization, but this method can be called to add new classes of items. + * + * @param itemClass the class of the item that will be stored, eg `CustomEndpointInfo.class`. + * @param isRenewableExpiration controls whether the item's expiration should be renewed if the item is fetched, + * regardless of whether it is already expired or not. + * @param timeToLiveNanos how long an item should be stored before being considered expired, in nanoseconds. + * @param shouldDisposeFunc a function defining whether an item should be disposed if expired. If null is passed, + * the item will always be disposed if expired. + * @param itemDisposalFunc a function defining how to dispose of an item when it is removed. If null is + * passed, the item will be removed without performing any additional operations. + * @param the type of item that will be stored under the item class. + */ + void registerItemClassIfAbsent( + Class itemClass, + boolean isRenewableExpiration, + long timeToLiveNanos, + @Nullable ShouldDisposeFunc shouldDisposeFunc, + @Nullable ItemDisposalFunc itemDisposalFunc); + + /** + * Stores an item in the storage service under the given item class. + * + * @param key the key for the item, eg "custom-endpoint.cluster-custom-XYZ.us-east-2.rds.amazonaws.com:5432". + * @param item the item to store. + * @param the type of the item being retrieved. + */ + void set(Object key, V item); + + /** + * Gets an item stored in the storage service. + * + * @param itemClass the expected class of the item being retrieved, eg `CustomEndpointInfo.class`. + * @param key the key for the item, eg "custom-endpoint.cluster-custom-XYZ.us-east-2.rds.amazonaws.com:5432". + * @param the type of the item being retrieved. + * @return the item stored at the given key for the given item class. + */ + @Nullable V get(Class itemClass, Object key); + + /** + * Indicates whether an item exists under the given item class and key. + * + * @param itemClass the class of the item. + * @param key the key for the item, eg "custom-endpoint.cluster-custom-XYZ.us-east-2.rds.amazonaws.com:5432". + * @return true if the item exists under the given item class and key, otherwise returns false. + */ + boolean exists(Class itemClass, Object key); + + /** + * Removes an item stored under the given item class. + * + * @param itemClass the class of the item. + * @param key the key for the item, eg "custom-endpoint.cluster-custom-XYZ.us-east-2.rds.amazonaws.com:5432". + */ + void remove(Class itemClass, Object key); + + /** + * Clears all items of the given item class. 
For example, storageService.clear(AllowedAndBlockedHosts.class) will + * remove all AllowedAndBlockedHost items from the storage service. + * + * @param itemClass the class of the items to clear. + */ + void clear(Class itemClass); + + /** + * Clears all items from the storage service. + */ + void clearAll(); + + // TODO: this is only called by the suggestedClusterId logic in RdsHostListProvider, which will be removed. This + // method should potentially be removed at that point as well. + @Nullable Map getEntries(Class itemClass); + + int size(Class itemClass); +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/storage/StorageServiceImpl.java b/wrapper/src/main/java/software/amazon/jdbc/util/storage/StorageServiceImpl.java new file mode 100644 index 000000000..e59e8d72e --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/util/storage/StorageServiceImpl.java @@ -0,0 +1,193 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc.util.storage; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; +import java.util.logging.Logger; +import org.checkerframework.checker.nullness.qual.Nullable; +import software.amazon.jdbc.AllowedAndBlockedHosts; +import software.amazon.jdbc.hostlistprovider.Topology; +import software.amazon.jdbc.plugin.bluegreen.BlueGreenStatus; +import software.amazon.jdbc.util.ExecutorFactory; +import software.amazon.jdbc.util.Messages; +import software.amazon.jdbc.util.events.DataAccessEvent; +import software.amazon.jdbc.util.events.EventPublisher; + +public class StorageServiceImpl implements StorageService { + private static final Logger LOGGER = Logger.getLogger(StorageServiceImpl.class.getName()); + protected static final long DEFAULT_CLEANUP_INTERVAL_NANOS = TimeUnit.MINUTES.toNanos(5); + protected static final Map, Supplier>> defaultCacheSuppliers; + + static { + Map, Supplier>> suppliers = new HashMap<>(); + suppliers.put(Topology.class, ExpirationCache::new); + suppliers.put(AllowedAndBlockedHosts.class, ExpirationCache::new); + suppliers.put(BlueGreenStatus.class, + () -> new ExpirationCache<>(false, TimeUnit.MINUTES.toNanos(60), null, null)); + defaultCacheSuppliers = Collections.unmodifiableMap(suppliers); + } + + protected final EventPublisher publisher; + protected final Map, ExpirationCache> caches = new ConcurrentHashMap<>(); + protected final ScheduledExecutorService cleanupExecutor = + ExecutorFactory.newSingleThreadScheduledThreadExecutor("ssi"); + + public StorageServiceImpl(EventPublisher publisher) { + this(DEFAULT_CLEANUP_INTERVAL_NANOS, publisher); + } + + public StorageServiceImpl(long cleanupIntervalNanos, EventPublisher publisher) { + this.publisher = publisher; + initCleanupThread(cleanupIntervalNanos); + } + 
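The storage service above keeps one cache per item class and resolves values by Class key, falling back to a default cache supplier when a class was never registered explicitly. A compact sketch of that class-keyed lookup, assuming an illustrative SketchStorage name and plain ConcurrentHashMaps in place of ExpirationCache:

// Illustrative sketch only; not part of this PR.
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class SketchStorage {
  // One map of entries per item class, created on demand.
  private final Map<Class<?>, Map<Object, Object>> cachesByClass = new ConcurrentHashMap<>();

  <T> void set(Object key, T item) {
    cachesByClass
        .computeIfAbsent(item.getClass(), c -> new ConcurrentHashMap<>())
        .put(key, item);
  }

  <T> T get(Class<T> itemClass, Object key) {
    Map<Object, Object> cache = cachesByClass.get(itemClass);
    if (cache == null) {
      return null;
    }
    Object value = cache.get(key);
    // Only return the value if it is actually of the requested class.
    return itemClass.isInstance(value) ? itemClass.cast(value) : null;
  }
}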
+ protected void initCleanupThread(long cleanupIntervalNanos) { + this.cleanupExecutor.scheduleAtFixedRate( + this::removeExpiredItems, cleanupIntervalNanos, cleanupIntervalNanos, TimeUnit.NANOSECONDS); + } + + protected void removeExpiredItems() { + LOGGER.finest(Messages.get("StorageServiceImpl.removeExpiredItems")); + for (ExpirationCache cache : caches.values()) { + cache.removeExpiredEntries(); + } + } + + @Override + public void registerItemClassIfAbsent( + Class itemClass, + boolean isRenewableExpiration, + long timeToLiveNanos, + @Nullable ShouldDisposeFunc shouldDisposeFunc, + @Nullable ItemDisposalFunc itemDisposalFunc) { + caches.computeIfAbsent( + itemClass, + k -> new ExpirationCache<>( + isRenewableExpiration, + timeToLiveNanos, + shouldDisposeFunc, + itemDisposalFunc)); + } + + @Override + @SuppressWarnings("unchecked") + public void set(Object key, V value) { + ExpirationCache cache = caches.get(value.getClass()); + if (cache == null) { + Supplier> supplier = defaultCacheSuppliers.get(value.getClass()); + if (supplier == null) { + throw new IllegalStateException( + Messages.get("StorageServiceImpl.itemClassNotRegistered", new Object[] {value.getClass()})); + } else { + cache = caches.computeIfAbsent(value.getClass(), c -> supplier.get()); + } + } + + try { + ExpirationCache typedCache = (ExpirationCache) cache; + typedCache.put(key, value); + } catch (ClassCastException e) { + throw new IllegalArgumentException( + Messages.get("StorageServiceImpl.unexpectedValueMismatch", new Object[] {value, value.getClass(), cache})); + } + } + + @Override + public @Nullable V get(Class itemClass, Object key) { + final ExpirationCache cache = caches.get(itemClass); + if (cache == null) { + return null; + } + + final Object value = cache.get(key); + if (value == null) { + return null; + } + + if (itemClass.isInstance(value)) { + DataAccessEvent event = new DataAccessEvent(itemClass, key); + this.publisher.publish(event); + return itemClass.cast(value); + } + + LOGGER.fine( + Messages.get( + "StorageServiceImpl.itemClassMismatch", + new Object[] {key, itemClass, value, value.getClass()})); + return null; + } + + @Override + public boolean exists(Class itemClass, Object key) { + final ExpirationCache cache = caches.get(itemClass); + if (cache == null) { + return false; + } + + return cache.exists(key); + } + + @Override + public void remove(Class itemClass, Object key) { + final ExpirationCache cache = caches.get(itemClass); + if (cache != null) { + cache.remove(key); + } + } + + @Override + public void clear(Class itemClass) { + final ExpirationCache cache = caches.get(itemClass); + if (cache != null) { + cache.clear(); + } + } + + @Override + public void clearAll() { + for (ExpirationCache cache : caches.values()) { + cache.clear(); + } + } + + @Override + public @Nullable Map getEntries(Class itemClass) { + final ExpirationCache cache = caches.get(itemClass); + if (cache == null) { + return null; + } + + // TODO: remove this method after removing the suggestedClusterId logic + return (Map) cache.getEntries(); + } + + @Override + public int size(Class itemClass) { + final ExpirationCache cache = caches.get(itemClass); + if (cache == null) { + return 0; + } + + return cache.size(); + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/wrapper/ConnectionWrapper.java b/wrapper/src/main/java/software/amazon/jdbc/wrapper/ConnectionWrapper.java index 0ea9c28a1..dd99e0fee 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/wrapper/ConnectionWrapper.java +++ 
b/wrapper/src/main/java/software/amazon/jdbc/wrapper/ConnectionWrapper.java @@ -52,10 +52,15 @@ import software.amazon.jdbc.profile.ConfigurationProfile; import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; import software.amazon.jdbc.util.ConnectionUrlParser; +import software.amazon.jdbc.util.FullServicesContainer; +import software.amazon.jdbc.util.FullServicesContainerImpl; import software.amazon.jdbc.util.Messages; import software.amazon.jdbc.util.SqlState; import software.amazon.jdbc.util.StringUtils; import software.amazon.jdbc.util.WrapperUtils; +import software.amazon.jdbc.util.connection.ConnectionService; +import software.amazon.jdbc.util.monitoring.MonitorService; +import software.amazon.jdbc.util.storage.StorageService; import software.amazon.jdbc.util.telemetry.TelemetryFactory; public class ConnectionWrapper implements Connection, CanReleaseResources { @@ -77,13 +82,13 @@ public class ConnectionWrapper implements Connection, CanReleaseResources { protected final ConnectionUrlParser connectionUrlParser = new ConnectionUrlParser(); public ConnectionWrapper( + @NonNull final FullServicesContainer servicesContainer, @NonNull final Properties props, @NonNull final String url, @NonNull final ConnectionProvider defaultConnectionProvider, @Nullable final ConnectionProvider effectiveConnectionProvider, - @NonNull final TargetDriverDialect targetDriverDialect, - @Nullable final ConfigurationProfile configurationProfile, - @NonNull final TelemetryFactory telemetryFactory) + @NonNull final TargetDriverDialect driverDialect, + @Nullable final ConfigurationProfile configurationProfile) throws SQLException { if (StringUtils.isNullOrEmpty(url)) { @@ -99,11 +104,20 @@ public ConnectionWrapper( defaultConnectionProvider, effectiveConnectionProvider, this, - telemetryFactory); + servicesContainer.getTelemetryFactory()); + servicesContainer.setConnectionPluginManager(pluginManager); final PluginServiceImpl pluginService = new PluginServiceImpl( - pluginManager, props, url, this.targetDriverProtocol, targetDriverDialect, this.configurationProfile); - - init(props, pluginManager, telemetryFactory, pluginService, pluginService, pluginService); + servicesContainer, + props, + url, + this.targetDriverProtocol, + driverDialect, + this.configurationProfile); + servicesContainer.setHostListProviderService(pluginService); + servicesContainer.setPluginService(pluginService); + servicesContainer.setPluginManagerService(pluginService); + + init(props, servicesContainer, defaultConnectionProvider, driverDialect); if (PropertyDefinition.LOG_UNCLOSED_CONNECTIONS.getBoolean(props)) { this.openConnectionStacktrace = new Throwable(Messages.get("ConnectionWrapper.unclosedConnectionInstantiated")); @@ -114,41 +128,50 @@ public ConnectionWrapper( protected ConnectionWrapper( @NonNull final Properties props, @NonNull final String url, + @NonNull final ConnectionProvider defaultConnectionProvider, + @NonNull final TargetDriverDialect driverDialect, @NonNull final ConnectionPluginManager connectionPluginManager, @NonNull final TelemetryFactory telemetryFactory, @NonNull final PluginService pluginService, @NonNull final HostListProviderService hostListProviderService, - @NonNull final PluginManagerService pluginManagerService) + @NonNull final PluginManagerService pluginManagerService, + @NonNull final StorageService storageService, + @NonNull final MonitorService monitorService, + @NonNull final ConnectionService connectionService) throws SQLException { if (StringUtils.isNullOrEmpty(url)) { throw new 
IllegalArgumentException("url"); } - init(props, - connectionPluginManager, telemetryFactory, pluginService, hostListProviderService, pluginManagerService); + FullServicesContainer servicesContainer = new FullServicesContainerImpl( + storageService, + monitorService, + telemetryFactory, + connectionPluginManager, + hostListProviderService, + pluginService, + pluginManagerService + ); + + init(props, servicesContainer, defaultConnectionProvider, driverDialect); } - protected void init( - final Properties props, - final ConnectionPluginManager connectionPluginManager, - final TelemetryFactory telemetryFactory, - final PluginService pluginService, - final HostListProviderService hostListProviderService, - final PluginManagerService pluginManagerService) throws SQLException { - this.pluginManager = connectionPluginManager; - this.telemetryFactory = telemetryFactory; - this.pluginService = pluginService; - this.hostListProviderService = hostListProviderService; - this.pluginManagerService = pluginManagerService; - - this.pluginManager.init( - this.pluginService, props, pluginManagerService, this.configurationProfile); + protected void init(final Properties props, + final FullServicesContainer servicesContainer, + final ConnectionProvider defaultConnectionProvider, + final TargetDriverDialect driverDialect) throws SQLException { + this.pluginManager = servicesContainer.getConnectionPluginManager(); + this.telemetryFactory = servicesContainer.getTelemetryFactory(); + this.pluginService = servicesContainer.getPluginService(); + this.hostListProviderService = servicesContainer.getHostListProviderService(); + this.pluginManagerService = servicesContainer.getPluginManagerService(); + + this.pluginManager.init(servicesContainer, props, pluginManagerService, this.configurationProfile); final HostListProviderSupplier supplier = this.pluginService.getDialect().getHostListProvider(); if (supplier != null) { - final HostListProvider provider = supplier.getProvider( - props, this.originalUrl, this.hostListProviderService, this.pluginService); + final HostListProvider provider = supplier.getProvider(props, this.originalUrl, servicesContainer); hostListProviderService.setHostListProvider(provider); } diff --git a/wrapper/src/main/resources/aws_advanced_jdbc_wrapper_messages.properties b/wrapper/src/main/resources/aws_advanced_jdbc_wrapper_messages.properties index 2e327a87b..f60196f1b 100644 --- a/wrapper/src/main/resources/aws_advanced_jdbc_wrapper_messages.properties +++ b/wrapper/src/main/resources/aws_advanced_jdbc_wrapper_messages.properties @@ -14,7 +14,12 @@ # limitations under the License. # -# ADFS Credentials Provider Getter +AbstractMonitor.interruptedWhileTerminating=Interrupted while awaiting termination of monitor ''{0}''. The monitor will be forcefully shut down. +AbstractMonitor.monitorTerminationTimeout=Timed out after waiting {0} seconds for monitor ''{1}'' to terminate gracefully. The monitor will be forcefully shut down. +AbstractMonitor.startingMonitor=Starting monitor: ''{0}''. +AbstractMonitor.stoppingMonitor=Stopping monitor: ''{0}''. +AbstractMonitor.unexpectedError=A monitor encountered an unexpected exception. Monitor: ''{0}''. Exception: ''{1}''. + AdfsCredentialsProviderFactory.failedLogin=Failed login. 
Could not obtain SAML Assertion from ADFS SignOn Page POST response: \n''{0}'' AdfsCredentialsProviderFactory.invalidHttpsUrl=Invalid HTTPS URL: ''{0}'' AdfsCredentialsProviderFactory.signOnPagePostActionUrl=ADFS SignOn Action URL: ''{0}'' @@ -22,15 +27,10 @@ AdfsCredentialsProviderFactory.signOnPagePostActionRequestFailed=ADFS SignOn Pag AdfsCredentialsProviderFactory.signOnPageRequestFailed=ADFS SignOn Page Request Failed with HTTP status ''{0}'', reason phrase ''{1}'', and response ''{2}'' AdfsCredentialsProviderFactory.signOnPageUrl=ADFS SignOn URL: ''{0}'' -# Aurora Host List Connection Plugin -AuroraHostListConnectionPlugin.providerAlreadySet=Another dynamic host list provider has already been set: {0}. - -Authentication.unsupportedHostname=Unsupported AWS hostname {0}. Amazon domain name in format *.AWS-Region.rds.amazonaws.com or *.rds.AWS-Region.amazonaws.com.cn is expected. AuthenticationToken.useCachedToken=Use cached authentication token = ''{0}'' AuthenticationToken.generatedNewToken=Generated new authentication token = ''{0}'' AuthenticationToken.javaSdkNotInClasspath=Required dependency 'AWS Java SDK RDS v2.x' is not on the classpath. -# Aurora Host List Provider RdsHostListProvider.clusterInstanceHostPatternNotSupportedForRDSProxy=An RDS Proxy url can''t be used as the 'clusterInstanceHostPattern' configuration setting. RdsHostListProvider.clusterInstanceHostPatternNotSupportedForRdsCustom=A custom RDS url can''t be used as the 'clusterInstanceHostPattern' configuration setting. RdsHostListProvider.invalidPattern=Invalid value for the 'clusterInstanceHostPattern' configuration setting - the host pattern must contain a '?' character as a placeholder for the DB instance identifiers of the instances in the cluster. @@ -42,10 +42,8 @@ RdsHostListProvider.errorGettingHostRole=An error occurred while obtaining the c RdsHostListProvider.errorIdentifyConnection=An error occurred while obtaining the connection's host ID. RdsHostListProvider.errorGettingNetworkTimeout=An error occurred while getting the connection network timeout: {0} -# AWS SDK AwsSdk.unsupportedRegion=Unsupported AWS region ''{0}''. For supported regions please read https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html -# AWS Secrets Manager Connection Plugin AwsSecretsManagerConnectionPlugin.endpointOverrideMisconfigured=The provided endpoint is invalid and could not be used to create a URI: `{0}`. AwsSecretsManagerConnectionPlugin.endpointOverrideInvalidConnection=A connection to the provided endpoint could not be established: `{0}`. AwsSecretsManagerConnectionPlugin.javaSdkNotInClasspath=Required dependency 'AWS Java SDK for AWS Secrets Manager' is not on the classpath. @@ -54,12 +52,10 @@ AwsSecretsManagerConnectionPlugin.failedToFetchDbCredentials=Was not able to eit AwsSecretsManagerConnectionPlugin.missingRequiredConfigParameter=Configuration parameter ''{0}'' is required. AwsSecretsManagerConnectionPlugin.unhandledException=Unhandled exception: ''{0}'' -# AWS Wrapper Data Source AwsWrapperDataSource.missingJdbcProtocol=Missing JDBC protocol. Could not construct URL. AwsWrapperDataSource.missingTarget=JDBC url or Server name is required. AwsWrapperDataSource.configurationProfileNotFound=Configuration profile ''{0}'' not found. -# Cluster Aware Reader Failover Handler ClusterAwareReaderFailoverHandler.interruptedThread=Thread was interrupted. 
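
The ConnectionWrapper hunk above replaces the list of individually injected services (telemetry factory, plugin service, host list provider service, and so on) with a single FullServicesContainer that is populated while the wrapper initializes. The sketch below is illustrative only and not part of the diff; it shows how code that receives the container can pull its dependencies from it. ExampleServicesConsumer is a hypothetical class, while the getter names and import paths are the ones that appear in this diff.

    import software.amazon.jdbc.PluginService;
    import software.amazon.jdbc.util.FullServicesContainer;
    import software.amazon.jdbc.util.storage.StorageService;
    import software.amazon.jdbc.util.telemetry.TelemetryFactory;

    // Hypothetical consumer class; only the container getters are taken from this diff.
    public class ExampleServicesConsumer {
      private final PluginService pluginService;
      private final TelemetryFactory telemetryFactory;
      private final StorageService storageService;

      public ExampleServicesConsumer(final FullServicesContainer servicesContainer) {
        // getPluginService() and getTelemetryFactory() mirror the calls ConnectionWrapper.init()
        // now makes; getStorageService() appears in the test changes later in this diff.
        this.pluginService = servicesContainer.getPluginService();
        this.telemetryFactory = servicesContainer.getTelemetryFactory();
        this.storageService = servicesContainer.getStorageService();
      }
    }
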
ClusterAwareReaderFailoverHandler.attemptingReaderConnection=Trying to connect to host: ''{0}'', with properties ''{1}'' ClusterAwareReaderFailoverHandler.readerRequired=Connected to host ''{0}'' but it has a host role of ''{1}'' and does not meet the strict-reader requirement. The connection will be closed. @@ -69,7 +65,6 @@ ClusterAwareReaderFailoverHandler.failedReaderConnection=Failed to connect to ho ClusterAwareReaderFailoverHandler.invalidTopology=''{0}'' was called with an invalid (null or empty) topology. ClusterAwareReaderFailoverHandler.timeout=Reader failover timed out after {0}ms. -# Cluster Aware Writer Failover Handler ClusterAwareWriterFailoverHandler.interruptedThread=Thread was interrupted. ClusterAwareWriterFailoverHandler.successfullyReconnectedToWriterInstance=Successfully re-connected to the current writer instance: ''{0}'' ClusterAwareWriterFailoverHandler.failedToConnectToWriterInstance=Failed to connect to the writer instance. @@ -82,81 +77,61 @@ ClusterAwareWriterFailoverHandler.taskBAttemptConnectionToNewWriterInstance=[Tas ClusterAwareWriterFailoverHandler.taskBFinished=[TaskB] Finished ClusterAwareWriterFailoverHandler.taskBConnectedToReader=[TaskB] Connected to reader: ''{0}'' ClusterAwareWriterFailoverHandler.taskBFailedToConnectToAnyReader=[TaskB] Failed to connect to any reader. -ClusterAwareWriterFailoverHandler.taskBTopologyObtained=[TaskB] Topology obtained: {0} ClusterAwareWriterFailoverHandler.taskBAttemptConnectionToNewWriter=[TaskB] Trying to connect to a new writer: ''{0}'' ClusterAwareWriterFailoverHandler.taskBEncounteredException=[TaskB] encountered an exception: {0} ClusterAwareWriterFailoverHandler.taskAEncounteredException=[TaskA] encountered an exception: {0} ClusterAwareWriterFailoverHandler.standaloneNode=[TaskB] Host {0} is not yet connected to a cluster. The cluster is still being reconfigured. ClusterAwareWriterFailoverHandler.alreadyWriter=Current reader connection is actually a new writer connection. -# Connection String Host List Provider ConnectionStringHostListProvider.parsedListEmpty=Can''t parse connection string: ''{0}''. ConnectionStringHostListProvider.unsupportedIdentifyConnection=ConnectionStringHostListProvider does not support identifyConnection. -# Connection Plugin Manager ConnectionPluginManager.releaseResources=Releasing resources. ConnectionPluginManager.unknownPluginCode=Unknown plugin code: ''{0}''. ConnectionPluginManager.unableToLoadPlugin=Unable to load connection plugin factory: ''{0}''. ConnectionPluginManager.invokedAgainstOldConnection=The internal connection has changed since ''{0}'' was created. This is likely due to failover or read-write splitting functionality. To ensure you are using the updated connection, please re-create Statement and ResultSet objects after failover and/or calling setReadOnly. -# Connection Provider ConnectionProvider.noConnection=The target driver did not return a connection. ConnectionProvider.unsupportedHostSpecSelectorStrategy=Unsupported host selection strategy ''{0}'' specified for this connection provider ''{1}''. Please visit the documentation for all supported strategies. -# Connection Url Builder ConnectionUrlBuilder.missingJdbcProtocol=Missing JDBC protocol and/or host name. Could not construct URL. -# Connection Url Parser ConnectionUrlParser.protocolNotFound=Url should contain a driver protocol. Protocol is not found in url: ''{0}'' -# Connect Time Connection Plugin ConnectTimeConnectionPlugin.connectTime=Connected in {0} nanos. 
-# Connection Wrapper ConnectionWrapper.unclosedConnectionInstantiated=Unclosed connection was instantiated at this point: ConnectionWrapper.connectionNotOpen=Initial connection isn't open. ConnectionWrapper.finalizingUnclosedConnection=Finalizing a connection that was never closed. -# Console Consumer ConsoleConsumer.unexpectedOutputType=Unexpected outputType: ''{0}''. CredentialsProviderFactory.failedToInitializeHttpClient=Failed to initialize HttpClient. CredentialsProviderFactory.unsupportedIdp=Unsupported Identity Provider ''{0}''. Please visit to the documentation for supported Identity Providers. -# Custom Endpoint Monitor Impl CustomEndpointMonitorImpl.clearCache=Clearing info in the custom endpoint monitor info cache. CustomEndpointMonitorImpl.detectedChangeInCustomEndpointInfo=Detected change in custom endpoint info for ''{0}'':\n{1} CustomEndpointMonitorImpl.exception=Encountered an exception while monitoring custom endpoint ''{0}''. CustomEndpointMonitorImpl.interrupted=Custom endpoint monitor for ''{0}'' was interrupted. -CustomEndpointMonitorImpl.interruptedWhileTerminating=Interrupted while awaiting termination of custom endpoint monitor for ''{0}''. The monitor will be forcefully shut down. -CustomEndpointMonitorImpl.monitorTerminationTimeout=Timed out after waiting {0} seconds for custom endpoint monitor for ''{1}'' to terminate gracefully. The monitor will be forcefully shut down. CustomEndpointMonitorImpl.startingMonitor=Starting custom endpoint monitor for ''{0}''. CustomEndpointMonitorImpl.stoppedMonitor=Stopped custom endpoint monitor for ''{0}''. -CustomEndpointMonitorImpl.stoppingMonitor=Stopping custom endpoint monitor for ''{0}''. CustomEndpointMonitorImpl.unexpectedNumberOfEndpoints=Unexpected number of custom endpoints with endpoint identifier ''{0}'' in region ''{1}''. Expected 1, but found {2}. Endpoints:\n{3}. -# Custom Endpoint Plugin CustomEndpointPlugin.timedOutWaitingForCustomEndpointInfo=The custom endpoint plugin timed out after {0}ms while waiting for custom endpoint info for host ''{1}''. -CustomEndpointPlugin.closeMonitors=Closing custom endpoint monitors. Active custom endpoint monitors will be stopped, closed, and removed from the monitors cache. CustomEndpointPlugin.connectionRequestToCustomEndpoint=Detected a connection request to a custom endpoint URL: ''{0}''. CustomEndpointPlugin.errorParsingEndpointIdentifier=Unable to parse custom endpoint identifier from URL: ''{0}''. -CustomEndpointPlugin.foundInfoInCache=Done waiting for custom endpoint info for ''{0}'':\n{1} CustomEndpointPlugin.interruptedThread=The custom endpoint plugin was interrupted while waiting for custom endpoint info for host ''{0}''. CustomEndpointPlugin.unableToDetermineRegion=Unable to determine connection region. If you are using a non-standard RDS URL, please set the ''{0}'' property. CustomEndpointPlugin.waitingForCustomEndpointInfo=Custom endpoint info for ''{0}'' was not found. Waiting {1}ms for the endpoint monitor to fetch info... -# Custom Endpoint Plugin Factory CustomEndpointPluginFactory.awsSdkNotInClasspath=Required dependency 'AWS Java SDK RDS v2.x' is not on the classpath. -# Data Cache Connection Plugin DataCacheConnectionPlugin.queryResultsCached=[{0}] Query results will be cached: {1} -# Default Connection Plugin DefaultConnectionPlugin.executingMethod=Executing method: ''{0}'' DefaultConnectionPlugin.noHostsAvailable=The default connection plugin received an empty host list from the plugin service. 
DefaultConnectionPlugin.unknownRoleRequested=A HostSpec with a role of HostRole.UNKNOWN was requested via getHostSpecByStrategy. The requested role must be either HostRole.WRITER or HostRole.READER -# Driver Driver.nullUrl=Url is null. Driver.alreadyRegistered=Driver is already registered. It can only be registered once. Driver.missingDriver=Can''t find the target driver for ''{0}''. Please ensure the target driver is in the classpath and is registered. Here is the list of registered drivers in the classpath: {1} @@ -164,13 +139,14 @@ Driver.notRegistered=Driver is not registered (or it has not been registered usi Driver.urlParsingFailed=Url [{0}] parsing failed with error: [{1}] Driver.configurationProfileNotFound=Configuration profile ''{0}'' not found. -# DataSource DataSource.failedToSetProperty=Failed to set property ''{0}'' on target datasource ''{1}''. -# Execution Time Connection Plugin ExecutionTimeConnectionPlugin.executionTime=Executed {0} in {1} nanos. -# Failover Connection Plugin +ExpirationCache.exceptionWhileRemovingEntry=An exception occurred while removing entry with key ''{0}'' and value ''{1}'': ''{2}''. + +ExternallyManagedCache.extendExpirationOnNonExistingKey=A request was made to extend the expiration of the entry at key ''{0}'', but the key does not exist. + Failover.transactionResolutionUnknownError=Transaction resolution unknown. Please re-configure session state if required and try restarting the transaction. Failover.connectionClosedExplicitly=Unable to failover, the connection has been explicitly closed. Failover.connectionChangedError=The active SQL connection has changed due to a connection failure. Please re-configure session state if required. @@ -203,42 +179,32 @@ Failover.failedReaderConnection=[Reader Failover] Failed to connect to host: ''{ Failover.errorSelectingReaderHost=An error occurred while attempting to select a reader host candidate: ''{0}''. Candidates: Failover.skipFailoverOnInterruptedThread=Do not start failover since the current thread is interrupted. -# Federated Auth Plugin FederatedAuthPlugin.unableToDetermineRegion=Unable to determine connection region. If you are using a non-standard RDS URL, please set the ''{0}'' property. -# HikariPooledConnectionProvider -HikariPooledConnectionProvider.errorConnectingWithDataSource=Unable to connect to ''{0}'' using the Hikari data source. -HikariPooledConnectionProvider.errorConnectingWithDataSourceWithCause=Unable to connect to ''{0}'' using the Hikari data source. Exception message: ''{1}'' - -# Host Availability Strategy HostAvailabilityStrategy.invalidMaxRetries=Invalid value of {0} for configuration parameter `hostAvailabilityStrategyMaxRetries`. It must be an integer greater than 1. HostAvailabilityStrategy.invalidInitialBackoffTime=Invalid value of {0} for configuration parameter `hostAvailabilityStrategyInitialBackoffTime`. It must be an integer greater than 1. -# Host Monitoring Connection Plugin HostMonitoringConnectionPlugin.activatedMonitoring=Executing method ''{0}'', monitoring is activated. HostMonitoringConnectionPlugin.monitoringDeactivated=Monitoring deactivated for method ''{0}''. HostMonitoringConnectionPlugin.unavailableNode=Node ''{0}'' is unavailable. HostMonitoringConnectionPlugin.errorIdentifyingConnection=Error occurred while identifying connection: ''{0}''. HostMonitoringConnectionPlugin.unableToIdentifyConnection=Unable to identify the given connection: ''{0}'', please ensure the correct host list provider is specified. The host list provider in use is: ''{1}''. 
-# HostSelector HostSelector.noHostsMatchingRole=No hosts were found matching the requested ''{0}'' role. HostSelector.roundRobinInvalidHostWeightPairs=The provided host weight pairs have not been configured correctly. Please ensure the provided host weight pairs is a comma separated list of pairs, each pair in the format of <host>:<weight>. Weight values must be an integer greater than or equal to the default weight value of 1. HostSelector.roundRobinInvalidDefaultWeight=The provided default weight value is not valid. Weight values must be an integer greater than or equal to the default weight value of 1. -# IAM Auth Connection Plugin IamAuthConnectionPlugin.unhandledException=Unhandled exception: ''{0}'' IamAuthConnectionPlugin.connectException=Error occurred while opening a connection: ''{0}'' IamAuthConnectionPlugin.unableToDetermineRegion=Unable to determine connection region. If you are using a non-standard RDS URL, please set the ''{0}'' property. -# Limitless Connection Plugin +PartialPluginService.unexpectedMethodCall=Calling ''{0}()'' is not supported with this PluginService. + LimitlessConnectionPlugin.failedToConnectToHost=Failed to connect to host {0}. LimitlessConnectionPlugin.unsupportedDialectOrDatabase=Unsupported dialect ''{0}'' encountered. Please ensure JDBC connection parameters are correct, and refer to the documentation to ensure that the connecting database is compatible with the Limitless Connection Plugin. -# Limitless Query Helper LimitlessQueryHelper.unsupportedDialectOrDatabase=Unsupported dialect ''{0}'' encountered. Please ensure JDBC connection parameters are correct, and refer to the documentation to ensure that the connecting database is compatible with the Limitless Connection Plugin. -# Limitless Router Monitor LimitlessRouterMonitor.exceptionDuringMonitoringStop=Stopping monitoring after unhandled exception was thrown in Limitless Router Monitoring thread for node {0}. LimitlessRouterMonitor.interruptedExceptionDuringMonitoring=Limitless Router Monitoring thread for node {0} was interrupted. LimitlessRouterMonitor.invalidQuery=Limitless Connection Plugin has encountered an error obtaining Limitless Router endpoints. Please ensure that you are connecting to an Aurora Limitless Database Shard Group Endpoint URL. @@ -249,7 +215,6 @@ LimitlessRouterMonitor.openedConnection=Opened Limitless Router Monitor connecti LimitlessRouterMonitor.running=Limitless Router Monitor thread running on node {0}. LimitlessRouterMonitor.stopped=Limitless Router Monitor thread stopped on node {0}. -# Limitless Router Service LimitlessRouterServiceImpl.connectWithHost=Connecting to host {0}. LimitlessRouterServiceImpl.errorClosingMonitor=An error occurred while closing Limitless Router Monitor: {0} LimitlessRouterServiceImpl.errorStartingMonitor=An error occurred while starting Limitless Router Monitor: {0} @@ -268,32 +233,38 @@ LimitlessRouterServiceImpl.selectedHostForRetry=Host {0} has been selected for c LimitlessRouterServiceImpl.synchronouslyGetLimitlessRouters=Fetching Limitless Routers synchronously. LimitlessRouterServiceImpl.usingProvidedConnectUrl=Connecting using provided connection URL. -# Log Query Connection Plugin LogQueryConnectionPlugin.executingQuery=[{0}] Executing query: {1} -# Monitor Connection Context -MonitorConnectionContext.exceptionAbortingConnection=Exception during aborting connection: {0} -MonitorConnectionContext.hostDead=Host {0} is *dead*. -MonitorConnectionContext.hostNotResponding=Host {0} is not *responding* {1}.
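
For reference, the HostSelector.roundRobinInvalidHostWeightPairs message above expects a comma-separated list of <host>:<weight> pairs. The short sketch below shows such a value; the property name roundRobinHostWeightPairs does not appear in this diff and is taken from the plugin documentation, so treat it as an assumption.

    import java.util.Properties;

    public class RoundRobinWeightsSketch {
      public static Properties weightedProps() {
        final Properties props = new Properties();
        // Each pair follows the <host>:<weight> format described in the
        // HostSelector.roundRobinInvalidHostWeightPairs message; the property name is assumed.
        props.setProperty("roundRobinHostWeightPairs", "instance-1:3,instance-2:1");
        return props;
      }
    }
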
-MonitorConnectionContext.hostAlive=Host {0} is *alive*. - -# Monitor Thread Container -MonitorThreadContainer.emptyNodeKeys=Provided node keys are empty. - -# Monitor Impl -MonitorImpl.contextNullWarning=Parameter 'context' should not be null. -MonitorImpl.interruptedExceptionDuringMonitoring=Monitoring thread for node {0} was interrupted. -MonitorImpl.exceptionDuringMonitoringContinue=Continuing monitoring after unhandled exception was thrown in monitoring thread for node {0}. -MonitorImpl.exceptionDuringMonitoringStop=Stopping monitoring after unhandled exception was thrown in monitoring thread for node {0}. -MonitorImpl.monitorIsStopped=Monitoring was already stopped for node {0}. -MonitorImpl.stopped=Stopped monitoring thread for node ''{0}''. -MonitorImpl.startMonitoringThreadNewContext=Start monitoring thread for checking new contexts for {0}. -MonitorImpl.stopMonitoringThreadNewContext=Stop monitoring thread for checking new contexts for {0}. -MonitorImpl.startMonitoringThread=Start monitoring thread for {0}. -MonitorImpl.stopMonitoringThread=Stop monitoring thread for {0}. - -# Monitor Service Impl -MonitorServiceImpl.emptyAliasSet=Empty alias set passed for ''{0}''. Set should not be empty. +HostMonitorConnectionContext.exceptionAbortingConnection=Exception during aborting connection: {0} +HostMonitorConnectionContext.hostDead=Host {0} is *dead*. +HostMonitorConnectionContext.hostNotResponding=Host {0} is not *responding* {1}. +HostMonitorConnectionContext.hostAlive=Host {0} is *alive*. + +HostMonitorThreadContainer.emptyNodeKeys=Provided node keys are empty. + +HostMonitorImpl.contextNullWarning=Parameter 'context' should not be null. +HostMonitorImpl.interruptedExceptionDuringMonitoring=Monitoring thread for node {0} was interrupted. +HostMonitorImpl.exceptionDuringMonitoringContinue=Continuing monitoring after unhandled exception was thrown in monitoring thread for node {0}. +HostMonitorImpl.exceptionDuringMonitoringStop=Stopping monitoring after unhandled exception was thrown in monitoring thread for node {0}. +HostMonitorImpl.monitorIsStopped=Monitoring was already stopped for node {0}. +HostMonitorImpl.stopped=Stopped monitoring thread for node ''{0}''. +HostMonitorImpl.startMonitoringThreadNewContext=Start monitoring thread for checking new contexts for {0}. +HostMonitorImpl.stopMonitoringThreadNewContext=Stop monitoring thread for checking new contexts for {0}. +HostMonitorImpl.startMonitoringThread=Start monitoring thread for {0}. +HostMonitorImpl.stopMonitoringThread=Stop monitoring thread for {0}. + +HostMonitorServiceImpl.emptyAliasSet=Empty alias set passed for ''{0}''. Set should not be empty. + +MonitorServiceImpl.checkingMonitors=Checking monitors for errors... +MonitorServiceImpl.monitorClassMismatch=The monitor stored at ''{0}'' did not have the expected type. The expected type was ''{1}'', but the monitor ''{2}'' had a type of ''{3}''. +MonitorServiceImpl.monitorStuck=Monitor ''{0}'' has not been updated within the inactive timeout of {1} milliseconds. The monitor will be stopped. +MonitorServiceImpl.monitorTypeNotRegistered=The given monitor class ''{0}'' is not registered. Please register the monitor class before running monitors of that class with the monitor service. +MonitorServiceImpl.removedExpiredMonitor=Removed expired monitor: ''{0}''. +MonitorServiceImpl.removedErrorMonitor=Removed monitor in error state: ''{0}''. +MonitorServiceImpl.recreatingMonitor=Recreating monitor: ''{0}''. 
+MonitorServiceImpl.stopAndRemoveMissingMonitorType=The monitor service received a request to stop a monitor with type ''{0}'' and key ''{1}'', but the monitor service does not have any monitors registered under the given type. Please ensure monitors are registered under the correct type. +MonitorServiceImpl.stopAndRemoveMonitorsMissingType=The monitor service received a request to stop all monitors with type ''{0}'', but the monitor service does not have any monitors registered under the given type. Please ensure monitors are registered under the correct type. +MonitorServiceImpl.unexpectedMonitorClass=Monitor type mismatch - the monitor ''{0}'' was unexpectedly found under the ''{1}'' monitor class category. Please verify that monitors are submitted under their concrete class. NodeMonitoringThread.detectedWriter=Writer detected by node monitoring thread: ''{0}''. NodeMonitoringThread.invalidWriterQuery=The writer topology query is invalid: {0} @@ -308,7 +279,6 @@ OktaCredentialsProviderFactory.unableToOpenHttpClient=Unable to open an HTTP cli OktaCredentialsProviderFactory.invalidSamlResponse=The SAML Assertion request did not return a valid response containing a SAMLResponse. OktaCredentialsProviderFactory.samlRequestFailed=Okta SAML Assertion request failed with HTTP status ''{0}'', reason phrase ''{1}'', and response ''{2}'' -# Plugin Service Impl PluginServiceImpl.currentHostNotAllowed=The current host is not in the list of allowed hosts. Current host: ''{0}''. Allowed hosts: {1} PluginServiceImpl.hostListEmpty=Current host list is empty. PluginServiceImpl.releaseResources=Releasing resources. @@ -318,12 +288,10 @@ PluginServiceImpl.failedToRetrieveHostPort=Could not retrieve Host:Port for conn PluginServiceImpl.nonEmptyAliases=fillAliases called when HostSpec already contains the following aliases: ''{0}''. PluginServiceImpl.requiredBlockingHostListProvider=The detected host list provider is not a BlockingHostListProvider. A BlockingHostListProvider is required to force refresh the host list. Detected host list provider: {0} -# Property Utils PropertyUtils.setMethodDoesNotExistOnTarget=Set method for property ''{0}'' does not exist on target ''{1}''. PropertyUtils.failedToSetProperty=Failed to set property ''{0}'' on target ''{1}''. PropertyUtils.failedToSetPropertyWithReason=Failed to set property ''{0}'' on target ''{1}''. {2} -# Read Write Splitting Plugin ReadWriteSplittingPlugin.setReadOnlyOnClosedConnection=setReadOnly cannot be called on a closed connection. ReadWriteSplittingPlugin.errorSwitchingToCachedReader=An error occurred while trying to switch to a cached reader connection: ''{0}''. The driver will attempt to establish a new reader connection. ReadWriteSplittingPlugin.errorSwitchingToCachedReaderWithCause=An error occurred while trying to switch to a cached reader connection: ''{0}''. Error message: ''{1}''. The driver will attempt to establish a new reader connection. @@ -353,12 +321,16 @@ SAMLCredentialsProviderFactory.getSamlAssertionFailed=Failed to get SAML Asserti SamlAuthPlugin.javaStsSdkNotInClasspath=Required dependency 'AWS Java SDK for AWS Secret Token Service' is not on the classpath. SamlAuthPlugin.unhandledException=Unhandled exception: ''{0}'' -# Wrapper Utils +ServicesContainerPluginFactory.servicesContainerRequired=The {0} requires a FullServicesContainer. Please use getInstance(FullServicesContainer, Properties) instead. 
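
The properties above rename the enhanced-failure-monitoring keys (MonitorImpl.*, MonitorConnectionContext.*, MonitorThreadContainer.*, and MonitorServiceImpl.emptyAliasSet) to HostMonitor*-prefixed keys, freeing the MonitorServiceImpl prefix for the new monitor service messages. A minimal sketch of a caller updated for one renamed key follows; the class, logger, and node value are placeholders, and the Messages.get(String, Object[]) overload is assumed to resolve keys from this resource bundle the same way the existing single-argument Messages.get call in ConnectionWrapper does.

    import java.util.logging.Logger;
    import software.amazon.jdbc.util.Messages;

    public class HostMonitorLoggingSketch {
      private static final Logger LOGGER = Logger.getLogger(HostMonitorLoggingSketch.class.getName());

      void logMonitoringStart(final String node) {
        // Previously keyed as "MonitorImpl.startMonitoringThread"; the key is now
        // "HostMonitorImpl.startMonitoringThread", as shown in the renamed properties above.
        LOGGER.finest(Messages.get("HostMonitorImpl.startMonitoringThread", new Object[] {node}));
      }
    }
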
+ +StorageServiceImpl.unexpectedValueMismatch=Attempted to store value ''{0}'' under item class ''{1}'' but there was an unexpected mismatch between the passed in value type and the expected value type. The cache for item class ''{1}'' is ''{2}''. +StorageServiceImpl.itemClassNotRegistered=The given item class ''{0}'' is not registered. Please register the item class before storing items of that class. +StorageServiceImpl.itemClassMismatch=The item stored at ''{0}'' did not have the expected type. The expected type was ''{1}'', but the stored item ''{2}'' had a type of ''{3}''. Returning null. +StorageServiceImpl.removeExpiredItems=Removing expired items from the storage service... + WrapperUtils.noWrapperClassExists=No wrapper class exists for ''{0}''. WrapperUtils.failedToInitializeClass=Can''t initialize class ''{0}''. -# Aurora Stale DNS -AuroraStaleDnsPlugin.requireDynamicProvider=Dynamic host list provider is required. AuroraStaleDnsHelper.clusterEndpointDns=Cluster endpoint resolves to {0}. AuroraStaleDnsHelper.currentWriterNotAllowed=The current writer is not in the list of allowed hosts. Current host: ''{0}''. Allowed hosts: {1} AuroraStaleDnsHelper.writerHostSpec=Writer host: {0} @@ -366,17 +338,13 @@ AuroraStaleDnsHelper.writerInetAddress=Writer host address: {0} AuroraStaleDnsHelper.staleDnsDetected=Stale DNS data detected. Opening a connection to ''{0}''. AuroraStaleDnsHelper.reset=Reset stored writer host. -# Opened Connection Tracker OpenedConnectionTracker.invalidatingConnections=Invalidating opened connections to host: ''{0}'' -# Util Utils.topology={0} \n{1} -# Dialect Manager DialectManager.unknownDialectCode=Unknown dialect code: ''{0}''. DialectManager.unknownDialect=Database dialect can''t be identified. Use configuration parameter ''wrapperDialect'' to configure it. -# Target Driver Dialect Manager TargetDriverDialectManager.unknownDialectCode=Unknown target driver dialect code: ''{0}''. TargetDriverDialectManager.unknownProtocol=Can not find a driver to register for protocol ''{0}''. TargetDriverDialectManager.customDialectNotSupported=Provided custom target driver dialect will be ignored. @@ -386,11 +354,9 @@ TargetDriverDialect.unsupported=This target driver dialect does not support this MysqlConnectorJDriverHelper.canNotRegister=Can''t register driver com.mysql.cj.jdbc.Driver. MariadbDriverHelper.canNotRegister=Can''t register driver org.mariadb.jdbc.Driver. -# Aurora Initial Connection Strategy Plugin AuroraInitialConnectionStrategyPlugin.unsupportedStrategy=Unsupported host selection strategy ''{0}''. AuroraInitialConnectionStrategyPlugin.requireDynamicProvider=Dynamic host list provider is required. -# Fastest Response Time Strategy Plugin NodeResponseTimeMonitor.stopped=Stopped Response time thread for node ''{0}''. NodeResponseTimeMonitor.responseTime=Response time for ''{0}'': {1} ms NodeResponseTimeMonitor.interruptedExceptionDuringMonitoring=Response time thread for node {0} was interrupted. @@ -398,7 +364,6 @@ NodeResponseTimeMonitor.exceptionDuringMonitoringStop=Stopping thread after unha NodeResponseTimeMonitor.openingConnection=Opening a Response time connection to ''{0}''. NodeResponseTimeMonitor.openedConnection=Opened Response time connection: {0}. -# Monitoring RDS HostList Provider ClusterTopologyMonitorImpl.startMonitoringThread=Start cluster topology monitoring thread for ''{0}''. ClusterTopologyMonitorImpl.stopMonitoringThread=Stop cluster topology monitoring thread for ''{0}''. 
ClusterTopologyMonitorImpl.exceptionDuringMonitoringStop=Stopping cluster topology monitoring after unhandled exception was thrown in monitoring thread for node ''{0}''. diff --git a/wrapper/src/test/java/integration/container/aurora/TestAuroraHostListProvider.java b/wrapper/src/test/java/integration/container/aurora/TestAuroraHostListProvider.java index b67028810..c35f6b0f8 100644 --- a/wrapper/src/test/java/integration/container/aurora/TestAuroraHostListProvider.java +++ b/wrapper/src/test/java/integration/container/aurora/TestAuroraHostListProvider.java @@ -17,18 +17,14 @@ package integration.container.aurora; import java.util.Properties; -import software.amazon.jdbc.HostListProviderService; -import software.amazon.jdbc.PluginService; import software.amazon.jdbc.hostlistprovider.AuroraHostListProvider; +import software.amazon.jdbc.util.FullServicesContainer; public class TestAuroraHostListProvider extends AuroraHostListProvider { public TestAuroraHostListProvider( - HostListProviderService hostListProviderService, - Properties properties, - String originalUrl) { - - super(properties, originalUrl, hostListProviderService, "", "", ""); + FullServicesContainer servicesContainer, Properties properties, String originalUrl) { + super(properties, originalUrl, servicesContainer, "", "", ""); } public static void clearCache() { diff --git a/wrapper/src/test/java/integration/container/aurora/TestPluginServiceImpl.java b/wrapper/src/test/java/integration/container/aurora/TestPluginServiceImpl.java index 4a54ee984..b356eb391 100644 --- a/wrapper/src/test/java/integration/container/aurora/TestPluginServiceImpl.java +++ b/wrapper/src/test/java/integration/container/aurora/TestPluginServiceImpl.java @@ -19,21 +19,21 @@ import java.sql.SQLException; import java.util.Properties; import org.checkerframework.checker.nullness.qual.NonNull; -import software.amazon.jdbc.ConnectionPluginManager; import software.amazon.jdbc.PluginServiceImpl; import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; +import software.amazon.jdbc.util.FullServicesContainer; public class TestPluginServiceImpl extends PluginServiceImpl { public TestPluginServiceImpl( - @NonNull ConnectionPluginManager pluginManager, + @NonNull FullServicesContainer servicesContainer, @NonNull Properties props, @NonNull String originalUrl, String targetDriverProtocol, @NonNull final TargetDriverDialect targetDriverDialect) throws SQLException { - super(pluginManager, props, originalUrl, targetDriverProtocol, targetDriverDialect); + super(servicesContainer, props, originalUrl, targetDriverProtocol, targetDriverDialect); } public static void clearHostAvailabilityCache() { diff --git a/wrapper/src/test/java/integration/container/tests/AdvancedPerformanceTest.java b/wrapper/src/test/java/integration/container/tests/AdvancedPerformanceTest.java index 262cf8066..056b3485f 100644 --- a/wrapper/src/test/java/integration/container/tests/AdvancedPerformanceTest.java +++ b/wrapper/src/test/java/integration/container/tests/AdvancedPerformanceTest.java @@ -65,8 +65,8 @@ import org.junit.jupiter.api.extension.ExtendWith; import org.junit.jupiter.params.provider.Arguments; import software.amazon.jdbc.PropertyDefinition; -import software.amazon.jdbc.plugin.efm.MonitorThreadContainer; -import software.amazon.jdbc.plugin.efm2.MonitorServiceImpl; +import software.amazon.jdbc.plugin.efm.HostMonitorThreadContainer; +import software.amazon.jdbc.plugin.efm2.HostMonitorServiceImpl; import software.amazon.jdbc.plugin.failover.FailoverSuccessSQLException; import 
software.amazon.jdbc.util.StringUtils; @@ -686,8 +686,8 @@ private void ensureClusterHealthy() throws InterruptedException { TestAuroraHostListProvider.clearCache(); TestPluginServiceImpl.clearHostAvailabilityCache(); - MonitorThreadContainer.releaseInstance(); - MonitorServiceImpl.closeAllMonitors(); + HostMonitorThreadContainer.releaseInstance(); + HostMonitorServiceImpl.closeAllMonitors(); } private static Stream generateParams() { diff --git a/wrapper/src/test/java/integration/container/tests/PerformanceTest.java b/wrapper/src/test/java/integration/container/tests/PerformanceTest.java index 8df6c1669..2519cd041 100644 --- a/wrapper/src/test/java/integration/container/tests/PerformanceTest.java +++ b/wrapper/src/test/java/integration/container/tests/PerformanceTest.java @@ -64,8 +64,8 @@ import software.amazon.jdbc.hostlistprovider.RdsHostListProvider; import software.amazon.jdbc.hostlistprovider.monitoring.MonitoringRdsHostListProvider; import software.amazon.jdbc.plugin.OpenedConnectionTracker; -import software.amazon.jdbc.plugin.efm.MonitorThreadContainer; -import software.amazon.jdbc.plugin.efm2.MonitorServiceImpl; +import software.amazon.jdbc.plugin.efm.HostMonitorThreadContainer; +import software.amazon.jdbc.plugin.efm2.HostMonitorServiceImpl; import software.amazon.jdbc.plugin.failover.FailoverConnectionPlugin; import software.amazon.jdbc.util.StringUtils; @@ -145,8 +145,8 @@ public void test_FailureDetectionTime_EnhancedMonitoringEnabled(final String efm throws IOException { OpenedConnectionTracker.clearCache(); - MonitorThreadContainer.releaseInstance(); - MonitorServiceImpl.closeAllMonitors(); + HostMonitorThreadContainer.releaseInstance(); + HostMonitorServiceImpl.closeAllMonitors(); AuroraHostListProvider.clearAll(); MonitoringRdsHostListProvider.clearCache(); @@ -227,8 +227,8 @@ public void test_FailureDetectionTime_FailoverAndEnhancedMonitoringEnabled(final throws IOException { OpenedConnectionTracker.clearCache(); - MonitorThreadContainer.releaseInstance(); - MonitorServiceImpl.closeAllMonitors(); + HostMonitorThreadContainer.releaseInstance(); + HostMonitorServiceImpl.closeAllMonitors(); AuroraHostListProvider.clearAll(); MonitoringRdsHostListProvider.clearCache(); @@ -315,8 +315,8 @@ public void test_FailoverTime_SocketTimeout() throws IOException { private void test_FailoverTime_SocketTimeout(final String plugins) throws IOException { OpenedConnectionTracker.clearCache(); - MonitorThreadContainer.releaseInstance(); - MonitorServiceImpl.closeAllMonitors(); + HostMonitorThreadContainer.releaseInstance(); + HostMonitorServiceImpl.closeAllMonitors(); AuroraHostListProvider.clearAll(); MonitoringRdsHostListProvider.clearCache(); diff --git a/wrapper/src/test/java/software/amazon/jdbc/ConnectionPluginChainBuilderTests.java b/wrapper/src/test/java/software/amazon/jdbc/ConnectionPluginChainBuilderTests.java index 2745796d1..84efe5ba3 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/ConnectionPluginChainBuilderTests.java +++ b/wrapper/src/test/java/software/amazon/jdbc/ConnectionPluginChainBuilderTests.java @@ -17,8 +17,8 @@ package software.amazon.jdbc; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq; @@ -42,12 +42,14 @@ import 
software.amazon.jdbc.plugin.failover.FailoverConnectionPlugin; import software.amazon.jdbc.plugin.iam.IamAuthConnectionPlugin; import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; +import software.amazon.jdbc.util.FullServicesContainer; import software.amazon.jdbc.util.telemetry.TelemetryContext; import software.amazon.jdbc.util.telemetry.TelemetryFactory; public class ConnectionPluginChainBuilderTests { @Mock ConnectionProvider mockConnectionProvider; + @Mock FullServicesContainer mockServicesContainer; @Mock PluginService mockPluginService; @Mock PluginManagerService mockPluginManagerService; @Mock TelemetryFactory mockTelemetryFactory; @@ -64,6 +66,8 @@ void afterEach() throws Exception { @BeforeEach void beforeEach() { closeable = MockitoAnnotations.openMocks(this); + when(mockServicesContainer.getPluginService()).thenReturn(mockPluginService); + when(mockServicesContainer.getTelemetryFactory()).thenReturn(mockTelemetryFactory); when(mockPluginService.getTelemetryFactory()).thenReturn(mockTelemetryFactory); when(mockTelemetryFactory.openTelemetryContext(anyString(), any())).thenReturn(mockTelemetryContext); when(mockTelemetryFactory.openTelemetryContext(eq(null), any())).thenReturn(mockTelemetryContext); @@ -78,7 +82,7 @@ public void testSortPlugins() throws SQLException { props.put(PropertyDefinition.PLUGINS.name, "iam,efm2,failover"); List result = builder.getPlugins( - mockPluginService, + mockServicesContainer, mockConnectionProvider, null, mockPluginManagerService, @@ -87,10 +91,10 @@ public void testSortPlugins() throws SQLException { assertNotNull(result); assertEquals(4, result.size()); - assertTrue(result.get(0) instanceof FailoverConnectionPlugin); - assertTrue(result.get(1) instanceof HostMonitoringConnectionPlugin); - assertTrue(result.get(2) instanceof IamAuthConnectionPlugin); - assertTrue(result.get(3) instanceof DefaultConnectionPlugin); + assertInstanceOf(FailoverConnectionPlugin.class, result.get(0)); + assertInstanceOf(HostMonitoringConnectionPlugin.class, result.get(1)); + assertInstanceOf(IamAuthConnectionPlugin.class, result.get(2)); + assertInstanceOf(DefaultConnectionPlugin.class, result.get(3)); } @Test @@ -101,7 +105,7 @@ public void testPreservePluginOrder() throws SQLException { props.put(PropertyDefinition.AUTO_SORT_PLUGIN_ORDER.name, "false"); List result = builder.getPlugins( - mockPluginService, + mockServicesContainer, mockConnectionProvider, null, mockPluginManagerService, @@ -110,10 +114,10 @@ public void testPreservePluginOrder() throws SQLException { assertNotNull(result); assertEquals(4, result.size()); - assertTrue(result.get(0) instanceof IamAuthConnectionPlugin); - assertTrue(result.get(1) instanceof HostMonitoringConnectionPlugin); - assertTrue(result.get(2) instanceof FailoverConnectionPlugin); - assertTrue(result.get(3) instanceof DefaultConnectionPlugin); + assertInstanceOf(IamAuthConnectionPlugin.class, result.get(0)); + assertInstanceOf(HostMonitoringConnectionPlugin.class, result.get(1)); + assertInstanceOf(FailoverConnectionPlugin.class, result.get(2)); + assertInstanceOf(DefaultConnectionPlugin.class, result.get(3)); } @Test @@ -123,7 +127,7 @@ public void testSortPluginsWithStickToPrior() throws SQLException { props.put(PropertyDefinition.PLUGINS.name, "dev,iam,executionTime,connectTime,efm2,failover"); List result = builder.getPlugins( - mockPluginService, + mockServicesContainer, mockConnectionProvider, null, mockPluginManagerService, @@ -132,12 +136,12 @@ public void testSortPluginsWithStickToPrior() throws 
SQLException { assertNotNull(result); assertEquals(7, result.size()); - assertTrue(result.get(0) instanceof DeveloperConnectionPlugin); - assertTrue(result.get(1) instanceof FailoverConnectionPlugin); - assertTrue(result.get(2) instanceof HostMonitoringConnectionPlugin); - assertTrue(result.get(3) instanceof IamAuthConnectionPlugin); - assertTrue(result.get(4) instanceof ExecutionTimeConnectionPlugin); - assertTrue(result.get(5) instanceof ConnectTimeConnectionPlugin); - assertTrue(result.get(6) instanceof DefaultConnectionPlugin); + assertInstanceOf(DeveloperConnectionPlugin.class, result.get(0)); + assertInstanceOf(FailoverConnectionPlugin.class, result.get(1)); + assertInstanceOf(HostMonitoringConnectionPlugin.class, result.get(2)); + assertInstanceOf(IamAuthConnectionPlugin.class, result.get(3)); + assertInstanceOf(ExecutionTimeConnectionPlugin.class, result.get(4)); + assertInstanceOf(ConnectTimeConnectionPlugin.class, result.get(5)); + assertInstanceOf(DefaultConnectionPlugin.class, result.get(6)); } } diff --git a/wrapper/src/test/java/software/amazon/jdbc/ConnectionPluginManagerTests.java b/wrapper/src/test/java/software/amazon/jdbc/ConnectionPluginManagerTests.java index 8cae7c162..355276ad3 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/ConnectionPluginManagerTests.java +++ b/wrapper/src/test/java/software/amazon/jdbc/ConnectionPluginManagerTests.java @@ -62,6 +62,7 @@ import software.amazon.jdbc.profile.ConfigurationProfile; import software.amazon.jdbc.profile.ConfigurationProfileBuilder; import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; +import software.amazon.jdbc.util.FullServicesContainer; import software.amazon.jdbc.util.WrapperUtils; import software.amazon.jdbc.util.telemetry.TelemetryContext; import software.amazon.jdbc.util.telemetry.TelemetryFactory; @@ -76,6 +77,7 @@ public class ConnectionPluginManagerTests { @Mock ConnectionWrapper mockConnectionWrapper; @Mock TelemetryFactory mockTelemetryFactory; @Mock TelemetryContext mockTelemetryContext; + @Mock FullServicesContainer mockServicesContainer; @Mock PluginService mockPluginService; @Mock PluginManagerService mockPluginManagerService; @Mock TargetDriverDialect mockTargetDriverDialect; @@ -92,6 +94,8 @@ void cleanUp() throws Exception { @BeforeEach void init() { closeable = MockitoAnnotations.openMocks(this); + when(mockServicesContainer.getPluginService()).thenReturn(mockPluginService); + when(mockServicesContainer.getTelemetryFactory()).thenReturn(mockTelemetryFactory); when(mockPluginService.getTelemetryFactory()).thenReturn(mockTelemetryFactory); when(mockTelemetryFactory.openTelemetryContext(anyString(), any())).thenReturn(mockTelemetryContext); when(mockTelemetryFactory.openTelemetryContext(eq(null), any())).thenReturn(mockTelemetryContext); @@ -620,7 +624,7 @@ public void testDefaultPlugins() throws SQLException { null, mockConnectionWrapper, mockTelemetryFactory)); - target.init(mockPluginService, testProperties, mockPluginManagerService, configurationProfile); + target.init(mockServicesContainer, testProperties, mockPluginManagerService, configurationProfile); assertEquals(4, target.plugins.size()); assertEquals(AuroraConnectionTrackerPlugin.class, target.plugins.get(0).getClass()); @@ -640,7 +644,7 @@ public void testNoWrapperPlugins() throws SQLException { null, mockConnectionWrapper, mockTelemetryFactory)); - target.init(mockPluginService, testProperties, mockPluginManagerService, configurationProfile); + target.init(mockServicesContainer, testProperties, 
mockPluginManagerService, configurationProfile); assertEquals(1, target.plugins.size()); } @@ -655,7 +659,7 @@ public void testOverridingDefaultPluginsWithPluginCodes() throws SQLException { null, mockConnectionWrapper, mockTelemetryFactory)); - target.init(mockPluginService, testProperties, mockPluginManagerService, configurationProfile); + target.init(mockServicesContainer, testProperties, mockPluginManagerService, configurationProfile); assertEquals(2, target.plugins.size()); assertEquals(LogQueryConnectionPlugin.class, target.plugins.get(0).getClass()); diff --git a/wrapper/src/test/java/software/amazon/jdbc/DialectDetectionTests.java b/wrapper/src/test/java/software/amazon/jdbc/DialectDetectionTests.java index 044137a79..3b47f12bb 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/DialectDetectionTests.java +++ b/wrapper/src/test/java/software/amazon/jdbc/DialectDetectionTests.java @@ -51,6 +51,8 @@ import software.amazon.jdbc.dialect.RdsPgDialect; import software.amazon.jdbc.exceptions.ExceptionManager; import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; +import software.amazon.jdbc.util.FullServicesContainer; +import software.amazon.jdbc.util.storage.StorageService; public class DialectDetectionTests { private static final String LOCALHOST = "localhost"; @@ -59,25 +61,30 @@ public class DialectDetectionTests { private static final String MYSQL_PROTOCOL = "jdbc:mysql://"; private static final String PG_PROTOCOL = "jdbc:postgresql://"; private static final String MARIA_PROTOCOL = "jdbc:mariadb://"; - @Mock private HostListProvider mockHostListProvider; + private final Properties props = new Properties(); + private AutoCloseable closeable; + @Mock private FullServicesContainer mockServicesContainer; + @Mock private HostListProviderService mockHostListProviderService; + @Mock private StorageService mockStorageService; @Mock private Connection mockConnection; @Mock private Statement mockStatement; - @Mock private ResultSet successResultSet; - @Mock private ResultSet failResultSet; + @Mock private ResultSet mockSuccessResultSet; + @Mock private ResultSet mockFailResultSet; @Mock private HostSpec mockHost; - @Mock private ConnectionPluginManager pluginManager; + @Mock private ConnectionPluginManager mockPluginManager; @Mock private TargetDriverDialect mockTargetDriverDialect; @Mock private ResultSetMetaData mockResultSetMetaData; - private final Properties props = new Properties(); - private AutoCloseable closeable; @BeforeEach void setUp() throws SQLException { closeable = MockitoAnnotations.openMocks(this); + when(this.mockServicesContainer.getHostListProviderService()).thenReturn(mockHostListProviderService); + when(this.mockServicesContainer.getConnectionPluginManager()).thenReturn(mockPluginManager); + when(this.mockServicesContainer.getStorageService()).thenReturn(mockStorageService); when(this.mockConnection.createStatement()).thenReturn(this.mockStatement); when(this.mockHost.getUrl()).thenReturn("url"); - when(this.failResultSet.next()).thenReturn(false); - pluginManager.plugins = new ArrayList<>(); + when(this.mockFailResultSet.next()).thenReturn(false); + mockPluginManager.plugins = new ArrayList<>(); } @AfterEach @@ -87,9 +94,9 @@ void cleanUp() throws Exception { } PluginServiceImpl getPluginService(String host, String protocol) throws SQLException { - return spy( + PluginServiceImpl pluginService = spy( new PluginServiceImpl( - pluginManager, + mockServicesContainer, new ExceptionManager(), props, protocol + host, @@ -98,6 +105,9 @@ PluginServiceImpl 
getPluginService(String host, String protocol) throws SQLExcep mockTargetDriverDialect, null, null)); + + when(this.mockServicesContainer.getHostListProviderService()).thenReturn(pluginService); + return pluginService; } @ParameterizedTest @@ -124,7 +134,7 @@ static Stream getInitialDialectArguments() { @Test void testUpdateDialectMysqlUnchanged() throws SQLException { - when(mockStatement.executeQuery(any())).thenReturn(failResultSet); + when(mockStatement.executeQuery(any())).thenReturn(mockFailResultSet); final PluginServiceImpl target = getPluginService(LOCALHOST, MYSQL_PROTOCOL); target.setInitialConnectionHostSpec(mockHost); target.updateDialect(mockConnection); @@ -133,14 +143,14 @@ void testUpdateDialectMysqlUnchanged() throws SQLException { @Test void testUpdateDialectMysqlToRds() throws SQLException { - when(mockStatement.executeQuery(any())).thenReturn(failResultSet); - when(mockStatement.executeQuery("SHOW VARIABLES LIKE 'version_comment'")).thenReturn(successResultSet); - when(mockStatement.executeQuery("SHOW VARIABLES LIKE 'report_host'")).thenReturn(successResultSet); - when(successResultSet.getString(2)).thenReturn( + when(mockStatement.executeQuery(any())).thenReturn(mockFailResultSet); + when(mockStatement.executeQuery("SHOW VARIABLES LIKE 'version_comment'")).thenReturn(mockSuccessResultSet); + when(mockStatement.executeQuery("SHOW VARIABLES LIKE 'report_host'")).thenReturn(mockSuccessResultSet); + when(mockSuccessResultSet.getString(2)).thenReturn( "Source distribution", "Source distribution", ""); - when(successResultSet.next()).thenReturn(true, false, true, true); - when(successResultSet.getMetaData()).thenReturn(mockResultSetMetaData); - when(failResultSet.next()).thenReturn(false); + when(mockSuccessResultSet.next()).thenReturn(true, false, true, true); + when(mockSuccessResultSet.getMetaData()).thenReturn(mockResultSetMetaData); + when(mockFailResultSet.next()).thenReturn(false); final PluginServiceImpl target = getPluginService(LOCALHOST, MYSQL_PROTOCOL); target.setInitialConnectionHostSpec(mockHost); target.updateDialect(mockConnection); @@ -153,8 +163,8 @@ void testUpdateDialectMysqlToRds() throws SQLException { // 1) test DialectManager.getDialect() to return RdsMultiAzDbClusterMysqlDialect // 2) test PluginServiceImpl.updateDialect() with mocked DialectManager.getDialect() void testUpdateDialectMysqlToTaz() throws SQLException { - when(mockStatement.executeQuery(any())).thenReturn(failResultSet, successResultSet); - when(successResultSet.next()).thenReturn(true); + when(mockStatement.executeQuery(any())).thenReturn(mockFailResultSet, mockSuccessResultSet); + when(mockSuccessResultSet.next()).thenReturn(true); final PluginServiceImpl target = getPluginService(LOCALHOST, MYSQL_PROTOCOL); target.setInitialConnectionHostSpec(mockHost); target.updateDialect(mockConnection); @@ -163,10 +173,11 @@ void testUpdateDialectMysqlToTaz() throws SQLException { @Test void testUpdateDialectMysqlToAurora() throws SQLException { - when(mockStatement.executeQuery(any())).thenReturn(failResultSet); - when(mockStatement.executeQuery("SHOW VARIABLES LIKE 'aurora_version'")).thenReturn(successResultSet); - when(successResultSet.next()).thenReturn(true, false); + when(mockStatement.executeQuery(any())).thenReturn(mockFailResultSet); + when(mockStatement.executeQuery("SHOW VARIABLES LIKE 'aurora_version'")).thenReturn(mockSuccessResultSet); + when(mockSuccessResultSet.next()).thenReturn(true, false); final PluginServiceImpl target = getPluginService(LOCALHOST, MYSQL_PROTOCOL); + 
when(mockServicesContainer.getPluginService()).thenReturn(target); target.setInitialConnectionHostSpec(mockHost); target.updateDialect(mockConnection); assertEquals(AuroraMysqlDialect.class, target.dialect.getClass()); @@ -174,7 +185,7 @@ void testUpdateDialectMysqlToAurora() throws SQLException { @Test void testUpdateDialectPgUnchanged() throws SQLException { - when(mockStatement.executeQuery(any())).thenReturn(failResultSet); + when(mockStatement.executeQuery(any())).thenReturn(mockFailResultSet); final PluginServiceImpl target = getPluginService(LOCALHOST, PG_PROTOCOL); target.setInitialConnectionHostSpec(mockHost); target.updateDialect(mockConnection); @@ -184,12 +195,12 @@ void testUpdateDialectPgUnchanged() throws SQLException { @Test void testUpdateDialectPgToRds() throws SQLException { when(mockStatement.executeQuery(any())) - .thenReturn(successResultSet, failResultSet, failResultSet, successResultSet); - when(successResultSet.getBoolean(any())).thenReturn(false); - when(successResultSet.getBoolean("rds_tools")).thenReturn(true); - when(successResultSet.getBoolean("aurora_stat_utils")).thenReturn(false); - when(successResultSet.next()).thenReturn(true); - when(failResultSet.next()).thenReturn(false); + .thenReturn(mockSuccessResultSet, mockFailResultSet, mockFailResultSet, mockSuccessResultSet); + when(mockSuccessResultSet.getBoolean(any())).thenReturn(false); + when(mockSuccessResultSet.getBoolean("rds_tools")).thenReturn(true); + when(mockSuccessResultSet.getBoolean("aurora_stat_utils")).thenReturn(false); + when(mockSuccessResultSet.next()).thenReturn(true); + when(mockFailResultSet.next()).thenReturn(false); final PluginServiceImpl target = getPluginService(LOCALHOST, PG_PROTOCOL); target.setInitialConnectionHostSpec(mockHost); target.updateDialect(mockConnection); @@ -202,9 +213,9 @@ void testUpdateDialectPgToRds() throws SQLException { // 1) test DialectManager.getDialect() to return RdsMultiAzDbClusterMysqlDialect // 2) test PluginServiceImpl.updateDialect() with mocked DialectManager.getDialect() void testUpdateDialectPgToTaz() throws SQLException { - when(mockStatement.executeQuery(any())).thenReturn(successResultSet); - when(successResultSet.getBoolean(any())).thenReturn(false); - when(successResultSet.next()).thenReturn(true); + when(mockStatement.executeQuery(any())).thenReturn(mockSuccessResultSet); + when(mockSuccessResultSet.getBoolean(any())).thenReturn(false); + when(mockSuccessResultSet.next()).thenReturn(true); final PluginServiceImpl target = getPluginService(LOCALHOST, PG_PROTOCOL); target.setInitialConnectionHostSpec(mockHost); target.updateDialect(mockConnection); @@ -217,9 +228,9 @@ void testUpdateDialectPgToTaz() throws SQLException { // 1) test DialectManager.getDialect() to return RdsMultiAzDbClusterMysqlDialect // 2) test PluginServiceImpl.updateDialect() with mocked DialectManager.getDialect() void testUpdateDialectPgToAurora() throws SQLException { - when(mockStatement.executeQuery(any())).thenReturn(successResultSet); - when(successResultSet.next()).thenReturn(true); - when(successResultSet.getBoolean(any())).thenReturn(true); + when(mockStatement.executeQuery(any())).thenReturn(mockSuccessResultSet); + when(mockSuccessResultSet.next()).thenReturn(true); + when(mockSuccessResultSet.getBoolean(any())).thenReturn(true); final PluginServiceImpl target = getPluginService(LOCALHOST, PG_PROTOCOL); target.setInitialConnectionHostSpec(mockHost); target.updateDialect(mockConnection); @@ -228,7 +239,7 @@ void testUpdateDialectPgToAurora() throws SQLException { 
@Test void testUpdateDialectMariaUnchanged() throws SQLException { - when(mockStatement.executeQuery(any())).thenReturn(failResultSet); + when(mockStatement.executeQuery(any())).thenReturn(mockFailResultSet); final PluginServiceImpl target = getPluginService(LOCALHOST, MARIA_PROTOCOL); target.setInitialConnectionHostSpec(mockHost); target.updateDialect(mockConnection); @@ -237,14 +248,14 @@ void testUpdateDialectMariaUnchanged() throws SQLException { @Test void testUpdateDialectMariaToMysqlRds() throws SQLException { - when(mockStatement.executeQuery(any())).thenReturn(failResultSet); - when(mockStatement.executeQuery("SHOW VARIABLES LIKE 'version_comment'")).thenReturn(successResultSet); - when(mockStatement.executeQuery("SHOW VARIABLES LIKE 'report_host'")).thenReturn(successResultSet); - when(successResultSet.getString(2)).thenReturn( + when(mockStatement.executeQuery(any())).thenReturn(mockFailResultSet); + when(mockStatement.executeQuery("SHOW VARIABLES LIKE 'version_comment'")).thenReturn(mockSuccessResultSet); + when(mockStatement.executeQuery("SHOW VARIABLES LIKE 'report_host'")).thenReturn(mockSuccessResultSet); + when(mockSuccessResultSet.getString(2)).thenReturn( "Source distribution", "Source distribution", ""); - when(successResultSet.next()).thenReturn(true, false, true, true); - when(successResultSet.getMetaData()).thenReturn(mockResultSetMetaData); - when(failResultSet.next()).thenReturn(false); + when(mockSuccessResultSet.next()).thenReturn(true, false, true, true); + when(mockSuccessResultSet.getMetaData()).thenReturn(mockResultSetMetaData); + when(mockFailResultSet.next()).thenReturn(false); final PluginServiceImpl target = getPluginService(LOCALHOST, MARIA_PROTOCOL); target.setInitialConnectionHostSpec(mockHost); target.updateDialect(mockConnection); @@ -257,7 +268,7 @@ void testUpdateDialectMariaToMysqlRds() throws SQLException { // 1) test DialectManager.getDialect() to return RdsMultiAzDbClusterMysqlDialect // 2) test PluginServiceImpl.updateDialect() with mocked DialectManager.getDialect() void testUpdateDialectMariaToMysqlTaz() throws SQLException { - when(mockStatement.executeQuery(any())).thenReturn(failResultSet, successResultSet); + when(mockStatement.executeQuery(any())).thenReturn(mockFailResultSet, mockSuccessResultSet); final PluginServiceImpl target = getPluginService(LOCALHOST, MARIA_PROTOCOL); target.setInitialConnectionHostSpec(mockHost); target.updateDialect(mockConnection); @@ -266,10 +277,11 @@ void testUpdateDialectMariaToMysqlTaz() throws SQLException { @Test void testUpdateDialectMariaToMysqlAurora() throws SQLException { - when(mockStatement.executeQuery(any())).thenReturn(failResultSet); - when(mockStatement.executeQuery("SHOW VARIABLES LIKE 'aurora_version'")).thenReturn(successResultSet); - when(successResultSet.next()).thenReturn(true, false); + when(mockStatement.executeQuery(any())).thenReturn(mockFailResultSet); + when(mockStatement.executeQuery("SHOW VARIABLES LIKE 'aurora_version'")).thenReturn(mockSuccessResultSet); + when(mockSuccessResultSet.next()).thenReturn(true, false); final PluginServiceImpl target = getPluginService(LOCALHOST, MARIA_PROTOCOL); + when(mockServicesContainer.getPluginService()).thenReturn(target); target.setInitialConnectionHostSpec(mockHost); target.updateDialect(mockConnection); assertEquals(AuroraMysqlDialect.class, target.dialect.getClass()); diff --git a/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java 
b/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java index 1b7e4a431..6e5844ccf 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java @@ -50,7 +50,7 @@ import software.amazon.jdbc.targetdriverdialect.ConnectInfo; import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; import software.amazon.jdbc.util.Pair; -import software.amazon.jdbc.util.SlidingExpirationCache; +import software.amazon.jdbc.util.storage.SlidingExpirationCache; class HikariPooledConnectionProviderTest { @Mock Connection mockConnection; diff --git a/wrapper/src/test/java/software/amazon/jdbc/PluginServiceImplTests.java b/wrapper/src/test/java/software/amazon/jdbc/PluginServiceImplTests.java index d9af2a07f..07a22941f 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/PluginServiceImplTests.java +++ b/wrapper/src/test/java/software/amazon/jdbc/PluginServiceImplTests.java @@ -66,14 +66,21 @@ import software.amazon.jdbc.profile.ConfigurationProfileBuilder; import software.amazon.jdbc.states.SessionStateService; import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; +import software.amazon.jdbc.util.FullServicesContainer; +import software.amazon.jdbc.util.events.EventPublisher; +import software.amazon.jdbc.util.storage.StorageService; +import software.amazon.jdbc.util.storage.TestStorageServiceImpl; public class PluginServiceImplTests { private static final Properties PROPERTIES = new Properties(); private static final String URL = "url"; private static final String DRIVER_PROTOCOL = "driverProtocol"; + private StorageService storageService; private AutoCloseable closeable; + @Mock FullServicesContainer servicesContainer; + @Mock EventPublisher mockEventPublisher; @Mock ConnectionPluginManager pluginManager; @Mock Connection newConnection; @Mock Connection oldConnection; @@ -95,12 +102,16 @@ void setUp() throws SQLException { when(oldConnection.isClosed()).thenReturn(false); when(newConnection.createStatement()).thenReturn(statement); when(statement.executeQuery(any())).thenReturn(resultSet); + when(servicesContainer.getConnectionPluginManager()).thenReturn(pluginManager); + when(servicesContainer.getStorageService()).thenReturn(storageService); + storageService = new TestStorageServiceImpl(mockEventPublisher); PluginServiceImpl.hostAvailabilityExpiringCache.clear(); } @AfterEach void cleanUp() throws Exception { closeable.close(); + storageService.clearAll(); PluginServiceImpl.hostAvailabilityExpiringCache.clear(); } @@ -111,7 +122,7 @@ public void testOldConnectionNoSuggestion() throws SQLException { PluginServiceImpl target = spy(new PluginServiceImpl( - pluginManager, + servicesContainer, new ExceptionManager(), PROPERTIES, URL, @@ -140,7 +151,7 @@ public void testOldConnectionDisposeSuggestion() throws SQLException { PluginServiceImpl target = spy(new PluginServiceImpl( - pluginManager, + servicesContainer, new ExceptionManager(), PROPERTIES, URL, @@ -169,7 +180,7 @@ public void testOldConnectionPreserveSuggestion() throws SQLException { PluginServiceImpl target = spy(new PluginServiceImpl( - pluginManager, + servicesContainer, new ExceptionManager(), PROPERTIES, URL, @@ -202,7 +213,7 @@ public void testOldConnectionMixedSuggestion() throws SQLException { PluginServiceImpl target = spy(new PluginServiceImpl( - pluginManager, + servicesContainer, new ExceptionManager(), PROPERTIES, URL, @@ -232,7 +243,7 @@ public void 
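
The test hunks around this point (PluginServiceImplTests here and RdsHostListProviderTest below) add the same wiring: a mocked FullServicesContainer whose getStorageService() returns a TestStorageServiceImpl built on a mocked EventPublisher, with the storage cleared after each test. A condensed sketch of that pattern follows; ExampleServiceWiringTest is a hypothetical class name, while the constructor, stubs, and clearAll() call are the ones used in these hunks.

    import static org.mockito.Mockito.when;

    import org.junit.jupiter.api.AfterEach;
    import org.junit.jupiter.api.BeforeEach;
    import org.mockito.Mock;
    import org.mockito.MockitoAnnotations;
    import software.amazon.jdbc.util.FullServicesContainer;
    import software.amazon.jdbc.util.events.EventPublisher;
    import software.amazon.jdbc.util.storage.StorageService;
    import software.amazon.jdbc.util.storage.TestStorageServiceImpl;

    class ExampleServiceWiringTest {
      private StorageService storageService;
      private AutoCloseable closeable;
      @Mock FullServicesContainer mockServicesContainer;
      @Mock EventPublisher mockEventPublisher;

      @BeforeEach
      void setUp() {
        closeable = MockitoAnnotations.openMocks(this);
        // Create the storage service first so the stub below returns a non-null instance.
        storageService = new TestStorageServiceImpl(mockEventPublisher);
        when(mockServicesContainer.getStorageService()).thenReturn(storageService);
      }

      @AfterEach
      void tearDown() throws Exception {
        storageService.clearAll();
        closeable.close();
      }
    }
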
testChangesNewConnectionNewHostNewPortNewRoleNewAvailability() throw PluginServiceImpl target = spy(new PluginServiceImpl( - pluginManager, + servicesContainer, new ExceptionManager(), PROPERTIES, URL, @@ -271,7 +282,7 @@ public void testChangesNewConnectionNewRoleNewAvailability() throws SQLException PluginServiceImpl target = spy(new PluginServiceImpl( - pluginManager, + servicesContainer, new ExceptionManager(), PROPERTIES, URL, @@ -310,7 +321,7 @@ public void testChangesNewConnection() throws SQLException { PluginServiceImpl target = spy(new PluginServiceImpl( - pluginManager, + servicesContainer, new ExceptionManager(), PROPERTIES, URL, @@ -349,7 +360,7 @@ public void testChangesNoChanges() throws SQLException { PluginServiceImpl target = spy(new PluginServiceImpl( - pluginManager, + servicesContainer, new ExceptionManager(), PROPERTIES, URL, @@ -380,7 +391,7 @@ public void testSetNodeListAdded() throws SQLException { PluginServiceImpl target = spy( new PluginServiceImpl( - pluginManager, + servicesContainer, new ExceptionManager(), PROPERTIES, URL, @@ -414,7 +425,7 @@ public void testSetNodeListDeleted() throws SQLException { PluginServiceImpl target = spy( new PluginServiceImpl( - pluginManager, + servicesContainer, new ExceptionManager(), PROPERTIES, URL, @@ -451,7 +462,7 @@ public void testSetNodeListChanged() throws SQLException { PluginServiceImpl target = spy( new PluginServiceImpl( - pluginManager, + servicesContainer, new ExceptionManager(), PROPERTIES, URL, @@ -488,7 +499,7 @@ public void testSetNodeListNoChanges() throws SQLException { PluginServiceImpl target = spy( new PluginServiceImpl( - pluginManager, + servicesContainer, new ExceptionManager(), PROPERTIES, URL, @@ -514,7 +525,7 @@ public void testNodeAvailabilityNotChanged() throws SQLException { PluginServiceImpl target = spy( new PluginServiceImpl( - pluginManager, + servicesContainer, new ExceptionManager(), PROPERTIES, URL, @@ -543,7 +554,7 @@ public void testNodeAvailabilityChanged_WentDown() throws SQLException { PluginServiceImpl target = spy( new PluginServiceImpl( - pluginManager, + servicesContainer, new ExceptionManager(), PROPERTIES, URL, @@ -579,7 +590,7 @@ public void testNodeAvailabilityChanged_WentUp() throws SQLException { PluginServiceImpl target = spy( new PluginServiceImpl( - pluginManager, + servicesContainer, new ExceptionManager(), PROPERTIES, URL, @@ -626,7 +637,7 @@ public void testNodeAvailabilityChanged_WentUp_ByAlias() throws SQLException { PluginServiceImpl target = spy( new PluginServiceImpl( - pluginManager, + servicesContainer, new ExceptionManager(), PROPERTIES, URL, @@ -671,7 +682,7 @@ public void testNodeAvailabilityChanged_WentUp_MultipleHostsByAlias() throws SQL PluginServiceImpl target = spy( new PluginServiceImpl( - pluginManager, + servicesContainer, new ExceptionManager(), PROPERTIES, URL, @@ -749,7 +760,7 @@ void testRefreshHostList_withCachedHostAvailability() throws SQLException { PluginServiceImpl target = spy( new PluginServiceImpl( - pluginManager, + servicesContainer, new ExceptionManager(), PROPERTIES, URL, @@ -806,7 +817,7 @@ void testForceRefreshHostList_withCachedHostAvailability() throws SQLException { PluginServiceImpl target = spy( new PluginServiceImpl( - pluginManager, + servicesContainer, new ExceptionManager(), PROPERTIES, URL, @@ -831,7 +842,7 @@ void testForceRefreshHostList_withCachedHostAvailability() throws SQLException { void testIdentifyConnectionWithNoAliases() throws SQLException { PluginServiceImpl target = spy( new PluginServiceImpl( - 
pluginManager, + servicesContainer, new ExceptionManager(), PROPERTIES, URL, @@ -852,7 +863,7 @@ void testIdentifyConnectionWithAliases() throws SQLException { .build(); PluginServiceImpl target = spy( new PluginServiceImpl( - pluginManager, + servicesContainer, new ExceptionManager(), PROPERTIES, URL, @@ -880,7 +891,7 @@ void testFillAliasesNonEmptyAliases() throws SQLException { PluginServiceImpl target = spy( new PluginServiceImpl( - pluginManager, + servicesContainer, new ExceptionManager(), PROPERTIES, URL, @@ -902,7 +913,7 @@ void testFillAliasesWithInstanceEndpoint(Dialect dialect, String[] expectedInsta final HostSpec empty = new HostSpecBuilder(new SimpleHostAvailabilityStrategy()).host("foo").build(); PluginServiceImpl target = spy( new PluginServiceImpl( - pluginManager, + servicesContainer, new ExceptionManager(), PROPERTIES, URL, diff --git a/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsHostListProviderTest.java b/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsHostListProviderTest.java index b6af8a04e..797d151be 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsHostListProviderTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsHostListProviderTest.java @@ -47,7 +47,6 @@ import java.util.Collections; import java.util.List; import java.util.Properties; -import java.util.concurrent.TimeUnit; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -65,17 +64,22 @@ import software.amazon.jdbc.hostavailability.HostAvailability; import software.amazon.jdbc.hostavailability.SimpleHostAvailabilityStrategy; import software.amazon.jdbc.hostlistprovider.RdsHostListProvider.FetchTopologyResult; +import software.amazon.jdbc.util.FullServicesContainer; +import software.amazon.jdbc.util.events.EventPublisher; +import software.amazon.jdbc.util.storage.StorageService; +import software.amazon.jdbc.util.storage.TestStorageServiceImpl; class RdsHostListProviderTest { - - private final long defaultRefreshRateNano = TimeUnit.SECONDS.toNanos(5); + private StorageService storageService; private RdsHostListProvider rdsHostListProvider; @Mock private Connection mockConnection; @Mock private Statement mockStatement; @Mock private ResultSet mockResultSet; + @Mock private FullServicesContainer mockServicesContainer; @Mock private PluginService mockPluginService; @Mock private HostListProviderService mockHostListProviderService; + @Mock private EventPublisher mockEventPublisher; @Mock Dialect mockTopologyAwareDialect; @Captor private ArgumentCaptor queryCaptor; @@ -89,6 +93,9 @@ class RdsHostListProviderTest { @BeforeEach void setUp() throws SQLException { closeable = MockitoAnnotations.openMocks(this); + storageService = new TestStorageServiceImpl(mockEventPublisher); + when(mockServicesContainer.getHostListProviderService()).thenReturn(mockHostListProviderService); + when(mockServicesContainer.getStorageService()).thenReturn(storageService); when(mockPluginService.getCurrentConnection()).thenReturn(mockConnection); when(mockPluginService.connect(any(HostSpec.class), any(Properties.class))).thenReturn(mockConnection); when(mockPluginService.getCurrentHostSpec()).thenReturn(currentHostSpec); @@ -103,16 +110,15 @@ void setUp() throws SQLException { @AfterEach void tearDown() throws Exception { RdsHostListProvider.clearAll(); + storageService.clearAll(); closeable.close(); } - private RdsHostListProvider getRdsHostListProvider( - HostListProviderService 
mockHostListProviderService, - String originalUrl) throws SQLException { + private RdsHostListProvider getRdsHostListProvider(String originalUrl) throws SQLException { RdsHostListProvider provider = new RdsHostListProvider( new Properties(), originalUrl, - mockHostListProviderService, + mockServicesContainer, "foo", "bar", "baz"); provider.init(); return provider; @@ -120,12 +126,10 @@ private RdsHostListProvider getRdsHostListProvider( @Test void testGetTopology_returnCachedTopology() throws SQLException { - rdsHostListProvider = Mockito.spy( - getRdsHostListProvider(mockHostListProviderService, "protocol://url/")); + rdsHostListProvider = Mockito.spy(getRdsHostListProvider("protocol://url/")); - final Instant lastUpdated = Instant.now(); final List expected = hosts; - RdsHostListProvider.topologyCache.put(rdsHostListProvider.clusterId, expected, defaultRefreshRateNano); + storageService.set(rdsHostListProvider.clusterId, new Topology(expected)); final FetchTopologyResult result = rdsHostListProvider.getTopology(mockConnection, false); assertEquals(expected, result.hosts); @@ -135,11 +139,10 @@ void testGetTopology_returnCachedTopology() throws SQLException { @Test void testGetTopology_withForceUpdate_returnsUpdatedTopology() throws SQLException { - rdsHostListProvider = Mockito.spy( - getRdsHostListProvider(mockHostListProviderService, "jdbc:someprotocol://url")); + rdsHostListProvider = Mockito.spy(getRdsHostListProvider("jdbc:someprotocol://url")); rdsHostListProvider.isInitialized = true; - RdsHostListProvider.topologyCache.put(rdsHostListProvider.clusterId, hosts, defaultRefreshRateNano); + storageService.set(rdsHostListProvider.clusterId, new Topology(hosts)); final List newHosts = Collections.singletonList( new HostSpecBuilder(new SimpleHostAvailabilityStrategy()).host("newHost").build()); @@ -153,13 +156,12 @@ void testGetTopology_withForceUpdate_returnsUpdatedTopology() throws SQLExceptio @Test void testGetTopology_noForceUpdate_queryReturnsEmptyHostList() throws SQLException { - rdsHostListProvider = Mockito.spy( - getRdsHostListProvider(mockHostListProviderService, "jdbc:someprotocol://url")); + rdsHostListProvider = Mockito.spy(getRdsHostListProvider("jdbc:someprotocol://url")); rdsHostListProvider.clusterId = "cluster-id"; rdsHostListProvider.isInitialized = true; final List expected = hosts; - RdsHostListProvider.topologyCache.put(rdsHostListProvider.clusterId, expected, defaultRefreshRateNano); + storageService.set(rdsHostListProvider.clusterId, new Topology(expected)); doReturn(new ArrayList<>()).when(rdsHostListProvider).queryForTopology(mockConnection); @@ -171,8 +173,7 @@ void testGetTopology_noForceUpdate_queryReturnsEmptyHostList() throws SQLExcepti @Test void testGetTopology_withForceUpdate_returnsInitialHostList() throws SQLException { - rdsHostListProvider = Mockito.spy( - getRdsHostListProvider(mockHostListProviderService, "jdbc:someprotocol://url")); + rdsHostListProvider = Mockito.spy(getRdsHostListProvider("jdbc:someprotocol://url")); rdsHostListProvider.clear(); doReturn(new ArrayList<>()).when(rdsHostListProvider).queryForTopology(mockConnection); @@ -181,7 +182,7 @@ void testGetTopology_withForceUpdate_returnsInitialHostList() throws SQLExceptio verify(rdsHostListProvider, atMostOnce()).queryForTopology(mockConnection); assertNotNull(result.hosts); assertEquals( - Arrays.asList(new HostSpecBuilder(new SimpleHostAvailabilityStrategy()).host("url").build()), + Collections.singletonList(new HostSpecBuilder(new 
SimpleHostAvailabilityStrategy()).host("url").build()), result.hosts); } @@ -198,8 +199,7 @@ void testQueryForTopology_withDifferentDriverProtocol() throws SQLException { when(mockResultSet.getString(eq(1))).thenReturn("mysql"); - rdsHostListProvider = - getRdsHostListProvider(mockHostListProviderService, "mysql://url/"); + rdsHostListProvider = getRdsHostListProvider("mysql://url/"); List hosts = rdsHostListProvider.queryForTopology(mockConnection); assertEquals(expectedMySQL, hosts); @@ -207,16 +207,14 @@ void testQueryForTopology_withDifferentDriverProtocol() throws SQLException { when(mockResultSet.next()).thenReturn(true, false); when(mockResultSet.getString(eq(1))).thenReturn("postgresql"); - rdsHostListProvider = - getRdsHostListProvider(mockHostListProviderService, "postgresql://url/"); + rdsHostListProvider = getRdsHostListProvider("postgresql://url/"); hosts = rdsHostListProvider.queryForTopology(mockConnection); assertEquals(expectedPostgres, hosts); } @Test void testQueryForTopology_queryResultsInException() throws SQLException { - rdsHostListProvider = - getRdsHostListProvider(mockHostListProviderService, "protocol://url/"); + rdsHostListProvider = getRdsHostListProvider("protocol://url/"); when(mockStatement.executeQuery(queryCaptor.capture())).thenThrow(new SQLSyntaxErrorException()); assertThrows( @@ -225,39 +223,21 @@ void testQueryForTopology_queryResultsInException() throws SQLException { } @Test - void testGetCachedTopology_returnCachedTopology() throws SQLException { - rdsHostListProvider = getRdsHostListProvider(mockHostListProviderService, "jdbc:someprotocol://url"); + void testGetCachedTopology_returnStoredTopology() throws SQLException { + rdsHostListProvider = getRdsHostListProvider("jdbc:someprotocol://url"); final List expected = hosts; - RdsHostListProvider.topologyCache.put(rdsHostListProvider.clusterId, expected, defaultRefreshRateNano); + storageService.set(rdsHostListProvider.clusterId, new Topology(expected)); - final List result = rdsHostListProvider.getCachedTopology(); + final List result = rdsHostListProvider.getStoredTopology(); assertEquals(expected, result); } - @Test - void testGetCachedTopology_returnNull() throws InterruptedException, SQLException { - rdsHostListProvider = getRdsHostListProvider(mockHostListProviderService, "jdbc:someprotocol://url"); - // Test getCachedTopology with empty topology. - assertNull(rdsHostListProvider.getCachedTopology()); - rdsHostListProvider.clear(); - - rdsHostListProvider = getRdsHostListProvider(mockHostListProviderService, "jdbc:someprotocol://url"); - final long refreshRateOneNanosecond = 1; - RdsHostListProvider.topologyCache.put(rdsHostListProvider.clusterId, hosts, refreshRateOneNanosecond); - TimeUnit.NANOSECONDS.sleep(1); - - // Test getCachedTopology with expired cache. 
- assertNull(rdsHostListProvider.getCachedTopology()); - } - @Test void testTopologyCache_NoSuggestedClusterId() throws SQLException { RdsHostListProvider.clearAll(); - RdsHostListProvider provider1 = Mockito.spy( - getRdsHostListProvider(mockHostListProviderService, - "jdbc:something://cluster-a.domain.com/")); + RdsHostListProvider provider1 = Mockito.spy(getRdsHostListProvider("jdbc:something://cluster-a.domain.com/")); provider1.init(); final List topologyClusterA = Arrays.asList( new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) @@ -270,16 +250,14 @@ void testTopologyCache_NoSuggestedClusterId() throws SQLException { doReturn(topologyClusterA) .when(provider1).queryForTopology(any(Connection.class)); - assertEquals(0, RdsHostListProvider.topologyCache.size()); + assertEquals(0, storageService.size(Topology.class)); final List topologyProvider1 = provider1.refresh(mock(Connection.class)); assertEquals(topologyClusterA, topologyProvider1); - RdsHostListProvider provider2 = Mockito.spy( - getRdsHostListProvider(mockHostListProviderService, - "jdbc:something://cluster-b.domain.com/")); + RdsHostListProvider provider2 = Mockito.spy(getRdsHostListProvider("jdbc:something://cluster-b.domain.com/")); provider2.init(); - assertNull(provider2.getCachedTopology()); + assertNull(provider2.getStoredTopology()); final List topologyClusterB = Arrays.asList( new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) @@ -293,16 +271,15 @@ void testTopologyCache_NoSuggestedClusterId() throws SQLException { final List topologyProvider2 = provider2.refresh(mock(Connection.class)); assertEquals(topologyClusterB, topologyProvider2); - assertEquals(2, RdsHostListProvider.topologyCache.size()); + assertEquals(2, storageService.size(Topology.class)); } @Test void testTopologyCache_SuggestedClusterIdForRds() throws SQLException { RdsHostListProvider.clearAll(); - RdsHostListProvider provider1 = Mockito.spy( - getRdsHostListProvider(mockHostListProviderService, - "jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/")); + RdsHostListProvider provider1 = + Mockito.spy(getRdsHostListProvider("jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/")); provider1.init(); final List topologyClusterA = Arrays.asList( new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) @@ -323,14 +300,13 @@ void testTopologyCache_SuggestedClusterIdForRds() throws SQLException { doReturn(topologyClusterA).when(provider1).queryForTopology(any(Connection.class)); - assertEquals(0, RdsHostListProvider.topologyCache.size()); + assertEquals(0, storageService.size(Topology.class)); final List topologyProvider1 = provider1.refresh(mock(Connection.class)); assertEquals(topologyClusterA, topologyProvider1); - RdsHostListProvider provider2 = Mockito.spy( - getRdsHostListProvider(mockHostListProviderService, - "jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/")); + RdsHostListProvider provider2 = + Mockito.spy(getRdsHostListProvider("jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/")); provider2.init(); assertEquals(provider1.clusterId, provider2.clusterId); @@ -340,16 +316,15 @@ void testTopologyCache_SuggestedClusterIdForRds() throws SQLException { final List topologyProvider2 = provider2.refresh(mock(Connection.class)); assertEquals(topologyClusterA, topologyProvider2); - assertEquals(1, RdsHostListProvider.topologyCache.size()); + assertEquals(1, storageService.size(Topology.class)); } @Test void testTopologyCache_SuggestedClusterIdForInstance() throws SQLException 
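The RdsHostListProviderTest hunks above replace the static topologyCache, a SlidingExpirationCache that took a per-entry refresh TTL, with entries stored through the StorageService, rename getCachedTopology() to getStoredTopology(), and drop the expired-cache test because no TTL is passed per entry any more. A rough sketch of the new seed-and-count pattern; the Topology import location and the host name are assumptions, the rest appears in the hunks.

// Rough sketch of seeding and counting topology through the storage service.
// The Topology package below is an assumption; it is not shown in this patch.
import java.util.Collections;
import java.util.List;

import software.amazon.jdbc.HostSpec;
import software.amazon.jdbc.HostSpecBuilder;
import software.amazon.jdbc.hostavailability.SimpleHostAvailabilityStrategy;
import software.amazon.jdbc.hostlistprovider.Topology; // assumed location
import software.amazon.jdbc.util.storage.StorageService;

final class TopologySeedingSketch {
  static long seed(final StorageService storageService, final String clusterId) {
    final List<HostSpec> hosts = Collections.singletonList(
        new HostSpecBuilder(new SimpleHostAvailabilityStrategy()).host("instance-1").build());
    // Replaces RdsHostListProvider.topologyCache.put(clusterId, hosts, refreshRateNano).
    storageService.set(clusterId, new Topology(hosts));
    // Replaces assertions against RdsHostListProvider.topologyCache.size().
    return storageService.size(Topology.class);
  }
}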
{ RdsHostListProvider.clearAll(); - RdsHostListProvider provider1 = Mockito.spy( - getRdsHostListProvider(mockHostListProviderService, - "jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/")); + RdsHostListProvider provider1 = + Mockito.spy(getRdsHostListProvider("jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/")); provider1.init(); final List topologyClusterA = Arrays.asList( new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) @@ -370,14 +345,13 @@ void testTopologyCache_SuggestedClusterIdForInstance() throws SQLException { doReturn(topologyClusterA).when(provider1).queryForTopology(any(Connection.class)); - assertEquals(0, RdsHostListProvider.topologyCache.size()); + assertEquals(0, storageService.size(Topology.class)); final List topologyProvider1 = provider1.refresh(mock(Connection.class)); assertEquals(topologyClusterA, topologyProvider1); - RdsHostListProvider provider2 = Mockito.spy( - getRdsHostListProvider(mockHostListProviderService, - "jdbc:something://instance-a-3.xyz.us-east-2.rds.amazonaws.com/")); + RdsHostListProvider provider2 = + Mockito.spy(getRdsHostListProvider("jdbc:something://instance-a-3.xyz.us-east-2.rds.amazonaws.com/")); provider2.init(); assertEquals(provider1.clusterId, provider2.clusterId); @@ -387,16 +361,15 @@ void testTopologyCache_SuggestedClusterIdForInstance() throws SQLException { final List topologyProvider2 = provider2.refresh(mock(Connection.class)); assertEquals(topologyClusterA, topologyProvider2); - assertEquals(1, RdsHostListProvider.topologyCache.size()); + assertEquals(1, storageService.size(Topology.class)); } @Test void testTopologyCache_AcceptSuggestion() throws SQLException { RdsHostListProvider.clearAll(); - RdsHostListProvider provider1 = Mockito.spy( - getRdsHostListProvider(mockHostListProviderService, - "jdbc:something://instance-a-2.xyz.us-east-2.rds.amazonaws.com/")); + RdsHostListProvider provider1 = + Mockito.spy(getRdsHostListProvider("jdbc:something://instance-a-2.xyz.us-east-2.rds.amazonaws.com/")); provider1.init(); final List topologyClusterA = Arrays.asList( new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) @@ -417,16 +390,15 @@ void testTopologyCache_AcceptSuggestion() throws SQLException { doAnswer(a -> topologyClusterA).when(provider1).queryForTopology(any(Connection.class)); - assertEquals(0, RdsHostListProvider.topologyCache.size()); + assertEquals(0, storageService.size(Topology.class)); List topologyProvider1 = provider1.refresh(mock(Connection.class)); assertEquals(topologyClusterA, topologyProvider1); // RdsHostListProvider.logCache(); - RdsHostListProvider provider2 = Mockito.spy( - getRdsHostListProvider(mockHostListProviderService, - "jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/")); + RdsHostListProvider provider2 = + Mockito.spy(getRdsHostListProvider("jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/")); provider2.init(); doAnswer(a -> topologyClusterA).when(provider2).queryForTopology(any(Connection.class)); @@ -437,7 +409,7 @@ void testTopologyCache_AcceptSuggestion() throws SQLException { assertNotEquals(provider1.clusterId, provider2.clusterId); assertFalse(provider1.isPrimaryClusterId); assertTrue(provider2.isPrimaryClusterId); - assertEquals(2, RdsHostListProvider.topologyCache.size()); + assertEquals(2, storageService.size(Topology.class)); assertEquals("cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com", RdsHostListProvider.suggestedPrimaryClusterIdCache.get(provider1.clusterId)); @@ -454,9 +426,7 @@ void 
testTopologyCache_AcceptSuggestion() throws SQLException { @Test void testIdentifyConnectionWithInvalidNodeIdQuery() throws SQLException { - rdsHostListProvider = Mockito.spy(getRdsHostListProvider( - mockHostListProviderService, - "jdbc:someprotocol://url")); + rdsHostListProvider = Mockito.spy(getRdsHostListProvider("jdbc:someprotocol://url")); when(mockResultSet.next()).thenReturn(false); assertThrows(SQLException.class, () -> rdsHostListProvider.identifyConnection(mockConnection)); @@ -467,9 +437,7 @@ void testIdentifyConnectionWithInvalidNodeIdQuery() throws SQLException { @Test void testIdentifyConnectionNullTopology() throws SQLException { - rdsHostListProvider = Mockito.spy(getRdsHostListProvider( - mockHostListProviderService, - "jdbc:someprotocol://url")); + rdsHostListProvider = Mockito.spy(getRdsHostListProvider("jdbc:someprotocol://url")); rdsHostListProvider.clusterInstanceTemplate = new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) .host("?.pattern").build(); @@ -490,9 +458,7 @@ void testIdentifyConnectionHostNotInTopology() throws SQLException { .role(HostRole.WRITER) .build()); - rdsHostListProvider = Mockito.spy(getRdsHostListProvider( - mockHostListProviderService, - "jdbc:someprotocol://url")); + rdsHostListProvider = Mockito.spy(getRdsHostListProvider("jdbc:someprotocol://url")); when(mockResultSet.next()).thenReturn(true); when(mockResultSet.getString(eq(1))).thenReturn("instance-1"); doReturn(cachedTopology).when(rdsHostListProvider).refresh(mockConnection); @@ -511,9 +477,7 @@ void testIdentifyConnectionHostInTopology() throws SQLException { expectedHost.setHostId("instance-a-1"); final List cachedTopology = Collections.singletonList(expectedHost); - rdsHostListProvider = Mockito.spy(getRdsHostListProvider( - mockHostListProviderService, - "jdbc:someprotocol://url")); + rdsHostListProvider = Mockito.spy(getRdsHostListProvider("jdbc:someprotocol://url")); when(mockResultSet.next()).thenReturn(true); when(mockResultSet.getString(eq(1))).thenReturn("instance-a-1"); doReturn(cachedTopology).when(rdsHostListProvider).refresh(mockConnection); @@ -526,8 +490,7 @@ void testIdentifyConnectionHostInTopology() throws SQLException { @Test void testGetTopology_StaleRecord() throws SQLException { - rdsHostListProvider = Mockito.spy( - getRdsHostListProvider(mockHostListProviderService, "jdbc:someprotocol://url")); + rdsHostListProvider = Mockito.spy(getRdsHostListProvider("jdbc:someprotocol://url")); rdsHostListProvider.isInitialized = true; final String hostName1 = "hostName1"; @@ -560,8 +523,7 @@ void testGetTopology_StaleRecord() throws SQLException { @Test void testGetTopology_InvalidLastUpdatedTimestamp() throws SQLException { - rdsHostListProvider = Mockito.spy( - getRdsHostListProvider(mockHostListProviderService, "jdbc:someprotocol://url")); + rdsHostListProvider = Mockito.spy(getRdsHostListProvider("jdbc:someprotocol://url")); rdsHostListProvider.isInitialized = true; final String hostName = "hostName"; @@ -586,8 +548,7 @@ void testGetTopology_InvalidLastUpdatedTimestamp() throws SQLException { @Test void testGetTopology_returnsLatestWriter() throws SQLException { - rdsHostListProvider = Mockito.spy( - getRdsHostListProvider(mockHostListProviderService, "jdbc:someprotocol://url")); + rdsHostListProvider = Mockito.spy(getRdsHostListProvider("jdbc:someprotocol://url")); rdsHostListProvider.isInitialized = true; HostSpec expectedWriterHost = new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) @@ -650,23 +611,19 @@ void testClusterUrlUsedAsDefaultClusterId() 
throws SQLException { String readerClusterUrl = "mycluster.cluster-ro-XYZ.us-east-1.rds.amazonaws.com"; String expectedClusterId = "mycluster.cluster-XYZ.us-east-1.rds.amazonaws.com:1234"; String connectionString = "jdbc:someprotocol://" + readerClusterUrl + ":1234/test"; - RdsHostListProvider provider1 = Mockito.spy(getRdsHostListProvider( - mockHostListProviderService, - connectionString)); + RdsHostListProvider provider1 = Mockito.spy(getRdsHostListProvider(connectionString)); assertEquals(expectedClusterId, provider1.getClusterId()); List mockTopology = Collections.singletonList(new HostSpecBuilder(new SimpleHostAvailabilityStrategy()).host("host").build()); doReturn(mockTopology).when(provider1).queryForTopology(any(Connection.class)); provider1.refresh(); - assertEquals(mockTopology, provider1.getCachedTopology()); + assertEquals(mockTopology, provider1.getStoredTopology()); verify(provider1, times(1)).queryForTopology(mockConnection); - RdsHostListProvider provider2 = Mockito.spy(getRdsHostListProvider( - mockHostListProviderService, - connectionString)); + RdsHostListProvider provider2 = Mockito.spy(getRdsHostListProvider(connectionString)); assertEquals(expectedClusterId, provider2.getClusterId()); - assertEquals(mockTopology, provider2.getCachedTopology()); + assertEquals(mockTopology, provider2.getStoredTopology()); verify(provider2, never()).queryForTopology(mockConnection); } } diff --git a/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsMultiAzDbClusterListProviderTest.java b/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsMultiAzDbClusterListProviderTest.java index a99ead6c6..df6d6ee50 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsMultiAzDbClusterListProviderTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/hostlistprovider/RdsMultiAzDbClusterListProviderTest.java @@ -37,16 +37,13 @@ import java.sql.SQLException; import java.sql.SQLSyntaxErrorException; import java.sql.Statement; -import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Properties; -import java.util.concurrent.TimeUnit; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; import org.mockito.ArgumentCaptor; import org.mockito.Captor; @@ -61,17 +58,22 @@ import software.amazon.jdbc.dialect.Dialect; import software.amazon.jdbc.hostavailability.SimpleHostAvailabilityStrategy; import software.amazon.jdbc.hostlistprovider.RdsHostListProvider.FetchTopologyResult; +import software.amazon.jdbc.util.FullServicesContainer; +import software.amazon.jdbc.util.events.EventPublisher; +import software.amazon.jdbc.util.storage.StorageService; +import software.amazon.jdbc.util.storage.TestStorageServiceImpl; class RdsMultiAzDbClusterListProviderTest { - - private final long defaultRefreshRateNano = TimeUnit.SECONDS.toNanos(5); + private StorageService storageService; private RdsMultiAzDbClusterListProvider rdsMazDbClusterHostListProvider; @Mock private Connection mockConnection; @Mock private Statement mockStatement; @Mock private ResultSet mockResultSet; + @Mock private FullServicesContainer mockServicesContainer; @Mock private PluginService mockPluginService; @Mock private HostListProviderService mockHostListProviderService; + @Mock private EventPublisher mockEventPublisher; @Mock Dialect mockTopologyAwareDialect; @Captor private ArgumentCaptor queryCaptor; @@ -85,6 
+87,9 @@ class RdsMultiAzDbClusterListProviderTest { @BeforeEach void setUp() throws SQLException { closeable = MockitoAnnotations.openMocks(this); + storageService = new TestStorageServiceImpl(mockEventPublisher); + when(mockServicesContainer.getHostListProviderService()).thenReturn(mockHostListProviderService); + when(mockServicesContainer.getStorageService()).thenReturn(storageService); when(mockPluginService.getCurrentConnection()).thenReturn(mockConnection); when(mockPluginService.connect(any(HostSpec.class), any(Properties.class))).thenReturn(mockConnection); when(mockPluginService.getCurrentHostSpec()).thenReturn(currentHostSpec); @@ -98,16 +103,15 @@ void setUp() throws SQLException { @AfterEach void tearDown() throws Exception { RdsMultiAzDbClusterListProvider.clearAll(); + storageService.clearAll(); closeable.close(); } - private RdsMultiAzDbClusterListProvider getRdsMazDbClusterHostListProvider( - HostListProviderService mockHostListProviderService, - String originalUrl) throws SQLException { + private RdsMultiAzDbClusterListProvider getRdsMazDbClusterHostListProvider(String originalUrl) throws SQLException { RdsMultiAzDbClusterListProvider provider = new RdsMultiAzDbClusterListProvider( new Properties(), originalUrl, - mockHostListProviderService, + mockServicesContainer, "foo", "bar", "baz", @@ -120,13 +124,9 @@ private RdsMultiAzDbClusterListProvider getRdsMazDbClusterHostListProvider( @Test void testGetTopology_returnCachedTopology() throws SQLException { - rdsMazDbClusterHostListProvider = Mockito.spy( - getRdsMazDbClusterHostListProvider(mockHostListProviderService, "protocol://url/")); - - final Instant lastUpdated = Instant.now(); + rdsMazDbClusterHostListProvider = Mockito.spy(getRdsMazDbClusterHostListProvider("protocol://url/")); final List expected = hosts; - RdsMultiAzDbClusterListProvider.topologyCache.put( - rdsMazDbClusterHostListProvider.clusterId, expected, defaultRefreshRateNano); + storageService.set(rdsMazDbClusterHostListProvider.clusterId, new Topology(expected)); final FetchTopologyResult result = rdsMazDbClusterHostListProvider.getTopology(mockConnection, false); assertEquals(expected, result.hosts); @@ -136,12 +136,10 @@ void testGetTopology_returnCachedTopology() throws SQLException { @Test void testGetTopology_withForceUpdate_returnsUpdatedTopology() throws SQLException { - rdsMazDbClusterHostListProvider = Mockito.spy( - getRdsMazDbClusterHostListProvider(mockHostListProviderService, "jdbc:someprotocol://url")); + rdsMazDbClusterHostListProvider = Mockito.spy(getRdsMazDbClusterHostListProvider("jdbc:someprotocol://url")); rdsMazDbClusterHostListProvider.isInitialized = true; - RdsMultiAzDbClusterListProvider.topologyCache.put( - rdsMazDbClusterHostListProvider.clusterId, hosts, defaultRefreshRateNano); + storageService.set(rdsMazDbClusterHostListProvider.clusterId, new Topology(hosts)); final List newHosts = Collections.singletonList( new HostSpecBuilder(new SimpleHostAvailabilityStrategy()).host("newHost").build()); @@ -155,14 +153,12 @@ void testGetTopology_withForceUpdate_returnsUpdatedTopology() throws SQLExceptio @Test void testGetTopology_noForceUpdate_queryReturnsEmptyHostList() throws SQLException { - rdsMazDbClusterHostListProvider = Mockito.spy( - getRdsMazDbClusterHostListProvider(mockHostListProviderService, "jdbc:someprotocol://url")); + rdsMazDbClusterHostListProvider = Mockito.spy(getRdsMazDbClusterHostListProvider("jdbc:someprotocol://url")); rdsMazDbClusterHostListProvider.clusterId = "cluster-id"; 
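RdsMultiAzDbClusterListProviderTest adopts the same scaffolding as RdsHostListProviderTest: build a TestStorageServiceImpl around a mocked EventPublisher in @BeforeEach, expose it through the services container mock, and clear it in @AfterEach. The pattern is condensed into one sketch below; the class name is invented, everything else is taken from the hunks.

// Condensed sketch of the shared setUp/tearDown scaffolding in the provider tests.
import static org.mockito.Mockito.when;

import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
import software.amazon.jdbc.util.FullServicesContainer;
import software.amazon.jdbc.util.events.EventPublisher;
import software.amazon.jdbc.util.storage.StorageService;
import software.amazon.jdbc.util.storage.TestStorageServiceImpl;

class StorageBackedProviderTestSketch {
  @Mock private FullServicesContainer mockServicesContainer;
  @Mock private EventPublisher mockEventPublisher;
  private StorageService storageService;
  private AutoCloseable closeable;

  @BeforeEach
  void setUp() throws Exception {
    closeable = MockitoAnnotations.openMocks(this);
    // Assign first, then stub, so the container hands out the real instance.
    storageService = new TestStorageServiceImpl(mockEventPublisher);
    when(mockServicesContainer.getStorageService()).thenReturn(storageService);
  }

  @AfterEach
  void tearDown() throws Exception {
    storageService.clearAll(); // drop anything the test stored
    closeable.close();
  }
}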
rdsMazDbClusterHostListProvider.isInitialized = true; final List expected = hosts; - RdsMultiAzDbClusterListProvider.topologyCache.put( - rdsMazDbClusterHostListProvider.clusterId, expected, defaultRefreshRateNano); + storageService.set(rdsMazDbClusterHostListProvider.clusterId, new Topology(expected)); doReturn(new ArrayList<>()).when(rdsMazDbClusterHostListProvider).queryForTopology(mockConnection); @@ -174,8 +170,7 @@ void testGetTopology_noForceUpdate_queryReturnsEmptyHostList() throws SQLExcepti @Test void testGetTopology_withForceUpdate_returnsInitialHostList() throws SQLException { - rdsMazDbClusterHostListProvider = Mockito.spy( - getRdsMazDbClusterHostListProvider(mockHostListProviderService, "jdbc:someprotocol://url")); + rdsMazDbClusterHostListProvider = Mockito.spy(getRdsMazDbClusterHostListProvider("jdbc:someprotocol://url")); rdsMazDbClusterHostListProvider.clear(); doReturn(new ArrayList<>()).when(rdsMazDbClusterHostListProvider).queryForTopology(mockConnection); @@ -184,14 +179,13 @@ void testGetTopology_withForceUpdate_returnsInitialHostList() throws SQLExceptio verify(rdsMazDbClusterHostListProvider, atMostOnce()).queryForTopology(mockConnection); assertNotNull(result.hosts); assertEquals( - Arrays.asList(new HostSpecBuilder(new SimpleHostAvailabilityStrategy()).host("url").build()), + Collections.singletonList(new HostSpecBuilder(new SimpleHostAvailabilityStrategy()).host("url").build()), result.hosts); } @Test void testQueryForTopology_queryResultsInException() throws SQLException { - rdsMazDbClusterHostListProvider = - getRdsMazDbClusterHostListProvider(mockHostListProviderService, "protocol://url/"); + rdsMazDbClusterHostListProvider = getRdsMazDbClusterHostListProvider("protocol://url/"); when(mockStatement.executeQuery(queryCaptor.capture())).thenThrow(new SQLSyntaxErrorException()); assertThrows( @@ -201,43 +195,21 @@ void testQueryForTopology_queryResultsInException() throws SQLException { @Test void testGetCachedTopology_returnCachedTopology() throws SQLException { - rdsMazDbClusterHostListProvider = getRdsMazDbClusterHostListProvider( - mockHostListProviderService, "jdbc:someprotocol://url"); + rdsMazDbClusterHostListProvider = getRdsMazDbClusterHostListProvider("jdbc:someprotocol://url"); final List expected = hosts; - RdsMultiAzDbClusterListProvider.topologyCache.put( - rdsMazDbClusterHostListProvider.clusterId, expected, defaultRefreshRateNano); + storageService.set(rdsMazDbClusterHostListProvider.clusterId, new Topology(expected)); - final List result = rdsMazDbClusterHostListProvider.getCachedTopology(); + final List result = rdsMazDbClusterHostListProvider.getStoredTopology(); assertEquals(expected, result); } - @Test - void testGetCachedTopology_returnNull() throws InterruptedException, SQLException { - rdsMazDbClusterHostListProvider = getRdsMazDbClusterHostListProvider( - mockHostListProviderService, "jdbc:someprotocol://url"); - // Test getCachedTopology with empty topology. - assertNull(rdsMazDbClusterHostListProvider.getCachedTopology()); - rdsMazDbClusterHostListProvider.clear(); - - rdsMazDbClusterHostListProvider = getRdsMazDbClusterHostListProvider( - mockHostListProviderService, "jdbc:someprotocol://url"); - final long refreshRateOneNanosecond = 1; - RdsMultiAzDbClusterListProvider.topologyCache.put( - rdsMazDbClusterHostListProvider.clusterId, hosts, refreshRateOneNanosecond); - TimeUnit.NANOSECONDS.sleep(1); - - // Test getCachedTopology with expired cache. 
- assertNull(rdsMazDbClusterHostListProvider.getCachedTopology()); - } - @Test void testTopologyCache_NoSuggestedClusterId() throws SQLException { RdsMultiAzDbClusterListProvider.clearAll(); - RdsMultiAzDbClusterListProvider provider1 = Mockito.spy( - getRdsMazDbClusterHostListProvider(mockHostListProviderService, - "jdbc:something://cluster-a.domain.com/")); + RdsMultiAzDbClusterListProvider provider1 = + Mockito.spy(getRdsMazDbClusterHostListProvider("jdbc:something://cluster-a.domain.com/")); provider1.init(); final List topologyClusterA = Arrays.asList( new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) @@ -250,16 +222,15 @@ void testTopologyCache_NoSuggestedClusterId() throws SQLException { doReturn(topologyClusterA) .when(provider1).queryForTopology(any(Connection.class)); - assertEquals(0, RdsMultiAzDbClusterListProvider.topologyCache.size()); + assertEquals(0, storageService.size(Topology.class)); final List topologyProvider1 = provider1.refresh(Mockito.mock(Connection.class)); assertEquals(topologyClusterA, topologyProvider1); - RdsMultiAzDbClusterListProvider provider2 = Mockito.spy( - getRdsMazDbClusterHostListProvider(mockHostListProviderService, - "jdbc:something://cluster-b.domain.com/")); + RdsMultiAzDbClusterListProvider provider2 = + Mockito.spy(getRdsMazDbClusterHostListProvider("jdbc:something://cluster-b.domain.com/")); provider2.init(); - assertNull(provider2.getCachedTopology()); + assertNull(provider2.getStoredTopology()); final List topologyClusterB = Arrays.asList( new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) @@ -273,15 +244,15 @@ void testTopologyCache_NoSuggestedClusterId() throws SQLException { final List topologyProvider2 = provider2.refresh(Mockito.mock(Connection.class)); assertEquals(topologyClusterB, topologyProvider2); - assertEquals(2, RdsMultiAzDbClusterListProvider.topologyCache.size()); + assertEquals(2, storageService.size(Topology.class)); } @Test void testTopologyCache_SuggestedClusterIdForRds() throws SQLException { RdsMultiAzDbClusterListProvider.clearAll(); - RdsMultiAzDbClusterListProvider provider1 = Mockito.spy( - getRdsMazDbClusterHostListProvider(mockHostListProviderService, + RdsMultiAzDbClusterListProvider provider1 = + Mockito.spy(getRdsMazDbClusterHostListProvider( "jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/")); provider1.init(); final List topologyClusterA = Arrays.asList( @@ -303,13 +274,13 @@ void testTopologyCache_SuggestedClusterIdForRds() throws SQLException { doReturn(topologyClusterA).when(provider1).queryForTopology(any(Connection.class)); - assertEquals(0, RdsMultiAzDbClusterListProvider.topologyCache.size()); + assertEquals(0, storageService.size(Topology.class)); final List topologyProvider1 = provider1.refresh(Mockito.mock(Connection.class)); assertEquals(topologyClusterA, topologyProvider1); - RdsMultiAzDbClusterListProvider provider2 = Mockito.spy( - getRdsMazDbClusterHostListProvider(mockHostListProviderService, + RdsMultiAzDbClusterListProvider provider2 = + Mockito.spy(getRdsMazDbClusterHostListProvider( "jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/")); provider2.init(); @@ -320,15 +291,15 @@ void testTopologyCache_SuggestedClusterIdForRds() throws SQLException { final List topologyProvider2 = provider2.refresh(Mockito.mock(Connection.class)); assertEquals(topologyClusterA, topologyProvider2); - assertEquals(1, RdsMultiAzDbClusterListProvider.topologyCache.size()); + assertEquals(1, storageService.size(Topology.class)); } @Test void 
testTopologyCache_SuggestedClusterIdForInstance() throws SQLException { RdsMultiAzDbClusterListProvider.clearAll(); - RdsMultiAzDbClusterListProvider provider1 = Mockito.spy( - getRdsMazDbClusterHostListProvider(mockHostListProviderService, + RdsMultiAzDbClusterListProvider provider1 = + Mockito.spy(getRdsMazDbClusterHostListProvider( "jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/")); provider1.init(); final List topologyClusterA = Arrays.asList( @@ -350,13 +321,13 @@ void testTopologyCache_SuggestedClusterIdForInstance() throws SQLException { doReturn(topologyClusterA).when(provider1).queryForTopology(any(Connection.class)); - assertEquals(0, RdsMultiAzDbClusterListProvider.topologyCache.size()); + assertEquals(0, storageService.size(Topology.class)); final List topologyProvider1 = provider1.refresh(Mockito.mock(Connection.class)); assertEquals(topologyClusterA, topologyProvider1); - RdsMultiAzDbClusterListProvider provider2 = Mockito.spy( - getRdsMazDbClusterHostListProvider(mockHostListProviderService, + RdsMultiAzDbClusterListProvider provider2 = + Mockito.spy(getRdsMazDbClusterHostListProvider( "jdbc:something://instance-a-3.xyz.us-east-2.rds.amazonaws.com/")); provider2.init(); @@ -367,15 +338,15 @@ void testTopologyCache_SuggestedClusterIdForInstance() throws SQLException { final List topologyProvider2 = provider2.refresh(Mockito.mock(Connection.class)); assertEquals(topologyClusterA, topologyProvider2); - assertEquals(1, RdsMultiAzDbClusterListProvider.topologyCache.size()); + assertEquals(1, storageService.size(Topology.class)); } @Test void testTopologyCache_AcceptSuggestion() throws SQLException { RdsMultiAzDbClusterListProvider.clearAll(); - RdsMultiAzDbClusterListProvider provider1 = Mockito.spy( - getRdsMazDbClusterHostListProvider(mockHostListProviderService, + RdsMultiAzDbClusterListProvider provider1 = + Mockito.spy(getRdsMazDbClusterHostListProvider( "jdbc:something://instance-a-2.xyz.us-east-2.rds.amazonaws.com/")); provider1.init(); final List topologyClusterA = Arrays.asList( @@ -397,15 +368,15 @@ void testTopologyCache_AcceptSuggestion() throws SQLException { doAnswer(a -> topologyClusterA).when(provider1).queryForTopology(any(Connection.class)); - assertEquals(0, RdsMultiAzDbClusterListProvider.topologyCache.size()); + assertEquals(0, storageService.size(Topology.class)); List topologyProvider1 = provider1.refresh(Mockito.mock(Connection.class)); assertEquals(topologyClusterA, topologyProvider1); // RdsMultiAzDbClusterListProvider.logCache(); - RdsMultiAzDbClusterListProvider provider2 = Mockito.spy( - getRdsMazDbClusterHostListProvider(mockHostListProviderService, + RdsMultiAzDbClusterListProvider provider2 = + Mockito.spy(getRdsMazDbClusterHostListProvider( "jdbc:something://cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com/")); provider2.init(); @@ -417,7 +388,7 @@ void testTopologyCache_AcceptSuggestion() throws SQLException { assertNotEquals(provider1.clusterId, provider2.clusterId); assertFalse(provider1.isPrimaryClusterId); assertTrue(provider2.isPrimaryClusterId); - assertEquals(2, RdsMultiAzDbClusterListProvider.topologyCache.size()); + assertEquals(2, storageService.size(Topology.class)); assertEquals("cluster-a.cluster-xyz.us-east-2.rds.amazonaws.com", RdsMultiAzDbClusterListProvider.suggestedPrimaryClusterIdCache.get(provider1.clusterId)); @@ -434,9 +405,7 @@ void testTopologyCache_AcceptSuggestion() throws SQLException { @Test void testIdentifyConnectionWithInvalidNodeIdQuery() throws SQLException { - rdsMazDbClusterHostListProvider 
= Mockito.spy(getRdsMazDbClusterHostListProvider( - mockHostListProviderService, - "jdbc:someprotocol://url")); + rdsMazDbClusterHostListProvider = Mockito.spy(getRdsMazDbClusterHostListProvider("jdbc:someprotocol://url")); when(mockResultSet.next()).thenReturn(false); assertThrows(SQLException.class, () -> rdsMazDbClusterHostListProvider.identifyConnection(mockConnection)); @@ -447,9 +416,7 @@ void testIdentifyConnectionWithInvalidNodeIdQuery() throws SQLException { @Test void testIdentifyConnectionNullTopology() throws SQLException { - rdsMazDbClusterHostListProvider = Mockito.spy(getRdsMazDbClusterHostListProvider( - mockHostListProviderService, - "jdbc:someprotocol://url")); + rdsMazDbClusterHostListProvider = Mockito.spy(getRdsMazDbClusterHostListProvider("jdbc:someprotocol://url")); rdsMazDbClusterHostListProvider.clusterInstanceTemplate = new HostSpecBuilder(new SimpleHostAvailabilityStrategy()) .host("?.pattern").build(); @@ -470,9 +437,7 @@ void testIdentifyConnectionHostNotInTopology() throws SQLException { .role(HostRole.WRITER) .build()); - rdsMazDbClusterHostListProvider = Mockito.spy(getRdsMazDbClusterHostListProvider( - mockHostListProviderService, - "jdbc:someprotocol://url")); + rdsMazDbClusterHostListProvider = Mockito.spy(getRdsMazDbClusterHostListProvider("jdbc:someprotocol://url")); when(mockResultSet.next()).thenReturn(true); when(mockResultSet.getString(eq(1))).thenReturn("instance-1"); doReturn(cachedTopology).when(rdsMazDbClusterHostListProvider).refresh(mockConnection); @@ -491,9 +456,7 @@ void testIdentifyConnectionHostInTopology() throws SQLException { .build(); final List cachedTopology = Collections.singletonList(expectedHost); - rdsMazDbClusterHostListProvider = Mockito.spy(getRdsMazDbClusterHostListProvider( - mockHostListProviderService, - "jdbc:someprotocol://url")); + rdsMazDbClusterHostListProvider = Mockito.spy(getRdsMazDbClusterHostListProvider("jdbc:someprotocol://url")); when(mockResultSet.next()).thenReturn(true); when(mockResultSet.getString(eq(1))).thenReturn("instance-a-1"); doReturn(cachedTopology).when(rdsMazDbClusterHostListProvider).refresh(mockConnection); diff --git a/wrapper/src/test/java/software/amazon/jdbc/plugin/AwsSecretsManagerConnectionPluginTest.java b/wrapper/src/test/java/software/amazon/jdbc/plugin/AwsSecretsManagerConnectionPluginTest.java index 647a41f37..22a8339c1 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/plugin/AwsSecretsManagerConnectionPluginTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/plugin/AwsSecretsManagerConnectionPluginTest.java @@ -36,6 +36,7 @@ import java.sql.SQLException; import java.util.Properties; import java.util.stream.Stream; +import org.jetbrains.annotations.NotNull; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -69,6 +70,7 @@ import software.amazon.jdbc.profile.ConfigurationProfileBuilder; import software.amazon.jdbc.states.SessionStateService; import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; +import software.amazon.jdbc.util.FullServicesContainer; import software.amazon.jdbc.util.Messages; import software.amazon.jdbc.util.Pair; import software.amazon.jdbc.util.telemetry.GaugeCallable; @@ -77,6 +79,7 @@ import software.amazon.jdbc.util.telemetry.TelemetryFactory; import software.amazon.jdbc.util.telemetry.TelemetryGauge; +@SuppressWarnings("resource") public class AwsSecretsManagerConnectionPluginTest { private static final String TEST_PG_PROTOCOL = "jdbc:aws-wrapper:postgresql:"; @@ 
-106,6 +109,7 @@ public class AwsSecretsManagerConnectionPluginTest { private AutoCloseable closeable; + @Mock FullServicesContainer mockServicesContainer; @Mock SecretsManagerClient mockSecretsManagerClient; @Mock GetSecretValueRequest mockGetValueRequest; @Mock JdbcCallable connectFunc; @@ -132,6 +136,7 @@ public void init() throws SQLException { when(mockDialectManager.getDialect(anyString(), anyString(), any(Properties.class))) .thenReturn(mockTopologyAwareDialect); + when(mockServicesContainer.getConnectionPluginManager()).thenReturn(mockConnectionPluginManager); when(mockService.getTelemetryFactory()).thenReturn(mockTelemetryFactory); when(mockConnectionPluginManager.getTelemetryFactory()).thenReturn(mockTelemetryFactory); when(mockTelemetryFactory.openTelemetryContext(anyString(), any())).thenReturn(mockTelemetryContext); @@ -241,16 +246,7 @@ public void testConnectWithNewSecretsAfterTryingWithCachedSecrets( String protocol, ExceptionHandler exceptionHandler) throws SQLException { this.plugin = new AwsSecretsManagerConnectionPlugin( - new PluginServiceImpl( - mockConnectionPluginManager, - new ExceptionManager(), - TEST_PROPS, - "url", - protocol, - mockDialectManager, - mockTargetDriverDialect, - configurationProfile, - mockSessionStateService), + getPluginService(protocol), TEST_PROPS, (host, r) -> mockSecretsManagerClient, (id) -> mockGetValueRequest); @@ -282,6 +278,19 @@ public void testConnectWithNewSecretsAfterTryingWithCachedSecrets( assertEquals(TEST_PASSWORD, TEST_PROPS.get(PropertyDefinition.PASSWORD.name)); } + private @NotNull PluginServiceImpl getPluginService(String protocol) throws SQLException { + return new PluginServiceImpl( + mockServicesContainer, + new ExceptionManager(), + TEST_PROPS, + "url", + protocol, + mockDialectManager, + mockTargetDriverDialect, + configurationProfile, + mockSessionStateService); + } + /** * The plugin will attempt to open a connection after fetching a secret, but it will fail because the returned secret * could not be parsed. 
@@ -341,16 +350,7 @@ public void testFailedToGetSecrets() throws SQLException { @ValueSource(strings = {"28000", "28P01"}) public void testFailedInitialConnectionWithWrappedGenericError(final String accessError) throws SQLException { this.plugin = new AwsSecretsManagerConnectionPlugin( - new PluginServiceImpl( - mockConnectionPluginManager, - new ExceptionManager(), - TEST_PROPS, - "url", - TEST_PG_PROTOCOL, - mockDialectManager, - mockTargetDriverDialect, - configurationProfile, - mockSessionStateService), + getPluginService(TEST_PG_PROTOCOL), TEST_PROPS, (host, r) -> mockSecretsManagerClient, (id) -> mockGetValueRequest); @@ -383,16 +383,7 @@ public void testFailedInitialConnectionWithWrappedGenericError(final String acce @Test public void testConnectWithWrappedMySQLException() throws SQLException { this.plugin = new AwsSecretsManagerConnectionPlugin( - new PluginServiceImpl( - mockConnectionPluginManager, - new ExceptionManager(), - TEST_PROPS, - "url", - TEST_MYSQL_PROTOCOL, - mockDialectManager, - mockTargetDriverDialect, - configurationProfile, - mockSessionStateService), + getPluginService(TEST_MYSQL_PROTOCOL), TEST_PROPS, (host, r) -> mockSecretsManagerClient, (id) -> mockGetValueRequest); @@ -424,16 +415,7 @@ public void testConnectWithWrappedMySQLException() throws SQLException { @Test public void testConnectWithWrappedPostgreSQLException() throws SQLException { this.plugin = new AwsSecretsManagerConnectionPlugin( - new PluginServiceImpl( - mockConnectionPluginManager, - new ExceptionManager(), - TEST_PROPS, - "url", - TEST_PG_PROTOCOL, - mockDialectManager, - mockTargetDriverDialect, - configurationProfile, - mockSessionStateService), + getPluginService(TEST_PG_PROTOCOL), TEST_PROPS, (host, r) -> mockSecretsManagerClient, (id) -> mockGetValueRequest); @@ -471,7 +453,7 @@ public void testConnectViaARN(final String arn, final Region expectedRegionParse SECRET_ID_PROPERTY.set(props, arn); this.plugin = spy(new AwsSecretsManagerConnectionPlugin( - new PluginServiceImpl(mockConnectionPluginManager, props, "url", TEST_PG_PROTOCOL, mockTargetDriverDialect), + new PluginServiceImpl(mockServicesContainer, props, "url", TEST_PG_PROTOCOL, mockTargetDriverDialect), props, (host, r) -> mockSecretsManagerClient, (id) -> mockGetValueRequest)); @@ -491,7 +473,7 @@ public void testConnectionWithRegionParameterAndARN(final String arn, final Regi REGION_PROPERTY.set(props, expectedRegion.toString()); this.plugin = spy(new AwsSecretsManagerConnectionPlugin( - new PluginServiceImpl(mockConnectionPluginManager, props, "url", TEST_PG_PROTOCOL, mockTargetDriverDialect), + new PluginServiceImpl(mockServicesContainer, props, "url", TEST_PG_PROTOCOL, mockTargetDriverDialect), props, (host, r) -> mockSecretsManagerClient, (id) -> mockGetValueRequest)); diff --git a/wrapper/src/test/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointMonitorImplTest.java b/wrapper/src/test/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointMonitorImplTest.java index 668f92e8e..afa6570e0 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointMonitorImplTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointMonitorImplTest.java @@ -18,8 +18,8 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNull; -import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; import static 
org.mockito.Mockito.atLeastOnce; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -46,14 +46,16 @@ import software.amazon.jdbc.AllowedAndBlockedHosts; import software.amazon.jdbc.HostSpec; import software.amazon.jdbc.HostSpecBuilder; -import software.amazon.jdbc.PluginService; import software.amazon.jdbc.hostavailability.HostAvailabilityStrategy; import software.amazon.jdbc.hostavailability.SimpleHostAvailabilityStrategy; +import software.amazon.jdbc.util.monitoring.MonitorService; +import software.amazon.jdbc.util.storage.StorageService; import software.amazon.jdbc.util.telemetry.TelemetryCounter; import software.amazon.jdbc.util.telemetry.TelemetryFactory; public class CustomEndpointMonitorImplTest { - @Mock private PluginService mockPluginService; + @Mock private MonitorService mockMonitorService; + @Mock private StorageService mockStorageService; @Mock private BiFunction mockRdsClientFunc; @Mock private RdsClient mockRdsClient; @Mock private DescribeDbClusterEndpointsResponse mockDescribeResponse; @@ -91,7 +93,6 @@ public void init() throws SQLException { twoEndpointList = Arrays.asList(mockClusterEndpoint1, mockClusterEndpoint2); oneEndpointList = Collections.singletonList(mockClusterEndpoint1); - when(mockPluginService.getTelemetryFactory()).thenReturn(mockTelemetryFactory); when(mockTelemetryFactory.createCounter(any(String.class))).thenReturn(mockTelemetryCounter); when(mockRdsClientFunc.apply(any(HostSpec.class), any(Region.class))).thenReturn(mockRdsClient); when(mockRdsClient.describeDBClusterEndpoints(any(Consumer.class))).thenReturn(mockDescribeResponse); @@ -108,27 +109,33 @@ public void init() throws SQLException { @AfterEach void cleanUp() throws Exception { closeable.close(); - CustomEndpointPlugin.monitors.clear(); } @Test public void testRun() throws InterruptedException { CustomEndpointMonitorImpl monitor = new CustomEndpointMonitorImpl( - mockPluginService, host, endpointId, Region.US_EAST_1, TimeUnit.MILLISECONDS.toNanos(50), mockRdsClientFunc); + mockStorageService, + mockTelemetryFactory, + host, + endpointId, + Region.US_EAST_1, + TimeUnit.MILLISECONDS.toNanos(50), + mockRdsClientFunc); + monitor.start(); + // Wait for 2 run cycles. The first will return an unexpected number of endpoints in the API response, the second // will return the expected number of endpoints (one). 
TimeUnit.MILLISECONDS.sleep(100); - assertEquals(expectedInfo, CustomEndpointMonitorImpl.customEndpointInfoCache.get(host.getHost())); - monitor.close(); + assertEquals(expectedInfo, CustomEndpointMonitorImpl.customEndpointInfoCache.get(host.getUrl())); + monitor.stop(); ArgumentCaptor captor = ArgumentCaptor.forClass(AllowedAndBlockedHosts.class); - verify(mockPluginService).setAllowedAndBlockedHosts(captor.capture()); + verify(mockStorageService).set(eq(host.getUrl()), captor.capture()); assertEquals(staticMembersSet, captor.getValue().getAllowedHostIds()); assertNull(captor.getValue().getBlockedHostIds()); // Wait for monitor to close TimeUnit.MILLISECONDS.sleep(50); - assertTrue(monitor.stop.get()); verify(mockRdsClient, atLeastOnce()).close(); } } diff --git a/wrapper/src/test/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointPluginTest.java b/wrapper/src/test/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointPluginTest.java index 23e47ad2d..0d41c5f72 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointPluginTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/plugin/customendpoint/CustomEndpointPluginTest.java @@ -19,7 +19,6 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.atLeastOnce; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; @@ -33,7 +32,6 @@ import java.sql.Statement; import java.util.HashSet; import java.util.Properties; -import java.util.concurrent.TimeUnit; import java.util.function.BiFunction; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; @@ -49,6 +47,8 @@ import software.amazon.jdbc.hostavailability.HostAvailabilityStrategy; import software.amazon.jdbc.hostavailability.SimpleHostAvailabilityStrategy; import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; +import software.amazon.jdbc.util.FullServicesContainer; +import software.amazon.jdbc.util.monitoring.MonitorService; import software.amazon.jdbc.util.telemetry.TelemetryCounter; import software.amazon.jdbc.util.telemetry.TelemetryFactory; @@ -63,7 +63,9 @@ public class CustomEndpointPluginTest { private final HostSpec writerClusterHost = hostSpecBuilder.host(writerClusterUrl).build(); private final HostSpec host = hostSpecBuilder.host(customEndpointUrl).build(); + @Mock private FullServicesContainer mockServicesContainer; @Mock private PluginService mockPluginService; + @Mock private MonitorService mockMonitorService; @Mock private BiFunction mockRdsClientFunc; @Mock private TelemetryFactory mockTelemetryFactory; @Mock private TelemetryCounter mockTelemetryCounter; @@ -78,7 +80,9 @@ public class CustomEndpointPluginTest { public void init() throws SQLException { closeable = MockitoAnnotations.openMocks(this); - when(mockPluginService.getTelemetryFactory()).thenReturn(mockTelemetryFactory); + when(mockServicesContainer.getPluginService()).thenReturn(mockPluginService); + when(mockServicesContainer.getMonitorService()).thenReturn(mockMonitorService); + when(mockServicesContainer.getTelemetryFactory()).thenReturn(mockTelemetryFactory); when(mockTelemetryFactory.createCounter(any(String.class))).thenReturn(mockTelemetryCounter); when(mockMonitor.hasCustomEndpointInfo()).thenReturn(true); when(mockPluginService.getTargetDriverDialect()).thenReturn(mockTargetDriverDialect); @@ -89,11 +93,10 
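CustomEndpointMonitorImplTest above captures the monitor's new shape: it is built from the storage service and telemetry factory instead of a PluginService, started and stopped explicitly via start() and stop() rather than close(), and it publishes AllowedAndBlockedHosts into the storage service keyed by the endpoint URL. A hedged sketch of that lifecycle follows; the endpoint id, host name and timings are invented, while the constructor argument order and lifecycle calls come from the test.

// Hedged sketch of the new monitor lifecycle; endpoint values and timings are
// invented, the constructor shape and start()/stop() calls come from the test above.
import java.util.concurrent.TimeUnit;
import java.util.function.BiFunction;

import software.amazon.awssdk.regions.Region;
import software.amazon.awssdk.services.rds.RdsClient;
import software.amazon.jdbc.HostSpec;
import software.amazon.jdbc.HostSpecBuilder;
import software.amazon.jdbc.hostavailability.SimpleHostAvailabilityStrategy;
import software.amazon.jdbc.plugin.customendpoint.CustomEndpointMonitorImpl;
import software.amazon.jdbc.util.storage.StorageService;
import software.amazon.jdbc.util.telemetry.TelemetryFactory;

final class CustomEndpointMonitorLifecycleSketch {
  static void runOnce(
      final StorageService storageService,
      final TelemetryFactory telemetryFactory,
      final BiFunction<HostSpec, Region, RdsClient> rdsClientFunc) throws InterruptedException {
    final HostSpec host = new HostSpecBuilder(new SimpleHostAvailabilityStrategy())
        .host("my-endpoint.cluster-custom-xyz.us-east-1.rds.amazonaws.com")
        .build();
    final CustomEndpointMonitorImpl monitor = new CustomEndpointMonitorImpl(
        storageService, telemetryFactory, host, "my-endpoint", Region.US_EAST_1,
        TimeUnit.MILLISECONDS.toNanos(50), rdsClientFunc);
    monitor.start();                  // polling is started explicitly; the old test relied on the constructor
    TimeUnit.MILLISECONDS.sleep(100); // allow at least one poll cycle
    monitor.stop();                   // replaces monitor.close()
    // The monitor writes AllowedAndBlockedHosts via storageService.set(host.getUrl(), ...),
    // which is what the test above verifies.
  }
}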
@@ public void init() throws SQLException { void cleanUp() throws Exception { closeable.close(); props.clear(); - CustomEndpointPlugin.monitors.clear(); } - private CustomEndpointPlugin getSpyPlugin() { - CustomEndpointPlugin plugin = new CustomEndpointPlugin(mockPluginService, props, mockRdsClientFunc); + private CustomEndpointPlugin getSpyPlugin() throws SQLException { + CustomEndpointPlugin plugin = new CustomEndpointPlugin(mockServicesContainer, props, mockRdsClientFunc); CustomEndpointPlugin spyPlugin = spy(plugin); doReturn(mockMonitor).when(spyPlugin).createMonitorIfAbsent(any(Properties.class)); return spyPlugin; @@ -153,14 +156,4 @@ public void testExecute_monitorCreated() throws SQLException { verify(spyPlugin, times(1)).createMonitorIfAbsent(eq(props)); verify(mockJdbcMethodFunc, times(1)).call(); } - - @Test - public void testCloseMonitors() throws Exception { - CustomEndpointPlugin.monitors.computeIfAbsent("test-monitor", (key) -> mockMonitor, TimeUnit.SECONDS.toNanos(30)); - - CustomEndpointPlugin.closeMonitors(); - - // close() may be called by the cleanup thread in addition to the call below. - verify(mockMonitor, atLeastOnce()).close(); - } } diff --git a/wrapper/src/test/java/software/amazon/jdbc/plugin/dev/DeveloperConnectionPluginTest.java b/wrapper/src/test/java/software/amazon/jdbc/plugin/dev/DeveloperConnectionPluginTest.java index a7bdfb54a..638e99a4f 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/plugin/dev/DeveloperConnectionPluginTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/plugin/dev/DeveloperConnectionPluginTest.java @@ -17,11 +17,11 @@ package software.amazon.jdbc.plugin.dev; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNotSame; import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyString; @@ -38,20 +38,25 @@ import org.mockito.MockitoAnnotations; import software.amazon.jdbc.ConnectionPluginManager; import software.amazon.jdbc.ConnectionProvider; -import software.amazon.jdbc.PluginServiceImpl; import software.amazon.jdbc.PropertyDefinition; import software.amazon.jdbc.dialect.DialectCodes; import software.amazon.jdbc.dialect.DialectManager; import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; +import software.amazon.jdbc.util.FullServicesContainer; +import software.amazon.jdbc.util.FullServicesContainerImpl; +import software.amazon.jdbc.util.monitoring.MonitorService; +import software.amazon.jdbc.util.storage.StorageService; import software.amazon.jdbc.util.telemetry.TelemetryContext; import software.amazon.jdbc.util.telemetry.TelemetryFactory; import software.amazon.jdbc.wrapper.ConnectionWrapper; +@SuppressWarnings({"resource"}) public class DeveloperConnectionPluginTest { - + private FullServicesContainer servicesContainer; + @Mock StorageService mockStorageService; + @Mock MonitorService mockMonitorService; @Mock ConnectionProvider mockConnectionProvider; @Mock Connection mockConnection; - @Mock PluginServiceImpl mockService; @Mock ConnectionPluginManager mockConnectionPluginManager; @Mock ExceptionSimulatorConnectCallback mockConnectCallback; @Mock 
private TelemetryFactory mockTelemetryFactory; @@ -68,30 +73,31 @@ void cleanUp() throws Exception { @BeforeEach void init() throws SQLException { closeable = MockitoAnnotations.openMocks(this); + servicesContainer = new FullServicesContainerImpl(mockStorageService, mockMonitorService, mockTelemetryFactory); when(mockConnectionProvider.connect(any(), any(), any(), any(), any())).thenReturn(mockConnection); when(mockConnectCallback.getExceptionToRaise(any(), any(), any(), anyBoolean())).thenReturn(null); - when(mockService.getTelemetryFactory()).thenReturn(mockTelemetryFactory); when(mockConnectionPluginManager.getTelemetryFactory()).thenReturn(mockTelemetryFactory); when(mockTelemetryFactory.openTelemetryContext(anyString(), any())).thenReturn(mockTelemetryContext); when(mockTelemetryFactory.openTelemetryContext(eq(null), any())).thenReturn(mockTelemetryContext); } @Test + @SuppressWarnings("try") public void test_RaiseException() throws SQLException { final Properties props = new Properties(); props.put(PropertyDefinition.PLUGINS.name, "dev"); props.put(DialectManager.DIALECT.name, DialectCodes.PG); try (ConnectionWrapper wrapper = new ConnectionWrapper( + servicesContainer, props, "any-protocol://any-host/", mockConnectionProvider, null, mockTargetDriverDialect, - null, - mockTelemetryFactory)) { + null)) { ExceptionSimulator simulator = wrapper.unwrap(ExceptionSimulator.class); assertNotNull(simulator); @@ -114,13 +120,13 @@ public void test_RaiseExceptionForMethodName() throws SQLException { props.put(PropertyDefinition.PLUGINS.name, "dev"); props.put(DialectManager.DIALECT.name, DialectCodes.PG); try (ConnectionWrapper wrapper = new ConnectionWrapper( + servicesContainer, props, "any-protocol://any-host/", mockConnectionProvider, null, mockTargetDriverDialect, - null, - mockTelemetryFactory)) { + null)) { ExceptionSimulator simulator = wrapper.unwrap(ExceptionSimulator.class); assertNotNull(simulator); @@ -143,13 +149,13 @@ public void test_RaiseExceptionForAnyMethodName() throws SQLException { props.put(PropertyDefinition.PLUGINS.name, "dev"); props.put(DialectManager.DIALECT.name, DialectCodes.PG); try (ConnectionWrapper wrapper = new ConnectionWrapper( + servicesContainer, props, "any-protocol://any-host/", mockConnectionProvider, null, mockTargetDriverDialect, - null, - mockTelemetryFactory)) { + null)) { ExceptionSimulator simulator = wrapper.unwrap(ExceptionSimulator.class); assertNotNull(simulator); @@ -172,13 +178,13 @@ public void test_RaiseExceptionForWrongMethodName() throws SQLException { props.put(PropertyDefinition.PLUGINS.name, "dev"); props.put(DialectManager.DIALECT.name, DialectCodes.PG); try (ConnectionWrapper wrapper = new ConnectionWrapper( + servicesContainer, props, "any-protocol://any-host/", mockConnectionProvider, null, mockTargetDriverDialect, - null, - mockTelemetryFactory)) { + null)) { ExceptionSimulator simulator = wrapper.unwrap(ExceptionSimulator.class); assertNotNull(simulator); @@ -203,13 +209,13 @@ public void test_RaiseExpectedExceptionClass() throws SQLException { props.put(PropertyDefinition.PLUGINS.name, "dev"); props.put(DialectManager.DIALECT.name, DialectCodes.PG); try (ConnectionWrapper wrapper = new ConnectionWrapper( + servicesContainer, props, "any-protocol://any-host/", mockConnectionProvider, null, mockTargetDriverDialect, - null, - mockTelemetryFactory)) { + null)) { ExceptionSimulator simulator = wrapper.unwrap(ExceptionSimulator.class); assertNotNull(simulator); @@ -232,13 +238,13 @@ public void test_RaiseUnexpectedExceptionClass() 
throws SQLException { props.put(PropertyDefinition.PLUGINS.name, "dev"); props.put(DialectManager.DIALECT.name, DialectCodes.PG); try (ConnectionWrapper wrapper = new ConnectionWrapper( + servicesContainer, props, "any-protocol://any-host/", mockConnectionProvider, null, mockTargetDriverDialect, - null, - mockTelemetryFactory)) { + null)) { ExceptionSimulator simulator = wrapper.unwrap(ExceptionSimulator.class); assertNotNull(simulator); @@ -250,7 +256,7 @@ public void test_RaiseUnexpectedExceptionClass() throws SQLException { Throwable thrownException = assertThrows(SQLException.class, wrapper::createStatement); assertNotNull(thrownException); assertNotSame(exception, thrownException); - assertTrue(thrownException instanceof SQLException); + assertInstanceOf(SQLException.class, thrownException); assertNotNull(thrownException.getCause()); assertSame(thrownException.getCause(), exception); @@ -270,23 +276,25 @@ public void test_RaiseExceptionOnConnect() { Throwable thrownException = assertThrows( SQLException.class, - () -> new ConnectionWrapper(props, + () -> new ConnectionWrapper( + servicesContainer, + props, "any-protocol://any-host/", mockConnectionProvider, null, mockTargetDriverDialect, - null, - mockTelemetryFactory)); + null)); assertSame(exception, thrownException); assertDoesNotThrow( - () -> new ConnectionWrapper(props, + () -> new ConnectionWrapper( + servicesContainer, + props, "any-protocol://any-host/", mockConnectionProvider, null, mockTargetDriverDialect, - null, - mockTelemetryFactory)); + null)); } @Test @@ -299,13 +307,14 @@ public void test_NoExceptionOnConnectWithCallback() { ExceptionSimulatorManager.setCallback(mockConnectCallback); assertDoesNotThrow( - () -> new ConnectionWrapper(props, + () -> new ConnectionWrapper( + servicesContainer, + props, "any-protocol://any-host/", mockConnectionProvider, null, mockTargetDriverDialect, - null, - mockTelemetryFactory)); + null)); } @Test @@ -323,22 +332,24 @@ public void test_RaiseExceptionOnConnectWithCallback() { Throwable thrownException = assertThrows( SQLException.class, - () -> new ConnectionWrapper(props, + () -> new ConnectionWrapper( + servicesContainer, + props, "any-protocol://any-host/", mockConnectionProvider, null, mockTargetDriverDialect, - null, - mockTelemetryFactory)); + null)); assertSame(exception, thrownException); assertDoesNotThrow( - () -> new ConnectionWrapper(props, + () -> new ConnectionWrapper( + servicesContainer, + props, "any-protocol://any-host/", mockConnectionProvider, null, mockTargetDriverDialect, - null, - mockTelemetryFactory)); + null)); } } diff --git a/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/ConcurrencyTests.java b/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/ConcurrencyTests.java deleted file mode 100644 index 62aa4fc07..000000000 --- a/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/ConcurrencyTests.java +++ /dev/null @@ -1,984 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package software.amazon.jdbc.plugin.efm; - -import static software.amazon.jdbc.plugin.efm.HostMonitoringConnectionPlugin.FAILURE_DETECTION_COUNT; -import static software.amazon.jdbc.plugin.efm.HostMonitoringConnectionPlugin.FAILURE_DETECTION_INTERVAL; -import static software.amazon.jdbc.plugin.efm.HostMonitoringConnectionPlugin.FAILURE_DETECTION_TIME; -import static software.amazon.jdbc.plugin.efm.MonitorServiceImpl.MONITOR_DISPOSAL_TIME_MS; - -import java.sql.Array; -import java.sql.Blob; -import java.sql.CallableStatement; -import java.sql.Clob; -import java.sql.Connection; -import java.sql.DatabaseMetaData; -import java.sql.NClob; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLClientInfoException; -import java.sql.SQLException; -import java.sql.SQLWarning; -import java.sql.SQLXML; -import java.sql.Savepoint; -import java.sql.Statement; -import java.sql.Struct; -import java.util.Date; -import java.util.EnumSet; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Properties; -import java.util.Set; -import java.util.concurrent.Executor; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; -import java.util.logging.ConsoleHandler; -import java.util.logging.Level; -import java.util.logging.LogRecord; -import java.util.logging.Logger; -import java.util.logging.SimpleFormatter; -import org.checkerframework.checker.nullness.qual.NonNull; -import org.checkerframework.checker.nullness.qual.Nullable; -import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; -import software.amazon.jdbc.AllowedAndBlockedHosts; -import software.amazon.jdbc.ConnectionPlugin; -import software.amazon.jdbc.ConnectionProvider; -import software.amazon.jdbc.HostListProvider; -import software.amazon.jdbc.HostRole; -import software.amazon.jdbc.HostSpec; -import software.amazon.jdbc.HostSpecBuilder; -import software.amazon.jdbc.JdbcCallable; -import software.amazon.jdbc.NodeChangeOptions; -import software.amazon.jdbc.PluginService; -import software.amazon.jdbc.dialect.Dialect; -import software.amazon.jdbc.dialect.UnknownDialect; -import software.amazon.jdbc.hostavailability.HostAvailability; -import software.amazon.jdbc.hostavailability.SimpleHostAvailabilityStrategy; -import software.amazon.jdbc.states.SessionStateService; -import software.amazon.jdbc.targetdriverdialect.PgTargetDriverDialect; -import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; -import software.amazon.jdbc.util.telemetry.TelemetryFactory; - -@Disabled -@SuppressWarnings("checkstyle:OverloadMethodsDeclarationOrder") -public class ConcurrencyTests { - - @Test - public void testUsePluginConcurrently_SeparatePluginInstances() throws InterruptedException { - - final Level logLevel = Level.OFF; - - final Logger efmLogger = Logger.getLogger("software.amazon.jdbc.plugin.efm"); - efmLogger.setUseParentHandlers(false); - ConsoleHandler handler = new ConsoleHandler(); - handler.setLevel(logLevel); - handler.setFormatter(new SimpleFormatter() { - private static final String format = "[%1$tF %1$tT] [%4$-10s] [%2$-7s] %3$s %n"; - - @Override - public synchronized String format(LogRecord lr) { - return String.format(format, - new Date(lr.getMillis()), - lr.getLevel().getLocalizedName(), - lr.getMessage(), - Thread.currentThread().getName() - ); - } - }); - 
efmLogger.addHandler(handler); - - final ClassLoader mainClassLoader = ClassLoader.getSystemClassLoader(); - final ExecutorService executor = Executors.newCachedThreadPool( - r -> { - final Thread monitoringThread = new Thread(r); - monitoringThread.setDaemon(true); - monitoringThread.setContextClassLoader(mainClassLoader); - return monitoringThread; - }); - - final HostSpec hostSpec = new HostSpecBuilder(new SimpleHostAvailabilityStrategy()).host("test-host") - .build(); - hostSpec.addAlias("test-host-alias-a"); - hostSpec.addAlias("test-host-alias-b"); - - for (int i = 0; i < 10; i++) { - executor.submit(() -> { - - final Properties properties = new Properties(); - MONITOR_DISPOSAL_TIME_MS.set(properties, "30000"); - FAILURE_DETECTION_TIME.set(properties, "10000"); - FAILURE_DETECTION_INTERVAL.set(properties, "1000"); - FAILURE_DETECTION_COUNT.set(properties, "1"); - - final JdbcCallable sqlFunction = () -> { - try { - TimeUnit.SECONDS.sleep(5); - } catch (InterruptedException e) { - // do nothing - } - return null; - }; - - final Connection connection = new TestConnection(); - PluginService pluginService = new TestPluginService(hostSpec, connection); - - final HostMonitoringConnectionPlugin targetPlugin = - new HostMonitoringConnectionPlugin(pluginService, properties); - - final Logger threadLogger = Logger.getLogger("software.amazon.jdbc.plugin.efm"); - threadLogger.setLevel(logLevel); - - while (!Thread.currentThread().isInterrupted()) { - try { - threadLogger.log(Level.FINEST, "Run target plugin execute()"); - targetPlugin.execute( - ResultSet.class, - SQLException.class, - Connection.class, - "Connection.executeQuery", - sqlFunction, - new Object[0]); - } catch (SQLException e) { - threadLogger.log(Level.FINEST, "Exception", e); - } - } - threadLogger.log(Level.FINEST, "Stopped."); - }); - } - executor.shutdown(); - - TimeUnit.SECONDS.sleep(60); // test time - - // cool down - executor.shutdownNow(); - } - - @Test - public void testUsePluginConcurrently_SamePluginInstance() throws InterruptedException { - - final Level logLevel = Level.OFF; - - final Logger efmLogger = Logger.getLogger("software.amazon.jdbc.plugin.efm"); - efmLogger.setUseParentHandlers(false); - ConsoleHandler handler = new ConsoleHandler(); - handler.setLevel(logLevel); - handler.setFormatter(new SimpleFormatter() { - private static final String format = "[%1$tF %1$tT] [%4$-10s] [%2$-7s] %3$s %n"; - - @Override - public synchronized String format(LogRecord lr) { - return String.format(format, - new Date(lr.getMillis()), - lr.getLevel().getLocalizedName(), - lr.getMessage(), - Thread.currentThread().getName() - ); - } - }); - efmLogger.addHandler(handler); - - final ClassLoader mainClassLoader = ClassLoader.getSystemClassLoader(); - final ExecutorService executor = Executors.newCachedThreadPool( - r -> { - final Thread monitoringThread = new Thread(r); - monitoringThread.setDaemon(true); - monitoringThread.setContextClassLoader(mainClassLoader); - return monitoringThread; - }); - - final HostSpec hostSpec = new HostSpecBuilder(new SimpleHostAvailabilityStrategy()).host("test-host") - .build(); - hostSpec.addAlias("test-host-alias-a"); - hostSpec.addAlias("test-host-alias-b"); - - final Properties properties = new Properties(); - MONITOR_DISPOSAL_TIME_MS.set(properties, "30000"); - FAILURE_DETECTION_TIME.set(properties, "10000"); - FAILURE_DETECTION_INTERVAL.set(properties, "1000"); - FAILURE_DETECTION_COUNT.set(properties, "1"); - - final JdbcCallable sqlFunction = () -> { - try { - TimeUnit.SECONDS.sleep(5); - } 
catch (InterruptedException e) { - // do nothing - } - return null; - }; - - final Connection connection = new TestConnection(); - final PluginService pluginService = new TestPluginService(hostSpec, connection); - - final HostMonitoringConnectionPlugin targetPlugin = - new HostMonitoringConnectionPlugin(pluginService, properties); - - for (int i = 0; i < 10; i++) { - executor.submit(() -> { - - final Logger threadLogger = Logger.getLogger("software.amazon.jdbc.plugin.efm"); - threadLogger.setLevel(logLevel); - - while (!Thread.currentThread().isInterrupted()) { - try { - threadLogger.log(Level.FINEST, "Run target plugin execute()"); - targetPlugin.execute( - ResultSet.class, - SQLException.class, - Connection.class, - "Connection.executeQuery", - sqlFunction, - new Object[0]); - } catch (SQLException e) { - threadLogger.log(Level.FINEST, "Exception", e); - } - } - threadLogger.log(Level.FINEST, "Stopped."); - }); - } - executor.shutdown(); - - TimeUnit.SECONDS.sleep(60); // test time - - // cool down - executor.shutdownNow(); - } - - public static class TestSessionStateService implements SessionStateService { - - @Override - public Optional getAutoCommit() throws SQLException { - return Optional.empty(); - } - - @Override - public void setAutoCommit(boolean autoCommit) throws SQLException { - - } - - @Override - public void setupPristineAutoCommit() throws SQLException { - - } - - @Override - public void setupPristineAutoCommit(boolean autoCommit) throws SQLException { - - } - - @Override - public Optional getReadOnly() throws SQLException { - return Optional.empty(); - } - - @Override - public void setReadOnly(boolean readOnly) throws SQLException { - - } - - @Override - public void setupPristineReadOnly() throws SQLException { - - } - - @Override - public void setupPristineReadOnly(boolean readOnly) throws SQLException { - - } - - @Override - public Optional getCatalog() throws SQLException { - return Optional.empty(); - } - - @Override - public void setCatalog(String catalog) throws SQLException { - - } - - @Override - public void setupPristineCatalog() throws SQLException { - - } - - @Override - public void setupPristineCatalog(String catalog) throws SQLException { - - } - - @Override - public Optional getHoldability() throws SQLException { - return Optional.empty(); - } - - @Override - public void setHoldability(int holdability) throws SQLException { - - } - - @Override - public void setupPristineHoldability() throws SQLException { - - } - - @Override - public void setupPristineHoldability(int holdability) throws SQLException { - - } - - @Override - public Optional getNetworkTimeout() throws SQLException { - return Optional.empty(); - } - - @Override - public void setNetworkTimeout(int milliseconds) throws SQLException { - - } - - @Override - public void setupPristineNetworkTimeout() throws SQLException { - - } - - @Override - public void setupPristineNetworkTimeout(int milliseconds) throws SQLException { - - } - - @Override - public Optional getSchema() throws SQLException { - return Optional.empty(); - } - - @Override - public void setSchema(String schema) throws SQLException { - - } - - @Override - public void setupPristineSchema() throws SQLException { - - } - - @Override - public void setupPristineSchema(String schema) throws SQLException { - - } - - @Override - public Optional getTransactionIsolation() throws SQLException { - return Optional.empty(); - } - - @Override - public void setTransactionIsolation(int level) throws SQLException { - - } - - @Override - public void 
setupPristineTransactionIsolation() throws SQLException { - - } - - @Override - public void setupPristineTransactionIsolation(int level) throws SQLException { - - } - - @Override - public Optional>> getTypeMap() throws SQLException { - return Optional.empty(); - } - - @Override - public void setTypeMap(Map> map) throws SQLException { - - } - - @Override - public void setupPristineTypeMap() throws SQLException { - - } - - @Override - public void setupPristineTypeMap(Map> map) throws SQLException { - - } - - @Override - public void reset() { - - } - - @Override - public void begin() throws SQLException { - - } - - @Override - public void complete() { - - } - - @Override - public void applyCurrentSessionState(Connection newConnection) throws SQLException { - - } - - @Override - public void applyPristineSessionState(Connection connection) throws SQLException { - - } - } - - public static class TestPluginService implements PluginService { - - private final HostSpec hostSpec; - private final Connection connection; - - public TestPluginService(HostSpec hostSpec, Connection connection) { - this.hostSpec = hostSpec; - this.connection = connection; - } - - @Override - public Connection getCurrentConnection() { - return this.connection; - } - - @Override - public HostSpec getCurrentHostSpec() { - return this.hostSpec; - } - - @Override - public void setCurrentConnection(@NonNull Connection connection, @NonNull HostSpec hostSpec) - throws SQLException { - - } - - @Override - public EnumSet setCurrentConnection(@NonNull Connection connection, - @NonNull HostSpec hostSpec, @Nullable ConnectionPlugin skipNotificationForThisPlugin) - throws SQLException { - return null; - } - - @Override - public List getAllHosts() { - return null; - } - - @Override - public List getHosts() { - return null; - } - - @Override - public HostSpec getInitialConnectionHostSpec() { - return null; - } - - @Override - public String getOriginalUrl() { - return null; - } - - @Override - public void setAllowedAndBlockedHosts(AllowedAndBlockedHosts allowedAndBlockedHosts) { - } - - @Override - public boolean acceptsStrategy(HostRole role, String strategy) { - return false; - } - - @Override - public HostSpec getHostSpecByStrategy(HostRole role, String strategy) { - return null; - } - - @Override - public HostSpec getHostSpecByStrategy(List hosts, HostRole role, String strategy) { - return null; - } - - @Override - public HostRole getHostRole(Connection conn) { - return null; - } - - @Override - public void setAvailability(Set hostAliases, HostAvailability availability) { - } - - @Override - public boolean isInTransaction() { - return false; - } - - @Override - public HostListProvider getHostListProvider() { - return null; - } - - @Override - public void refreshHostList() throws SQLException { - } - - @Override - public void refreshHostList(Connection connection) throws SQLException { - } - - @Override - public void forceRefreshHostList() throws SQLException { - } - - @Override - public void forceRefreshHostList(Connection connection) throws SQLException { - } - - @Override - public boolean forceRefreshHostList(final boolean shouldVerifyWriter, long timeoutMs) - throws SQLException { - return false; - } - - @Override - public Connection connect(HostSpec hostSpec, Properties props, @Nullable ConnectionPlugin pluginToSkip) - throws SQLException { - return new TestConnection(); - } - - @Override - public Connection connect(HostSpec hostSpec, Properties props) throws SQLException { - return this.connect(hostSpec, props, null); - } - - 
@Override - public Connection forceConnect(HostSpec hostSpec, Properties props) throws SQLException { - return this.forceConnect(hostSpec, props, null); - } - - @Override - public Connection forceConnect(HostSpec hostSpec, Properties props, @Nullable ConnectionPlugin pluginToSkip) - throws SQLException { - return new TestConnection(); - } - - @Override - public TelemetryFactory getTelemetryFactory() { - return null; - } - - @Override - public String getTargetName() { - return null; - } - - @Override - public @NonNull SessionStateService getSessionStateService() { - return new TestSessionStateService(); - } - - @Override - public T getPlugin(Class pluginClazz) { - return null; - } - - @Override - public boolean isNetworkException(Throwable throwable) { - return false; - } - - @Override - public boolean isNetworkException(Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect) { - return false; - } - - @Override - public boolean isNetworkException(String sqlState) { - return false; - } - - @Override - public boolean isLoginException(String sqlState) { - return false; - } - - @Override - public boolean isLoginException(Throwable throwable) { - return false; - } - - @Override - public boolean isLoginException(Throwable throwable, @Nullable TargetDriverDialect targetDriverDialect) { - return false; - } - - @Override - public Dialect getDialect() { - return new UnknownDialect(); - } - - @Override - public TargetDriverDialect getTargetDriverDialect() { - return new PgTargetDriverDialect(); - } - - public void updateDialect(final @NonNull Connection connection) throws SQLException { } - - @Override - public HostSpec identifyConnection(Connection connection) throws SQLException { - return null; - } - - @Override - public void fillAliases(Connection connection, HostSpec hostSpec) throws SQLException { - - } - - @Override - public HostSpecBuilder getHostSpecBuilder() { - return new HostSpecBuilder(new SimpleHostAvailabilityStrategy()); - } - - @Override - public ConnectionProvider getConnectionProvider() { - return null; - } - - @Override - public boolean isPooledConnectionProvider(HostSpec host, Properties props) { - return false; - } - - @Override - public String getDriverProtocol() { - return null; - } - - @Override - public Properties getProperties() { - return null; - } - - @Override - public void setStatus(final Class clazz, final @NonNull T status, final boolean clusterBound) { - throw new UnsupportedOperationException(); - } - - @Override - public void setStatus(final Class clazz, final @Nullable T status, final String key) { - throw new UnsupportedOperationException(); - } - - @Override - public T getStatus(final @NonNull Class clazz, final boolean clusterBound) { - throw new UnsupportedOperationException(); - } - - public T getStatus(final @NonNull Class clazz, String key) { - throw new UnsupportedOperationException(); - } - - @Override - public boolean isPluginInUse(Class pluginClazz) { - return false; - } - } - - public static class TestConnection implements Connection { - - @Override - public Statement createStatement() throws SQLException { - return null; - } - - @Override - public PreparedStatement prepareStatement(String sql) throws SQLException { - return null; - } - - @Override - public CallableStatement prepareCall(String sql) throws SQLException { - return null; - } - - @Override - public String nativeSQL(String sql) throws SQLException { - return null; - } - - @Override - public void setAutoCommit(boolean autoCommit) throws SQLException { - - } - - @Override - 
public boolean getAutoCommit() throws SQLException { - return false; - } - - @Override - public void commit() throws SQLException { - - } - - @Override - public void rollback() throws SQLException { - - } - - @Override - public void close() throws SQLException { - - } - - @Override - public boolean isClosed() throws SQLException { - return false; - } - - @Override - public DatabaseMetaData getMetaData() throws SQLException { - return null; - } - - @Override - public void setReadOnly(boolean readOnly) throws SQLException { - - } - - @Override - public boolean isReadOnly() throws SQLException { - return false; - } - - @Override - public void setCatalog(String catalog) throws SQLException { - - } - - @Override - public String getCatalog() throws SQLException { - return null; - } - - @Override - public void setTransactionIsolation(int level) throws SQLException { - - } - - @Override - public int getTransactionIsolation() throws SQLException { - return 0; - } - - @Override - public SQLWarning getWarnings() throws SQLException { - return null; - } - - @Override - public void clearWarnings() throws SQLException { - - } - - @Override - public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException { - return null; - } - - @Override - public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) - throws SQLException { - return null; - } - - @Override - public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { - return null; - } - - @Override - public Map> getTypeMap() throws SQLException { - return null; - } - - @Override - public void setTypeMap(Map> map) throws SQLException { - - } - - @Override - public void setHoldability(int holdability) throws SQLException { - - } - - @Override - public int getHoldability() throws SQLException { - return 0; - } - - @Override - public Savepoint setSavepoint() throws SQLException { - return null; - } - - @Override - public Savepoint setSavepoint(String name) throws SQLException { - return null; - } - - @Override - public void rollback(Savepoint savepoint) throws SQLException { - - } - - @Override - public void releaseSavepoint(Savepoint savepoint) throws SQLException { - - } - - @Override - public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) - throws SQLException { - return null; - } - - @Override - public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, - int resultSetHoldability) throws SQLException { - return null; - } - - @Override - public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, - int resultSetHoldability) throws SQLException { - return null; - } - - @Override - public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { - return null; - } - - @Override - public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { - return null; - } - - @Override - public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException { - return null; - } - - @Override - public Clob createClob() throws SQLException { - return null; - } - - @Override - public Blob createBlob() throws SQLException { - return null; - } - - @Override - public NClob createNClob() throws SQLException { - return null; - } - - @Override - public SQLXML createSQLXML() throws SQLException { - return null; - } - - @Override - public 
boolean isValid(int timeout) throws SQLException { - return true; - } - - @Override - public void setClientInfo(String name, String value) throws SQLClientInfoException { - - } - - @Override - public void setClientInfo(Properties properties) throws SQLClientInfoException { - - } - - @Override - public String getClientInfo(String name) throws SQLException { - return null; - } - - @Override - public Properties getClientInfo() throws SQLException { - return null; - } - - @Override - public Array createArrayOf(String typeName, Object[] elements) throws SQLException { - return null; - } - - @Override - public Struct createStruct(String typeName, Object[] attributes) throws SQLException { - return null; - } - - @Override - public void setSchema(String schema) throws SQLException { - - } - - @Override - public String getSchema() throws SQLException { - return null; - } - - @Override - public void abort(Executor executor) throws SQLException { - - } - - @Override - public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { - - } - - @Override - public int getNetworkTimeout() throws SQLException { - return 0; - } - - @Override - public T unwrap(Class iface) throws SQLException { - return null; - } - - @Override - public boolean isWrapperFor(Class iface) throws SQLException { - return false; - } - } -} diff --git a/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/MonitorConnectionContextTest.java b/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/HostHostMonitorConnectionContextTest.java similarity index 94% rename from wrapper/src/test/java/software/amazon/jdbc/plugin/efm/MonitorConnectionContextTest.java rename to wrapper/src/test/java/software/amazon/jdbc/plugin/efm/HostHostMonitorConnectionContextTest.java index e307394ba..7066bbe3c 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/MonitorConnectionContextTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/HostHostMonitorConnectionContextTest.java @@ -35,24 +35,24 @@ import org.mockito.MockitoAnnotations; import software.amazon.jdbc.util.telemetry.TelemetryCounter; -class MonitorConnectionContextTest { +class HostHostMonitorConnectionContextTest { private static final long FAILURE_DETECTION_TIME_MILLIS = 10; private static final long FAILURE_DETECTION_INTERVAL_MILLIS = 100; private static final long FAILURE_DETECTION_COUNT = 3; private static final long VALIDATION_INTERVAL_MILLIS = 50; - private MonitorConnectionContext context; + private HostMonitorConnectionContext context; private AutoCloseable closeable; @Mock Connection connectionToAbort; - @Mock Monitor monitor; + @Mock HostMonitor monitor; @Mock TelemetryCounter abortedConnectionsCounter; @BeforeEach void init() { closeable = MockitoAnnotations.openMocks(this); context = - new MonitorConnectionContext( + new HostMonitorConnectionContext( monitor, null, FAILURE_DETECTION_TIME_MILLIS, @@ -134,7 +134,7 @@ void test_updateConnectionStatus_inactiveContext(boolean isValid) { final long currentTime = System.nanoTime(); final long statusCheckStartTime = System.nanoTime() - FAILURE_DETECTION_TIME_MILLIS; - final MonitorConnectionContext spyContext = spy(context); + final HostMonitorConnectionContext spyContext = spy(context); spyContext.updateConnectionStatus("test-node", statusCheckStartTime, currentTime, isValid); @@ -148,7 +148,7 @@ void test_updateConnectionStatus() { final long statusCheckStartTime = System.nanoTime() - 1000; context.setInactive(); - final MonitorConnectionContext spyContext = spy(context); + final 
HostMonitorConnectionContext spyContext = spy(context); spyContext.updateConnectionStatus("test-node", statusCheckStartTime, currentTime, true); @@ -159,7 +159,7 @@ void test_updateConnectionStatus() { @Test void test_abortConnection_ignoresSqlException() throws SQLException { context = - new MonitorConnectionContext( + new HostMonitorConnectionContext( monitor, connectionToAbort, FAILURE_DETECTION_TIME_MILLIS, diff --git a/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/MonitorServiceImplTest.java b/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/HostHostMonitorServiceImplTest.java similarity index 85% rename from wrapper/src/test/java/software/amazon/jdbc/plugin/efm/MonitorServiceImplTest.java rename to wrapper/src/test/java/software/amazon/jdbc/plugin/efm/HostHostMonitorServiceImplTest.java index ce70f0f12..e89279279 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/MonitorServiceImplTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/HostHostMonitorServiceImplTest.java @@ -49,7 +49,7 @@ import software.amazon.jdbc.util.telemetry.TelemetryCounter; import software.amazon.jdbc.util.telemetry.TelemetryFactory; -class MonitorServiceImplTest { +class HostHostMonitorServiceImplTest { private static final Set<String> NODE_KEYS = new HashSet<>(Collections.singletonList("any.node.domain")); @@ -57,10 +57,10 @@ class MonitorServiceImplTest { private static final int FAILURE_DETECTION_INTERVAL_MILLIS = 100; private static final int FAILURE_DETECTION_COUNT = 3; - @Mock private MonitorInitializer monitorInitializer; + @Mock private HostMonitorInitializer monitorInitializer; @Mock private ExecutorServiceInitializer executorServiceInitializer; - @Mock private Monitor monitorA; - @Mock private Monitor monitorB; + @Mock private HostMonitor monitorA; + @Mock private HostMonitor monitorB; @Mock private ExecutorService executorService; @Mock private Future<?> task; @Mock private HostSpec hostSpec; @@ -71,28 +71,28 @@ class MonitorServiceImplTest { private Properties properties; private AutoCloseable closeable; - private MonitorServiceImpl monitorService; - private MonitorThreadContainer threadContainer; - private ArgumentCaptor<MonitorConnectionContext> contextCaptor; + private HostMonitorServiceImpl monitorService; + private HostMonitorThreadContainer threadContainer; + private ArgumentCaptor<HostMonitorConnectionContext> contextCaptor; @BeforeEach void init() { properties = new Properties(); closeable = MockitoAnnotations.openMocks(this); - contextCaptor = ArgumentCaptor.forClass(MonitorConnectionContext.class); + contextCaptor = ArgumentCaptor.forClass(HostMonitorConnectionContext.class); when(pluginService.getTelemetryFactory()).thenReturn(telemetryFactory); when(telemetryFactory.createCounter(anyString())).thenReturn(telemetryCounter); when(monitorInitializer.createMonitor( - any(HostSpec.class), any(Properties.class), any(MonitorThreadContainer.class))) + any(HostSpec.class), any(Properties.class), any(HostMonitorThreadContainer.class))) .thenReturn(monitorA, monitorB); when(executorServiceInitializer.createExecutorService()).thenReturn(executorService); - doReturn(task).when(executorService).submit(any(Monitor.class)); + doReturn(task).when(executorService).submit(any(HostMonitor.class)); - threadContainer = MonitorThreadContainer.getInstance(executorServiceInitializer); - monitorService = new MonitorServiceImpl(pluginService, monitorInitializer, executorServiceInitializer); + threadContainer = HostMonitorThreadContainer.getInstance(executorServiceInitializer); + monitorService = new
HostMonitorServiceImpl(pluginService, monitorInitializer, executorServiceInitializer); } @AfterEach @@ -142,7 +142,7 @@ void test_startMonitoringCalledMultipleTimes() { void test_stopMonitoringWithInterruptedThread() { doNothing().when(monitorA).stopMonitoring(contextCaptor.capture()); - final MonitorConnectionContext context = + final HostMonitorConnectionContext context = monitorService.startMonitoring( connection, NODE_KEYS, @@ -162,7 +162,7 @@ void test_stopMonitoringWithInterruptedThread() { void test_stopMonitoringCalledTwice() { doNothing().when(monitorA).stopMonitoring(contextCaptor.capture()); - final MonitorConnectionContext context = + final HostMonitorConnectionContext context = monitorService.startMonitoring( connection, NODE_KEYS, @@ -213,11 +213,11 @@ void test_getMonitorCalledWithMultipleNodesInKeys() { final Set nodeKeysTwo = new HashSet<>(); nodeKeysTwo.add("nodeTwo.domain"); - final Monitor monitorOne = monitorService.getMonitor(nodeKeys, hostSpec, properties); + final HostMonitor monitorOne = monitorService.getMonitor(nodeKeys, hostSpec, properties); assertNotNull(monitorOne); // Should get the same monitor as before as contain the same key "nodeTwo.domain" - final Monitor monitorOneSame = monitorService.getMonitor(nodeKeysTwo, hostSpec, properties); + final HostMonitor monitorOneSame = monitorService.getMonitor(nodeKeysTwo, hostSpec, properties); assertNotNull(monitorOneSame); assertEquals(monitorOne, monitorOneSame); @@ -230,16 +230,16 @@ void test_getMonitorCalledWithDifferentNodeKeys() { final Set nodeKeys = new HashSet<>(); nodeKeys.add("nodeNEW.domain"); - final Monitor monitorOne = monitorService.getMonitor(nodeKeys, hostSpec, properties); + final HostMonitor monitorOne = monitorService.getMonitor(nodeKeys, hostSpec, properties); assertNotNull(monitorOne); // Ensuring monitor is the same one and not creating a new one - final Monitor monitorOneDupe = monitorService.getMonitor(nodeKeys, hostSpec, properties); + final HostMonitor monitorOneDupe = monitorService.getMonitor(nodeKeys, hostSpec, properties); assertEquals(monitorOne, monitorOneDupe); // Ensuring monitors are not the same as they have different keys // "any.node.domain" compared to "nodeNEW.domain" - final Monitor monitorTwo = monitorService.getMonitor(NODE_KEYS, hostSpec, properties); + final HostMonitor monitorTwo = monitorService.getMonitor(NODE_KEYS, hostSpec, properties); assertNotNull(monitorTwo); assertNotEquals(monitorOne, monitorTwo); } @@ -256,16 +256,16 @@ void test_getMonitorCalledWithSameKeysInDifferentNodeKeys() { final Set nodeKeysThree = new HashSet<>(); nodeKeysThree.add("nodeB"); - final Monitor monitorOne = monitorService.getMonitor(nodeKeys, hostSpec, properties); + final HostMonitor monitorOne = monitorService.getMonitor(nodeKeys, hostSpec, properties); assertNotNull(monitorOne); // Add a new key using the same monitor // Adding "nodeB" as a new key using the same monitor as "nodeA" - final Monitor monitorOneDupe = monitorService.getMonitor(nodeKeysTwo, hostSpec, properties); + final HostMonitor monitorOneDupe = monitorService.getMonitor(nodeKeysTwo, hostSpec, properties); assertEquals(monitorOne, monitorOneDupe); // Using new keyset but same node, "nodeB" should return same monitor - final Monitor monitorOneDupeAgain = + final HostMonitor monitorOneDupeAgain = monitorService.getMonitor(nodeKeysThree, hostSpec, properties); assertEquals(monitorOne, monitorOneDupeAgain); diff --git a/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/MonitorImplTest.java 
b/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/HostMonitorImplTest.java similarity index 85% rename from wrapper/src/test/java/software/amazon/jdbc/plugin/efm/MonitorImplTest.java rename to wrapper/src/test/java/software/amazon/jdbc/plugin/efm/HostMonitorImplTest.java index 13f5c2363..8e460dfee 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/MonitorImplTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/HostMonitorImplTest.java @@ -54,20 +54,20 @@ import software.amazon.jdbc.util.telemetry.TelemetryCounter; import software.amazon.jdbc.util.telemetry.TelemetryFactory; -class MonitorImplTest { +class HostMonitorImplTest { @Mock PluginService pluginService; @Mock Connection connection; @Mock HostSpec hostSpec; @Mock Properties properties; - @Mock MonitorConnectionContext contextWithShortInterval; - @Mock MonitorConnectionContext contextWithLongInterval; + @Mock HostMonitorConnectionContext contextWithShortInterval; + @Mock HostMonitorConnectionContext contextWithLongInterval; @Mock BooleanProperty booleanProperty; @Mock LongProperty longProperty; @Mock ExecutorServiceInitializer executorServiceInitializer; @Mock ExecutorService executorService; @Mock Future futureResult; - @Mock MonitorServiceImpl monitorService; + @Mock HostMonitorServiceImpl monitorService; @Mock TelemetryFactory telemetryFactory; @Mock TelemetryContext telemetryContext; @Mock TelemetryCounter telemetryCounter; @@ -78,8 +78,8 @@ class MonitorImplTest { private static final long LONG_INTERVAL_MILLIS = 300; private AutoCloseable closeable; - private MonitorImpl monitor; - private MonitorThreadContainer threadContainer; + private HostMonitorImpl monitor; + private HostMonitorThreadContainer threadContainer; @BeforeEach void init() throws SQLException { @@ -99,21 +99,21 @@ void init() throws SQLException { when(telemetryFactory.openTelemetryContext(eq(null), any())).thenReturn(telemetryContext); when(telemetryFactory.createCounter(anyString())).thenReturn(telemetryCounter); when(executorServiceInitializer.createExecutorService()).thenReturn(executorService); - threadContainer = MonitorThreadContainer.getInstance(executorServiceInitializer); + threadContainer = HostMonitorThreadContainer.getInstance(executorServiceInitializer); - monitor = spy(new MonitorImpl(pluginService, hostSpec, properties, 0L, threadContainer)); + monitor = spy(new HostMonitorImpl(pluginService, hostSpec, properties, 0L, threadContainer)); } @AfterEach void cleanUp() throws Exception { monitorService.releaseResources(); - MonitorThreadContainer.releaseInstance(); + HostMonitorThreadContainer.releaseInstance(); closeable.close(); } @Test void test_5_isConnectionHealthyWithNoExistingConnection() throws SQLException { - final MonitorImpl.ConnectionStatus status = + final HostMonitorImpl.ConnectionStatus status = monitor.checkConnectionStatus(SHORT_INTERVAL_MILLIS); verify(pluginService).forceConnect(any(HostSpec.class), any(Properties.class)); @@ -129,11 +129,11 @@ void test_6_isConnectionHealthyWithExistingConnection() throws SQLException { // Start up a monitoring connection. 
monitor.checkConnectionStatus(SHORT_INTERVAL_MILLIS); - final MonitorImpl.ConnectionStatus status1 = + final HostMonitorImpl.ConnectionStatus status1 = monitor.checkConnectionStatus(SHORT_INTERVAL_MILLIS); assertTrue(status1.isValid); - final MonitorImpl.ConnectionStatus status2 = + final HostMonitorImpl.ConnectionStatus status2 = monitor.checkConnectionStatus(SHORT_INTERVAL_MILLIS); assertFalse(status2.isValid); @@ -150,7 +150,7 @@ void test_7_isConnectionHealthyWithSQLException() throws SQLException { assertDoesNotThrow( () -> { - final MonitorImpl.ConnectionStatus status = + final HostMonitorImpl.ConnectionStatus status = monitor.checkConnectionStatus(SHORT_INTERVAL_MILLIS); assertFalse(status.isValid); assertTrue(status.elapsedTimeNano >= 0); @@ -159,8 +159,8 @@ void test_7_isConnectionHealthyWithSQLException() throws SQLException { @Test void test_8_runWithoutContext() { - final Map<String, Monitor> monitorMap = threadContainer.getMonitorMap(); - final Map<Monitor, Future<?>> taskMap = threadContainer.getTasksMap(); + final Map<String, HostMonitor> monitorMap = threadContainer.getMonitorMap(); + final Map<HostMonitor, Future<?>> taskMap = threadContainer.getTasksMap(); // Put monitor into container map final String nodeKey = "monitorA"; @@ -176,13 +176,13 @@ void test_8_runWithoutContext() { assertNull(taskMap.get(monitor)); // Clean-up - MonitorThreadContainer.releaseInstance(); + HostMonitorThreadContainer.releaseInstance(); } @RepeatedTest(1000) void test_9_runWithContext() { - final Map<String, Monitor> monitorMap = threadContainer.getMonitorMap(); - final Map<Monitor, Future<?>> taskMap = threadContainer.getTasksMap(); + final Map<String, HostMonitor> monitorMap = threadContainer.getMonitorMap(); + final Map<HostMonitor, Future<?>> taskMap = threadContainer.getTasksMap(); // Put monitor into container map final String nodeKey = "monitorA"; @@ -214,7 +214,7 @@ void test_9_runWithContext() { assertNull(taskMap.get(monitor)); // Clean-up - MonitorThreadContainer.releaseInstance(); + HostMonitorThreadContainer.releaseInstance(); } @Test @@ -223,8 +223,8 @@ void test_10_ensureStoppedMonitorIsRemovedFromMap() throws InterruptedException when(contextWithShortInterval.getExpectedActiveMonitoringStartTimeNano()).thenReturn(999999999999999L); doThrow(new InterruptedException("Test")).when(monitor).sleep(anyLong()); monitor.activeContexts.add(contextWithShortInterval); - final Map<String, Monitor> monitorMap = threadContainer.getMonitorMap(); - final Map<Monitor, Future<?>> taskMap = threadContainer.getTasksMap(); + final Map<String, HostMonitor> monitorMap = threadContainer.getMonitorMap(); + final Map<HostMonitor, Future<?>> taskMap = threadContainer.getTasksMap(); // Put monitor into container map final String nodeKey = "monitorA"; @@ -242,6 +242,6 @@ void test_10_ensureStoppedMonitorIsRemovedFromMap() throws InterruptedException assertNull(taskMap.get(monitor)); // Clean-up - MonitorThreadContainer.releaseInstance(); + HostMonitorThreadContainer.releaseInstance(); } } diff --git a/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/HostMonitoringConnectionPluginTest.java b/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/HostMonitoringConnectionPluginTest.java index 1c3d83359..63320e95d 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/HostMonitoringConnectionPluginTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/HostMonitoringConnectionPluginTest.java @@ -85,11 +85,11 @@ class HostMonitoringConnectionPluginTest { Properties properties = new Properties(); @Mock HostSpec hostSpec; @Mock HostSpec hostSpec2; - @Mock Supplier<MonitorService> supplier; + @Mock Supplier<HostMonitorService> supplier; @Mock RdsUtils rdsUtils; - @Mock MonitorConnectionContext context; + @Mock HostMonitorConnectionContext context; @Mock
ReentrantLock mockReentrantLock; - @Mock MonitorService monitorService; + @Mock HostMonitorService monitorService; @Mock JdbcCallable sqlFunction; @Mock TargetDriverDialect targetDriverDialect; @@ -165,15 +165,6 @@ private void initializePlugin() { plugin = new HostMonitoringConnectionPlugin(pluginService, properties, supplier, rdsUtils); } - @ParameterizedTest - @MethodSource("generateNullArguments") - void test_initWithNullArguments( - final PluginService pluginService, final Properties properties) { - assertThrows( - IllegalArgumentException.class, - () -> new HostMonitoringConnectionPlugin(pluginService, properties)); - } - @Test void test_executeWithMonitoringDisabled() throws Exception { properties.put("failureDetectionEnabled", Boolean.FALSE.toString()); diff --git a/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/MultiThreadedDefaultMonitorServiceTest.java b/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/MultiThreadedDefaultHostHostMonitorServiceTest.java similarity index 77% rename from wrapper/src/test/java/software/amazon/jdbc/plugin/efm/MultiThreadedDefaultMonitorServiceTest.java rename to wrapper/src/test/java/software/amazon/jdbc/plugin/efm/MultiThreadedDefaultHostHostMonitorServiceTest.java index c408d1398..b4e76d3ac 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/MultiThreadedDefaultMonitorServiceTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/MultiThreadedDefaultHostHostMonitorServiceTest.java @@ -60,17 +60,17 @@ import software.amazon.jdbc.util.telemetry.TelemetryFactory; /** - * Multithreaded tests for {@link MultiThreadedDefaultMonitorServiceTest}. Repeats each testcase + * Multithreaded tests for {@link MultiThreadedDefaultHostHostMonitorServiceTest}. Repeats each testcase * multiple times. Use a cyclic barrier to ensure threads start at the same time. 
*/ -class MultiThreadedDefaultMonitorServiceTest { +class MultiThreadedDefaultHostHostMonitorServiceTest { - @Mock MonitorInitializer monitorInitializer; + @Mock HostMonitorInitializer monitorInitializer; @Mock ExecutorServiceInitializer executorServiceInitializer; @Mock ExecutorService service; @Mock Future<?> taskA; @Mock HostSpec hostSpec; - @Mock Monitor monitor; + @Mock HostMonitor monitor; @Mock Properties properties; @Mock JdbcConnection connection; @Mock PluginService pluginService; @@ -90,24 +90,24 @@ class MultiThreadedDefaultMonitorServiceTest { "Test thread interrupted due to an unexpected exception."; private AutoCloseable closeable; - private ArgumentCaptor<MonitorConnectionContext> startMonitoringCaptor; - private ArgumentCaptor<MonitorConnectionContext> stopMonitoringCaptor; - private MonitorThreadContainer monitorThreadContainer; + private ArgumentCaptor<HostMonitorConnectionContext> startMonitoringCaptor; + private ArgumentCaptor<HostMonitorConnectionContext> stopMonitoringCaptor; + private HostMonitorThreadContainer monitorThreadContainer; @BeforeEach void init(TestInfo testInfo) { closeable = MockitoAnnotations.openMocks(this); - startMonitoringCaptor = ArgumentCaptor.forClass(MonitorConnectionContext.class); - stopMonitoringCaptor = ArgumentCaptor.forClass(MonitorConnectionContext.class); - monitorThreadContainer = MonitorThreadContainer.getInstance(); + startMonitoringCaptor = ArgumentCaptor.forClass(HostMonitorConnectionContext.class); + stopMonitoringCaptor = ArgumentCaptor.forClass(HostMonitorConnectionContext.class); + monitorThreadContainer = HostMonitorThreadContainer.getInstance(); CONCURRENT_TEST_MAP.computeIfAbsent(testInfo.getDisplayName(), k -> new AtomicBoolean(false)); when(monitorInitializer.createMonitor( - any(HostSpec.class), any(Properties.class), any(MonitorThreadContainer.class))) + any(HostSpec.class), any(Properties.class), any(HostMonitorThreadContainer.class))) .thenReturn(monitor); when(executorServiceInitializer.createExecutorService()).thenReturn(service); - doReturn(taskA).when(service).submit(any(Monitor.class)); + doReturn(taskA).when(service).submit(any(HostMonitor.class)); doNothing().when(monitor).startMonitoring(startMonitoringCaptor.capture()); doNothing().when(monitor).stopMonitoring(stopMonitoringCaptor.capture()); when(properties.getProperty(any(String.class))) @@ -126,7 +126,7 @@ void cleanUp(TestInfo testInfo) throws Exception { concurrentCounter.set(0); closeable.close(); - MonitorThreadContainer.releaseInstance(); + HostMonitorThreadContainer.releaseInstance(); } /** Ensure each test case was executed concurrently at least once.
*/ @@ -144,13 +144,13 @@ void test_1_startMonitoring_multipleConnectionsToDifferentNodes() throws ExecutionException, InterruptedException { final int numConnections = 10; final List> nodeKeyList = generateNodeKeys(numConnections, true); - final List services = generateServices(numConnections); + final List services = generateServices(numConnections); try { - final List contexts = + final List contexts = runStartMonitor(numConnections, services, nodeKeyList); - final List capturedContexts = startMonitoringCaptor.getAllValues(); + final List capturedContexts = startMonitoringCaptor.getAllValues(); assertEquals(numConnections, services.get(0).getThreadContainer().getMonitorMap().size()); assertTrue( @@ -158,7 +158,7 @@ void test_1_startMonitoring_multipleConnectionsToDifferentNodes() && contexts.containsAll(capturedContexts) && capturedContexts.containsAll(contexts)); verify(monitorInitializer, times(numConnections)) - .createMonitor(eq(hostSpec), eq(properties), any(MonitorThreadContainer.class)); + .createMonitor(eq(hostSpec), eq(properties), any(HostMonitorThreadContainer.class)); } finally { releaseResources(services); } @@ -169,13 +169,13 @@ void test_2_startMonitoring_multipleConnectionsToOneNode() throws InterruptedException, ExecutionException { final int numConnections = 10; final List> nodeKeyList = generateNodeKeys(numConnections, false); - final List services = generateServices(numConnections); + final List services = generateServices(numConnections); try { - final List contexts = + final List contexts = runStartMonitor(numConnections, services, nodeKeyList); - final List capturedContexts = startMonitoringCaptor.getAllValues(); + final List capturedContexts = startMonitoringCaptor.getAllValues(); assertEquals(1, services.get(0).getThreadContainer().getMonitorMap().size()); assertTrue( @@ -184,7 +184,7 @@ void test_2_startMonitoring_multipleConnectionsToOneNode() && capturedContexts.containsAll(contexts)); verify(monitorInitializer) - .createMonitor(eq(hostSpec), eq(properties), any(MonitorThreadContainer.class)); + .createMonitor(eq(hostSpec), eq(properties), any(HostMonitorThreadContainer.class)); } finally { releaseResources(services); } @@ -194,13 +194,13 @@ void test_2_startMonitoring_multipleConnectionsToOneNode() void test_3_stopMonitoring_multipleConnectionsToDifferentNodes() throws ExecutionException, InterruptedException { final int numConnections = 10; - final List contexts = generateContexts(numConnections, true); - final List services = generateServices(numConnections); + final List contexts = generateContexts(numConnections, true); + final List services = generateServices(numConnections); try { runStopMonitor(numConnections, services, contexts); - final List capturedContexts = stopMonitoringCaptor.getAllValues(); + final List capturedContexts = stopMonitoringCaptor.getAllValues(); assertTrue( (contexts.size() == capturedContexts.size()) && contexts.containsAll(capturedContexts) @@ -214,13 +214,13 @@ void test_3_stopMonitoring_multipleConnectionsToDifferentNodes() void test_4_stopMonitoring_multipleConnectionsToTheSameNode() throws ExecutionException, InterruptedException { final int numConnections = 10; - final List contexts = generateContexts(numConnections, false); - final List services = generateServices(numConnections); + final List contexts = generateContexts(numConnections, false); + final List services = generateServices(numConnections); try { runStopMonitor(numConnections, services, contexts); - final List capturedContexts = 
stopMonitoringCaptor.getAllValues(); + final List capturedContexts = stopMonitoringCaptor.getAllValues(); assertTrue( (contexts.size() == capturedContexts.size()) && contexts.containsAll(capturedContexts) @@ -231,7 +231,7 @@ void test_4_stopMonitoring_multipleConnectionsToTheSameNode() } /** - * Run {@link MonitorServiceImpl#startMonitoring(Connection, Set, HostSpec, Properties, int, int, + * Run {@link HostMonitorServiceImpl#startMonitoring(Connection, Set, HostSpec, Properties, int, int, * int)} concurrently in multiple threads. A {@link CountDownLatch} is used to ensure all threads * start at the same time. * @@ -242,16 +242,16 @@ void test_4_stopMonitoring_multipleConnectionsToTheSameNode() * @throws InterruptedException if a thread has been interrupted. * @throws ExecutionException if an exception occurred within a thread. */ - private List runStartMonitor( + private List runStartMonitor( final int numThreads, - final List services, + final List services, final List> nodeKeysList) throws InterruptedException, ExecutionException { final CountDownLatch latch = new CountDownLatch(1); - final List> threads = new ArrayList<>(); + final List> threads = new ArrayList<>(); for (int i = 0; i < numThreads; i++) { - final MonitorServiceImpl service = services.get(i); + final HostMonitorServiceImpl service = services.get(i); final Set nodeKeys = nodeKeysList.get(i); threads.add( @@ -270,7 +270,7 @@ private List runStartMonitor( concurrentCounter.getAndIncrement(); } - final MonitorConnectionContext context = + final HostMonitorConnectionContext context = service.startMonitoring( connection, nodeKeys, @@ -288,8 +288,8 @@ private List runStartMonitor( // Start all threads. latch.countDown(); - final List contexts = new ArrayList<>(); - for (final CompletableFuture thread : threads) { + final List contexts = new ArrayList<>(); + for (final CompletableFuture thread : threads) { contexts.add(thread.get()); } @@ -297,7 +297,7 @@ private List runStartMonitor( } /** - * Run {@link MonitorServiceImpl#stopMonitoring(MonitorConnectionContext)} concurrently in + * Run {@link HostMonitorServiceImpl#stopMonitoring(HostMonitorConnectionContext)} concurrently in * multiple threads. A {@link CountDownLatch} is used to ensure all threads start at the same * time. * @@ -309,15 +309,15 @@ private List runStartMonitor( */ private void runStopMonitor( final int numThreads, - final List services, - final List contexts) + final List services, + final List contexts) throws ExecutionException, InterruptedException { final CountDownLatch latch = new CountDownLatch(1); final List> threads = new ArrayList<>(); for (int i = 0; i < numThreads; i++) { - final MonitorServiceImpl service = services.get(i); - final MonitorConnectionContext context = contexts.get(i); + final HostMonitorServiceImpl service = services.get(i); + final HostMonitorConnectionContext context = contexts.get(i); threads.add( CompletableFuture.runAsync( @@ -380,16 +380,16 @@ private List> generateNodeKeys(final int numNodeKeys, final boolean * node keys. * @return the generated contexts. 
*/ - private List<MonitorConnectionContext> generateContexts( + private List<HostMonitorConnectionContext> generateContexts( final int numContexts, final boolean diffContext) { final List<Set<String>> nodeKeysList = generateNodeKeys(numContexts, diffContext); - final List<MonitorConnectionContext> contexts = new ArrayList<>(); + final List<HostMonitorConnectionContext> contexts = new ArrayList<>(); nodeKeysList.forEach( nodeKeys -> { monitorThreadContainer.getOrCreateMonitor(nodeKeys, () -> monitor); contexts.add( - new MonitorConnectionContext( + new HostMonitorConnectionContext( monitor, null, FAILURE_DETECTION_TIME, @@ -402,15 +402,15 @@ private List<MonitorConnectionContext> generateContexts( } /** - * Create multiple {@link MonitorServiceImpl} objects. + * Create multiple {@link HostMonitorServiceImpl} objects. * * @param numServices The number of monitor services to create. * @return a list of monitor services. */ - private List<MonitorServiceImpl> generateServices(final int numServices) { - final List<MonitorServiceImpl> services = new ArrayList<>(); + private List<HostMonitorServiceImpl> generateServices(final int numServices) { + final List<HostMonitorServiceImpl> services = new ArrayList<>(); for (int i = 0; i < numServices; i++) { - services.add(new MonitorServiceImpl(pluginService, monitorInitializer, executorServiceInitializer)); + services.add(new HostMonitorServiceImpl(pluginService, monitorInitializer, executorServiceInitializer)); } return services; } @@ -418,10 +418,10 @@ private List<MonitorServiceImpl> generateServices(final int numServices) { /** * Release any resources used by the given services. * - * @param services The {@link MonitorServiceImpl} services to clean. + * @param services The {@link HostMonitorServiceImpl} services to clean. */ - private void releaseResources(final List<MonitorServiceImpl> services) { - for (final MonitorServiceImpl defaultMonitorService : services) { + private void releaseResources(final List<HostMonitorServiceImpl> services) { + for (final HostMonitorServiceImpl defaultMonitorService : services) { defaultMonitorService.releaseResources(); } } diff --git a/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/MultiThreadedMonitorThreadContainerTest.java b/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/MultiThreadedHostMonitorThreadContainerTest.java similarity index 83% rename from wrapper/src/test/java/software/amazon/jdbc/plugin/efm/MultiThreadedMonitorThreadContainerTest.java rename to wrapper/src/test/java/software/amazon/jdbc/plugin/efm/MultiThreadedHostMonitorThreadContainerTest.java index d8f06798b..38d0f3191 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/MultiThreadedMonitorThreadContainerTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/plugin/efm/MultiThreadedHostMonitorThreadContainerTest.java @@ -30,7 +30,7 @@ import org.mockito.MockitoAnnotations; @Disabled -public class MultiThreadedMonitorThreadContainerTest { +public class MultiThreadedHostMonitorThreadContainerTest { @Mock ExecutorServiceInitializer mockExecutorServiceInitializer; @Mock ExecutorService mockExecutorService; @@ -46,17 +46,17 @@ void init() { @AfterEach void cleanup() throws Exception { closeable.close(); - MonitorThreadContainer.releaseInstance(); + HostMonitorThreadContainer.releaseInstance(); } @RepeatedTest(value = 1000, name = "MonitorThreadContainer ThreadPoolExecutor is not closed prematurely") void testThreadPoolExecutorNotClosedPrematurely() throws InterruptedException { - MonitorThreadContainer.getInstance(mockExecutorServiceInitializer); + HostMonitorThreadContainer.getInstance(mockExecutorServiceInitializer); ExecutorService executorService = Executors.newCachedThreadPool(); - executorService.execute(() -> MonitorThreadContainer.getInstance(mockExecutorServiceInitializer)); + executorService.execute(()
-> HostMonitorThreadContainer.getInstance(mockExecutorServiceInitializer)); Thread.sleep(3); - executorService.execute(MonitorThreadContainer::releaseInstance); + executorService.execute(HostMonitorThreadContainer::releaseInstance); executorService.shutdown(); verify(mockExecutorService, times(0)).shutdownNow(); diff --git a/wrapper/src/test/java/software/amazon/jdbc/util/events/BatchingEventPublisherTest.java b/wrapper/src/test/java/software/amazon/jdbc/util/events/BatchingEventPublisherTest.java new file mode 100644 index 000000000..a68bfe32a --- /dev/null +++ b/wrapper/src/test/java/software/amazon/jdbc/util/events/BatchingEventPublisherTest.java @@ -0,0 +1,78 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc.util.events; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +import java.sql.SQLException; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import software.amazon.jdbc.plugin.customendpoint.CustomEndpointInfo; + +class BatchingEventPublisherTest { + private AutoCloseable closeable; + @Mock private EventSubscriber subscriber; + + @BeforeEach + void setUp() throws SQLException { + closeable = MockitoAnnotations.openMocks(this); + } + + @AfterEach + void tearDown() throws Exception { + closeable.close(); + } + + @Test + public void testPublication() { + BatchingEventPublisher publisher = new BatchingEventPublisher() { + @Override + protected void initPublishingThread(long messageIntervalNanos) { + // Do nothing + } + }; + + Set> eventSubscriptions = new HashSet<>(Collections.singletonList(DataAccessEvent.class)); + publisher.subscribe(subscriber, eventSubscriptions); + publisher.subscribe(subscriber, eventSubscriptions); + assertEquals(1, publisher.subscribersMap.size()); + + DataAccessEvent event = new DataAccessEvent(CustomEndpointInfo.class, "key"); + publisher.publish(event); + publisher.publish(event); + publisher.sendMessages(); + assertTrue(publisher.eventMessages.isEmpty()); + + verify(subscriber, times(1)).processEvent(eq(event)); + + publisher.unsubscribe(subscriber, eventSubscriptions); + publisher.publish(event); + publisher.sendMessages(); + assertTrue(publisher.eventMessages.isEmpty()); + verify(subscriber, times(1)).processEvent(eq(event)); + } +} diff --git a/wrapper/src/test/java/software/amazon/jdbc/util/monitoring/MonitorServiceImplTest.java b/wrapper/src/test/java/software/amazon/jdbc/util/monitoring/MonitorServiceImplTest.java new file mode 100644 index 000000000..cd0bcbe3d --- /dev/null +++ 
b/wrapper/src/test/java/software/amazon/jdbc/util/monitoring/MonitorServiceImplTest.java @@ -0,0 +1,294 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc.util.monitoring; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; + +import java.sql.SQLException; +import java.util.Collections; +import java.util.HashSet; +import java.util.Properties; +import java.util.concurrent.TimeUnit; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import software.amazon.jdbc.dialect.Dialect; +import software.amazon.jdbc.plugin.customendpoint.CustomEndpointMonitorImpl; +import software.amazon.jdbc.targetdriverdialect.TargetDriverDialect; +import software.amazon.jdbc.util.events.EventPublisher; +import software.amazon.jdbc.util.storage.StorageService; +import software.amazon.jdbc.util.telemetry.TelemetryFactory; + +class MonitorServiceImplTest { + @Mock StorageService storageService; + @Mock TelemetryFactory telemetryFactory; + @Mock TargetDriverDialect targetDriverDialect; + @Mock Dialect dbDialect; + @Mock EventPublisher publisher; + MonitorServiceImpl monitorService; + private AutoCloseable closeable; + + @BeforeEach + void setUp() { + closeable = MockitoAnnotations.openMocks(this); + monitorService = new MonitorServiceImpl(publisher) { + @Override + protected void initCleanupThread(long cleanupIntervalNanos) { + // Do nothing + } + }; + } + + @AfterEach + void tearDown() throws Exception { + closeable.close(); + monitorService.releaseResources(); + } + + @Test + public void testMonitorError_monitorReCreated() throws SQLException, InterruptedException { + monitorService.registerMonitorTypeIfAbsent( + NoOpMonitor.class, + TimeUnit.MINUTES.toNanos(1), + TimeUnit.MINUTES.toNanos(1), + new HashSet<>(Collections.singletonList(MonitorErrorResponse.RECREATE)), + null + ); + String key = "testMonitor"; + NoOpMonitor monitor = monitorService.runIfAbsent( + NoOpMonitor.class, + key, + storageService, + telemetryFactory, + "jdbc:postgresql://somehost/somedb", + "someProtocol", + targetDriverDialect, + dbDialect, + new Properties(), + (connectionService, pluginService) -> new NoOpMonitor(monitorService, 30) + ); + + Monitor storedMonitor = monitorService.get(NoOpMonitor.class, key); + assertNotNull(storedMonitor); + assertEquals(monitor, storedMonitor); + // need to wait to give time for the monitor executor to start the monitor thread. 
+ TimeUnit.MILLISECONDS.sleep(250); + assertEquals(MonitorState.RUNNING, monitor.getState()); + + monitor.state.set(MonitorState.ERROR); + monitorService.checkMonitors(); + + assertEquals(MonitorState.STOPPED, monitor.getState()); + + Monitor newMonitor = monitorService.get(NoOpMonitor.class, key); + assertNotNull(newMonitor); + assertNotEquals(monitor, newMonitor); + // need to wait to give time for the monitor executor to start the monitor thread. + TimeUnit.MILLISECONDS.sleep(250); + assertEquals(MonitorState.RUNNING, newMonitor.getState()); + } + + @Test + public void testMonitorStuck_monitorReCreated() throws SQLException, InterruptedException { + monitorService.registerMonitorTypeIfAbsent( + NoOpMonitor.class, + TimeUnit.MINUTES.toNanos(1), + 1, // heartbeat times out immediately + new HashSet<>(Collections.singletonList(MonitorErrorResponse.RECREATE)), + null + ); + String key = "testMonitor"; + NoOpMonitor monitor = monitorService.runIfAbsent( + NoOpMonitor.class, + key, + storageService, + telemetryFactory, + "jdbc:postgresql://somehost/somedb", + "someProtocol", + targetDriverDialect, + dbDialect, + new Properties(), + (connectionService, pluginService) -> new NoOpMonitor(monitorService, 30) + ); + + Monitor storedMonitor = monitorService.get(NoOpMonitor.class, key); + assertNotNull(storedMonitor); + assertEquals(monitor, storedMonitor); + // need to wait to give time for the monitor executor to start the monitor thread. + TimeUnit.MILLISECONDS.sleep(250); + assertEquals(MonitorState.RUNNING, monitor.getState()); + + // checkMonitors() should detect the heartbeat/inactivity timeout, stop the monitor, and re-create a new one. + monitorService.checkMonitors(); + + assertEquals(MonitorState.STOPPED, monitor.getState()); + + Monitor newMonitor = monitorService.get(NoOpMonitor.class, key); + assertNotNull(newMonitor); + assertNotEquals(monitor, newMonitor); + // need to wait to give time for the monitor executor to start the monitor thread. + TimeUnit.MILLISECONDS.sleep(250); + assertEquals(MonitorState.RUNNING, newMonitor.getState()); + } + + @Test + public void testMonitorExpired() throws SQLException, InterruptedException { + monitorService.registerMonitorTypeIfAbsent( + NoOpMonitor.class, + TimeUnit.MILLISECONDS.toNanos(200), // monitor expires after 200ms + TimeUnit.MINUTES.toNanos(1), + // even though we pass a re-create policy, we should not re-create it if the monitor is expired since this + // indicates it is not being used. + new HashSet<>(Collections.singletonList(MonitorErrorResponse.RECREATE)), + null + ); + String key = "testMonitor"; + NoOpMonitor monitor = monitorService.runIfAbsent( + NoOpMonitor.class, + key, + storageService, + telemetryFactory, + "jdbc:postgresql://somehost/somedb", + "someProtocol", + targetDriverDialect, + dbDialect, + new Properties(), + (connectionService, pluginService) -> new NoOpMonitor(monitorService, 30) + ); + + Monitor storedMonitor = monitorService.get(NoOpMonitor.class, key); + assertNotNull(storedMonitor); + assertEquals(monitor, storedMonitor); + // need to wait to give time for the monitor executor to start the monitor thread. + TimeUnit.MILLISECONDS.sleep(250); + assertEquals(MonitorState.RUNNING, monitor.getState()); + + // checkMonitors() should detect the expiration timeout and stop/remove the monitor. 
+ monitorService.checkMonitors(); + + assertEquals(MonitorState.STOPPED, monitor.getState()); + + Monitor newMonitor = monitorService.get(NoOpMonitor.class, key); + // monitor should have been removed when checkMonitors() was called. + assertNull(newMonitor); + } + + @Test + public void testMonitorMismatch() { + assertThrows(IllegalStateException.class, () -> monitorService.runIfAbsent( + CustomEndpointMonitorImpl.class, + "testMonitor", + storageService, + telemetryFactory, + "jdbc:postgresql://somehost/somedb", + "someProtocol", + targetDriverDialect, + dbDialect, + new Properties(), + // indicated monitor class is CustomEndpointMonitorImpl, but actual monitor is NoOpMonitor. The monitor + // service should detect this and throw an exception. + (connectionService, pluginService) -> new NoOpMonitor(monitorService, 30) + )); + } + + @Test + public void testRemove() throws SQLException, InterruptedException { + monitorService.registerMonitorTypeIfAbsent( + NoOpMonitor.class, + TimeUnit.MINUTES.toNanos(1), + TimeUnit.MINUTES.toNanos(1), + // even though we pass a re-create policy, we should not re-create it if the monitor is expired since this + // indicates it is not being used. + new HashSet<>(Collections.singletonList(MonitorErrorResponse.RECREATE)), + null + ); + + String key = "testMonitor"; + NoOpMonitor monitor = monitorService.runIfAbsent( + NoOpMonitor.class, + key, + storageService, + telemetryFactory, + "jdbc:postgresql://somehost/somedb", + "someProtocol", + targetDriverDialect, + dbDialect, + new Properties(), + (connectionService, pluginService) -> new NoOpMonitor(monitorService, 30) + ); + assertNotNull(monitor); + + // need to wait to give time for the monitor executor to start the monitor thread. + TimeUnit.MILLISECONDS.sleep(250); + Monitor removedMonitor = monitorService.remove(NoOpMonitor.class, key); + assertEquals(monitor, removedMonitor); + assertEquals(MonitorState.RUNNING, monitor.getState()); + } + + @Test + public void testStopAndRemove() throws SQLException, InterruptedException { + monitorService.registerMonitorTypeIfAbsent( + NoOpMonitor.class, + TimeUnit.MINUTES.toNanos(1), + TimeUnit.MINUTES.toNanos(1), + // even though we pass a re-create policy, we should not re-create it if the monitor is expired since this + // indicates it is not being used. + new HashSet<>(Collections.singletonList(MonitorErrorResponse.RECREATE)), + null + ); + + String key = "testMonitor"; + NoOpMonitor monitor = monitorService.runIfAbsent( + NoOpMonitor.class, + key, + storageService, + telemetryFactory, + "jdbc:postgresql://somehost/somedb", + "someProtocol", + targetDriverDialect, + dbDialect, + new Properties(), + (connectionService, pluginService) -> new NoOpMonitor(monitorService, 30) + ); + assertNotNull(monitor); + + // need to wait to give time for the monitor executor to start the monitor thread. + TimeUnit.MILLISECONDS.sleep(250); + monitorService.stopAndRemove(NoOpMonitor.class, key); + assertNull(monitorService.get(NoOpMonitor.class, key)); + assertEquals(MonitorState.STOPPED, monitor.getState()); + } + + static class NoOpMonitor extends AbstractMonitor { + protected NoOpMonitor( + MonitorService monitorService, + long terminationTimeoutSec) { + super(terminationTimeoutSec); + } + + @Override + public void monitor() { + // do nothing. 
+ } + } +} diff --git a/wrapper/src/test/java/software/amazon/jdbc/util/storage/ExpirationCacheTest.java b/wrapper/src/test/java/software/amazon/jdbc/util/storage/ExpirationCacheTest.java new file mode 100644 index 000000000..081b79b55 --- /dev/null +++ b/wrapper/src/test/java/software/amazon/jdbc/util/storage/ExpirationCacheTest.java @@ -0,0 +1,115 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc.util.storage; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +import java.util.concurrent.TimeUnit; +import org.junit.jupiter.api.Test; +import org.mockito.Mockito; + +class ExpirationCacheTest { + + @Test + public void testComputeIfAbsent() throws InterruptedException { + ExpirationCache cache = new ExpirationCache<>( + false, + TimeUnit.MILLISECONDS.toNanos(100), + (item) -> true, + DummyItem::close); + String key = "key"; + DummyItem itemSpy = Mockito.spy(new DummyItem()); + DummyItem newDummyItem = new DummyItem(); + assertEquals(itemSpy, cache.computeIfAbsent(key, k -> itemSpy)); + // item is not absent, so the new value should not be stored. + assertEquals(itemSpy, cache.computeIfAbsent(key, k -> newDummyItem)); + + // wait for item to expire. + TimeUnit.MILLISECONDS.sleep(150); + assertEquals(newDummyItem, cache.computeIfAbsent(key, k -> newDummyItem)); + // wait briefly for cache to call the disposal method on the old item. + TimeUnit.MILLISECONDS.sleep(100); + verify(itemSpy, times(1)).close(); + } + + @Test + public void testRenewableExpiration() throws InterruptedException { + ExpirationCache cache = new ExpirationCache<>( + true, + TimeUnit.MILLISECONDS.toNanos(100), + (item) -> true, + DummyItem::close); + String key = "key"; + DummyItem item = new DummyItem(); + cache.put(key, item); + assertEquals(item, cache.get(key)); + + // wait for item to expire. + TimeUnit.MILLISECONDS.sleep(150); + + assertEquals(item, cache.get(key)); + } + + @Test + public void testNonRenewableExpiration() throws InterruptedException { + ExpirationCache cache = new ExpirationCache<>( + false, + TimeUnit.MILLISECONDS.toNanos(100), + (item) -> true, + DummyItem::close); + String key = "key"; + DummyItem itemSpy = Mockito.spy(new DummyItem()); + cache.put(key, itemSpy); + assertEquals(itemSpy, cache.get(key)); + + // wait for item to expire. 
+ TimeUnit.MILLISECONDS.sleep(150); + + assertNull(cache.get(key)); + assertFalse(cache.exists(key)); + + cache.removeExpiredEntries(); + verify(itemSpy, times(1)).close(); + } + + @Test + public void testRemove() { + ExpirationCache<String, DummyItem> cache = new ExpirationCache<>( + true, + TimeUnit.MILLISECONDS.toNanos(100), + (item) -> true, + DummyItem::close); + String key = "key"; + DummyItem itemSpy = Mockito.spy(new DummyItem()); + cache.put(key, itemSpy); + assertEquals(itemSpy, cache.get(key)); + + DummyItem removedItem = cache.remove(key); + assertEquals(itemSpy, removedItem); + verify(itemSpy, times(1)).close(); + } + + static class DummyItem { + protected void close() { + // do nothing. + } + } +} diff --git a/wrapper/src/test/java/software/amazon/jdbc/util/SlidingExpirationCacheTest.java b/wrapper/src/test/java/software/amazon/jdbc/util/storage/SlidingExpirationCacheTest.java similarity index 96% rename from wrapper/src/test/java/software/amazon/jdbc/util/SlidingExpirationCacheTest.java rename to wrapper/src/test/java/software/amazon/jdbc/util/storage/SlidingExpirationCacheTest.java index 8878219c8..26b105c23 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/util/SlidingExpirationCacheTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/util/storage/SlidingExpirationCacheTest.java @@ -14,7 +14,7 @@ * limitations under the License. */ -package software.amazon.jdbc.util; +package software.amazon.jdbc.util.storage; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -34,8 +34,8 @@ import org.mockito.MockitoAnnotations; public class SlidingExpirationCacheTest { - @Mock SlidingExpirationCache.ItemDisposalFunc mockDisposalFunc; - @Mock SlidingExpirationCache.ShouldDisposeFunc mockShouldDisposeFunc; + @Mock ItemDisposalFunc mockDisposalFunc; + @Mock ShouldDisposeFunc mockShouldDisposeFunc; private AutoCloseable closeable; @BeforeEach diff --git a/wrapper/src/test/java/software/amazon/jdbc/util/storage/TestStorageServiceImpl.java b/wrapper/src/test/java/software/amazon/jdbc/util/storage/TestStorageServiceImpl.java new file mode 100644 index 000000000..bcda6beaa --- /dev/null +++ b/wrapper/src/test/java/software/amazon/jdbc/util/storage/TestStorageServiceImpl.java @@ -0,0 +1,33 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package software.amazon.jdbc.util.storage; + +import software.amazon.jdbc.util.events.EventPublisher; + +/** + * A StorageServiceImpl that doesn't submit a cleanup thread. This is useful for testing purposes. + */ +public class TestStorageServiceImpl extends StorageServiceImpl { + public TestStorageServiceImpl(EventPublisher publisher) { + super(publisher); + } + + @Override + protected void initCleanupThread(long cleanupIntervalNanos) { + // do nothing + } +}